From cfa1e01ecf516511bfc0fa87f96aa19bc6ee401f Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 1 Feb 2022 19:06:26 +0100 Subject: net: ieee802154: ca8210: Fix lifs/sifs periods These periods are expressed in time units (microseconds) while 40 and 12 are the number of symbol durations these periods will last. We need to multiply them both with the symbol_duration in order to get these values in microseconds. Fixes: ded845a781a5 ("ieee802154: Add CA8210 IEEE 802.15.4 device driver") Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/r/20220201180629.93410-2-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/ca8210.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index f3438d3e104a..2bc730fd260e 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2975,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; - ca8210_hw->phy->lifs_period = 40; - ca8210_hw->phy->sifs_period = 12; + ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; + ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | -- cgit v1.2.3 From 731cddce6dd110fb2cdee34eddb48599e7251517 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 1 Feb 2022 19:06:27 +0100 Subject: net: mac802154: Convert the symbol duration into nanoseconds Tdsym is often given in the spec as pretty small numbers in microseconds and hence was reflected in the code as symbol_duration and was stored as a u8. Actually, for UWB PHYs, the symbol duration is given in nanoseconds and are as precise as picoseconds. In order to handle better these PHYs, change the type of symbol_duration to u32 and store this value in nanoseconds. All the users of this variable are updated in a mechanical way. 
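To make the unit change concrete, here is the resulting arithmetic for the common 2.4 GHz O-QPSK case used throughout this series (Tdsym = 16 us, LIFS = 40 symbols, SIFS = 12 symbols). This is only an illustrative sketch of the formulas introduced in the hunks below, not an extra hunk:

    /* Tdsym is now stored in nanoseconds */
    phy->symbol_duration = 16 * NSEC_PER_USEC;                       /* 16000 ns */
    phy->lifs_period = (40 * phy->symbol_duration) / NSEC_PER_USEC;  /* 640 us   */
    phy->sifs_period = (12 * phy->symbol_duration) / NSEC_PER_USEC;  /* 192 us   */

The lifs_period and sifs_period fields stay in microseconds (u16), which is why the nanosecond symbol duration is divided back down after the multiplication.
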
Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220201180629.93410-3-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/at86rf230.c | 22 +++++++++++----------- drivers/net/ieee802154/atusb.c | 22 +++++++++++----------- drivers/net/ieee802154/ca8210.c | 2 +- drivers/net/ieee802154/mcr20a.c | 8 ++++---- include/net/cfg802154.h | 4 ++-- net/mac802154/main.c | 8 ++++---- 6 files changed, 33 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 4f5ef8a9a9a8..0264e43a1080 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1075,24 +1075,24 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) if (channel == 0) { if (page == 0) { /* SUB:0 and BPSK:0 -> BPSK-20 */ - lp->hw->phy->symbol_duration = 50; + lp->hw->phy->symbol_duration = 50 * NSEC_PER_USEC; } else { /* SUB:1 and BPSK:0 -> BPSK-40 */ - lp->hw->phy->symbol_duration = 25; + lp->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; } } else { if (page == 0) /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ - lp->hw->phy->symbol_duration = 40; + lp->hw->phy->symbol_duration = 40 * NSEC_PER_USEC; else /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ - lp->hw->phy->symbol_duration = 16; + lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; } - lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD * - lp->hw->phy->symbol_duration; - lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD * - lp->hw->phy->symbol_duration; + lp->hw->phy->lifs_period = + (IEEE802154_LIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; + lp->hw->phy->sifs_period = + (IEEE802154_SIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } @@ -1569,7 +1569,7 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->data = &at86rf231_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 11; - lp->hw->phy->symbol_duration = 16; + lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; lp->hw->phy->supported.tx_powers = at86rf231_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers); lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels; @@ -1582,7 +1582,7 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.channels[0] = 0x00007FF; lp->hw->phy->supported.channels[2] = 0x00007FF; lp->hw->phy->current_channel = 5; - lp->hw->phy->symbol_duration = 25; + lp->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; lp->hw->phy->supported.tx_powers = at86rf212_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); @@ -1594,7 +1594,7 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->data = &at86rf233_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 13; - lp->hw->phy->symbol_duration = 16; + lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; lp->hw->phy->supported.tx_powers = at86rf233_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers); lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels; diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 07bafbf94680..abe27d1c51ca 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -625,24 +625,24 @@ static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) if (channel == 0) { if (page == 0) { 
/* SUB:0 and BPSK:0 -> BPSK-20 */ - lp->hw->phy->symbol_duration = 50; + lp->hw->phy->symbol_duration = 50 * NSEC_PER_USEC; } else { /* SUB:1 and BPSK:0 -> BPSK-40 */ - lp->hw->phy->symbol_duration = 25; + lp->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; } } else { if (page == 0) /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ - lp->hw->phy->symbol_duration = 40; + lp->hw->phy->symbol_duration = 40 * NSEC_PER_USEC; else /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ - lp->hw->phy->symbol_duration = 16; + lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; } - lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD * - lp->hw->phy->symbol_duration; - lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD * - lp->hw->phy->symbol_duration; + lp->hw->phy->lifs_period = + (IEEE802154_LIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; + lp->hw->phy->sifs_period = + (IEEE802154_SIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; return atusb_write_subreg(lp, SR_CHANNEL, channel); } @@ -869,7 +869,7 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) chip = "AT86RF230"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ - atusb->hw->phy->symbol_duration = 16; + atusb->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; @@ -879,7 +879,7 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) chip = "AT86RF231"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ - atusb->hw->phy->symbol_duration = 16; + atusb->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; @@ -891,7 +891,7 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) atusb->hw->phy->supported.channels[0] = 0x00007FF; atusb->hw->phy->supported.channels[2] = 0x00007FF; atusb->hw->phy->current_channel = 5; - atusb->hw->phy->symbol_duration = 25; + atusb->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; atusb->hw->phy->supported.tx_powers = at86rf212_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 2bc730fd260e..e88cdbf6673b 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2974,7 +2974,7 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.mode = NL802154_CCA_ENERGY_CARRIER; ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; - ca8210_hw->phy->symbol_duration = 16; + ca8210_hw->phy->symbol_duration = 16 * NSEC_PER_USEC; ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 383231b85464..c925e629ddf3 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -975,9 +975,9 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) dev_dbg(printdev(lp), "%s\n", __func__); - phy->symbol_duration = 16; - phy->lifs_period = 40 * phy->symbol_duration; - phy->sifs_period = 12 * phy->symbol_duration; + 
phy->symbol_duration = 16 * NSEC_PER_USEC; + phy->lifs_period = (40 * phy->symbol_duration) / NSEC_PER_USEC; + phy->sifs_period = (12 * phy->symbol_duration) / NSEC_PER_USEC; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | @@ -1006,7 +1006,7 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) phy->current_page = 0; /* MCR20A default reset value */ phy->current_channel = 20; - phy->symbol_duration = 16; + phy->symbol_duration = 16 * NSEC_PER_USEC; phy->supported.tx_powers = mcr20a_powers; phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers); phy->cca_ed_level = phy->supported.cca_ed_levels[75]; diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 833672d6fbe4..92d4f178caca 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -203,8 +203,8 @@ struct wpan_phy { /* PHY depended MAC PIB values */ - /* 802.15.4 acronym: Tdsym in usec */ - u8 symbol_duration; + /* 802.15.4 acronym: Tdsym in nsec */ + u32 symbol_duration; /* lifs and sifs periods timing */ u16 lifs_period; u16 sifs_period; diff --git a/net/mac802154/main.c b/net/mac802154/main.c index 520cedc594e1..53153367f9d0 100644 --- a/net/mac802154/main.c +++ b/net/mac802154/main.c @@ -131,10 +131,10 @@ static void ieee802154_setup_wpan_phy_pib(struct wpan_phy *wpan_phy) * Should be done when all drivers sets this value. */ - wpan_phy->lifs_period = IEEE802154_LIFS_PERIOD * - wpan_phy->symbol_duration; - wpan_phy->sifs_period = IEEE802154_SIFS_PERIOD * - wpan_phy->symbol_duration; + wpan_phy->lifs_period = + (IEEE802154_LIFS_PERIOD * wpan_phy->symbol_duration) / 1000; + wpan_phy->sifs_period = + (IEEE802154_SIFS_PERIOD * wpan_phy->symbol_duration) / 1000; } int ieee802154_register_hw(struct ieee802154_hw *hw) -- cgit v1.2.3 From b8e508f42138b42b44f65679d02aad3c200aff68 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 1 Feb 2022 19:06:29 +0100 Subject: net: ieee802154: Drop duration settings when the core does it already The core now knows how to set the symbol duration in a few cases, when drivers correctly advertise the protocols used on each channel. For these drivers, there is no more need to bother with symbol duration, lifs and sifs periods so just drop the duplicated code. Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220201180629.93410-5-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/at86rf230.c | 33 --------------------------------- drivers/net/ieee802154/atusb.c | 33 --------------------------------- drivers/net/ieee802154/ca8210.c | 3 --- drivers/net/ieee802154/mcr20a.c | 5 ----- 4 files changed, 74 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 0264e43a1080..563031ce76f0 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1064,36 +1064,6 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) if (rc < 0) return rc; - /* This sets the symbol_duration according frequency on the 212. - * TODO move this handling while set channel and page in cfg802154. - * We can do that, this timings are according 802.15.4 standard. - * If we do that in cfg802154, this is a more generic calculation. - * - * This should also protected from ifs_timer. Means cancel timer and - * init with a new value. For now, this is okay. 
- */ - if (channel == 0) { - if (page == 0) { - /* SUB:0 and BPSK:0 -> BPSK-20 */ - lp->hw->phy->symbol_duration = 50 * NSEC_PER_USEC; - } else { - /* SUB:1 and BPSK:0 -> BPSK-40 */ - lp->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; - } - } else { - if (page == 0) - /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ - lp->hw->phy->symbol_duration = 40 * NSEC_PER_USEC; - else - /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ - lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; - } - - lp->hw->phy->lifs_period = - (IEEE802154_LIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; - lp->hw->phy->sifs_period = - (IEEE802154_SIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; - return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } @@ -1569,7 +1539,6 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->data = &at86rf231_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 11; - lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; lp->hw->phy->supported.tx_powers = at86rf231_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers); lp->hw->phy->supported.cca_ed_levels = at86rf231_ed_levels; @@ -1582,7 +1551,6 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.channels[0] = 0x00007FF; lp->hw->phy->supported.channels[2] = 0x00007FF; lp->hw->phy->current_channel = 5; - lp->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; lp->hw->phy->supported.tx_powers = at86rf212_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); @@ -1594,7 +1562,6 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->data = &at86rf233_data; lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 13; - lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; lp->hw->phy->supported.tx_powers = at86rf233_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers); lp->hw->phy->supported.cca_ed_levels = at86rf233_ed_levels; diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index abe27d1c51ca..9b41c7654f6b 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -614,36 +614,6 @@ static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) if (rc < 0) return rc; - /* This sets the symbol_duration according frequency on the 212. - * TODO move this handling while set channel and page in cfg802154. - * We can do that, this timings are according 802.15.4 standard. - * If we do that in cfg802154, this is a more generic calculation. - * - * This should also protected from ifs_timer. Means cancel timer and - * init with a new value. For now, this is okay. 
- */ - if (channel == 0) { - if (page == 0) { - /* SUB:0 and BPSK:0 -> BPSK-20 */ - lp->hw->phy->symbol_duration = 50 * NSEC_PER_USEC; - } else { - /* SUB:1 and BPSK:0 -> BPSK-40 */ - lp->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; - } - } else { - if (page == 0) - /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ - lp->hw->phy->symbol_duration = 40 * NSEC_PER_USEC; - else - /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ - lp->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; - } - - lp->hw->phy->lifs_period = - (IEEE802154_LIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; - lp->hw->phy->sifs_period = - (IEEE802154_SIFS_PERIOD * lp->hw->phy->symbol_duration) / 1000; - return atusb_write_subreg(lp, SR_CHANNEL, channel); } @@ -869,7 +839,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) chip = "AT86RF230"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ - atusb->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; @@ -879,7 +848,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) chip = "AT86RF231"; atusb->hw->phy->supported.channels[0] = 0x7FFF800; atusb->hw->phy->current_channel = 11; /* reset default */ - atusb->hw->phy->symbol_duration = 16 * NSEC_PER_USEC; atusb->hw->phy->supported.tx_powers = atusb_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); hw->phy->supported.cca_ed_levels = atusb_ed_levels; @@ -891,7 +859,6 @@ static int atusb_get_and_conf_chip(struct atusb *atusb) atusb->hw->phy->supported.channels[0] = 0x00007FF; atusb->hw->phy->supported.channels[2] = 0x00007FF; atusb->hw->phy->current_channel = 5; - atusb->hw->phy->symbol_duration = 25 * NSEC_PER_USEC; atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; atusb->hw->phy->supported.tx_powers = at86rf212_powers; atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index e88cdbf6673b..fc74fa0f1ddd 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2974,9 +2974,6 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.mode = NL802154_CCA_ENERGY_CARRIER; ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; - ca8210_hw->phy->symbol_duration = 16 * NSEC_PER_USEC; - ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; - ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index c925e629ddf3..2b4ba3f9ecd7 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -975,10 +975,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) dev_dbg(printdev(lp), "%s\n", __func__); - phy->symbol_duration = 16 * NSEC_PER_USEC; - phy->lifs_period = (40 * phy->symbol_duration) / NSEC_PER_USEC; - phy->sifs_period = (12 * phy->symbol_duration) / NSEC_PER_USEC; - hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; @@ -1006,7 +1002,6 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) phy->current_page = 0; /* MCR20A default reset value */ phy->current_channel = 20; - phy->symbol_duration = 16 * NSEC_PER_USEC; 
phy->supported.tx_powers = mcr20a_powers; phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers); phy->cca_ed_level = phy->supported.cca_ed_levels[75]; -- cgit v1.2.3 From ba9177fcef21fa98406e73c472b5ac2eb4ec5f31 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Mon, 14 Mar 2022 07:18:15 +0200 Subject: ath11k: Add basic WoW functionalities Implement basic WoW functionalities such as magic-packet, disconnect and pattern. The logic is very similar to ath10k. When WoW is configured, ath11k_core_suspend and ath11k_core_resume are skipped as WoW configuration and hif suspend/resume are done in ath11k_wow_op_suspend() and ath11k_wow_op_resume(). Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1 Signed-off-by: Carl Huang Signed-off-by: Baochen Qiang Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1644308006-22784-2-git-send-email-quic_cjhuang@quicinc.com --- drivers/net/wireless/ath/ath11k/Makefile | 4 +- drivers/net/wireless/ath/ath11k/core.c | 33 +++ drivers/net/wireless/ath/ath11k/core.h | 4 + drivers/net/wireless/ath/ath11k/htc.c | 6 + drivers/net/wireless/ath/ath11k/mac.c | 59 +++-- drivers/net/wireless/ath/ath11k/mac.h | 1 + drivers/net/wireless/ath/ath11k/wmi.c | 158 ++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 76 +++++- drivers/net/wireless/ath/ath11k/wow.c | 414 +++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wow.h | 45 ++++ 10 files changed, 781 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index c1fce4159f1f..444c00c2bce2 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -16,14 +16,14 @@ ath11k-y += core.o \ ce.o \ peer.o \ dbring.o \ - hw.o \ - wow.o + hw.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o ath11k-$(CONFIG_ATH11K_TRACING) += trace.o ath11k-$(CONFIG_THERMAL) += thermal.o ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o +ath11k-$(CONFIG_PM) += wow.o obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o ath11k_ahb-y += ahb.o diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 71eb7d04c3bf..104149050205 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -428,13 +428,30 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { }, }; +static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab) +{ + WARN_ON(!ab->hw_params.single_pdev_only); + + return &ab->pdevs[0]; +} + int ath11k_core_suspend(struct ath11k_base *ab) { int ret; + struct ath11k_pdev *pdev; + struct ath11k *ar; if (!ab->hw_params.supports_suspend) return -EOPNOTSUPP; + /* so far single_pdev_only chips have supports_suspend as true + * and only the first pdev is valid. + */ + pdev = ath11k_core_get_single_pdev(ab); + ar = pdev->ar; + if (!ar || ar->state != ATH11K_STATE_OFF) + return 0; + /* TODO: there can frames in queues so for now add delay as a hack. * Need to implement to handle and remove this delay. 
*/ @@ -447,6 +464,12 @@ int ath11k_core_suspend(struct ath11k_base *ab) return ret; } + ret = ath11k_mac_wait_tx_complete(ar); + if (ret) { + ath11k_warn(ab, "failed to wait tx complete: %d\n", ret); + return ret; + } + ret = ath11k_wow_enable(ab); if (ret) { ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret); @@ -479,10 +502,20 @@ EXPORT_SYMBOL(ath11k_core_suspend); int ath11k_core_resume(struct ath11k_base *ab) { int ret; + struct ath11k_pdev *pdev; + struct ath11k *ar; if (!ab->hw_params.supports_suspend) return -EOPNOTSUPP; + /* so far signle_pdev_only chips have supports_suspend as true + * and only the first pdev is valid. + */ + pdev = ath11k_core_get_single_pdev(ab); + ar = pdev->ar; + if (!ar || ar->state != ATH11K_STATE_OFF) + return 0; + ret = ath11k_hif_resume(ab); if (ret) { ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index c0228e91a596..7b658d4c7d19 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -23,6 +23,7 @@ #include "thermal.h" #include "dbring.h" #include "spectral.h" +#include "wow.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -590,6 +591,9 @@ struct ath11k { struct work_struct wmi_mgmt_tx_work; struct sk_buff_head wmi_mgmt_tx_queue; + struct ath11k_wow wow; + struct completion target_suspend; + bool target_suspend_ack; struct ath11k_per_peer_tx_stats peer_tx_stats; struct list_head ppdu_stats_info; u32 ppdu_stat_list_depth; diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c index 6913b7494b9b..069c29a4fac7 100644 --- a/drivers/net/wireless/ath/ath11k/htc.c +++ b/drivers/net/wireless/ath/ath11k/htc.c @@ -272,6 +272,11 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab, ep_tx_complete(htc->ab, skb); } +static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab) +{ + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot wakeup from suspend is received\n"); +} + void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, struct sk_buff *skb) { @@ -376,6 +381,7 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, ath11k_htc_suspend_complete(ab, false); break; case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + ath11k_htc_wakeup_from_suspend(ab); break; default: ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n", diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index d5b83f90d27a..a58e56ca9a99 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -16,6 +16,8 @@ #include "testmode.h" #include "peer.h" #include "debugfs_sta.h" +#include "hif.h" +#include "wow.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ @@ -7258,31 +7260,47 @@ static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) return -EOPNOTSUPP; } -static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u32 queues, bool drop) +static int ath11k_mac_flush_tx_complete(struct ath11k *ar) { - struct ath11k *ar = hw->priv; long time_left; - - if (drop) - return; + int ret = 0; time_left = wait_event_timeout(ar->dp.tx_empty_waitq, (atomic_read(&ar->dp.num_tx_pending) == 0), ATH11K_FLUSH_TIMEOUT); - if (time_left == 0) - ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left); + if (time_left == 0) { + ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n", + 
atomic_read(&ar->dp.num_tx_pending)); + ret = -ETIMEDOUT; + } time_left = wait_event_timeout(ar->txmgmt_empty_waitq, (atomic_read(&ar->num_pending_mgmt_tx) == 0), ATH11K_FLUSH_TIMEOUT); - if (time_left == 0) - ath11k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n", - time_left); + if (time_left == 0) { + ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n", + atomic_read(&ar->num_pending_mgmt_tx)); + ret = -ETIMEDOUT; + } - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, - "mac mgmt tx flush mgmt pending %d\n", - atomic_read(&ar->num_pending_mgmt_tx)); + return ret; +} + +int ath11k_mac_wait_tx_complete(struct ath11k *ar) +{ + ath11k_mac_drain_tx(ar); + return ath11k_mac_flush_tx_complete(ar); +} + +static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct ath11k *ar = hw->priv; + + if (drop) + return; + + ath11k_mac_flush_tx_complete(ar); } static int @@ -8104,6 +8122,13 @@ static const struct ieee80211_ops ath11k_ops = { .flush = ath11k_mac_op_flush, .sta_statistics = ath11k_mac_op_sta_statistics, CFG80211_TESTMODE_CMD(ath11k_tm_cmd) + +#ifdef CONFIG_PM + .suspend = ath11k_wow_op_suspend, + .resume = ath11k_wow_op_resume, + .set_wakeup = ath11k_wow_op_set_wakeup, +#endif + #ifdef CONFIG_ATH11K_DEBUGFS .sta_add_debugfs = ath11k_debugfs_sta_op_add, #endif @@ -8473,6 +8498,12 @@ static int __ath11k_mac_register(struct ath11k *ar) NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; } + ret = ath11k_wow_init(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to init wow: %d\n", ret); + goto err_free_if_combs; + } + ar->hw->queues = ATH11K_HW_MAX_QUEUES; ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN; ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1; diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 0e6c870b09c8..045bf4fe1706 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -172,4 +172,5 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher); void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif); +int ath11k_mac_wait_tx_complete(struct ath11k *ar); #endif diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index b4f86c45d81f..25222293f162 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -8235,3 +8235,161 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID); } + +int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable) +{ + struct wmi_wow_add_del_event_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->is_add = enable; + cmd->event_bitmap = (1 << event); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", + wow_wakeup_event(event), enable, vdev_id); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); +} + +int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 
vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset) +{ + struct wmi_wow_add_pattern_cmd *cmd; + struct wmi_wow_bitmap_pattern *bitmap; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *ptr; + size_t len; + + len = sizeof(*cmd) + + sizeof(*tlv) + /* array struct */ + sizeof(*bitmap) + /* bitmap */ + sizeof(*tlv) + /* empty ipv4 sync */ + sizeof(*tlv) + /* empty ipv6 sync */ + sizeof(*tlv) + /* empty magic */ + sizeof(*tlv) + /* empty info timeout */ + sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + /* cmd */ + ptr = (u8 *)skb->data; + cmd = (struct wmi_wow_add_pattern_cmd *)ptr; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_WOW_ADD_PATTERN_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->pattern_id = pattern_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + + ptr += sizeof(*cmd); + + /* bitmap */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap)); + + ptr += sizeof(*tlv); + + bitmap = (struct wmi_wow_bitmap_pattern *)ptr; + bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_WOW_BITMAP_PATTERN_T) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE); + + memcpy(bitmap->patternbuf, pattern, pattern_len); + ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4)); + memcpy(bitmap->bitmaskbuf, mask, pattern_len); + ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4)); + bitmap->pattern_offset = pattern_offset; + bitmap->pattern_len = pattern_len; + bitmap->bitmask_len = pattern_len; + bitmap->pattern_id = pattern_id; + + ptr += sizeof(*bitmap); + + /* ipv4 sync */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* ipv6 sync */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* magic */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* pattern info timeout */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_UINT32) | + FIELD_PREP(WMI_TLV_LEN, 0); + + ptr += sizeof(*tlv); + + /* ratelimit interval */ + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_UINT32) | + FIELD_PREP(WMI_TLV_LEN, sizeof(u32)); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n", + vdev_id, pattern_id, pattern_offset); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); +} + +int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id) +{ + struct wmi_wow_del_pattern_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_WOW_DEL_PATTERN_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->pattern_id = pattern_id; + cmd->pattern_type = WOW_BITMAP_PATTERN; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n", + vdev_id, pattern_id); + + 
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 587f42307250..b82f432a3d95 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -5534,6 +5534,45 @@ static inline const char *wow_reason(enum wmi_wow_wake_reason reason) #undef C2S +struct wmi_wow_ev_arg { + u32 vdev_id; + u32 flag; + enum wmi_wow_wake_reason wake_reason; + u32 data_len; +}; + +enum wmi_tlv_pattern_type { + WOW_PATTERN_MIN = 0, + WOW_BITMAP_PATTERN = WOW_PATTERN_MIN, + WOW_IPV4_SYNC_PATTERN, + WOW_IPV6_SYNC_PATTERN, + WOW_WILD_CARD_PATTERN, + WOW_TIMER_PATTERN, + WOW_MAGIC_PATTERN, + WOW_IPV6_RA_PATTERN, + WOW_IOAC_PKT_PATTERN, + WOW_IOAC_TMR_PATTERN, + WOW_PATTERN_MAX +}; + +#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148 +#define WOW_DEFAULT_BITMASK_SIZE 148 + +#define WOW_MIN_PATTERN_SIZE 1 +#define WOW_MAX_PATTERN_SIZE 148 +#define WOW_MAX_PKT_OFFSET 128 +#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \ + sizeof(struct rfc1042_hdr)) +#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \ + offsetof(struct ieee80211_hdr_3addr, addr1)) + +struct wmi_wow_add_del_event_cmd { + u32 tlv_header; + u32 vdev_id; + u32 is_add; + u32 event_bitmap; +} __packed; + struct wmi_wow_enable_cmd { u32 tlv_header; u32 enable; @@ -5546,12 +5585,36 @@ struct wmi_wow_host_wakeup_ind { u32 reserved; } __packed; -struct wmi_wow_ev_arg { +struct wmi_tlv_wow_event_info { u32 vdev_id; u32 flag; - enum wmi_wow_wake_reason wake_reason; + u32 wake_reason; u32 data_len; -}; +} __packed; + +struct wmi_wow_bitmap_pattern { + u32 tlv_header; + u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE]; + u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE]; + u32 pattern_offset; + u32 pattern_len; + u32 bitmask_len; + u32 pattern_id; +} __packed; + +struct wmi_wow_add_pattern_cmd { + u32 tlv_header; + u32 vdev_id; + u32 pattern_id; + u32 pattern_type; +} __packed; + +struct wmi_wow_del_pattern_cmd { + u32 tlv_header; + u32 vdev_id; + u32 pattern_id; + u32 pattern_type; +} __packed; int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id); @@ -5714,4 +5777,11 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, const u8 mac_addr[ETH_ALEN]); int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap, struct ath11k_fw_dbglog *dbglog); +int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id); +int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset); +int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable); #endif diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 43c62e99dd0e..837dab469ab2 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -6,12 +6,22 @@ #include #include "mac.h" + +#include #include "core.h" #include "hif.h" #include "debug.h" #include "wmi.h" #include "wow.h" +static const struct wiphy_wowlan_support ath11k_wowlan_support = { + .flags = WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_MAGIC_PKT, + .pattern_min_len = WOW_MIN_PATTERN_SIZE, + .pattern_max_len = WOW_MAX_PATTERN_SIZE, + .max_pkt_offset = WOW_MAX_PKT_OFFSET, +}; + int ath11k_wow_enable(struct ath11k_base *ab) { struct ath11k *ar = ath11k_ab_to_ar(ab, 0); @@ -71,3 +81,407 @@ int ath11k_wow_wakeup(struct ath11k_base 
*ab) return 0; } + +static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + int i, ret; + + for (i = 0; i < WOW_EVENT_MAX; i++) { + ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0); + if (ret) { + ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + for (i = 0; i < ar->wow.max_num_patterns; i++) { + ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i); + if (ret) { + ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n", + i, arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath11k_wow_cleanup(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath11k_wow_vif_cleanup(arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +/* Convert a 802.3 format to a 802.11 format. + * +------------+-----------+--------+----------------+ + * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | + * +------------+-----------+--------+----------------+ + * |__ |_______ |____________ |________ + * | | | | + * +--+------------+----+-----------+---------------+-----------+ + * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... | + * +--+------------+----+-----------+---------------+-----------+ + */ +static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, + const struct cfg80211_pkt_pattern *old) +{ + u8 hdr_8023_pattern[ETH_HLEN] = {}; + u8 hdr_8023_bit_mask[ETH_HLEN] = {}; + u8 hdr_80211_pattern[WOW_HDR_LEN] = {}; + u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {}; + + int total_len = old->pkt_offset + old->pattern_len; + int hdr_80211_end_offset; + + struct ieee80211_hdr_3addr *new_hdr_pattern = + (struct ieee80211_hdr_3addr *)hdr_80211_pattern; + struct ieee80211_hdr_3addr *new_hdr_mask = + (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask; + struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern; + struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask; + int hdr_len = sizeof(*new_hdr_pattern); + + struct rfc1042_hdr *new_rfc_pattern = + (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len); + struct rfc1042_hdr *new_rfc_mask = + (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len); + int rfc_len = sizeof(*new_rfc_pattern); + + memcpy(hdr_8023_pattern + old->pkt_offset, + old->pattern, ETH_HLEN - old->pkt_offset); + memcpy(hdr_8023_bit_mask + old->pkt_offset, + old->mask, ETH_HLEN - old->pkt_offset); + + /* Copy destination address */ + memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN); + memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN); + + /* Copy source address */ + memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN); + memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN); + + /* Copy logic link type */ + memcpy(&new_rfc_pattern->snap_type, + &old_hdr_pattern->h_proto, + sizeof(old_hdr_pattern->h_proto)); + memcpy(&new_rfc_mask->snap_type, + &old_hdr_mask->h_proto, + sizeof(old_hdr_mask->h_proto)); + + /* Compute new pkt_offset */ + if (old->pkt_offset < ETH_ALEN) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr1); + else if (old->pkt_offset < offsetof(struct ethhdr, h_proto)) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr3) - + 
offsetof(struct ethhdr, h_source); + else + new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN; + + /* Compute new hdr end offset */ + if (total_len > ETH_HLEN) + hdr_80211_end_offset = hdr_len + rfc_len; + else if (total_len > offsetof(struct ethhdr, h_proto)) + hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN; + else if (total_len > ETH_ALEN) + hdr_80211_end_offset = total_len - ETH_ALEN + + offsetof(struct ieee80211_hdr_3addr, addr3); + else + hdr_80211_end_offset = total_len + + offsetof(struct ieee80211_hdr_3addr, addr1); + + new->pattern_len = hdr_80211_end_offset - new->pkt_offset; + + memcpy((u8 *)new->pattern, + hdr_80211_pattern + new->pkt_offset, + new->pattern_len); + memcpy((u8 *)new->mask, + hdr_80211_bit_mask + new->pkt_offset, + new->pattern_len); + + if (total_len > ETH_HLEN) { + /* Copy frame body */ + memcpy((u8 *)new->pattern + new->pattern_len, + (void *)old->pattern + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + memcpy((u8 *)new->mask + new->pattern_len, + (void *)old->mask + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + + new->pattern_len += total_len - ETH_HLEN; + } +} + +static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif, + struct cfg80211_wowlan *wowlan) +{ + int ret, i; + unsigned long wow_mask = 0; + struct ath11k *ar = arvif->ar; + const struct cfg80211_pkt_pattern *patterns = wowlan->patterns; + int pattern_id = 0; + + /* Setup requested WOW features */ + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_IBSS: + __set_bit(WOW_BEACON_EVENT, &wow_mask); + fallthrough; + case WMI_VDEV_TYPE_AP: + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask); + __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask); + __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask); + __set_bit(WOW_HTT_EVENT, &wow_mask); + __set_bit(WOW_RA_MATCH_EVENT, &wow_mask); + break; + case WMI_VDEV_TYPE_STA: + if (wowlan->disconnect) { + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_BMISS_EVENT, &wow_mask); + __set_bit(WOW_CSA_IE_EVENT, &wow_mask); + } + + if (wowlan->magic_pkt) + __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); + break; + default: + break; + } + + for (i = 0; i < wowlan->n_patterns; i++) { + u8 bitmask[WOW_MAX_PATTERN_SIZE] = {}; + u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {}; + u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {}; + struct cfg80211_pkt_pattern new_pattern = {}; + struct cfg80211_pkt_pattern old_pattern = patterns[i]; + int j; + + new_pattern.pattern = ath_pattern; + new_pattern.mask = ath_bitmask; + if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE) + continue; + /* convert bytemask to bitmask */ + for (j = 0; j < patterns[i].pattern_len; j++) + if (patterns[i].mask[j / 8] & BIT(j % 8)) + bitmask[j] = 0xff; + old_pattern.mask = bitmask; + + if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode == + ATH11K_HW_TXRX_NATIVE_WIFI) { + if (patterns[i].pkt_offset < ETH_HLEN) { + u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {}; + + memcpy(pattern_ext, old_pattern.pattern, + old_pattern.pattern_len); + old_pattern.pattern = pattern_ext; + ath11k_wow_convert_8023_to_80211(&new_pattern, + &old_pattern); + } else { + new_pattern = old_pattern; + new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; + } + } + + if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; + + ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id, + pattern_id, + new_pattern.pattern, + new_pattern.mask, + 
new_pattern.pattern_len, + new_pattern.pkt_offset); + if (ret) { + ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n", + pattern_id, + arvif->vdev_id, ret); + return ret; + } + + pattern_id++; + __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask); + } + + for (i = 0; i < WOW_EVENT_MAX; i++) { + if (!test_bit(i, &wow_mask)) + continue; + ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1); + if (ret) { + ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath11k_wow_set_wakeups(struct ath11k *ar, + struct cfg80211_wowlan *wowlan) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath11k_vif_wow_set_wakeups(arvif, wowlan); + if (ret) { + ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +int ath11k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct ath11k *ar = hw->priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + ret = ath11k_wow_cleanup(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n", + ret); + goto exit; + } + + ret = ath11k_wow_set_wakeups(ar, wowlan); + if (ret) { + ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n", + ret); + goto cleanup; + } + + ret = ath11k_mac_wait_tx_complete(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret); + goto cleanup; + } + + ret = ath11k_wow_enable(ar->ab); + if (ret) { + ath11k_warn(ar->ab, "failed to start wow: %d\n", ret); + goto cleanup; + } + + ath11k_ce_stop_shadow_timers(ar->ab); + ath11k_dp_stop_shadow_timers(ar->ab); + + ath11k_hif_irq_disable(ar->ab); + ath11k_hif_ce_irq_disable(ar->ab); + + ret = ath11k_hif_suspend(ar->ab); + if (ret) { + ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret); + goto wakeup; + } + + goto exit; + +wakeup: + ath11k_wow_wakeup(ar->ab); + +cleanup: + ath11k_wow_cleanup(ar); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret ? 
1 : 0; +} + +void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct ath11k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + device_set_wakeup_enable(ar->ab->dev, enabled); + mutex_unlock(&ar->conf_mutex); +} + +int ath11k_wow_op_resume(struct ieee80211_hw *hw) +{ + struct ath11k *ar = hw->priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + ret = ath11k_hif_resume(ar->ab); + if (ret) { + ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret); + goto exit; + } + + ath11k_hif_ce_irq_enable(ar->ab); + ath11k_hif_irq_enable(ar->ab); + + ret = ath11k_wow_wakeup(ar->ab); + if (ret) + ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret); + +exit: + if (ret) { + switch (ar->state) { + case ATH11K_STATE_ON: + ar->state = ATH11K_STATE_RESTARTING; + ret = 1; + break; + case ATH11K_STATE_OFF: + case ATH11K_STATE_RESTARTING: + case ATH11K_STATE_RESTARTED: + case ATH11K_STATE_WEDGED: + ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n", + ar->state); + ret = -EIO; + break; + } + } + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +int ath11k_wow_init(struct ath11k *ar) +{ + if (WARN_ON(!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))) + return -EINVAL; + + ar->wow.wowlan_support = ath11k_wowlan_support; + + if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode == + ATH11K_HW_TXRX_NATIVE_WIFI) { + ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; + ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; + } + + ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS; + ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; + ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; + + device_set_wakeup_capable(ar->ab->dev, true); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h index dabc4ee63cf6..553ba850d910 100644 --- a/drivers/net/wireless/ath/ath11k/wow.h +++ b/drivers/net/wireless/ath/ath11k/wow.h @@ -3,8 +3,53 @@ * Copyright (c) 2020 The Linux Foundation. All rights reserved. */ +#ifndef _WOW_H_ +#define _WOW_H_ + +struct ath11k_wow { + u32 max_num_patterns; + struct completion wakeup_completed; + struct wiphy_wowlan_support wowlan_support; +}; + +struct rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 snap_type; +} __packed; + #define ATH11K_WOW_RETRY_NUM 3 #define ATH11K_WOW_RETRY_WAIT_MS 200 +#define ATH11K_WOW_PATTERNS 22 +#ifdef CONFIG_PM + +int ath11k_wow_init(struct ath11k *ar); +int ath11k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int ath11k_wow_op_resume(struct ieee80211_hw *hw); +void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); int ath11k_wow_enable(struct ath11k_base *ab); int ath11k_wow_wakeup(struct ath11k_base *ab); + +#else + +static inline int ath11k_wow_init(struct ath11k *ar) +{ + return 0; +} + +static inline int ath11k_wow_enable(struct ath11k_base *ab) +{ + return 0; +} + +static inline int ath11k_wow_wakeup(struct ath11k_base *ab) +{ + return 0; +} + +#endif /* CONFIG_PM */ +#endif /* _WOW_H_ */ -- cgit v1.2.3 From fec4b898f369a9b9d516f7bfc459eb4a8c5ceb2c Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Mon, 14 Mar 2022 07:18:16 +0200 Subject: ath11k: Add WoW net-detect functionality Implement net-detect feature by setting flag WIPHY_WOWLAN_NET_DETECT if firmware supports this feature. Driver sets the related PNO configuration to firmware before entering WoW and firmware then scans periodically and wakes up host if a specific SSID is found. 
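To illustrate how the scan plans of a net-detect request end up in the firmware command, here is a hypothetical example (the intervals are invented purely for illustration) of the mapping performed by ath11k_wmi_pno_check_and_convert() in the wow.c hunk below:

    /* Hypothetical nd_config:
     *   scan_plans[0]: interval 30 s, 5 iterations
     *   scan_plans[1]: interval 300 s
     * The conversion below fills the WMI PNO request as:
     */
    pno->fast_scan_period     = 30 * MSEC_PER_SEC;   /* 30000 ms  */
    pno->fast_scan_max_cycles = 5;
    pno->slow_scan_period     = 300 * MSEC_PER_SEC;  /* 300000 ms */

With a single scan plan, its interval is used for both the fast and the slow period and fast_scan_max_cycles is set to 1.
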
Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1 Signed-off-by: Carl Huang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1644308006-22784-3-git-send-email-quic_cjhuang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.h | 1 + drivers/net/wireless/ath/ath11k/mac.c | 12 +++ drivers/net/wireless/ath/ath11k/wmi.c | 154 +++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 169 +++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wow.c | 175 ++++++++++++++++++++++++++++++++- 5 files changed, 510 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 7b658d4c7d19..1f5e977aa976 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -617,6 +617,7 @@ struct ath11k { bool regdom_set_by_user; int hw_rate_code; u8 twt_enabled; + bool nlo_enabled; }; struct ath11k_band_cap { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index a58e56ca9a99..7a6c758b8e45 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8498,6 +8498,18 @@ static int __ath11k_mac_register(struct ath11k *ar) NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; } + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; + ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; + ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; + ar->hw->wiphy->max_sched_scan_plan_interval = + WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; + ar->hw->wiphy->max_sched_scan_plan_iterations = + WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; + ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + } + ret = ath11k_wow_init(ar); if (ret) { ath11k_warn(ar->ab, "failed to init wow: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 25222293f162..d4d34b932e0b 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -8393,3 +8393,157 @@ int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id) return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); } + +static struct sk_buff * +ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar, + u32 vdev_id, + struct wmi_pno_scan_req *pno) +{ + struct nlo_configured_parameters *nlo_list; + struct wmi_wow_nlo_config_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u32 *channel_list; + size_t len, nlo_list_len, channel_list_len; + u8 *ptr; + u32 i; + + len = sizeof(*cmd) + + sizeof(*tlv) + + /* TLV place holder for array of structures + * nlo_configured_parameters(nlo_list) + */ + sizeof(*tlv); + /* TLV place holder for array of uint32 channel_list */ + + channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; + len += channel_list_len; + + nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; + len += nlo_list_len; + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = (u8 *)skb->data; + cmd = (struct wmi_wow_nlo_config_cmd *)ptr; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = pno->vdev_id; + cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN; + + /* current FW does not support min-max range 
for dwell time */ + cmd->active_dwell_time = pno->active_max_time; + cmd->passive_dwell_time = pno->passive_max_time; + + if (pno->do_passive_scan) + cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE; + + cmd->fast_scan_period = pno->fast_scan_period; + cmd->slow_scan_period = pno->slow_scan_period; + cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles; + cmd->delay_start_time = pno->delay_start_time; + + if (pno->enable_pno_scan_randomization) { + cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ; + ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); + ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); + ath11k_ce_byte_swap(cmd->mac_addr.addr, 8); + ath11k_ce_byte_swap(cmd->mac_mask.addr, 8); + } + + ptr += sizeof(*cmd); + + /* nlo_configured_parameters(nlo_list) */ + cmd->no_of_ssids = pno->uc_networks_count; + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, nlo_list_len); + + ptr += sizeof(*tlv); + nlo_list = (struct nlo_configured_parameters *)ptr; + for (i = 0; i < cmd->no_of_ssids; i++) { + tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv)); + + nlo_list[i].ssid.valid = true; + nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len; + memcpy(nlo_list[i].ssid.ssid.ssid, + pno->a_networks[i].ssid.ssid, + nlo_list[i].ssid.ssid.ssid_len); + ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid, + roundup(nlo_list[i].ssid.ssid.ssid_len, 4)); + + if (pno->a_networks[i].rssi_threshold && + pno->a_networks[i].rssi_threshold > -300) { + nlo_list[i].rssi_cond.valid = true; + nlo_list[i].rssi_cond.rssi = + pno->a_networks[i].rssi_threshold; + } + + nlo_list[i].bcast_nw_type.valid = true; + nlo_list[i].bcast_nw_type.bcast_nw_type = + pno->a_networks[i].bcast_nw_type; + } + + ptr += nlo_list_len; + cmd->num_of_channels = pno->a_networks[0].channel_count; + tlv = (struct wmi_tlv *)ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | + FIELD_PREP(WMI_TLV_LEN, channel_list_len); + ptr += sizeof(*tlv); + channel_list = (u32 *)ptr; + for (i = 0; i < cmd->num_of_channels; i++) + channel_list[i] = pno->a_networks[0].channels[i]; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", + vdev_id); + + return skb; +} + +static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar, + u32 vdev_id) +{ + struct wmi_wow_nlo_config_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->flags = WMI_NLO_CONFIG_STOP; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "wmi tlv stop pno config vdev_id %d\n", vdev_id); + return skb; +} + +int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id, + struct wmi_pno_scan_req *pno_scan) +{ + struct sk_buff *skb; + + if (pno_scan->enable) + skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); + else + skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id); + + if (IS_ERR_OR_NULL(skb)) + return -ENOMEM; + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); +} + diff --git a/drivers/net/wireless/ath/ath11k/wmi.h 
b/drivers/net/wireless/ath/ath11k/wmi.h index b82f432a3d95..9150e0555c6d 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -5616,6 +5616,173 @@ struct wmi_wow_del_pattern_cmd { u32 pattern_type; } __packed; +#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100 +#define WMI_PNO_MAX_NETW_CHANNELS 26 +#define WMI_PNO_MAX_NETW_CHANNELS_EX 60 +#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID +#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN + +/* size based of dot11 declaration without extra IEs as we will not carry those for PNO */ +#define WMI_PNO_MAX_PB_REQ_SIZE 450 + +#define WMI_PNO_24G_DEFAULT_CH 1 +#define WMI_PNO_5G_DEFAULT_CH 36 + +#define WMI_ACTIVE_MAX_CHANNEL_TIME 40 +#define WMI_PASSIVE_MAX_CHANNEL_TIME 110 + +/* SSID broadcast type */ +enum wmi_ssid_bcast_type { + BCAST_UNKNOWN = 0, + BCAST_NORMAL = 1, + BCAST_HIDDEN = 2, +}; + +#define WMI_NLO_MAX_SSIDS 16 +#define WMI_NLO_MAX_CHAN 48 + +#define WMI_NLO_CONFIG_STOP BIT(0) +#define WMI_NLO_CONFIG_START BIT(1) +#define WMI_NLO_CONFIG_RESET BIT(2) +#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4) +#define WMI_NLO_CONFIG_FAST_SCAN BIT(5) +#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6) + +/* This bit is used to indicate if EPNO or supplicant PNO is enabled. + * Only one of them can be enabled at a given time + */ +#define WMI_NLO_CONFIG_ENLO BIT(7) +#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8) +#define WMI_NLO_CONFIG_ENLO_RESET BIT(9) +#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10) +#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11) +#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12) +#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13) + +struct wmi_nlo_ssid_param { + u32 valid; + struct wmi_ssid ssid; +} __packed; + +struct wmi_nlo_enc_param { + u32 valid; + u32 enc_type; +} __packed; + +struct wmi_nlo_auth_param { + u32 valid; + u32 auth_type; +} __packed; + +struct wmi_nlo_bcast_nw_param { + u32 valid; + u32 bcast_nw_type; +} __packed; + +struct wmi_nlo_rssi_param { + u32 valid; + s32 rssi; +} __packed; + +struct nlo_configured_parameters { + /* TLV tag and len;*/ + u32 tlv_header; + struct wmi_nlo_ssid_param ssid; + struct wmi_nlo_enc_param enc_type; + struct wmi_nlo_auth_param auth_type; + struct wmi_nlo_rssi_param rssi_cond; + + /* indicates if the SSID is hidden or not */ + struct wmi_nlo_bcast_nw_param bcast_nw_type; +} __packed; + +struct wmi_network_type { + struct wmi_ssid ssid; + u32 authentication; + u32 encryption; + u32 bcast_nw_type; + u8 channel_count; + u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX]; + s32 rssi_threshold; +}; + +struct wmi_pno_scan_req { + u8 enable; + u8 vdev_id; + u8 uc_networks_count; + struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS]; + u32 fast_scan_period; + u32 slow_scan_period; + u8 fast_scan_max_cycles; + + bool do_passive_scan; + + u32 delay_start_time; + u32 active_min_time; + u32 active_max_time; + u32 passive_min_time; + u32 passive_max_time; + + /* mac address randomization attributes */ + u32 enable_pno_scan_randomization; + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; +}; + +struct wmi_wow_nlo_config_cmd { + u32 tlv_header; + u32 flags; + u32 vdev_id; + u32 fast_scan_max_cycles; + u32 active_dwell_time; + u32 passive_dwell_time; + u32 probe_bundle_size; + + /* ART = IRT */ + u32 rest_time; + + /* Max value that can be reached after SBM */ + u32 max_rest_time; + + /* SBM */ + u32 scan_backoff_multiplier; + + 
/* SCBM */ + u32 fast_scan_period; + + /* specific to windows */ + u32 slow_scan_period; + + u32 no_of_ssids; + + u32 num_of_channels; + + /* NLO scan start delay time in milliseconds */ + u32 delay_start_time; + + /* MAC Address to use in Probe Req as SA */ + struct wmi_mac_addr mac_addr; + + /* Mask on which MAC has to be randomized */ + struct wmi_mac_addr mac_mask; + + /* IE bitmap to use in Probe Req */ + u32 ie_bitmap[8]; + + /* Number of vendor OUIs. In the TLV vendor_oui[] */ + u32 num_vendor_oui; + + /* Number of connected NLO band preferences */ + u32 num_cnlo_band_pref; + + /* The TLVs will follow. + * nlo_configured_parameters nlo_list[]; + * u32 channel_list[num_of_channels]; + */ +} __packed; + int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id); struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); @@ -5777,6 +5944,8 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, const u8 mac_addr[ETH_ALEN]); int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap, struct ath11k_fw_dbglog *dbglog); +int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id, + struct wmi_pno_scan_req *pno_scan); int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id); int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id, const u8 *pattern, const u8 *mask, diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 837dab469ab2..5f3aa0b7eb9b 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -228,6 +228,101 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, } } +static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id, + struct cfg80211_sched_scan_request *nd_config, + struct wmi_pno_scan_req *pno) +{ + int i, j; + u8 ssid_len; + + pno->enable = 1; + pno->vdev_id = vdev_id; + pno->uc_networks_count = nd_config->n_match_sets; + + if (!pno->uc_networks_count || + pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS) + return -EINVAL; + + if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX) + return -EINVAL; + + /* Filling per profile params */ + for (i = 0; i < pno->uc_networks_count; i++) { + ssid_len = nd_config->match_sets[i].ssid.ssid_len; + + if (ssid_len == 0 || ssid_len > 32) + return -EINVAL; + + pno->a_networks[i].ssid.ssid_len = ssid_len; + + memcpy(pno->a_networks[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid_len); + pno->a_networks[i].authentication = 0; + pno->a_networks[i].encryption = 0; + pno->a_networks[i].bcast_nw_type = 0; + + /* Copying list of valid channel into request */ + pno->a_networks[i].channel_count = nd_config->n_channels; + pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold; + + for (j = 0; j < nd_config->n_channels; j++) { + pno->a_networks[i].channels[j] = + nd_config->channels[j]->center_freq; + } + } + + /* set scan to passive if no SSIDs are specified in the request */ + if (nd_config->n_ssids == 0) + pno->do_passive_scan = true; + else + pno->do_passive_scan = false; + + for (i = 0; i < nd_config->n_ssids; i++) { + j = 0; + while (j < pno->uc_networks_count) { + if (pno->a_networks[j].ssid.ssid_len == + nd_config->ssids[i].ssid_len && + (memcmp(pno->a_networks[j].ssid.ssid, + nd_config->ssids[i].ssid, + pno->a_networks[j].ssid.ssid_len) == 0)) { + pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN; + break; + } + j++; + } + } + + if 
(nd_config->n_scan_plans == 2) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations; + pno->slow_scan_period = + nd_config->scan_plans[1].interval * MSEC_PER_SEC; + } else if (nd_config->n_scan_plans == 1) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = 1; + pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + } else { + ath11k_warn(ar->ab, "Invalid number of scan plans %d !!", + nd_config->n_scan_plans); + } + + if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + /* enable mac randomization */ + pno->enable_pno_scan_randomization = 1; + memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN); + memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN); + } + + pno->delay_start_time = nd_config->delay; + + /* Current FW does not support min-max range for dwell time */ + pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME; + pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME; + + return 0; +} + static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif, struct cfg80211_wowlan *wowlan) { @@ -261,6 +356,26 @@ static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif, if (wowlan->magic_pkt) __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); + + if (wowlan->nd_config) { + struct wmi_pno_scan_req *pno; + int ret; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + ar->nlo_enabled = true; + + ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id, + wowlan->nd_config, pno); + if (!ret) { + ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); + __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask); + } + + kfree(pno); + } break; default: break; @@ -354,6 +469,51 @@ static int ath11k_wow_set_wakeups(struct ath11k *ar, return 0; } +static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif) +{ + int ret = 0; + struct ath11k *ar = arvif->ar; + + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_STA: + if (ar->nlo_enabled) { + struct wmi_pno_scan_req *pno; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + pno->enable = 0; + ar->nlo_enabled = false; + ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); + kfree(pno); + } + break; + default: + break; + } + return ret; +} + +static int ath11k_wow_nlo_cleanup(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath11k_vif_wow_clean_nlo(arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + int ath11k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { @@ -439,8 +599,16 @@ int ath11k_wow_op_resume(struct ieee80211_hw *hw) ath11k_hif_irq_enable(ar->ab); ret = ath11k_wow_wakeup(ar->ab); - if (ret) + if (ret) { ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret); + goto exit; + } + + ret = ath11k_wow_nlo_cleanup(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret); + goto exit; + } exit: if (ret) { @@ -477,6 +645,11 @@ int ath11k_wow_init(struct ath11k *ar) ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; } + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + } + ar->wow.max_num_patterns = 
ATH11K_WOW_PATTERNS; ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; -- cgit v1.2.3 From c417b247ba042161ddfb34f26a42ec67edc5c378 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Mon, 14 Mar 2022 07:18:16 +0200 Subject: ath11k: implement hardware data filter Host needs to set hardware data filter before entering WoW to let firmware drop needless broadcast/mulitcast frames to avoid frequent wakeup. Host clears hardware data filter when leaving WoW. Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1 Signed-off-by: Carl Huang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1644308006-22784-4-git-send-email-quic_cjhuang@quicinc.com --- drivers/net/wireless/ath/ath11k/wmi.c | 33 ++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 15 +++++++++ drivers/net/wireless/ath/ath11k/wow.c | 57 +++++++++++++++++++++++++++++++++++ 3 files changed, 105 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index d4d34b932e0b..62b13f5cb8b4 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -8165,6 +8165,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab) ath11k_wmi_free_dbring_caps(ab); } +int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id, + u32 filter_bitmap, bool enable) +{ + struct wmi_hw_data_filter_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_hw_data_filter_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->enable = enable; + + /* Set all modes in case of disable */ + if (cmd->enable) + cmd->hw_filter_bitmap = filter_bitmap; + else + cmd->hw_filter_bitmap = ((u32)~0U); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "wmi hw data filter enable %d filter_bitmap 0x%x\n", + enable, filter_bitmap); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); +} + int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar) { struct wmi_wow_host_wakeup_ind *cmd; diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 9150e0555c6d..17e50987e115 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -5390,6 +5390,19 @@ struct ath11k_wmi_base { struct ath11k_targ_cap *targ_cap; }; +/* Definition of HW data filtering */ +enum hw_data_filter_type { + WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0), + WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1), +}; + +struct wmi_hw_data_filter_cmd { + u32 tlv_header; + u32 vdev_id; + u32 enable; + u32 hw_filter_bitmap; +} __packed; + /* WOW structures */ enum wmi_wow_wakeup_event { WOW_BMISS_EVENT = 0, @@ -5953,4 +5966,6 @@ int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id, int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id, enum wmi_wow_wakeup_event event, u32 enable); +int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id, + u32 filter_bitmap, bool enable); #endif diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 5f3aa0b7eb9b..50dd50944edd 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -514,6 +514,50 @@ static int ath11k_wow_nlo_cleanup(struct ath11k *ar) 
return 0; } +static int ath11k_wow_set_hw_filter(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + u32 bitmap; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC | + WMI_HW_DATA_FILTER_DROP_NON_ARP_BC; + ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, + bitmap, + true); + if (ret) { + ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath11k_wow_clear_hw_filter(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false); + + if (ret) { + ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + int ath11k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { @@ -542,6 +586,13 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw, goto cleanup; } + ret = ath11k_wow_set_hw_filter(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to set hw filter: %d\n", + ret); + goto cleanup; + } + ret = ath11k_wow_enable(ar->ab); if (ret) { ath11k_warn(ar->ab, "failed to start wow: %d\n", ret); @@ -610,6 +661,12 @@ int ath11k_wow_op_resume(struct ieee80211_hw *hw) goto exit; } + ret = ath11k_wow_clear_hw_filter(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret); + goto exit; + } + exit: if (ret) { switch (ar->state) { -- cgit v1.2.3 From 90bf5c8d0f7ecddf96fc1cd9434af4e157b51970 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Mon, 14 Mar 2022 07:18:16 +0200 Subject: ath11k: purge rx pktlog when entering WoW This change is to purge rx pktlog when entering WoW and reap the mon_status buffer to keep it empty. When leaving WoW, host restarts the reap timer. In WoW state, it's not allowed to feed into mon_status rings per firmware team's recommendation. 
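For clarity, the ordering that matters, condensed from the hunks below (intermediate steps and error handling trimmed; an illustrative sketch only, not the complete suspend/resume paths):

	/* suspend: stop the reap timer and drain mon_status before WoW is armed */
	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
	/* ... wow cleanup, wakeup events, patterns ... */
	ret = ath11k_wow_enable(ar->ab);
	/* with WoW armed, do a final reap; the timer is already stopped */
	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);

	/* resume: restart the reap timer only after interrupts are back on */
	ath11k_hif_ce_irq_enable(ar->ab);
	ath11k_hif_irq_enable(ar->ab);
	ret = ath11k_dp_rx_pktlog_start(ar->ab);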
Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1 Signed-off-by: Carl Huang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1644308006-22784-5-git-send-email-quic_cjhuang@quicinc.com --- drivers/net/wireless/ath/ath11k/wow.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 50dd50944edd..602b8f4c5985 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -13,6 +13,7 @@ #include "debug.h" #include "wmi.h" #include "wow.h" +#include "dp_rx.h" static const struct wiphy_wowlan_support ath11k_wowlan_support = { .flags = WIPHY_WOWLAN_DISCONNECT | @@ -566,6 +567,14 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); + ret = ath11k_dp_rx_pktlog_stop(ar->ab, true); + if (ret) { + ath11k_warn(ar->ab, + "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n", + ret); + goto exit; + } + ret = ath11k_wow_cleanup(ar); if (ret) { ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n", @@ -599,6 +608,14 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw, goto cleanup; } + ret = ath11k_dp_rx_pktlog_stop(ar->ab, false); + if (ret) { + ath11k_warn(ar->ab, + "failed to stop dp rx pktlog during wow suspend: %d\n", + ret); + goto cleanup; + } + ath11k_ce_stop_shadow_timers(ar->ab); ath11k_dp_stop_shadow_timers(ar->ab); @@ -649,6 +666,12 @@ int ath11k_wow_op_resume(struct ieee80211_hw *hw) ath11k_hif_ce_irq_enable(ar->ab); ath11k_hif_irq_enable(ar->ab); + ret = ath11k_dp_rx_pktlog_start(ar->ab); + if (ret) { + ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret); + return ret; + } + ret = ath11k_wow_wakeup(ar->ab); if (ret) { ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret); -- cgit v1.2.3 From c3c36bfe998b3ad14aa87a57037a05d861889ac8 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Mon, 14 Mar 2022 07:18:16 +0200 Subject: ath11k: support ARP and NS offload Support ARP and NS offload in WoW state. Tested this way: put machine A with QCA6390 to WoW state, ping/ping6 machine A from another machine B, check sniffer to see any ARP response and Neighbour advertisement from machine A. 
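One detail worth spelling out: for NS offload the firmware must answer neighbour solicitations sent to the solicited-node multicast address, which the driver derives from every unicast/anycast IPv6 address (see ath11k_generate_ns_mc_addr() further down). Per RFC 4291 that address is ff02::1:ff00:0/104 plus the low 24 bits of the target address, so fe80::1234:56ff:fe78:9abc maps to ff02::1:ff78:9abc. A standalone sketch of the derivation (the helper name is illustrative, not driver code):

	static void build_solicited_node_addr(const u8 uc[16], u8 mc[16])
	{
		memset(mc, 0, 16);
		mc[0] = 0xff;		/* ff02:: link-local scope multicast prefix */
		mc[1] = 0x02;
		mc[11] = 0x01;		/* ...:1:ffXX:XXXX */
		mc[12] = 0xff;
		mc[13] = uc[13];	/* copy the low 24 bits of the unicast address */
		mc[14] = uc[14];
		mc[15] = uc[15];
	}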
Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1 Signed-off-by: Carl Huang Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1644308006-22784-6-git-send-email-quic_cjhuang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.h | 19 ++++ drivers/net/wireless/ath/ath11k/mac.c | 118 +++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.c | 155 +++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 47 ++++++++++ drivers/net/wireless/ath/ath11k/wow.c | 52 +++++++++++ 5 files changed, 391 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 1f5e977aa976..4ba12de44415 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -213,6 +213,23 @@ enum ath11k_monitor_flags { ATH11K_FLAG_MONITOR_VDEV_CREATED, }; +#define ATH11K_IPV6_UC_TYPE 0 +#define ATH11K_IPV6_AC_TYPE 1 + +#define ATH11K_IPV6_MAX_COUNT 16 +#define ATH11K_IPV4_MAX_COUNT 2 + +struct ath11k_arp_ns_offload { + u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4]; + u32 ipv4_count; + u32 ipv6_count; + u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16]; + u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16]; + u8 ipv6_type[ATH11K_IPV6_MAX_COUNT]; + bool ipv6_valid[ATH11K_IPV6_MAX_COUNT]; + u8 mac_addr[ETH_ALEN]; +}; + struct ath11k_vif { u32 vdev_id; enum wmi_vdev_type vdev_type; @@ -264,6 +281,8 @@ struct ath11k_vif { bool bcca_zero_sent; bool do_not_send_tmpl; struct ieee80211_chanctx_conf chanctx; + struct ath11k_arp_ns_offload arp_ns_offload; + #ifdef CONFIG_ATH11K_DEBUGFS struct dentry *debugfs_twt; #endif /* CONFIG_ATH11K_DEBUGFS */ diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 7a6c758b8e45..dee1cb4121f8 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -6,6 +6,11 @@ #include #include +#include +#include +#include +#include + #include "mac.h" #include "core.h" #include "debug.h" @@ -3095,6 +3100,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, int ret = 0; u8 rateidx; u32 rate; + u32 ipv4_cnt; mutex_lock(&ar->conf_mutex); @@ -3387,6 +3393,18 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) ath11k_mac_fils_discovery(arvif, info); + if (changed & BSS_CHANGED_ARP_FILTER) { + ipv4_cnt = min(info->arp_addr_cnt, ATH11K_IPV4_MAX_COUNT); + memcpy(arvif->arp_ns_offload.ipv4_addr, info->arp_addr_list, + ipv4_cnt * sizeof(u32)); + memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN); + arvif->arp_ns_offload.ipv4_count = ipv4_cnt; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n", + info->arp_addr_cnt, + vif->addr, arvif->arp_ns_offload.ipv4_addr); + } + mutex_unlock(&ar->conf_mutex); } @@ -8087,6 +8105,101 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, } } +static void ath11k_generate_ns_mc_addr(struct ath11k *ar, + struct ath11k_arp_ns_offload *offload) +{ + int i; + + for (i = 0; i < offload->ipv6_count; i++) { + offload->self_ipv6_addr[i][0] = 0xff; + offload->self_ipv6_addr[i][1] = 0x02; + offload->self_ipv6_addr[i][11] = 0x01; + offload->self_ipv6_addr[i][12] = 0xff; + offload->self_ipv6_addr[i][13] = + offload->ipv6_addr[i][13]; + offload->self_ipv6_addr[i][14] = + offload->ipv6_addr[i][14]; + offload->self_ipv6_addr[i][15] = + offload->ipv6_addr[i][15]; + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS 
solicited addr %pI6\n", + offload->self_ipv6_addr[i]); + } +} + +static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct ath11k *ar = hw->priv; + struct ath11k_arp_ns_offload *offload; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct inet6_ifaddr *ifa6; + struct ifacaddr6 *ifaca6; + struct list_head *p; + u32 count, scope; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac op ipv6 changed\n"); + + offload = &arvif->arp_ns_offload; + count = 0; + + read_lock_bh(&idev->lock); + + memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr)); + memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr)); + memcpy(offload->mac_addr, vif->addr, ETH_ALEN); + + /* get unicast address */ + list_for_each(p, &idev->addr_list) { + if (count >= ATH11K_IPV6_MAX_COUNT) + goto generate; + + ifa6 = list_entry(p, struct inet6_ifaddr, if_list); + if (ifa6->flags & IFA_F_DADFAILED) + continue; + scope = ipv6_addr_src_scope(&ifa6->addr); + if (scope == IPV6_ADDR_SCOPE_LINKLOCAL || + scope == IPV6_ADDR_SCOPE_GLOBAL) { + memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr, + sizeof(ifa6->addr.s6_addr)); + offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE; + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 uc %pI6 scope %d\n", + count, offload->ipv6_addr[count], + scope); + count++; + } else { + ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope); + } + } + + /* get anycast address */ + for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) { + if (count >= ATH11K_IPV6_MAX_COUNT) + goto generate; + + scope = ipv6_addr_src_scope(&ifaca6->aca_addr); + if (scope == IPV6_ADDR_SCOPE_LINKLOCAL || + scope == IPV6_ADDR_SCOPE_GLOBAL) { + memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr, + sizeof(ifaca6->aca_addr)); + offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE; + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 ac %pI6 scope %d\n", + count, offload->ipv6_addr[count], + scope); + count++; + } else { + ath11k_warn(ar->ab, "Unsupported ipv scope: %d\n", scope); + } + } + +generate: + offload->ipv6_count = count; + read_unlock_bh(&idev->lock); + + /* generate ns multicast address */ + ath11k_generate_ns_mc_addr(ar, offload); +} + static const struct ieee80211_ops ath11k_ops = { .tx = ath11k_mac_op_tx, .start = ath11k_mac_op_start, @@ -8132,6 +8245,11 @@ static const struct ieee80211_ops ath11k_ops = { #ifdef CONFIG_ATH11K_DEBUGFS .sta_add_debugfs = ath11k_debugfs_sta_op_add, #endif + +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = ath11k_mac_op_ipv6_changed, +#endif + }; static void ath11k_mac_update_ch_list(struct ath11k *ar, diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 62b13f5cb8b4..828f9cacbe57 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -8580,3 +8580,158 @@ int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id, return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); } +static void ath11k_wmi_fill_ns_offload(struct ath11k *ar, + struct ath11k_arp_ns_offload *offload, + u8 **ptr, + bool enable, + bool ext) +{ + struct wmi_ns_offload_tuple *ns; + struct wmi_tlv *tlv; + u8 *buf_ptr = *ptr; + u32 ns_cnt, ns_ext_tuples; + int i, max_offloads; + + ns_cnt = offload->ipv6_count; + + tlv = (struct wmi_tlv *)buf_ptr; + + if (ext) { + ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + 
FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns)); + i = WMI_MAX_NS_OFFLOADS; + max_offloads = offload->ipv6_count; + } else { + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns)); + i = 0; + max_offloads = WMI_MAX_NS_OFFLOADS; + } + + buf_ptr += sizeof(*tlv); + + for (; i < max_offloads; i++) { + ns = (struct wmi_ns_offload_tuple *)buf_ptr; + ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE); + + if (enable) { + if (i < ns_cnt) + ns->flags |= WMI_NSOL_FLAGS_VALID; + + memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); + memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); + ath11k_ce_byte_swap(ns->target_ipaddr[0], 16); + ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16); + + if (offload->ipv6_type[i]) + ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST; + + memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); + ath11k_ce_byte_swap(ns->target_mac.addr, 8); + + if (ns->target_mac.word0 != 0 || + ns->target_mac.word1 != 0) { + ns->flags |= WMI_NSOL_FLAGS_MAC_VALID; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "wmi index %d ns_solicited %pI6 target %pI6", + i, ns->solicitation_ipaddr, + ns->target_ipaddr[0]); + } + + buf_ptr += sizeof(*ns); + } + + *ptr = buf_ptr; +} + +static void ath11k_wmi_fill_arp_offload(struct ath11k *ar, + struct ath11k_arp_ns_offload *offload, + u8 **ptr, + bool enable) +{ + struct wmi_arp_offload_tuple *arp; + struct wmi_tlv *tlv; + u8 *buf_ptr = *ptr; + int i; + + /* fill arp tuple */ + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); + buf_ptr += sizeof(*tlv); + + for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + arp = (struct wmi_arp_offload_tuple *)buf_ptr; + arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE); + + if (enable && i < offload->ipv4_count) { + /* Copy the target ip addr and flags */ + arp->flags = WMI_ARPOL_FLAGS_VALID; + memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); + ath11k_ce_byte_swap(arp->target_ipaddr, 4); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4", + arp->target_ipaddr); + } + + buf_ptr += sizeof(*arp); + } + + *ptr = buf_ptr; +} + +int ath11k_wmi_arp_ns_offload(struct ath11k *ar, + struct ath11k_vif *arvif, bool enable) +{ + struct ath11k_arp_ns_offload *offload; + struct wmi_set_arp_ns_offload_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *buf_ptr; + size_t len; + u8 ns_cnt, ns_ext_tuples = 0; + + offload = &arvif->arp_ns_offload; + ns_cnt = offload->ipv6_count; + + len = sizeof(*cmd) + + sizeof(*tlv) + + WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) + + sizeof(*tlv) + + WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple); + + if (ns_cnt > WMI_MAX_NS_OFFLOADS) { + ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; + len += sizeof(*tlv) + + ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple); + } + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + buf_ptr = skb->data; + cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->flags = 0; + cmd->vdev_id = arvif->vdev_id; + cmd->num_ns_ext_tuples = ns_ext_tuples; + + buf_ptr += sizeof(*cmd); + + 
ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); + ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); + + if (ns_ext_tuples) + ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 17e50987e115..e2d03c3b422c 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -13,6 +13,7 @@ struct ath11k_base; struct ath11k; struct ath11k_fw_stats; struct ath11k_fw_dbglog; +struct ath11k_vif; #define PSOC_HOST_MAX_NUM_SS (8) @@ -5796,6 +5797,49 @@ struct wmi_wow_nlo_config_cmd { */ } __packed; +#define WMI_MAX_NS_OFFLOADS 2 +#define WMI_MAX_ARP_OFFLOADS 2 + +#define WMI_ARPOL_FLAGS_VALID BIT(0) +#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1) +#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2) + +struct wmi_arp_offload_tuple { + u32 tlv_header; + u32 flags; + u8 target_ipaddr[4]; + u8 remote_ipaddr[4]; + struct wmi_mac_addr target_mac; +} __packed; + +#define WMI_NSOL_FLAGS_VALID BIT(0) +#define WMI_NSOL_FLAGS_MAC_VALID BIT(1) +#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2) +#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3) + +#define WMI_NSOL_MAX_TARGET_IPS 2 + +struct wmi_ns_offload_tuple { + u32 tlv_header; + u32 flags; + u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16]; + u8 solicitation_ipaddr[16]; + u8 remote_ipaddr[16]; + struct wmi_mac_addr target_mac; +} __packed; + +struct wmi_set_arp_ns_offload_cmd { + u32 tlv_header; + u32 flags; + u32 vdev_id; + u32 num_ns_ext_tuples; + /* The TLVs follow: + * wmi_ns_offload_tuple ns_tuples[WMI_MAX_NS_OFFLOADS]; + * wmi_arp_offload_tuple arp_tuples[WMI_MAX_ARP_OFFLOADS]; + * wmi_ns_offload_tuple ns_ext_tuples[num_ns_ext_tuples]; + */ +} __packed; + int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id); struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); @@ -5968,4 +6012,7 @@ int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id, u32 enable); int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id, u32 filter_bitmap, bool enable); +int ath11k_wmi_arp_ns_offload(struct ath11k *ar, + struct ath11k_vif *arvif, bool enable); + #endif diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 602b8f4c5985..bc6a4d3d6d7c 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -559,6 +559,43 @@ static int ath11k_wow_clear_hw_filter(struct ath11k *ar) return 0; } +static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable); + + if (ret) { + ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n", + arvif->vdev_id, enable, ret); + return ret; + } + } + + return 0; +} + +static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable) +{ + int ret; + + ret = ath11k_wow_arp_ns_offload(ar, enable); + if (ret) { + ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n", + enable, ret); + return ret; + } + + return 0; +} + int ath11k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { @@ -589,6 +626,14 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw, goto cleanup; } + ret = 
ath11k_wow_protocol_offload(ar, true); + if (ret) { + ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n", + ret); + goto cleanup; + } + + ath11k_mac_drain_tx(ar); ret = ath11k_mac_wait_tx_complete(ar); if (ret) { ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret); @@ -690,6 +735,13 @@ int ath11k_wow_op_resume(struct ieee80211_hw *hw) goto exit; } + ret = ath11k_wow_protocol_offload(ar, false); + if (ret) { + ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n", + ret); + goto exit; + } + exit: if (ret) { switch (ar->state) { -- cgit v1.2.3 From a16d9b50cfbaf112401b8e5ccfa852709f498cd4 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Mon, 14 Mar 2022 07:18:16 +0200 Subject: ath11k: support GTK rekey offload Host sets GTK related info to firmware before WoW is enabled, and gets rekey replay_count and then disables GTK rekey when WoW quits. Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1 Signed-off-by: Carl Huang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1644308006-22784-7-git-send-email-quic_cjhuang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.h | 8 +++ drivers/net/wireless/ath/ath11k/mac.c | 37 ++++++++++ drivers/net/wireless/ath/ath11k/wmi.c | 119 +++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 49 ++++++++++++++ drivers/net/wireless/ath/ath11k/wow.c | 46 ++++++++++++- 5 files changed, 258 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 4ba12de44415..e556beb945ac 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -230,6 +230,13 @@ struct ath11k_arp_ns_offload { u8 mac_addr[ETH_ALEN]; }; +struct ath11k_rekey_data { + u8 kck[NL80211_KCK_LEN]; + u8 kek[NL80211_KCK_LEN]; + u64 replay_ctr; + bool enable_offload; +}; + struct ath11k_vif { u32 vdev_id; enum wmi_vdev_type vdev_type; @@ -282,6 +289,7 @@ struct ath11k_vif { bool do_not_send_tmpl; struct ieee80211_chanctx_conf chanctx; struct ath11k_arp_ns_offload arp_ns_offload; + struct ath11k_rekey_data rekey_data; #ifdef CONFIG_ATH11K_DEBUGFS struct dentry *debugfs_twt; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index dee1cb4121f8..e0e31605e0b0 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2757,6 +2757,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, } arvif->is_up = true; + arvif->rekey_data.enable_offload = false; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vdev %d up (associated) bssid %pM aid %d\n", @@ -2814,6 +2815,8 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw, arvif->is_up = false; + memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data)); + cancel_delayed_work_sync(&arvif->connection_loss_work); } @@ -8200,6 +8203,39 @@ generate: ath11k_generate_ns_mc_addr(ar, offload); } +static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ath11k_rekey_data *rekey_data = &arvif->rekey_data; + + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set rekey data vdev %d\n", + arvif->vdev_id); + + mutex_lock(&ar->conf_mutex); + + memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN); + memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN); + + /* The supplicant works on big-endian, the firmware expects it 
on + * little endian. + */ + rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr); + + arvif->rekey_data.enable_offload = true; + + ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL, + rekey_data->kck, NL80211_KCK_LEN); + ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL, + rekey_data->kck, NL80211_KEK_LEN); + ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL, + &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr)); + + mutex_unlock(&ar->conf_mutex); +} + static const struct ieee80211_ops ath11k_ops = { .tx = ath11k_mac_op_tx, .start = ath11k_mac_op_start, @@ -8214,6 +8250,7 @@ static const struct ieee80211_ops ath11k_ops = { .hw_scan = ath11k_mac_op_hw_scan, .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan, .set_key = ath11k_mac_op_set_key, + .set_rekey_data = ath11k_mac_op_set_rekey_data, .sta_state = ath11k_mac_op_sta_state, .sta_set_4addr = ath11k_mac_op_sta_set_4addr, .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr, diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 828f9cacbe57..7f97c26bffd9 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -7765,6 +7765,56 @@ exit: kfree(tb); } +static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_gtk_offload_status_event *ev; + struct ath11k_vif *arvif; + __be64 replay_ctr_be; + u64 replay_ctr; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch gtk offload status ev"); + kfree(tb); + return; + } + + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); + if (!arvif) { + ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n", + ev->vdev_id); + kfree(tb); + return; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n", + ev->refresh_cnt); + ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt", + NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES); + + replay_ctr = ev->replay_ctr.word1; + replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0; + arvif->rekey_data.replay_ctr = replay_ctr; + + /* supplicant expects big-endian replay counter */ + replay_ctr_be = cpu_to_be64(replay_ctr); + + ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, + (void *)&replay_ctr_be, GFP_KERNEL); + + kfree(tb); +} + static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -7896,6 +7946,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_DIAG_EVENTID: ath11k_wmi_diag_event(ab, skb); break; + case WMI_GTK_OFFLOAD_STATUS_EVENTID: + ath11k_wmi_gtk_offload_status_event(ab, skb); + break; /* TODO: Add remaining events */ default: ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id); @@ -8735,3 +8788,69 @@ int ath11k_wmi_arp_ns_offload(struct ath11k *ar, return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); } + +int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar, + struct ath11k_vif *arvif, bool enable) +{ + struct wmi_gtk_rekey_offload_cmd *cmd; + struct ath11k_rekey_data *rekey_data = &arvif->rekey_data; + int len; + struct sk_buff *skb; + __le64 replay_ctr; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct 
wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = arvif->vdev_id; + + if (enable) { + cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE; + + /* the length in rekey_data and cmd is equal */ + memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); + ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES); + memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); + ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES); + + replay_ctr = cpu_to_le64(rekey_data->replay_ctr); + memcpy(cmd->replay_ctr, &replay_ctr, + sizeof(replay_ctr)); + ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES); + } else { + cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", + arvif->vdev_id, enable); + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar, + struct ath11k_vif *arvif) +{ + struct wmi_gtk_rekey_offload_cmd *cmd; + int len; + struct sk_buff *skb; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = arvif->vdev_id; + cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n", + arvif->vdev_id); + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index e2d03c3b422c..925b8003feae 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -5840,6 +5840,51 @@ struct wmi_set_arp_ns_offload_cmd { */ } __packed; +#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000 +#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000 +#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000 +#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000 + +#define GTK_OFFLOAD_KEK_BYTES 16 +#define GTK_OFFLOAD_KCK_BYTES 16 +#define GTK_REPLAY_COUNTER_BYTES 8 +#define WMI_MAX_KEY_LEN 32 +#define IGTK_PN_SIZE 6 + +struct wmi_replayc_cnt { + union { + u8 counter[GTK_REPLAY_COUNTER_BYTES]; + struct { + u32 word0; + u32 word1; + } __packed; + } __packed; +} __packed; + +struct wmi_gtk_offload_status_event { + u32 vdev_id; + u32 flags; + u32 refresh_cnt; + struct wmi_replayc_cnt replay_ctr; + u8 igtk_key_index; + u8 igtk_key_length; + u8 igtk_key_rsc[IGTK_PN_SIZE]; + u8 igtk_key[WMI_MAX_KEY_LEN]; + u8 gtk_key_index; + u8 gtk_key_length; + u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES]; + u8 gtk_key[WMI_MAX_KEY_LEN]; +} __packed; + +struct wmi_gtk_rekey_offload_cmd { + u32 tlv_header; + u32 vdev_id; + u32 flags; + u8 kek[GTK_OFFLOAD_KEK_BYTES]; + u8 kck[GTK_OFFLOAD_KCK_BYTES]; + u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES]; +} __packed; + int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id); struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); @@ -6014,5 +6059,9 @@ int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id, u32 filter_bitmap, bool enable); int ath11k_wmi_arp_ns_offload(struct ath11k *ar, struct ath11k_vif *arvif, bool enable); +int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar, + struct ath11k_vif *arvif, bool enable); +int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar, + struct ath11k_vif 
*arvif); #endif diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index bc6a4d3d6d7c..93714df834b2 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -17,7 +17,9 @@ static const struct wiphy_wowlan_support ath11k_wowlan_support = { .flags = WIPHY_WOWLAN_DISCONNECT | - WIPHY_WOWLAN_MAGIC_PKT, + WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE, .pattern_min_len = WOW_MIN_PATTERN_SIZE, .pattern_max_len = WOW_MAX_PATTERN_SIZE, .max_pkt_offset = WOW_MAX_PKT_OFFSET, @@ -582,6 +584,41 @@ static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable) return 0; } +static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA || + !arvif->is_up || + !arvif->rekey_data.enable_offload) + continue; + + /* get rekey info before disable rekey offload */ + if (!enable) { + ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable); + + if (ret) { + ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n", + arvif->vdev_id, enable, ret); + return ret; + } + } + + return 0; +} + static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable) { int ret; @@ -593,6 +630,13 @@ static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable) return ret; } + ret = ath11k_gtk_rekey_offload(ar, enable); + if (ret) { + ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n", + enable, ret); + return ret; + } + return 0; } -- cgit v1.2.3 From 9503a1fc123d9c9c502a7e518634296874a7b8cc Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 14 Mar 2022 06:45:01 +0000 Subject: ath9k: Use platform_get_irq() to get the interrupt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is not recommended to use platform_get_resource(pdev, IORESOURCE_IRQ) for requesting IRQ resources any more, as they may not be ready yet when booting via device tree. platform_get_irq() is the recommended way to get an IRQ, even if it was not retrieved earlier. It also makes the code simpler because we get an "int" value right away and no conversion from resource to int is required.
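As a reference, the preferred shape of such a probe path is roughly the following (a hypothetical foo driver, not ath9k code; foo_isr is a placeholder handler):

	static int foo_probe(struct platform_device *pdev)
	{
		int irq;

		/* returns a negative errno (possibly -EPROBE_DEFER) on failure */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
					dev_name(&pdev->dev), NULL);
	}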
Reported-by: Zeal Robot Signed-off-by: Minghao Chi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314064501.2114002-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ath/ath9k/ahb.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index cdefb8e2daf1..c9b853af41d1 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -98,14 +98,12 @@ static int ath_ahb_probe(struct platform_device *pdev) return -ENOMEM; } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res == NULL) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(&pdev->dev, "no IRQ resource found\n"); - return -ENXIO; + return irq; } - irq = res->start; - ath9k_fill_chanctx_ops(); hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); if (hw == NULL) { -- cgit v1.2.3 From b7d174479c8ac365828d474b385da2da858f2089 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 14 Mar 2022 12:53:27 +0100 Subject: ath6kl: fix typos in comments Various spelling mistakes in comments. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314115354.144023-4-Julia.Lawall@inria.fr --- drivers/net/wireless/ath/ath6kl/htc_mbox.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index e3874421c4c0..1963d3145481 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -1538,7 +1538,7 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target, queue, n_msg); /* - * This is due to unavailabilty of buffers to rx entire data. + * This is due to unavailability of buffers to rx entire data. * Return no error so that free buffers from queue can be used * to receive partial data. */ -- cgit v1.2.3 From 2c3fc50591ff3b7c90afb39c6f4ac67a0ffeeb76 Mon Sep 17 00:00:00 2001 From: Youghandhar Chintala Date: Tue, 15 Mar 2022 13:59:44 +0530 Subject: ath10k: Trigger sta disconnect on hardware restart Currently after the hardware restart triggered from the driver, the station interface connection remains intact, since a disconnect trigger is not sent to userspace. This can lead to a problem in targets where the wifi mac sequence is added by the firmware. After the target restart, its wifi mac sequence number gets reset to zero. Hence AP to which our device is connected will receive frames with a wifi mac sequence number jump to the past, thereby resulting in the AP dropping all these frames, until the frame arrives with a wifi mac sequence number which AP was expecting. To avoid such frame drops, its better to trigger a station disconnect upon target hardware restart which can be done with API ieee80211_reconfig_disconnect exposed to mac80211. The other targets are not affected by this change, since the hardware params flag is not set. 
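The driver-side change is small: condensed from the core.c hunk below, the restart path walks the interfaces and forces a disconnect only for station vdevs that are up, and only on targets that set the new flag:

	if (ar->hw_params.hw_restart_disconnect) {
		list_for_each_entry(arvif, &ar->arvifs, list) {
			if (arvif->is_up &&
			    arvif->vdev_type == WMI_VDEV_TYPE_STA)
				ieee80211_hw_restart_disconnect(arvif->vif);
		}
	}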
Tested-on: WCN3990 hw1.0 SNOC WLAN.HL.3.1-01040-QCAHLSWMTPLZ-1 Tested-on: QCA6174 hw3.2 PCI WLAN.RM.4.4.1-00110-QCARMSWP-1 Tested-on: QCA6174 hw3.2 SDIO WLAN.RMH.4.4.1-00048 Signed-off-by: Youghandhar Chintala Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220315082944.12406-3-youghand@codeaurora.org --- drivers/net/wireless/ath/ath10k/core.c | 25 +++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 2 ++ 2 files changed, 27 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 9e1f483e1362..2092bfd02cd1 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -94,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = true, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA988X_HW_2_0_VERSION, @@ -131,6 +132,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = true, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA9887_HW_1_0_VERSION, @@ -169,6 +171,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA6174_HW_3_2_VERSION, @@ -202,6 +205,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .bmi_large_size_download = true, .supports_peer_stats_info = true, .dynamic_sar_support = true, + .hw_restart_disconnect = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -239,6 +243,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -276,6 +281,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA6174_HW_3_0_VERSION, @@ -313,6 +319,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA6174_HW_3_2_VERSION, @@ -354,6 +361,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .tx_stats_over_pktlog = false, .supports_peer_stats_info = true, .dynamic_sar_support = true, + .hw_restart_disconnect = false, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -397,6 +405,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -447,6 +456,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -531,6 +542,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { 
.credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -570,6 +582,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -600,6 +613,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .uart_pin_workaround = true, .credit_size_workaround = true, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -644,6 +658,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = false, + .hw_restart_disconnect = false, }, { .id = WCN3990_HW_1_0_DEV_VERSION, @@ -674,6 +689,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .credit_size_workaround = false, .tx_stats_over_pktlog = false, .dynamic_sar_support = true, + .hw_restart_disconnect = true, }, }; @@ -2442,6 +2458,7 @@ EXPORT_SYMBOL(ath10k_core_napi_sync_disable); static void ath10k_core_restart(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, restart_work); + struct ath10k_vif *arvif; int ret; set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); @@ -2480,6 +2497,14 @@ static void ath10k_core_restart(struct work_struct *work) ar->state = ATH10K_STATE_RESTARTING; ath10k_halt(ar); ath10k_scan_finish(ar); + if (ar->hw_params.hw_restart_disconnect) { + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_up && + arvif->vdev_type == WMI_VDEV_TYPE_STA) + ieee80211_hw_restart_disconnect(arvif->vif); + } + } + ieee80211_restart_hw(ar->hw); break; case ATH10K_STATE_OFF: diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 5215a6816d71..93acf0dd580a 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -633,6 +633,8 @@ struct ath10k_hw_params { bool supports_peer_stats_info; bool dynamic_sar_support; + + bool hw_restart_disconnect; }; struct htt_resp; -- cgit v1.2.3 From c4e9705c50121771e7582830359894431f24db73 Mon Sep 17 00:00:00 2001 From: Meng Tang Date: Fri, 18 Mar 2022 10:53:31 +0800 Subject: ath10k: Use of_device_get_match_data() helper Only the device data is needed, not the entire struct of_device_id. Use of_device_get_match_data() instead of of_match_device(). 
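For reference, a minimal sketch of the pattern with a hypothetical foo driver (all names illustrative): the .data member of the OF match table carries the per-variant information, and of_device_get_match_data() hands it back directly, or NULL when nothing matched:

	struct foo_hw_data {
		int hw_rev;
	};

	static const struct foo_hw_data foo_v1_data = { .hw_rev = 1 };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-v1", .data = &foo_v1_data },
		{ /* sentinel */ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		const struct foo_hw_data *data;

		data = of_device_get_match_data(&pdev->dev);
		if (!data)
			return -EINVAL;

		dev_info(&pdev->dev, "probing hw revision %d\n", data->hw_rev);
		return 0;
	}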
Signed-off-by: Meng Tang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318025331.23030-1-tangmeng@uniontech.com --- drivers/net/wireless/ath/ath10k/ahb.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index ab8f77ae5e66..f0c615fa5614 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -728,20 +728,17 @@ static int ath10k_ahb_probe(struct platform_device *pdev) struct ath10k *ar; struct ath10k_ahb *ar_ahb; struct ath10k_pci *ar_pci; - const struct of_device_id *of_id; enum ath10k_hw_rev hw_rev; size_t size; int ret; struct ath10k_bus_params bus_params = {}; - of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev); - if (!of_id) { - dev_err(&pdev->dev, "failed to find matching device tree id\n"); + hw_rev = (enum ath10k_hw_rev)of_device_get_match_data(&pdev->dev); + if (!hw_rev) { + dev_err(&pdev->dev, "OF data missing\n"); return -EINVAL; } - hw_rev = (enum ath10k_hw_rev)of_id->data; - size = sizeof(*ar_pci) + sizeof(*ar_ahb); ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB, hw_rev, &ath10k_ahb_hif_ops); -- cgit v1.2.3 From 997dc60f0855b39aec0400511b37d65781da9255 Mon Sep 17 00:00:00 2001 From: Karthikeyan Periyasamy Date: Mon, 28 Feb 2022 10:24:39 +0530 Subject: ath11k: Refactor the peer delete Introduce new helper function for peer delete to reuse this logic in all peer cleanup procedures. Found this in code review. Also this change is applicable for all the platform. Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01067-QCAHKSWPL_SILICONZ-1 Signed-off-by: Karthikeyan Periyasamy Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1646024079-26391-1-git-send-email-quic_periyasa@quicinc.com --- drivers/net/wireless/ath/ath11k/mac.c | 16 +++------------- drivers/net/wireless/ath/ath11k/peer.c | 31 ++++++++++++++++--------------- 2 files changed, 19 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index e0e31605e0b0..7cf1ea276e24 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -6387,22 +6387,12 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, err_peer_del: if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - reinit_completion(&ar->peer_delete_done); - - fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, - arvif->vdev_id); + fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); if (fbret) { - ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", - arvif->vdev_id, vif->addr); + ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n", + vif->addr, arvif->vdev_id, fbret); goto err; } - - fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id, - vif->addr); - if (fbret) - goto err; - - ar->num_peers--; } err_vdev_del: diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c index 332886bc6b33..392985b873bf 100644 --- a/drivers/net/wireless/ath/ath11k/peer.c +++ b/drivers/net/wireless/ath/ath11k/peer.c @@ -217,7 +217,7 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id, return 0; } -int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) +static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr) { int ret; @@ -237,6 +237,19 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) if (ret) return 
ret; + return 0; +} + +int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = __ath11k_peer_delete(ar, vdev_id, addr); + if (ret) + return ret; + ar->num_peers--; return 0; @@ -323,22 +336,10 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, return 0; cleanup: - reinit_completion(&ar->peer_delete_done); - - fbret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr, - param->vdev_id); - if (fbret) { - ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", - param->vdev_id, param->peer_addr); - goto exit; - } - - fbret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id, - param->peer_addr); + fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr); if (fbret) - ath11k_warn(ar->ab, "failed wait for peer %pM delete done id %d fallback ret %d\n", + ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n", param->peer_addr, param->vdev_id, fbret); -exit: return ret; } -- cgit v1.2.3 From 72a9bff386545d3f8e9c432cb8e036272ad4e1fa Mon Sep 17 00:00:00 2001 From: Hari Chandrakanthan Date: Wed, 9 Mar 2022 16:54:25 +0530 Subject: ath11k: change fw build id format in driver init log Currently fw build id is printed during init as follows. fw_version 0x250684a5 fw_build_timestamp 2021-07-13 10:57 fw_build_id QC_IMAGE_VERSION_STRING=WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 The string "QC_IMAGE_VERSION_STRING=" is removed from the log to improve readability. With this patch the fw build id is printed during init as follows. fw_version 0x250684a5 fw_build_timestamp 2021-07-13 10:57 fw_build_id WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Signed-off-by: Hari Chandrakanthan Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1646825065-7736-1-git-send-email-quic_haric@quicinc.com --- drivers/net/wireless/ath/ath11k/qmi.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 04e966830c18..ec110fe81378 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -16,6 +16,8 @@ #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 #define HOST_CSTATE_BIT 0x04 +#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING=" + bool ath11k_cold_boot_cal = 1; EXPORT_SYMBOL(ath11k_cold_boot_cal); module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644); @@ -2008,6 +2010,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) struct qmi_txn txn; int ret = 0; int r; + char *fw_build_id; + int fw_build_id_mask_len; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); @@ -2073,6 +2077,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n"); } + fw_build_id = ab->qmi.target.fw_build_id; + fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK); + if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len)) + fw_build_id = fw_build_id + fw_build_id_mask_len; + ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n", ab->qmi.target.chip_id, ab->qmi.target.chip_family, ab->qmi.target.board_id, ab->qmi.target.soc_id); @@ -2080,7 +2089,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s", ab->qmi.target.fw_version, ab->qmi.target.fw_build_timestamp, - ab->qmi.target.fw_build_id); + fw_build_id); r = 
ath11k_core_check_dt(ab); if (r) -- cgit v1.2.3 From 2db80f93869d491be57cbc2b36f30d0d3a0e5bde Mon Sep 17 00:00:00 2001 From: Niels Dossche Date: Mon, 21 Mar 2022 12:58:23 +0200 Subject: ath11k: acquire ab->base_lock in unassign when finding the peer by addr ath11k_peer_find_by_addr states via lockdep that ab->base_lock must be held when calling that function in order to protect the list. All callers except ath11k_mac_op_unassign_vif_chanctx have that lock acquired when calling ath11k_peer_find_by_addr. That lock is also not transitively held by a path towards ath11k_mac_op_unassign_vif_chanctx. The solution is to acquire the lock when calling ath11k_peer_find_by_addr inside ath11k_mac_op_unassign_vif_chanctx. I am currently working on a static analyser to detect missing locks and this was a reported case. I manually verified the report by looking at the code, but I do not have real hardware so this is compile tested only. Fixes: 701e48a43e15 ("ath11k: add packet log support for QCA6390") Signed-off-by: Niels Dossche Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314215253.92658-1-dossche.niels@gmail.com --- drivers/net/wireless/ath/ath11k/mac.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 7cf1ea276e24..62d9558a8e85 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -7133,6 +7133,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_peer *peer; int ret; mutex_lock(&ar->conf_mutex); @@ -7144,9 +7145,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, WARN_ON(!arvif->is_started); if (ab->hw_params.vdev_start_delay && - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && - ath11k_peer_find_by_addr(ab, ar->mac_addr)) - ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); + arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_addr(ab, ar->mac_addr); + spin_unlock_bh(&ab->base_lock); + if (peer) + ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); + } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_mac_monitor_stop(ar); -- cgit v1.2.3 From 7fb376ad7d3f200575b9f9374e21b39d30b57267 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Mon, 21 Mar 2022 13:03:23 +0200 Subject: ath11k: remove unused ATH11K_BD_IE_BOARD_EXT Currently ATH11K_BD_IE_BOARD_EXT is not used, so remove it. 
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220319023543.14288-2-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/hw.h | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 27ca4a9c20fc..1c701e16a3b5 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -293,7 +293,6 @@ enum ath11k_bd_ie_board_type { enum ath11k_bd_ie_type { /* contains sub IEs of enum ath11k_bd_ie_board_type */ ATH11K_BD_IE_BOARD = 0, - ATH11K_BD_IE_BOARD_EXT = 1, }; struct ath11k_hw_regs { -- cgit v1.2.3 From 0c104b6163e344e972dbbd255ca2441c171a8a87 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Mon, 21 Mar 2022 13:03:29 +0200 Subject: ath11k: disable regdb support for QCA6390 There are no regdb files for QCA6390 yet, so disable its regdb support feature for now. Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-05266-QCAHSTSWPLZ_V2_TO_X86-1 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220319023543.14288-3-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 104149050205..700fd4ff62e4 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -219,7 +219,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), - .supports_regdb = true, + .supports_regdb = false, .fix_l1ss = true, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, -- cgit v1.2.3 From 13da397f884d9c9a3fb6616206eeb6c6ab097287 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Mon, 28 Feb 2022 01:46:03 -0500 Subject: ath11k: add support for device recovery for QCA6390/WCN6855 ath11k already has device recovery logic, introduced by the patch "ath11k: Add support for subsystem recovery", upstream at https://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git/commit/?h=ath11k-bringup&id=3a7b4838b6f6f234239f263ef3dc02e612a083ad. That patch targets AHB devices such as IPQ8074, which have a remoteproc module that downloads the firmware and boots the processor the firmware runs on. If the firmware crashes, the remoteproc module detects it, then downloads and boots the firmware again. The commands below trigger a firmware crash so the device recovery feature can be tested: echo assert > /sys/kernel/debug/ath11k/qca6390\ hw2.0/simulate_fw_crash echo assert > /sys/kernel/debug/ath11k/wcn6855\ hw2.0/simulate_fw_crash QCA6390, however, sits on the PCIe bus and has no remoteproc module; it uses the MHI module to communicate between the firmware and ath11k, so ath11k currently does not support device recovery for QCA6390. This patch adds the extra logic QCA6390 needs. When the firmware crashes, it signals an MHI_CB_EE_RDDM event; ath11k_mhi_op_status_cb(), the mhi_controller callback, receives that event and starts the recovery process: ath11k_core_reset() calls ath11k_hif_power_down()/ath11k_hif_power_up(), after which MHI/ath11k download and boot the firmware again.
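In outline, the trigger path added here looks like this (a condensed sketch of the full diff below, not the complete functions; the reset_count and fail-count bookkeeping is omitted):

    /* MHI status callback: firmware has entered RDDM (crash dump) mode */
    static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
                                        enum mhi_callback cb)
    {
            struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);

            if (cb == MHI_CB_EE_RDDM &&
                !test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))
                    queue_work(ab->workqueue_aux, &ab->reset_work);
    }

    /* reset worker: power-cycle the HIF so the firmware is downloaded and booted again */
    static void ath11k_core_reset(struct work_struct *work)
    {
            struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);

            ath11k_hif_power_down(ab);
            ath11k_qmi_free_resource(ab);
            ath11k_hif_power_up(ab);
    }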
There is some logic to avoid an endless recovery loop and to prevent two recovery operations from running at the same time. Because one SoC has multiple radios, logic is also added in ath11k_mac_op_reconfig_complete() to make sure every radio has finished its reconfiguration before the device recovery is marked complete. A separate workqueue_aux is added as well: ab->workqueue is already used when ATH11K_QMI_EVENT_FW_READY is received during recovery (queue_work(ab->workqueue, &ab->restart_work)), and ath11k_core_reset() waits up to ATH11K_RESET_TIMEOUT_HZ for the previous restart_work to finish; if ath11k_core_reset() were also queued on ab->workqueue, it would delay the restart_work of the previous recovery and make that recovery fail. With this patch, ath11k recovery succeeds on QCA6390/WCN6855. Tested-on: QCA6390 hw2.0 PCI WLAN.HST.1.0.1-01740-QCAHSTSWPLZ_V2_TO_X86-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03003-QCAHSPSWPL_V1_V2_SILICONZ_LITE-2 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220228064606.8981-2-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 68 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/core.h | 13 +++++++ drivers/net/wireless/ath/ath11k/mac.c | 18 +++++++++ drivers/net/wireless/ath/ath11k/mhi.c | 33 +++++++++++++++++ 4 files changed, 132 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 700fd4ff62e4..97ee3dd230ed 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1381,6 +1381,65 @@ static void ath11k_core_restart(struct work_struct *work) complete(&ab->driver_recovery); } +static void ath11k_core_reset(struct work_struct *work) +{ + struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work); + int reset_count, fail_cont_count; + long time_left; + + if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) { + ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags); + return; + } + + /* Sometimes the recovery will fail and then the next all recovery fail, + * this is to avoid infinite recovery since it can not recovery success. + */ + fail_cont_count = atomic_read(&ab->fail_cont_count); + + if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL) + return; + + if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST && + time_before(jiffies, ab->reset_fail_timeout)) + return; + + reset_count = atomic_inc_return(&ab->reset_count); + + if (reset_count > 1) { + /* Sometimes it happened another reset worker before the previous one + * completed, then the second reset worker will destroy the previous one, + * thus below is to avoid that.
+ */ + ath11k_warn(ab, "already reseting count %d\n", reset_count); + + reinit_completion(&ab->reset_complete); + time_left = wait_for_completion_timeout(&ab->reset_complete, + ATH11K_RESET_TIMEOUT_HZ); + + if (time_left) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, "to skip reset\n"); + atomic_dec(&ab->reset_count); + return; + } + + ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ; + /* Record the continuous recovery fail count when recovery failed*/ + atomic_inc(&ab->fail_cont_count); + } + + ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n"); + + ab->is_reset = true; + atomic_set(&ab->recovery_count, 0); + + ath11k_hif_power_down(ab); + ath11k_qmi_free_resource(ab); + ath11k_hif_power_up(ab); + + ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n"); +} + static int ath11k_init_hw_params(struct ath11k_base *ab) { const struct ath11k_hw_params *hw_params = NULL; @@ -1450,6 +1509,7 @@ EXPORT_SYMBOL(ath11k_core_deinit); void ath11k_core_free(struct ath11k_base *ab) { + destroy_workqueue(ab->workqueue_aux); destroy_workqueue(ab->workqueue); kfree(ab); @@ -1472,9 +1532,14 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, if (!ab->workqueue) goto err_sc_free; + ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq"); + if (!ab->workqueue_aux) + goto err_free_wq; + mutex_init(&ab->core_lock); spin_lock_init(&ab->base_lock); mutex_init(&ab->vdev_id_11d_lock); + init_completion(&ab->reset_complete); INIT_LIST_HEAD(&ab->peers); init_waitqueue_head(&ab->peer_mapping_wq); @@ -1483,6 +1548,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, INIT_WORK(&ab->restart_work, ath11k_core_restart); INIT_WORK(&ab->update_11d_work, ath11k_update_11d); INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work); + INIT_WORK(&ab->reset_work, ath11k_core_reset); timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->wow.wakeup_completed); @@ -1493,6 +1559,8 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, return ab; +err_free_wq: + destroy_workqueue(ab->workqueue); err_sc_free: kfree(ab); return NULL; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index e556beb945ac..bd31413d84ca 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -40,6 +40,10 @@ extern unsigned int ath11k_frame_mode; #define ATH11K_MON_TIMER_INTERVAL 10 +#define ATH11K_RESET_TIMEOUT_HZ (20 * HZ) +#define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3 +#define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5 +#define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ) enum ath11k_supported_bw { ATH11K_BW_20 = 0, @@ -820,6 +824,15 @@ struct ath11k_base { struct work_struct restart_work; struct work_struct update_11d_work; u8 new_alpha2[3]; + struct workqueue_struct *workqueue_aux; + struct work_struct reset_work; + atomic_t reset_count; + atomic_t recovery_count; + bool is_reset; + struct completion reset_complete; + /* continuous recovery fail count */ + atomic_t fail_cont_count; + unsigned long reset_fail_timeout; struct { /* protected by data_lock */ u32 fw_crash_counter; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 62d9558a8e85..f2979dc00fdb 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -7915,6 +7915,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct ath11k *ar 
= hw->priv; + struct ath11k_base *ab = ar->ab; + int recovery_count; if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) return; @@ -7926,6 +7928,22 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ar->pdev->pdev_id); ar->state = ATH11K_STATE_ON; ieee80211_wake_queues(ar->hw); + + if (ab->is_reset) { + recovery_count = atomic_inc_return(&ab->recovery_count); + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "recovery count %d\n", recovery_count); + /* When there are multiple radios in an SOC, + * the recovery has to be done for each radio + */ + if (recovery_count == ab->num_radios) { + atomic_dec(&ab->reset_count); + complete(&ab->reset_complete); + ab->is_reset = false; + atomic_set(&ab->fail_cont_count, 0); + ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n"); + } + } } mutex_unlock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index fc3524e83e52..61d83be4841f 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -292,15 +292,48 @@ static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl) { } +static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason) +{ + switch (reason) { + case MHI_CB_IDLE: + return "MHI_CB_IDLE"; + case MHI_CB_PENDING_DATA: + return "MHI_CB_PENDING_DATA"; + case MHI_CB_LPM_ENTER: + return "MHI_CB_LPM_ENTER"; + case MHI_CB_LPM_EXIT: + return "MHI_CB_LPM_EXIT"; + case MHI_CB_EE_RDDM: + return "MHI_CB_EE_RDDM"; + case MHI_CB_EE_MISSION_MODE: + return "MHI_CB_EE_MISSION_MODE"; + case MHI_CB_SYS_ERROR: + return "MHI_CB_SYS_ERROR"; + case MHI_CB_FATAL_ERROR: + return "MHI_CB_FATAL_ERROR"; + case MHI_CB_BW_REQ: + return "MHI_CB_BW_REQ"; + default: + return "UNKNOWN"; + } +}; + static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback cb) { struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev); + ath11k_dbg(ab, ATH11K_DBG_BOOT, "mhi notify status reason %s\n", + ath11k_mhi_op_callback_to_str(cb)); + switch (cb) { case MHI_CB_SYS_ERROR: ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); break; + case MHI_CB_EE_RDDM: + if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))) + queue_work(ab->workqueue_aux, &ab->reset_work); + break; default: break; } -- cgit v1.2.3 From 38194f3a605e4a961f28bc38a73a4f4d43123968 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Mon, 21 Mar 2022 13:16:57 +0200 Subject: ath11k: add synchronization operation between reconfigure of mac80211 and ath11k_base ieee80211_reconfig() of mac80211 is the main function for recovery of each ieee80211_hw and ath11k, and ath11k_core_reconfigure_on_crash() is the main function for recovery of ath11k_base, it has more than one ieee80211_hw and ath11k for each ath11k_base, so it need to add synchronization between them, otherwise it has many issue. For example, when ath11k_core_reconfigure_on_crash() is not complete, mac80211 send a hw scan request to ath11k, it leads firmware crash, because firmware has not been initialized at that moment, firmware is only finished downloaded and loaded, it can not receive scan command. 
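In outline, the handshake this patch adds is built on a completion (a condensed sketch of the diff below; the recovery_start counter and error handling are omitted):

    /* restart worker (ath11k_base scope): signal once reconfiguration is done */
    if (ab->is_reset)
            complete_all(&ab->reconfigure_complete);

    /* ath11k_mac_op_start() (per-radio scope): during a restart, wait for it
     * before accepting scan/config requests from mac80211
     */
    wait_for_completion_timeout(&ab->reconfigure_complete,
                                ATH11K_RECONFIGURE_TIMEOUT_HZ);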
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03003-QCAHSPSWPL_V1_V2_SILICONZ_LITE-2 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220228064606.8981-3-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 51 ++++++++++++++++++++++++++++------ drivers/net/wireless/ath/ath11k/core.h | 5 ++++ drivers/net/wireless/ath/ath11k/mac.c | 22 +++++++++++++++ 3 files changed, 70 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 97ee3dd230ed..612efaeabcf4 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1302,12 +1302,11 @@ static void ath11k_update_11d(struct work_struct *work) } } -static void ath11k_core_restart(struct work_struct *work) +static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab) { - struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work); struct ath11k *ar; struct ath11k_pdev *pdev; - int i, ret = 0; + int i; spin_lock_bh(&ab->base_lock); ab->stats.fw_crash_counter++; @@ -1340,12 +1339,13 @@ static void ath11k_core_restart(struct work_struct *work) wake_up(&ab->wmi_ab.tx_credits_wq); wake_up(&ab->peer_mapping_wq); +} - ret = ath11k_core_reconfigure_on_crash(ab); - if (ret) { - ath11k_err(ab, "failed to reconfigure driver on crash recovery\n"); - return; - } +static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_pdev *pdev; + int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; @@ -1381,6 +1381,27 @@ static void ath11k_core_restart(struct work_struct *work) complete(&ab->driver_recovery); } +static void ath11k_core_restart(struct work_struct *work) +{ + struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work); + int ret; + + if (!ab->is_reset) + ath11k_core_pre_reconfigure_recovery(ab); + + ret = ath11k_core_reconfigure_on_crash(ab); + if (ret) { + ath11k_err(ab, "failed to reconfigure driver on crash recovery\n"); + return; + } + + if (ab->is_reset) + complete_all(&ab->reconfigure_complete); + + if (!ab->is_reset) + ath11k_core_post_reconfigure_recovery(ab); +} + static void ath11k_core_reset(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work); @@ -1432,6 +1453,18 @@ static void ath11k_core_reset(struct work_struct *work) ab->is_reset = true; atomic_set(&ab->recovery_count, 0); + reinit_completion(&ab->recovery_start); + atomic_set(&ab->recovery_start_count, 0); + + ath11k_core_pre_reconfigure_recovery(ab); + + reinit_completion(&ab->reconfigure_complete); + ath11k_core_post_reconfigure_recovery(ab); + + ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n"); + + time_left = wait_for_completion_timeout(&ab->recovery_start, + ATH11K_RECOVER_START_TIMEOUT_HZ); ath11k_hif_power_down(ab); ath11k_qmi_free_resource(ab); @@ -1540,6 +1573,8 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, spin_lock_init(&ab->base_lock); mutex_init(&ab->vdev_id_11d_lock); init_completion(&ab->reset_complete); + init_completion(&ab->reconfigure_complete); + init_completion(&ab->recovery_start); INIT_LIST_HEAD(&ab->peers); init_waitqueue_head(&ab->peer_mapping_wq); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index bd31413d84ca..70eb6e418422 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -44,6 
+44,8 @@ extern unsigned int ath11k_frame_mode; #define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3 #define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5 #define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ) +#define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ) +#define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ) enum ath11k_supported_bw { ATH11K_BW_20 = 0, @@ -828,8 +830,11 @@ struct ath11k_base { struct work_struct reset_work; atomic_t reset_count; atomic_t recovery_count; + atomic_t recovery_start_count; bool is_reset; struct completion reset_complete; + struct completion reconfigure_complete; + struct completion recovery_start; /* continuous recovery fail count */ atomic_t fail_cont_count; unsigned long reset_fail_timeout; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index f2979dc00fdb..34a62933ccd2 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -5749,6 +5749,27 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) return ret; } +static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab) +{ + int recovery_start_count; + + if (!ab->is_reset) + return; + + recovery_start_count = atomic_inc_return(&ab->recovery_start_count); + ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count); + + if (recovery_start_count == ab->num_radios) { + complete(&ab->recovery_start); + ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started success\n"); + } + + ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting reconfigure...\n"); + + wait_for_completion_timeout(&ab->reconfigure_complete, + ATH11K_RECONFIGURE_TIMEOUT_HZ); +} + static int ath11k_mac_op_start(struct ieee80211_hw *hw) { struct ath11k *ar = hw->priv; @@ -5765,6 +5786,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw) break; case ATH11K_STATE_RESTARTING: ar->state = ATH11K_STATE_RESTARTED; + ath11k_mac_wait_reconfigure(ab); break; case ATH11K_STATE_RESTARTED: case ATH11K_STATE_WEDGED: -- cgit v1.2.3 From 78e3e6094220a71504e7136c42b49fc8ed3a72b4 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Mon, 21 Mar 2022 13:17:03 +0200 Subject: ath11k: Add hw-restart option to simulate_fw_crash Add hw-restart to directly restart wlan. Like UTF mode start it will restart hardware and download firmware again. Usage: 1. Run command: echo hw-restart > /sys/kernel/debug/ath11k/qca6390\ hw2.0/simulate_fw_crash echo hw-restart > /sys/kernel/debug/ath11k/wcn6855\ hw2.0/simulate_fw_crash 2. wlan will be restart and do recovery process and success. 
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03003-QCAHSPSWPL_V1_V2_SILICONZ_LITE-2 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220228064606.8981-4-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/debugfs.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index a82266c8befc..9648e0017393 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -596,6 +596,10 @@ static ssize_t ath11k_write_simulate_fw_crash(struct file *file, ret = ath11k_wmi_force_fw_hang_cmd(ar, ATH11K_WMI_FW_HANG_ASSERT_TYPE, ATH11K_WMI_FW_HANG_DELAY); + } else if (!strcmp(buf, "hw-restart")) { + ath11k_info(ab, "user requested hw restart\n"); + queue_work(ab->workqueue_aux, &ab->reset_work); + ret = 0; } else { ret = -EINVAL; goto exit; -- cgit v1.2.3 From 0d7a8a6204ea9271f1d0a8c66a9fd2f54d2e3cbc Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Mon, 21 Mar 2022 13:17:08 +0200 Subject: ath11k: fix the warning of dev_wake in mhi_pm_disable_transition() When test device recovery with below command, it has warning in message as below. echo assert > /sys/kernel/debug/ath11k/wcn6855\ hw2.0/simulate_fw_crash echo assert > /sys/kernel/debug/ath11k/qca6390\ hw2.0/simulate_fw_crash warning message: [ 1965.642121] ath11k_pci 0000:06:00.0: simulating firmware assert crash [ 1968.471364] ieee80211 phy0: Hardware restart was requested [ 1968.511305] ------------[ cut here ]------------ [ 1968.511368] WARNING: CPU: 3 PID: 1546 at drivers/bus/mhi/core/pm.c:505 mhi_pm_disable_transition+0xb37/0xda0 [mhi] [ 1968.511443] Modules linked in: ath11k_pci ath11k mac80211 libarc4 cfg80211 qmi_helpers qrtr_mhi mhi qrtr nvme nvme_core [ 1968.511563] CPU: 3 PID: 1546 Comm: kworker/u17:0 Kdump: loaded Tainted: G W 5.17.0-rc3-wt-ath+ #579 [ 1968.511629] Hardware name: Intel(R) Client Systems NUC8i7HVK/NUC8i7HVB, BIOS HNKBLi70.86A.0067.2021.0528.1339 05/28/2021 [ 1968.511704] Workqueue: mhi_hiprio_wq mhi_pm_st_worker [mhi] [ 1968.511787] RIP: 0010:mhi_pm_disable_transition+0xb37/0xda0 [mhi] [ 1968.511870] Code: a9 fe ff ff 4c 89 ff 44 89 04 24 e8 03 46 f6 e5 44 8b 04 24 41 83 f8 01 0f 84 21 fe ff ff e9 4c fd ff ff 0f 0b e9 af f8 ff ff <0f> 0b e9 5c f8 ff ff 48 89 df e8 da 9e ee e3 e9 12 fd ff ff 4c 89 [ 1968.511923] RSP: 0018:ffffc900024efbf0 EFLAGS: 00010286 [ 1968.511969] RAX: 00000000ffffffff RBX: ffff88811d241250 RCX: ffffffffc0176922 [ 1968.512014] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffff888118a90a24 [ 1968.512059] RBP: ffff888118a90800 R08: 0000000000000000 R09: ffff888118a90a27 [ 1968.512102] R10: ffffed1023152144 R11: 0000000000000001 R12: ffff888118a908ac [ 1968.512229] R13: ffff888118a90928 R14: dffffc0000000000 R15: ffff888118a90a24 [ 1968.512310] FS: 0000000000000000(0000) GS:ffff888234200000(0000) knlGS:0000000000000000 [ 1968.512405] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1968.512493] CR2: 00007f5538f443a8 CR3: 000000016dc28001 CR4: 00000000003706e0 [ 1968.512587] Call Trace: [ 1968.512672] [ 1968.512751] ? _raw_spin_unlock_irq+0x1f/0x40 [ 1968.512859] mhi_pm_st_worker+0x3ac/0x790 [mhi] [ 1968.512959] ? mhi_pm_mission_mode_transition.isra.0+0x7d0/0x7d0 [mhi] [ 1968.513063] process_one_work+0x86a/0x1400 [ 1968.513184] ? pwq_dec_nr_in_flight+0x230/0x230 [ 1968.513312] ? move_linked_works+0x125/0x290 [ 1968.513416] worker_thread+0x6db/0xf60 [ 1968.513536] ? 
process_one_work+0x1400/0x1400 [ 1968.513627] kthread+0x241/0x2d0 [ 1968.513733] ? kthread_complete_and_exit+0x20/0x20 [ 1968.513821] ret_from_fork+0x22/0x30 [ 1968.513924] Reason is mhi_deassert_dev_wake() from mhi_device_put() is called but mhi_assert_dev_wake() from __mhi_device_get_sync() is not called in progress of recovery. Commit 8e0559921f9a ("bus: mhi: core: Skip device wake in error or shutdown state") add check for the pm_state of mhi in __mhi_device_get_sync(), and the pm_state is not the normal state untill recovery is completed, so it leads the dev_wake is not 0 and above warning print in mhi_pm_disable_transition() while checking mhi_cntrl->dev_wake. Add check in ath11k_pci_write32()/ath11k_pci_read32() to skip call mhi_device_put() if mhi_device_get_sync() does not really do wake, then the warning gone. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03003-QCAHSPSWPL_V1_V2_SILICONZ_LITE-2 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220228064606.8981-5-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/pci.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 903758751c99..8a3ff12057e8 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -191,6 +191,7 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 window_start; + int ret = 0; /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. @@ -198,7 +199,7 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) - mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); if (offset < WINDOW_START) { iowrite32(value, ab->mem + offset); @@ -222,7 +223,8 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ACCESS_ALWAYS_OFF) + offset >= ACCESS_ALWAYS_OFF && + !ret) mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); } @@ -230,6 +232,7 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 val, window_start; + int ret = 0; /* for offset beyond BAR + 4K - 32, may * need to wakeup MHI to access. 
@@ -237,7 +240,7 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && offset >= ACCESS_ALWAYS_OFF) - mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); if (offset < WINDOW_START) { val = ioread32(ab->mem + offset); @@ -261,7 +264,8 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ACCESS_ALWAYS_OFF) + offset >= ACCESS_ALWAYS_OFF && + !ret) mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); return val; -- cgit v1.2.3 From 1e4ac7173c9394de7f54a4a861377ac3f030c614 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Mon, 11 Oct 2021 13:56:02 +0800 Subject: ath11k: enable PLATFORM_CAP_PCIE_GLOBAL_RESET QMI host capability In Qualcomm ARM platforms there is WL_EN pin and other power regulators which can be controlled at platform side to completely reset the chip. For most of x86 and other platforms, the chip is connected via PCIe M.2 interface, and there is no way to control WL_EN pin. Instead the host driver needs to reset the chip via PCIE_SOC_GLOBAL_RESET hardware register, just like ath11k does currently. But when using PCIE_SOC_GLOBAL_RESET there are some hardware registers which are not cleared/restored. To handle those cases we can enable PLATFORM_CAP_PCIE_GLOBAL_RESET QMI host capability to tell the firmware to do some platform specific operations after firmware download. This does not fix any known issues, but is recommended by the firmware team, so enable the capability on QCA6390 and WCN6855 PCI devices. It is currently unclear if this should be enabled also on QCN9074, so leave it disabled for now. On AHB devices this is not needed as they don't use PCIE_SOC_GLOBAL_RESET. 
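Condensed, the change amounts to one capability bit in the QMI host capability request, gated per chip (see the full diff below):

    if (ab->hw_params.global_reset)      /* set only for QCA6390/WCN6855 PCI */
            req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;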
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20211011055602.77342-1-bqiang@codeaurora.org --- drivers/net/wireless/ath/ath11k/core.c | 6 ++++++ drivers/net/wireless/ath/ath11k/hw.h | 1 + drivers/net/wireless/ath/ath11k/qmi.c | 4 ++++ 3 files changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 612efaeabcf4..9cfba73b9e22 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -100,6 +100,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, + .global_reset = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -166,6 +167,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, + .global_reset = false, }, { .name = "qca6390 hw2.0", @@ -231,6 +233,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, + .global_reset = true, }, { .name = "qcn9074 hw1.0", @@ -296,6 +299,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, + .global_reset = false, }, { .name = "wcn6855 hw2.0", @@ -361,6 +365,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, + .global_reset = true, }, { .name = "wcn6855 hw2.1", @@ -425,6 +430,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, + .global_reset = true, }, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 1c701e16a3b5..5955c6f5da24 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -194,6 +194,7 @@ struct ath11k_hw_params { bool fw_wmi_diag_event; bool current_cc_support; bool dbr_debug_support; + bool global_reset; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index ec110fe81378..ffb8aa7dfb02 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -15,6 +15,7 @@ #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 #define HOST_CSTATE_BIT 0x04 +#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08 #define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING=" @@ -1676,6 +1677,9 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; } + if (ab->hw_params.global_reset) + req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET; + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n"); ret = qmi_txn_init(&ab->qmi.handle, &txn, -- cgit v1.2.3 From 62abdc06c50eb18e3fa62f7136e66842a96f6b58 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Wed, 23 Mar 2022 11:14:16 +0200 Subject: ath11k: add fallback board name without variant while searching board-2.bin Sometimes it has a variant value which read from DT or SMBIOS by ath11k, and meanwhile it does not have the matched board name in board-2.bin, then it will failed at boot up phase. 
Add fallback board name which removed variant value and search again in board-2.bin when failed with variant and try to load the board data again to increase boot up success rate. dmesg log after this patch: [169547.248472] ath11k_pci 0000:05:00.0: boot using board name 'bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262,variant=test' [169547.248565] ath11k_pci 0000:05:00.0: boot firmware request ath11k/WCN6855/hw2.0/board-2.bin size 180324 [169547.248568] ath11k_pci 0000:05:00.0: board name [169547.248570] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 76 65 6e 64 6f 72 3d 31 bus=pci,vendor=1 [169547.248571] ath11k_pci 0000:05:00.0: 00000010: 37 63 62 2c 64 65 76 69 63 65 3d 31 31 30 33 2c 7cb,device=1103, [169547.248572] ath11k_pci 0000:05:00.0: 00000020: 73 75 62 73 79 73 74 65 6d 2d 76 65 6e 64 6f 72 subsystem-vendor [169547.248574] ath11k_pci 0000:05:00.0: 00000030: 3d 31 37 63 62 2c 73 75 62 73 79 73 74 65 6d 2d =17cb,subsystem- [169547.248575] ath11k_pci 0000:05:00.0: 00000040: 64 65 76 69 63 65 3d 33 33 37 34 2c 71 6d 69 2d device=3374,qmi- [169547.248576] ath11k_pci 0000:05:00.0: 00000050: 63 68 69 70 2d 69 64 3d 32 2c 71 6d 69 2d 62 6f chip-id=2,qmi-bo [169547.248577] ath11k_pci 0000:05:00.0: 00000060: 61 72 64 2d 69 64 3d 32 36 32 ard-id=262 [169547.248578] ath11k_pci 0000:05:00.0: board name [169547.248579] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 76 65 6e 64 6f 72 3d 31 bus=pci,vendor=1 [169547.248581] ath11k_pci 0000:05:00.0: 00000010: 37 63 62 2c 64 65 76 69 63 65 3d 31 31 30 33 2c 7cb,device=1103, [169547.248582] ath11k_pci 0000:05:00.0: 00000020: 73 75 62 73 79 73 74 65 6d 2d 76 65 6e 64 6f 72 subsystem-vendor [169547.248583] ath11k_pci 0000:05:00.0: 00000030: 3d 31 37 63 62 2c 73 75 62 73 79 73 74 65 6d 2d =17cb,subsystem- [169547.248584] ath11k_pci 0000:05:00.0: 00000040: 64 65 76 69 63 65 3d 33 33 37 34 2c 71 6d 69 2d device=3374,qmi- [169547.248585] ath11k_pci 0000:05:00.0: 00000050: 63 68 69 70 2d 69 64 3d 32 2c 71 6d 69 2d 62 6f chip-id=2,qmi-bo [169547.248587] ath11k_pci 0000:05:00.0: 00000060: 61 72 64 2d 69 64 3d 32 36 36 ard-id=266 [169547.248588] ath11k_pci 0000:05:00.0: board name [169547.248589] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 76 65 6e 64 6f 72 3d 31 bus=pci,vendor=1 [169547.248590] ath11k_pci 0000:05:00.0: 00000010: 37 63 62 2c 64 65 76 69 63 65 3d 31 31 30 33 2c 7cb,device=1103, [169547.248591] ath11k_pci 0000:05:00.0: 00000020: 73 75 62 73 79 73 74 65 6d 2d 76 65 6e 64 6f 72 subsystem-vendor [169547.248592] ath11k_pci 0000:05:00.0: 00000030: 3d 31 37 63 62 2c 73 75 62 73 79 73 74 65 6d 2d =17cb,subsystem- [169547.248594] ath11k_pci 0000:05:00.0: 00000040: 64 65 76 69 63 65 3d 33 33 37 34 2c 71 6d 69 2d device=3374,qmi- [169547.248595] ath11k_pci 0000:05:00.0: 00000050: 63 68 69 70 2d 69 64 3d 31 38 2c 71 6d 69 2d 62 chip-id=18,qmi-b [169547.248596] ath11k_pci 0000:05:00.0: 00000060: 6f 61 72 64 2d 69 64 3d 32 36 36 oard-id=266 [169547.248597] ath11k_pci 0000:05:00.0: failed to fetch board data for bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262,variant=test from ath11k/WCN6855/hw2.0/board-2.bin [169547.248476] ath11k_pci 0000:05:00.0: boot using board name 'bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262' [169547.248634] ath11k_pci 0000:05:00.0: boot firmware request ath11k/WCN6855/hw2.0/board-2.bin size 180324 [169547.248636] 
ath11k_pci 0000:05:00.0: board name [169547.248637] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 76 65 6e 64 6f 72 3d 31 bus=pci,vendor=1 [169547.248638] ath11k_pci 0000:05:00.0: 00000010: 37 63 62 2c 64 65 76 69 63 65 3d 31 31 30 33 2c 7cb,device=1103, [169547.248639] ath11k_pci 0000:05:00.0: 00000020: 73 75 62 73 79 73 74 65 6d 2d 76 65 6e 64 6f 72 subsystem-vendor [169547.248641] ath11k_pci 0000:05:00.0: 00000030: 3d 31 37 63 62 2c 73 75 62 73 79 73 74 65 6d 2d =17cb,subsystem- [169547.248642] ath11k_pci 0000:05:00.0: 00000040: 64 65 76 69 63 65 3d 33 33 37 34 2c 71 6d 69 2d device=3374,qmi- [169547.248643] ath11k_pci 0000:05:00.0: 00000050: 63 68 69 70 2d 69 64 3d 32 2c 71 6d 69 2d 62 6f chip-id=2,qmi-bo [169547.248645] ath11k_pci 0000:05:00.0: 00000060: 61 72 64 2d 69 64 3d 32 36 32 ard-id=262 [169547.248646] ath11k_pci 0000:05:00.0: boot found match for name 'bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262' [169547.248647] ath11k_pci 0000:05:00.0: boot found board data for 'bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262' [169547.248649] ath11k_pci 0000:05:00.0: using board api 2 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220315104721.26649-2-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 48 +++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 9cfba73b9e22..6e7f952b315d 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -571,13 +571,13 @@ int ath11k_core_check_dt(struct ath11k_base *ab) return 0; } -static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, - size_t name_len) +static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, + size_t name_len, bool with_variant) { /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; - if (ab->qmi.target.bdf_ext[0] != '\0') + if (with_variant && ab->qmi.target.bdf_ext[0] != '\0') scnprintf(variant, sizeof(variant), ",variant=%s", ab->qmi.target.bdf_ext); @@ -607,6 +607,18 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, return 0; } +static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, + size_t name_len) +{ + return __ath11k_core_create_board_name(ab, name, name_len, true); +} + +static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name, + size_t name_len) +{ + return __ath11k_core_create_board_name(ab, name, name_len, false); +} + const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, const char *file) { @@ -810,7 +822,7 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, out: if (!bd->data || !bd->len) { - ath11k_err(ab, + ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch board data for %s from %s\n", boardname, filepath); ret = -ENODATA; @@ -842,10 +854,13 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, #define BOARD_NAME_SIZE 200 int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { - char boardname[BOARD_NAME_SIZE]; + char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE]; + char *filename, 
filepath[100]; int ret; - ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); + filename = ATH11K_BOARD_API2_FILE; + + ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname)); if (ret) { ath11k_err(ab, "failed to create board name: %d", ret); return ret; @@ -856,10 +871,29 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) if (!ret) goto success; + ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname, + sizeof(fallback_boardname)); + if (ret) { + ath11k_err(ab, "failed to create fallback board name: %d", ret); + return ret; + } + + ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname); + if (!ret) + goto success; + ab->bd_api = 1; ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE); if (ret) { - ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n", + ath11k_core_create_firmware_path(ab, filename, + filepath, sizeof(filepath)); + ath11k_err(ab, "failed to fetch board data for %s from %s\n", + boardname, filepath); + if (memcmp(boardname, fallback_boardname, strlen(boardname))) + ath11k_err(ab, "failed to fetch board data for %s from %s\n", + fallback_boardname, filepath); + + ath11k_err(ab, "failed to fetch board.bin from %s\n", ab->hw_params.fw.dir); return ret; } -- cgit v1.2.3 From 9d97114d222047c0699bbdbf8f128907f423bbe6 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Wed, 23 Mar 2022 11:14:17 +0200 Subject: ath11k: add read variant from SMBIOS for download board data This is to read variant from SMBIOS such as read from DT, the variant string will be used to one part of string which used to search board data from board-2.bin. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220315104721.26649-3-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 70 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/core.h | 20 +++++++++- drivers/net/wireless/ath/ath11k/qmi.c | 4 ++ 3 files changed, 93 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 6e7f952b315d..7b91b029074f 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -9,6 +9,7 @@ #include #include #include + #include "core.h" #include "dp_tx.h" #include "dp_rx.h" @@ -548,6 +549,75 @@ int ath11k_core_resume(struct ath11k_base *ab) } EXPORT_SYMBOL(ath11k_core_resume); +static void ath11k_core_check_bdfext(const struct dmi_header *hdr, void *data) +{ + struct ath11k_base *ab = data; + const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC; + struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr; + ssize_t copied; + size_t len; + int i; + + if (ab->qmi.target.bdf_ext[0] != '\0') + return; + + if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE) + return; + + if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "wrong smbios bdf ext type length (%d).\n", + hdr->length); + return; + } + + if (!smbios->bdf_enabled) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n"); + return; + } + + /* Only one string exists (per spec) */ + if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "bdf variant magic does not match.\n"); + return; + } + + len = min_t(size_t, + strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext)); + for (i = 0; i < len; i++) 
{ + if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "bdf variant name contains non ascii chars.\n"); + return; + } + } + + /* Copy extension name without magic prefix */ + copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic), + sizeof(ab->qmi.target.bdf_ext)); + if (copied < 0) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate\n"); + return; + } + + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "found and validated bdf variant smbios_type 0x%x bdf %s\n", + ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext); +} + +int ath11k_core_check_smbios(struct ath11k_base *ab) +{ + ab->qmi.target.bdf_ext[0] = '\0'; + dmi_walk(ath11k_core_check_bdfext, ab); + + if (ab->qmi.target.bdf_ext[0] == '\0') + return -ENODATA; + + return 0; +} + int ath11k_core_check_dt(struct ath11k_base *ab) { size_t max_len = sizeof(ab->qmi.target.bdf_ext); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 70eb6e418422..6b272eb8bb4a 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -37,6 +39,15 @@ #define ATH11K_INVALID_HW_MAC_ID 0xFF #define ATH11K_CONNECTION_LOSS_HZ (3 * HZ) +/* SMBIOS type containing Board Data File Name Extension */ +#define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8 + +/* SMBIOS type structure length (excluding strings-set) */ +#define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9 + +/* The magic used by QCA spec */ +#define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_" + extern unsigned int ath11k_frame_mode; #define ATH11K_MON_TIMER_INTERVAL 10 @@ -154,6 +165,13 @@ struct ath11k_ext_irq_grp { struct net_device napi_ndev; }; +struct ath11k_smbios_bdf { + struct dmi_header hdr; + u32 padding; + u8 bdf_enabled; + u8 bdf_ext[]; +}; + #define HEHANDLE_CAP_PHYINFO_SIZE 3 #define HECAP_PHYINFO_SIZE 9 #define HECAP_MACINFO_SIZE 5 @@ -1046,7 +1064,7 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, const char *name); void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); int ath11k_core_check_dt(struct ath11k_base *ath11k); - +int ath11k_core_check_smbios(struct ath11k_base *ab); void ath11k_core_halt(struct ath11k *ar); int ath11k_core_resume(struct ath11k_base *ab); int ath11k_core_suspend(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index ffb8aa7dfb02..0442faa3b7af 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -2095,6 +2095,10 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) ab->qmi.target.fw_build_timestamp, fw_build_id); + r = ath11k_core_check_smbios(ab); + if (r) + ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); + r = ath11k_core_check_dt(ab); if (r) ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n"); -- cgit v1.2.3 From 7b0c70d92a435913f6e11d6a248b935697e8a3eb Mon Sep 17 00:00:00 2001 From: Karthikeyan Periyasamy Date: Wed, 23 Mar 2022 11:14:17 +0200 Subject: ath11k: Add peer rhash table support When more clients (128) are connected, the UL data traffic KPI measurement is low compared to single client. This issue is due to more CPU cycles spent on the peer lookup operation with more clients. 
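The hot path in question is the per-packet peer lookup; condensed from the diff further below, the address lookup changes from a linear list walk to an rhashtable lookup:

    /* before: O(n) walk over ab->peers for every lookup */
    list_for_each_entry(peer, &ab->peers, list)
            if (ether_addr_equal(peer->addr, addr))
                    return peer;

    /* after: near-O(1) hash lookup keyed by MAC address */
    return rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
                                  ab->rhash_peer_addr_param);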
So reduce the peer lookup operation by modifying the linear based lookup operation into the rhash based lookup operation. This improve the peak throughput measurement. Since this is a software algorithm change, it is applicable for all the platforms. TCP UL 128 Clients test case Observation (64bit system): Previous: ~550 Mbps Now : ~860 Mbps Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01067-QCAHKSWPL_SILICONZ-1 Signed-off-by: Karthikeyan Periyasamy Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1644036628-5334-1-git-send-email-quic_periyasa@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 3 +- drivers/net/wireless/ath/ath11k/core.h | 14 ++ drivers/net/wireless/ath/ath11k/mac.c | 16 +- drivers/net/wireless/ath/ath11k/peer.c | 344 +++++++++++++++++++++++++++++++-- drivers/net/wireless/ath/ath11k/peer.h | 10 +- 5 files changed, 363 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 7b91b029074f..2a867237e817 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1680,6 +1680,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, goto err_free_wq; mutex_init(&ab->core_lock); + mutex_init(&ab->tbl_mtx_lock); spin_lock_init(&ab->base_lock); mutex_init(&ab->vdev_id_11d_lock); init_completion(&ab->reset_complete); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 6b272eb8bb4a..41001ec174e2 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_CORE_H @@ -12,6 +13,7 @@ #include #include #include +#include #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -803,6 +805,18 @@ struct ath11k_base { struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS]; struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS]; unsigned long long free_vdev_map; + + /* To synchronize rhash tbl write operation */ + struct mutex tbl_mtx_lock; + + /* The rhashtable containing struct ath11k_peer keyed by mac addr */ + struct rhashtable *rhead_peer_addr; + struct rhashtable_params rhash_peer_addr_param; + + /* The rhashtable containing struct ath11k_peer keyed by id */ + struct rhashtable *rhead_peer_id; + struct rhashtable_params rhash_peer_id_param; + struct list_head peers; wait_queue_head_t peer_mapping_wq; u8 mac_addr[ETH_ALEN]; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 34a62933ccd2..7a7641e8e325 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -875,13 +875,16 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar) lockdep_assert_held(&ar->conf_mutex); + mutex_lock(&ab->tbl_mtx_lock); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { ath11k_peer_rx_tid_cleanup(ar, peer); + ath11k_peer_rhash_delete(ab, peer); list_del(&peer->list); kfree(peer); } spin_unlock_bh(&ab->base_lock); + mutex_unlock(&ab->tbl_mtx_lock); ar->num_peers = 0; ar->num_stations = 0; @@ -4554,6 +4557,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, } ath11k_mac_dec_num_stations(arvif, sta); + mutex_lock(&ar->ab->tbl_mtx_lock); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (skip_peer_delete && peer) { @@ -4561,12 +4565,14 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, } else if (peer && peer->sta == sta) { ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", vif->addr, arvif->vdev_id); + ath11k_peer_rhash_delete(ar->ab, peer); peer->sta = NULL; list_del(&peer->list); kfree(peer); ar->num_peers--; } spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); kfree(arsta->tx_stats); arsta->tx_stats = NULL; @@ -8574,6 +8580,8 @@ void ath11k_mac_unregister(struct ath11k_base *ab) __ath11k_mac_unregister(ar); } + + ath11k_peer_rhash_tbl_destroy(ab); } static int __ath11k_mac_register(struct ath11k *ar) @@ -8802,6 +8810,10 @@ int ath11k_mac_register(struct ath11k_base *ab) ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ; ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; + ret = ath11k_peer_rhash_tbl_init(ab); + if (ret) + return ret; + for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; @@ -8831,6 +8843,8 @@ err_cleanup: __ath11k_mac_unregister(ar); } + ath11k_peer_rhash_tbl_destroy(ab); + return ret; } diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c index 392985b873bf..9e22aaf34b88 100644 --- a/drivers/net/wireless/ath/ath11k/peer.c +++ b/drivers/net/wireless/ath/ath11k/peer.c @@ -1,23 +1,22 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include "core.h" #include "peer.h" #include "debug.h" -struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, - const u8 *addr) +static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab, + int peer_id) { struct ath11k_peer *peer; lockdep_assert_held(&ab->base_lock); list_for_each_entry(peer, &ab->peers, list) { - if (peer->vdev_id != vdev_id) - continue; - if (!ether_addr_equal(peer->addr, addr)) + if (peer->peer_id != peer_id) continue; return peer; @@ -26,15 +25,15 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, return NULL; } -static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab, - u8 pdev_idx, const u8 *addr) +struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, + const u8 *addr) { struct ath11k_peer *peer; lockdep_assert_held(&ab->base_lock); list_for_each_entry(peer, &ab->peers, list) { - if (peer->pdev_idx != pdev_idx) + if (peer->vdev_id != vdev_id) continue; if (!ether_addr_equal(peer->addr, addr)) continue; @@ -52,14 +51,13 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab, lockdep_assert_held(&ab->base_lock); - list_for_each_entry(peer, &ab->peers, list) { - if (!ether_addr_equal(peer->addr, addr)) - continue; + if (!ab->rhead_peer_addr) + return NULL; - return peer; - } + peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr, + ab->rhash_peer_addr_param); - return NULL; + return peer; } struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, @@ -69,11 +67,13 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, lockdep_assert_held(&ab->base_lock); - list_for_each_entry(peer, &ab->peers, list) - if (peer_id == peer->peer_id) - return peer; + if (!ab->rhead_peer_id) + return NULL; - return NULL; + peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id, + ab->rhash_peer_id_param); + + return peer; } struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab, @@ -99,7 +99,7 @@ void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id) spin_lock_bh(&ab->base_lock); - peer = ath11k_peer_find_by_id(ab, peer_id); + peer = ath11k_peer_find_list_by_id(ab, peer_id); if (!peer) { ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n", peer_id); @@ -167,6 +167,76 @@ static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id, return 0; } +static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab, + struct rhashtable *rtbl, + struct rhash_head *rhead, + struct rhashtable_params *params, + void *key) +{ + struct ath11k_peer *tmp; + + lockdep_assert_held(&ab->tbl_mtx_lock); + + tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params); + + if (!tmp) + return 0; + else if (IS_ERR(tmp)) + return PTR_ERR(tmp); + else + return -EEXIST; +} + +static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab, + struct rhashtable *rtbl, + struct rhash_head *rhead, + struct rhashtable_params *params) +{ + int ret; + + lockdep_assert_held(&ab->tbl_mtx_lock); + + ret = rhashtable_remove_fast(rtbl, rhead, *params); + if (ret && ret != -ENOENT) + return ret; + + return 0; +} + +static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer) +{ + int ret; + + lockdep_assert_held(&ab->base_lock); + lockdep_assert_held(&ab->tbl_mtx_lock); + + if (!ab->rhead_peer_id || !ab->rhead_peer_addr) + return -EPERM; + + ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id, + &ab->rhash_peer_id_param, &peer->peer_id); + if (ret) { + ath11k_warn(ab, "failed to add 
peer %pM with id %d in rhash_id ret %d\n", + peer->addr, peer->peer_id, ret); + return ret; + } + + ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr, + &ab->rhash_peer_addr_param, &peer->addr); + if (ret) { + ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n", + peer->addr, peer->peer_id, ret); + goto err_clean; + } + + return 0; + +err_clean: + ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id, + &ab->rhash_peer_id_param); + return ret; +} + void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id) { struct ath11k_peer *peer, *tmp; @@ -174,6 +244,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id) lockdep_assert_held(&ar->conf_mutex); + mutex_lock(&ab->tbl_mtx_lock); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { if (peer->vdev_id != vdev_id) @@ -182,12 +253,14 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id) ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); + ath11k_peer_rhash_delete(ab, peer); list_del(&peer->list); kfree(peer); ar->num_peers--; } spin_unlock_bh(&ab->base_lock); + mutex_unlock(&ab->tbl_mtx_lock); } static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr) @@ -220,14 +293,35 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id, static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr) { int ret; + struct ath11k_peer *peer; + struct ath11k_base *ab = ar->ab; lockdep_assert_held(&ar->conf_mutex); + mutex_lock(&ab->tbl_mtx_lock); + spin_lock_bh(&ab->base_lock); + + peer = ath11k_peer_find_by_addr(ab, addr); + if (!peer) { + spin_unlock_bh(&ab->base_lock); + mutex_unlock(&ab->tbl_mtx_lock); + + ath11k_warn(ab, + "failed to find peer vdev_id %d addr %pM in delete\n", + vdev_id, addr); + return -EINVAL; + } + + ath11k_peer_rhash_delete(ab, peer); + + spin_unlock_bh(&ab->base_lock); + mutex_unlock(&ab->tbl_mtx_lock); + reinit_completion(&ar->peer_delete_done); ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id); if (ret) { - ath11k_warn(ar->ab, + ath11k_warn(ab, "failed to delete peer vdev_id %d addr %pM ret %d\n", vdev_id, addr, ret); return ret; @@ -276,7 +370,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, } spin_lock_bh(&ar->ab->base_lock); - peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr); + peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr); if (peer) { spin_unlock_bh(&ar->ab->base_lock); return -EINVAL; @@ -296,11 +390,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, if (ret) return ret; + mutex_lock(&ar->ab->tbl_mtx_lock); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr); if (!peer) { spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n", param->peer_addr, param->vdev_id); @@ -308,6 +404,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, goto cleanup; } + ret = ath11k_peer_rhash_add(ar->ab, peer); + if (ret) { + spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); + goto cleanup; + } + peer->pdev_idx = ar->pdev_idx; peer->sta = sta; @@ -332,6 +435,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, ar->num_peers++; spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); return 0; @@ -343,3 +447,201 @@ cleanup: return ret; } + +int 
ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer) +{ + int ret; + + lockdep_assert_held(&ab->base_lock); + lockdep_assert_held(&ab->tbl_mtx_lock); + + if (!ab->rhead_peer_id || !ab->rhead_peer_addr) + return -EPERM; + + ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr, + &ab->rhash_peer_addr_param); + if (ret) { + ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n", + peer->addr, peer->peer_id, ret); + return ret; + } + + ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id, + &ab->rhash_peer_id_param); + if (ret) { + ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n", + peer->addr, peer->peer_id, ret); + return ret; + } + + return 0; +} + +static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab) +{ + struct rhashtable_params *param; + struct rhashtable *rhash_id_tbl; + int ret; + size_t size; + + lockdep_assert_held(&ab->tbl_mtx_lock); + + if (ab->rhead_peer_id) + return 0; + + size = sizeof(*ab->rhead_peer_id); + rhash_id_tbl = kzalloc(size, GFP_KERNEL); + if (!rhash_id_tbl) { + ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n", + size); + return -ENOMEM; + } + + param = &ab->rhash_peer_id_param; + + param->key_offset = offsetof(struct ath11k_peer, peer_id); + param->head_offset = offsetof(struct ath11k_peer, rhash_id); + param->key_len = sizeof_field(struct ath11k_peer, peer_id); + param->automatic_shrinking = true; + param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab); + + ret = rhashtable_init(rhash_id_tbl, param); + if (ret) { + ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret); + goto err_free; + } + + spin_lock_bh(&ab->base_lock); + + if (!ab->rhead_peer_id) { + ab->rhead_peer_id = rhash_id_tbl; + } else { + spin_unlock_bh(&ab->base_lock); + goto cleanup_tbl; + } + + spin_unlock_bh(&ab->base_lock); + + return 0; + +cleanup_tbl: + rhashtable_destroy(rhash_id_tbl); +err_free: + kfree(rhash_id_tbl); + + return ret; +} + +static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab) +{ + struct rhashtable_params *param; + struct rhashtable *rhash_addr_tbl; + int ret; + size_t size; + + lockdep_assert_held(&ab->tbl_mtx_lock); + + if (ab->rhead_peer_addr) + return 0; + + size = sizeof(*ab->rhead_peer_addr); + rhash_addr_tbl = kzalloc(size, GFP_KERNEL); + if (!rhash_addr_tbl) { + ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n", + size); + return -ENOMEM; + } + + param = &ab->rhash_peer_addr_param; + + param->key_offset = offsetof(struct ath11k_peer, addr); + param->head_offset = offsetof(struct ath11k_peer, rhash_addr); + param->key_len = sizeof_field(struct ath11k_peer, addr); + param->automatic_shrinking = true; + param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab); + + ret = rhashtable_init(rhash_addr_tbl, param); + if (ret) { + ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret); + goto err_free; + } + + spin_lock_bh(&ab->base_lock); + + if (!ab->rhead_peer_addr) { + ab->rhead_peer_addr = rhash_addr_tbl; + } else { + spin_unlock_bh(&ab->base_lock); + goto cleanup_tbl; + } + + spin_unlock_bh(&ab->base_lock); + + return 0; + +cleanup_tbl: + rhashtable_destroy(rhash_addr_tbl); +err_free: + kfree(rhash_addr_tbl); + + return ret; +} + +static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab) +{ + lockdep_assert_held(&ab->tbl_mtx_lock); + + if (!ab->rhead_peer_id) + return; + + rhashtable_destroy(ab->rhead_peer_id); + kfree(ab->rhead_peer_id); + 
ab->rhead_peer_id = NULL; +} + +static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab) +{ + lockdep_assert_held(&ab->tbl_mtx_lock); + + if (!ab->rhead_peer_addr) + return; + + rhashtable_destroy(ab->rhead_peer_addr); + kfree(ab->rhead_peer_addr); + ab->rhead_peer_addr = NULL; +} + +int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab) +{ + int ret; + + mutex_lock(&ab->tbl_mtx_lock); + + ret = ath11k_peer_rhash_id_tbl_init(ab); + if (ret) + goto out; + + ret = ath11k_peer_rhash_addr_tbl_init(ab); + if (ret) + goto cleanup_tbl; + + mutex_unlock(&ab->tbl_mtx_lock); + + return 0; + +cleanup_tbl: + ath11k_peer_rhash_id_tbl_destroy(ab); +out: + mutex_unlock(&ab->tbl_mtx_lock); + return ret; +} + +void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab) +{ + mutex_lock(&ab->tbl_mtx_lock); + + ath11k_peer_rhash_addr_tbl_destroy(ab); + ath11k_peer_rhash_id_tbl_destroy(ab); + + mutex_unlock(&ab->tbl_mtx_lock); +} diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h index 63fe5665badf..6dd17bafe3a0 100644 --- a/drivers/net/wireless/ath/ath11k/peer.h +++ b/drivers/net/wireless/ath/ath11k/peer.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_PEER_H @@ -20,6 +21,11 @@ struct ath11k_peer { struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1]; + /* peer id based rhashtable list pointer */ + struct rhash_head rhash_id; + /* peer addr based rhashtable list pointer */ + struct rhash_head rhash_addr; + /* Info used in MMIC verification of * RX fragments */ @@ -47,5 +53,7 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id, const u8 *addr); struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab, int vdev_id); - +int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab); +void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab); +int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer); #endif /* _PEER_H_ */ -- cgit v1.2.3 From 26c31016fe7eb1911e9ffa50dbaac6f2c42c799c Mon Sep 17 00:00:00 2001 From: Wenli Looi Date: Sun, 20 Mar 2022 17:30:05 -0600 Subject: ath9k: make ATH_SREV macros more consistent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes the macros more consistent and removes hidden dependencies on ah for macros that take _ah as a parameter. This change does not appear to affect the final binary. 
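For illustration (the function and identifier names below are made up for this note and are not part of the patch): a macro that takes _ah but internally spells a bare ah only compiles when the caller happens to have a variable literally named ah in scope, which is exactly the hidden dependency being removed here.

  /* buggy pattern: parameter is _ah, body silently uses ah */
  #define AR_SREV_EXAMPLE(_ah) \
  	((ah)->hw_version.macVersion == AR_SREV_VERSION_9100)

  static void probe_one(struct ath_hw *hw_priv)
  {
  	/* does not build: no identifier 'ah' exists here, even though
  	 * hw_priv was passed in as _ah
  	 */
  	if (AR_SREV_EXAMPLE(hw_priv))
  		;
  }

Writing every reference as (_ah), as this patch does, makes the macros work no matter what the call site names its pointer.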
Signed-off-by: Wenli Looi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220320233010.123106-2-wlooi@ucalgary.ca --- drivers/net/wireless/ath/ath9k/reg.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 653e79611830..8983ea6fc727 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -834,8 +834,8 @@ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \ ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100)) -#define AR_SREV_9100(ah) \ - ((ah->hw_version.macVersion) == AR_SREV_VERSION_9100) +#define AR_SREV_9100(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9100)) #define AR_SREV_9100_OR_LATER(_ah) \ (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100)) @@ -891,7 +891,7 @@ #define AR_SREV_9300_20_OR_LATER(_ah) \ ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300) #define AR_SREV_9300_22(_ah) \ - (AR_SREV_9300(ah) && \ + (AR_SREV_9300((_ah)) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22)) #define AR_SREV_9330(_ah) \ @@ -994,8 +994,8 @@ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561)) #define AR_SREV_SOC(_ah) \ - (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(ah) || \ - AR_SREV_9561(ah)) + (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(_ah) || \ + AR_SREV_9561(_ah)) /* NOTE: When adding chips newer than Peacock, add chip check here */ #define AR_SREV_9580_10_OR_LATER(_ah) \ -- cgit v1.2.3 From a96474a794e11eda2c96b06270178ac1ccd6e1c0 Mon Sep 17 00:00:00 2001 From: Wenli Looi Date: Sun, 20 Mar 2022 17:30:06 -0600 Subject: ath9k: split set11nRateFlags and set11nChainSel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes the code clearer since set11nRateFlags currently sets both the rate flags and chain sel. This may also be required for QCN550x support, where the rate flags and chain sel are in separate fields. This change does not appear to affect the final binary. 
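To illustrate why the split helps (the QCN550x descriptor layout sketched below is an assumption for this note, not taken from the patch): once chain selection lives in a different control word than the rate flags, a descriptor writer can fill the two words independently, for example

  WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0) | SM(i->rtscts_rate, AR_RTSCTSRate));
  WRITE_ONCE(ads->ctl_chainsel, set11nChainSel(i->rates, 0));  /* ctl_chainsel is hypothetical */

whereas the old combined macro always emitted both pieces of information into the same word.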
Signed-off-by: Wenli Looi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220320233010.123106-3-wlooi@ucalgary.ca --- drivers/net/wireless/ath/ath9k/ar9002_mac.c | 9 +++++---- drivers/net/wireless/ath/ath9k/ar9003_mac.c | 9 +++++---- drivers/net/wireless/ath/ath9k/mac.h | 6 ++++-- 3 files changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index fba5a847c3bb..a8c0e8e2d78c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -301,10 +301,11 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2) | set11nPktDurRTSCTS(i->rates, 3)); - WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0) - | set11nRateFlags(i->rates, 1) - | set11nRateFlags(i->rates, 2) - | set11nRateFlags(i->rates, 3) + WRITE_ONCE(ads->ds_ctl7, + set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0) + | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1) + | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2) + | set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3) | SM(i->rtscts_rate, AR_RTSCTSRate)); WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1)); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 5184a0aacfe2..ff8ab58e67d9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -144,10 +144,11 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2) | set11nPktDurRTSCTS(i->rates, 3)); - WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0) - | set11nRateFlags(i->rates, 1) - | set11nRateFlags(i->rates, 2) - | set11nRateFlags(i->rates, 3) + WRITE_ONCE(ads->ctl18, + set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0) + | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1) + | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2) + | set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3) | SM(i->rtscts_rate, AR_RTSCTSRate)); WRITE_ONCE(ads->ctl19, AR_Not_Sounding); diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index fd6aa49adadf..af44b33814dd 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -35,8 +35,10 @@ |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ AR_GI##_index : 0) \ |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \ - AR_STBC##_index : 0) \ - |SM((_series)[_index].ChSel, AR_ChainSel##_index)) + AR_STBC##_index : 0)) + +#define set11nChainSel(_series, _index) \ + (SM((_series)[_index].ChSel, AR_ChainSel##_index)) #define CCK_SIFS_TIME 10 #define CCK_PREAMBLE_BITS 144 -- cgit v1.2.3 From 3096a4d9eb9bf8a0975c222bd7013d2db323d749 Mon Sep 17 00:00:00 2001 From: Wenli Looi Date: Sun, 20 Mar 2022 17:30:07 -0600 Subject: ath9k: use AR9300_MAX_CHAINS when appropriate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace other constants with AR9300_MAX_CHAINS when appropriate. This change does not appear to affect the final binary. 
Signed-off-by: Wenli Looi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220320233010.123106-4-wlooi@ucalgary.ca --- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 2 +- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index dc24da1ff00b..6ca089f15629 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -177,7 +177,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah) int i; /* Accumulate IQ cal measures for active chains */ - for (i = 0; i < AR5416_MAX_CHAINS; i++) { + for (i = 0; i < AR9300_MAX_CHAINS; i++) { if (ah->txchainmask & BIT(i)) { ah->totalPowerMeasI[i] += REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b0a4ca3559fd..669b49b560a1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3911,7 +3911,7 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan) } /* Test value. if 0 then attenuation is unused. Don't load anything. */ - for (i = 0; i < 3; i++) { + for (i = 0; i < AR9300_MAX_CHAINS; i++) { if (ah->txchainmask & BIT(i)) { value = ar9003_hw_atten_chain_get(ah, i, chan); REG_RMW_FIELD(ah, ext_atten_reg[i], @@ -5126,7 +5126,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) frequency, correction[0], correction[1], correction[2]); /* Store calibrated noise floor values */ - for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++) + for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) if (mode) { ah->nf_5g.cal[ichain] = nf_cal[ichain]; ah->nf_5g.pwr[ichain] = nf_pwr[ichain]; -- cgit v1.2.3 From 9aaff3864b603408c02c629957ae8d8ff5d5a4f2 Mon Sep 17 00:00:00 2001 From: Wenli Looi Date: Sun, 20 Mar 2022 17:30:08 -0600 Subject: ath9k: fix ar9003_get_eepmisc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current implementation is reading the wrong eeprom type. 
Fixes: d8ec2e2a63e8 ("ath9k: Add an eeprom_ops callback for retrieving the eepmisc value") Signed-off-by: Wenli Looi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220320233010.123106-5-wlooi@ucalgary.ca --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 669b49b560a1..a109a44a1999 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -5615,7 +5615,7 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, static u8 ar9003_get_eepmisc(struct ath_hw *ah) { - return ah->eeprom.map4k.baseEepHeader.eepMisc; + return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc; } const struct eeprom_ops eep_ar9300_ops = { -- cgit v1.2.3 From 193025378c446d53fa36bfefc33c170f1ec374c8 Mon Sep 17 00:00:00 2001 From: Wenli Looi Date: Sun, 20 Mar 2022 17:30:09 -0600 Subject: ath9k: refactor ar9003_hw_spur_mitigate_ofdm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Similar to ar9003_hw_spur_mitigate_mrc_cck, simplify the code by using ar9003_get_spur_chan_ptr. This may also be required for QCN550x support, to provide an abstraction over the underlying EEPROM format. Signed-off-by: Wenli Looi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220320233010.123106-6-wlooi@ucalgary.ca --- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index daf30f9946b4..dc0e5ea25673 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -523,21 +523,10 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah, int synth_freq; int range = 10; int freq_offset = 0; - int mode; - u8* spurChansPtr; + u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan)); unsigned int i; - struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - - if (IS_CHAN_5GHZ(chan)) { - spurChansPtr = &(eep->modalHeader5G.spurChans[0]); - mode = 0; - } - else { - spurChansPtr = &(eep->modalHeader2G.spurChans[0]); - mode = 1; - } - if (spurChansPtr[0] == 0) + if (spur_fbin_ptr[0] == 0) return; /* No spur in the mode */ if (IS_CHAN_HT40(chan)) { @@ -554,16 +543,18 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah, ar9003_hw_spur_ofdm_clear(ah); - for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) { - freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode); + for (i = 0; i < AR_EEPROM_MODAL_SPURS && spur_fbin_ptr[i]; i++) { + freq_offset = ath9k_hw_fbin2freq(spur_fbin_ptr[i], + IS_CHAN_2GHZ(chan)); freq_offset -= synth_freq; if (abs(freq_offset) < range) { ar9003_hw_spur_ofdm_work(ah, chan, freq_offset, range, synth_freq); if (AR_SREV_9565(ah) && (i < 4)) { - freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1], - mode); + freq_offset = + ath9k_hw_fbin2freq(spur_fbin_ptr[i + 1], + IS_CHAN_2GHZ(chan)); freq_offset -= synth_freq; if (abs(freq_offset) < range) ar9003_hw_spur_ofdm_9565(ah, freq_offset); -- cgit v1.2.3 From 673424ce0e77450c24b300153387e271f470fc7c Mon Sep 17 00:00:00 2001 From: Wenli Looi Date: Sun, 20 Mar 2022 17:30:10 -0600 Subject: ath9k: add functions to get paprd rate 
mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This removes some code duplication with le32_to_cpu. This may also be required for QCN550x support, to provide an abstraction over the underlying EEPROM format. Signed-off-by: Wenli Looi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220320233010.123106-7-wlooi@ucalgary.ca --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 33 ++++++++++++++------------ drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 2 ++ drivers/net/wireless/ath/ath9k/ar9003_paprd.c | 10 ++++---- 3 files changed, 25 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index a109a44a1999..abf12de0e381 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -5449,8 +5449,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ath_common *common = ath9k_hw_common(ah); - struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; - struct ar9300_modal_eep_header *modal_hdr; u8 targetPowerValT2[ar9300RateSize]; u8 target_power_val_t2_eep[ar9300RateSize]; u8 targetPowerValT2_tpc[ar9300RateSize]; @@ -5465,17 +5463,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2); if (ar9003_is_paprd_enabled(ah)) { - if (IS_CHAN_2GHZ(chan)) - modal_hdr = &eep->modalHeader2G; - else - modal_hdr = &eep->modalHeader5G; - ah->paprd_ratemask = - le32_to_cpu(modal_hdr->papdRateMaskHt20) & + ar9003_get_paprd_rate_mask_ht20(ah, IS_CHAN_2GHZ(chan)) & AR9300_PAPRD_RATE_MASK; ah->paprd_ratemask_ht40 = - le32_to_cpu(modal_hdr->papdRateMaskHt40) & + ar9003_get_paprd_rate_mask_ht40(ah, IS_CHAN_2GHZ(chan)) & AR9300_PAPRD_RATE_MASK; paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan); @@ -5592,23 +5585,33 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz) return ar9003_modal_header(ah, is2ghz)->spurChans; } +u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz) +{ + return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt20); +} + +u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz) +{ + return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt40); +} + unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, struct ath9k_channel *chan) { - struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + bool is2ghz = IS_CHAN_2GHZ(chan); - if (IS_CHAN_2GHZ(chan)) - return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20), + if (is2ghz) + return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz), AR9300_PAPRD_SCALE_1); else { if (chan->channel >= 5700) - return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), + return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz), AR9300_PAPRD_SCALE_1); else if (chan->channel >= 5400) - return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), + return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz), AR9300_PAPRD_SCALE_2); else - return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), + return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz), AR9300_PAPRD_SCALE_1); } } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index e8fda54acfe3..f8ae20318302 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ 
b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -363,6 +363,8 @@ u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz); u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz); +u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz); +u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz); unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, struct ath9k_channel *chan); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 34e100940284..b2d53b6c0ffd 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -21,7 +21,7 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val) { struct ath9k_channel *chan = ah->curchan; - struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; + bool is2ghz = IS_CHAN_2GHZ(chan); /* * 3 bits for modalHeader5G.papdRateMaskHt20 @@ -36,17 +36,17 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val) * -- disable PAPRD for lower band 5GHz */ - if (IS_CHAN_5GHZ(chan)) { + if (!is2ghz) { if (chan->channel >= UPPER_5G_SUB_BAND_START) { - if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20) + if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz) & BIT(30)) val = false; } else if (chan->channel >= MID_5G_SUB_BAND_START) { - if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20) + if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz) & BIT(29)) val = false; } else { - if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20) + if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz) & BIT(28)) val = false; } -- cgit v1.2.3 From b2beae327e039736299f3c6559f3467323fe68c5 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Wed, 23 Mar 2022 21:18:56 -0400 Subject: ath11k: store and send country code to firmware after recovery Currently ath11k does not send the country code to firmware after device recovery, as a result the regdomain info is reported from firmware by default. Regdomain info is important, so ath11k also need to restore it to the value which was used before recovery. 
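One way to observe the effect from user space, assuming the wiphy-specific entry printed by iw reflects the regdomain the firmware is currently using, is to compare the country before and after a recovery:

  iw reg get          # note the alpha2 reported for the ath11k wiphy
  (trigger a firmware recovery)
  iw reg get          # with this patch the same alpha2 is reported again

Without the change the second query shows the firmware default regdomain, because the previously negotiated country code was never re-sent.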
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220324011856.11014-1-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 1 + drivers/net/wireless/ath/ath11k/core.h | 1 + drivers/net/wireless/ath/ath11k/mac.c | 8 ++++++++ drivers/net/wireless/ath/ath11k/reg.c | 1 + 4 files changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 2a867237e817..9395a5a271f1 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1404,6 +1404,7 @@ static void ath11k_update_11d(struct work_struct *work) pdev = &ab->pdevs[i]; ar = pdev->ar; + memcpy(&ar->alpha2, &set_current_param.alpha2, 2); ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); if (ret) ath11k_warn(ar->ab, diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 41001ec174e2..b4d58e01fffa 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -671,6 +671,7 @@ struct ath11k { int hw_rate_code; u8 twt_enabled; bool nlo_enabled; + u8 alpha2[REG_ALPHA2_LEN + 1]; }; struct ath11k_band_cap { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 7a7641e8e325..9caa8b87c313 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -7957,6 +7957,14 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ar->state = ATH11K_STATE_ON; ieee80211_wake_queues(ar->hw); + if (ar->ab->hw_params.current_cc_support && + ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { + struct wmi_set_current_country_params set_current_param = {}; + + memcpy(&set_current_param.alpha2, ar->alpha2, 2); + ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + } + if (ab->is_reset) { recovery_count = atomic_inc_return(&ab->recovery_count); ath11k_dbg(ab, ATH11K_DBG_BOOT, diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 81e11cde31d7..35eb1a0c04dc 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -83,6 +83,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) */ if (ar->ab->hw_params.current_cc_support) { memcpy(&set_current_param.alpha2, request->alpha2, 2); + memcpy(&ar->alpha2, &set_current_param.alpha2, 2); ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); if (ret) ath11k_warn(ar->ab, -- cgit v1.2.3 From 1216c4d30723f5ed5b7576a2b96dafc4c5a8cbf9 Mon Sep 17 00:00:00 2001 From: Edmond Gagnon Date: Fri, 25 Mar 2022 17:42:12 -0500 Subject: wcn36xx: Implement tx_rate reporting Currently, the driver reports a tx_rate of 6.0 MBit/s no matter the true rate: root@linaro-developer:~# iw wlan0 link Connected to 6c:f3:7f:eb:9b:92 (on wlan0) SSID: SQ-DEVICETEST freq: 5200 RX: 4141 bytes (32 packets) TX: 2082 bytes (15 packets) signal: -77 dBm rx bitrate: 135.0 MBit/s MCS 6 40MHz short GI tx bitrate: 6.0 MBit/s bss flags: short-slot-time dtim period: 1 beacon int: 100 This patch requests HAL_GLOBAL_CLASS_A_STATS_INFO via a hal_get_stats firmware message and reports it via ieee80211_ops::sta_statistics. 
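Since mac80211 fills station info through the driver's sta_statistics callback for every NL80211_CMD_GET_STATION request, the new path is exercised not only by the link query shown here but also by, for example,

  iw dev wlan0 station dump

(assuming the interface is named wlan0). With the change applied, the link output reports the real transmit rate: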
root@linaro-developer:~# iw wlan0 link Connected to 6c:f3:7f:eb:73:b2 (on wlan0) SSID: SQ-DEVICETEST freq: 5700 RX: 26788094 bytes (19859 packets) TX: 1101376 bytes (12119 packets) signal: -75 dBm rx bitrate: 135.0 MBit/s MCS 6 40MHz short GI tx bitrate: 108.0 MBit/s VHT-MCS 5 40MHz VHT-NSS 1 bss flags: short-slot-time dtim period: 1 beacon int: 100 Tested on MSM8939 with WCN3680B running firmware CNSS-PR-2-0-1-2-c1-00083, and verified by sniffing frames over the air with Wireshark to ensure the MCS indices match. Signed-off-by: Edmond Gagnon Reviewed-by: Benjamin Li Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325224212.159690-1-egagnon@squareup.com --- drivers/net/wireless/ath/wcn36xx/hal.h | 7 +++- drivers/net/wireless/ath/wcn36xx/main.c | 16 +++++++++ drivers/net/wireless/ath/wcn36xx/smd.c | 57 +++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/wcn36xx/smd.h | 2 ++ drivers/net/wireless/ath/wcn36xx/txrx.c | 29 +++++++++++++++++ drivers/net/wireless/ath/wcn36xx/txrx.h | 1 + 6 files changed, 111 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 2a1db9756fd5..46a49f0a51b3 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -2626,7 +2626,12 @@ enum tx_rate_info { HAL_TX_RATE_SGI = 0x8, /* Rate with Long guard interval */ - HAL_TX_RATE_LGI = 0x10 + HAL_TX_RATE_LGI = 0x10, + + /* VHT rates */ + HAL_TX_RATE_VHT20 = 0x20, + HAL_TX_RATE_VHT40 = 0x40, + HAL_TX_RATE_VHT80 = 0x80, }; struct ani_global_class_a_stats_info { diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 95ea7d040d8c..864d1547e6f8 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1400,6 +1400,21 @@ static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx, return 0; } +static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct station_info *sinfo) +{ + struct wcn36xx *wcn; + u8 sta_index; + int status; + + wcn = hw->priv; + sta_index = get_sta_index(vif, wcn36xx_sta_to_priv(sta)); + status = wcn36xx_smd_get_stats(wcn, sta_index, HAL_GLOBAL_CLASS_A_STATS_INFO, sinfo); + + if (status) + wcn36xx_err("wcn36xx_smd_get_stats failed\n"); +} + static const struct ieee80211_ops wcn36xx_ops = { .start = wcn36xx_start, .stop = wcn36xx_stop, @@ -1423,6 +1438,7 @@ static const struct ieee80211_ops wcn36xx_ops = { .set_rts_threshold = wcn36xx_set_rts_threshold, .sta_add = wcn36xx_sta_add, .sta_remove = wcn36xx_sta_remove, + .sta_statistics = wcn36xx_sta_statistics, .ampdu_action = wcn36xx_ampdu_action, #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = wcn36xx_ipv6_addr_change, diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 59ad332156ae..872b37875c57 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -2627,6 +2627,62 @@ out: return ret; } +int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask, + struct station_info *sinfo) +{ + struct wcn36xx_hal_stats_req_msg msg_body; + struct wcn36xx_hal_stats_rsp_msg *rsp; + void *rsp_body; + int ret; + + if (stats_mask & ~HAL_GLOBAL_CLASS_A_STATS_INFO) { + wcn36xx_err("stats_mask 0x%x contains unimplemented types\n", + stats_mask); + return -EINVAL; + } + + mutex_lock(&wcn->hal_mutex); + INIT_HAL_MSG(msg_body, WCN36XX_HAL_GET_STATS_REQ); + + 
msg_body.sta_id = sta_index; + msg_body.stats_mask = stats_mask; + + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); + + ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); + if (ret) { + wcn36xx_err("sending hal_get_stats failed\n"); + goto out; + } + + ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len); + if (ret) { + wcn36xx_err("hal_get_stats response failed err=%d\n", ret); + goto out; + } + + rsp = (struct wcn36xx_hal_stats_rsp_msg *)wcn->hal_buf; + rsp_body = (wcn->hal_buf + sizeof(struct wcn36xx_hal_stats_rsp_msg)); + + if (rsp->stats_mask != stats_mask) { + wcn36xx_err("stats_mask 0x%x differs from requested 0x%x\n", + rsp->stats_mask, stats_mask); + goto out; + } + + if (rsp->stats_mask & HAL_GLOBAL_CLASS_A_STATS_INFO) { + struct ani_global_class_a_stats_info *stats_info = rsp_body; + + wcn36xx_process_tx_rate(stats_info, &sinfo->txrate); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + rsp_body += sizeof(struct ani_global_class_a_stats_info); + } +out: + mutex_unlock(&wcn->hal_mutex); + + return ret; +} + static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info) { struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate; @@ -3316,6 +3372,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, case WCN36XX_HAL_ADD_BA_SESSION_RSP: case WCN36XX_HAL_ADD_BA_RSP: case WCN36XX_HAL_DEL_BA_RSP: + case WCN36XX_HAL_GET_STATS_RSP: case WCN36XX_HAL_TRIGGER_BA_RSP: case WCN36XX_HAL_UPDATE_CFG_RSP: case WCN36XX_HAL_JOIN_RSP: diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 957cfa87fbde..3fd598ac2a27 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -138,6 +138,8 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn, int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id); int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index); int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn); +int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask, + struct station_info *sinfo); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index df749b114568..8da3955995b6 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -699,3 +699,32 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, return ret; } + +void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info) +{ + /* tx_rate is in units of 500kbps; mac80211 wants them in 100kbps */ + if (stats->tx_rate_flags & HAL_TX_RATE_LEGACY) + info->legacy = stats->tx_rate * 5; + + info->flags = 0; + info->mcs = stats->mcs_index; + info->nss = 1; + + if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_HT40)) + info->flags |= RATE_INFO_FLAGS_MCS; + + if (stats->tx_rate_flags & (HAL_TX_RATE_VHT20 | HAL_TX_RATE_VHT40 | HAL_TX_RATE_VHT80)) + info->flags |= RATE_INFO_FLAGS_VHT_MCS; + + if (stats->tx_rate_flags & HAL_TX_RATE_SGI) + info->flags |= RATE_INFO_FLAGS_SHORT_GI; + + if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_VHT20)) + info->bw = RATE_INFO_BW_20; + + if (stats->tx_rate_flags & (HAL_TX_RATE_HT40 | HAL_TX_RATE_VHT40)) + info->bw = RATE_INFO_BW_40; + + if (stats->tx_rate_flags & HAL_TX_RATE_VHT80) + info->bw = RATE_INFO_BW_80; +} diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h 
index b54311ffde9c..fb0d6cabd52b 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.h +++ b/drivers/net/wireless/ath/wcn36xx/txrx.h @@ -164,5 +164,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb); int wcn36xx_start_tx(struct wcn36xx *wcn, struct wcn36xx_sta *sta_priv, struct sk_buff *skb); +void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info); #endif /* _TXRX_H_ */ -- cgit v1.2.3 From 801cb1d234288d9bc2afeaa98c7e98f8038b7a6c Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Mon, 28 Mar 2022 14:57:19 +0300 Subject: ath11k: add support to search regdb data in board-2.bin for WCN6855 Currently ath11k only download the same regdb.bin file for all WCN6855 chips, actually ath11k needs to distinguish all different WCN6855 chips. This is to re-use the string type which include bus, chip id, board id, vendor, device, subsystem-vendor, subsystem-device and variant for WCN6855 to distinguish different regdb in board-2.bin. ath11k will first load board-2.bin and search in it for the regdb data with the above parameters, if matched one regdb data, then download it to firmware, if not matched any one, then ath11k will download the file regdb.bin to firmware. Add enum value ATH11K_BD_IE_REGDB and enum type ath11k_bd_ie_regdb_type to distinguish regdb data and board data since they are in the same file board-2.bin. This only take effect for WCN6855 which supports regdb in hardware parameters. Test log: [ 3833.091948] ath11k_pci 0000:05:00.0: boot using board name 'bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262' [ 3833.092072] ath11k_pci 0000:05:00.0: boot firmware request ath11k/WCN6855/hw2.0/board-2.bin size 205316 [ 3833.092079] ath11k_pci 0000:05:00.0: board name [ 3833.092083] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 71 6d 69 2d 63 68 69 70 bus=pci,qmi-chip [ 3833.092088] ath11k_pci 0000:05:00.0: 00000010: 2d 69 64 3d 31 -id=1 [ 3833.092091] ath11k_pci 0000:05:00.0: board name [ 3833.092095] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 71 6d 69 2d 63 68 69 70 bus=pci,qmi-chip [ 3833.092099] ath11k_pci 0000:05:00.0: 00000010: 2d 69 64 3d 32 -id=2 [ 3833.092102] ath11k_pci 0000:05:00.0: board name [ 3833.092105] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 71 6d 69 2d 63 68 69 70 bus=pci,qmi-chip [ 3833.092109] ath11k_pci 0000:05:00.0: 00000010: 2d 69 64 3d 33 -id=3 [ 3833.092112] ath11k_pci 0000:05:00.0: board name [ 3833.092116] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 76 65 6e 64 6f 72 3d 31 bus=pci,vendor=1 [ 3833.092119] ath11k_pci 0000:05:00.0: 00000010: 37 63 62 2c 64 65 76 69 63 65 3d 31 31 30 33 2c 7cb,device=1103, [ 3833.092123] ath11k_pci 0000:05:00.0: 00000020: 73 75 62 73 79 73 74 65 6d 2d 76 65 6e 64 6f 72 subsystem-vendor [ 3833.092126] ath11k_pci 0000:05:00.0: 00000030: 3d 31 37 63 62 2c 73 75 62 73 79 73 74 65 6d 2d =17cb,subsystem- [ 3833.092130] ath11k_pci 0000:05:00.0: 00000040: 64 65 76 69 63 65 3d 33 33 37 34 2c 71 6d 69 2d device=3374,qmi- [ 3833.092133] ath11k_pci 0000:05:00.0: 00000050: 63 68 69 70 2d 69 64 3d 32 2c 71 6d 69 2d 62 6f chip-id=2,qmi-bo [ 3833.092137] ath11k_pci 0000:05:00.0: 00000060: 61 72 64 2d 69 64 3d 32 36 36 2c 76 61 72 69 61 ard-id=266,varia [ 3833.092140] ath11k_pci 0000:05:00.0: 00000070: 6e 74 3d 48 50 5f 47 38 5f 4c 61 6e 63 69 61 31 nt=HP_G8_Lancia1 [ 3833.092144] ath11k_pci 0000:05:00.0: 00000080: 35 5 [ 3833.092147] ath11k_pci 0000:05:00.0: board name [ 3833.092150] 
ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 76 65 6e 64 6f 72 3d 31 bus=pci,vendor=1 [ 3833.092154] ath11k_pci 0000:05:00.0: 00000010: 37 63 62 2c 64 65 76 69 63 65 3d 31 31 30 33 2c 7cb,device=1103, [ 3833.092157] ath11k_pci 0000:05:00.0: 00000020: 73 75 62 73 79 73 74 65 6d 2d 76 65 6e 64 6f 72 subsystem-vendor [ 3833.092161] ath11k_pci 0000:05:00.0: 00000030: 3d 31 37 63 62 2c 73 75 62 73 79 73 74 65 6d 2d =17cb,subsystem- [ 3833.092165] ath11k_pci 0000:05:00.0: 00000040: 64 65 76 69 63 65 3d 33 33 37 34 2c 71 6d 69 2d device=3374,qmi- [ 3833.092168] ath11k_pci 0000:05:00.0: 00000050: 63 68 69 70 2d 69 64 3d 32 2c 71 6d 69 2d 62 6f chip-id=2,qmi-bo [ 3833.092172] ath11k_pci 0000:05:00.0: 00000060: 61 72 64 2d 69 64 3d 32 36 36 ard-id=266 [ 3833.092206] ath11k_pci 0000:05:00.0: board name [ 3833.092209] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 76 65 6e 64 6f 72 3d 31 bus=pci,vendor=1 [ 3833.092213] ath11k_pci 0000:05:00.0: 00000010: 37 63 62 2c 64 65 76 69 63 65 3d 31 31 30 33 2c 7cb,device=1103, [ 3833.092216] ath11k_pci 0000:05:00.0: 00000020: 73 75 62 73 79 73 74 65 6d 2d 76 65 6e 64 6f 72 subsystem-vendor [ 3833.092220] ath11k_pci 0000:05:00.0: 00000030: 3d 31 37 63 62 2c 73 75 62 73 79 73 74 65 6d 2d =17cb,subsystem- [ 3833.092223] ath11k_pci 0000:05:00.0: 00000040: 64 65 76 69 63 65 3d 33 33 37 34 2c 71 6d 69 2d device=3374,qmi- [ 3833.092227] ath11k_pci 0000:05:00.0: 00000050: 63 68 69 70 2d 69 64 3d 32 2c 71 6d 69 2d 62 6f chip-id=2,qmi-bo [ 3833.092230] ath11k_pci 0000:05:00.0: 00000060: 61 72 64 2d 69 64 3d 32 36 32 ard-id=262 [ 3833.092234] ath11k_pci 0000:05:00.0: boot found match regdb data for name 'bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262' [ 3833.092238] ath11k_pci 0000:05:00.0: board name [ 3833.092241] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 71 6d 69 2d 63 68 69 70 bus=pci,qmi-chip [ 3833.092245] ath11k_pci 0000:05:00.0: 00000010: 2d 69 64 3d 31 31 -id=11 [ 3833.092248] ath11k_pci 0000:05:00.0: board name [ 3833.092251] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 71 6d 69 2d 63 68 69 70 bus=pci,qmi-chip [ 3833.092255] ath11k_pci 0000:05:00.0: 00000010: 2d 69 64 3d 32 32 -id=22 [ 3833.092258] ath11k_pci 0000:05:00.0: board name [ 3833.092261] ath11k_pci 0000:05:00.0: 00000000: 62 75 73 3d 70 63 69 2c 71 6d 69 2d 63 68 69 70 bus=pci,qmi-chip [ 3833.092265] ath11k_pci 0000:05:00.0: 00000010: 2d 69 64 3d 33 33 -id=33 [ 3833.092268] ath11k_pci 0000:05:00.0: boot found regdb data for 'bus=pci,vendor=17cb,device=1103,subsystem-vendor=17cb,subsystem-device=3374,qmi-chip-id=2,qmi-board-id=262' [ 3833.092272] ath11k_pci 0000:05:00.0: fetched regdb Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220319023543.14288-4-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 84 ++++++++++++++++++++++++---------- drivers/net/wireless/ath/ath11k/hw.h | 19 ++++++++ 2 files changed, 78 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 9395a5a271f1..d1d8a2f99196 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -723,7 +723,9 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, struct ath11k_board_data *bd, const void *buf, size_t buf_len, const char *boardname, - int 
bd_ie_type) + int ie_id, + int name_id, + int data_id) { const struct ath11k_fw_ie *hdr; bool name_match_found; @@ -733,7 +735,7 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, name_match_found = false; - /* go through ATH11K_BD_IE_BOARD_ elements */ + /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */ while (buf_len > sizeof(struct ath11k_fw_ie)) { hdr = buf; board_ie_id = le32_to_cpu(hdr->id); @@ -744,48 +746,50 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, buf += sizeof(*hdr); if (buf_len < ALIGN(board_ie_len, 4)) { - ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n", + ath11k_err(ab, "invalid %s length: %zu < %zu\n", + ath11k_bd_ie_type_str(ie_id), buf_len, ALIGN(board_ie_len, 4)); ret = -EINVAL; goto out; } - switch (board_ie_id) { - case ATH11K_BD_IE_BOARD_NAME: + if (board_ie_id == name_id) { ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "", board_ie_data, board_ie_len); if (board_ie_len != strlen(boardname)) - break; + goto next; ret = memcmp(board_ie_data, boardname, strlen(boardname)); if (ret) - break; + goto next; name_match_found = true; ath11k_dbg(ab, ATH11K_DBG_BOOT, - "boot found match for name '%s'", + "boot found match %s for name '%s'", + ath11k_bd_ie_type_str(ie_id), boardname); - break; - case ATH11K_BD_IE_BOARD_DATA: + } else if (board_ie_id == data_id) { if (!name_match_found) /* no match found */ - break; + goto next; ath11k_dbg(ab, ATH11K_DBG_BOOT, - "boot found board data for '%s'", boardname); + "boot found %s for '%s'", + ath11k_bd_ie_type_str(ie_id), + boardname); bd->data = board_ie_data; bd->len = board_ie_len; ret = 0; goto out; - default: - ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n", + } else { + ath11k_warn(ab, "unknown %s id found: %d\n", + ath11k_bd_ie_type_str(ie_id), board_ie_id); - break; } - +next: /* jump over the padding */ board_ie_len = ALIGN(board_ie_len, 4); @@ -802,7 +806,10 @@ out: static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, struct ath11k_board_data *bd, - const char *boardname) + const char *boardname, + int ie_id_match, + int name_id, + int data_id) { size_t len, magic_len; const u8 *data; @@ -867,22 +874,23 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, goto err; } - switch (ie_id) { - case ATH11K_BD_IE_BOARD: + if (ie_id == ie_id_match) { ret = ath11k_core_parse_bd_ie_board(ab, bd, data, ie_len, boardname, - ATH11K_BD_IE_BOARD); + ie_id_match, + name_id, + data_id); if (ret == -ENOENT) /* no match found, continue */ - break; + goto next; else if (ret) /* there was an error, bail out */ goto err; /* either found or error, so stop searching */ goto out; } - +next: /* jump over the padding */ ie_len = ALIGN(ie_len, 4); @@ -893,7 +901,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, out: if (!bd->data || !bd->len) { ath11k_dbg(ab, ATH11K_DBG_BOOT, - "failed to fetch board data for %s from %s\n", + "failed to fetch %s for %s from %s\n", + ath11k_bd_ie_type_str(ie_id_match), boardname, filepath); ret = -ENODATA; goto err; @@ -937,7 +946,10 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) } ab->bd_api = 2; - ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname); + ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, + ATH11K_BD_IE_BOARD, + ATH11K_BD_IE_BOARD_NAME, + ATH11K_BD_IE_BOARD_DATA); if (!ret) goto success; @@ -948,7 +960,10 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) return ret; } - ret 
= ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname); + ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname, + ATH11K_BD_IE_BOARD, + ATH11K_BD_IE_BOARD_NAME, + ATH11K_BD_IE_BOARD_DATA); if (!ret) goto success; @@ -975,13 +990,32 @@ success: int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd) { + char boardname[BOARD_NAME_SIZE]; int ret; + ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); + if (ret) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "failed to create board name for regdb: %d", ret); + goto exit; + } + + ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, + ATH11K_BD_IE_REGDB, + ATH11K_BD_IE_REGDB_NAME, + ATH11K_BD_IE_REGDB_DATA); + if (!ret) + goto exit; + ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME); if (ret) ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n", ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir); +exit: + if (!ret) + ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n"); + return ret; } diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 5955c6f5da24..44025bc5d082 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -291,9 +291,16 @@ enum ath11k_bd_ie_board_type { ATH11K_BD_IE_BOARD_DATA = 1, }; +enum ath11k_bd_ie_regdb_type { + ATH11K_BD_IE_REGDB_NAME = 0, + ATH11K_BD_IE_REGDB_DATA = 1, +}; + enum ath11k_bd_ie_type { /* contains sub IEs of enum ath11k_bd_ie_board_type */ ATH11K_BD_IE_BOARD = 0, + /* contains sub IEs of enum ath11k_bd_ie_regdb_type */ + ATH11K_BD_IE_REGDB = 1, }; struct ath11k_hw_regs { @@ -361,4 +368,16 @@ extern const struct ath11k_hw_regs qca6390_regs; extern const struct ath11k_hw_regs qcn9074_regs; extern const struct ath11k_hw_regs wcn6855_regs; +static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type) +{ + switch (type) { + case ATH11K_BD_IE_BOARD: + return "board data"; + case ATH11K_BD_IE_REGDB: + return "regdb data"; + } + + return "unknown"; +} + #endif -- cgit v1.2.3 From 1f682dc9fb3790aa7ec27d3d122ff32b1eda1365 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Sun, 27 Mar 2022 23:58:32 -0400 Subject: ath11k: reduce the wait time of 11d scan and hw scan while add interface Currently ath11k will wait 11d scan complete while add interface in ath11k_mac_op_add_interface(), when system resume without enable wowlan, ath11k_mac_op_add_interface() is called for each resume, thus it increase the resume time of system. And ath11k_mac_op_hw_scan() after ath11k_mac_op_add_interface() also needs some time cost because the previous 11d scan need more than 5 seconds when 6 GHz is enabled, then the scan started event will indicated to ath11k after the 11d scan completed. While 11d scan/hw scan is running in firmware, if ath11k update channel list to firmware by WMI_SCAN_CHAN_LIST_CMDID, then firmware will cancel the current scan which is running, it lead the scan failed. The patch commit 9dcf6808b253 ("ath11k: add 11d scan offload support") used finish_11d_scan/finish_11d_ch_list/pending_11d to synchronize the 11d scan/hw scan/channel list between ath11k/firmware/mac80211 and to avoid the scan fail. Add wait operation before ath11k update channel list, function ath11k_reg_update_chan_list() will wait until the current 11d scan/hw scan completed. And remove the wait operation of start 11d scan and waiting channel list complete in hw scan. 
After these changes, resume time cost reduce about 5 seconds and also hw scan time cost reduced obviously, and scan failed not seen. The 11d scan is sent to firmware only one time for each interface added in mac.c, and it is moved after the 1st hw scan because 11d scan will cost some time and thus leads the AP scan result update to UI delay. Currently priority of ath11k's hw scan is WMI_SCAN_PRIORITY_LOW, and priority of 11d scan in firmware is WMI_SCAN_PRIORITY_MEDIUM, then the 11d scan which sent after hw scan will cancel the hw scan in firmware, so change the priority to WMI_SCAN_PRIORITY_MEDIUM for the hw scan which is in front of the 11d scan, thus it will not happen scan cancel in firmware. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Fixes: 9dcf6808b253 ("ath11k: add 11d scan offload support") Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328035832.14122-1-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 1 + drivers/net/wireless/ath/ath11k/core.h | 13 +++++-- drivers/net/wireless/ath/ath11k/mac.c | 71 ++++++++++++++-------------------- drivers/net/wireless/ath/ath11k/mac.h | 2 +- drivers/net/wireless/ath/ath11k/reg.c | 43 +++++++++++++------- drivers/net/wireless/ath/ath11k/reg.h | 2 +- drivers/net/wireless/ath/ath11k/wmi.c | 16 +++++++- 7 files changed, 84 insertions(+), 64 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index d1d8a2f99196..7f4462cf5787 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1465,6 +1465,7 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab) ieee80211_stop_queues(ar->hw); ath11k_mac_drain_tx(ar); + complete(&ar->completed_11d_scan); complete(&ar->scan.started); complete(&ar->scan.completed); complete(&ar->peer_assoc_done); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index b4d58e01fffa..17a61d0d684e 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -52,6 +52,8 @@ extern unsigned int ath11k_frame_mode; +#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ) + #define ATH11K_MON_TIMER_INTERVAL 10 #define ATH11K_RESET_TIMEOUT_HZ (20 * HZ) #define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3 @@ -216,6 +218,12 @@ enum ath11k_scan_state { ATH11K_SCAN_ABORTING, }; +enum ath11k_11d_state { + ATH11K_11D_IDLE, + ATH11K_11D_PREPARING, + ATH11K_11D_RUNNING, +}; + enum ath11k_dev_flags { ATH11K_CAC_RUNNING, ATH11K_FLAG_CORE_REGISTERED, @@ -664,9 +672,8 @@ struct ath11k { bool dfs_block_radar_events; struct ath11k_thermal thermal; u32 vdev_id_11d_scan; - struct completion finish_11d_scan; - struct completion finish_11d_ch_list; - bool pending_11d; + struct completion completed_11d_scan; + enum ath11k_11d_state state_11d; bool regdom_set_by_user; int hw_rate_code; u8 twt_enabled; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 9caa8b87c313..175a4ae752f3 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -3621,26 +3621,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, if (ret) goto exit; - /* Currently the pending_11d=true only happened 1 time while - * wlan interface up in ath11k_mac_11d_scan_start(), it is called by - * ath11k_mac_op_add_interface(), after wlan interface up, - * pending_11d=false always. 
- * If remove below wait, it always happened scan fail and lead connect - * fail while wlan interface up, because it has a 11d scan which is running - * in firmware, and lead this scan failed. - */ - if (ar->pending_11d) { - long time_left; - unsigned long timeout = 5 * HZ; - - if (ar->supports_6ghz) - timeout += 5 * HZ; - - time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout); - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, - "mac wait 11d channel list time left %ld\n", time_left); - } - memset(&arg, 0, sizeof(arg)); ath11k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@ -3706,6 +3686,10 @@ exit: kfree(arg.extraie.ptr); mutex_unlock(&ar->conf_mutex); + + if (ar->state_11d == ATH11K_11D_PREPARING) + ath11k_mac_11d_scan_start(ar, arvif->vdev_id); + return ret; } @@ -5859,7 +5843,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw) /* TODO: Do we need to enable ANI? */ - ath11k_reg_update_chan_list(ar); + ath11k_reg_update_chan_list(ar, false); ar->num_started_vdevs = 0; ar->num_created_vdevs = 0; @@ -5926,6 +5910,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw) cancel_work_sync(&ar->ab->update_11d_work); cancel_work_sync(&ar->ab->rfkill_work); + if (ar->state_11d == ATH11K_11D_PREPARING) { + ar->state_11d = ATH11K_11D_IDLE; + complete(&ar->completed_11d_scan); + } + spin_lock_bh(&ar->data_lock); list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { list_del(&ppdu_stats->list); @@ -6096,7 +6085,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab) return false; } -void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait) +void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id) { struct wmi_11d_scan_start_params param; int ret; @@ -6124,28 +6113,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait) ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n"); - if (wait) - reinit_completion(&ar->finish_11d_scan); - ret = ath11k_wmi_send_11d_scan_start_cmd(ar, ¶m); if (ret) { ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n", vdev_id, ret); } else { ar->vdev_id_11d_scan = vdev_id; - if (wait) { - ar->pending_11d = true; - ret = wait_for_completion_timeout(&ar->finish_11d_scan, - 5 * HZ); - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, - "mac 11d scan left time %d\n", ret); - - if (!ret) - ar->pending_11d = false; - } + if (ar->state_11d == ATH11K_11D_PREPARING) + ar->state_11d = ATH11K_11D_RUNNING; } fin: + if (ar->state_11d == ATH11K_11D_PREPARING) { + ar->state_11d = ATH11K_11D_IDLE; + complete(&ar->completed_11d_scan); + } + mutex_unlock(&ar->ab->vdev_id_11d_lock); } @@ -6168,12 +6151,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar) vdev_id = ar->vdev_id_11d_scan; ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id); - if (ret) + if (ret) { ath11k_warn(ar->ab, "failed to stopt 11d scan vdev %d ret: %d\n", vdev_id, ret); - else + } else { ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; + ar->state_11d = ATH11K_11D_IDLE; + complete(&ar->completed_11d_scan); + } } mutex_unlock(&ar->ab->vdev_id_11d_lock); } @@ -6369,8 +6355,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, goto err_peer_del; } - ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true); - + if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) { + reinit_completion(&ar->completed_11d_scan); + ar->state_11d = ATH11K_11D_PREPARING; + } break; case WMI_VDEV_TYPE_MONITOR: set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); @@ -7230,7 +7218,7 @@ 
ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, } if (arvif->vdev_type == WMI_VDEV_TYPE_STA) - ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false); + ath11k_mac_11d_scan_start(ar, arvif->vdev_id); mutex_unlock(&ar->conf_mutex); } @@ -8920,8 +8908,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab) ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; - init_completion(&ar->finish_11d_scan); - init_completion(&ar->finish_11d_ch_list); + init_completion(&ar->completed_11d_scan); } return 0; diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 045bf4fe1706..7f93e3a9ca23 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default; #define ATH11K_SCAN_11D_INTERVAL 600000 #define ATH11K_11D_INVALID_VDEV_ID 0xFFFF -void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait); +void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id); void ath11k_mac_11d_scan_stop(struct ath11k *ar); void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 35eb1a0c04dc..79ac2142317a 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -103,7 +103,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) ar->regdom_set_by_user = true; } -int ath11k_reg_update_chan_list(struct ath11k *ar) +int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait) { struct ieee80211_supported_band **bands; struct scan_chan_list_params *params; @@ -112,7 +112,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) struct channel_param *ch; enum nl80211_band band; int num_channels = 0; - int i, ret; + int i, ret, left; + + if (wait && ar->state_11d != ATH11K_11D_IDLE) { + left = wait_for_completion_timeout(&ar->completed_11d_scan, + ATH11K_SCAN_TIMEOUT_HZ); + if (!left) { + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "failed to receive 11d scan complete: timed out\n"); + ar->state_11d = ATH11K_11D_IDLE; + } + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "reg 11d scan wait left time %d\n", left); + } + + if (wait && + (ar->scan.state == ATH11K_SCAN_STARTING || + ar->scan.state == ATH11K_SCAN_RUNNING)) { + left = wait_for_completion_timeout(&ar->scan.completed, + ATH11K_SCAN_TIMEOUT_HZ); + if (!left) + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "failed to receive hw scan complete: timed out\n"); + + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "reg hw scan wait left time %d\n", left); + } bands = hw->wiphy->bands; for (band = 0; band < NUM_NL80211_BANDS; band++) { @@ -194,11 +219,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params); kfree(params); - if (ar->pending_11d) { - complete(&ar->finish_11d_ch_list); - ar->pending_11d = false; - } - return ret; } @@ -264,15 +284,8 @@ int ath11k_regd_update(struct ath11k *ar) goto err; } - if (ar->pending_11d) - complete(&ar->finish_11d_scan); - rtnl_lock(); wiphy_lock(ar->hw->wiphy); - - if (ar->pending_11d) - reinit_completion(&ar->finish_11d_ch_list); - ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy); wiphy_unlock(ar->hw->wiphy); rtnl_unlock(); @@ -283,7 +296,7 @@ int ath11k_regd_update(struct ath11k *ar) goto err; if (ar->state == ATH11K_STATE_ON) { - ret = 
ath11k_reg_update_chan_list(ar); + ret = ath11k_reg_update_chan_list(ar, true); if (ret) goto err; } diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 5fb9dc03a74e..2f284f26378d 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -32,5 +32,5 @@ struct ieee80211_regdomain * ath11k_reg_build_regd(struct ath11k_base *ab, struct cur_regulatory_info *reg_info, bool intersect); int ath11k_regd_update(struct ath11k *ar); -int ath11k_reg_update_chan_list(struct ath11k *ar); +int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait); #endif diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 7f97c26bffd9..3af24b18204e 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -2015,7 +2015,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar, { /* setup commonly used values */ arg->scan_req_id = 1; - arg->scan_priority = WMI_SCAN_PRIORITY_LOW; + if (ar->state_11d == ATH11K_11D_PREPARING) + arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM; + else + arg->scan_priority = WMI_SCAN_PRIORITY_LOW; arg->dwell_time_active = 50; arg->dwell_time_active_2g = 0; arg->dwell_time_passive = 150; @@ -6350,8 +6353,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb) { const struct wmi_11d_new_cc_ev *ev; + struct ath11k *ar; + struct ath11k_pdev *pdev; const void **tb; - int ret; + int ret, i; tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); if (IS_ERR(tb)) { @@ -6377,6 +6382,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s kfree(tb); + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + ar->state_11d = ATH11K_11D_IDLE; + complete(&ar->completed_11d_scan); + } + queue_work(ab->workqueue, &ab->update_11d_work); return 0; -- cgit v1.2.3 From 2c977be2cc5cd4743d2f223360e685d91f0cdffc Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Wed, 30 Mar 2022 11:11:49 +0300 Subject: ath10k: add support for MSDU IDs for USB devices commit 93bbdec6683e1c8ba2cc4e6 ("ath10k: htt: support MSDU ids with SDIO") introduced MSDU ID allocation in the htt TX path for high latency devices. This feature needs to be enabled for USB as well in order to have a functional TX path. 
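For reference, the change itself is a single line in ath10k_usb_probe(); a minimal sketch of the resulting probe-time setup (context and names taken directly from the hunk below, not a new API) looks like this:

	/* USB is a high-latency (HL) bus, so the target expects host-assigned
	 * MSDU IDs on the htt TX path, just as SDIO does.
	 */
	bus_params.dev_type = ATH10K_DEV_TYPE_HL;
	/* TODO: don't know yet how to get chip_id with USB */
	bus_params.chip_id = 0;
	bus_params.hl_msdu_ids = true;

	ret = ath10k_core_register(ar, &bus_params);
	if (ret)
		ath10k_warn(ar, "failed to register driver core: %d\n", ret);
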
Tested-on: QCA9377 hw1.0 USB 1.0.0.299 Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220327171340.7893-1-erik.stromdahl@gmail.com --- drivers/net/wireless/ath/ath10k/usb.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index 3d98f19c6ec8..7ed2022ac3c2 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -1013,6 +1013,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* TODO: don't know yet how to get chip_id with USB */ bus_params.chip_id = 0; + bus_params.hl_msdu_ids = true; ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_warn(ar, "failed to register driver core: %d\n", ret); -- cgit v1.2.3 From d930e2560ebee73411cfa5d5b0de4e82254c33e9 Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Wed, 30 Mar 2022 11:11:49 +0300 Subject: ath10k: enable napi on RX path for usb commit cfee8793a74dc3afabb08fc9 ("ath10k: enable napi on RX path for sdio") introduced napi for SDIO and updated the htt interface for high latency devices. These changes breaks USB, so USB code must be updated to use napi as well in order to have a working RX path. Tested-on: QCA9377 hw1.0 USB 1.0.0.299 Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220327171340.7893-2-erik.stromdahl@gmail.com --- drivers/net/wireless/ath/ath10k/usb.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index 7ed2022ac3c2..10df6ca303a1 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -345,6 +345,12 @@ static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) ep->ep_ops.ep_rx_complete(ar, skb); /* The RX complete handler now owns the skb... 
*/ + if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) { + local_bh_disable(); + napi_schedule(&ar->napi); + local_bh_enable(); + } + return; out_free_skb: @@ -387,6 +393,7 @@ static int ath10k_usb_hif_start(struct ath10k *ar) int i; struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + ath10k_core_napi_enable(ar); ath10k_usb_start_recv_pipes(ar); /* set the TX resource avail threshold for each TX pipe */ @@ -462,6 +469,7 @@ err: static void ath10k_usb_hif_stop(struct ath10k *ar) { ath10k_usb_flush_all(ar); + ath10k_core_napi_sync_disable(ar); } static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) @@ -966,6 +974,20 @@ err: return ret; } +static int ath10k_usb_napi_poll(struct napi_struct *ctx, int budget) +{ + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done; + + done = ath10k_htt_rx_hl_indication(ar, budget); + ath10k_dbg(ar, ATH10K_DBG_USB, "napi poll: done: %d, budget:%d\n", done, budget); + + if (done < budget) + napi_complete_done(ctx, done); + + return done; +} + /* ath10k usb driver registered functions */ static int ath10k_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) @@ -992,6 +1014,9 @@ static int ath10k_usb_probe(struct usb_interface *interface, return -ENOMEM; } + netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll, + ATH10K_NAPI_BUDGET); + usb_get_dev(dev); vendor_id = le16_to_cpu(dev->descriptor.idVendor); product_id = le16_to_cpu(dev->descriptor.idProduct); @@ -1045,6 +1070,7 @@ static void ath10k_usb_remove(struct usb_interface *interface) return; ath10k_core_unregister(ar_usb->ar); + netif_napi_del(&ar_usb->ar->napi); ath10k_usb_destroy(ar_usb->ar); usb_put_dev(interface_to_usbdev(interface)); ath10k_core_destroy(ar_usb->ar); -- cgit v1.2.3 From 7c45823064122e8bdc97972e9861ea03b086d0ab Mon Sep 17 00:00:00 2001 From: Wenli Looi Date: Fri, 25 Mar 2022 23:47:55 -0600 Subject: ath9k: make is2ghz consistent in ar9003_eeprom MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace a "mode" variable indicating "is 5ghz" with an "is2ghz" variable to make it consistent with other functions in the file. 
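A condensed sketch of the rename performed in ar9003_hw_calibration_apply() (both forms appear in the hunks below; the logic is unchanged, only the polarity of the flag flips):

	/* before */
	int mode = (frequency >= 4000);		/* non-zero means 5 GHz */

	/* after */
	bool is2ghz = frequency < 4000;		/* matches the other is2ghz helpers in this file */

	if (is2ghz)
		npier = AR9300_NUM_2G_CAL_PIERS;
	else
		npier = AR9300_NUM_5G_CAL_PIERS;
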
Signed-off-by: Wenli Looi Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220326054754.27812-1-wlooi@ucalgary.ca --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 46 ++++++++++++-------------- 1 file changed, 21 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index abf12de0e381..16bfcd0a1f6e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4747,7 +4747,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah, } static int ar9003_hw_cal_pier_get(struct ath_hw *ah, - int mode, + bool is2ghz, int ipier, int ichain, int *pfrequency, @@ -4757,7 +4757,6 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, { u8 *pCalPier; struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct; - int is2GHz; struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; struct ath_common *common = ath9k_hw_common(ah); @@ -4768,17 +4767,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, return -1; } - if (mode) { /* 5GHz */ - if (ipier >= AR9300_NUM_5G_CAL_PIERS) { - ath_dbg(common, EEPROM, - "Invalid 5GHz cal pier index, must be less than %d\n", - AR9300_NUM_5G_CAL_PIERS); - return -1; - } - pCalPier = &(eep->calFreqPier5G[ipier]); - pCalPierStruct = &(eep->calPierData5G[ichain][ipier]); - is2GHz = 0; - } else { + if (is2ghz) { if (ipier >= AR9300_NUM_2G_CAL_PIERS) { ath_dbg(common, EEPROM, "Invalid 2GHz cal pier index, must be less than %d\n", @@ -4788,10 +4777,18 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, pCalPier = &(eep->calFreqPier2G[ipier]); pCalPierStruct = &(eep->calPierData2G[ichain][ipier]); - is2GHz = 1; + } else { + if (ipier >= AR9300_NUM_5G_CAL_PIERS) { + ath_dbg(common, EEPROM, + "Invalid 5GHz cal pier index, must be less than %d\n", + AR9300_NUM_5G_CAL_PIERS); + return -1; + } + pCalPier = &(eep->calFreqPier5G[ipier]); + pCalPierStruct = &(eep->calPierData5G[ichain][ipier]); } - *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz); + *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2ghz); *pcorrection = pCalPierStruct->refPower; *ptemperature = pCalPierStruct->tempMeas; *pvoltage = pCalPierStruct->voltMeas; @@ -4960,7 +4957,6 @@ tempslope: static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) { int ichain, ipier, npier; - int mode; int lfrequency[AR9300_MAX_CHAINS], lcorrection[AR9300_MAX_CHAINS], ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS], @@ -4976,12 +4972,12 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) int pfrequency, pcorrection, ptemperature, pvoltage, pnf_cal, pnf_pwr; struct ath_common *common = ath9k_hw_common(ah); + bool is2ghz = frequency < 4000; - mode = (frequency >= 4000); - if (mode) - npier = AR9300_NUM_5G_CAL_PIERS; - else + if (is2ghz) npier = AR9300_NUM_2G_CAL_PIERS; + else + npier = AR9300_NUM_5G_CAL_PIERS; for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { lfrequency[ichain] = 0; @@ -4990,7 +4986,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) /* identify best lower and higher frequency calibration measurement */ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { for (ipier = 0; ipier < npier; ipier++) { - if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain, + if (!ar9003_hw_cal_pier_get(ah, is2ghz, ipier, ichain, &pfrequency, &pcorrection, &ptemperature, &pvoltage, &pnf_cal, &pnf_pwr)) { @@ -5127,12 +5123,12 @@ static int 
ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) /* Store calibrated noise floor values */ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) - if (mode) { - ah->nf_5g.cal[ichain] = nf_cal[ichain]; - ah->nf_5g.pwr[ichain] = nf_pwr[ichain]; - } else { + if (is2ghz) { ah->nf_2g.cal[ichain] = nf_cal[ichain]; ah->nf_2g.pwr[ichain] = nf_pwr[ichain]; + } else { + ah->nf_5g.cal[ichain] = nf_cal[ichain]; + ah->nf_5g.pwr[ichain] = nf_pwr[ichain]; } return 0; -- cgit v1.2.3 From 9149a94adad204a1301b55bd49ea1c12c179d144 Mon Sep 17 00:00:00 2001 From: Benjamin Stürz Date: Mon, 28 Mar 2022 23:29:12 +0200 Subject: wcn36xx: Improve readability of wcn36xx_caps_name MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use macros to force strict ordering of the elements. Signed-off-by: Benjamin Stürz Reviewed-by: Jeff Johnson Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328212912.283393-1-benni@stuerz.xyz --- drivers/net/wireless/ath/wcn36xx/main.c | 126 ++++++++++++++++---------------- 1 file changed, 65 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 864d1547e6f8..61c47f958e4a 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -192,70 +192,74 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif, sta_priv->sta_index; } +#define DEFINE(s) [s] = #s + static const char * const wcn36xx_caps_names[] = { - "MCC", /* 0 */ - "P2P", /* 1 */ - "DOT11AC", /* 2 */ - "SLM_SESSIONIZATION", /* 3 */ - "DOT11AC_OPMODE", /* 4 */ - "SAP32STA", /* 5 */ - "TDLS", /* 6 */ - "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */ - "WLANACTIVE_OFFLOAD", /* 8 */ - "BEACON_OFFLOAD", /* 9 */ - "SCAN_OFFLOAD", /* 10 */ - "ROAM_OFFLOAD", /* 11 */ - "BCN_MISS_OFFLOAD", /* 12 */ - "STA_POWERSAVE", /* 13 */ - "STA_ADVANCED_PWRSAVE", /* 14 */ - "AP_UAPSD", /* 15 */ - "AP_DFS", /* 16 */ - "BLOCKACK", /* 17 */ - "PHY_ERR", /* 18 */ - "BCN_FILTER", /* 19 */ - "RTT", /* 20 */ - "RATECTRL", /* 21 */ - "WOW", /* 22 */ - "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */ - "SPECULATIVE_PS_POLL", /* 24 */ - "SCAN_SCH", /* 25 */ - "IBSS_HEARTBEAT_OFFLOAD", /* 26 */ - "WLAN_SCAN_OFFLOAD", /* 27 */ - "WLAN_PERIODIC_TX_PTRN", /* 28 */ - "ADVANCE_TDLS", /* 29 */ - "BATCH_SCAN", /* 30 */ - "FW_IN_TX_PATH", /* 31 */ - "EXTENDED_NSOFFLOAD_SLOT", /* 32 */ - "CH_SWITCH_V1", /* 33 */ - "HT40_OBSS_SCAN", /* 34 */ - "UPDATE_CHANNEL_LIST", /* 35 */ - "WLAN_MCADDR_FLT", /* 36 */ - "WLAN_CH144", /* 37 */ - "NAN", /* 38 */ - "TDLS_SCAN_COEXISTENCE", /* 39 */ - "LINK_LAYER_STATS_MEAS", /* 40 */ - "MU_MIMO", /* 41 */ - "EXTENDED_SCAN", /* 42 */ - "DYNAMIC_WMM_PS", /* 43 */ - "MAC_SPOOFED_SCAN", /* 44 */ - "BMU_ERROR_GENERIC_RECOVERY", /* 45 */ - "DISA", /* 46 */ - "FW_STATS", /* 47 */ - "WPS_PRBRSP_TMPL", /* 48 */ - "BCN_IE_FLT_DELTA", /* 49 */ - "TDLS_OFF_CHANNEL", /* 51 */ - "RTT3", /* 52 */ - "MGMT_FRAME_LOGGING", /* 53 */ - "ENHANCED_TXBD_COMPLETION", /* 54 */ - "LOGGING_ENHANCEMENT", /* 55 */ - "EXT_SCAN_ENHANCED", /* 56 */ - "MEMORY_DUMP_SUPPORTED", /* 57 */ - "PER_PKT_STATS_SUPPORTED", /* 58 */ - "EXT_LL_STAT", /* 60 */ - "WIFI_CONFIG", /* 61 */ - "ANTENNA_DIVERSITY_SELECTION", /* 62 */ + DEFINE(MCC), + DEFINE(P2P), + DEFINE(DOT11AC), + DEFINE(SLM_SESSIONIZATION), + DEFINE(DOT11AC_OPMODE), + DEFINE(SAP32STA), + DEFINE(TDLS), + DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN), + DEFINE(WLANACTIVE_OFFLOAD), + DEFINE(BEACON_OFFLOAD), + DEFINE(SCAN_OFFLOAD), + 
DEFINE(ROAM_OFFLOAD), + DEFINE(BCN_MISS_OFFLOAD), + DEFINE(STA_POWERSAVE), + DEFINE(STA_ADVANCED_PWRSAVE), + DEFINE(AP_UAPSD), + DEFINE(AP_DFS), + DEFINE(BLOCKACK), + DEFINE(PHY_ERR), + DEFINE(BCN_FILTER), + DEFINE(RTT), + DEFINE(RATECTRL), + DEFINE(WOW), + DEFINE(WLAN_ROAM_SCAN_OFFLOAD), + DEFINE(SPECULATIVE_PS_POLL), + DEFINE(SCAN_SCH), + DEFINE(IBSS_HEARTBEAT_OFFLOAD), + DEFINE(WLAN_SCAN_OFFLOAD), + DEFINE(WLAN_PERIODIC_TX_PTRN), + DEFINE(ADVANCE_TDLS), + DEFINE(BATCH_SCAN), + DEFINE(FW_IN_TX_PATH), + DEFINE(EXTENDED_NSOFFLOAD_SLOT), + DEFINE(CH_SWITCH_V1), + DEFINE(HT40_OBSS_SCAN), + DEFINE(UPDATE_CHANNEL_LIST), + DEFINE(WLAN_MCADDR_FLT), + DEFINE(WLAN_CH144), + DEFINE(NAN), + DEFINE(TDLS_SCAN_COEXISTENCE), + DEFINE(LINK_LAYER_STATS_MEAS), + DEFINE(MU_MIMO), + DEFINE(EXTENDED_SCAN), + DEFINE(DYNAMIC_WMM_PS), + DEFINE(MAC_SPOOFED_SCAN), + DEFINE(BMU_ERROR_GENERIC_RECOVERY), + DEFINE(DISA), + DEFINE(FW_STATS), + DEFINE(WPS_PRBRSP_TMPL), + DEFINE(BCN_IE_FLT_DELTA), + DEFINE(TDLS_OFF_CHANNEL), + DEFINE(RTT3), + DEFINE(MGMT_FRAME_LOGGING), + DEFINE(ENHANCED_TXBD_COMPLETION), + DEFINE(LOGGING_ENHANCEMENT), + DEFINE(EXT_SCAN_ENHANCED), + DEFINE(MEMORY_DUMP_SUPPORTED), + DEFINE(PER_PKT_STATS_SUPPORTED), + DEFINE(EXT_LL_STAT), + DEFINE(WIFI_CONFIG), + DEFINE(ANTENNA_DIVERSITY_SELECTION), }; +#undef DEFINE + static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x) { if (x >= ARRAY_SIZE(wcn36xx_caps_names)) -- cgit v1.2.3 From 948171b5f6fcf11253355bd836e6e8b613bea12f Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 1 Apr 2022 14:53:08 +0300 Subject: ath11k: PCI changes to support WCN6750 In order to add the support for WCN6750 in ATH11K , it is required to move certain PCI definitions to the header file. As a result, add ATH11K_PCI_* prefix to these definitions. Also, change the scope of certain PCI APIs that are required to enable WCN6750 from static to global. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328055714.6449-2-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/pci.c | 110 +++++++++++++++------------------- drivers/net/wireless/ath/ath11k/pci.h | 35 +++++++++++ 2 files changed, 84 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 8a3ff12057e8..5e71eafdf9a3 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -17,25 +18,10 @@ #define ATH11K_PCI_BAR_NUM 0 #define ATH11K_PCI_DMA_MASK 32 -#define ATH11K_PCI_IRQ_CE0_OFFSET 3 -#define ATH11K_PCI_IRQ_DP_OFFSET 14 - -#define WINDOW_ENABLE_BIT 0x40000000 -#define WINDOW_REG_ADDRESS 0x310c -#define WINDOW_VALUE_MASK GENMASK(24, 19) -#define WINDOW_START 0x80000 -#define WINDOW_RANGE_MASK GENMASK(18, 0) - #define TCSR_SOC_HW_VERSION 0x0224 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0) -/* BAR0 + 4k is always accessible, and no - * need to force wakeup. 
- * 4K - 32 = 0xFE0 - */ -#define ACCESS_ALWAYS_OFF 0xFE0 - #define QCA6390_DEVICE_ID 0x1101 #define QCN9074_DEVICE_ID 0x1104 #define WCN6855_DEVICE_ID 0x1103 @@ -147,27 +133,30 @@ static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offse { struct ath11k_base *ab = ab_pci->ab; - u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset); + u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset); lockdep_assert_held(&ab_pci->window_lock); if (window != ab_pci->register_window) { - iowrite32(WINDOW_ENABLE_BIT | window, - ab->mem + WINDOW_REG_ADDRESS); - ioread32(ab->mem + WINDOW_REG_ADDRESS); + iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, + ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); + ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); ab_pci->register_window = window; } } static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) { - u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); - u32 ce_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); + u32 umac_window; + u32 ce_window; u32 window; + umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); + ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); window = (umac_window << 12) | (ce_window << 6); - iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); + iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, + ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); } static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab, @@ -176,13 +165,13 @@ static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 window_start; /* If offset lies within DP register range, use 3rd window */ - if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) - window_start = 3 * WINDOW_START; + if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) + window_start = 3 * ATH11K_PCI_WINDOW_START; /* If offset lies within CE register range, use 2nd window */ - else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) - window_start = 2 * WINDOW_START; + else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < ATH11K_PCI_WINDOW_RANGE_MASK) + window_start = 2 * ATH11K_PCI_WINDOW_START; else - window_start = WINDOW_START; + window_start = ATH11K_PCI_WINDOW_START; return window_start; } @@ -198,32 +187,32 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) */ if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ACCESS_ALWAYS_OFF) + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); - if (offset < WINDOW_START) { + if (offset < ATH11K_PCI_WINDOW_START) { iowrite32(value, ab->mem + offset); } else { if (ab->bus_params.static_window_map) window_start = ath11k_pci_get_window_start(ab, offset); else - window_start = WINDOW_START; + window_start = ATH11K_PCI_WINDOW_START; - if (window_start == WINDOW_START) { + if (window_start == ATH11K_PCI_WINDOW_START) { spin_lock_bh(&ab_pci->window_lock); ath11k_pci_select_window(ab_pci, offset); iowrite32(value, ab->mem + window_start + - (offset & WINDOW_RANGE_MASK)); + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); spin_unlock_bh(&ab_pci->window_lock); } else { iowrite32(value, ab->mem + window_start + - (offset & WINDOW_RANGE_MASK)); + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } } if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ACCESS_ALWAYS_OFF && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && !ret) 
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); } @@ -239,32 +228,32 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) */ if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ACCESS_ALWAYS_OFF) + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); - if (offset < WINDOW_START) { + if (offset < ATH11K_PCI_WINDOW_START) { val = ioread32(ab->mem + offset); } else { if (ab->bus_params.static_window_map) window_start = ath11k_pci_get_window_start(ab, offset); else - window_start = WINDOW_START; + window_start = ATH11K_PCI_WINDOW_START; - if (window_start == WINDOW_START) { + if (window_start == ATH11K_PCI_WINDOW_START) { spin_lock_bh(&ab_pci->window_lock); ath11k_pci_select_window(ab_pci, offset); val = ioread32(ab->mem + window_start + - (offset & WINDOW_RANGE_MASK)); + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); spin_unlock_bh(&ab_pci->window_lock); } else { val = ioread32(ab->mem + window_start + - (offset & WINDOW_RANGE_MASK)); + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } } if (ab->hw_params.wakeup_mhi && test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ACCESS_ALWAYS_OFF && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && !ret) mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); @@ -474,8 +463,8 @@ int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector) return pci_irq_vector(pci_dev, vector); } -static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, - u32 *msi_addr_hi) +void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct pci_dev *pci_dev = to_pci_dev(ab->dev); @@ -519,8 +508,7 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_nam return -EINVAL; } -static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, - u32 *msi_idx) +void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) { u32 i, msi_data_idx; @@ -536,9 +524,9 @@ static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, *msi_idx = msi_data_idx; } -static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, - int *num_vectors, u32 *user_base_data, - u32 *base_vector) +int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); @@ -561,7 +549,7 @@ static void ath11k_pci_free_ext_irq(struct ath11k_base *ab) } } -static void ath11k_pci_free_irq(struct ath11k_base *ab) +void ath11k_pci_free_irq(struct ath11k_base *ab) { int i, irq_idx; @@ -710,7 +698,7 @@ static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } -static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) +void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) { int i; @@ -741,7 +729,7 @@ static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab) } } -static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab) +void ath11k_pci_ext_irq_disable(struct ath11k_base *ab) { __ath11k_pci_ext_irq_disable(ab); ath11k_pci_sync_ext_irqs(ab); @@ -854,8 +842,8 @@ static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) return 0; } -static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, - const struct cpumask *m) +int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, + const struct cpumask *m) { if 
(test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) return 0; @@ -863,7 +851,7 @@ static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, return irq_set_affinity_hint(ab_pci->pdev->irq, m); } -static int ath11k_pci_config_irq(struct ath11k_base *ab) +int ath11k_pci_config_irq(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct ath11k_ce_pipe *ce_pipe; @@ -939,7 +927,7 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) &cfg->shadow_reg_v2_len); } -static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) +void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) { int i; @@ -1151,7 +1139,7 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci) set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags); } -static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) +void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) { if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, @@ -1234,20 +1222,20 @@ static void ath11k_pci_kill_tasklets(struct ath11k_base *ab) } } -static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab) +void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab) { ath11k_pci_ce_irqs_disable(ab); ath11k_pci_sync_ce_irqs(ab); ath11k_pci_kill_tasklets(ab); } -static void ath11k_pci_stop(struct ath11k_base *ab) +void ath11k_pci_stop(struct ath11k_base *ab) { ath11k_pci_ce_irq_disable_sync(ab); ath11k_ce_cleanup_pipes(ab); } -static int ath11k_pci_start(struct ath11k_base *ab) +int ath11k_pci_start(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); @@ -1277,8 +1265,8 @@ static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab) ath11k_pci_ce_irq_disable_sync(ab); } -static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe) +int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index 61d67b20a0eb..31ee843c1cab 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _ATH11K_PCI_H #define _ATH11K_PCI_H @@ -52,6 +53,21 @@ #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 +#define ATH11K_PCI_IRQ_CE0_OFFSET 3 +#define ATH11K_PCI_IRQ_DP_OFFSET 14 + +#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000 +#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c +#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19) +#define ATH11K_PCI_WINDOW_START 0x80000 +#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0) + +/* BAR0 + 4k is always accessible, and no + * need to force wakeup. 
+ * 4K - 32 = 0xFE0 + */ +#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0 + struct ath11k_msi_user { char *name; int num_vectors; @@ -103,5 +119,24 @@ int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_nam int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector); void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value); u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset); +void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi); +void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx); +void ath11k_pci_free_irq(struct ath11k_base *ab); +int ath11k_pci_config_irq(struct ath11k_base *ab); +void ath11k_pci_ext_irq_enable(struct ath11k_base *ab); +void ath11k_pci_ext_irq_disable(struct ath11k_base *ab); +void ath11k_pci_stop(struct ath11k_base *ab); +int ath11k_pci_start(struct ath11k_base *ab); +int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab); +void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab); +int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector); +void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci); +int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, + const struct cpumask *m); #endif -- cgit v1.2.3 From bbfdc5a751a634fcdaae669cc98b3d0e1dc0eedf Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 1 Apr 2022 14:53:08 +0300 Subject: ath11k: Refactor PCI code to support WCN6750 Unlike other ATH11K PCIe devices which are enumerated by APSS processor (Application Processor SubSystem), WCN6750 gets enumerated by the WPSS Q6 processor (Wireless Processor SubSystem); In simple terms, though WCN6750 is PCIe device, it is not attached to the APSS processor, APSS will not know of such a device being present in the system and therefore WCN6750 will be registered as a platform device to the kernel core like other supported AHB devices. WCN6750 needs both AHB and PCI APIs for it's operation, it uses AHB APIs for device probe/boot and PCI APIs for device setup and register accesses. Because of this nature, it is referred as a hybrid bus device. Refactor PCI code to support hybrid bus devices like WCN6750. 
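The effect of the refactor is easiest to see in the hif ops table further down: bus-agnostic register access, MSI and interrupt handling move into the new pcic.c helpers, and the PCI-enumerated driver merely wires them up. A trimmed sketch of the updated table (entries taken from the hunks below):

	static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
		.start			= ath11k_pcic_start,
		.stop			= ath11k_pcic_stop,
		.read32			= ath11k_pcic_read32,
		.write32		= ath11k_pcic_write32,
		/* power up/down stays PCI/MHI specific */
		.power_down		= ath11k_pci_power_down,
		.power_up		= ath11k_pci_power_up,
		.irq_enable		= ath11k_pcic_ext_irq_enable,
		.irq_disable		= ath11k_pcic_ext_irq_disable,
		.map_service_to_pipe	= ath11k_pcic_map_service_to_pipe,
	};

A hybrid bus device such as WCN6750 can then reuse the same ath11k_pcic_* helpers from its own platform-registered ops table without pulling in the PCI enumeration path.
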
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328055714.6449-3-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/Makefile | 2 +- drivers/net/wireless/ath/ath11k/mhi.c | 27 +- drivers/net/wireless/ath/ath11k/pci.c | 816 ++----------------------------- drivers/net/wireless/ath/ath11k/pci.h | 41 -- drivers/net/wireless/ath/ath11k/pcic.c | 747 ++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/pcic.h | 53 ++ 6 files changed, 856 insertions(+), 830 deletions(-) create mode 100644 drivers/net/wireless/ath/ath11k/pcic.c create mode 100644 drivers/net/wireless/ath/ath11k/pcic.h (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index 444c00c2bce2..0ebfe41d6143 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -29,7 +29,7 @@ obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o ath11k_ahb-y += ahb.o obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o -ath11k_pci-y += mhi.o pci.o +ath11k_pci-y += mhi.o pci.o pcic.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 61d83be4841f..3724eebba4a2 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -1,5 +1,8 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear -/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ #include #include @@ -11,6 +14,7 @@ #include "debug.h" #include "mhi.h" #include "pci.h" +#include "pcic.h" #define MHI_TIMEOUT_DEFAULT_MS 90000 #define RDDM_DUMP_SIZE 0x420000 @@ -205,7 +209,7 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab) { u32 val; - val = ath11k_pci_read32(ab, MHISTATUS); + val = ath11k_pcic_read32(ab, MHISTATUS); ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val); @@ -213,29 +217,29 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab) * has SYSERR bit set and thus need to set MHICTRL_RESET * to clear SYSERR. 
*/ - ath11k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK); + ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK); mdelay(10); } static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab) { - ath11k_pci_write32(ab, PCIE_TXVECDB, 0); + ath11k_pcic_write32(ab, PCIE_TXVECDB, 0); } static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab) { - ath11k_pci_write32(ab, PCIE_TXVECSTATUS, 0); + ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0); } static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab) { - ath11k_pci_write32(ab, PCIE_RXVECDB, 0); + ath11k_pcic_write32(ab, PCIE_RXVECDB, 0); } static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab) { - ath11k_pci_write32(ab, PCIE_RXVECSTATUS, 0); + ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0); } void ath11k_mhi_clear_vector(struct ath11k_base *ab) @@ -254,9 +258,9 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) int *irq; unsigned int msi_data; - ret = ath11k_pci_get_user_msi_assignment(ab_pci, - "MHI", &num_vectors, - &user_base_data, &base_vector); + ret = ath11k_pcic_get_user_msi_assignment(ab_pci, + "MHI", &num_vectors, + &user_base_data, &base_vector); if (ret) return ret; @@ -273,8 +277,7 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) msi_data += i; - irq[i] = ath11k_pci_get_msi_irq(ab->dev, - msi_data); + irq[i] = ath11k_pcic_get_msi_irq(ab->dev, msi_data); } ab_pci->mhi_ctrl->irq = irq; diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 5e71eafdf9a3..bffbcace63ae 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -14,6 +14,7 @@ #include "hif.h" #include "mhi.h" #include "debug.h" +#include "pcic.h" #define ATH11K_PCI_BAR_NUM 0 #define ATH11K_PCI_DMA_MASK 32 @@ -75,76 +76,6 @@ static const struct ath11k_msi_config msi_config_one_msi = { }, }; -static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { - "bhi", - "mhi-er0", - "mhi-er1", - "ce0", - "ce1", - "ce2", - "ce3", - "ce4", - "ce5", - "ce6", - "ce7", - "ce8", - "ce9", - "ce10", - "ce11", - "host2wbm-desc-feed", - "host2reo-re-injection", - "host2reo-command", - "host2rxdma-monitor-ring3", - "host2rxdma-monitor-ring2", - "host2rxdma-monitor-ring1", - "reo2ost-exception", - "wbm2host-rx-release", - "reo2host-status", - "reo2host-destination-ring4", - "reo2host-destination-ring3", - "reo2host-destination-ring2", - "reo2host-destination-ring1", - "rxdma2host-monitor-destination-mac3", - "rxdma2host-monitor-destination-mac2", - "rxdma2host-monitor-destination-mac1", - "ppdu-end-interrupts-mac3", - "ppdu-end-interrupts-mac2", - "ppdu-end-interrupts-mac1", - "rxdma2host-monitor-status-ring-mac3", - "rxdma2host-monitor-status-ring-mac2", - "rxdma2host-monitor-status-ring-mac1", - "host2rxdma-host-buf-ring-mac3", - "host2rxdma-host-buf-ring-mac2", - "host2rxdma-host-buf-ring-mac1", - "rxdma2host-destination-ring-mac3", - "rxdma2host-destination-ring-mac2", - "rxdma2host-destination-ring-mac1", - "host2tcl-input-ring4", - "host2tcl-input-ring3", - "host2tcl-input-ring2", - "host2tcl-input-ring1", - "wbm2host-tx-completions-ring3", - "wbm2host-tx-completions-ring2", - "wbm2host-tx-completions-ring1", - "tcl2host-status-ring", -}; - -static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset) -{ - struct ath11k_base *ab = ab_pci->ab; - - u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset); - - lockdep_assert_held(&ab_pci->window_lock); - - if (window != ab_pci->register_window) { - 
iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, - ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); - ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); - ab_pci->register_window = window; - } -} - static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) { u32 umac_window; @@ -159,116 +90,15 @@ static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); } -static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab, - u32 offset) -{ - u32 window_start; - - /* If offset lies within DP register range, use 3rd window */ - if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) - window_start = 3 * ATH11K_PCI_WINDOW_START; - /* If offset lies within CE register range, use 2nd window */ - else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < ATH11K_PCI_WINDOW_RANGE_MASK) - window_start = 2 * ATH11K_PCI_WINDOW_START; - else - window_start = ATH11K_PCI_WINDOW_START; - - return window_start; -} - -void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - u32 window_start; - int ret = 0; - - /* for offset beyond BAR + 4K - 32, may - * need to wakeup MHI to access. - */ - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) - ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); - - if (offset < ATH11K_PCI_WINDOW_START) { - iowrite32(value, ab->mem + offset); - } else { - if (ab->bus_params.static_window_map) - window_start = ath11k_pci_get_window_start(ab, offset); - else - window_start = ATH11K_PCI_WINDOW_START; - - if (window_start == ATH11K_PCI_WINDOW_START) { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pci_select_window(ab_pci, offset); - iowrite32(value, ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); - } else { - iowrite32(value, ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - } - } - - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && - !ret) - mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); -} - -u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - u32 val, window_start; - int ret = 0; - - /* for offset beyond BAR + 4K - 32, may - * need to wakeup MHI to access. 
- */ - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) - ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); - - if (offset < ATH11K_PCI_WINDOW_START) { - val = ioread32(ab->mem + offset); - } else { - if (ab->bus_params.static_window_map) - window_start = ath11k_pci_get_window_start(ab, offset); - else - window_start = ATH11K_PCI_WINDOW_START; - - if (window_start == ATH11K_PCI_WINDOW_START) { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pci_select_window(ab_pci, offset); - val = ioread32(ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); - } else { - val = ioread32(ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - } - } - - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && - !ret) - mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); - - return val; -} - static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) { u32 val, delay; - val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); + val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET); val |= PCIE_SOC_GLOBAL_RESET_V; - ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); + ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val); /* TODO: exact time to sleep is uncertain */ delay = 10; @@ -277,11 +107,11 @@ static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) /* Need to toggle V bit back otherwise stuck in reset status */ val &= ~PCIE_SOC_GLOBAL_RESET_V; - ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); + ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val); mdelay(delay); - val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); + val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET); if (val == 0xffffffff) ath11k_warn(ab, "link down error during global reset\n"); } @@ -291,10 +121,10 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) u32 val; /* read cookie */ - val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR); + val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR); ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val); - val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); + val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY); ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); /* TODO: exact time to sleep is uncertain */ @@ -303,16 +133,16 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from * continuing warm path and entering dead loop. */ - ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0); + ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0); mdelay(10); - val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); + val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY); ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); /* A read clear register. clear the register to prevent * Q6 from entering wrong code path. 
*/ - val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG); + val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG); ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val); } @@ -322,14 +152,14 @@ static int ath11k_pci_set_link_reg(struct ath11k_base *ab, u32 v; int i; - v = ath11k_pci_read32(ab, offset); + v = ath11k_pcic_read32(ab, offset); if ((v & mask) == value) return 0; for (i = 0; i < 10; i++) { - ath11k_pci_write32(ab, offset, (v & ~mask) | value); + ath11k_pcic_write32(ab, offset, (v & ~mask) | value); - v = ath11k_pci_read32(ab, offset); + v = ath11k_pcic_read32(ab, offset); if ((v & mask) == value) return 0; @@ -390,23 +220,23 @@ static void ath11k_pci_enable_ltssm(struct ath11k_base *ab) u32 val; int i; - val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); + val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM); /* PCIE link seems very unstable after the Hot Reset*/ for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { if (val == 0xffffffff) mdelay(5); - ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); - val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); + ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); + val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM); } ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val); - val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST); val |= GCC_GCC_PCIE_HOT_RST_VAL; - ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); - val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val); + val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST); ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); @@ -420,21 +250,21 @@ static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab) * So when download SBL again, SBL will open Interrupt and * receive it, and crash immediately. 
*/ - ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); + ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); } static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab) { u32 val; - val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); + val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; - ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); + ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); } static void ath11k_pci_force_wake(struct ath11k_base *ab) { - ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); + ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); mdelay(5); } @@ -456,462 +286,6 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on) ath11k_mhi_set_mhictrl_reset(ab); } -int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector) -{ - struct pci_dev *pci_dev = to_pci_dev(dev); - - return pci_irq_vector(pci_dev, vector); -} - -void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, - u32 *msi_addr_hi) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - struct pci_dev *pci_dev = to_pci_dev(ab->dev); - - pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, - msi_addr_lo); - - if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { - pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, - msi_addr_hi); - } else { - *msi_addr_hi = 0; - } -} - -int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name, - int *num_vectors, u32 *user_base_data, - u32 *base_vector) -{ - struct ath11k_base *ab = ab_pci->ab; - const struct ath11k_msi_config *msi_config = ab_pci->msi_config; - int idx; - - for (idx = 0; idx < msi_config->total_users; idx++) { - if (strcmp(user_name, msi_config->users[idx].name) == 0) { - *num_vectors = msi_config->users[idx].num_vectors; - *base_vector = msi_config->users[idx].base_vector; - *user_base_data = *base_vector + ab_pci->msi_ep_base_data; - - ath11k_dbg(ab, ATH11K_DBG_PCI, - "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", - user_name, *num_vectors, *user_base_data, - *base_vector); - - return 0; - } - } - - ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); - - return -EINVAL; -} - -void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) -{ - u32 i, msi_data_idx; - - for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { - if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) - continue; - - if (ce_id == i) - break; - - msi_data_idx++; - } - *msi_idx = msi_data_idx; -} - -int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, - int *num_vectors, u32 *user_base_data, - u32 *base_vector) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - - return ath11k_pci_get_user_msi_assignment(ab_pci, user_name, - num_vectors, user_base_data, - base_vector); -} - -static void ath11k_pci_free_ext_irq(struct ath11k_base *ab) -{ - int i, j; - - for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - - for (j = 0; j < irq_grp->num_irq; j++) - free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); - - netif_napi_del(&irq_grp->napi); - } -} - -void ath11k_pci_free_irq(struct ath11k_base *ab) -{ - int i, irq_idx; - - for (i = 0; i < ab->hw_params.ce_count; i++) { - if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) - continue; - irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; - 
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); - } - - ath11k_pci_free_ext_irq(ab); -} - -static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - u32 irq_idx; - - /* In case of one MSI vector, we handle irq enable/disable in a - * uniform way since we only have one irq - */ - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - return; - - irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; - enable_irq(ab->irq_num[irq_idx]); -} - -static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - u32 irq_idx; - - /* In case of one MSI vector, we handle irq enable/disable in a - * uniform way since we only have one irq - */ - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - return; - - irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; - disable_irq_nosync(ab->irq_num[irq_idx]); -} - -static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab) -{ - int i; - - clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); - - for (i = 0; i < ab->hw_params.ce_count; i++) { - if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) - continue; - ath11k_pci_ce_irq_disable(ab, i); - } -} - -static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab) -{ - int i; - int irq_idx; - - for (i = 0; i < ab->hw_params.ce_count; i++) { - if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) - continue; - - irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; - synchronize_irq(ab->irq_num[irq_idx]); - } -} - -static void ath11k_pci_ce_tasklet(struct tasklet_struct *t) -{ - struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); - int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; - - ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); - - enable_irq(ce_pipe->ab->irq_num[irq_idx]); -} - -static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg) -{ - struct ath11k_ce_pipe *ce_pipe = arg; - struct ath11k_base *ab = ce_pipe->ab; - int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; - - if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) - return IRQ_HANDLED; - - /* last interrupt received for this CE */ - ce_pipe->timestamp = jiffies; - - disable_irq_nosync(ab->irq_num[irq_idx]); - - tasklet_schedule(&ce_pipe->intr_tq); - - return IRQ_HANDLED; -} - -static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); - int i; - - /* In case of one MSI vector, we handle irq enable/disable - * in a uniform way since we only have one irq - */ - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - return; - - for (i = 0; i < irq_grp->num_irq; i++) - disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); -} - -static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc) -{ - int i; - - clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags); - - for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; - - ath11k_pci_ext_grp_disable(irq_grp); - - if (irq_grp->napi_enabled) { - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); - irq_grp->napi_enabled = false; - } - } -} - -static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); - int i; - - /* In case of one MSI vector, we handle irq enable/disable in a - * uniform way since we only have one irq - */ - if 
(!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - return; - - for (i = 0; i < irq_grp->num_irq; i++) - enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); -} - -void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) -{ - int i; - - set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); - - for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - - if (!irq_grp->napi_enabled) { - napi_enable(&irq_grp->napi); - irq_grp->napi_enabled = true; - } - ath11k_pci_ext_grp_enable(irq_grp); - } -} - -static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab) -{ - int i, j, irq_idx; - - for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - - for (j = 0; j < irq_grp->num_irq; j++) { - irq_idx = irq_grp->irqs[j]; - synchronize_irq(ab->irq_num[irq_idx]); - } - } -} - -void ath11k_pci_ext_irq_disable(struct ath11k_base *ab) -{ - __ath11k_pci_ext_irq_disable(ab); - ath11k_pci_sync_ext_irqs(ab); -} - -static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) -{ - struct ath11k_ext_irq_grp *irq_grp = container_of(napi, - struct ath11k_ext_irq_grp, - napi); - struct ath11k_base *ab = irq_grp->ab; - int work_done; - int i; - - work_done = ath11k_dp_service_srng(ab, irq_grp, budget); - if (work_done < budget) { - napi_complete_done(napi, work_done); - for (i = 0; i < irq_grp->num_irq; i++) - enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); - } - - if (work_done > budget) - work_done = budget; - - return work_done; -} - -static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) -{ - struct ath11k_ext_irq_grp *irq_grp = arg; - struct ath11k_base *ab = irq_grp->ab; - int i; - - if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) - return IRQ_HANDLED; - - ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); - - /* last interrupt received for this group */ - irq_grp->timestamp = jiffies; - - for (i = 0; i < irq_grp->num_irq; i++) - disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); - - napi_schedule(&irq_grp->napi); - - return IRQ_HANDLED; -} - -static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - int i, j, ret, num_vectors = 0; - u32 user_base_data = 0, base_vector = 0; - - ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", - &num_vectors, - &user_base_data, - &base_vector); - if (ret < 0) - return ret; - - for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - u32 num_irq = 0; - - irq_grp->ab = ab; - irq_grp->grp_id = i; - init_dummy_netdev(&irq_grp->napi_ndev); - netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, - ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT); - - if (ab->hw_params.ring_mask->tx[i] || - ab->hw_params.ring_mask->rx[i] || - ab->hw_params.ring_mask->rx_err[i] || - ab->hw_params.ring_mask->rx_wbm_rel[i] || - ab->hw_params.ring_mask->reo_status[i] || - ab->hw_params.ring_mask->rxdma2host[i] || - ab->hw_params.ring_mask->host2rxdma[i] || - ab->hw_params.ring_mask->rx_mon_status[i]) { - num_irq = 1; - } - - irq_grp->num_irq = num_irq; - irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i; - - for (j = 0; j < irq_grp->num_irq; j++) { - int irq_idx = irq_grp->irqs[j]; - int vector = (i % num_vectors) + base_vector; - int irq = ath11k_pci_get_msi_irq(ab->dev, vector); - - ab->irq_num[irq_idx] = irq; - - ath11k_dbg(ab, ATH11K_DBG_PCI, - "irq:%d group:%d\n", irq, i); - - 
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); - ret = request_irq(irq, ath11k_pci_ext_interrupt_handler, - ab_pci->irq_flags, - "DP_EXT_IRQ", irq_grp); - if (ret) { - ath11k_err(ab, "failed request irq %d: %d\n", - vector, ret); - return ret; - } - } - ath11k_pci_ext_grp_disable(irq_grp); - } - - return 0; -} - -int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, - const struct cpumask *m) -{ - if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - return 0; - - return irq_set_affinity_hint(ab_pci->pdev->irq, m); -} - -int ath11k_pci_config_irq(struct ath11k_base *ab) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - struct ath11k_ce_pipe *ce_pipe; - u32 msi_data_start; - u32 msi_data_count, msi_data_idx; - u32 msi_irq_start; - unsigned int msi_data; - int irq, i, ret, irq_idx; - - ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), - "CE", &msi_data_count, - &msi_data_start, &msi_irq_start); - if (ret) - return ret; - - ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); - if (ret) { - ath11k_err(ab, "failed to set irq affinity %d\n", ret); - return ret; - } - - /* Configure CE irqs */ - for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { - if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) - continue; - - msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; - irq = ath11k_pci_get_msi_irq(ab->dev, msi_data); - ce_pipe = &ab->ce.ce_pipe[i]; - - irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; - - tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet); - - ret = request_irq(irq, ath11k_pci_ce_interrupt_handler, - ab_pci->irq_flags, irq_name[irq_idx], - ce_pipe); - if (ret) { - ath11k_err(ab, "failed to request irq %d: %d\n", - irq_idx, ret); - goto err_irq_affinity_cleanup; - } - - ab->irq_num[irq_idx] = irq; - msi_data_idx++; - - ath11k_pci_ce_irq_disable(ab, i); - } - - ret = ath11k_pci_ext_irq_config(ab); - if (ret) - goto err_irq_affinity_cleanup; - - return 0; - -err_irq_affinity_cleanup: - ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); - return ret; -} - static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) { struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; @@ -927,19 +301,6 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) &cfg->shadow_reg_v2_len); } -void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) -{ - int i; - - set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); - - for (i = 0; i < ab->hw_params.ce_count; i++) { - if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) - continue; - ath11k_pci_ce_irq_enable(ab, i); - } -} - static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable) { struct pci_dev *dev = ab_pci->pdev; @@ -1139,13 +500,6 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci) set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags); } -void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) -{ - if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl); -} - static int ath11k_pci_power_up(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); @@ -1179,7 +533,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab) struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); /* restore aspm in case firmware bootup fails */ - ath11k_pci_aspm_restore(ab_pci); + ath11k_pcic_aspm_restore(ab_pci); ath11k_pci_force_wake(ab_pci->ab); @@ -1208,130 +562,40 @@ static int ath11k_pci_hif_resume(struct ath11k_base *ab) return 0; } -static void 
ath11k_pci_kill_tasklets(struct ath11k_base *ab) -{ - int i; - - for (i = 0; i < ab->hw_params.ce_count; i++) { - struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; - - if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) - continue; - - tasklet_kill(&ce_pipe->intr_tq); - } -} - -void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab) -{ - ath11k_pci_ce_irqs_disable(ab); - ath11k_pci_sync_ce_irqs(ab); - ath11k_pci_kill_tasklets(ab); -} - -void ath11k_pci_stop(struct ath11k_base *ab) -{ - ath11k_pci_ce_irq_disable_sync(ab); - ath11k_ce_cleanup_pipes(ab); -} - -int ath11k_pci_start(struct ath11k_base *ab) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - - set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); - - /* TODO: for now don't restore ASPM in case of single MSI - * vector as MHI register reading in M2 causes system hang. - */ - if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - ath11k_pci_aspm_restore(ab_pci); - else - ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); - - ath11k_pci_ce_irqs_enable(ab); - ath11k_ce_rx_post_buf(ab); - - return 0; -} - static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab) { - ath11k_pci_ce_irqs_enable(ab); + ath11k_pcic_ce_irqs_enable(ab); } static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab) { - ath11k_pci_ce_irq_disable_sync(ab); -} - -int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe) -{ - const struct service_to_pipe *entry; - bool ul_set = false, dl_set = false; - int i; - - for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { - entry = &ab->hw_params.svc_to_ce_map[i]; - - if (__le32_to_cpu(entry->service_id) != service_id) - continue; - - switch (__le32_to_cpu(entry->pipedir)) { - case PIPEDIR_NONE: - break; - case PIPEDIR_IN: - WARN_ON(dl_set); - *dl_pipe = __le32_to_cpu(entry->pipenum); - dl_set = true; - break; - case PIPEDIR_OUT: - WARN_ON(ul_set); - *ul_pipe = __le32_to_cpu(entry->pipenum); - ul_set = true; - break; - case PIPEDIR_INOUT: - WARN_ON(dl_set); - WARN_ON(ul_set); - *dl_pipe = __le32_to_cpu(entry->pipenum); - *ul_pipe = __le32_to_cpu(entry->pipenum); - dl_set = true; - ul_set = true; - break; - } - } - - if (WARN_ON(!ul_set || !dl_set)) - return -ENOENT; - - return 0; + ath11k_pcic_ce_irq_disable_sync(ab); } static const struct ath11k_hif_ops ath11k_pci_hif_ops = { - .start = ath11k_pci_start, - .stop = ath11k_pci_stop, - .read32 = ath11k_pci_read32, - .write32 = ath11k_pci_write32, + .start = ath11k_pcic_start, + .stop = ath11k_pcic_stop, + .read32 = ath11k_pcic_read32, + .write32 = ath11k_pcic_write32, .power_down = ath11k_pci_power_down, .power_up = ath11k_pci_power_up, .suspend = ath11k_pci_hif_suspend, .resume = ath11k_pci_hif_resume, - .irq_enable = ath11k_pci_ext_irq_enable, - .irq_disable = ath11k_pci_ext_irq_disable, - .get_msi_address = ath11k_pci_get_msi_address, + .irq_enable = ath11k_pcic_ext_irq_enable, + .irq_disable = ath11k_pcic_ext_irq_disable, + .get_msi_address = ath11k_pcic_get_msi_address, .get_user_msi_vector = ath11k_get_user_msi_assignment, - .map_service_to_pipe = ath11k_pci_map_service_to_pipe, + .map_service_to_pipe = ath11k_pcic_map_service_to_pipe, .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, - .get_ce_msi_idx = ath11k_pci_get_ce_msi_idx, + .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx, }; static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor) { u32 soc_hw_version; - soc_hw_version = 
ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION); + soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION); *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, soc_hw_version); *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, @@ -1473,7 +737,7 @@ unsupported_wcn6855_soc: ath11k_pci_init_qmi_ce_config(ab); - ret = ath11k_pci_config_irq(ab); + ret = ath11k_pcic_config_irq(ab); if (ret) { ath11k_err(ab, "failed to config irq: %d\n", ret); goto err_ce_free; @@ -1498,7 +762,7 @@ unsupported_wcn6855_soc: return 0; err_free_irq: - ath11k_pci_free_irq(ab); + ath11k_pcic_free_irq(ab); err_ce_free: ath11k_ce_free_pipes(ab); @@ -1526,7 +790,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) struct ath11k_base *ab = pci_get_drvdata(pdev); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); + ath11k_pcic_set_irq_affinity_hint(ab_pci, NULL); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_pci_power_down(ab); @@ -1542,7 +806,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) qmi_fail: ath11k_mhi_unregister(ab_pci); - ath11k_pci_free_irq(ab); + ath11k_pcic_free_irq(ab); ath11k_pci_free_msi(ab_pci); ath11k_pci_free_region(ab_pci); diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index 31ee843c1cab..038e4152987c 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -53,21 +53,6 @@ #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 -#define ATH11K_PCI_IRQ_CE0_OFFSET 3 -#define ATH11K_PCI_IRQ_DP_OFFSET 14 - -#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000 -#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c -#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19) -#define ATH11K_PCI_WINDOW_START 0x80000 -#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0) - -/* BAR0 + 4k is always accessible, and no - * need to force wakeup. 
- * 4K - 32 = 0xFE0 - */ -#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0 - struct ath11k_msi_user { char *name; int num_vectors; @@ -113,30 +98,4 @@ static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab) return (struct ath11k_pci *)ab->drv_priv; } -int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name, - int *num_vectors, u32 *user_base_data, - u32 *base_vector); -int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector); -void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value); -u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset); -void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, - u32 *msi_addr_hi); -void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx); -void ath11k_pci_free_irq(struct ath11k_base *ab); -int ath11k_pci_config_irq(struct ath11k_base *ab); -void ath11k_pci_ext_irq_enable(struct ath11k_base *ab); -void ath11k_pci_ext_irq_disable(struct ath11k_base *ab); -void ath11k_pci_stop(struct ath11k_base *ab); -int ath11k_pci_start(struct ath11k_base *ab); -int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe); -void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab); -void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab); -int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, - int *num_vectors, u32 *user_base_data, - u32 *base_vector); -void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci); -int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, - const struct cpumask *m); - #endif diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c new file mode 100644 index 000000000000..391c308455e5 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -0,0 +1,747 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include "core.h" +#include "pcic.h" +#include "debug.h" + +static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { + "bhi", + "mhi-er0", + "mhi-er1", + "ce0", + "ce1", + "ce2", + "ce3", + "ce4", + "ce5", + "ce6", + "ce7", + "ce8", + "ce9", + "ce10", + "ce11", + "host2wbm-desc-feed", + "host2reo-re-injection", + "host2reo-command", + "host2rxdma-monitor-ring3", + "host2rxdma-monitor-ring2", + "host2rxdma-monitor-ring1", + "reo2ost-exception", + "wbm2host-rx-release", + "reo2host-status", + "reo2host-destination-ring4", + "reo2host-destination-ring3", + "reo2host-destination-ring2", + "reo2host-destination-ring1", + "rxdma2host-monitor-destination-mac3", + "rxdma2host-monitor-destination-mac2", + "rxdma2host-monitor-destination-mac1", + "ppdu-end-interrupts-mac3", + "ppdu-end-interrupts-mac2", + "ppdu-end-interrupts-mac1", + "rxdma2host-monitor-status-ring-mac3", + "rxdma2host-monitor-status-ring-mac2", + "rxdma2host-monitor-status-ring-mac1", + "host2rxdma-host-buf-ring-mac3", + "host2rxdma-host-buf-ring-mac2", + "host2rxdma-host-buf-ring-mac1", + "rxdma2host-destination-ring-mac3", + "rxdma2host-destination-ring-mac2", + "rxdma2host-destination-ring-mac1", + "host2tcl-input-ring4", + "host2tcl-input-ring3", + "host2tcl-input-ring2", + "host2tcl-input-ring1", + "wbm2host-tx-completions-ring3", + "wbm2host-tx-completions-ring2", + "wbm2host-tx-completions-ring1", + "tcl2host-status-ring", +}; + +void ath11k_pcic_aspm_restore(struct ath11k_pci *ab_pci) +{ + if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) + pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, + ab_pci->link_ctl); +} + +static inline void ath11k_pcic_select_window(struct ath11k_pci *ab_pci, u32 offset) +{ + struct ath11k_base *ab = ab_pci->ab; + + u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset); + + lockdep_assert_held(&ab_pci->window_lock); + + if (window != ab_pci->register_window) { + iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, + ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); + ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); + ab_pci->register_window = window; + } +} + +static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab, + u32 offset) +{ + u32 window_start; + + /* If offset lies within DP register range, use 3rd window */ + if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) + window_start = 3 * ATH11K_PCI_WINDOW_START; + /* If offset lies within CE register range, use 2nd window */ + else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < ATH11K_PCI_WINDOW_RANGE_MASK) + window_start = 2 * ATH11K_PCI_WINDOW_START; + else + window_start = ATH11K_PCI_WINDOW_START; + + return window_start; +} + +void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 window_start; + int ret = 0; + + /* for offset beyond BAR + 4K - 32, may + * need to wakeup MHI to access. 
+ */ + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) + ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + + if (offset < ATH11K_PCI_WINDOW_START) { + iowrite32(value, ab->mem + offset); + } else { + if (ab->bus_params.static_window_map) + window_start = ath11k_pcic_get_window_start(ab, offset); + else + window_start = ATH11K_PCI_WINDOW_START; + + if (window_start == ATH11K_PCI_WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath11k_pcic_select_window(ab_pci, offset); + iowrite32(value, ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + iowrite32(value, ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + } + } + + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && + !ret) + mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); +} + +u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 val, window_start; + int ret = 0; + + /* for offset beyond BAR + 4K - 32, may + * need to wakeup MHI to access. + */ + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) + ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + + if (offset < ATH11K_PCI_WINDOW_START) { + val = ioread32(ab->mem + offset); + } else { + if (ab->bus_params.static_window_map) + window_start = ath11k_pcic_get_window_start(ab, offset); + else + window_start = ATH11K_PCI_WINDOW_START; + + if (window_start == ATH11K_PCI_WINDOW_START) { + spin_lock_bh(&ab_pci->window_lock); + ath11k_pcic_select_window(ab_pci, offset); + val = ioread32(ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + } else { + val = ioread32(ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + } + } + + if (ab->hw_params.wakeup_mhi && + test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && + !ret) + mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); + + return val; +} + +int ath11k_pcic_get_msi_irq(struct device *dev, unsigned int vector) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + + return pci_irq_vector(pci_dev, vector); +} + +void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + struct pci_dev *pci_dev = to_pci_dev(ab->dev); + + pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, + msi_addr_lo); + + if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { + pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, + msi_addr_hi); + } else { + *msi_addr_hi = 0; + } +} + +int ath11k_pcic_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector) +{ + struct ath11k_base *ab = ab_pci->ab; + const struct ath11k_msi_config *msi_config = ab_pci->msi_config; + int idx; + + for (idx = 0; idx < msi_config->total_users; idx++) { + if (strcmp(user_name, msi_config->users[idx].name) == 0) { + *num_vectors = msi_config->users[idx].num_vectors; + *base_vector = msi_config->users[idx].base_vector; + *user_base_data = *base_vector + ab_pci->msi_ep_base_data; + + ath11k_dbg(ab, ATH11K_DBG_PCI, + "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: 
%u\n", + user_name, *num_vectors, *user_base_data, + *base_vector); + + return 0; + } + } + + ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); + + return -EINVAL; +} + +void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) +{ + u32 i, msi_data_idx; + + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + if (ce_id == i) + break; + + msi_data_idx++; + } + *msi_idx = msi_data_idx; +} + +int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + + return ath11k_pcic_get_user_msi_assignment(ab_pci, user_name, + num_vectors, user_base_data, + base_vector); +} + +static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab) +{ + int i, j; + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) + free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + + netif_napi_del(&irq_grp->napi); + } +} + +void ath11k_pcic_free_irq(struct ath11k_base *ab) +{ + int i, irq_idx; + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; + free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); + } + + ath11k_pcic_free_ext_irq(ab); +} + +static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 irq_idx; + + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; + enable_irq(ab->irq_num[irq_idx]); +} + +static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 irq_idx; + + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; + disable_irq_nosync(ab->irq_num[irq_idx]); +} + +static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab) +{ + int i; + + clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath11k_pcic_ce_irq_disable(ab, i); + } +} + +static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab) +{ + int i; + int irq_idx; + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; + synchronize_irq(ab->irq_num[irq_idx]); + } +} + +static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t) +{ + struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); + int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; + + ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); + + enable_irq(ce_pipe->ab->irq_num[irq_idx]); +} + +static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg) +{ + struct ath11k_ce_pipe *ce_pipe = arg; + struct ath11k_base *ab = ce_pipe->ab; + int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; + + if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, 
&ab->dev_flags)) + return IRQ_HANDLED; + + /* last interrupt received for this CE */ + ce_pipe->timestamp = jiffies; + + disable_irq_nosync(ab->irq_num[irq_idx]); + + tasklet_schedule(&ce_pipe->intr_tq); + + return IRQ_HANDLED; +} + +static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); + int i; + + /* In case of one MSI vector, we handle irq enable/disable + * in a uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc) +{ + int i; + + clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags); + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + + ath11k_pcic_ext_grp_disable(irq_grp); + + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } + } +} + +static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); + int i; + + /* In case of one MSI vector, we handle irq enable/disable in a + * uniform way since we only have one irq + */ + if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return; + + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab) +{ + int i; + + set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } + ath11k_pcic_ext_grp_enable(irq_grp); + } +} + +static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab) +{ + int i, j, irq_idx; + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) { + irq_idx = irq_grp->irqs[j]; + synchronize_irq(ab->irq_num[irq_idx]); + } + } +} + +void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab) +{ + __ath11k_pcic_ext_irq_disable(ab); + ath11k_pcic_sync_ext_irqs(ab); +} + +static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget) +{ + struct ath11k_ext_irq_grp *irq_grp = container_of(napi, + struct ath11k_ext_irq_grp, + napi); + struct ath11k_base *ab = irq_grp->ab; + int work_done; + int i; + + work_done = ath11k_dp_service_srng(ab, irq_grp, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); + } + + if (work_done > budget) + work_done = budget; + + return work_done; +} + +static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg) +{ + struct ath11k_ext_irq_grp *irq_grp = arg; + struct ath11k_base *ab = irq_grp->ab; + int i; + + if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) + return IRQ_HANDLED; + + ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); + + /* last interrupt received for this group */ + irq_grp->timestamp = jiffies; + + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); + + napi_schedule(&irq_grp->napi); + + return IRQ_HANDLED; +} + 
+static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + int i, j, ret, num_vectors = 0; + u32 user_base_data = 0, base_vector = 0; + + ret = ath11k_pcic_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", + &num_vectors, + &user_base_data, + &base_vector); + if (ret < 0) + return ret; + + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + u32 num_irq = 0; + + irq_grp->ab = ab; + irq_grp->grp_id = i; + init_dummy_netdev(&irq_grp->napi_ndev); + netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + ath11k_pcic_ext_grp_napi_poll, NAPI_POLL_WEIGHT); + + if (ab->hw_params.ring_mask->tx[i] || + ab->hw_params.ring_mask->rx[i] || + ab->hw_params.ring_mask->rx_err[i] || + ab->hw_params.ring_mask->rx_wbm_rel[i] || + ab->hw_params.ring_mask->reo_status[i] || + ab->hw_params.ring_mask->rxdma2host[i] || + ab->hw_params.ring_mask->host2rxdma[i] || + ab->hw_params.ring_mask->rx_mon_status[i]) { + num_irq = 1; + } + + irq_grp->num_irq = num_irq; + irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i; + + for (j = 0; j < irq_grp->num_irq; j++) { + int irq_idx = irq_grp->irqs[j]; + int vector = (i % num_vectors) + base_vector; + int irq = ath11k_pcic_get_msi_irq(ab->dev, vector); + + ab->irq_num[irq_idx] = irq; + + ath11k_dbg(ab, ATH11K_DBG_PCI, + "irq:%d group:%d\n", irq, i); + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler, + ab_pci->irq_flags, + "DP_EXT_IRQ", irq_grp); + if (ret) { + ath11k_err(ab, "failed request irq %d: %d\n", + vector, ret); + return ret; + } + } + ath11k_pcic_ext_grp_disable(irq_grp); + } + + return 0; +} + +int ath11k_pcic_set_irq_affinity_hint(struct ath11k_pci *ab_pci, + const struct cpumask *m) +{ + if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + return 0; + + return irq_set_affinity_hint(ab_pci->pdev->irq, m); +} + +int ath11k_pcic_config_irq(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + struct ath11k_ce_pipe *ce_pipe; + u32 msi_data_start; + u32 msi_data_count, msi_data_idx; + u32 msi_irq_start; + unsigned int msi_data; + int irq, i, ret, irq_idx; + + ret = ath11k_pcic_get_user_msi_assignment(ath11k_pci_priv(ab), + "CE", &msi_data_count, + &msi_data_start, &msi_irq_start); + if (ret) + return ret; + + ret = ath11k_pcic_set_irq_affinity_hint(ab_pci, cpumask_of(0)); + if (ret) { + ath11k_err(ab, "failed to set irq affinity %d\n", ret); + return ret; + } + + /* Configure CE irqs */ + for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; + irq = ath11k_pcic_get_msi_irq(ab->dev, msi_data); + ce_pipe = &ab->ce.ce_pipe[i]; + + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; + + tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet); + + ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler, + ab_pci->irq_flags, irq_name[irq_idx], + ce_pipe); + if (ret) { + ath11k_err(ab, "failed to request irq %d: %d\n", + irq_idx, ret); + goto err_irq_affinity_cleanup; + } + + ab->irq_num[irq_idx] = irq; + msi_data_idx++; + + ath11k_pcic_ce_irq_disable(ab, i); + } + + ret = ath11k_pcic_ext_irq_config(ab); + if (ret) + goto err_irq_affinity_cleanup; + + return 0; + +err_irq_affinity_cleanup: + ath11k_pcic_set_irq_affinity_hint(ab_pci, NULL); + return ret; +} + +void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab) +{ + int i; + + 
set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); + + for (i = 0; i < ab->hw_params.ce_count; i++) { + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath11k_pcic_ce_irq_enable(ab, i); + } +} + +static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params.ce_count; i++) { + struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; + + if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + tasklet_kill(&ce_pipe->intr_tq); + } +} + +void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab) +{ + ath11k_pcic_ce_irqs_disable(ab); + ath11k_pcic_sync_ce_irqs(ab); + ath11k_pcic_kill_tasklets(ab); +} + +void ath11k_pcic_stop(struct ath11k_base *ab) +{ + ath11k_pcic_ce_irq_disable_sync(ab); + ath11k_ce_cleanup_pipes(ab); +} + +int ath11k_pcic_start(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + + set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); + + /* TODO: for now don't restore ASPM in case of single MSI + * vector as MHI register reading in M2 causes system hang. + */ + if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + ath11k_pcic_aspm_restore(ab_pci); + else + ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); + + ath11k_pcic_ce_irqs_enable(ab); + ath11k_ce_rx_post_buf(ab); + + return 0; +} + +int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { + entry = &ab->hw_params.svc_to_ce_map[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (WARN_ON(!ul_set || !dl_set)) + return -ENOENT; + + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h new file mode 100644 index 000000000000..6780f7e8bc64 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/pcic.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _ATH11K_PCI_CMN_H +#define _ATH11K_PCI_CMN_H + +#include "core.h" +#include "pci.h" + +#define ATH11K_PCI_IRQ_CE0_OFFSET 3 +#define ATH11K_PCI_IRQ_DP_OFFSET 14 + +#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000 +#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c +#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19) +#define ATH11K_PCI_WINDOW_START 0x80000 +#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0) + +/* BAR0 + 4k is always accessible, and no + * need to force wakeup. 
+ * 4K - 32 = 0xFE0 + */ +#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0 + +int ath11k_pcic_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector); +int ath11k_pcic_get_msi_irq(struct device *dev, unsigned int vector); +void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value); +u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset); +void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, + u32 *msi_addr_hi); +void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx); +void ath11k_pcic_free_irq(struct ath11k_base *ab); +int ath11k_pcic_config_irq(struct ath11k_base *ab); +void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab); +void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab); +void ath11k_pcic_stop(struct ath11k_base *ab); +int ath11k_pcic_start(struct ath11k_base *ab); +int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab); +void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab); +int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector); +void ath11k_pcic_aspm_restore(struct ath11k_pci *ab_pci); +int ath11k_pcic_set_irq_affinity_hint(struct ath11k_pci *ab_pci, + const struct cpumask *m); +#endif -- cgit v1.2.3 From 8d06b8023ace027dc31a9cb3c85c3c8fe83289c5 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 1 Apr 2022 14:53:08 +0300 Subject: ath11k: Choose MSI config based on HW revision Instead of selecting MSI config based on magic numbers, make the assignment based on HW revision. The logic is similar to the selection of HW params. This improves readability of the code and also simplifies new additions. 
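The lookup introduced by this patch follows the same pattern ath11k already uses to pick its hw_params: walk a static table keyed by the hardware revision and bail out if nothing matches. Below is a minimal, self-contained sketch of that pattern in plain C; the types and revision constants are simplified stand-ins invented for illustration, while the driver's real structures (struct ath11k_msi_config, ab->hw_rev) appear in the hunks that follow.

#include <stddef.h>
#include <stdio.h>

/* simplified stand-ins for the driver's MSI user/config tables */
struct msi_user {
	const char *name;
	int num_vectors;
	int base_vector;
};

struct msi_config {
	int total_vectors;
	int total_users;
	const struct msi_user *users;
	unsigned short hw_rev;		/* key used for the lookup */
};

/* made-up revision identifiers standing in for enum ath11k_hw_rev */
enum { HW_QCA6390_HW20 = 1, HW_QCN9074_HW10 = 2 };

static const struct msi_config msi_configs[] = {
	{ 32, 4, (const struct msi_user[]){ { "MHI", 3, 0 }, { "CE", 10, 3 },
		{ "WAKE", 1, 13 }, { "DP", 18, 14 } }, HW_QCA6390_HW20 },
	{ 16, 3, (const struct msi_user[]){ { "MHI", 3, 0 }, { "CE", 5, 3 },
		{ "DP", 8, 8 } }, HW_QCN9074_HW10 },
};

/* pick the entry whose hw_rev matches instead of indexing by magic number */
static const struct msi_config *find_msi_config(unsigned short hw_rev)
{
	for (size_t i = 0; i < sizeof(msi_configs) / sizeof(msi_configs[0]); i++)
		if (msi_configs[i].hw_rev == hw_rev)
			return &msi_configs[i];
	return NULL;			/* unsupported hardware revision */
}

int main(void)
{
	const struct msi_config *cfg = find_msi_config(HW_QCN9074_HW10);

	if (!cfg)
		return 1;
	printf("total_vectors=%d total_users=%d\n",
	       cfg->total_vectors, cfg->total_users);
	return 0;
}

With this shape, supporting a new chip reduces to appending one table entry rather than adjusting index arithmetic in the probe path.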
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328055714.6449-4-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/pci.c | 31 +++------------ drivers/net/wireless/ath/ath11k/pci.h | 1 + drivers/net/wireless/ath/ath11k/pcic.c | 70 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/pcic.h | 1 + 4 files changed, 78 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index bffbcace63ae..ab6572b14937 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -43,28 +43,6 @@ static const struct ath11k_bus_params ath11k_pci_bus_params = { .fixed_mem_region = false, }; -static const struct ath11k_msi_config ath11k_msi_config[] = { - { - .total_vectors = 32, - .total_users = 4, - .users = (struct ath11k_msi_user[]) { - { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, - { .name = "CE", .num_vectors = 10, .base_vector = 3 }, - { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, - { .name = "DP", .num_vectors = 18, .base_vector = 14 }, - }, - }, - { - .total_vectors = 16, - .total_users = 3, - .users = (struct ath11k_msi_user[]) { - { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, - { .name = "CE", .num_vectors = 5, .base_vector = 3 }, - { .name = "DP", .num_vectors = 8, .base_vector = 8 }, - }, - }, -}; - static const struct ath11k_msi_config msi_config_one_msi = { .total_vectors = 1, .total_users = 4, @@ -667,10 +645,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ret = -EOPNOTSUPP; goto err_pci_free_region; } - ab_pci->msi_config = &ath11k_msi_config[0]; break; case QCN9074_DEVICE_ID: - ab_pci->msi_config = &ath11k_msi_config[1]; ab->bus_params.static_window_map = true; ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; @@ -700,7 +676,6 @@ unsupported_wcn6855_soc: ret = -EOPNOTSUPP; goto err_pci_free_region; } - ab_pci->msi_config = &ath11k_msi_config[0]; break; default: dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", @@ -709,6 +684,12 @@ unsupported_wcn6855_soc: goto err_pci_free_region; } + ret = ath11k_pcic_init_msi_config(ab); + if (ret) { + ath11k_err(ab, "failed to init msi config: %d\n", ret); + goto err_pci_free_region; + } + ret = ath11k_pci_alloc_msi(ab_pci); if (ret) { ath11k_err(ab, "failed to enable msi: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index 038e4152987c..7e225de6eb6e 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -63,6 +63,7 @@ struct ath11k_msi_config { int total_vectors; int total_users; struct ath11k_msi_user *users; + u16 hw_rev; }; enum ath11k_pci_flags { diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index 391c308455e5..eab153453106 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -63,6 +63,76 @@ static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { "tcl2host-status-ring", }; +static const struct ath11k_msi_config ath11k_msi_config[] = { + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 
10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_QCA6390_HW20, + }, + { + .total_vectors = 16, + .total_users = 3, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 5, .base_vector = 3 }, + { .name = "DP", .num_vectors = 8, .base_vector = 8 }, + }, + .hw_rev = ATH11K_HW_QCN9074_HW10, + }, + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_WCN6855_HW20, + }, + { + .total_vectors = 32, + .total_users = 4, + .users = (struct ath11k_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_WCN6855_HW21, + }, +}; + +int ath11k_pcic_init_msi_config(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + const struct ath11k_msi_config *msi_config; + int i; + + for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) { + msi_config = &ath11k_msi_config[i]; + + if (msi_config->hw_rev == ab->hw_rev) + break; + } + + if (i == ARRAY_SIZE(ath11k_msi_config)) { + ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n", + ab->hw_rev); + return -EINVAL; + } + + ab_pci->msi_config = msi_config; + return 0; +} +EXPORT_SYMBOL(ath11k_pcic_init_msi_config); + void ath11k_pcic_aspm_restore(struct ath11k_pci *ab_pci) { if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h index 6780f7e8bc64..d5fe315ccc61 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.h +++ b/drivers/net/wireless/ath/ath11k/pcic.h @@ -50,4 +50,5 @@ int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, void ath11k_pcic_aspm_restore(struct ath11k_pci *ab_pci); int ath11k_pcic_set_irq_affinity_hint(struct ath11k_pci *ab_pci, const struct cpumask *m); +int ath11k_pcic_init_msi_config(struct ath11k_base *ab); #endif -- cgit v1.2.3 From 0cfaf2243e9eef8ed32cdde6467a7e123a9f915f Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 1 Apr 2022 14:53:08 +0300 Subject: ath11k: Refactor MSI logic to support WCN6750 Refactor MSI logic in order to support hybrid bus devices like WCN6750. 
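The core of this refactor is moving the MSI bookkeeping (config pointer, endpoint base data, MSI address) out of the PCI-private structure and into the bus-agnostic struct ath11k_base, so the shared pcic.c code never needs struct ath11k_pci or struct pci_dev. A rough, self-contained sketch of that ownership split is below; the types and values are placeholders chosen for illustration, while the real fields are the ab->pci.msi.* members added to core.h in this patch.

#include <stdint.h>
#include <stdio.h>

/* bus-agnostic device state shared by PCI and hybrid-bus (e.g. WCN6750) paths */
struct base_dev {
	struct {
		struct {
			uint32_t ep_base_data;
			uint32_t addr_lo;
			uint32_t addr_hi;
		} msi;
	} pci;
};

/* common code touches only the shared struct, never a PCI-private one */
static void common_get_msi_address(const struct base_dev *ab,
				   uint32_t *lo, uint32_t *hi)
{
	*lo = ab->pci.msi.addr_lo;
	*hi = ab->pci.msi.addr_hi;
}

/* the bus-specific probe path fills the fields in; on PCI this would come
 * from the MSI capability registers, on a hybrid bus from firmware. The
 * numbers below are placeholders, not real register contents.
 */
static void pci_probe_fill_msi(struct base_dev *ab)
{
	ab->pci.msi.ep_base_data = 0x40;
	ab->pci.msi.addr_lo = 0xfee00000;
	ab->pci.msi.addr_hi = 0;
}

int main(void)
{
	struct base_dev ab = { 0 };
	uint32_t lo, hi;

	pci_probe_fill_msi(&ab);
	common_get_msi_address(&ab, &lo, &hi);
	printf("msi addr 0x%08x%08x, data base %u\n",
	       (unsigned int)hi, (unsigned int)lo,
	       (unsigned int)ab.pci.msi.ep_base_data);
	return 0;
}

A hybrid-bus device such as WCN6750 can then reuse the common accessor unchanged, because only the probe-time fill-in differs per bus.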
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328055714.6449-5-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.h | 22 ++++++++++++++++++ drivers/net/wireless/ath/ath11k/mhi.c | 3 +-- drivers/net/wireless/ath/ath11k/pci.c | 29 +++++++++++++++--------- drivers/net/wireless/ath/ath11k/pci.h | 16 ------------- drivers/net/wireless/ath/ath11k/pcic.c | 41 +++++++--------------------------- drivers/net/wireless/ath/ath11k/pcic.h | 5 +---- 6 files changed, 51 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 17a61d0d684e..4e0861a94a14 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -769,6 +769,19 @@ struct ath11k_soc_dp_stats { struct ath11k_dp_ring_bp_stats bp_stats; }; +struct ath11k_msi_user { + char *name; + int num_vectors; + u32 base_vector; +}; + +struct ath11k_msi_config { + int total_vectors; + int total_users; + struct ath11k_msi_user *users; + u16 hw_rev; +}; + /* Master structure to hold the hw data which may be used in core module */ struct ath11k_base { enum ath11k_hw_rev hw_rev; @@ -905,6 +918,15 @@ struct ath11k_base { u32 subsystem_device; } id; + struct { + struct { + const struct ath11k_msi_config *config; + u32 ep_base_data; + u32 addr_lo; + u32 addr_hi; + } msi; + } pci; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 3724eebba4a2..5de255969364 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -258,8 +258,7 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) int *irq; unsigned int msi_data; - ret = ath11k_pcic_get_user_msi_assignment(ab_pci, - "MHI", &num_vectors, + ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors, &user_base_data, &base_vector); if (ret) return ret; diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index ab6572b14937..fd5487d7045e 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -307,12 +307,13 @@ static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci) static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; - const struct ath11k_msi_config *msi_config = ab_pci->msi_config; + const struct ath11k_msi_config *msi_config = ab->pci.msi.config; + struct pci_dev *pci_dev = ab_pci->pdev; struct msi_desc *msi_desc; int num_vectors; int ret; - num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, + num_vectors = pci_alloc_irq_vectors(pci_dev, msi_config->total_vectors, msi_config->total_vectors, PCI_IRQ_MSI); @@ -329,7 +330,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) goto reset_msi_config; } clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); - ab_pci->msi_config = &msi_config_one_msi; + ab->pci.msi.config = &msi_config_one_msi; ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n"); } @@ -344,11 +345,19 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) goto free_msi_vector; } - ab_pci->msi_ep_base_data = 
msi_desc->msg.data; - if (msi_desc->pci.msi_attrib.is_64) - set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); + ab->pci.msi.ep_base_data = msi_desc->msg.data; + + pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, + &ab->pci.msi.addr_lo); + + if (msi_desc->pci.msi_attrib.is_64) { + pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, + &ab->pci.msi.addr_hi); + } else { + ab->pci.msi.addr_hi = 0; + } - ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); + ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data); return 0; @@ -375,10 +384,10 @@ static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci) return -EINVAL; } - ab_pci->msi_ep_base_data = msi_desc->msg.data; + ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data; ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n", - ab_pci->msi_ep_base_data); + ab_pci->ab->pci.msi.ep_base_data); return 0; } @@ -562,7 +571,7 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = { .irq_enable = ath11k_pcic_ext_irq_enable, .irq_disable = ath11k_pcic_ext_irq_disable, .get_msi_address = ath11k_pcic_get_msi_address, - .get_user_msi_vector = ath11k_get_user_msi_assignment, + .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment, .map_service_to_pipe = ath11k_pcic_map_service_to_pipe, .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index 7e225de6eb6e..c9cae48d784e 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -53,22 +53,8 @@ #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 -struct ath11k_msi_user { - char *name; - int num_vectors; - u32 base_vector; -}; - -struct ath11k_msi_config { - int total_vectors; - int total_users; - struct ath11k_msi_user *users; - u16 hw_rev; -}; - enum ath11k_pci_flags { ATH11K_PCI_FLAG_INIT_DONE, - ATH11K_PCI_FLAG_IS_MSI_64, ATH11K_PCI_ASPM_RESTORE, ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, }; @@ -78,9 +64,7 @@ struct ath11k_pci { struct ath11k_base *ab; u16 dev_id; char amss_path[100]; - u32 msi_ep_base_data; struct mhi_controller *mhi_ctrl; - const struct ath11k_msi_config *msi_config; unsigned long mhi_state; u32 register_window; diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index eab153453106..c743095387f1 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -111,7 +111,6 @@ static const struct ath11k_msi_config ath11k_msi_config[] = { int ath11k_pcic_init_msi_config(struct ath11k_base *ab) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); const struct ath11k_msi_config *msi_config; int i; @@ -128,7 +127,7 @@ int ath11k_pcic_init_msi_config(struct ath11k_base *ab) return -EINVAL; } - ab_pci->msi_config = msi_config; + ab->pci.msi.config = msi_config; return 0; } EXPORT_SYMBOL(ath11k_pcic_init_msi_config); @@ -267,33 +266,22 @@ int ath11k_pcic_get_msi_irq(struct device *dev, unsigned int vector) void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, u32 *msi_addr_hi) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - struct pci_dev *pci_dev = to_pci_dev(ab->dev); - - pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, - msi_addr_lo); - - if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { - pci_read_config_dword(pci_dev, 
pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, - msi_addr_hi); - } else { - *msi_addr_hi = 0; - } + *msi_addr_lo = ab->pci.msi.addr_lo; + *msi_addr_hi = ab->pci.msi.addr_hi; } -int ath11k_pcic_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name, +int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, u32 *base_vector) { - struct ath11k_base *ab = ab_pci->ab; - const struct ath11k_msi_config *msi_config = ab_pci->msi_config; + const struct ath11k_msi_config *msi_config = ab->pci.msi.config; int idx; for (idx = 0; idx < msi_config->total_users; idx++) { if (strcmp(user_name, msi_config->users[idx].name) == 0) { *num_vectors = msi_config->users[idx].num_vectors; *base_vector = msi_config->users[idx].base_vector; - *user_base_data = *base_vector + ab_pci->msi_ep_base_data; + *user_base_data = *base_vector + ab->pci.msi.ep_base_data; ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", @@ -325,17 +313,6 @@ void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) *msi_idx = msi_data_idx; } -int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, - int *num_vectors, u32 *user_base_data, - u32 *base_vector) -{ - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - - return ath11k_pcic_get_user_msi_assignment(ab_pci, user_name, - num_vectors, user_base_data, - base_vector); -} - static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab) { int i, j; @@ -586,8 +563,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) int i, j, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0; - ret = ath11k_pcic_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", - &num_vectors, + ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors, &user_base_data, &base_vector); if (ret < 0) @@ -662,8 +638,7 @@ int ath11k_pcic_config_irq(struct ath11k_base *ab) unsigned int msi_data; int irq, i, ret, irq_idx; - ret = ath11k_pcic_get_user_msi_assignment(ath11k_pci_priv(ab), - "CE", &msi_data_count, + ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count, &msi_data_start, &msi_irq_start); if (ret) return ret; diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h index d5fe315ccc61..34e8c069d6e4 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.h +++ b/drivers/net/wireless/ath/ath11k/pcic.h @@ -25,7 +25,7 @@ */ #define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0 -int ath11k_pcic_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name, +int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, u32 *base_vector); int ath11k_pcic_get_msi_irq(struct device *dev, unsigned int vector); @@ -44,9 +44,6 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab); void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab); -int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, - int *num_vectors, u32 *user_base_data, - u32 *base_vector); void ath11k_pcic_aspm_restore(struct ath11k_pci *ab_pci); int ath11k_pcic_set_irq_affinity_hint(struct ath11k_pci *ab_pci, const struct cpumask *m); -- cgit v1.2.3 From 5b32b6dd966338005671780c1df02327582c4be4 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 1 Apr 2022 14:53:08 +0300 Subject: ath11k: Remove core PCI references from PCI common code 
Remove core PCI and ath11k PCI references(struct ath11k_pci) from PCI common code. Since, PCI common code will be used by hybrid bus devices, this code should be independent from ATH11K PCI references and Linux core PCI references like struct pci_dev. Since this change introduces function callbacks for bus wakeup and bus release operations, wakeup_mhi HW param is no longer needed and hence it is removed completely. Alternatively, bus wakeup/release ops for QCA9074 are initialized to NULL as QCA9704 does not need bus wakeup/release for register accesses. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328055714.6449-6-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 6 -- drivers/net/wireless/ath/ath11k/core.h | 12 +++ drivers/net/wireless/ath/ath11k/hw.h | 2 +- drivers/net/wireless/ath/ath11k/mhi.c | 6 +- drivers/net/wireless/ath/ath11k/pci.c | 146 +++++++++++++++++++++++++--- drivers/net/wireless/ath/ath11k/pci.h | 5 +- drivers/net/wireless/ath/ath11k/pcic.c | 169 ++++++++++++--------------------- drivers/net/wireless/ath/ath11k/pcic.h | 5 - 8 files changed, 211 insertions(+), 140 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 7f4462cf5787..3e8d4eabbe62 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -96,7 +96,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, - .wakeup_mhi = false, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, @@ -163,7 +162,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, - .wakeup_mhi = false, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, @@ -229,7 +227,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, - .wakeup_mhi = true, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, @@ -295,7 +292,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = true, .alloc_cacheable_memory = true, - .wakeup_mhi = false, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, @@ -361,7 +357,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, - .wakeup_mhi = true, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, @@ -426,7 +421,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, - .wakeup_mhi = true, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, diff --git a/drivers/net/wireless/ath/ath11k/core.h 
b/drivers/net/wireless/ath/ath11k/core.h index 4e0861a94a14..fa299bfb4efc 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -239,6 +239,8 @@ enum ath11k_dev_flags { ATH11K_FLAG_CE_IRQ_ENABLED, ATH11K_FLAG_EXT_IRQ_ENABLED, ATH11K_FLAG_FIXED_MEM_RGN, + ATH11K_FLAG_DEVICE_INIT_DONE, + ATH11K_FLAG_MULTI_MSI_VECTORS, }; enum ath11k_monitor_flags { @@ -728,6 +730,14 @@ struct ath11k_bus_params { bool static_window_map; }; +struct ath11k_pci_ops { + int (*wakeup)(struct ath11k_base *ab); + void (*release)(struct ath11k_base *ab); + int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector); + void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value); + u32 (*window_read32)(struct ath11k_base *ab, u32 offset); +}; + /* IPQ8074 HW channel counters frequency value in hertz */ #define IPQ8074_CC_FREQ_HERTZ 320000 @@ -925,6 +935,8 @@ struct ath11k_base { u32 addr_lo; u32 addr_hi; } msi; + + const struct ath11k_pci_ops *ops; } pci; /* must be last */ diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 44025bc5d082..08f958c03ec4 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_HW_H @@ -189,7 +190,6 @@ struct ath11k_hw_params { const struct ath11k_hw_hal_params *hal_params; bool supports_dynamic_smps_6ghz; bool alloc_cacheable_memory; - bool wakeup_mhi; bool supports_rssi_stats; bool fw_wmi_diag_event; bool current_cc_support; diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 5de255969364..1aa1e0f01b85 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -273,10 +273,10 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) for (i = 0; i < num_vectors; i++) { msi_data = base_vector; - if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) msi_data += i; - irq[i] = ath11k_pcic_get_msi_irq(ab->dev, msi_data); + irq[i] = ath11k_pci_get_msi_irq(ab, msi_data); } ab_pci->mhi_ctrl->irq = irq; @@ -406,7 +406,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) return ret; } - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index fd5487d7045e..3fd5b416a564 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -36,6 +36,85 @@ static const struct pci_device_id ath11k_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table); +static int ath11k_pci_bus_wake_up(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + + return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); +} + +static void ath11k_pci_bus_release(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + + mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); +} + +static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset) +{ + struct ath11k_base *ab = ab_pci->ab; + + u32 window = 
FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset); + + lockdep_assert_held(&ab_pci->window_lock); + + if (window != ab_pci->register_window) { + iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, + ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); + ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); + ab_pci->register_window = window; + } +} + +static void +ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 window_start = ATH11K_PCI_WINDOW_START; + + spin_lock_bh(&ab_pci->window_lock); + ath11k_pci_select_window(ab_pci, offset); + iowrite32(value, ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); +} + +static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + u32 window_start = ATH11K_PCI_WINDOW_START; + u32 val; + + spin_lock_bh(&ab_pci->window_lock); + ath11k_pci_select_window(ab_pci, offset); + val = ioread32(ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + spin_unlock_bh(&ab_pci->window_lock); + + return val; +} + +int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector) +{ + struct pci_dev *pci_dev = to_pci_dev(ab->dev); + + return pci_irq_vector(pci_dev, vector); +} + +static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = { + .wakeup = ath11k_pci_bus_wake_up, + .release = ath11k_pci_bus_release, + .get_msi_irq = ath11k_pci_get_msi_irq, + .window_write32 = ath11k_pci_window_write32, + .window_read32 = ath11k_pci_window_read32, +}; + +static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = { + .get_msi_irq = ath11k_pci_get_msi_irq, + .window_write32 = ath11k_pci_window_write32, + .window_read32 = ath11k_pci_window_read32, +}; + static const struct ath11k_bus_params ath11k_pci_bus_params = { .mhi_support = true, .m3_fw_support = true, @@ -318,8 +397,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) msi_config->total_vectors, PCI_IRQ_MSI); if (num_vectors == msi_config->total_vectors) { - set_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); - ab_pci->irq_flags = IRQF_SHARED; + set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); } else { num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 1, @@ -329,9 +407,8 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) ret = -EINVAL; goto reset_msi_config; } - clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); + clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); ab->pci.msi.config = &msi_config_one_msi; - ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n"); } ath11k_info(ab, "MSI vectors: %d\n", num_vectors); @@ -487,13 +564,20 @@ static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci) set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags); } +static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) +{ + if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) + pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, + ab_pci->link_ctl); +} + static int ath11k_pci_power_up(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); int ret; ab_pci->register_window = 0; - clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); + clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pci_sw_reset(ab_pci->ab, true); /* Disable ASPM during firmware download due to problems switching @@ -520,14 +604,14 @@ static void ath11k_pci_power_down(struct ath11k_base *ab) 
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); /* restore aspm in case firmware bootup fails */ - ath11k_pcic_aspm_restore(ab_pci); + ath11k_pci_aspm_restore(ab_pci); ath11k_pci_force_wake(ab_pci->ab); ath11k_pci_msi_disable(ab_pci); ath11k_mhi_stop(ab_pci); - clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); + clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pci_sw_reset(ab_pci->ab, false); } @@ -559,8 +643,25 @@ static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab) ath11k_pcic_ce_irq_disable_sync(ab); } +static int ath11k_pci_start(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + + /* TODO: for now don't restore ASPM in case of single MSI + * vector as MHI register reading in M2 causes system hang. + */ + if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + ath11k_pci_aspm_restore(ab_pci); + else + ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); + + ath11k_pcic_start(ab); + + return 0; +} + static const struct ath11k_hif_ops ath11k_pci_hif_ops = { - .start = ath11k_pcic_start, + .start = ath11k_pci_start, .stop = ath11k_pcic_stop, .read32 = ath11k_pcic_read32, .write32 = ath11k_pcic_write32, @@ -592,6 +693,15 @@ static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 * *major, *minor); } +static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, + const struct cpumask *m) +{ + if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags)) + return 0; + + return irq_set_affinity_hint(ab_pci->pdev->irq, m); +} + static int ath11k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_dev) { @@ -654,9 +764,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ret = -EOPNOTSUPP; goto err_pci_free_region; } + + ab->pci.ops = &ath11k_pci_ops_qca6390; break; case QCN9074_DEVICE_ID: ab->bus_params.static_window_map = true; + ab->pci.ops = &ath11k_pci_ops_qcn9074; ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; case WCN6855_DEVICE_ID: @@ -685,6 +798,8 @@ unsupported_wcn6855_soc: ret = -EOPNOTSUPP; goto err_pci_free_region; } + + ab->pci.ops = &ath11k_pci_ops_qca6390; break; default: dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", @@ -733,6 +848,12 @@ unsupported_wcn6855_soc: goto err_ce_free; } + ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); + if (ret) { + ath11k_err(ab, "failed to set irq affinity %d\n", ret); + goto err_free_irq; + } + /* kernel may allocate a dummy vector before request_irq and * then allocate a real vector when request_irq is called. 
* So get msi_data here again to avoid spurious interrupt @@ -741,16 +862,19 @@ unsupported_wcn6855_soc: ret = ath11k_pci_config_msi_data(ab_pci); if (ret) { ath11k_err(ab, "failed to config msi_data: %d\n", ret); - goto err_free_irq; + goto err_irq_affinity_cleanup; } ret = ath11k_core_init(ab); if (ret) { ath11k_err(ab, "failed to init core: %d\n", ret); - goto err_free_irq; + goto err_irq_affinity_cleanup; } return 0; +err_irq_affinity_cleanup: + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); + err_free_irq: ath11k_pcic_free_irq(ab); @@ -780,7 +904,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) struct ath11k_base *ab = pci_get_drvdata(pdev); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - ath11k_pcic_set_irq_affinity_hint(ab_pci, NULL); + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_pci_power_down(ab); diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index c9cae48d784e..16a000b9cc5e 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -54,9 +54,7 @@ #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 enum ath11k_pci_flags { - ATH11K_PCI_FLAG_INIT_DONE, ATH11K_PCI_ASPM_RESTORE, - ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, }; struct ath11k_pci { @@ -74,8 +72,6 @@ struct ath11k_pci { /* enum ath11k_pci_flags */ unsigned long flags; u16 link_ctl; - - unsigned long irq_flags; }; static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab) @@ -83,4 +79,5 @@ static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab) return (struct ath11k_pci *)ab->drv_priv; } +int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector); #endif diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index c743095387f1..63c678aea29e 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -4,7 +4,6 @@ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. */ -#include #include "core.h" #include "pcic.h" #include "debug.h" @@ -132,29 +131,6 @@ int ath11k_pcic_init_msi_config(struct ath11k_base *ab) } EXPORT_SYMBOL(ath11k_pcic_init_msi_config); -void ath11k_pcic_aspm_restore(struct ath11k_pci *ab_pci) -{ - if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) - pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, - ab_pci->link_ctl); -} - -static inline void ath11k_pcic_select_window(struct ath11k_pci *ab_pci, u32 offset) -{ - struct ath11k_base *ab = ab_pci->ab; - - u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset); - - lockdep_assert_held(&ab_pci->window_lock); - - if (window != ab_pci->register_window) { - iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, - ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); - ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); - ab_pci->register_window = window; - } -} - static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab, u32 offset) { @@ -174,17 +150,15 @@ static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab, void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 window_start; int ret = 0; /* for offset beyond BAR + 4K - 32, may - * need to wakeup MHI to access. + * need to wakeup the device to access. 
*/ - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) - ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup) + ret = ab->pci.ops->wakeup(ab); if (offset < ATH11K_PCI_WINDOW_START) { iowrite32(value, ab->mem + offset); @@ -194,38 +168,32 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) else window_start = ATH11K_PCI_WINDOW_START; - if (window_start == ATH11K_PCI_WINDOW_START) { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pcic_select_window(ab_pci, offset); - iowrite32(value, ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); + if (window_start == ATH11K_PCI_WINDOW_START && + ab->pci.ops->window_write32) { + ab->pci.ops->window_write32(ab, offset, value); } else { iowrite32(value, ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } } - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release && !ret) - mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); + ab->pci.ops->release(ab); } u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 val, window_start; int ret = 0; /* for offset beyond BAR + 4K - 32, may - * need to wakeup MHI to access. + * need to wakeup the device to access. */ - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF) - ret = mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup) + ret = ab->pci.ops->wakeup(ab); if (offset < ATH11K_PCI_WINDOW_START) { val = ioread32(ab->mem + offset); @@ -235,34 +203,23 @@ u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) else window_start = ATH11K_PCI_WINDOW_START; - if (window_start == ATH11K_PCI_WINDOW_START) { - spin_lock_bh(&ab_pci->window_lock); - ath11k_pcic_select_window(ab_pci, offset); - val = ioread32(ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - spin_unlock_bh(&ab_pci->window_lock); + if (window_start == ATH11K_PCI_WINDOW_START && + ab->pci.ops->window_read32) { + val = ab->pci.ops->window_read32(ab, offset); } else { val = ioread32(ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } } - if (ab->hw_params.wakeup_mhi && - test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && - offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && + if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && + offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release && !ret) - mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); + ab->pci.ops->release(ab); return val; } -int ath11k_pcic_get_msi_irq(struct device *dev, unsigned int vector) -{ - struct pci_dev *pci_dev = to_pci_dev(dev); - - return pci_irq_vector(pci_dev, vector); -} - void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, u32 *msi_addr_hi) { @@ -343,13 +300,12 @@ void ath11k_pcic_free_irq(struct ath11k_base *ab) static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 irq_idx; /* In case of 
one MSI vector, we handle irq enable/disable in a * uniform way since we only have one irq */ - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; @@ -358,13 +314,12 @@ static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 irq_idx; /* In case of one MSI vector, we handle irq enable/disable in a * uniform way since we only have one irq */ - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; @@ -429,13 +384,13 @@ static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg) static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); + struct ath11k_base *ab = irq_grp->ab; int i; /* In case of one MSI vector, we handle irq enable/disable * in a uniform way since we only have one irq */ - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; for (i = 0; i < irq_grp->num_irq; i++) @@ -463,13 +418,13 @@ static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc) static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab); + struct ath11k_base *ab = irq_grp->ab; int i; /* In case of one MSI vector, we handle irq enable/disable in a * uniform way since we only have one irq */ - if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; for (i = 0; i < irq_grp->num_irq; i++) @@ -557,11 +512,22 @@ static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg) return IRQ_HANDLED; } +static int +ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector) +{ + if (!ab->pci.ops->get_msi_irq) { + WARN_ONCE(1, "get_msi_irq pci op not defined"); + return -EOPNOTSUPP; + } + + return ab->pci.ops->get_msi_irq(ab, vector); +} + static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); int i, j, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0; + unsigned long irq_flags; ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors, &user_base_data, @@ -569,6 +535,10 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) if (ret < 0) return ret; + irq_flags = IRQF_SHARED; + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + irq_flags |= IRQF_NOBALANCING; + for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; @@ -596,7 +566,10 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) for (j = 0; j < irq_grp->num_irq; j++) { int irq_idx = irq_grp->irqs[j]; int vector = (i % num_vectors) + base_vector; - int irq = ath11k_pcic_get_msi_irq(ab->dev, vector); + int irq = ath11k_pcic_get_msi_irq(ab, vector); + + if (irq < 0) + return irq; ab->irq_num[irq_idx] = irq; @@ -605,8 +578,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler, - ab_pci->irq_flags, - "DP_EXT_IRQ", irq_grp); 
+ irq_flags, "DP_EXT_IRQ", irq_grp); if (ret) { ath11k_err(ab, "failed request irq %d: %d\n", vector, ret); @@ -619,35 +591,24 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) return 0; } -int ath11k_pcic_set_irq_affinity_hint(struct ath11k_pci *ab_pci, - const struct cpumask *m) -{ - if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - return 0; - - return irq_set_affinity_hint(ab_pci->pdev->irq, m); -} - int ath11k_pcic_config_irq(struct ath11k_base *ab) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct ath11k_ce_pipe *ce_pipe; u32 msi_data_start; u32 msi_data_count, msi_data_idx; u32 msi_irq_start; unsigned int msi_data; int irq, i, ret, irq_idx; + unsigned long irq_flags; ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count, &msi_data_start, &msi_irq_start); if (ret) return ret; - ret = ath11k_pcic_set_irq_affinity_hint(ab_pci, cpumask_of(0)); - if (ret) { - ath11k_err(ab, "failed to set irq affinity %d\n", ret); - return ret; - } + irq_flags = IRQF_SHARED; + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) + irq_flags |= IRQF_NOBALANCING; /* Configure CE irqs */ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { @@ -655,7 +616,10 @@ int ath11k_pcic_config_irq(struct ath11k_base *ab) continue; msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; - irq = ath11k_pcic_get_msi_irq(ab->dev, msi_data); + irq = ath11k_pcic_get_msi_irq(ab, msi_data); + if (irq < 0) + return irq; + ce_pipe = &ab->ce.ce_pipe[i]; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; @@ -663,12 +627,11 @@ int ath11k_pcic_config_irq(struct ath11k_base *ab) tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet); ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler, - ab_pci->irq_flags, irq_name[irq_idx], - ce_pipe); + irq_flags, irq_name[irq_idx], ce_pipe); if (ret) { ath11k_err(ab, "failed to request irq %d: %d\n", irq_idx, ret); - goto err_irq_affinity_cleanup; + return ret; } ab->irq_num[irq_idx] = irq; @@ -679,13 +642,9 @@ int ath11k_pcic_config_irq(struct ath11k_base *ab) ret = ath11k_pcic_ext_irq_config(ab); if (ret) - goto err_irq_affinity_cleanup; + return ret; return 0; - -err_irq_affinity_cleanup: - ath11k_pcic_set_irq_affinity_hint(ab_pci, NULL); - return ret; } void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab) @@ -730,17 +689,7 @@ void ath11k_pcic_stop(struct ath11k_base *ab) int ath11k_pcic_start(struct ath11k_base *ab) { - struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); - - set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); - - /* TODO: for now don't restore ASPM in case of single MSI - * vector as MHI register reading in M2 causes system hang. 
- */ - if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) - ath11k_pcic_aspm_restore(ab_pci); - else - ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); + set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pcic_ce_irqs_enable(ab); ath11k_ce_rx_post_buf(ab); diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h index 34e8c069d6e4..c53d86289a8e 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.h +++ b/drivers/net/wireless/ath/ath11k/pcic.h @@ -8,7 +8,6 @@ #define _ATH11K_PCI_CMN_H #include "core.h" -#include "pci.h" #define ATH11K_PCI_IRQ_CE0_OFFSET 3 #define ATH11K_PCI_IRQ_DP_OFFSET 14 @@ -28,7 +27,6 @@ int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, u32 *base_vector); -int ath11k_pcic_get_msi_irq(struct device *dev, unsigned int vector); void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value); u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset); void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, @@ -44,8 +42,5 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab); void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab); -void ath11k_pcic_aspm_restore(struct ath11k_pci *ab_pci); -int ath11k_pcic_set_irq_affinity_hint(struct ath11k_pci *ab_pci, - const struct cpumask *m); int ath11k_pcic_init_msi_config(struct ath11k_base *ab); #endif -- cgit v1.2.3 From 50dc9ce9f80554a88e33b73c30851acf2be36ed3 Mon Sep 17 00:00:00 2001 From: Karthikeyan Kathirvel Date: Fri, 1 Apr 2022 14:53:09 +0300 Subject: ath11k: Change max no of active probe SSID and BSSID to fw capability The maximum number of SSIDs for active probe requests is currently reported as 16 (WLAN_SCAN_PARAMS_MAX_SSID) when registering the driver, but the scan_req_params structure only has the capacity to hold 10 SSIDs. This leads to a buffer overflow which can be triggered from wpa_supplicant in userspace: when the SSIDs are copied into the scan_req_params structure in the ath11k_mac_op_hw_scan routine, they can overwrite the extraie pointer. The firmware supports 16 SSIDs and 4 BSSIDs; a probe request is sent for each SSID/BSSID combination, so 64 probe requests are supported in total. So set the maximum number of SSIDs and BSSIDs to 16 and 4 respectively, and remove the redundant SSID and BSSID macros.
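To make the overflow scenario concrete, here is a minimal, self-contained sketch of the failure mode and the guard that prevents it. The structure and function names below are hypothetical simplifications, not the actual ath11k code; it only illustrates why copying more SSIDs than a fixed-size array can hold tramples the field placed right after it, as the extraie pointer is in scan_req_params.

#include <errno.h>

#define MAX_SCAN_SSIDS 16   /* capacity advertised at registration time */

struct scan_ssid {
	unsigned char len;
	unsigned char ssid[32];
};

/* Hypothetical layout: a pointer sits directly after the SSID array,
 * in the same way extraie follows the SSID storage described above.
 */
struct scan_request {
	unsigned int num_ssids;
	struct scan_ssid ssids[MAX_SCAN_SSIDS];
	void *extraie;
};

static int copy_scan_ssids(struct scan_request *req,
			   const struct scan_ssid *in, unsigned int n)
{
	unsigned int i;

	/* Without this check, n > MAX_SCAN_SSIDS writes past ssids[] and
	 * corrupts extraie; with it, oversized requests are rejected.
	 */
	if (n > MAX_SCAN_SSIDS)
		return -EINVAL;

	for (i = 0; i < n; i++)
		req->ssids[i] = in[i];
	req->num_ssids = n;

	return 0;
}

The actual fix takes the other route: it sizes the storage from the firmware limits (WLAN_SCAN_PARAMS_MAX_SSID and WLAN_SCAN_PARAMS_MAX_BSSID) so the advertised capacity and the array capacity can no longer diverge.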
Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.7.0.1-01300-QCAHKSWPL_SILICONZ-1 Signed-off-by: Karthikeyan Kathirvel Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220329150221.21907-1-quic_kathirve@quicinc.com --- drivers/net/wireless/ath/ath11k/wmi.h | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 925b8003feae..c9ffefea549c 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -3089,9 +3089,6 @@ enum scan_dwelltime_adaptive_mode { SCAN_DWELL_MODE_STATIC = 4 }; -#define WLAN_SCAN_MAX_NUM_SSID 10 -#define WLAN_SCAN_MAX_NUM_BSSID 10 - #define WLAN_SSID_MAX_LEN 32 struct element_info { @@ -3106,7 +3103,6 @@ struct wlan_ssid { #define WMI_IE_BITMAP_SIZE 8 -#define WMI_SCAN_MAX_NUM_SSID 0x0A /* prefix used by scan requestor ids on the host */ #define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000 @@ -3114,10 +3110,6 @@ struct wlan_ssid { /* host cycles through the lower 12 bits to generate ids */ #define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000 -#define WLAN_SCAN_PARAMS_MAX_SSID 16 -#define WLAN_SCAN_PARAMS_MAX_BSSID 4 -#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 - /* Values lower than this may be refused by some firmware revisions with a scan * completion with a timedout reason. */ @@ -3313,8 +3305,8 @@ struct scan_req_params { u32 n_probes; u32 *chan_list; u32 notify_scan_events; - struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID]; - struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID]; + struct wlan_ssid ssid[WLAN_SCAN_PARAMS_MAX_SSID]; + struct wmi_mac_addr bssid_list[WLAN_SCAN_PARAMS_MAX_BSSID]; struct element_info extraie; struct element_info htcap; struct element_info vhtcap; -- cgit v1.2.3 From 2dd398dee7aa5ec6b296d9915bbb1c1a76199b4a Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Fri, 1 Apr 2022 14:53:10 +0300 Subject: ath11k: Remove unnecessary delay in ath11k_core_suspend The intended delay in ath11k_core_suspend is introduced in commit d1b0c33850d2 ("ath11k: implement suspend for QCA6390 PCI devices"), now with ath11k_mac_wait_tx_complete added in commit ba9177fcef21 ("ath11k: Add basic WoW functionalities"), that delay is not necessary now, so remove it. This is found in code review. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-02431-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220331002105.1162099-1-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 3e8d4eabbe62..cbac1919867f 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -453,11 +453,6 @@ int ath11k_core_suspend(struct ath11k_base *ab) if (!ar || ar->state != ATH11K_STATE_OFF) return 0; - /* TODO: there can frames in queues so for now add delay as a hack. - * Need to implement to handle and remove this delay. 
- */ - msleep(500); - ret = ath11k_dp_rx_pktlog_stop(ab, true); if (ret) { ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", -- cgit v1.2.3 From 633469e3bac10650ecff421ba6b9603d67da884b Mon Sep 17 00:00:00 2001 From: Nagarajan Maran Date: Fri, 1 Apr 2022 14:53:10 +0300 Subject: ath11k: fix driver initialization failure with WoW unsupported hw In the "ath11k_wow_init", error value "EINVAL" is returned when the check for firmware support of WoW feature fails, which in turn stops the driver initialization. Warning message: [ 31.040144] ------------[ cut here ]------------ [ 31.040185] WARNING: CPU: 1 PID: 51 at drivers/net/wireless/ath/ath11k/wow.c:813 ath11k_wow_init+0xc8/0x13a8 [ath11k] [ 31.043846] Modules linked in: ath11k_pci ath11k qmi_helpers [ 31.054341] CPU: 1 PID: 51 Comm: kworker/u8:1 Tainted: G W 5.17.0-wt-ath-594817-ga7f6aa925cf8-dirty #17 [ 31.060078] Hardware name: Qualcomm Technologies, Inc. IPQ8074/AP-HK10-C2 (DT) [ 31.070578] Workqueue: ath11k_qmi_driver_event ath11k_qmi_driver_event_work [ath11k] [ 31.077782] pstate: 80000005 (Nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 31.085676] pc : ath11k_wow_init+0xc8/0x13a8 [ath11k] [ 31.092359] lr : ath11k_mac_register+0x548/0xb98 [ath11k] [ 31.097567] sp : ffff80000aa13c40 [ 31.102944] x29: ffff80000aa13c40 x28: ffff800009184390 x27: ffff000002959f20 [ 31.106251] x26: ffff000002828000 x25: ffff000002830000 x24: ffff000002830000 [ 31.113369] x23: ffff000002820000 x22: ffff00000282854c x21: 0000000000000000 [ 31.120487] x20: ffff00000295cf20 x19: ffff000002828540 x18: 0000000000000031 [ 31.127605] x17: 0000000000000004 x16: ffff0000028285fc x15: ffff00000295b040 [ 31.134723] x14: 0000000000000067 x13: ffff00000282859c x12: 000000000000000d [ 31.141840] x11: 0000000000000018 x10: 0000000000000004 x9 : 0000000000000000 [ 31.148959] x8 : ffff00000289d680 x7 : 0000000000000000 x6 : 000000000000003f [ 31.156077] x5 : 0000000000000040 x4 : 0000000000000000 x3 : ffff000002820968 [ 31.163196] x2 : 0000000000000080 x1 : 0080008af9981779 x0 : ffff000002959f20 [ 31.170314] Call trace: [ 31.177421] ath11k_wow_init+0xc8/0x13a8 [ath11k] [ 31.179684] ath11k_core_qmi_firmware_ready+0x430/0x5e0 [ath11k] [ 31.184548] ath11k_qmi_driver_event_work+0x16c/0x4f8 [ath11k] [ 31.190623] process_one_work+0x134/0x350 [ 31.196262] worker_thread+0x12c/0x450 [ 31.200340] kthread+0xf4/0x110 [ 31.203986] ret_from_fork+0x10/0x20 [ 31.207026] ---[ end trace 0000000000000000 ]--- [ 31.210894] ath11k_pci 0000:01:00.0: failed to init wow: -22 [ 31.215467] ath11k_pci 0000:01:00.0: failed register the radio with mac80211: -22 [ 31.221117] ath11k_pci 0000:01:00.0: failed to create pdev core: -22 Fix this by returning value "0" when FW doesn't support WoW to allow driver to proceed with initialize sequence and also remove the unnecessary "WARN_ON". 
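As a rough illustration of why the original -EINVAL aborted the whole probe, the sketch below (simplified, assumed names rather than the real ath11k call chain) shows an optional-feature init whose error return propagates up through registration; returning 0 when the firmware lacks the service lets the rest of initialization continue.

#include <errno.h>
#include <stdbool.h>

struct hw_caps {
	bool fw_has_wow;	/* stands in for the WMI_TLV_SERVICE_WOW bit */
};

static int wow_init(const struct hw_caps *caps)
{
	/* Missing optional firmware service: skip the feature, do not fail. */
	if (!caps->fw_has_wow)
		return 0;

	/* ... advertise WoW support to mac80211 here ... */
	return 0;
}

static int mac_register(const struct hw_caps *caps)
{
	int ret = wow_init(caps);

	if (ret)
		return ret;	/* any error here unwinds the entire probe */

	/* ... hardware registration and the rest of bring-up ... */
	return 0;
}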
Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Fixes: ba9177fcef21 ("ath11k: Add basic WoW functionalities") Signed-off-by: Nagarajan Maran Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220331073110.3846-1-quic_nmaran@quicinc.com --- drivers/net/wireless/ath/ath11k/wow.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 93714df834b2..6c2611f93739 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -810,8 +810,8 @@ exit: int ath11k_wow_init(struct ath11k *ar) { - if (WARN_ON(!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))) - return -EINVAL; + if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map)) + return 0; ar->wow.wowlan_support = ath11k_wowlan_support; -- cgit v1.2.3 From 6f2f36e5f932c58e370bff79aba7f05963ea1c2a Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sat, 2 Apr 2022 08:15:16 -0400 Subject: mlxsw: spectrum_router: simplify list unwinding The setting of i here err_nexthop6_group_get: i = nrt6; Is redundant, i is already nrt6. So remove this statement. The for loop for the unwinding err_rt6_create: for (i--; i >= 0; i--) { Is equivelent to for (; i > 0; i--) { Two consecutive labels can be reduced to one. Signed-off-by: Tom Rix Reviewed-by: Ido Schimmel Link: https://lore.kernel.org/r/20220402121516.2750284-1-trix@redhat.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 79deb19e3a19..79fd486e29e3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -7010,7 +7010,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]); if (IS_ERR(mlxsw_sp_rt6)) { err = PTR_ERR(mlxsw_sp_rt6); - goto err_rt6_create; + goto err_rt6_unwind; } list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); @@ -7019,14 +7019,12 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry); if (err) - goto err_nexthop6_group_update; + goto err_rt6_unwind; return 0; -err_nexthop6_group_update: - i = nrt6; -err_rt6_create: - for (i--; i >= 0; i--) { +err_rt6_unwind: + for (; i > 0; i--) { fib6_entry->nrt6--; mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, list); @@ -7154,7 +7152,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]); if (IS_ERR(mlxsw_sp_rt6)) { err = PTR_ERR(mlxsw_sp_rt6); - goto err_rt6_create; + goto err_rt6_unwind; } list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); fib6_entry->nrt6++; @@ -7162,7 +7160,7 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); if (err) - goto err_nexthop6_group_get; + goto err_rt6_unwind; err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group, fib_node->fib); @@ -7181,10 +7179,8 @@ err_fib6_entry_type_set: mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib); err_nexthop_group_vr_link: mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry); -err_nexthop6_group_get: - i = nrt6; -err_rt6_create: - for (i--; i >= 0; i--) { 
+err_rt6_unwind: + for (; i > 0; i--) { fib6_entry->nrt6--; mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, list); -- cgit v1.2.3 From 4a5fb1bbcdf1cccae1f6b9c0277b3796b2a468ef Mon Sep 17 00:00:00 2001 From: Jérôme Pouiller Date: Sat, 26 Feb 2022 10:21:42 +0100 Subject: wfx: get out from the staging area MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The wfx driver is now mature enough to leave the staging area. Signed-off-by: Jérôme Pouiller Signed-off-by: Kalle Valo --- .../bindings/net/wireless/silabs,wfx.yaml | 137 ++++ .../bindings/staging/net/wireless/silabs,wfx.yaml | 137 ---- MAINTAINERS | 4 +- drivers/net/wireless/Kconfig | 1 + drivers/net/wireless/Makefile | 1 + drivers/net/wireless/silabs/Kconfig | 18 + drivers/net/wireless/silabs/Makefile | 3 + drivers/net/wireless/silabs/wfx/Kconfig | 13 + drivers/net/wireless/silabs/wfx/Makefile | 25 + drivers/net/wireless/silabs/wfx/bh.c | 324 +++++++++ drivers/net/wireless/silabs/wfx/bh.h | 34 + drivers/net/wireless/silabs/wfx/bus.h | 36 + drivers/net/wireless/silabs/wfx/bus_sdio.c | 273 +++++++ drivers/net/wireless/silabs/wfx/bus_spi.c | 284 ++++++++ drivers/net/wireless/silabs/wfx/data_rx.c | 92 +++ drivers/net/wireless/silabs/wfx/data_rx.h | 17 + drivers/net/wireless/silabs/wfx/data_tx.c | 568 +++++++++++++++ drivers/net/wireless/silabs/wfx/data_tx.h | 66 ++ drivers/net/wireless/silabs/wfx/debug.c | 331 +++++++++ drivers/net/wireless/silabs/wfx/debug.h | 19 + drivers/net/wireless/silabs/wfx/fwio.c | 389 ++++++++++ drivers/net/wireless/silabs/wfx/fwio.h | 15 + drivers/net/wireless/silabs/wfx/hif_api_cmd.h | 553 ++++++++++++++ drivers/net/wireless/silabs/wfx/hif_api_general.h | 252 +++++++ drivers/net/wireless/silabs/wfx/hif_api_mib.h | 346 +++++++++ drivers/net/wireless/silabs/wfx/hif_rx.c | 391 ++++++++++ drivers/net/wireless/silabs/wfx/hif_rx.h | 17 + drivers/net/wireless/silabs/wfx/hif_tx.c | 490 +++++++++++++ drivers/net/wireless/silabs/wfx/hif_tx.h | 61 ++ drivers/net/wireless/silabs/wfx/hif_tx_mib.c | 307 ++++++++ drivers/net/wireless/silabs/wfx/hif_tx_mib.h | 48 ++ drivers/net/wireless/silabs/wfx/hwio.c | 332 +++++++++ drivers/net/wireless/silabs/wfx/hwio.h | 78 ++ drivers/net/wireless/silabs/wfx/key.c | 227 ++++++ drivers/net/wireless/silabs/wfx/key.h | 19 + drivers/net/wireless/silabs/wfx/main.c | 491 +++++++++++++ drivers/net/wireless/silabs/wfx/main.h | 41 ++ drivers/net/wireless/silabs/wfx/queue.c | 297 ++++++++ drivers/net/wireless/silabs/wfx/queue.h | 44 ++ drivers/net/wireless/silabs/wfx/scan.c | 144 ++++ drivers/net/wireless/silabs/wfx/scan.h | 22 + drivers/net/wireless/silabs/wfx/sta.c | 794 +++++++++++++++++++++ drivers/net/wireless/silabs/wfx/sta.h | 66 ++ drivers/net/wireless/silabs/wfx/traces.h | 496 +++++++++++++ drivers/net/wireless/silabs/wfx/wfx.h | 162 +++++ drivers/staging/Kconfig | 1 - drivers/staging/Makefile | 1 - drivers/staging/wfx/Kconfig | 13 - drivers/staging/wfx/Makefile | 25 - drivers/staging/wfx/TODO | 6 - drivers/staging/wfx/bh.c | 324 --------- drivers/staging/wfx/bh.h | 34 - drivers/staging/wfx/bus.h | 36 - drivers/staging/wfx/bus_sdio.c | 273 ------- drivers/staging/wfx/bus_spi.c | 284 -------- drivers/staging/wfx/data_rx.c | 92 --- drivers/staging/wfx/data_rx.h | 17 - drivers/staging/wfx/data_tx.c | 568 --------------- drivers/staging/wfx/data_tx.h | 66 -- drivers/staging/wfx/debug.c | 331 --------- drivers/staging/wfx/debug.h | 19 - drivers/staging/wfx/fwio.c | 389 ---------- drivers/staging/wfx/fwio.h | 15 - 
drivers/staging/wfx/hif_api_cmd.h | 553 -------------- drivers/staging/wfx/hif_api_general.h | 252 ------- drivers/staging/wfx/hif_api_mib.h | 346 --------- drivers/staging/wfx/hif_rx.c | 391 ---------- drivers/staging/wfx/hif_rx.h | 17 - drivers/staging/wfx/hif_tx.c | 490 ------------- drivers/staging/wfx/hif_tx.h | 61 -- drivers/staging/wfx/hif_tx_mib.c | 307 -------- drivers/staging/wfx/hif_tx_mib.h | 48 -- drivers/staging/wfx/hwio.c | 332 --------- drivers/staging/wfx/hwio.h | 78 -- drivers/staging/wfx/key.c | 227 ------ drivers/staging/wfx/key.h | 19 - drivers/staging/wfx/main.c | 491 ------------- drivers/staging/wfx/main.h | 41 -- drivers/staging/wfx/queue.c | 297 -------- drivers/staging/wfx/queue.h | 44 -- drivers/staging/wfx/scan.c | 144 ---- drivers/staging/wfx/scan.h | 22 - drivers/staging/wfx/sta.c | 794 --------------------- drivers/staging/wfx/sta.h | 66 -- drivers/staging/wfx/traces.h | 496 ------------- drivers/staging/wfx/wfx.h | 162 ----- 86 files changed, 8326 insertions(+), 8311 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml delete mode 100644 Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml create mode 100644 drivers/net/wireless/silabs/Kconfig create mode 100644 drivers/net/wireless/silabs/Makefile create mode 100644 drivers/net/wireless/silabs/wfx/Kconfig create mode 100644 drivers/net/wireless/silabs/wfx/Makefile create mode 100644 drivers/net/wireless/silabs/wfx/bh.c create mode 100644 drivers/net/wireless/silabs/wfx/bh.h create mode 100644 drivers/net/wireless/silabs/wfx/bus.h create mode 100644 drivers/net/wireless/silabs/wfx/bus_sdio.c create mode 100644 drivers/net/wireless/silabs/wfx/bus_spi.c create mode 100644 drivers/net/wireless/silabs/wfx/data_rx.c create mode 100644 drivers/net/wireless/silabs/wfx/data_rx.h create mode 100644 drivers/net/wireless/silabs/wfx/data_tx.c create mode 100644 drivers/net/wireless/silabs/wfx/data_tx.h create mode 100644 drivers/net/wireless/silabs/wfx/debug.c create mode 100644 drivers/net/wireless/silabs/wfx/debug.h create mode 100644 drivers/net/wireless/silabs/wfx/fwio.c create mode 100644 drivers/net/wireless/silabs/wfx/fwio.h create mode 100644 drivers/net/wireless/silabs/wfx/hif_api_cmd.h create mode 100644 drivers/net/wireless/silabs/wfx/hif_api_general.h create mode 100644 drivers/net/wireless/silabs/wfx/hif_api_mib.h create mode 100644 drivers/net/wireless/silabs/wfx/hif_rx.c create mode 100644 drivers/net/wireless/silabs/wfx/hif_rx.h create mode 100644 drivers/net/wireless/silabs/wfx/hif_tx.c create mode 100644 drivers/net/wireless/silabs/wfx/hif_tx.h create mode 100644 drivers/net/wireless/silabs/wfx/hif_tx_mib.c create mode 100644 drivers/net/wireless/silabs/wfx/hif_tx_mib.h create mode 100644 drivers/net/wireless/silabs/wfx/hwio.c create mode 100644 drivers/net/wireless/silabs/wfx/hwio.h create mode 100644 drivers/net/wireless/silabs/wfx/key.c create mode 100644 drivers/net/wireless/silabs/wfx/key.h create mode 100644 drivers/net/wireless/silabs/wfx/main.c create mode 100644 drivers/net/wireless/silabs/wfx/main.h create mode 100644 drivers/net/wireless/silabs/wfx/queue.c create mode 100644 drivers/net/wireless/silabs/wfx/queue.h create mode 100644 drivers/net/wireless/silabs/wfx/scan.c create mode 100644 drivers/net/wireless/silabs/wfx/scan.h create mode 100644 drivers/net/wireless/silabs/wfx/sta.c create mode 100644 drivers/net/wireless/silabs/wfx/sta.h create mode 100644 drivers/net/wireless/silabs/wfx/traces.h create mode 100644 
drivers/net/wireless/silabs/wfx/wfx.h delete mode 100644 drivers/staging/wfx/Kconfig delete mode 100644 drivers/staging/wfx/Makefile delete mode 100644 drivers/staging/wfx/TODO delete mode 100644 drivers/staging/wfx/bh.c delete mode 100644 drivers/staging/wfx/bh.h delete mode 100644 drivers/staging/wfx/bus.h delete mode 100644 drivers/staging/wfx/bus_sdio.c delete mode 100644 drivers/staging/wfx/bus_spi.c delete mode 100644 drivers/staging/wfx/data_rx.c delete mode 100644 drivers/staging/wfx/data_rx.h delete mode 100644 drivers/staging/wfx/data_tx.c delete mode 100644 drivers/staging/wfx/data_tx.h delete mode 100644 drivers/staging/wfx/debug.c delete mode 100644 drivers/staging/wfx/debug.h delete mode 100644 drivers/staging/wfx/fwio.c delete mode 100644 drivers/staging/wfx/fwio.h delete mode 100644 drivers/staging/wfx/hif_api_cmd.h delete mode 100644 drivers/staging/wfx/hif_api_general.h delete mode 100644 drivers/staging/wfx/hif_api_mib.h delete mode 100644 drivers/staging/wfx/hif_rx.c delete mode 100644 drivers/staging/wfx/hif_rx.h delete mode 100644 drivers/staging/wfx/hif_tx.c delete mode 100644 drivers/staging/wfx/hif_tx.h delete mode 100644 drivers/staging/wfx/hif_tx_mib.c delete mode 100644 drivers/staging/wfx/hif_tx_mib.h delete mode 100644 drivers/staging/wfx/hwio.c delete mode 100644 drivers/staging/wfx/hwio.h delete mode 100644 drivers/staging/wfx/key.c delete mode 100644 drivers/staging/wfx/key.h delete mode 100644 drivers/staging/wfx/main.c delete mode 100644 drivers/staging/wfx/main.h delete mode 100644 drivers/staging/wfx/queue.c delete mode 100644 drivers/staging/wfx/queue.h delete mode 100644 drivers/staging/wfx/scan.c delete mode 100644 drivers/staging/wfx/scan.h delete mode 100644 drivers/staging/wfx/sta.c delete mode 100644 drivers/staging/wfx/sta.h delete mode 100644 drivers/staging/wfx/traces.h delete mode 100644 drivers/staging/wfx/wfx.h (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml new file mode 100644 index 000000000000..f5a531738d93 --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright (c) 2020, Silicon Laboratories, Inc. +%YAML 1.2 +--- + +$id: http://devicetree.org/schemas/net/wireless/silabs,wfx.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Silicon Labs WFxxx devicetree bindings + +maintainers: + - Jérôme Pouiller + +description: > + Support for the Wifi chip WFxxx from Silicon Labs. Currently, the only device + from the WFxxx series is the WF200 described here: + https://www.silabs.com/documents/public/data-sheets/wf200-datasheet.pdf + + The WF200 can be connected via SPI or via SDIO. + + For SDIO: + + Declaring the WFxxx chip in device tree is mandatory (usually, the VID/PID is + sufficient for the SDIO devices). + + It is recommended to declare a mmc-pwrseq on SDIO host above WFx. Without + it, you may encounter issues during reboot. The mmc-pwrseq should be + compatible with mmc-pwrseq-simple. Please consult + Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more + information. + + For SPI: + + In add of the properties below, please consult + Documentation/devicetree/bindings/spi/spi-controller.yaml for optional SPI + related properties. 
+ +properties: + compatible: + items: + - enum: + - silabs,brd4001a # WGM160P Evaluation Board + - silabs,brd8022a # WF200 Evaluation Board + - silabs,brd8023a # WFM200 Evaluation Board + - const: silabs,wf200 # Chip alone without antenna + + reg: + description: + When used on SDIO bus, must be set to 1. When used on SPI bus, it is + the chip select address of the device as defined in the SPI devices + bindings. + maxItems: 1 + + spi-max-frequency: true + + interrupts: + description: The interrupt line. Should be IRQ_TYPE_EDGE_RISING. When SPI is + used, this property is required. When SDIO is used, the "in-band" + interrupt provided by the SDIO bus is used unless an interrupt is defined + in the Device Tree. + maxItems: 1 + + reset-gpios: + description: (SPI only) Phandle of gpio that will be used to reset chip + during probe. Without this property, you may encounter issues with warm + boot. + + For SDIO, the reset gpio should declared using a mmc-pwrseq. + maxItems: 1 + + wakeup-gpios: + description: Phandle of gpio that will be used to wake-up chip. Without this + property, driver will disable most of power saving features. + maxItems: 1 + + silabs,antenna-config-file: + $ref: /schemas/types.yaml#/definitions/string + description: Use an alternative file for antenna configuration (aka + "Platform Data Set" in Silabs jargon). Default depends of "compatible" + string. For "silabs,wf200", the default is 'wf200.pds'. + + local-mac-address: true + + mac-address: true + +additionalProperties: false + +required: + - compatible + - reg + +examples: + - | + #include + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + wifi@0 { + compatible = "silabs,brd8022a", "silabs,wf200"; + pinctrl-names = "default"; + pinctrl-0 = <&wfx_irq &wfx_gpios>; + reg = <0>; + interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>; + wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>; + spi-max-frequency = <42000000>; + }; + }; + + - | + #include + #include + + wfx_pwrseq: wfx_pwrseq { + compatible = "mmc-pwrseq-simple"; + pinctrl-names = "default"; + pinctrl-0 = <&wfx_reset>; + reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>; + }; + + mmc { + mmc-pwrseq = <&wfx_pwrseq>; + #address-cells = <1>; + #size-cells = <0>; + + wifi@1 { + compatible = "silabs,brd8022a", "silabs,wf200"; + pinctrl-names = "default"; + pinctrl-0 = <&wfx_wakeup>; + reg = <1>; + wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml b/Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml deleted file mode 100644 index 105725a127ab..000000000000 --- a/Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml +++ /dev/null @@ -1,137 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -# Copyright (c) 2020, Silicon Laboratories, Inc. -%YAML 1.2 ---- - -$id: http://devicetree.org/schemas/staging/net/wireless/silabs,wfx.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Silicon Labs WFxxx devicetree bindings - -maintainers: - - Jérôme Pouiller - -description: > - Support for the Wifi chip WFxxx from Silicon Labs. Currently, the only device - from the WFxxx series is the WF200 described here: - https://www.silabs.com/documents/public/data-sheets/wf200-datasheet.pdf - - The WF200 can be connected via SPI or via SDIO. - - For SDIO: - - Declaring the WFxxx chip in device tree is mandatory (usually, the VID/PID is - sufficient for the SDIO devices). 
- - It is recommended to declare a mmc-pwrseq on SDIO host above WFx. Without - it, you may encounter issues during reboot. The mmc-pwrseq should be - compatible with mmc-pwrseq-simple. Please consult - Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more - information. - - For SPI: - - In add of the properties below, please consult - Documentation/devicetree/bindings/spi/spi-controller.yaml for optional SPI - related properties. - -properties: - compatible: - items: - - enum: - - silabs,brd4001a # WGM160P Evaluation Board - - silabs,brd8022a # WF200 Evaluation Board - - silabs,brd8023a # WFM200 Evaluation Board - - const: silabs,wf200 # Chip alone without antenna - - reg: - description: - When used on SDIO bus, must be set to 1. When used on SPI bus, it is - the chip select address of the device as defined in the SPI devices - bindings. - maxItems: 1 - - spi-max-frequency: true - - interrupts: - description: The interrupt line. Should be IRQ_TYPE_EDGE_RISING. When SPI is - used, this property is required. When SDIO is used, the "in-band" - interrupt provided by the SDIO bus is used unless an interrupt is defined - in the Device Tree. - maxItems: 1 - - reset-gpios: - description: (SPI only) Phandle of gpio that will be used to reset chip - during probe. Without this property, you may encounter issues with warm - boot. - - For SDIO, the reset gpio should declared using a mmc-pwrseq. - maxItems: 1 - - wakeup-gpios: - description: Phandle of gpio that will be used to wake-up chip. Without this - property, driver will disable most of power saving features. - maxItems: 1 - - silabs,antenna-config-file: - $ref: /schemas/types.yaml#/definitions/string - description: Use an alternative file for antenna configuration (aka - "Platform Data Set" in Silabs jargon). Default depends of "compatible" - string. For "silabs,wf200", the default is 'wf200.pds'. - - local-mac-address: true - - mac-address: true - -additionalProperties: false - -required: - - compatible - - reg - -examples: - - | - #include - #include - - spi { - #address-cells = <1>; - #size-cells = <0>; - - wifi@0 { - compatible = "silabs,brd8022a", "silabs,wf200"; - pinctrl-names = "default"; - pinctrl-0 = <&wfx_irq &wfx_gpios>; - reg = <0>; - interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>; - wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>; - spi-max-frequency = <42000000>; - }; - }; - - - | - #include - #include - - wfx_pwrseq: wfx_pwrseq { - compatible = "mmc-pwrseq-simple"; - pinctrl-names = "default"; - pinctrl-0 = <&wfx_reset>; - reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>; - }; - - mmc { - mmc-pwrseq = <&wfx_pwrseq>; - #address-cells = <1>; - #size-cells = <0>; - - wifi@1 { - compatible = "silabs,brd8022a", "silabs,wf200"; - pinctrl-names = "default"; - pinctrl-0 = <&wfx_wakeup>; - reg = <1>; - wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>; - }; - }; -... 
diff --git a/MAINTAINERS b/MAINTAINERS index fd768d43e048..ba405f6ec31a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17982,8 +17982,8 @@ F: drivers/platform/x86/touchscreen_dmi.c SILICON LABS WIRELESS DRIVERS (for WFxxx series) M: Jérôme Pouiller S: Supported -F: Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml -F: drivers/staging/wfx/ +F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml +F: drivers/net/wireless/silabs/wfx/ SILICON MOTION SM712 FRAME BUFFER DRIVER M: Sudip Mukherjee diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 7add2002ff4c..e78ff7af6517 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -31,6 +31,7 @@ source "drivers/net/wireless/microchip/Kconfig" source "drivers/net/wireless/ralink/Kconfig" source "drivers/net/wireless/realtek/Kconfig" source "drivers/net/wireless/rsi/Kconfig" +source "drivers/net/wireless/silabs/Kconfig" source "drivers/net/wireless/st/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zydas/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 80b324499786..76885e5f0ea7 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/ obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/ +obj-$(CONFIG_WLAN_VENDOR_SILABS) += silabs/ obj-$(CONFIG_WLAN_VENDOR_ST) += st/ obj-$(CONFIG_WLAN_VENDOR_TI) += ti/ obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/ diff --git a/drivers/net/wireless/silabs/Kconfig b/drivers/net/wireless/silabs/Kconfig new file mode 100644 index 000000000000..6262a799bf36 --- /dev/null +++ b/drivers/net/wireless/silabs/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 + +config WLAN_VENDOR_SILABS + bool "Silicon Laboratories devices" + default y + help + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_SILABS + +source "drivers/net/wireless/silabs/wfx/Kconfig" + +endif # WLAN_VENDOR_SILABS diff --git a/drivers/net/wireless/silabs/Makefile b/drivers/net/wireless/silabs/Makefile new file mode 100644 index 000000000000..c2263ee21006 --- /dev/null +++ b/drivers/net/wireless/silabs/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_WFX) += wfx/ diff --git a/drivers/net/wireless/silabs/wfx/Kconfig b/drivers/net/wireless/silabs/wfx/Kconfig new file mode 100644 index 000000000000..835a855409d8 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +config WFX + tristate "Silicon Labs wireless chips WF200 and further" + depends on MAC80211 + depends on MMC || !MMC # do not allow WFX=y if MMC=m + depends on (SPI || MMC) + help + This is a driver for Silicons Labs WFxxx series (WF200 and further) + chipsets. This chip can be found on SPI or SDIO buses. + + Silabs does not use a reliable SDIO vendor ID. So, to avoid conflicts, + the driver won't probe the device if it is not also declared in the + Device Tree. 
diff --git a/drivers/net/wireless/silabs/wfx/Makefile b/drivers/net/wireless/silabs/wfx/Makefile new file mode 100644 index 000000000000..c8b356f71c99 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only + +# Necessary for CREATE_TRACE_POINTS +CFLAGS_debug.o = -I$(src) + +wfx-y := \ + bh.o \ + hwio.o \ + fwio.o \ + hif_tx_mib.o \ + hif_tx.o \ + hif_rx.o \ + queue.o \ + data_tx.o \ + data_rx.o \ + scan.o \ + sta.o \ + key.o \ + main.o \ + debug.o +wfx-$(CONFIG_SPI) += bus_spi.o +# When CONFIG_MMC == m, append to 'wfx-y' (and not to 'wfx-m') +wfx-$(subst m,y,$(CONFIG_MMC)) += bus_sdio.o + +obj-$(CONFIG_WFX) += wfx.o diff --git a/drivers/net/wireless/silabs/wfx/bh.c b/drivers/net/wireless/silabs/wfx/bh.c new file mode 100644 index 000000000000..bcea9d5b119c --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/bh.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Interrupt bottom half (BH). + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "bh.h" +#include "wfx.h" +#include "hwio.h" +#include "traces.h" +#include "hif_rx.h" +#include "hif_api_cmd.h" + +static void device_wakeup(struct wfx_dev *wdev) +{ + int max_retry = 3; + + if (!wdev->pdata.gpio_wakeup) + return; + if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0) + return; + + if (wfx_api_older_than(wdev, 1, 4)) { + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); + if (!completion_done(&wdev->hif.ctrl_ready)) + usleep_range(2000, 2500); + return; + } + for (;;) { + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); + /* completion.h does not provide any function to wait completion without consume it + * (a kind of wait_for_completion_done_timeout()). So we have to emulate it. + */ + if (wait_for_completion_timeout(&wdev->hif.ctrl_ready, msecs_to_jiffies(2))) { + complete(&wdev->hif.ctrl_ready); + return; + } else if (max_retry-- > 0) { + /* Older firmwares have a race in sleep/wake-up process. Redo the process + * is sufficient to unfreeze the chip. 
+ */ + dev_err(wdev->dev, "timeout while wake up chip\n"); + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0); + usleep_range(2000, 2500); + } else { + dev_err(wdev->dev, "max wake-up retries reached\n"); + return; + } + } +} + +static void device_release(struct wfx_dev *wdev) +{ + if (!wdev->pdata.gpio_wakeup) + return; + + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0); +} + +static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf) +{ + struct sk_buff *skb; + struct wfx_hif_msg *hif; + size_t alloc_len; + size_t computed_len; + int release_count; + int piggyback = 0; + + WARN(read_len > round_down(0xFFF, 2) * sizeof(u16), "request exceed the chip capability"); + + /* Add 2 to take into account piggyback size */ + alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2); + skb = dev_alloc_skb(alloc_len); + if (!skb) + return -ENOMEM; + + if (wfx_data_read(wdev, skb->data, alloc_len)) + goto err; + + piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2)); + _trace_piggyback(piggyback, false); + + hif = (struct wfx_hif_msg *)skb->data; + WARN(hif->encrypted & 0x3, "encryption is unsupported"); + if (WARN(read_len < sizeof(struct wfx_hif_msg), "corrupted read")) + goto err; + computed_len = le16_to_cpu(hif->len); + computed_len = round_up(computed_len, 2); + if (computed_len != read_len) { + dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n", + computed_len, read_len); + print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1, + hif, read_len, true); + goto err; + } + + if (!(hif->id & HIF_ID_IS_INDICATION)) { + (*is_cnf)++; + if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT) + release_count = + ((struct wfx_hif_cnf_multi_transmit *)hif->body)->num_tx_confs; + else + release_count = 1; + WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter"); + wdev->hif.tx_buffers_used -= release_count; + } + _trace_hif_recv(hif, wdev->hif.tx_buffers_used); + + if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) { + if (hif->seqnum != wdev->hif.rx_seqnum) + dev_warn(wdev->dev, "wrong message sequence: %d != %d\n", + hif->seqnum, wdev->hif.rx_seqnum); + wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1); + } + + skb_put(skb, le16_to_cpu(hif->len)); + /* wfx_handle_rx takes care on SKB livetime */ + wfx_handle_rx(wdev, skb); + if (!wdev->hif.tx_buffers_used) + wake_up(&wdev->hif.tx_buffers_empty); + + return piggyback; + +err: + if (skb) + dev_kfree_skb(skb); + return -EIO; +} + +static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf) +{ + size_t len; + int i; + int ctrl_reg, piggyback; + + piggyback = 0; + for (i = 0; i < max_msg; i++) { + if (piggyback & CTRL_NEXT_LEN_MASK) + ctrl_reg = piggyback; + else if (try_wait_for_completion(&wdev->hif.ctrl_ready)) + ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0); + else + ctrl_reg = 0; + if (!(ctrl_reg & CTRL_NEXT_LEN_MASK)) + return i; + /* ctrl_reg units are 16bits words */ + len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2; + piggyback = rx_helper(wdev, len, num_cnf); + if (piggyback < 0) + return i; + if (!(piggyback & CTRL_WLAN_READY)) + dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n", + piggyback); + } + if (piggyback & CTRL_NEXT_LEN_MASK) { + ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback); + complete(&wdev->hif.ctrl_ready); + if (ctrl_reg) + dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n", + ctrl_reg, piggyback); + } + return i; +} + +static void tx_helper(struct wfx_dev *wdev, struct wfx_hif_msg 
*hif) +{ + int ret; + void *data; + bool is_encrypted = false; + size_t len = le16_to_cpu(hif->len); + + WARN(len < sizeof(*hif), "try to send corrupted data"); + + hif->seqnum = wdev->hif.tx_seqnum; + wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1); + + data = hif; + WARN(len > le16_to_cpu(wdev->hw_caps.size_inp_ch_buf), + "request exceed the chip capability: %zu > %d\n", + len, le16_to_cpu(wdev->hw_caps.size_inp_ch_buf)); + len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len); + ret = wfx_data_write(wdev, data, len); + if (ret) + goto end; + + wdev->hif.tx_buffers_used++; + _trace_hif_send(hif, wdev->hif.tx_buffers_used); +end: + if (is_encrypted) + kfree(data); +} + +static int bh_work_tx(struct wfx_dev *wdev, int max_msg) +{ + struct wfx_hif_msg *hif; + int i; + + for (i = 0; i < max_msg; i++) { + hif = NULL; + if (wdev->hif.tx_buffers_used < le16_to_cpu(wdev->hw_caps.num_inp_ch_bufs)) { + if (try_wait_for_completion(&wdev->hif_cmd.ready)) { + WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error"); + hif = wdev->hif_cmd.buf_send; + } else { + hif = wfx_tx_queues_get(wdev); + } + } + if (!hif) + return i; + tx_helper(wdev, hif); + } + return i; +} + +/* In SDIO mode, it is necessary to make an access to a register to acknowledge last received + * message. It could be possible to restrict this acknowledge to SDIO mode and only if last + * operation was rx. + */ +static void ack_sdio_data(struct wfx_dev *wdev) +{ + u32 cfg_reg; + + wfx_config_reg_read(wdev, &cfg_reg); + if (cfg_reg & 0xFF) { + dev_warn(wdev->dev, "chip reports errors: %02x\n", cfg_reg & 0xFF); + wfx_config_reg_write_bits(wdev, 0xFF, 0x00); + } +} + +static void bh_work(struct work_struct *work) +{ + struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh); + int stats_req = 0, stats_cnf = 0, stats_ind = 0; + bool release_chip = false, last_op_is_rx = false; + int num_tx, num_rx; + + device_wakeup(wdev); + do { + num_tx = bh_work_tx(wdev, 32); + stats_req += num_tx; + if (num_tx) + last_op_is_rx = false; + num_rx = bh_work_rx(wdev, 32, &stats_cnf); + stats_ind += num_rx; + if (num_rx) + last_op_is_rx = true; + } while (num_rx || num_tx); + stats_ind -= stats_cnf; + + if (last_op_is_rx) + ack_sdio_data(wdev); + if (!wdev->hif.tx_buffers_used && !work_pending(work)) { + device_release(wdev); + release_chip = true; + } + _trace_bh_stats(stats_ind, stats_req, stats_cnf, wdev->hif.tx_buffers_used, release_chip); +} + +/* An IRQ from chip did occur */ +void wfx_bh_request_rx(struct wfx_dev *wdev) +{ + u32 cur, prev; + + wfx_control_reg_read(wdev, &cur); + prev = atomic_xchg(&wdev->hif.ctrl_reg, cur); + complete(&wdev->hif.ctrl_ready); + queue_work(system_highpri_wq, &wdev->hif.bh); + + if (!(cur & CTRL_NEXT_LEN_MASK)) + dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n", + cur); + if (prev != 0) + dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n", + prev, cur); +} + +/* Driver want to send data */ +void wfx_bh_request_tx(struct wfx_dev *wdev) +{ + queue_work(system_highpri_wq, &wdev->hif.bh); +} + +/* If IRQ is not available, this function allow to manually poll the control register and simulate + * an IRQ ahen an event happened. + * + * Note that the device has a bug: If an IRQ raise while host read control register, the IRQ is + * lost. So, use this function carefully (only duing device initialisation). 
+ */ +void wfx_bh_poll_irq(struct wfx_dev *wdev) +{ + ktime_t now, start; + u32 reg; + + WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ"); + flush_workqueue(system_highpri_wq); + start = ktime_get(); + for (;;) { + wfx_control_reg_read(wdev, ®); + now = ktime_get(); + if (reg & 0xFFF) + break; + if (ktime_after(now, ktime_add_ms(start, 1000))) { + dev_err(wdev->dev, "time out while polling control register\n"); + return; + } + udelay(200); + } + wfx_bh_request_rx(wdev); +} + +void wfx_bh_register(struct wfx_dev *wdev) +{ + INIT_WORK(&wdev->hif.bh, bh_work); + init_completion(&wdev->hif.ctrl_ready); + init_waitqueue_head(&wdev->hif.tx_buffers_empty); +} + +void wfx_bh_unregister(struct wfx_dev *wdev) +{ + flush_work(&wdev->hif.bh); +} diff --git a/drivers/net/wireless/silabs/wfx/bh.h b/drivers/net/wireless/silabs/wfx/bh.h new file mode 100644 index 000000000000..a44c8b421b7c --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/bh.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Interrupt bottom half (BH). + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_BH_H +#define WFX_BH_H + +#include +#include +#include +#include + +struct wfx_dev; + +struct wfx_hif { + struct work_struct bh; + struct completion ctrl_ready; + wait_queue_head_t tx_buffers_empty; + atomic_t ctrl_reg; + int rx_seqnum; + int tx_seqnum; + int tx_buffers_used; +}; + +void wfx_bh_register(struct wfx_dev *wdev); +void wfx_bh_unregister(struct wfx_dev *wdev); +void wfx_bh_request_rx(struct wfx_dev *wdev); +void wfx_bh_request_tx(struct wfx_dev *wdev); +void wfx_bh_poll_irq(struct wfx_dev *wdev); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/bus.h b/drivers/net/wireless/silabs/wfx/bus.h new file mode 100644 index 000000000000..ccadfdd6873c --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/bus.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Common bus abstraction layer. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_BUS_H +#define WFX_BUS_H + +#include +#include + +#define WFX_REG_CONFIG 0x0 +#define WFX_REG_CONTROL 0x1 +#define WFX_REG_IN_OUT_QUEUE 0x2 +#define WFX_REG_AHB_DPORT 0x3 +#define WFX_REG_BASE_ADDR 0x4 +#define WFX_REG_SRAM_DPORT 0x5 +#define WFX_REG_SET_GEN_R_W 0x6 +#define WFX_REG_FRAME_OUT 0x7 + +struct wfx_hwbus_ops { + int (*copy_from_io)(void *bus_priv, unsigned int addr, void *dst, size_t count); + int (*copy_to_io)(void *bus_priv, unsigned int addr, const void *src, size_t count); + int (*irq_subscribe)(void *bus_priv); + int (*irq_unsubscribe)(void *bus_priv); + void (*lock)(void *bus_priv); + void (*unlock)(void *bus_priv); + size_t (*align_size)(void *bus_priv, size_t size); +}; + +extern struct sdio_driver wfx_sdio_driver; +extern struct spi_driver wfx_spi_driver; + +#endif diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c new file mode 100644 index 000000000000..51a0d58a9070 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SDIO interface. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bus.h" +#include "wfx.h" +#include "hwio.h" +#include "main.h" +#include "bh.h" + +static const struct wfx_platform_data pdata_wf200 = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/wf200.pds", +}; + +static const struct wfx_platform_data pdata_brd4001a = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/brd4001a.pds", +}; + +static const struct wfx_platform_data pdata_brd8022a = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/brd8022a.pds", +}; + +static const struct wfx_platform_data pdata_brd8023a = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/brd8023a.pds", +}; + +struct wfx_sdio_priv { + struct sdio_func *func; + struct wfx_dev *core; + u8 buf_id_tx; + u8 buf_id_rx; + int of_irq; +}; + +static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id, void *dst, size_t count) +{ + struct wfx_sdio_priv *bus = priv; + unsigned int sdio_addr = reg_id << 2; + int ret; + + WARN(reg_id > 7, "chip only has 7 registers"); + WARN(!IS_ALIGNED((uintptr_t)dst, 4), "unaligned buffer address"); + WARN(!IS_ALIGNED(count, 4), "unaligned buffer size"); + + /* Use queue mode buffers */ + if (reg_id == WFX_REG_IN_OUT_QUEUE) + sdio_addr |= (bus->buf_id_rx + 1) << 7; + ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count); + if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE) + bus->buf_id_rx = (bus->buf_id_rx + 1) % 4; + + return ret; +} + +static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id, const void *src, size_t count) +{ + struct wfx_sdio_priv *bus = priv; + unsigned int sdio_addr = reg_id << 2; + int ret; + + WARN(reg_id > 7, "chip only has 7 registers"); + WARN(!IS_ALIGNED((uintptr_t)src, 4), "unaligned buffer address"); + WARN(!IS_ALIGNED(count, 4), "unaligned buffer size"); + + /* Use queue mode buffers */ + if (reg_id == WFX_REG_IN_OUT_QUEUE) + sdio_addr |= bus->buf_id_tx << 7; + /* FIXME: discards 'const' qualifier for src */ + ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count); + if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE) + bus->buf_id_tx = (bus->buf_id_tx + 1) % 32; + + return ret; +} + +static void wfx_sdio_lock(void *priv) +{ + struct wfx_sdio_priv *bus = priv; + + sdio_claim_host(bus->func); +} + +static void wfx_sdio_unlock(void *priv) +{ + struct wfx_sdio_priv *bus = priv; + + sdio_release_host(bus->func); +} + +static void wfx_sdio_irq_handler(struct sdio_func *func) +{ + struct wfx_sdio_priv *bus = sdio_get_drvdata(func); + + wfx_bh_request_rx(bus->core); +} + +static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv) +{ + struct wfx_sdio_priv *bus = priv; + + sdio_claim_host(bus->func); + wfx_bh_request_rx(bus->core); + sdio_release_host(bus->func); + return IRQ_HANDLED; +} + +static int wfx_sdio_irq_subscribe(void *priv) +{ + struct wfx_sdio_priv *bus = priv; + u32 flags; + int ret; + u8 cccr; + + if (!bus->of_irq) { + sdio_claim_host(bus->func); + ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler); + sdio_release_host(bus->func); + return ret; + } + + flags = irq_get_trigger_type(bus->of_irq); + if (!flags) + flags = IRQF_TRIGGER_HIGH; + flags |= IRQF_ONESHOT; + ret = devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL, + wfx_sdio_irq_handler_ext, flags, "wfx", bus); + if (ret) + return ret; + sdio_claim_host(bus->func); + cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL); + cccr |= BIT(0); + cccr |= BIT(bus->func->num); + sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL); + 
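/* With an out-of-band IRQ, sdio_claim_irq() is not used, so the interrupt enable bits of the
+ * CCCR (the master enable IENM in bit 0 and this function's IEN bit) are set by hand above.
+ */
+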
sdio_release_host(bus->func); + return 0; +} + +static int wfx_sdio_irq_unsubscribe(void *priv) +{ + struct wfx_sdio_priv *bus = priv; + int ret; + + if (bus->of_irq) + devm_free_irq(&bus->func->dev, bus->of_irq, bus); + sdio_claim_host(bus->func); + ret = sdio_release_irq(bus->func); + sdio_release_host(bus->func); + return ret; +} + +static size_t wfx_sdio_align_size(void *priv, size_t size) +{ + struct wfx_sdio_priv *bus = priv; + + return sdio_align_size(bus->func, size); +} + +static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = { + .copy_from_io = wfx_sdio_copy_from_io, + .copy_to_io = wfx_sdio_copy_to_io, + .irq_subscribe = wfx_sdio_irq_subscribe, + .irq_unsubscribe = wfx_sdio_irq_unsubscribe, + .lock = wfx_sdio_lock, + .unlock = wfx_sdio_unlock, + .align_size = wfx_sdio_align_size, +}; + +static const struct of_device_id wfx_sdio_of_match[] = { + { .compatible = "silabs,wf200", .data = &pdata_wf200 }, + { .compatible = "silabs,brd4001a", .data = &pdata_brd4001a }, + { .compatible = "silabs,brd8022a", .data = &pdata_brd8022a }, + { .compatible = "silabs,brd8023a", .data = &pdata_brd8023a }, + { }, +}; +MODULE_DEVICE_TABLE(of, wfx_sdio_of_match); + +static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) +{ + const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev); + struct device_node *np = func->dev.of_node; + struct wfx_sdio_priv *bus; + int ret; + + if (func->num != 1) { + dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", + func->num); + return -ENODEV; + } + + if (!pdata) { + dev_warn(&func->dev, "no compatible device found in DT\n"); + return -ENODEV; + } + + bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + bus->func = func; + bus->of_irq = irq_of_parse_and_map(np, 0); + sdio_set_drvdata(func, bus); + + sdio_claim_host(func); + ret = sdio_enable_func(func); + /* Block of 64 bytes is more efficient than 512B for frame sizes < 4k */ + sdio_set_block_size(func, 64); + sdio_release_host(func); + if (ret) + return ret; + + bus->core = wfx_init_common(&func->dev, pdata, &wfx_sdio_hwbus_ops, bus); + if (!bus->core) { + ret = -EIO; + goto sdio_release; + } + + ret = wfx_probe(bus->core); + if (ret) + goto sdio_release; + + return 0; + +sdio_release: + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); + return ret; +} + +static void wfx_sdio_remove(struct sdio_func *func) +{ + struct wfx_sdio_priv *bus = sdio_get_drvdata(func); + + wfx_release(bus->core); + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); +} + +static const struct sdio_device_id wfx_sdio_ids[] = { + /* WF200 does not have official VID/PID */ + { SDIO_DEVICE(0x0000, 0x1000) }, + { }, +}; +MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids); + +struct sdio_driver wfx_sdio_driver = { + .name = "wfx-sdio", + .id_table = wfx_sdio_ids, + .probe = wfx_sdio_probe, + .remove = wfx_sdio_remove, + .drv = { + .owner = THIS_MODULE, + .of_match_table = wfx_sdio_of_match, + } +}; diff --git a/drivers/net/wireless/silabs/wfx/bus_spi.c b/drivers/net/wireless/silabs/wfx/bus_spi.c new file mode 100644 index 000000000000..7fb1afb8ed31 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/bus_spi.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SPI interface. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2011, Sagrad Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include +#include +#include +#include +#include +#include + +#include "bus.h" +#include "wfx.h" +#include "hwio.h" +#include "main.h" +#include "bh.h" + +#define SET_WRITE 0x7FFF /* usage: and operation */ +#define SET_READ 0x8000 /* usage: or operation */ + +static const struct wfx_platform_data pdata_wf200 = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/wf200.pds", + .use_rising_clk = true, +}; + +static const struct wfx_platform_data pdata_brd4001a = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/brd4001a.pds", + .use_rising_clk = true, +}; + +static const struct wfx_platform_data pdata_brd8022a = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/brd8022a.pds", + .use_rising_clk = true, +}; + +static const struct wfx_platform_data pdata_brd8023a = { + .file_fw = "wfx/wfm_wf200", + .file_pds = "wfx/brd8023a.pds", + .use_rising_clk = true, +}; + +struct wfx_spi_priv { + struct spi_device *func; + struct wfx_dev *core; + struct gpio_desc *gpio_reset; + bool need_swab; +}; + +/* The chip reads 16bits of data at time and place them directly into (little endian) CPU register. + * So, the chip expects bytes order to be "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3" and BE is + * "B3 B2 B1 B0") + * + * A little endian host with bits_per_word == 16 should do the right job natively. The code below to + * support big endian host and commonly used SPI 8bits. + */ +static int wfx_spi_copy_from_io(void *priv, unsigned int addr, void *dst, size_t count) +{ + struct wfx_spi_priv *bus = priv; + u16 regaddr = (addr << 12) | (count / 2) | SET_READ; + struct spi_message m; + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .rx_buf = dst, + .len = count, + }; + u16 *dst16 = dst; + int ret, i; + + WARN(count % 2, "buffer size must be a multiple of 2"); + + cpu_to_le16s(®addr); + if (bus->need_swab) + swab16s(®addr); + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + ret = spi_sync(bus->func, &m); + + if (bus->need_swab && addr == WFX_REG_CONFIG) + for (i = 0; i < count / 2; i++) + swab16s(&dst16[i]); + return ret; +} + +static int wfx_spi_copy_to_io(void *priv, unsigned int addr, const void *src, size_t count) +{ + struct wfx_spi_priv *bus = priv; + u16 regaddr = (addr << 12) | (count / 2); + /* FIXME: use a bounce buffer */ + u16 *src16 = (void *)src; + int ret, i; + struct spi_message m; + struct spi_transfer t_addr = { + .tx_buf = ®addr, + .len = sizeof(regaddr), + }; + struct spi_transfer t_msg = { + .tx_buf = src, + .len = count, + }; + + WARN(count % 2, "buffer size must be a multiple of 2"); + WARN(regaddr & SET_READ, "bad addr or size overflow"); + + cpu_to_le16s(®addr); + + /* Register address and CONFIG content always use 16bit big endian + * ("BADC" order) + */ + if (bus->need_swab) + swab16s(®addr); + if (bus->need_swab && addr == WFX_REG_CONFIG) + for (i = 0; i < count / 2; i++) + swab16s(&src16[i]); + + spi_message_init(&m); + spi_message_add_tail(&t_addr, &m); + spi_message_add_tail(&t_msg, &m); + ret = spi_sync(bus->func, &m); + + if (bus->need_swab && addr == WFX_REG_CONFIG) + for (i = 0; i < count / 2; i++) + swab16s(&src16[i]); + return ret; +} + +static void wfx_spi_lock(void *priv) +{ +} + +static void wfx_spi_unlock(void *priv) +{ +} + +static irqreturn_t wfx_spi_irq_handler(int irq, void *priv) +{ + struct wfx_spi_priv *bus = priv; + + wfx_bh_request_rx(bus->core); + return IRQ_HANDLED; +} + +static int 
wfx_spi_irq_subscribe(void *priv) +{ + struct wfx_spi_priv *bus = priv; + u32 flags; + + flags = irq_get_trigger_type(bus->func->irq); + if (!flags) + flags = IRQF_TRIGGER_HIGH; + flags |= IRQF_ONESHOT; + return devm_request_threaded_irq(&bus->func->dev, bus->func->irq, NULL, + wfx_spi_irq_handler, flags, "wfx", bus); +} + +static int wfx_spi_irq_unsubscribe(void *priv) +{ + struct wfx_spi_priv *bus = priv; + + devm_free_irq(&bus->func->dev, bus->func->irq, bus); + return 0; +} + +static size_t wfx_spi_align_size(void *priv, size_t size) +{ + /* Most of SPI controllers avoid DMA if buffer size is not 32bit aligned */ + return ALIGN(size, 4); +} + +static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = { + .copy_from_io = wfx_spi_copy_from_io, + .copy_to_io = wfx_spi_copy_to_io, + .irq_subscribe = wfx_spi_irq_subscribe, + .irq_unsubscribe = wfx_spi_irq_unsubscribe, + .lock = wfx_spi_lock, + .unlock = wfx_spi_unlock, + .align_size = wfx_spi_align_size, +}; + +static int wfx_spi_probe(struct spi_device *func) +{ + struct wfx_platform_data *pdata; + struct wfx_spi_priv *bus; + int ret; + + if (!func->bits_per_word) + func->bits_per_word = 16; + ret = spi_setup(func); + if (ret) + return ret; + pdata = (struct wfx_platform_data *)spi_get_device_id(func)->driver_data; + if (!pdata) { + dev_err(&func->dev, "unable to retrieve driver data (please report)\n"); + return -ENODEV; + } + + /* Trace below is also displayed by spi_setup() if compiled with DEBUG */ + dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n", + func->chip_select, func->mode, func->bits_per_word, func->max_speed_hz); + if (func->bits_per_word != 16 && func->bits_per_word != 8) + dev_warn(&func->dev, "unusual bits/word value: %d\n", func->bits_per_word); + if (func->max_speed_hz > 50000000) + dev_warn(&func->dev, "%dHz is a very high speed\n", func->max_speed_hz); + + bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + bus->func = func; + if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + bus->need_swab = true; + spi_set_drvdata(func, bus); + + bus->gpio_reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(bus->gpio_reset)) + return PTR_ERR(bus->gpio_reset); + if (!bus->gpio_reset) { + dev_warn(&func->dev, "gpio reset is not defined, trying to load firmware anyway\n"); + } else { + gpiod_set_consumer_name(bus->gpio_reset, "wfx reset"); + gpiod_set_value_cansleep(bus->gpio_reset, 1); + usleep_range(100, 150); + gpiod_set_value_cansleep(bus->gpio_reset, 0); + usleep_range(2000, 2500); + } + + bus->core = wfx_init_common(&func->dev, pdata, &wfx_spi_hwbus_ops, bus); + if (!bus->core) + return -EIO; + + return wfx_probe(bus->core); +} + +static void wfx_spi_remove(struct spi_device *func) +{ + struct wfx_spi_priv *bus = spi_get_drvdata(func); + + wfx_release(bus->core); +} + +/* For dynamic driver binding, kernel does not use OF to match driver. It only + * use modalias and modalias is a copy of 'compatible' DT node with vendor + * stripped. 
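+ *
+ * For example, a node with compatible = "silabs,wf200" yields the modalias "wf200", which is why
+ * the table below lists the chip and board names without the "silabs," vendor prefix.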
+ */ +static const struct spi_device_id wfx_spi_id[] = { + { "wf200", (kernel_ulong_t)&pdata_wf200 }, + { "brd4001a", (kernel_ulong_t)&pdata_brd4001a }, + { "brd8022a", (kernel_ulong_t)&pdata_brd8022a }, + { "brd8023a", (kernel_ulong_t)&pdata_brd8023a }, + { }, +}; +MODULE_DEVICE_TABLE(spi, wfx_spi_id); + +#ifdef CONFIG_OF +static const struct of_device_id wfx_spi_of_match[] = { + { .compatible = "silabs,wf200" }, + { .compatible = "silabs,brd4001a" }, + { .compatible = "silabs,brd8022a" }, + { .compatible = "silabs,brd8023a" }, + { }, +}; +MODULE_DEVICE_TABLE(of, wfx_spi_of_match); +#endif + +struct spi_driver wfx_spi_driver = { + .driver = { + .name = "wfx-spi", + .of_match_table = of_match_ptr(wfx_spi_of_match), + }, + .id_table = wfx_spi_id, + .probe = wfx_spi_probe, + .remove = wfx_spi_remove, +}; diff --git a/drivers/net/wireless/silabs/wfx/data_rx.c b/drivers/net/wireless/silabs/wfx/data_rx.c new file mode 100644 index 000000000000..a4b5ffe158e4 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/data_rx.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Data receiving implementation. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "data_rx.h" +#include "wfx.h" +#include "bh.h" +#include "sta.h" + +static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt) +{ + int params, tid; + + if (wfx_api_older_than(wvif->wdev, 3, 6)) + return; + + switch (mgmt->u.action.u.addba_req.action_code) { + case WLAN_ACTION_ADDBA_REQ: + params = le16_to_cpu(mgmt->u.action.u.addba_req.capab); + tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; + ieee80211_start_rx_ba_session_offl(wvif->vif, mgmt->sa, tid); + break; + case WLAN_ACTION_DELBA: + params = le16_to_cpu(mgmt->u.action.u.delba.params); + tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; + ieee80211_stop_rx_ba_session_offl(wvif->vif, mgmt->sa, tid); + break; + } +} + +void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb) +{ + struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb); + struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + + memset(hdr, 0, sizeof(*hdr)); + + if (arg->status == HIF_STATUS_RX_FAIL_MIC) + hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED; + else if (arg->status) + goto drop; + + if (skb->len < sizeof(struct ieee80211_pspoll)) { + dev_warn(wvif->wdev->dev, "malformed SDU received\n"); + goto drop; + } + + hdr->band = NL80211_BAND_2GHZ; + hdr->freq = ieee80211_channel_to_frequency(arg->channel_number, hdr->band); + + if (arg->rxed_rate >= 14) { + hdr->encoding = RX_ENC_HT; + hdr->rate_idx = arg->rxed_rate - 14; + } else if (arg->rxed_rate >= 4) { + hdr->rate_idx = arg->rxed_rate - 2; + } else { + hdr->rate_idx = arg->rxed_rate; + } + + if (!arg->rcpi_rssi) { + hdr->flag |= RX_FLAG_NO_SIGNAL_VAL; + dev_info(wvif->wdev->dev, "received frame without RSSI data\n"); + } + hdr->signal = arg->rcpi_rssi / 2 - 110; + hdr->antenna = 0; + + if (arg->encryp) + hdr->flag |= RX_FLAG_DECRYPTED; + + /* Block ack negotiation is offloaded by the firmware. However, re-ordering must be done by + * the mac80211. 
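+ * The ADDBA/DELBA action frames handled by wfx_rx_handle_ba() above are therefore consumed
+ * (dropped) here instead of being forwarded to mac80211.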
+ */ + if (ieee80211_is_action(frame->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_BACK && + skb->len > IEEE80211_MIN_ACTION_SIZE) { + wfx_rx_handle_ba(wvif, mgmt); + goto drop; + } + + ieee80211_rx_irqsafe(wvif->wdev->hw, skb); + return; + +drop: + dev_kfree_skb(skb); +} diff --git a/drivers/net/wireless/silabs/wfx/data_rx.h b/drivers/net/wireless/silabs/wfx/data_rx.h new file mode 100644 index 000000000000..cf708f16d602 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/data_rx.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Data receiving implementation. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_DATA_RX_H +#define WFX_DATA_RX_H + +struct wfx_vif; +struct sk_buff; +struct wfx_hif_ind_rx; + +void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c new file mode 100644 index 000000000000..e07381b2ff4d --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/data_tx.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Data transmitting implementation. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "data_tx.h" +#include "wfx.h" +#include "bh.h" +#include "sta.h" +#include "queue.h" +#include "debug.h" +#include "traces.h" +#include "hif_tx_mib.h" + +static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate) +{ + struct ieee80211_supported_band *band; + + if (rate->idx < 0) + return -1; + if (rate->flags & IEEE80211_TX_RC_MCS) { + if (rate->idx > 7) { + WARN(1, "wrong rate->idx value: %d", rate->idx); + return -1; + } + return rate->idx + 14; + } + /* The device only support 2GHz, else band information should be retrieved from + * ieee80211_tx_info + */ + band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; + if (rate->idx >= band->n_bitrates) { + WARN(1, "wrong rate->idx value: %d", rate->idx); + return -1; + } + return band->bitrates[rate->idx].hw_value; +} + +/* TX policy cache implementation */ + +static void wfx_tx_policy_build(struct wfx_vif *wvif, struct wfx_tx_policy *policy, + struct ieee80211_tx_rate *rates) +{ + struct wfx_dev *wdev = wvif->wdev; + int i, rateid; + u8 count; + + WARN(rates[0].idx < 0, "invalid rate policy"); + memset(policy, 0, sizeof(*policy)); + for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) { + if (rates[i].idx < 0) + break; + WARN_ON(rates[i].count > 15); + rateid = wfx_get_hw_rate(wdev, &rates[i]); + /* Pack two values in each byte of policy->rates */ + count = rates[i].count; + if (rateid % 2) + count <<= 4; + policy->rates[rateid / 2] |= count; + } +} + +static bool wfx_tx_policy_is_equal(const struct wfx_tx_policy *a, const struct wfx_tx_policy *b) +{ + return !memcmp(a->rates, b->rates, sizeof(a->rates)); +} + +static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted) +{ + struct wfx_tx_policy *it; + + list_for_each_entry(it, &cache->used, link) + if (wfx_tx_policy_is_equal(wanted, it)) + return it - cache->cache; + list_for_each_entry(it, &cache->free, link) + if (wfx_tx_policy_is_equal(wanted, it)) + return it - cache->cache; + return -1; +} + +static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry) +{ + ++entry->usage_count; + list_move(&entry->link, &cache->used); +} + +static int 
wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry) +{ + int ret = --entry->usage_count; + + if (!ret) + list_move(&entry->link, &cache->free); + return ret; +} + +static int wfx_tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates, bool *renew) +{ + int idx; + struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; + struct wfx_tx_policy wanted; + struct wfx_tx_policy *entry; + + wfx_tx_policy_build(wvif, &wanted, rates); + + spin_lock_bh(&cache->lock); + if (list_empty(&cache->free)) { + WARN(1, "unable to get a valid Tx policy"); + spin_unlock_bh(&cache->lock); + return HIF_TX_RETRY_POLICY_INVALID; + } + idx = wfx_tx_policy_find(cache, &wanted); + if (idx >= 0) { + *renew = false; + } else { + /* If policy is not found create a new one using the oldest entry in "free" list */ + *renew = true; + entry = list_entry(cache->free.prev, struct wfx_tx_policy, link); + memcpy(entry->rates, wanted.rates, sizeof(entry->rates)); + entry->uploaded = false; + entry->usage_count = 0; + idx = entry - cache->cache; + } + wfx_tx_policy_use(cache, &cache->cache[idx]); + if (list_empty(&cache->free)) + ieee80211_stop_queues(wvif->wdev->hw); + spin_unlock_bh(&cache->lock); + return idx; +} + +static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx) +{ + int usage, locked; + struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; + + if (idx == HIF_TX_RETRY_POLICY_INVALID) + return; + spin_lock_bh(&cache->lock); + locked = list_empty(&cache->free); + usage = wfx_tx_policy_release(cache, &cache->cache[idx]); + if (locked && !usage) + ieee80211_wake_queues(wvif->wdev->hw); + spin_unlock_bh(&cache->lock); +} + +static int wfx_tx_policy_upload(struct wfx_vif *wvif) +{ + struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache; + u8 tmp_rates[12]; + int i, is_used; + + do { + spin_lock_bh(&wvif->tx_policy_cache.lock); + for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) { + is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates)); + if (!policies[i].uploaded && is_used) + break; + } + if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) { + policies[i].uploaded = true; + memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates)); + spin_unlock_bh(&wvif->tx_policy_cache.lock); + wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates); + } else { + spin_unlock_bh(&wvif->tx_policy_cache.lock); + } + } while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)); + return 0; +} + +void wfx_tx_policy_upload_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work); + + wfx_tx_policy_upload(wvif); + wfx_tx_unlock(wvif->wdev); +} + +void wfx_tx_policy_init(struct wfx_vif *wvif) +{ + struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; + int i; + + memset(cache, 0, sizeof(*cache)); + + spin_lock_init(&cache->lock); + INIT_LIST_HEAD(&cache->used); + INIT_LIST_HEAD(&cache->free); + + for (i = 0; i < ARRAY_SIZE(cache->cache); ++i) + list_add(&cache->cache[i].link, &cache->free); +} + +/* Tx implementation */ + +static bool wfx_is_action_back(struct ieee80211_hdr *hdr) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr; + + if (!ieee80211_is_action(mgmt->frame_control)) + return false; + if (mgmt->u.action.category != WLAN_CATEGORY_BACK) + return false; + return true; +} + +static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr) +{ + struct wfx_sta_priv *sta_priv = sta ? 
(struct wfx_sta_priv *)&sta->drv_priv : NULL; + const u8 *da = ieee80211_get_DA(hdr); + + if (sta_priv && sta_priv->link_id) + return sta_priv->link_id; + if (wvif->vif->type != NL80211_IFTYPE_AP) + return 0; + if (is_multicast_ether_addr(da)) + return 0; + return HIF_LINK_ID_NOT_ASSOCIATED; +} + +static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) +{ + int i; + bool finished; + + /* Firmware is not able to mix rates with different flags */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + rates[i].flags |= IEEE80211_TX_RC_SHORT_GI; + if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI)) + rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; + if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)) + rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; + } + + /* Sort rates and remove duplicates */ + do { + finished = true; + for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) { + if (rates[i + 1].idx == rates[i].idx && + rates[i].idx != -1) { + rates[i].count += rates[i + 1].count; + if (rates[i].count > 15) + rates[i].count = 15; + rates[i + 1].idx = -1; + rates[i + 1].count = 0; + + finished = false; + } + if (rates[i + 1].idx > rates[i].idx) { + swap(rates[i + 1], rates[i]); + finished = false; + } + } + } while (!finished); + /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + if (rates[i].idx == 0) + break; + if (rates[i].idx == -1) { + rates[i].idx = 0; + rates[i].count = 8; /* == hw->max_rate_tries */ + rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS; + break; + } + } + /* All retries use long GI */ + for (i = 1; i < IEEE80211_TX_MAX_RATES; i++) + rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; +} + +static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info) +{ + bool tx_policy_renew = false; + u8 ret; + + ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew); + if (ret == HIF_TX_RETRY_POLICY_INVALID) + dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy"); + + if (tx_policy_renew) { + wfx_tx_lock(wvif->wdev); + if (!schedule_work(&wvif->tx_policy_upload_work)) + wfx_tx_unlock(wvif->wdev); + } + return ret; +} + +static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info) +{ + if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS)) + return HIF_FRAME_FORMAT_NON_HT; + else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)) + return HIF_FRAME_FORMAT_MIXED_FORMAT_HT; + else + return HIF_FRAME_FORMAT_GF_HT_11N; +} + +static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key) +{ + int mic_space; + + if (!hw_key) + return 0; + if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + return 0; + mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 
8 : 0; + return hw_key->icv_len + mic_space; +} + +static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb) +{ + struct wfx_hif_msg *hif_msg; + struct wfx_hif_req_tx *req; + struct wfx_tx_priv *tx_priv; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int queue_id = skb_get_queue_mapping(skb); + size_t offset = (size_t)skb->data & 3; + int wmsg_len = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + offset; + + WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id"); + wfx_tx_fixup_rates(tx_info->driver_rates); + + /* From now tx_info->control is unusable */ + memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv)); + /* Fill tx_priv */ + tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data; + tx_priv->icv_size = wfx_tx_get_icv_len(hw_key); + + /* Fill hif_msg */ + WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb"); + WARN(offset & 1, "attempt to transmit an unaligned frame"); + skb_put(skb, tx_priv->icv_size); + skb_push(skb, wmsg_len); + memset(skb->data, 0, wmsg_len); + hif_msg = (struct wfx_hif_msg *)skb->data; + hif_msg->len = cpu_to_le16(skb->len); + hif_msg->id = HIF_REQ_ID_TX; + hif_msg->interface = wvif->id; + if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) { + dev_warn(wvif->wdev->dev, + "requested frame size (%d) is larger than maximum supported (%d)\n", + skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)); + skb_pull(skb, wmsg_len); + return -EIO; + } + + /* Fill tx request */ + req = (struct wfx_hif_req_tx *)hif_msg->body; + /* packet_id just need to be unique on device. 32bits are more than necessary for that task, + * so we take advantage of it to add some extra data for debug. + */ + req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF; + req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16; + req->packet_id |= queue_id << 28; + + req->fc_offset = offset; + /* Queue index are inverted between firmware and Linux */ + req->queue_id = 3 - queue_id; + req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr); + req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info); + req->frame_format = wfx_tx_get_frame_format(tx_info); + if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + req->short_gi = 1; + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + req->after_dtim = 1; + + /* Auxiliary operations */ + wfx_tx_queues_put(wvif, skb); + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + schedule_work(&wvif->update_tim_work); + wfx_bh_request_tx(wvif->wdev); + return 0; +} + +void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif; + struct ieee80211_sta *sta = control ? 
control->sta : NULL; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data); + + BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room, + "struct tx_priv is too large"); + WARN(skb->next || skb->prev, "skb is already member of a list"); + /* control.vif can be NULL for injected frames */ + if (tx_info->control.vif) + wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv; + else + wvif = wvif_iterate(wdev, NULL); + if (WARN_ON(!wvif)) + goto drop; + /* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session + * management frame. The check below exist just in case. + */ + if (wfx_is_action_back(hdr)) { + dev_info(wdev->dev, "drop BA action\n"); + goto drop; + } + if (wfx_tx_inner(wvif, sta, skb)) + goto drop; + + return; + +drop: + ieee80211_tx_status_irqsafe(wdev->hw, skb); +} + +static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb) +{ + struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; + struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; + unsigned int offset = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + + req->fc_offset; + + if (!wvif) { + pr_warn("vif associated with the skb does not exist anymore\n"); + return; + } + wfx_tx_policy_put(wvif, req->retry_policy_index); + skb_pull(skb, offset); + ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb); +} + +static void wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info, + const struct wfx_hif_cnf_tx *arg) +{ + struct ieee80211_tx_rate *rate; + int tx_count; + int i; + + tx_count = arg->ack_failures; + if (!arg->status || arg->ack_failures) + tx_count += 1; /* Also report success */ + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + rate = &tx_info->status.rates[i]; + if (rate->idx < 0) + break; + if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES && + arg->ack_failures) + dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n", + rate->count, tx_count); + if (tx_count <= rate->count && tx_count && + arg->txed_rate != wfx_get_hw_rate(wdev, rate)) + dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n", + arg->txed_rate, wfx_get_hw_rate(wdev, rate)); + if (tx_count > rate->count) { + tx_count -= rate->count; + } else if (!tx_count) { + rate->count = 0; + rate->idx = -1; + } else { + rate->count = tx_count; + tx_count = 0; + } + } + if (tx_count) + dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count); +} + +void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg) +{ + const struct wfx_tx_priv *tx_priv; + struct ieee80211_tx_info *tx_info; + struct wfx_vif *wvif; + struct sk_buff *skb; + + skb = wfx_pending_get(wdev, arg->packet_id); + if (!skb) { + dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n", + arg->packet_id); + return; + } + tx_info = IEEE80211_SKB_CB(skb); + tx_priv = wfx_skb_tx_priv(skb); + wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface); + WARN_ON(!wvif); + if (!wvif) + return; + + /* Note that wfx_pending_get_pkt_us_delay() get data from tx_info */ + _trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb)); + wfx_tx_fill_rates(wdev, tx_info, arg); + skb_trim(skb, skb->len - tx_priv->icv_size); + + /* From now, you can touch to tx_info->status, but do not touch to tx_priv anymore */ + /* FIXME: use ieee80211_tx_info_clear_status() */ + 
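/* Only the driver-private data and the padding are cleared here: the rates[] filled by
+ * wfx_tx_fill_rates() above must be preserved for the status report.
+ */
+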
memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data)); + memset(tx_info->pad, 0, sizeof(tx_info->pad)); + + if (!arg->status) { + tx_info->status.tx_time = le32_to_cpu(arg->media_delay) - + le32_to_cpu(arg->tx_queue_delay); + if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) + tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + else + tx_info->flags |= IEEE80211_TX_STAT_ACK; + } else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) { + WARN(!arg->requeue, "incoherent status and result_flags"); + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */ + schedule_work(&wvif->update_tim_work); + } + tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + } + wfx_skb_dtor(wvif, skb); +} + +static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues, struct sk_buff_head *dropped) +{ + struct wfx_queue *queue; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (!(BIT(i) & queues)) + continue; + queue = &wvif->tx_queue[i]; + if (dropped) + wfx_tx_queue_drop(wvif, queue, dropped); + } + if (wvif->wdev->chip_frozen) + return; + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (!(BIT(i) & queues)) + continue; + queue = &wvif->tx_queue[i]; + if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue), + msecs_to_jiffies(1000)) <= 0) + dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?"); + } +} + +void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) +{ + struct wfx_dev *wdev = hw->priv; + struct sk_buff_head dropped; + struct wfx_vif *wvif; + struct wfx_hif_msg *hif; + struct sk_buff *skb; + + skb_queue_head_init(&dropped); + if (vif) { + wvif = (struct wfx_vif *)vif->drv_priv; + wfx_flush_vif(wvif, queues, drop ? &dropped : NULL); + } else { + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_flush_vif(wvif, queues, drop ? &dropped : NULL); + } + wfx_tx_flush(wdev); + if (wdev->chip_frozen) + wfx_pending_drop(wdev, &dropped); + while ((skb = skb_dequeue(&dropped)) != NULL) { + hif = (struct wfx_hif_msg *)skb->data; + wvif = wdev_to_wvif(wdev, hif->interface); + ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb)); + wfx_skb_dtor(wvif, skb); + } +} diff --git a/drivers/net/wireless/silabs/wfx/data_tx.h b/drivers/net/wireless/silabs/wfx/data_tx.h new file mode 100644 index 000000000000..983470705e4b --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/data_tx.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Data transmitting implementation. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_DATA_TX_H +#define WFX_DATA_TX_H + +#include +#include + +#include "hif_api_cmd.h" +#include "hif_api_mib.h" + +struct wfx_tx_priv; +struct wfx_dev; +struct wfx_vif; + +struct wfx_tx_policy { + struct list_head link; + int usage_count; + u8 rates[12]; + bool uploaded; +}; + +struct wfx_tx_policy_cache { + struct wfx_tx_policy cache[HIF_TX_RETRY_POLICY_MAX]; + /* FIXME: use a trees and drop hash from tx_policy */ + struct list_head used; + struct list_head free; + spinlock_t lock; +}; + +struct wfx_tx_priv { + ktime_t xmit_timestamp; + unsigned char icv_size; +}; + +void wfx_tx_policy_init(struct wfx_vif *wvif); +void wfx_tx_policy_upload_work(struct work_struct *work); + +void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); +void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg); +void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); + +static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb) +{ + struct ieee80211_tx_info *tx_info; + + if (!skb) + return NULL; + tx_info = IEEE80211_SKB_CB(skb); + return (struct wfx_tx_priv *)tx_info->rate_driver_data; +} + +static inline struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb) +{ + struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; + struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; + + return req; +} + +#endif diff --git a/drivers/net/wireless/silabs/wfx/debug.c b/drivers/net/wireless/silabs/wfx/debug.c new file mode 100644 index 000000000000..e8265208f9a5 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/debug.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Debugfs interface. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include +#include + +#include "debug.h" +#include "wfx.h" +#include "sta.h" +#include "main.h" +#include "hif_tx.h" +#include "hif_tx_mib.h" + +#define CREATE_TRACE_POINTS +#include "traces.h" + +static const struct trace_print_flags hif_msg_print_map[] = { + hif_msg_list, +}; + +static const struct trace_print_flags hif_mib_print_map[] = { + hif_mib_list, +}; + +static const struct trace_print_flags wfx_reg_print_map[] = { + wfx_reg_list, +}; + +static const char *get_symbol(unsigned long val, const struct trace_print_flags *symbol_array) +{ + int i; + + for (i = 0; symbol_array[i].mask != -1; i++) { + if (val == symbol_array[i].mask) + return symbol_array[i].name; + } + + return "unknown"; +} + +const char *wfx_get_hif_name(unsigned long id) +{ + return get_symbol(id, hif_msg_print_map); +} + +const char *wfx_get_mib_name(unsigned long id) +{ + return get_symbol(id, hif_mib_print_map); +} + +const char *wfx_get_reg_name(unsigned long id) +{ + return get_symbol(id, wfx_reg_print_map); +} + +static int wfx_counters_show(struct seq_file *seq, void *v) +{ + int ret, i; + struct wfx_dev *wdev = seq->private; + struct wfx_hif_mib_extended_count_table counters[3]; + + for (i = 0; i < ARRAY_SIZE(counters); i++) { + ret = wfx_hif_get_counters_table(wdev, i, counters + i); + if (ret < 0) + return ret; + if (ret > 0) + return -EIO; + } + + seq_printf(seq, "%-24s %12s %12s %12s\n", "", "global", "iface 0", "iface 1"); + +#define PUT_COUNTER(name) \ + seq_printf(seq, "%-24s %12d %12d %12d\n", #name, \ + le32_to_cpu(counters[2].count_##name), \ + le32_to_cpu(counters[0].count_##name), \ + le32_to_cpu(counters[1].count_##name)) + + PUT_COUNTER(tx_frames); + PUT_COUNTER(tx_frames_multicast); + PUT_COUNTER(tx_frames_success); + PUT_COUNTER(tx_frames_retried); + PUT_COUNTER(tx_frames_multi_retried); + PUT_COUNTER(tx_frames_failed); + + PUT_COUNTER(ack_failed); + PUT_COUNTER(rts_success); + PUT_COUNTER(rts_failed); + + PUT_COUNTER(rx_frames); + PUT_COUNTER(rx_frames_multicast); + PUT_COUNTER(rx_frames_success); + PUT_COUNTER(rx_frames_failed); + PUT_COUNTER(drop_plcp); + PUT_COUNTER(drop_fcs); + PUT_COUNTER(drop_no_key); + PUT_COUNTER(drop_decryption); + PUT_COUNTER(drop_tkip_mic); + PUT_COUNTER(drop_bip_mic); + PUT_COUNTER(drop_cmac_icv); + PUT_COUNTER(drop_cmac_replay); + PUT_COUNTER(drop_ccmp_replay); + PUT_COUNTER(drop_duplicate); + + PUT_COUNTER(rx_bcn_miss); + PUT_COUNTER(rx_bcn_success); + PUT_COUNTER(rx_bcn_dtim); + PUT_COUNTER(rx_bcn_dtim_aid0_clr); + PUT_COUNTER(rx_bcn_dtim_aid0_set); + +#undef PUT_COUNTER + + for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++) + seq_printf(seq, "reserved[%02d]%12s %12d %12d %12d\n", i, "", + le32_to_cpu(counters[2].reserved[i]), + le32_to_cpu(counters[0].reserved[i]), + le32_to_cpu(counters[1].reserved[i])); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wfx_counters); + +static const char * const channel_names[] = { + [0] = "1M", + [1] = "2M", + [2] = "5.5M", + [3] = "11M", + /* Entries 4 and 5 does not exist */ + [6] = "6M", + [7] = "9M", + [8] = "12M", + [9] = "18M", + [10] = "24M", + [11] = "36M", + [12] = "48M", + [13] = "54M", + [14] = "MCS0", + [15] = "MCS1", + [16] = "MCS2", + [17] = "MCS3", + [18] = "MCS4", + [19] = "MCS5", + [20] = "MCS6", + [21] = "MCS7", +}; + +static int wfx_rx_stats_show(struct seq_file *seq, void *v) +{ + struct wfx_dev *wdev = seq->private; + struct wfx_hif_rx_stats *st = &wdev->rx_stats; + int i; + + mutex_lock(&wdev->rx_stats_lock); + seq_printf(seq, "Timestamp: %dus\n", 
st->date); + seq_printf(seq, "Low power clock: frequency %uHz, external %s\n", + le32_to_cpu(st->pwr_clk_freq), st->is_ext_pwr_clk ? "yes" : "no"); + seq_printf(seq, "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n", + st->nb_rx_frame, st->per_total, st->throughput); + seq_puts(seq, " Num. of PER RSSI SNR CFO\n"); + seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n"); + for (i = 0; i < ARRAY_SIZE(channel_names); i++) { + if (channel_names[i]) + seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n", + channel_names[i], + le32_to_cpu(st->nb_rx_by_rate[i]), + le16_to_cpu(st->per[i]), + (s16)le16_to_cpu(st->rssi[i]) / 100, + (s16)le16_to_cpu(st->snr[i]) / 100, + (s16)le16_to_cpu(st->cfo[i])); + } + mutex_unlock(&wdev->rx_stats_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats); + +static int wfx_tx_power_loop_show(struct seq_file *seq, void *v) +{ + struct wfx_dev *wdev = seq->private; + struct wfx_hif_tx_power_loop_info *st = &wdev->tx_power_loop_info; + int tmp; + + mutex_lock(&wdev->tx_power_loop_info_lock); + tmp = le16_to_cpu(st->tx_gain_dig); + seq_printf(seq, "Tx gain digital: %d\n", tmp); + tmp = le16_to_cpu(st->tx_gain_pa); + seq_printf(seq, "Tx gain PA: %d\n", tmp); + tmp = (s16)le16_to_cpu(st->target_pout); + seq_printf(seq, "Target Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25); + tmp = (s16)le16_to_cpu(st->p_estimation); + seq_printf(seq, "FEM Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25); + tmp = le16_to_cpu(st->vpdet); + seq_printf(seq, "Vpdet: %d mV\n", tmp); + seq_printf(seq, "Measure index: %d\n", st->measurement_index); + mutex_unlock(&wdev->tx_power_loop_info_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wfx_tx_power_loop); + +static ssize_t wfx_send_pds_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wfx_dev *wdev = file->private_data; + char *buf; + int ret; + + if (*ppos != 0) { + dev_dbg(wdev->dev, "PDS data must be written in one transaction"); + return -EBUSY; + } + buf = memdup_user(user_buf, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + *ppos = *ppos + count; + ret = wfx_send_pds(wdev, buf, count); + kfree(buf); + if (ret < 0) + return ret; + return count; +} + +static const struct file_operations wfx_send_pds_fops = { + .open = simple_open, + .write = wfx_send_pds_write, +}; + +struct dbgfs_hif_msg { + struct wfx_dev *wdev; + struct completion complete; + u8 reply[1024]; + int ret; +}; + +static ssize_t wfx_send_hif_msg_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dbgfs_hif_msg *context = file->private_data; + struct wfx_dev *wdev = context->wdev; + struct wfx_hif_msg *request; + + if (completion_done(&context->complete)) { + dev_dbg(wdev->dev, "read previous result before start a new one\n"); + return -EBUSY; + } + if (count < sizeof(struct wfx_hif_msg)) + return -EINVAL; + + /* wfx_cmd_send() checks that reply buffer is wide enough, but does not return precise + * length read. User have to know how many bytes should be read. Filling reply buffer with a + * memory pattern may help user. 
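+ *
+ * Illustrative (untested) usage from user space, assuming the usual debugfs layout where this
+ * file ends up under /sys/kernel/debug/ieee80211/phy0/wfx/:
+ *   cat request.bin > send_hif_msg
+ *   dd if=send_hif_msg bs=1024 count=1 of=reply.bin
+ * The end of the useful reply can then be spotted where the 0xFF filler pattern begins.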
+ */ + memset(context->reply, 0xFF, sizeof(context->reply)); + request = memdup_user(user_buf, count); + if (IS_ERR(request)) + return PTR_ERR(request); + if (le16_to_cpu(request->len) != count) { + kfree(request); + return -EINVAL; + } + context->ret = wfx_cmd_send(wdev, request, context->reply, sizeof(context->reply), false); + + kfree(request); + complete(&context->complete); + return count; +} + +static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dbgfs_hif_msg *context = file->private_data; + int ret; + + if (count > sizeof(context->reply)) + return -EINVAL; + ret = wait_for_completion_interruptible(&context->complete); + if (ret) + return ret; + if (context->ret < 0) + return context->ret; + /* Be careful, write() is waiting for a full message while read() only returns a payload */ + if (copy_to_user(user_buf, context->reply, count)) + return -EFAULT; + + return count; +} + +static int wfx_send_hif_msg_open(struct inode *inode, struct file *file) +{ + struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL); + + if (!context) + return -ENOMEM; + context->wdev = inode->i_private; + init_completion(&context->complete); + file->private_data = context; + return 0; +} + +static int wfx_send_hif_msg_release(struct inode *inode, struct file *file) +{ + struct dbgfs_hif_msg *context = file->private_data; + + kfree(context); + return 0; +} + +static const struct file_operations wfx_send_hif_msg_fops = { + .open = wfx_send_hif_msg_open, + .release = wfx_send_hif_msg_release, + .write = wfx_send_hif_msg_write, + .read = wfx_send_hif_msg_read, +}; + +int wfx_debug_init(struct wfx_dev *wdev) +{ + struct dentry *d; + + d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir); + debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops); + debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops); + debugfs_create_file("tx_power_loop", 0444, d, wdev, &wfx_tx_power_loop_fops); + debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops); + debugfs_create_file("send_hif_msg", 0600, d, wdev, &wfx_send_hif_msg_fops); + + return 0; +} diff --git a/drivers/net/wireless/silabs/wfx/debug.h b/drivers/net/wireless/silabs/wfx/debug.h new file mode 100644 index 000000000000..3840575e5e28 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/debug.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Debugfs interface. + * + * Copyright (c) 2017-2019, Silicon Laboratories, Inc. + * Copyright (c) 2011, ST-Ericsson + */ +#ifndef WFX_DEBUG_H +#define WFX_DEBUG_H + +struct wfx_dev; + +int wfx_debug_init(struct wfx_dev *wdev); + +const char *wfx_get_hif_name(unsigned long id); +const char *wfx_get_mib_name(unsigned long id); +const char *wfx_get_reg_name(unsigned long id); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/fwio.c b/drivers/net/wireless/silabs/wfx/fwio.c new file mode 100644 index 000000000000..3d1b8a135dc0 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/fwio.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Firmware loading. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include +#include +#include + +#include "fwio.h" +#include "wfx.h" +#include "hwio.h" + +/* Addresses below are in SRAM area */ +#define WFX_DNLD_FIFO 0x09004000 +#define DNLD_BLOCK_SIZE 0x0400 +#define DNLD_FIFO_SIZE 0x8000 /* (32 * DNLD_BLOCK_SIZE) */ +/* Download Control Area (DCA) */ +#define WFX_DCA_IMAGE_SIZE 0x0900C000 +#define WFX_DCA_PUT 0x0900C004 +#define WFX_DCA_GET 0x0900C008 +#define WFX_DCA_HOST_STATUS 0x0900C00C +#define HOST_READY 0x87654321 +#define HOST_INFO_READ 0xA753BD99 +#define HOST_UPLOAD_PENDING 0xABCDDCBA +#define HOST_UPLOAD_COMPLETE 0xD4C64A99 +#define HOST_OK_TO_JUMP 0x174FC882 +#define WFX_DCA_NCP_STATUS 0x0900C010 +#define NCP_NOT_READY 0x12345678 +#define NCP_READY 0x87654321 +#define NCP_INFO_READY 0xBD53EF99 +#define NCP_DOWNLOAD_PENDING 0xABCDDCBA +#define NCP_DOWNLOAD_COMPLETE 0xCAFEFECA +#define NCP_AUTH_OK 0xD4C64A99 +#define NCP_AUTH_FAIL 0x174FC882 +#define NCP_PUB_KEY_RDY 0x7AB41D19 +#define WFX_DCA_FW_SIGNATURE 0x0900C014 +#define FW_SIGNATURE_SIZE 0x40 +#define WFX_DCA_FW_HASH 0x0900C054 +#define FW_HASH_SIZE 0x08 +#define WFX_DCA_FW_VERSION 0x0900C05C +#define FW_VERSION_SIZE 0x04 +#define WFX_DCA_RESERVED 0x0900C060 +#define DCA_RESERVED_SIZE 0x20 +#define WFX_STATUS_INFO 0x0900C080 +#define WFX_BOOTLOADER_LABEL 0x0900C084 +#define BOOTLOADER_LABEL_SIZE 0x3C +#define WFX_PTE_INFO 0x0900C0C0 +#define PTE_INFO_KEYSET_IDX 0x0D +#define PTE_INFO_SIZE 0x10 +#define WFX_ERR_INFO 0x0900C0D0 +#define ERR_INVALID_SEC_TYPE 0x05 +#define ERR_SIG_VERIF_FAILED 0x0F +#define ERR_AES_CTRL_KEY 0x10 +#define ERR_ECC_PUB_KEY 0x11 +#define ERR_MAC_KEY 0x18 + +#define DCA_TIMEOUT 50 /* milliseconds */ +#define WAKEUP_TIMEOUT 200 /* milliseconds */ + +static const char * const fwio_errors[] = { + [ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption", + [ERR_SIG_VERIF_FAILED] = "Signature verification failed", + [ERR_AES_CTRL_KEY] = "AES control key not initialized", + [ERR_ECC_PUB_KEY] = "ECC public key not initialized", + [ERR_MAC_KEY] = "MAC key not initialized", +}; + +/* request_firmware() allocate data using vmalloc(). It is not compatible with underlying hardware + * that use DMA. Function below detect this case and allocate a bounce buffer if necessary. + * + * Notice that, in doubt, you can enable CONFIG_DEBUG_SG to ask kernel to detect this problem at + * runtime (else, kernel silently fail). 
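+ * (virt_addr_valid() returns false for a vmalloc()ed buffer, which is how the helper below
+ * decides whether it needs to bounce.)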
+ * + * NOTE: it may also be possible to use 'pages' from struct firmware and avoid bounce buffer + */ +static int wfx_sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf, size_t len) +{ + int ret; + const u8 *tmp; + + if (!virt_addr_valid(buf)) { + tmp = kmemdup(buf, len, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + } else { + tmp = buf; + } + ret = wfx_sram_buf_write(wdev, addr, tmp, len); + if (tmp != buf) + kfree(tmp); + return ret; +} + +static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip, + const struct firmware **fw, int *file_offset) +{ + int keyset_file; + char filename[256]; + const char *data; + int ret; + + snprintf(filename, sizeof(filename), "%s_%02X.sec", + wdev->pdata.file_fw, keyset_chip); + ret = firmware_request_nowarn(fw, filename, wdev->dev); + if (ret) { + dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n", + filename, wdev->pdata.file_fw); + snprintf(filename, sizeof(filename), "%s.sec", wdev->pdata.file_fw); + ret = request_firmware(fw, filename, wdev->dev); + if (ret) { + dev_err(wdev->dev, "can't load %s\n", filename); + *fw = NULL; + return ret; + } + } + + data = (*fw)->data; + if (memcmp(data, "KEYSET", 6) != 0) { + /* Legacy firmware format */ + *file_offset = 0; + keyset_file = 0x90; + } else { + *file_offset = 8; + keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]); + if (keyset_file < 0) { + dev_err(wdev->dev, "%s corrupted\n", filename); + release_firmware(*fw); + *fw = NULL; + return -EINVAL; + } + } + if (keyset_file != keyset_chip) { + dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n", + keyset_file, keyset_chip); + release_firmware(*fw); + *fw = NULL; + return -ENODEV; + } + wdev->keyset = keyset_file; + return 0; +} + +static int wait_ncp_status(struct wfx_dev *wdev, u32 status) +{ + ktime_t now, start; + u32 reg; + int ret; + + start = ktime_get(); + for (;;) { + ret = wfx_sram_reg_read(wdev, WFX_DCA_NCP_STATUS, ®); + if (ret < 0) + return -EIO; + now = ktime_get(); + if (reg == status) + break; + if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT))) + return -ETIMEDOUT; + } + if (ktime_compare(now, start)) + dev_dbg(wdev->dev, "chip answer after %lldus\n", ktime_us_delta(now, start)); + else + dev_dbg(wdev->dev, "chip answer immediately\n"); + return 0; +} + +static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len) +{ + int ret; + u32 offs, bytes_done = 0; + ktime_t now, start; + + if (len % DNLD_BLOCK_SIZE) { + dev_err(wdev->dev, "firmware size is not aligned. 
Buffer overrun will occur\n"); + return -EIO; + } + offs = 0; + while (offs < len) { + start = ktime_get(); + for (;;) { + now = ktime_get(); + if (offs + DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE) + break; + if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT))) + return -ETIMEDOUT; + ret = wfx_sram_reg_read(wdev, WFX_DCA_GET, &bytes_done); + if (ret < 0) + return ret; + } + if (ktime_compare(now, start)) + dev_dbg(wdev->dev, "answer after %lldus\n", ktime_us_delta(now, start)); + + ret = wfx_sram_write_dma_safe(wdev, WFX_DNLD_FIFO + (offs % DNLD_FIFO_SIZE), + data + offs, DNLD_BLOCK_SIZE); + if (ret < 0) + return ret; + + /* The device seems to not support writing 0 in this register during first loop */ + offs += DNLD_BLOCK_SIZE; + ret = wfx_sram_reg_write(wdev, WFX_DCA_PUT, offs); + if (ret < 0) + return ret; + } + return 0; +} + +static void print_boot_status(struct wfx_dev *wdev) +{ + u32 reg; + + wfx_sram_reg_read(wdev, WFX_STATUS_INFO, ®); + if (reg == 0x12345678) + return; + wfx_sram_reg_read(wdev, WFX_ERR_INFO, ®); + if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg]) + dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]); + else + dev_info(wdev->dev, "secure boot: Error %#02x\n", reg); +} + +static int load_firmware_secure(struct wfx_dev *wdev) +{ + const struct firmware *fw = NULL; + int header_size; + int fw_offset; + ktime_t start; + u8 *buf; + int ret; + + BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE); + buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY); + ret = wait_ncp_status(wdev, NCP_INFO_READY); + if (ret) + goto error; + + wfx_sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE); + buf[BOOTLOADER_LABEL_SIZE] = 0; + dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf); + + wfx_sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE); + ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset); + if (ret) + goto error; + header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE; + + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ); + ret = wait_ncp_status(wdev, NCP_READY); + if (ret) + goto error; + + wfx_sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); /* Fifo init */ + wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00", FW_VERSION_SIZE); + wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset, + FW_SIGNATURE_SIZE); + wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_HASH, fw->data + fw_offset + FW_SIGNATURE_SIZE, + FW_HASH_SIZE); + wfx_sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size); + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING); + ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING); + if (ret) + goto error; + + start = ktime_get(); + ret = upload_firmware(wdev, fw->data + header_size, fw->size - header_size); + if (ret) + goto error; + dev_dbg(wdev->dev, "firmware load after %lldus\n", + ktime_us_delta(ktime_get(), start)); + + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE); + ret = wait_ncp_status(wdev, NCP_AUTH_OK); + /* Legacy ROM support */ + if (ret < 0) + ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY); + if (ret < 0) + goto error; + wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP); + +error: + kfree(buf); + if (fw) + release_firmware(fw); + if (ret) + print_boot_status(wdev); + return ret; +} + +static int init_gpr(struct wfx_dev *wdev) +{ + int ret, i; + static const struct { + int index; + u32 value; + } gpr_init[] = 
{ + { 0x07, 0x208775 }, + { 0x08, 0x2EC020 }, + { 0x09, 0x3C3C3C }, + { 0x0B, 0x322C44 }, + { 0x0C, 0xA06497 }, + }; + + for (i = 0; i < ARRAY_SIZE(gpr_init); i++) { + ret = wfx_igpr_reg_write(wdev, gpr_init[i].index, gpr_init[i].value); + if (ret < 0) + return ret; + dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index, gpr_init[i].value); + } + return 0; +} + +int wfx_init_device(struct wfx_dev *wdev) +{ + int ret; + int hw_revision, hw_type; + int wakeup_timeout = 50; /* ms */ + ktime_t now, start; + u32 reg; + + reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD; + if (wdev->pdata.use_rising_clk) + reg |= CFG_CLK_RISE_EDGE; + ret = wfx_config_reg_write(wdev, reg); + if (ret < 0) { + dev_err(wdev->dev, "bus returned an error during first write access. Host configuration error?\n"); + return -EIO; + } + + ret = wfx_config_reg_read(wdev, ®); + if (ret < 0) { + dev_err(wdev->dev, "bus returned an error during first read access. Bus configuration error?\n"); + return -EIO; + } + if (reg == 0 || reg == ~0) { + dev_err(wdev->dev, "chip mute. Bus configuration error or chip wasn't reset?\n"); + return -EIO; + } + dev_dbg(wdev->dev, "initial config register value: %08x\n", reg); + + hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg); + if (hw_revision == 0) { + dev_err(wdev->dev, "bad hardware revision number: %d\n", hw_revision); + return -ENODEV; + } + hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg); + if (hw_type == 1) { + dev_notice(wdev->dev, "development hardware detected\n"); + wakeup_timeout = 2000; + } + + ret = init_gpr(wdev); + if (ret < 0) + return ret; + + ret = wfx_control_reg_write(wdev, CTRL_WLAN_WAKEUP); + if (ret < 0) + return -EIO; + start = ktime_get(); + for (;;) { + ret = wfx_control_reg_read(wdev, ®); + now = ktime_get(); + if (reg & CTRL_WLAN_READY) + break; + if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) { + dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n"); + return -ETIMEDOUT; + } + } + dev_dbg(wdev->dev, "chip wake up after %lldus\n", ktime_us_delta(now, start)); + + ret = wfx_config_reg_write_bits(wdev, CFG_CPU_RESET, 0); + if (ret < 0) + return ret; + ret = load_firmware_secure(wdev); + if (ret < 0) + return ret; + return wfx_config_reg_write_bits(wdev, + CFG_DIRECT_ACCESS_MODE | + CFG_IRQ_ENABLE_DATA | + CFG_IRQ_ENABLE_WRDY, + CFG_IRQ_ENABLE_DATA); +} diff --git a/drivers/net/wireless/silabs/wfx/fwio.h b/drivers/net/wireless/silabs/wfx/fwio.h new file mode 100644 index 000000000000..eeea61210eca --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/fwio.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Firmware loading. + * + * Copyright (c) 2017-2019, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_FWIO_H +#define WFX_FWIO_H + +struct wfx_dev; + +int wfx_init_device(struct wfx_dev *wdev); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_api_cmd.h b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h new file mode 100644 index 000000000000..8b91b1d4a46b --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_api_cmd.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ +/* + * WF200 hardware interface definitions + * + * Copyright (c) 2018-2020, Silicon Laboratories Inc. 
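For readers decoding the config register probe in wfx_init_device() above: FIELD_GET() (from linux/bitfield.h) extracts a bit-field from a register value using a constant mask. Below is a minimal user-space sketch of the same trick; the mask positions are invented for illustration and are not the real WFX config register layout.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_GET() helpers. */
#define EXAMPLE_GENMASK(h, l)      (((1u << ((h) - (l) + 1)) - 1) << (l))
#define EXAMPLE_FIELD_GET(mask, v) (((v) & (mask)) / ((mask) & -(mask)))

/* Illustrative field positions only, not the actual CFG register layout. */
#define EXAMPLE_DEVICE_ID_TYPE   EXAMPLE_GENMASK(27, 24)
#define EXAMPLE_DEVICE_ID_MAJOR  EXAMPLE_GENMASK(30, 28)

int main(void)
{
	uint32_t reg = 0x12340000; /* pretend initial config register value */

	/* Mask then shift down by the mask's lowest set bit, as FIELD_GET() does */
	printf("type=%u major=%u\n",
	       EXAMPLE_FIELD_GET(EXAMPLE_DEVICE_ID_TYPE, reg),
	       EXAMPLE_FIELD_GET(EXAMPLE_DEVICE_ID_MAJOR, reg));
	return 0;
}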
+ */ + +#ifndef WFX_HIF_API_CMD_H +#define WFX_HIF_API_CMD_H + +#include "hif_api_general.h" + +enum wfx_hif_requests_ids { + HIF_REQ_ID_RESET = 0x0a, + HIF_REQ_ID_READ_MIB = 0x05, + HIF_REQ_ID_WRITE_MIB = 0x06, + HIF_REQ_ID_START_SCAN = 0x07, + HIF_REQ_ID_STOP_SCAN = 0x08, + HIF_REQ_ID_TX = 0x04, + HIF_REQ_ID_JOIN = 0x0b, + HIF_REQ_ID_SET_PM_MODE = 0x10, + HIF_REQ_ID_SET_BSS_PARAMS = 0x11, + HIF_REQ_ID_ADD_KEY = 0x0c, + HIF_REQ_ID_REMOVE_KEY = 0x0d, + HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13, + HIF_REQ_ID_START = 0x17, + HIF_REQ_ID_BEACON_TRANSMIT = 0x18, + HIF_REQ_ID_UPDATE_IE = 0x1b, + HIF_REQ_ID_MAP_LINK = 0x1c, +}; + +enum wfx_hif_confirmations_ids { + HIF_CNF_ID_RESET = 0x0a, + HIF_CNF_ID_READ_MIB = 0x05, + HIF_CNF_ID_WRITE_MIB = 0x06, + HIF_CNF_ID_START_SCAN = 0x07, + HIF_CNF_ID_STOP_SCAN = 0x08, + HIF_CNF_ID_TX = 0x04, + HIF_CNF_ID_MULTI_TRANSMIT = 0x1e, + HIF_CNF_ID_JOIN = 0x0b, + HIF_CNF_ID_SET_PM_MODE = 0x10, + HIF_CNF_ID_SET_BSS_PARAMS = 0x11, + HIF_CNF_ID_ADD_KEY = 0x0c, + HIF_CNF_ID_REMOVE_KEY = 0x0d, + HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13, + HIF_CNF_ID_START = 0x17, + HIF_CNF_ID_BEACON_TRANSMIT = 0x18, + HIF_CNF_ID_UPDATE_IE = 0x1b, + HIF_CNF_ID_MAP_LINK = 0x1c, +}; + +enum wfx_hif_indications_ids { + HIF_IND_ID_RX = 0x84, + HIF_IND_ID_SCAN_CMPL = 0x86, + HIF_IND_ID_JOIN_COMPLETE = 0x8f, + HIF_IND_ID_SET_PM_MODE_CMPL = 0x89, + HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c, + HIF_IND_ID_EVENT = 0x85 +}; + +struct wfx_hif_req_reset { + u8 reset_stat:1; + u8 reset_all_int:1; + u8 reserved1:6; + u8 reserved2[3]; +} __packed; + +struct wfx_hif_cnf_reset { + __le32 status; +} __packed; + +struct wfx_hif_req_read_mib { + __le16 mib_id; + __le16 reserved; +} __packed; + +struct wfx_hif_cnf_read_mib { + __le32 status; + __le16 mib_id; + __le16 length; + u8 mib_data[]; +} __packed; + +struct wfx_hif_req_write_mib { + __le16 mib_id; + __le16 length; + u8 mib_data[]; +} __packed; + +struct wfx_hif_cnf_write_mib { + __le32 status; +} __packed; + +struct wfx_hif_req_update_ie { + u8 beacon:1; + u8 probe_resp:1; + u8 probe_req:1; + u8 reserved1:5; + u8 reserved2; + __le16 num_ies; + u8 ie[]; +} __packed; + +struct wfx_hif_cnf_update_ie { + __le32 status; +} __packed; + +struct wfx_hif_ssid_def { + __le32 ssid_length; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +} __packed; + +#define HIF_API_MAX_NB_SSIDS 2 +#define HIF_API_MAX_NB_CHANNELS 14 + +struct wfx_hif_req_start_scan_alt { + u8 band; + u8 maintain_current_bss:1; + u8 periodic:1; + u8 reserved1:6; + u8 disallow_ps:1; + u8 reserved2:1; + u8 short_preamble:1; + u8 reserved3:5; + u8 max_transmit_rate; + __le16 periodic_interval; + u8 reserved4; + s8 periodic_rssi_thr; + u8 num_of_probe_requests; + u8 probe_delay; + u8 num_of_ssids; + u8 num_of_channels; + __le32 min_channel_time; + __le32 max_channel_time; + __le32 tx_power_level; /* signed value */ + struct wfx_hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS]; + u8 channel_list[]; +} __packed; + +struct wfx_hif_cnf_start_scan { + __le32 status; +} __packed; + +struct wfx_hif_cnf_stop_scan { + __le32 status; +} __packed; + +enum wfx_hif_pm_mode_status { + HIF_PM_MODE_ACTIVE = 0x0, + HIF_PM_MODE_PS = 0x1, + HIF_PM_MODE_UNDETERMINED = 0x2 +}; + +struct wfx_hif_ind_scan_cmpl { + __le32 status; + u8 pm_mode; + u8 num_channels_completed; + __le16 reserved; +} __packed; + +enum wfx_hif_queue_id { + HIF_QUEUE_ID_BACKGROUND = 0x0, + HIF_QUEUE_ID_BESTEFFORT = 0x1, + HIF_QUEUE_ID_VIDEO = 0x2, + HIF_QUEUE_ID_VOICE = 0x3 +}; + +enum wfx_hif_frame_format { + HIF_FRAME_FORMAT_NON_HT = 0x0, + HIF_FRAME_FORMAT_MIXED_FORMAT_HT = 
0x1, + HIF_FRAME_FORMAT_GF_HT_11N = 0x2 +}; + +struct wfx_hif_req_tx { + /* packet_id is not interpreted by the device, so it is not necessary to declare it little + * endian + */ + u32 packet_id; + u8 max_tx_rate; + u8 queue_id:2; + u8 peer_sta_id:4; + u8 reserved1:2; + u8 more:1; + u8 fc_offset:3; + u8 after_dtim:1; + u8 reserved2:3; + u8 start_exp:1; + u8 reserved3:3; + u8 retry_policy_index:4; + __le32 reserved4; + __le32 expire_time; + u8 frame_format:4; + u8 fec_coding:1; + u8 short_gi:1; + u8 reserved5:1; + u8 stbc:1; + u8 reserved6; + u8 aggregation:1; + u8 reserved7:7; + u8 reserved8; + u8 frame[]; +} __packed; + +enum wfx_hif_qos_ackplcy { + HIF_QOS_ACKPLCY_NORMAL = 0x0, + HIF_QOS_ACKPLCY_TXNOACK = 0x1, + HIF_QOS_ACKPLCY_NOEXPACK = 0x2, + HIF_QOS_ACKPLCY_BLCKACK = 0x3 +}; + +struct wfx_hif_cnf_tx { + __le32 status; + /* packet_id is copied from struct wfx_hif_req_tx without been interpreted by the device, so + * it is not necessary to declare it little endian + */ + u32 packet_id; + u8 txed_rate; + u8 ack_failures; + u8 aggr:1; + u8 requeue:1; + u8 ack_policy:2; + u8 txop_limit:1; + u8 reserved1:3; + u8 reserved2; + __le32 media_delay; + __le32 tx_queue_delay; +} __packed; + +struct wfx_hif_cnf_multi_transmit { + u8 num_tx_confs; + u8 reserved[3]; + struct wfx_hif_cnf_tx tx_conf_payload[]; +} __packed; + +enum wfx_hif_ri_flags_encrypt { + HIF_RI_FLAGS_UNENCRYPTED = 0x0, + HIF_RI_FLAGS_WEP_ENCRYPTED = 0x1, + HIF_RI_FLAGS_TKIP_ENCRYPTED = 0x2, + HIF_RI_FLAGS_AES_ENCRYPTED = 0x3, + HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4 +}; + +struct wfx_hif_ind_rx { + __le32 status; + u8 channel_number; + u8 reserved1; + u8 rxed_rate; + u8 rcpi_rssi; + u8 encryp:3; + u8 in_aggr:1; + u8 first_aggr:1; + u8 last_aggr:1; + u8 defrag:1; + u8 beacon:1; + u8 tim:1; + u8 bitmap:1; + u8 match_ssid:1; + u8 match_bssid:1; + u8 more:1; + u8 reserved2:1; + u8 ht:1; + u8 stbc:1; + u8 match_uc_addr:1; + u8 match_mc_addr:1; + u8 match_bc_addr:1; + u8 key_type:1; + u8 key_index:4; + u8 reserved3:1; + u8 peer_sta_id:4; + u8 reserved4:2; + u8 reserved5:1; + u8 frame[]; +} __packed; + +struct wfx_hif_req_edca_queue_params { + u8 queue_id; + u8 reserved1; + u8 aifsn; + u8 reserved2; + __le16 cw_min; + __le16 cw_max; + __le16 tx_op_limit; + __le16 allowed_medium_time; + __le32 reserved3; +} __packed; + +struct wfx_hif_cnf_edca_queue_params { + __le32 status; +} __packed; + +struct wfx_hif_req_join { + u8 infrastructure_bss_mode:1; + u8 reserved1:7; + u8 band; + u8 channel_number; + u8 reserved2; + u8 bssid[ETH_ALEN]; + __le16 atim_window; + u8 short_preamble:1; + u8 reserved3:7; + u8 probe_for_join; + u8 reserved4; + u8 reserved5:2; + u8 force_no_beacon:1; + u8 force_with_ind:1; + u8 reserved6:4; + __le32 ssid_length; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + __le32 beacon_interval; + __le32 basic_rate_set; +} __packed; + +struct wfx_hif_cnf_join { + __le32 status; +} __packed; + +struct wfx_hif_ind_join_complete { + __le32 status; +} __packed; + +struct wfx_hif_req_set_bss_params { + u8 lost_count_only:1; + u8 reserved:7; + u8 beacon_lost_count; + __le16 aid; + __le32 operational_rate_set; +} __packed; + +struct wfx_hif_cnf_set_bss_params { + __le32 status; +} __packed; + +struct wfx_hif_req_set_pm_mode { + u8 enter_psm:1; + u8 reserved:6; + u8 fast_psm:1; + u8 fast_psm_idle_period; + u8 ap_psm_change_period; + u8 min_auto_ps_poll_period; +} __packed; + +struct wfx_hif_cnf_set_pm_mode { + __le32 status; +} __packed; + +struct wfx_hif_ind_set_pm_mode_cmpl { + __le32 status; + u8 pm_mode; + u8 reserved[3]; +} __packed; + +struct 
wfx_hif_req_start { + u8 mode; + u8 band; + u8 channel_number; + u8 reserved1; + __le32 reserved2; + __le32 beacon_interval; + u8 dtim_period; + u8 short_preamble:1; + u8 reserved3:7; + u8 reserved4; + u8 ssid_length; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + __le32 basic_rate_set; +} __packed; + +struct wfx_hif_cnf_start { + __le32 status; +} __packed; + +struct wfx_hif_req_beacon_transmit { + u8 enable_beaconing; + u8 reserved[3]; +} __packed; + +struct wfx_hif_cnf_beacon_transmit { + __le32 status; +} __packed; + +#define HIF_LINK_ID_MAX 14 +#define HIF_LINK_ID_NOT_ASSOCIATED (HIF_LINK_ID_MAX + 1) + +struct wfx_hif_req_map_link { + u8 mac_addr[ETH_ALEN]; + u8 unmap:1; + u8 mfpc:1; + u8 reserved:6; + u8 peer_sta_id; +} __packed; + +struct wfx_hif_cnf_map_link { + __le32 status; +} __packed; + +struct wfx_hif_ind_suspend_resume_tx { + u8 resume:1; + u8 reserved1:2; + u8 bc_mc_only:1; + u8 reserved2:4; + u8 reserved3; + __le16 peer_sta_set; +} __packed; + + +#define MAX_KEY_ENTRIES 24 +#define HIF_API_WEP_KEY_DATA_SIZE 16 +#define HIF_API_TKIP_KEY_DATA_SIZE 16 +#define HIF_API_RX_MIC_KEY_SIZE 8 +#define HIF_API_TX_MIC_KEY_SIZE 8 +#define HIF_API_AES_KEY_DATA_SIZE 16 +#define HIF_API_WAPI_KEY_DATA_SIZE 16 +#define HIF_API_MIC_KEY_DATA_SIZE 16 +#define HIF_API_IGTK_KEY_DATA_SIZE 16 +#define HIF_API_RX_SEQUENCE_COUNTER_SIZE 8 +#define HIF_API_IPN_SIZE 8 + +enum wfx_hif_key_type { + HIF_KEY_TYPE_WEP_DEFAULT = 0x0, + HIF_KEY_TYPE_WEP_PAIRWISE = 0x1, + HIF_KEY_TYPE_TKIP_GROUP = 0x2, + HIF_KEY_TYPE_TKIP_PAIRWISE = 0x3, + HIF_KEY_TYPE_AES_GROUP = 0x4, + HIF_KEY_TYPE_AES_PAIRWISE = 0x5, + HIF_KEY_TYPE_WAPI_GROUP = 0x6, + HIF_KEY_TYPE_WAPI_PAIRWISE = 0x7, + HIF_KEY_TYPE_IGTK_GROUP = 0x8, + HIF_KEY_TYPE_NONE = 0x9 +}; + +struct wfx_hif_wep_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 reserved; + u8 key_length; + u8 key_data[HIF_API_WEP_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_wep_group_key { + u8 key_id; + u8 key_length; + u8 reserved[2]; + u8 key_data[HIF_API_WEP_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_tkip_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 reserved[2]; + u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE]; + u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE]; + u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE]; +} __packed; + +struct wfx_hif_tkip_group_key { + u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE]; + u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE]; + u8 key_id; + u8 reserved[3]; + u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE]; +} __packed; + +struct wfx_hif_aes_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 reserved[2]; + u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_aes_group_key { + u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE]; + u8 key_id; + u8 reserved[3]; + u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE]; +} __packed; + +struct wfx_hif_wapi_pairwise_key { + u8 peer_address[ETH_ALEN]; + u8 key_id; + u8 reserved; + u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE]; + u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE]; +} __packed; + +struct wfx_hif_wapi_group_key { + u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE]; + u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE]; + u8 key_id; + u8 reserved[3]; +} __packed; + +struct wfx_hif_igtk_group_key { + u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE]; + u8 key_id; + u8 reserved[3]; + u8 ipn[HIF_API_IPN_SIZE]; +} __packed; + +struct wfx_hif_req_add_key { + u8 type; + u8 entry_index; + u8 int_id:2; + u8 reserved1:6; + u8 reserved2; + union { + struct wfx_hif_wep_pairwise_key wep_pairwise_key; + struct wfx_hif_wep_group_key 
wep_group_key; + struct wfx_hif_tkip_pairwise_key tkip_pairwise_key; + struct wfx_hif_tkip_group_key tkip_group_key; + struct wfx_hif_aes_pairwise_key aes_pairwise_key; + struct wfx_hif_aes_group_key aes_group_key; + struct wfx_hif_wapi_pairwise_key wapi_pairwise_key; + struct wfx_hif_wapi_group_key wapi_group_key; + struct wfx_hif_igtk_group_key igtk_group_key; + } key; +} __packed; + +struct wfx_hif_cnf_add_key { + __le32 status; +} __packed; + +struct wfx_hif_req_remove_key { + u8 entry_index; + u8 reserved[3]; +} __packed; + +struct wfx_hif_cnf_remove_key { + __le32 status; +} __packed; + +enum wfx_hif_event_ind { + HIF_EVENT_IND_BSSLOST = 0x1, + HIF_EVENT_IND_BSSREGAINED = 0x2, + HIF_EVENT_IND_RCPI_RSSI = 0x3, + HIF_EVENT_IND_PS_MODE_ERROR = 0x4, + HIF_EVENT_IND_INACTIVITY = 0x5 +}; + +enum wfx_hif_ps_mode_error { + HIF_PS_ERROR_NO_ERROR = 0, + HIF_PS_ERROR_AP_NOT_RESP_TO_POLL = 1, + HIF_PS_ERROR_AP_NOT_RESP_TO_UAPSD_TRIGGER = 2, + HIF_PS_ERROR_AP_SENT_UNICAST_IN_DOZE = 3, + HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4 +}; + +struct wfx_hif_ind_event { + __le32 event_id; + union { + u8 rcpi_rssi; + __le32 ps_mode_error; + __le32 peer_sta_set; + } event_data; +} __packed; + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_api_general.h b/drivers/net/wireless/silabs/wfx/hif_api_general.h new file mode 100644 index 000000000000..4d400fdc2252 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_api_general.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ +/* + * WF200 hardware interface definitions + * + * Copyright (c) 2018-2020, Silicon Laboratories Inc. + */ + +#ifndef WFX_HIF_API_GENERAL_H +#define WFX_HIF_API_GENERAL_H + +#include +#include + +#define HIF_ID_IS_INDICATION 0x80 +#define HIF_COUNTER_MAX 7 + +struct wfx_hif_msg { + __le16 len; + u8 id; + u8 reserved:1; + u8 interface:2; + u8 seqnum:3; + u8 encrypted:2; + u8 body[]; +} __packed; + +enum wfx_hif_general_requests_ids { + HIF_REQ_ID_CONFIGURATION = 0x09, + HIF_REQ_ID_CONTROL_GPIO = 0x26, + HIF_REQ_ID_SET_SL_MAC_KEY = 0x27, + HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28, + HIF_REQ_ID_SL_CONFIGURE = 0x29, + HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a, + HIF_REQ_ID_PTA_SETTINGS = 0x2b, + HIF_REQ_ID_PTA_PRIORITY = 0x2c, + HIF_REQ_ID_PTA_STATE = 0x2d, + HIF_REQ_ID_SHUT_DOWN = 0x32, +}; + +enum wfx_hif_general_confirmations_ids { + HIF_CNF_ID_CONFIGURATION = 0x09, + HIF_CNF_ID_CONTROL_GPIO = 0x26, + HIF_CNF_ID_SET_SL_MAC_KEY = 0x27, + HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28, + HIF_CNF_ID_SL_CONFIGURE = 0x29, + HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a, + HIF_CNF_ID_PTA_SETTINGS = 0x2b, + HIF_CNF_ID_PTA_PRIORITY = 0x2c, + HIF_CNF_ID_PTA_STATE = 0x2d, + HIF_CNF_ID_SHUT_DOWN = 0x32, +}; + +enum wfx_hif_general_indications_ids { + HIF_IND_ID_EXCEPTION = 0xe0, + HIF_IND_ID_STARTUP = 0xe1, + HIF_IND_ID_WAKEUP = 0xe2, + HIF_IND_ID_GENERIC = 0xe3, + HIF_IND_ID_ERROR = 0xe4, + HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5 +}; + +#define HIF_STATUS_SUCCESS (cpu_to_le32(0x0000)) +#define HIF_STATUS_FAIL (cpu_to_le32(0x0001)) +#define HIF_STATUS_INVALID_PARAMETER (cpu_to_le32(0x0002)) +#define HIF_STATUS_WARNING (cpu_to_le32(0x0003)) +#define HIF_STATUS_UNKNOWN_REQUEST (cpu_to_le32(0x0004)) +#define HIF_STATUS_RX_FAIL_DECRYPT (cpu_to_le32(0x0010)) +#define HIF_STATUS_RX_FAIL_MIC (cpu_to_le32(0x0011)) +#define HIF_STATUS_RX_FAIL_NO_KEY (cpu_to_le32(0x0012)) +#define HIF_STATUS_TX_FAIL_RETRIES (cpu_to_le32(0x0013)) +#define HIF_STATUS_TX_FAIL_TIMEOUT (cpu_to_le32(0x0014)) +#define HIF_STATUS_TX_FAIL_REQUEUE (cpu_to_le32(0x0015)) +#define 
HIF_STATUS_REFUSED (cpu_to_le32(0x0016)) +#define HIF_STATUS_BUSY (cpu_to_le32(0x0017)) +#define HIF_STATUS_SLK_SET_KEY_SUCCESS (cpu_to_le32(0x005A)) +#define HIF_STATUS_SLK_SET_KEY_ALREADY_BURNED (cpu_to_le32(0x006B)) +#define HIF_STATUS_SLK_SET_KEY_DISALLOWED_MODE (cpu_to_le32(0x007C)) +#define HIF_STATUS_SLK_SET_KEY_UNKNOWN_MODE (cpu_to_le32(0x008D)) +#define HIF_STATUS_SLK_NEGO_SUCCESS (cpu_to_le32(0x009E)) +#define HIF_STATUS_SLK_NEGO_FAILED (cpu_to_le32(0x00AF)) +#define HIF_STATUS_ROLLBACK_SUCCESS (cpu_to_le32(0x1234)) +#define HIF_STATUS_ROLLBACK_FAIL (cpu_to_le32(0x1256)) + +enum wfx_hif_api_rate_index { + API_RATE_INDEX_B_1MBPS = 0, + API_RATE_INDEX_B_2MBPS = 1, + API_RATE_INDEX_B_5P5MBPS = 2, + API_RATE_INDEX_B_11MBPS = 3, + API_RATE_INDEX_PBCC_22MBPS = 4, + API_RATE_INDEX_PBCC_33MBPS = 5, + API_RATE_INDEX_G_6MBPS = 6, + API_RATE_INDEX_G_9MBPS = 7, + API_RATE_INDEX_G_12MBPS = 8, + API_RATE_INDEX_G_18MBPS = 9, + API_RATE_INDEX_G_24MBPS = 10, + API_RATE_INDEX_G_36MBPS = 11, + API_RATE_INDEX_G_48MBPS = 12, + API_RATE_INDEX_G_54MBPS = 13, + API_RATE_INDEX_N_6P5MBPS = 14, + API_RATE_INDEX_N_13MBPS = 15, + API_RATE_INDEX_N_19P5MBPS = 16, + API_RATE_INDEX_N_26MBPS = 17, + API_RATE_INDEX_N_39MBPS = 18, + API_RATE_INDEX_N_52MBPS = 19, + API_RATE_INDEX_N_58P5MBPS = 20, + API_RATE_INDEX_N_65MBPS = 21, + API_RATE_NUM_ENTRIES = 22 +}; + +struct wfx_hif_ind_startup { + __le32 status; + __le16 hardware_id; + u8 opn[14]; + u8 uid[8]; + __le16 num_inp_ch_bufs; + __le16 size_inp_ch_buf; + u8 num_links_ap; + u8 num_interfaces; + u8 mac_addr[2][ETH_ALEN]; + u8 api_version_minor; + u8 api_version_major; + u8 link_mode:2; + u8 reserved1:6; + u8 reserved2; + u8 reserved3; + u8 reserved4; + u8 firmware_build; + u8 firmware_minor; + u8 firmware_major; + u8 firmware_type; + u8 disabled_channel_list[2]; + u8 region_sel_mode:4; + u8 reserved5:4; + u8 phy1_region:3; + u8 phy0_region:3; + u8 otp_phy_ver:2; + __le32 supported_rate_mask; + u8 firmware_label[128]; +} __packed; + +struct wfx_hif_ind_wakeup { +} __packed; + +struct wfx_hif_req_configuration { + __le16 length; + u8 pds_data[]; +} __packed; + +struct wfx_hif_cnf_configuration { + __le32 status; +} __packed; + +enum wfx_hif_gpio_mode { + HIF_GPIO_MODE_D0 = 0x0, + HIF_GPIO_MODE_D1 = 0x1, + HIF_GPIO_MODE_OD0 = 0x2, + HIF_GPIO_MODE_OD1 = 0x3, + HIF_GPIO_MODE_TRISTATE = 0x4, + HIF_GPIO_MODE_TOGGLE = 0x5, + HIF_GPIO_MODE_READ = 0x6 +}; + +struct wfx_hif_req_control_gpio { + u8 gpio_label; + u8 gpio_mode; +} __packed; + +struct wfx_hif_cnf_control_gpio { + __le32 status; + __le32 value; +} __packed; + +enum wfx_hif_generic_indication_type { + HIF_GENERIC_INDICATION_TYPE_RAW = 0x0, + HIF_GENERIC_INDICATION_TYPE_STRING = 0x1, + HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2, + HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO = 0x3, +}; + +struct wfx_hif_rx_stats { + __le32 nb_rx_frame; + __le32 nb_crc_frame; + __le32 per_total; + __le32 throughput; + __le32 nb_rx_by_rate[API_RATE_NUM_ENTRIES]; + __le16 per[API_RATE_NUM_ENTRIES]; + __le16 snr[API_RATE_NUM_ENTRIES]; /* signed value */ + __le16 rssi[API_RATE_NUM_ENTRIES]; /* signed value */ + __le16 cfo[API_RATE_NUM_ENTRIES]; /* signed value */ + __le32 date; + __le32 pwr_clk_freq; + u8 is_ext_pwr_clk; + s8 current_temp; +} __packed; + +struct wfx_hif_tx_power_loop_info { + __le16 tx_gain_dig; + __le16 tx_gain_pa; + __le16 target_pout; /* signed value */ + __le16 p_estimation; /* signed value */ + __le16 vpdet; + u8 measurement_index; + u8 reserved; +} __packed; + +struct wfx_hif_ind_generic { + __le32 type; + union 
{ + struct wfx_hif_rx_stats rx_stats; + struct wfx_hif_tx_power_loop_info tx_power_loop_info; + } data; +} __packed; + +enum wfx_hif_error { + HIF_ERROR_FIRMWARE_ROLLBACK = 0x00, + HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x01, + HIF_ERROR_SLK_OUTDATED_SESSION_KEY = 0x02, + HIF_ERROR_SLK_SESSION_KEY = 0x03, + HIF_ERROR_OOR_VOLTAGE = 0x04, + HIF_ERROR_PDS_PAYLOAD = 0x05, + HIF_ERROR_OOR_TEMPERATURE = 0x06, + HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE = 0x07, + HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED = 0x08, + HIF_ERROR_SLK_OVERFLOW = 0x09, + HIF_ERROR_SLK_DECRYPTION = 0x0a, + HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE = 0x0b, + HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW = 0x0c, + HIF_ERROR_HIF_RX_DATA_TOO_LARGE = 0x0e, + HIF_ERROR_HIF_TX_QUEUE_FULL = 0x0d, + HIF_ERROR_HIF_BUS = 0x0f, + HIF_ERROR_PDS_TESTFEATURE = 0x10, + HIF_ERROR_SLK_UNCONFIGURED = 0x11, +}; + +struct wfx_hif_ind_error { + __le32 type; + u8 data[]; +} __packed; + +struct wfx_hif_ind_exception { + __le32 type; + u8 data[]; +} __packed; + +enum wfx_hif_secure_link_state { + SEC_LINK_UNAVAILABLE = 0x0, + SEC_LINK_RESERVED = 0x1, + SEC_LINK_EVAL = 0x2, + SEC_LINK_ENFORCED = 0x3 +}; + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_api_mib.h b/drivers/net/wireless/silabs/wfx/hif_api_mib.h new file mode 100644 index 000000000000..7b68b83866c9 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_api_mib.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ +/* + * WF200 hardware interface definitions + * + * Copyright (c) 2018-2020, Silicon Laboratories Inc. + */ + +#ifndef WFX_HIF_API_MIB_H +#define WFX_HIF_API_MIB_H + +#include "hif_api_general.h" + +#define HIF_API_IPV4_ADDRESS_SIZE 4 +#define HIF_API_IPV6_ADDRESS_SIZE 16 + +enum wfx_hif_mib_ids { + HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000, + HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001, + HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002, + HIF_MIB_ID_CCA_CONFIG = 0x2003, + HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010, + HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011, + HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012, + HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013, + HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014, + HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015, + HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016, + HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017, + HIF_MIB_ID_SET_DATA_FILTERING = 0x2018, + HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019, + HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A, + HIF_MIB_ID_RX_FILTER = 0x201B, + HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C, + HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D, + HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030, + HIF_MIB_ID_TSF_COUNTER = 0x2031, + HIF_MIB_ID_STATISTICS_TABLE = 0x2032, + HIF_MIB_ID_COUNTERS_TABLE = 0x2033, + HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034, + HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035, + HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040, + HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041, + HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042, + HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043, + HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044, + HIF_MIB_ID_SLOT_TIME = 0x2045, + HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046, + HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047, + HIF_MIB_ID_TEMPLATE_FRAME = 0x2048, + HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049, + HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A, + HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B, + HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C, + HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D, + HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E, + HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F, + HIF_MIB_ID_PROTECTED_MGMT_POLICY = 
0x2050, + HIF_MIB_ID_SET_HT_PROTECTION = 0x2051, + HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052, + HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053, + HIF_MIB_ID_INACTIVITY_TIMER = 0x2054, + HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055, + HIF_MIB_ID_BEACON_STATS = 0x2056, +}; + +enum wfx_hif_op_power_mode { + HIF_OP_POWER_MODE_ACTIVE = 0x0, + HIF_OP_POWER_MODE_DOZE = 0x1, + HIF_OP_POWER_MODE_QUIESCENT = 0x2 +}; + +struct wfx_hif_mib_gl_operational_power_mode { + u8 power_mode:4; + u8 reserved1:3; + u8 wup_ind_activation:1; + u8 reserved2[3]; +} __packed; + +struct wfx_hif_mib_gl_set_multi_msg { + u8 enable_multi_tx_conf:1; + u8 reserved1:7; + u8 reserved2[3]; +} __packed; + +enum wfx_hif_arp_ns_frame_treatment { + HIF_ARP_NS_FILTERING_DISABLE = 0x0, + HIF_ARP_NS_FILTERING_ENABLE = 0x1, + HIF_ARP_NS_REPLY_ENABLE = 0x2 +}; + +struct wfx_hif_mib_arp_ip_addr_table { + u8 condition_idx; + u8 arp_enable; + u8 reserved[2]; + u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE]; +} __packed; + +struct wfx_hif_mib_rx_filter { + u8 reserved1:1; + u8 bssid_filter:1; + u8 reserved2:1; + u8 fwd_probe_req:1; + u8 keep_alive_filter:1; + u8 reserved3:3; + u8 reserved4[3]; +} __packed; + +struct wfx_hif_ie_table_entry { + u8 ie_id; + u8 has_changed:1; + u8 no_longer:1; + u8 has_appeared:1; + u8 reserved:1; + u8 num_match_data:4; + u8 oui[3]; + u8 match_data[3]; +} __packed; + +struct wfx_hif_mib_bcn_filter_table { + __le32 num_of_info_elmts; + struct wfx_hif_ie_table_entry ie_table[]; +} __packed; + +enum wfx_hif_beacon_filter { + HIF_BEACON_FILTER_DISABLE = 0x0, + HIF_BEACON_FILTER_ENABLE = 0x1, + HIF_BEACON_FILTER_AUTO_ERP = 0x2 +}; + +struct wfx_hif_mib_bcn_filter_enable { + __le32 enable; + __le32 bcn_count; +} __packed; + +struct wfx_hif_mib_extended_count_table { + __le32 count_drop_plcp; + __le32 count_drop_fcs; + __le32 count_tx_frames; + __le32 count_rx_frames; + __le32 count_rx_frames_failed; + __le32 count_drop_decryption; + __le32 count_drop_tkip_mic; + __le32 count_drop_no_key; + __le32 count_tx_frames_multicast; + __le32 count_tx_frames_success; + __le32 count_tx_frames_failed; + __le32 count_tx_frames_retried; + __le32 count_tx_frames_multi_retried; + __le32 count_drop_duplicate; + __le32 count_rts_success; + __le32 count_rts_failed; + __le32 count_ack_failed; + __le32 count_rx_frames_multicast; + __le32 count_rx_frames_success; + __le32 count_drop_cmac_icv; + __le32 count_drop_cmac_replay; + __le32 count_drop_ccmp_replay; + __le32 count_drop_bip_mic; + __le32 count_rx_bcn_success; + __le32 count_rx_bcn_miss; + __le32 count_rx_bcn_dtim; + __le32 count_rx_bcn_dtim_aid0_clr; + __le32 count_rx_bcn_dtim_aid0_set; + __le32 reserved[12]; +} __packed; + +struct wfx_hif_mib_count_table { + __le32 count_drop_plcp; + __le32 count_drop_fcs; + __le32 count_tx_frames; + __le32 count_rx_frames; + __le32 count_rx_frames_failed; + __le32 count_drop_decryption; + __le32 count_drop_tkip_mic; + __le32 count_drop_no_key; + __le32 count_tx_frames_multicast; + __le32 count_tx_frames_success; + __le32 count_tx_frames_failed; + __le32 count_tx_frames_retried; + __le32 count_tx_frames_multi_retried; + __le32 count_drop_duplicate; + __le32 count_rts_success; + __le32 count_rts_failed; + __le32 count_ack_failed; + __le32 count_rx_frames_multicast; + __le32 count_rx_frames_success; + __le32 count_drop_cmac_icv; + __le32 count_drop_cmac_replay; + __le32 count_drop_ccmp_replay; + __le32 count_drop_bip_mic; +} __packed; + +struct wfx_hif_mib_mac_address { + u8 mac_addr[ETH_ALEN]; + __le16 reserved; +} __packed; + +struct 
wfx_hif_mib_wep_default_key_id { + u8 wep_default_key_id; + u8 reserved[3]; +} __packed; + +struct wfx_hif_mib_dot11_rts_threshold { + __le32 threshold; +} __packed; + +struct wfx_hif_mib_slot_time { + __le32 slot_time; +} __packed; + +struct wfx_hif_mib_current_tx_power_level { + __le32 power_level; /* signed value */ +} __packed; + +struct wfx_hif_mib_non_erp_protection { + u8 use_cts_to_self:1; + u8 reserved1:7; + u8 reserved2[3]; +} __packed; + +enum wfx_hif_tmplt { + HIF_TMPLT_PRBREQ = 0x0, + HIF_TMPLT_BCN = 0x1, + HIF_TMPLT_NULL = 0x2, + HIF_TMPLT_QOSNUL = 0x3, + HIF_TMPLT_PSPOLL = 0x4, + HIF_TMPLT_PRBRES = 0x5, + HIF_TMPLT_ARP = 0x6, + HIF_TMPLT_NA = 0x7 +}; + +#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700 + +struct wfx_hif_mib_template_frame { + u8 frame_type; + u8 init_rate:7; + u8 mode:1; + __le16 frame_length; + u8 frame[]; +} __packed; + +struct wfx_hif_mib_beacon_wake_up_period { + u8 wakeup_period_min; + u8 receive_dtim:1; + u8 reserved1:7; + u8 wakeup_period_max; + u8 reserved2; +} __packed; + +struct wfx_hif_mib_rcpi_rssi_threshold { + u8 detection:1; + u8 rcpi_rssi:1; + u8 upperthresh:1; + u8 lowerthresh:1; + u8 reserved:4; + u8 lower_threshold; + u8 upper_threshold; + u8 rolling_average_count; +} __packed; + +#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16 + +struct wfx_hif_mib_block_ack_policy { + u8 block_ack_tx_tid_policy; + u8 reserved1; + u8 block_ack_rx_tid_policy; + u8 block_ack_rx_max_buffer_size; +} __packed; + +enum wfx_hif_mpdu_start_spacing { + HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0, + HIF_MPDU_START_SPACING_QUARTER = 0x1, + HIF_MPDU_START_SPACING_HALF = 0x2, + HIF_MPDU_START_SPACING_ONE = 0x3, + HIF_MPDU_START_SPACING_TWO = 0x4, + HIF_MPDU_START_SPACING_FOUR = 0x5, + HIF_MPDU_START_SPACING_EIGHT = 0x6, + HIF_MPDU_START_SPACING_SIXTEEN = 0x7 +}; + +struct wfx_hif_mib_set_association_mode { + u8 preambtype_use:1; + u8 mode:1; + u8 rateset:1; + u8 spacing:1; + u8 reserved1:4; + u8 short_preamble:1; + u8 reserved2:7; + u8 greenfield:1; + u8 reserved3:7; + u8 mpdu_start_spacing; + __le32 basic_rate_set; +} __packed; + +struct wfx_hif_mib_set_uapsd_information { + u8 trig_bckgrnd:1; + u8 trig_be:1; + u8 trig_video:1; + u8 trig_voice:1; + u8 reserved1:4; + u8 deliv_bckgrnd:1; + u8 deliv_be:1; + u8 deliv_video:1; + u8 deliv_voice:1; + u8 reserved2:4; + __le16 min_auto_trigger_interval; + __le16 max_auto_trigger_interval; + __le16 auto_trigger_step; +} __packed; + +struct wfx_hif_tx_rate_retry_policy { + u8 policy_index; + u8 short_retry_count; + u8 long_retry_count; + u8 first_rate_sel:2; + u8 terminate:1; + u8 count_init:1; + u8 reserved1:4; + u8 rate_recovery_count; + u8 reserved2[3]; + u8 rates[12]; +} __packed; + +#define HIF_TX_RETRY_POLICY_MAX 15 +#define HIF_TX_RETRY_POLICY_INVALID HIF_TX_RETRY_POLICY_MAX + +struct wfx_hif_mib_set_tx_rate_retry_policy { + u8 num_tx_rate_policies; + u8 reserved[3]; + struct wfx_hif_tx_rate_retry_policy tx_rate_retry_policy[]; +} __packed; + +struct wfx_hif_mib_protected_mgmt_policy { + u8 pmf_enable:1; + u8 unpmf_allowed:1; + u8 host_enc_auth_frames:1; + u8 reserved1:5; + u8 reserved2[3]; +} __packed; + +struct wfx_hif_mib_keep_alive_period { + __le16 keep_alive_period; + u8 reserved[2]; +} __packed; + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_rx.c b/drivers/net/wireless/silabs/wfx/hif_rx.c new file mode 100644 index 000000000000..64ca8acb8e4f --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_rx.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Handling of the chip-to-host events (aka 
indications) of the hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "hif_rx.h" +#include "wfx.h" +#include "scan.h" +#include "bh.h" +#include "sta.h" +#include "data_rx.h" +#include "hif_api_cmd.h" + +static int wfx_hif_generic_confirm(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + /* All confirm messages start with status */ + int status = le32_to_cpup((__le32 *)buf); + int cmd = hif->id; + int len = le16_to_cpu(hif->len) - 4; /* drop header */ + + WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error"); + + if (!wdev->hif_cmd.buf_send) { + dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd); + return -EINVAL; + } + + if (cmd != wdev->hif_cmd.buf_send->id) { + dev_warn(wdev->dev, "chip response mismatch request: 0x%.2x vs 0x%.2x\n", + cmd, wdev->hif_cmd.buf_send->id); + return -EINVAL; + } + + if (wdev->hif_cmd.buf_recv) { + if (wdev->hif_cmd.len_recv >= len && len > 0) + memcpy(wdev->hif_cmd.buf_recv, buf, len); + else + status = -EIO; + } + wdev->hif_cmd.ret = status; + + complete(&wdev->hif_cmd.done); + return status; +} + +static int wfx_hif_tx_confirm(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_cnf_tx *body = buf; + + wfx_tx_confirm_cb(wdev, body); + return 0; +} + +static int wfx_hif_multi_tx_confirm(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_cnf_multi_transmit *body = buf; + int i; + + WARN(body->num_tx_confs <= 0, "corrupted message"); + for (i = 0; i < body->num_tx_confs; i++) + wfx_tx_confirm_cb(wdev, &body->tx_conf_payload[i]); + return 0; +} + +static int wfx_hif_startup_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_startup *body = buf; + + if (body->status || body->firmware_type > 4) { + dev_err(wdev->dev, "received invalid startup indication"); + return -EINVAL; + } + memcpy(&wdev->hw_caps, body, sizeof(struct wfx_hif_ind_startup)); + complete(&wdev->firmware_ready); + return 0; +} + +static int wfx_hif_wakeup_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + if (!wdev->pdata.gpio_wakeup || gpiod_get_value(wdev->pdata.gpio_wakeup) == 0) { + dev_warn(wdev->dev, "unexpected wake-up indication\n"); + return -EIO; + } + return 0; +} + +static int wfx_hif_receive_indication(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, + const void *buf, struct sk_buff *skb) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + const struct wfx_hif_ind_rx *body = buf; + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + skb_pull(skb, sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_ind_rx)); + wfx_rx_cb(wvif, body, skb); + + return 0; +} + +static int wfx_hif_event_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + const struct wfx_hif_ind_event *body = buf; + int type = le32_to_cpu(body->event_id); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + + switch (type) { + case HIF_EVENT_IND_RCPI_RSSI: + wfx_event_report_rssi(wvif, body->event_data.rcpi_rssi); + break; + case HIF_EVENT_IND_BSSLOST: + schedule_delayed_work(&wvif->beacon_loss_work, 0); + break; + case HIF_EVENT_IND_BSSREGAINED: + 
cancel_delayed_work(&wvif->beacon_loss_work); + dev_dbg(wdev->dev, "ignore BSSREGAINED indication\n"); + break; + case HIF_EVENT_IND_PS_MODE_ERROR: + dev_warn(wdev->dev, "error while processing power save request: %d\n", + le32_to_cpu(body->event_data.ps_mode_error)); + break; + default: + dev_warn(wdev->dev, "unhandled event indication: %.2x\n", type); + break; + } + return 0; +} + +static int wfx_hif_pm_mode_complete_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + complete(&wvif->set_pm_mode_complete); + + return 0; +} + +static int wfx_hif_scan_complete_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + const struct wfx_hif_ind_scan_cmpl *body = buf; + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + + wfx_scan_complete(wvif, body->num_channels_completed); + + return 0; +} + +static int wfx_hif_join_complete_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + dev_warn(wdev->dev, "unattended JoinCompleteInd\n"); + + return 0; +} + +static int wfx_hif_suspend_resume_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_suspend_resume_tx *body = buf; + struct wfx_vif *wvif; + + if (body->bc_mc_only) { + wvif = wdev_to_wvif(wdev, hif->interface); + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + if (body->resume) + wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE); + else + wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP); + } else { + WARN(body->peer_sta_set, "misunderstood indication"); + WARN(hif->interface != 2, "misunderstood indication"); + if (body->resume) + wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE); + else + wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP); + } + + return 0; +} + +static int wfx_hif_generic_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_generic *body = buf; + int type = le32_to_cpu(body->type); + + switch (type) { + case HIF_GENERIC_INDICATION_TYPE_RAW: + return 0; + case HIF_GENERIC_INDICATION_TYPE_STRING: + dev_info(wdev->dev, "firmware says: %s\n", (char *)&body->data); + return 0; + case HIF_GENERIC_INDICATION_TYPE_RX_STATS: + mutex_lock(&wdev->rx_stats_lock); + /* Older firmware send a generic indication beside RxStats */ + if (!wfx_api_older_than(wdev, 1, 4)) + dev_info(wdev->dev, "Rx test ongoing. 
Temperature: %d degrees C\n", + body->data.rx_stats.current_temp); + memcpy(&wdev->rx_stats, &body->data.rx_stats, sizeof(wdev->rx_stats)); + mutex_unlock(&wdev->rx_stats_lock); + return 0; + case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO: + mutex_lock(&wdev->tx_power_loop_info_lock); + memcpy(&wdev->tx_power_loop_info, &body->data.tx_power_loop_info, + sizeof(wdev->tx_power_loop_info)); + mutex_unlock(&wdev->tx_power_loop_info_lock); + return 0; + default: + dev_err(wdev->dev, "generic_indication: unknown indication type: %#.8x\n", type); + return -EIO; + } +} + +static const struct { + int val; + const char *str; + bool has_param; +} hif_errors[] = { + { HIF_ERROR_FIRMWARE_ROLLBACK, + "rollback status" }, + { HIF_ERROR_FIRMWARE_DEBUG_ENABLED, + "debug feature enabled" }, + { HIF_ERROR_PDS_PAYLOAD, + "PDS version is not supported" }, + { HIF_ERROR_PDS_TESTFEATURE, + "PDS ask for an unknown test mode" }, + { HIF_ERROR_OOR_VOLTAGE, + "out-of-range power supply voltage", true }, + { HIF_ERROR_OOR_TEMPERATURE, + "out-of-range temperature", true }, + { HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE, + "secure link does not expect request during key exchange" }, + { HIF_ERROR_SLK_SESSION_KEY, + "secure link session key is invalid" }, + { HIF_ERROR_SLK_OVERFLOW, + "secure link overflow" }, + { HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE, + "secure link messages list does not match message encryption" }, + { HIF_ERROR_SLK_UNCONFIGURED, + "secure link not yet configured" }, + { HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW, + "bus clock is too slow (<1kHz)" }, + { HIF_ERROR_HIF_RX_DATA_TOO_LARGE, + "HIF message too large" }, + /* Following errors only exists in old firmware versions: */ + { HIF_ERROR_HIF_TX_QUEUE_FULL, + "HIF messages queue is full" }, + { HIF_ERROR_HIF_BUS, + "HIF bus" }, + { HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED, + "secure link does not support multi-tx confirmations" }, + { HIF_ERROR_SLK_OUTDATED_SESSION_KEY, + "secure link session key is outdated" }, + { HIF_ERROR_SLK_DECRYPTION, + "secure link params (nonce or tag) mismatch" }, +}; + +static int wfx_hif_error_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_error *body = buf; + int type = le32_to_cpu(body->type); + int param = (s8)body->data[0]; + int i; + + for (i = 0; i < ARRAY_SIZE(hif_errors); i++) + if (type == hif_errors[i].val) + break; + if (i < ARRAY_SIZE(hif_errors)) + if (hif_errors[i].has_param) + dev_err(wdev->dev, "asynchronous error: %s: %d\n", + hif_errors[i].str, param); + else + dev_err(wdev->dev, "asynchronous error: %s\n", hif_errors[i].str); + else + dev_err(wdev->dev, "asynchronous error: unknown: %08x\n", type); + print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, + 16, 1, hif, le16_to_cpu(hif->len), false); + wdev->chip_frozen = true; + + return 0; +}; + +static int wfx_hif_exception_indication(struct wfx_dev *wdev, + const struct wfx_hif_msg *hif, const void *buf) +{ + const struct wfx_hif_ind_exception *body = buf; + int type = le32_to_cpu(body->type); + + if (type == 4) + dev_err(wdev->dev, "firmware assert %d\n", le32_to_cpup((__le32 *)body->data)); + else + dev_err(wdev->dev, "firmware exception\n"); + print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, + 16, 1, hif, le16_to_cpu(hif->len), false); + wdev->chip_frozen = true; + + return -1; +} + +static const struct { + int msg_id; + int (*handler)(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, const void *buf); +} hif_handlers[] = { + /* Confirmations */ + { HIF_CNF_ID_TX, wfx_hif_tx_confirm }, 
+ { HIF_CNF_ID_MULTI_TRANSMIT, wfx_hif_multi_tx_confirm }, + /* Indications */ + { HIF_IND_ID_STARTUP, wfx_hif_startup_indication }, + { HIF_IND_ID_WAKEUP, wfx_hif_wakeup_indication }, + { HIF_IND_ID_JOIN_COMPLETE, wfx_hif_join_complete_indication }, + { HIF_IND_ID_SET_PM_MODE_CMPL, wfx_hif_pm_mode_complete_indication }, + { HIF_IND_ID_SCAN_CMPL, wfx_hif_scan_complete_indication }, + { HIF_IND_ID_SUSPEND_RESUME_TX, wfx_hif_suspend_resume_indication }, + { HIF_IND_ID_EVENT, wfx_hif_event_indication }, + { HIF_IND_ID_GENERIC, wfx_hif_generic_indication }, + { HIF_IND_ID_ERROR, wfx_hif_error_indication }, + { HIF_IND_ID_EXCEPTION, wfx_hif_exception_indication }, + /* FIXME: allocate skb_p from wfx_hif_receive_indication and make it generic */ + //{ HIF_IND_ID_RX, wfx_hif_receive_indication }, +}; + +void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb) +{ + int i; + const struct wfx_hif_msg *hif = (const struct wfx_hif_msg *)skb->data; + int hif_id = hif->id; + + if (hif_id == HIF_IND_ID_RX) { + /* wfx_hif_receive_indication take care of skb lifetime */ + wfx_hif_receive_indication(wdev, hif, hif->body, skb); + return; + } + /* Note: mutex_is_lock cause an implicit memory barrier that protect buf_send */ + if (mutex_is_locked(&wdev->hif_cmd.lock) && + wdev->hif_cmd.buf_send && wdev->hif_cmd.buf_send->id == hif_id) { + wfx_hif_generic_confirm(wdev, hif, hif->body); + goto free; + } + for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) { + if (hif_handlers[i].msg_id == hif_id) { + if (hif_handlers[i].handler) + hif_handlers[i].handler(wdev, hif, hif->body); + goto free; + } + } + if (hif_id & HIF_ID_IS_INDICATION) + dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n", hif_id); + else + dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n", hif_id); +free: + dev_kfree_skb(skb); +} diff --git a/drivers/net/wireless/silabs/wfx/hif_rx.h b/drivers/net/wireless/silabs/wfx/hif_rx.h new file mode 100644 index 000000000000..96543b81fa77 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_rx.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Handling of the chip-to-host events (aka indications) of the hardware API. + * + * Copyright (c) 2017-2019, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ +#ifndef WFX_HIF_RX_H +#define WFX_HIF_RX_H + +struct wfx_dev; +struct sk_buff; + +void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c new file mode 100644 index 000000000000..ae3cc5919dcd --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of the host-to-chip commands (aka request/confirmation) of the + * hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
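For reference alongside wfx_handle_rx() above, here is a small user-space sketch of how the 4-byte HIF header (little-endian length, message id, then the packed seqnum/interface/encrypted byte) can be decoded from a raw frame. The helper and the sample bytes are illustrative, not part of the driver; 0xe1 matches HIF_IND_ID_STARTUP from the definitions above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HIF_ID_IS_INDICATION 0x80

struct hif_hdr {          /* fixed-width mirror of struct wfx_hif_msg's header */
	uint16_t len;     /* total message length, header included */
	uint8_t  id;
	uint8_t  flags;   /* reserved:1 interface:2 seqnum:3 encrypted:2 */
};

static int decode_hif_hdr(const uint8_t *buf, size_t buf_len, struct hif_hdr *hdr)
{
	if (buf_len < 4)
		return -1;
	hdr->len = (uint16_t)(buf[0] | (buf[1] << 8));   /* __le16 on the wire */
	hdr->id = buf[2];
	hdr->flags = buf[3];
	if (hdr->len < 4 || hdr->len > buf_len)
		return -1;                               /* truncated or corrupt frame */
	return 0;
}

int main(void)
{
	/* len=8, id=0xe1 (startup indication), followed by a dummy 4-byte body */
	const uint8_t frame[] = { 0x08, 0x00, 0xe1, 0x00, 0xde, 0xad, 0xbe, 0xef };
	struct hif_hdr hdr;

	if (decode_hif_hdr(frame, sizeof(frame), &hdr))
		return 1;
	printf("%s id=0x%02x, %u byte body\n",
	       hdr.id & HIF_ID_IS_INDICATION ? "indication" : "confirmation",
	       (unsigned)hdr.id, (unsigned)(hdr.len - 4));
	return 0;
}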
+ * Copyright (c) 2010, ST-Ericsson + */ +#include + +#include "hif_tx.h" +#include "wfx.h" +#include "bh.h" +#include "hwio.h" +#include "debug.h" +#include "sta.h" + +void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd) +{ + init_completion(&hif_cmd->ready); + init_completion(&hif_cmd->done); + mutex_init(&hif_cmd->lock); +} + +static void wfx_fill_header(struct wfx_hif_msg *hif, int if_id, unsigned int cmd, size_t size) +{ + if (if_id == -1) + if_id = 2; + + WARN(cmd > 0x3f, "invalid hardware command %#.2x", cmd); + WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size); + WARN(if_id > 0x3, "invalid interface ID %d", if_id); + + hif->len = cpu_to_le16(size + 4); + hif->id = cmd; + hif->interface = if_id; +} + +static void *wfx_alloc_hif(size_t body_len, struct wfx_hif_msg **hif) +{ + *hif = kzalloc(sizeof(struct wfx_hif_msg) + body_len, GFP_KERNEL); + if (*hif) + return (*hif)->body; + else + return NULL; +} + +int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, + void *reply, size_t reply_len, bool no_reply) +{ + const char *mib_name = ""; + const char *mib_sep = ""; + int cmd = request->id; + int vif = request->interface; + int ret; + + /* Do not wait for any reply if chip is frozen */ + if (wdev->chip_frozen) + return -ETIMEDOUT; + + mutex_lock(&wdev->hif_cmd.lock); + WARN(wdev->hif_cmd.buf_send, "data locking error"); + + /* Note: call to complete() below has an implicit memory barrier that hopefully protect + * buf_send + */ + wdev->hif_cmd.buf_send = request; + wdev->hif_cmd.buf_recv = reply; + wdev->hif_cmd.len_recv = reply_len; + complete(&wdev->hif_cmd.ready); + + wfx_bh_request_tx(wdev); + + if (no_reply) { + /* Chip won't reply. Ensure the wq has send the buffer before to continue. */ + flush_workqueue(system_highpri_wq); + ret = 0; + goto end; + } + + if (wdev->poll_irq) + wfx_bh_poll_irq(wdev); + + ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ); + if (!ret) { + dev_err(wdev->dev, "chip is abnormally long to answer\n"); + reinit_completion(&wdev->hif_cmd.ready); + ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ); + } + if (!ret) { + dev_err(wdev->dev, "chip did not answer\n"); + wfx_pending_dump_old_frames(wdev, 3000); + wdev->chip_frozen = true; + reinit_completion(&wdev->hif_cmd.done); + ret = -ETIMEDOUT; + } else { + ret = wdev->hif_cmd.ret; + } + +end: + wdev->hif_cmd.buf_send = NULL; + mutex_unlock(&wdev->hif_cmd.lock); + + if (ret && + (cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) { + mib_name = wfx_get_mib_name(((u16 *)request)[2]); + mib_sep = "/"; + } + if (ret < 0) + dev_err(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned error %d\n", + wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret); + if (ret > 0) + dev_warn(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned status %d\n", + wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret); + + return ret; +} + +/* This function is special. After HIF_REQ_ID_SHUT_DOWN, chip won't reply to any request anymore. + * Obviously, only call this function during device unregister. 
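Every wrapper below (wfx_hif_shutdown(), wfx_hif_configuration(), wfx_hif_reset(), ...) follows the same calling convention around wfx_cmd_send(). As a schematic of that pattern only, here is a sketch with a made-up request structure and command ID; it is not a real HIF command.

/* Hypothetical request, for illustration: a one-byte parameter padded to a
 * 32-bit boundary like the real HIF request structures. */
struct wfx_hif_req_example {
	u8 some_parameter;
	u8 reserved[3];
} __packed;

static int wfx_hif_example(struct wfx_vif *wvif, u8 value)
{
	int ret;
	struct wfx_hif_msg *hif;
	struct wfx_hif_req_example *body = wfx_alloc_hif(sizeof(*body), &hif);

	if (!hif)
		return -ENOMEM;
	body->some_parameter = value;
	/* 0x3f is not a real command ID; it is only chosen to fit in the 6 bits
	 * that wfx_fill_header() checks */
	wfx_fill_header(hif, wvif->id, 0x3f, sizeof(*body));
	ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
	kfree(hif);
	return ret;
}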
+ */ +int wfx_hif_shutdown(struct wfx_dev *wdev) +{ + int ret; + struct wfx_hif_msg *hif; + + wfx_alloc_hif(0, &hif); + if (!hif) + return -ENOMEM; + wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0); + ret = wfx_cmd_send(wdev, hif, NULL, 0, true); + if (wdev->pdata.gpio_wakeup) + gpiod_set_value(wdev->pdata.gpio_wakeup, 0); + else + wfx_control_reg_write(wdev, 0); + kfree(hif); + return ret; +} + +int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len) +{ + int ret; + size_t buf_len = sizeof(struct wfx_hif_req_configuration) + len; + struct wfx_hif_msg *hif; + struct wfx_hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->length = cpu_to_le16(len); + memcpy(body->pds_data, conf, len); + wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->reset_stat = reset_stat; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len) +{ + int ret; + struct wfx_hif_msg *hif; + int buf_len = sizeof(struct wfx_hif_cnf_read_mib) + val_len; + struct wfx_hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif); + struct wfx_hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL); + + if (!body || !reply) { + ret = -ENOMEM; + goto out; + } + body->mib_id = cpu_to_le16(mib_id); + wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body)); + ret = wfx_cmd_send(wdev, hif, reply, buf_len, false); + + if (!ret && mib_id != le16_to_cpu(reply->mib_id)) { + dev_warn(wdev->dev, "%s: confirmation mismatch request\n", __func__); + ret = -EIO; + } + if (ret == -ENOMEM) + dev_err(wdev->dev, "buffer is too small to receive %s (%zu < %d)\n", + wfx_get_mib_name(mib_id), val_len, le16_to_cpu(reply->length)); + if (!ret) + memcpy(val, &reply->mib_data, le16_to_cpu(reply->length)); + else + memset(val, 0xFF, val_len); +out: + kfree(hif); + kfree(reply); + return ret; +} + +int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len) +{ + int ret; + struct wfx_hif_msg *hif; + int buf_len = sizeof(struct wfx_hif_req_write_mib) + val_len; + struct wfx_hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->mib_id = cpu_to_le16(mib_id); + body->length = cpu_to_le16(val_len); + memcpy(&body->mib_data, val, val_len); + wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, + int chan_start_idx, int chan_num) +{ + int ret, i; + struct wfx_hif_msg *hif; + size_t buf_len = sizeof(struct wfx_hif_req_start_scan_alt) + chan_num * sizeof(u8); + struct wfx_hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif); + + WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params"); + WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params"); + + if (!hif) + return -ENOMEM; + for (i = 0; i < req->n_ssids; i++) { + memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid, IEEE80211_MAX_SSID_LEN); + body->ssid_def[i].ssid_length = 
cpu_to_le32(req->ssids[i].ssid_len); + } + body->num_of_ssids = HIF_API_MAX_NB_SSIDS; + body->maintain_current_bss = 1; + body->disallow_ps = 1; + body->tx_power_level = cpu_to_le32(req->channels[chan_start_idx]->max_power); + body->num_of_channels = chan_num; + for (i = 0; i < chan_num; i++) + body->channel_list[i] = req->channels[i + chan_start_idx]->hw_value; + if (req->no_cck) + body->max_transmit_rate = API_RATE_INDEX_G_6MBPS; + else + body->max_transmit_rate = API_RATE_INDEX_B_1MBPS; + if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) { + body->min_channel_time = cpu_to_le32(50); + body->max_channel_time = cpu_to_le32(150); + } else { + body->min_channel_time = cpu_to_le32(10); + body->max_channel_time = cpu_to_le32(50); + body->num_of_probe_requests = 2; + body->probe_delay = 100; + } + + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_stop_scan(struct wfx_vif *wvif) +{ + int ret; + struct wfx_hif_msg *hif; + /* body associated to HIF_REQ_ID_STOP_SCAN is empty */ + wfx_alloc_hif(0, &hif); + + if (!hif) + return -ENOMEM; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + struct ieee80211_channel *channel, const u8 *ssid, int ssidlen) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif); + + WARN_ON(!conf->beacon_int); + WARN_ON(!conf->basic_rates); + WARN_ON(sizeof(body->ssid) < ssidlen); + WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS"); + if (!hif) + return -ENOMEM; + body->infrastructure_bss_mode = !conf->ibss_joined; + body->short_preamble = conf->use_short_preamble; + body->probe_for_join = !(channel->flags & IEEE80211_CHAN_NO_IR); + body->channel_number = channel->hw_value; + body->beacon_interval = cpu_to_le32(conf->beacon_int); + body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); + memcpy(body->bssid, conf->bssid, sizeof(body->bssid)); + if (ssid) { + body->ssid_length = cpu_to_le32(ssidlen); + memcpy(body->ssid, ssid, ssidlen); + } + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->aid = cpu_to_le16(aid); + body->beacon_lost_count = beacon_lost_count; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg) +{ + int ret; + struct wfx_hif_msg *hif; + /* FIXME: only send necessary bits */ + struct wfx_hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + /* FIXME: swap bytes as necessary in body */ + memcpy(body, arg, sizeof(*body)); + if (wfx_api_older_than(wdev, 1, 5)) + /* Legacy firmwares expect that add_key to be sent on right interface. 
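wfx_hif_scan() above sizes its buffer by hand for the trailing channel_list[] flexible array member. A minimal user-space sketch of that allocation pattern follows, with a stripped-down stand-in structure used purely for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stripped-down stand-in for wfx_hif_req_start_scan_alt: a fixed part
 * followed by a flexible array of channel numbers. */
struct scan_req {
	uint8_t num_of_channels;
	uint8_t channel_list[];
};

int main(void)
{
	const uint8_t channels[] = { 1, 6, 11 };
	size_t n = sizeof(channels);
	/* Same sizing rule as the driver: fixed header plus one byte per channel */
	struct scan_req *req = calloc(1, sizeof(*req) + n * sizeof(req->channel_list[0]));

	if (!req)
		return 1;
	req->num_of_channels = (uint8_t)n;
	memcpy(req->channel_list, channels, n);
	printf("request is %zu bytes for %u channels\n",
	       sizeof(*req) + n, (unsigned)req->num_of_channels);
	free(req);
	return 0;
}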
*/ + wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY, sizeof(*body)); + else + wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body)); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_remove_key(struct wfx_dev *wdev, int idx) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->entry_index = idx; + wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body)); + ret = wfx_cmd_send(wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue, + const struct ieee80211_tx_queue_params *arg) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!body) + return -ENOMEM; + + WARN_ON(arg->aifs > 255); + if (!hif) + return -ENOMEM; + body->aifsn = arg->aifs; + body->cw_min = cpu_to_le16(arg->cw_min); + body->cw_max = cpu_to_le16(arg->cw_max); + body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP); + body->queue_id = 3 - queue; + /* API 2.0 has changed queue IDs values */ + if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE) + body->queue_id = HIF_QUEUE_ID_BACKGROUND; + if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK) + body->queue_id = HIF_QUEUE_ID_BESTEFFORT; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!body) + return -ENOMEM; + + if (!hif) + return -ENOMEM; + if (ps) { + body->enter_psm = 1; + /* Firmware does not support more than 128ms */ + body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255); + if (body->fast_psm_idle_period) + body->fast_psm = 1; + } + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + const struct ieee80211_channel *channel) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif); + + WARN_ON(!conf->beacon_int); + if (!hif) + return -ENOMEM; + body->dtim_period = conf->dtim_period; + body->short_preamble = conf->use_short_preamble; + body->channel_number = channel->hw_value; + body->beacon_interval = cpu_to_le32(conf->beacon_int); + body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); + body->ssid_length = conf->ssid_len; + memcpy(body->ssid, conf->ssid, conf->ssid_len); + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + body->enable_beaconing = enable ? 
1 : 0; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp) +{ + int ret; + struct wfx_hif_msg *hif; + struct wfx_hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif); + + if (!hif) + return -ENOMEM; + if (mac_addr) + ether_addr_copy(body->mac_addr, mac_addr); + body->mfpc = mfp ? 1 : 0; + body->unmap = unmap ? 1 : 0; + body->peer_sta_id = sta_id; + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body)); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} + +int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len) +{ + int ret; + struct wfx_hif_msg *hif; + int buf_len = sizeof(struct wfx_hif_req_update_ie) + ies_len; + struct wfx_hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif); + + if (!hif) + return -ENOMEM; + body->beacon = 1; + body->num_ies = cpu_to_le16(1); + memcpy(body->ie, ies, ies_len); + wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len); + ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); + kfree(hif); + return ret; +} diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.h b/drivers/net/wireless/silabs/wfx/hif_tx.h new file mode 100644 index 000000000000..71817a6571f0 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Implementation of the host-to-chip commands (aka request/confirmation) of the + * hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ +#ifndef WFX_HIF_TX_H +#define WFX_HIF_TX_H + +#include +#include +#include + +struct ieee80211_channel; +struct ieee80211_bss_conf; +struct ieee80211_tx_queue_params; +struct cfg80211_scan_request; +struct wfx_hif_req_add_key; +struct wfx_dev; +struct wfx_vif; + +struct wfx_hif_cmd { + struct mutex lock; + struct completion ready; + struct completion done; + struct wfx_hif_msg *buf_send; + void *buf_recv; + size_t len_recv; + int ret; +}; + +void wfx_init_hif_cmd(struct wfx_hif_cmd *wfx_hif_cmd); +int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, + void *reply, size_t reply_len, bool async); + +int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); +int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); +int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + const struct ieee80211_channel *channel); +int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat); +int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, + struct ieee80211_channel *channel, const u8 *ssid, int ssidlen); +int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp); +int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg); +int wfx_hif_remove_key(struct wfx_dev *wdev, int idx); +int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout); +int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count); +int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue, + const struct ieee80211_tx_queue_params *arg); +int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable); +int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t 
ies_len); +int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, + int chan_start, int chan_num); +int wfx_hif_stop_scan(struct wfx_vif *wvif); +int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len); +int wfx_hif_shutdown(struct wfx_dev *wdev); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hif_tx_mib.c b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c new file mode 100644 index 000000000000..df1bcb1e2c02 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of the host-to-chip MIBs of the hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ + +#include + +#include "wfx.h" +#include "hif_tx.h" +#include "hif_tx_mib.h" +#include "hif_api_mib.h" + +int wfx_hif_set_output_power(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_current_tx_power_level arg = { + .power_level = cpu_to_le32(val * 10), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_CURRENT_TX_POWER_LEVEL, + &arg, sizeof(arg)); +} + +int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif, + unsigned int dtim_interval, unsigned int listen_interval) +{ + struct wfx_hif_mib_beacon_wake_up_period arg = { + .wakeup_period_min = dtim_interval, + .receive_dtim = 0, + .wakeup_period_max = listen_interval, + }; + + if (dtim_interval > 0xFF || listen_interval > 0xFFFF) + return -EINVAL; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_WAKEUP_PERIOD, + &arg, sizeof(arg)); +} + +int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst) +{ + struct wfx_hif_mib_rcpi_rssi_threshold arg = { + .rolling_average_count = 8, + .detection = 1, + }; + + if (!rssi_thold && !rssi_hyst) { + arg.upperthresh = 1; + arg.lowerthresh = 1; + } else { + arg.upper_threshold = rssi_thold + rssi_hyst; + arg.upper_threshold = (arg.upper_threshold + 110) * 2; + arg.lower_threshold = rssi_thold; + arg.lower_threshold = (arg.lower_threshold + 110) * 2; + } + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RCPI_RSSI_THRESHOLD, + &arg, sizeof(arg)); +} + +int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id, + struct wfx_hif_mib_extended_count_table *arg) +{ + if (wfx_api_older_than(wdev, 1, 3)) { + /* extended_count_table is wider than count_table */ + memset(arg, 0xFF, sizeof(*arg)); + return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE, + arg, sizeof(struct wfx_hif_mib_count_table)); + } else { + return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, + arg, sizeof(struct wfx_hif_mib_extended_count_table)); + } +} + +int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac) +{ + struct wfx_hif_mib_mac_address arg = { }; + + if (mac) + ether_addr_copy(arg.mac_addr, mac); + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS, + &arg, sizeof(arg)); +} + +int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool filter_prbreq) +{ + struct wfx_hif_mib_rx_filter arg = { }; + + if (filter_bssid) + arg.bssid_filter = 1; + if (!filter_prbreq) + arg.fwd_probe_req = 1; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER, &arg, sizeof(arg)); +} + +int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len, + const struct wfx_hif_ie_table_entry *tbl) +{ + int ret; + struct wfx_hif_mib_bcn_filter_table *arg; + int buf_len = struct_size(arg, ie_table, 
tbl_len); + + arg = kzalloc(buf_len, GFP_KERNEL); + if (!arg) + return -ENOMEM; + arg->num_of_info_elmts = cpu_to_le32(tbl_len); + memcpy(arg->ie_table, tbl, flex_array_size(arg, ie_table, tbl_len)); + ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_TABLE, + arg, buf_len); + kfree(arg); + return ret; +} + +int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count) +{ + struct wfx_hif_mib_bcn_filter_enable arg = { + .enable = cpu_to_le32(enable), + .bcn_count = cpu_to_le32(beacon_count), + }; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_ENABLE, + &arg, sizeof(arg)); +} + +int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode) +{ + struct wfx_hif_mib_gl_operational_power_mode arg = { + .power_mode = mode, + .wup_ind_activation = 1, + }; + + return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE, + &arg, sizeof(arg)); +} + +int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb, + u8 frame_type, int init_rate) +{ + struct wfx_hif_mib_template_frame *arg; + + WARN(skb->len > HIF_API_MAX_TEMPLATE_FRAME_SIZE, "frame is too big"); + skb_push(skb, 4); + arg = (struct wfx_hif_mib_template_frame *)skb->data; + skb_pull(skb, 4); + arg->init_rate = init_rate; + arg->frame_type = frame_type; + arg->frame_length = cpu_to_le16(skb->len); + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME, + arg, sizeof(*arg) + skb->len); +} + +int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required) +{ + struct wfx_hif_mib_protected_mgmt_policy arg = { }; + + WARN(required && !capable, "incoherent arguments"); + if (capable) { + arg.pmf_enable = 1; + arg.host_enc_auth_frames = 1; + } + if (!required) + arg.unpmf_allowed = 1; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_PROTECTED_MGMT_POLICY, + &arg, sizeof(arg)); +} + +int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy) +{ + struct wfx_hif_mib_block_ack_policy arg = { + .block_ack_tx_tid_policy = tx_tid_policy, + .block_ack_rx_tid_policy = rx_tid_policy, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY, + &arg, sizeof(arg)); +} + +int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density, + bool greenfield, bool short_preamble) +{ + struct wfx_hif_mib_set_association_mode arg = { + .preambtype_use = 1, + .mode = 1, + .spacing = 1, + .short_preamble = short_preamble, + .greenfield = greenfield, + .mpdu_start_spacing = ampdu_density, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_ASSOCIATION_MODE, + &arg, sizeof(arg)); +} + +int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates) +{ + struct wfx_hif_mib_set_tx_rate_retry_policy *arg; + size_t size = struct_size(arg, tx_rate_retry_policy, 1); + int ret; + + arg = kzalloc(size, GFP_KERNEL); + if (!arg) + return -ENOMEM; + arg->num_tx_rate_policies = 1; + arg->tx_rate_retry_policy[0].policy_index = policy_index; + arg->tx_rate_retry_policy[0].short_retry_count = 255; + arg->tx_rate_retry_policy[0].long_retry_count = 255; + arg->tx_rate_retry_policy[0].first_rate_sel = 1; + arg->tx_rate_retry_policy[0].terminate = 1; + arg->tx_rate_retry_policy[0].count_init = 1; + memcpy(&arg->tx_rate_retry_policy[0].rates, rates, + sizeof(arg->tx_rate_retry_policy[0].rates)); + ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, + arg, size); + kfree(arg); + return ret; +} 
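+
+/* The two helpers above that carry a flexible array (the beacon filter table and the TX rate
+ * retry policy) follow the same pattern: size the buffer with struct_size(), fill the header and
+ * the array, push it with wfx_hif_write_mib() and free the temporary buffer. The fixed-size MIBs
+ * below are simply built on the stack and passed by address.
+ */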
+ +int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period) +{ + struct wfx_hif_mib_keep_alive_period arg = { + .keep_alive_period = cpu_to_le16(period), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD, + &arg, sizeof(arg)); +}; + +int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr) +{ + struct wfx_hif_mib_arp_ip_addr_table arg = { + .condition_idx = idx, + .arp_enable = HIF_ARP_NS_FILTERING_DISABLE, + }; + + if (addr) { + /* Caution: type of addr is __be32 */ + memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address)); + arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE; + } + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE, + &arg, sizeof(arg)); +} + +int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable) +{ + struct wfx_hif_mib_gl_set_multi_msg arg = { + .enable_multi_tx_conf = enable, + }; + + return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG, &arg, sizeof(arg)); +} + +int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val) +{ + struct wfx_hif_mib_set_uapsd_information arg = { }; + + if (val & BIT(IEEE80211_AC_VO)) + arg.trig_voice = 1; + if (val & BIT(IEEE80211_AC_VI)) + arg.trig_video = 1; + if (val & BIT(IEEE80211_AC_BE)) + arg.trig_be = 1; + if (val & BIT(IEEE80211_AC_BK)) + arg.trig_bckgrnd = 1; + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_UAPSD_INFORMATION, + &arg, sizeof(arg)); +} + +int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable) +{ + struct wfx_hif_mib_non_erp_protection arg = { + .use_cts_to_self = enable, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_NON_ERP_PROTECTION, + &arg, sizeof(arg)); +} + +int wfx_hif_slot_time(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_slot_time arg = { + .slot_time = cpu_to_le32(val), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME, &arg, sizeof(arg)); +} + +int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_wep_default_key_id arg = { + .wep_default_key_id = val, + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID, + &arg, sizeof(arg)); +} + +int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val) +{ + struct wfx_hif_mib_dot11_rts_threshold arg = { + .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF), + }; + + return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_RTS_THRESHOLD, + &arg, sizeof(arg)); +} diff --git a/drivers/net/wireless/silabs/wfx/hif_tx_mib.h b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h new file mode 100644 index 000000000000..bcd4ef6a8497 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hif_tx_mib.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Implementation of the host-to-chip MIBs of the hardware API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + * Copyright (C) 2010, ST-Ericsson SA + */ +#ifndef WFX_HIF_TX_MIB_H +#define WFX_HIF_TX_MIB_H + +#include + +struct sk_buff; +struct wfx_vif; +struct wfx_dev; +struct wfx_hif_ie_table_entry; +struct wfx_hif_mib_extended_count_table; + +int wfx_hif_set_output_power(struct wfx_vif *wvif, int val); +int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif, + unsigned int dtim_interval, unsigned int listen_interval); +int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst); +int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id, + struct wfx_hif_mib_extended_count_table *arg); +int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac); +int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool fwd_probe_req); +int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len, + const struct wfx_hif_ie_table_entry *tbl); +int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count); +int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode); +int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb, + u8 frame_type, int init_rate); +int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required); +int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy); +int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density, + bool greenfield, bool short_preamble); +int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates); +int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period); +int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr); +int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable); +int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val); +int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable); +int wfx_hif_slot_time(struct wfx_vif *wvif, int val); +int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val); +int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/hwio.c b/drivers/net/wireless/silabs/wfx/hwio.c new file mode 100644 index 000000000000..3f9750b470be --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hwio.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Low-level I/O functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include +#include +#include + +#include "hwio.h" +#include "wfx.h" +#include "bus.h" +#include "traces.h" + +#define WFX_HIF_BUFFER_SIZE 0x2000 + +static int wfx_read32(struct wfx_dev *wdev, int reg, u32 *val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + *val = ~0; /* Never return undefined value */ + if (!tmp) + return -ENOMEM; + ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp, sizeof(u32)); + if (ret >= 0) + *val = le32_to_cpu(*tmp); + kfree(tmp); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +static int wfx_write32(struct wfx_dev *wdev, int reg, u32 val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + *tmp = cpu_to_le32(val); + ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp, sizeof(u32)); + kfree(tmp); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +static int wfx_read32_locked(struct wfx_dev *wdev, int reg, u32 *val) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_read32(wdev, reg, val); + _trace_io_read32(reg, *val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_write32_locked(struct wfx_dev *wdev, int reg, u32 val) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_write32(wdev, reg, val); + _trace_io_write32(reg, val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val) +{ + int ret; + u32 val_r, val_w; + + WARN_ON(~mask & val); + val &= mask; + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_read32(wdev, reg, &val_r); + _trace_io_read32(reg, val_r); + if (ret < 0) + goto err; + val_w = (val_r & ~mask) | val; + if (val_w != val_r) { + ret = wfx_write32(wdev, reg, val_w); + _trace_io_write32(reg, val_w); + } +err: + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf, size_t len) +{ + int ret; + int i; + u32 cfg; + u32 prefetch; + + WARN_ON(len >= WFX_HIF_BUFFER_SIZE); + WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT); + + if (reg == WFX_REG_AHB_DPORT) + prefetch = CFG_PREFETCH_AHB; + else if (reg == WFX_REG_SRAM_DPORT) + prefetch = CFG_PREFETCH_SRAM; + else + return -ENODEV; + + ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr); + if (ret < 0) + goto err; + + ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg); + if (ret < 0) + goto err; + + ret = wfx_write32(wdev, WFX_REG_CONFIG, cfg | prefetch); + if (ret < 0) + goto err; + + for (i = 0; i < 20; i++) { + ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg); + if (ret < 0) + goto err; + if (!(cfg & prefetch)) + break; + usleep_range(200, 250); + } + if (i == 20) { + ret = -ETIMEDOUT; + goto err; + } + + ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len); + +err: + if (ret < 0) + memset(buf, 0xFF, len); /* Never return undefined value */ + return ret; +} + +static int wfx_indirect_write(struct wfx_dev *wdev, int reg, u32 addr, + const void *buf, size_t len) +{ + int ret; + + WARN_ON(len >= WFX_HIF_BUFFER_SIZE); + WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT); + ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr); + if (ret < 0) + return ret; + + return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len); +} + +static int wfx_indirect_read_locked(struct wfx_dev 
*wdev, int reg, u32 addr, + void *buf, size_t len) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_read(wdev, reg, addr, buf, len); + _trace_io_ind_read(reg, addr, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr, + const void *buf, size_t len) +{ + int ret; + + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_write(wdev, reg, addr, buf, len); + _trace_io_ind_write(reg, addr, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + return ret; +} + +static int wfx_indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 *val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_read(wdev, reg, addr, tmp, sizeof(u32)); + *val = le32_to_cpu(*tmp); + _trace_io_ind_read32(reg, addr, *val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + kfree(tmp); + return ret; +} + +static int wfx_indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 val) +{ + int ret; + __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); + + if (!tmp) + return -ENOMEM; + *tmp = cpu_to_le32(val); + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wfx_indirect_write(wdev, reg, addr, tmp, sizeof(u32)); + _trace_io_ind_write32(reg, addr, val); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + kfree(tmp); + return ret; +} + +int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len) +{ + int ret; + + WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer"); + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len); + _trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len) +{ + int ret; + + WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer"); + wdev->hwbus_ops->lock(wdev->hwbus_priv); + ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len); + _trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len); + wdev->hwbus_ops->unlock(wdev->hwbus_priv); + if (ret) + dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); + return ret; +} + +int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len) +{ + return wfx_indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len); +} + +int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len) +{ + return wfx_indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len); +} + +int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len) +{ + return wfx_indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len); +} + +int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len) +{ + return wfx_indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len); +} + +int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val) +{ + return wfx_indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val); +} + +int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val) +{ + return wfx_indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val); +} + +int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val) +{ + return wfx_indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val); +} + +int 
wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val) +{ + return wfx_indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val); +} + +int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val) +{ + return wfx_read32_locked(wdev, WFX_REG_CONFIG, val); +} + +int wfx_config_reg_write(struct wfx_dev *wdev, u32 val) +{ + return wfx_write32_locked(wdev, WFX_REG_CONFIG, val); +} + +int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val) +{ + return wfx_write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val); +} + +int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val) +{ + return wfx_read32_locked(wdev, WFX_REG_CONTROL, val); +} + +int wfx_control_reg_write(struct wfx_dev *wdev, u32 val) +{ + return wfx_write32_locked(wdev, WFX_REG_CONTROL, val); +} + +int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val) +{ + return wfx_write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val); +} + +int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val) +{ + int ret; + + *val = ~0; /* Never return undefined value */ + ret = wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24); + if (ret) + return ret; + ret = wfx_read32_locked(wdev, WFX_REG_SET_GEN_R_W, val); + if (ret) + return ret; + *val &= IGPR_VALUE; + return ret; +} + +int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val) +{ + return wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val); +} diff --git a/drivers/net/wireless/silabs/wfx/hwio.h b/drivers/net/wireless/silabs/wfx/hwio.h new file mode 100644 index 000000000000..c6e7b065b7ff --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/hwio.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Low-level I/O functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_HWIO_H +#define WFX_HWIO_H + +#include + +struct wfx_dev; + +/* Caution: in the functions below, 'buf' will used with a DMA. So, it must be kmalloc'd (do not use + * stack allocated buffers). In doubt, enable CONFIG_DEBUG_SG to detect badly located buffer. 
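+ *
+ * A minimal, purely illustrative usage (names and length are arbitrary):
+ *
+ *   u8 *buf = kmalloc(len, GFP_KERNEL);   (heap allocation, usable for DMA)
+ *   if (!buf)
+ *           return -ENOMEM;
+ *   ret = wfx_sram_buf_read(wdev, addr, buf, len);
+ *   kfree(buf);
+ *
+ * whereas a stack buffer such as "u8 buf[64]" must never be passed to these helpers.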
+ */ +int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len); +int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t buf_len); + +int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len); +int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len); + +int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len); +int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len); + +int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val); +int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val); + +int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val); +int wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val); + +#define CFG_ERR_SPI_FRAME 0x00000001 /* only with SPI */ +#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 /* only with SDIO */ +#define CFG_ERR_BUF_UNDERRUN 0x00000002 +#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004 +#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008 +#define CFG_ERR_BUF_OVERRUN 0x00000010 +#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020 +#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040 +#define CFG_ERR_HOST_CRC_MISS 0x00000080 /* only with SDIO */ +#define CFG_SPI_IGNORE_CS 0x00000080 /* only with SPI */ +#define CFG_BYTE_ORDER_MASK 0x00000300 /* only writable with SPI */ +#define CFG_BYTE_ORDER_BADC 0x00000000 +#define CFG_BYTE_ORDER_DCBA 0x00000100 +#define CFG_BYTE_ORDER_ABCD 0x00000200 /* SDIO always use this value */ +#define CFG_DIRECT_ACCESS_MODE 0x00000400 +#define CFG_PREFETCH_AHB 0x00000800 +#define CFG_DISABLE_CPU_CLK 0x00001000 +#define CFG_PREFETCH_SRAM 0x00002000 +#define CFG_CPU_RESET 0x00004000 +#define CFG_SDIO_DISABLE_IRQ 0x00008000 /* only with SDIO */ +#define CFG_IRQ_ENABLE_DATA 0x00010000 +#define CFG_IRQ_ENABLE_WRDY 0x00020000 +#define CFG_CLK_RISE_EDGE 0x00040000 +#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 /* only with SDIO */ +#define CFG_RESERVED 0x00F00000 +#define CFG_DEVICE_ID_MAJOR 0x07000000 +#define CFG_DEVICE_ID_RESERVED 0x78000000 +#define CFG_DEVICE_ID_TYPE 0x80000000 +int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val); +int wfx_config_reg_write(struct wfx_dev *wdev, u32 val); +int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val); + +#define CTRL_NEXT_LEN_MASK 0x00000FFF +#define CTRL_WLAN_WAKEUP 0x00001000 +#define CTRL_WLAN_READY 0x00002000 +int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val); +int wfx_control_reg_write(struct wfx_dev *wdev, u32 val); +int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val); + +#define IGPR_RW 0x80000000 +#define IGPR_INDEX 0x7F000000 +#define IGPR_VALUE 0x00FFFFFF +int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val); +int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/key.c b/drivers/net/wireless/silabs/wfx/key.c new file mode 100644 index 000000000000..8f23e8d42bd4 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/key.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Key management related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "key.h" +#include "wfx.h" +#include "hif_tx_mib.h" + +static int wfx_alloc_key(struct wfx_dev *wdev) +{ + int idx; + + idx = ffs(~wdev->key_map) - 1; + if (idx < 0 || idx >= MAX_KEY_ENTRIES) + return -1; + + wdev->key_map |= BIT(idx); + return idx; +} + +static void wfx_free_key(struct wfx_dev *wdev, int idx) +{ + WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation"); + wdev->key_map &= ~BIT(idx); +} + +static u8 fill_wep_pair(struct wfx_hif_wep_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + WARN(key->keylen > sizeof(msg->key_data), "inconsistent data"); + msg->key_length = key->keylen; + memcpy(msg->key_data, key->key, key->keylen); + ether_addr_copy(msg->peer_address, peer_addr); + return HIF_KEY_TYPE_WEP_PAIRWISE; +} + +static u8 fill_wep_group(struct wfx_hif_wep_group_key *msg, + struct ieee80211_key_conf *key) +{ + WARN(key->keylen > sizeof(msg->key_data), "inconsistent data"); + msg->key_id = key->keyidx; + msg->key_length = key->keylen; + memcpy(msg->key_data, key->key, key->keylen); + return HIF_KEY_TYPE_WEP_DEFAULT; +} + +static u8 fill_tkip_pair(struct wfx_hif_tkip_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + u8 *keybuf = key->key; + + WARN(key->keylen != sizeof(msg->tkip_key_data) + sizeof(msg->tx_mic_key) + + sizeof(msg->rx_mic_key), "inconsistent data"); + memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data)); + keybuf += sizeof(msg->tkip_key_data); + memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key)); + keybuf += sizeof(msg->tx_mic_key); + memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key)); + ether_addr_copy(msg->peer_address, peer_addr); + return HIF_KEY_TYPE_TKIP_PAIRWISE; +} + +static u8 fill_tkip_group(struct wfx_hif_tkip_group_key *msg, struct ieee80211_key_conf *key, + struct ieee80211_key_seq *seq, enum nl80211_iftype iftype) +{ + u8 *keybuf = key->key; + + WARN(key->keylen != sizeof(msg->tkip_key_data) + 2 * sizeof(msg->rx_mic_key), + "inconsistent data"); + msg->key_id = key->keyidx; + memcpy(msg->rx_sequence_counter, &seq->tkip.iv16, sizeof(seq->tkip.iv16)); + memcpy(msg->rx_sequence_counter + sizeof(u16), &seq->tkip.iv32, sizeof(seq->tkip.iv32)); + memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data)); + keybuf += sizeof(msg->tkip_key_data); + if (iftype == NL80211_IFTYPE_AP) + /* Use Tx MIC Key */ + memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key)); + else + /* Use Rx MIC Key */ + memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key)); + return HIF_KEY_TYPE_TKIP_GROUP; +} + +static u8 fill_ccmp_pair(struct wfx_hif_aes_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data"); + ether_addr_copy(msg->peer_address, peer_addr); + memcpy(msg->aes_key_data, key->key, key->keylen); + return HIF_KEY_TYPE_AES_PAIRWISE; +} + +static u8 fill_ccmp_group(struct wfx_hif_aes_group_key *msg, + struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) +{ + WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data"); + memcpy(msg->aes_key_data, key->key, key->keylen); + memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn)); + memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_AES_GROUP; +} + +static u8 fill_sms4_pair(struct wfx_hif_wapi_pairwise_key *msg, + struct ieee80211_key_conf *key, u8 *peer_addr) +{ + u8 *keybuf = key->key; + 
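+	/* layout of key->key as consumed below: the SMS4 key first, then the MIC key */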
+ WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data), + "inconsistent data"); + ether_addr_copy(msg->peer_address, peer_addr); + memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data)); + keybuf += sizeof(msg->wapi_key_data); + memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_WAPI_PAIRWISE; +} + +static u8 fill_sms4_group(struct wfx_hif_wapi_group_key *msg, + struct ieee80211_key_conf *key) +{ + u8 *keybuf = key->key; + + WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data), + "inconsistent data"); + memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data)); + keybuf += sizeof(msg->wapi_key_data); + memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_WAPI_GROUP; +} + +static u8 fill_aes_cmac_group(struct wfx_hif_igtk_group_key *msg, + struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) +{ + WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data"); + memcpy(msg->igtk_key_data, key->key, key->keylen); + memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn)); + memreverse(msg->ipn, sizeof(seq->aes_cmac.pn)); + msg->key_id = key->keyidx; + return HIF_KEY_TYPE_IGTK_GROUP; +} + +static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + int ret; + struct wfx_hif_req_add_key k = { }; + struct ieee80211_key_seq seq; + struct wfx_dev *wdev = wvif->wdev; + int idx = wfx_alloc_key(wvif->wdev); + bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE; + + WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data"); + ieee80211_get_key_rx_seq(key, 0, &seq); + if (idx < 0) + return -EINVAL; + k.int_id = wvif->id; + k.entry_index = idx; + if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) { + if (pairwise) + k.type = fill_wep_pair(&k.key.wep_pairwise_key, key, sta->addr); + else + k.type = fill_wep_group(&k.key.wep_group_key, key); + } else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { + if (pairwise) + k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key, sta->addr); + else + k.type = fill_tkip_group(&k.key.tkip_group_key, key, &seq, + wvif->vif->type); + } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) { + if (pairwise) + k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key, sta->addr); + else + k.type = fill_ccmp_group(&k.key.aes_group_key, key, &seq); + } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) { + if (pairwise) + k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key, sta->addr); + else + k.type = fill_sms4_group(&k.key.wapi_group_key, key); + } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key, &seq); + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + } else { + dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher); + wfx_free_key(wdev, idx); + return -EOPNOTSUPP; + } + ret = wfx_hif_add_key(wdev, &k); + if (ret) { + wfx_free_key(wdev, idx); + return -EOPNOTSUPP; + } + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM; + key->hw_key_idx = idx; + return 0; +} + +static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key) +{ + WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx"); + wfx_free_key(wvif->wdev, key->hw_key_idx); + return wfx_hif_remove_key(wvif->wdev, key->hw_key_idx); +} + +int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd 
cmd, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct ieee80211_key_conf *key) +{ + int ret = -EOPNOTSUPP; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + mutex_lock(&wvif->wdev->conf_mutex); + if (cmd == SET_KEY) + ret = wfx_add_key(wvif, sta, key); + if (cmd == DISABLE_KEY) + ret = wfx_remove_key(wvif, key); + mutex_unlock(&wvif->wdev->conf_mutex); + return ret; +} + diff --git a/drivers/net/wireless/silabs/wfx/key.h b/drivers/net/wireless/silabs/wfx/key.h new file mode 100644 index 000000000000..2234e36dbbcd --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/key.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Key management related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_KEY_H +#define WFX_KEY_H + +#include + +struct wfx_dev; +struct wfx_vif; + +int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct ieee80211_key_conf *key); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c new file mode 100644 index 000000000000..b93b16b900c8 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Device probe and register. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (c) 2008, Johannes Berg + * Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). + * Copyright (c) 2007-2009, Christian Lamparter + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2004-2006 Jean-Baptiste Note , et al. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" +#include "wfx.h" +#include "fwio.h" +#include "hwio.h" +#include "bus.h" +#include "bh.h" +#include "sta.h" +#include "key.h" +#include "scan.h" +#include "debug.h" +#include "data_tx.h" +#include "hif_tx_mib.h" +#include "hif_api_cmd.h" + +#define WFX_PDS_TLV_TYPE 0x4450 // "PD" (Platform Data) in ascii little-endian +#define WFX_PDS_MAX_CHUNK_SIZE 1500 + +MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WF200"); +MODULE_AUTHOR("Jérôme Pouiller "); +MODULE_LICENSE("GPL"); + +#define RATETAB_ENT(_rate, _rateid, _flags) { \ + .bitrate = (_rate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ +} + +static struct ieee80211_rate wfx_rates[] = { + RATETAB_ENT(10, 0, 0), + RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(60, 6, 0), + RATETAB_ENT(90, 7, 0), + RATETAB_ENT(120, 8, 0), + RATETAB_ENT(180, 9, 0), + RATETAB_ENT(240, 10, 0), + RATETAB_ENT(360, 11, 0), + RATETAB_ENT(480, 12, 0), + RATETAB_ENT(540, 13, 0), +}; + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_channel wfx_2ghz_chantable[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static const struct ieee80211_supported_band wfx_band_2ghz = { + .channels = 
wfx_2ghz_chantable, + .n_channels = ARRAY_SIZE(wfx_2ghz_chantable), + .bitrates = wfx_rates, + .n_bitrates = ARRAY_SIZE(wfx_rates), + .ht_cap = { + /* Receive caps */ + .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), + .ht_supported = 1, + .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, + .mcs = { + .rx_mask = { 0xFF }, /* MCS0 to MCS7 */ + .rx_highest = cpu_to_le16(72), + .tx_params = IEEE80211_HT_MCS_TX_DEFINED, + }, + }, +}; + +static const struct ieee80211_iface_limit wdev_iface_limits[] = { + { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) }, + { .max = 1, .types = BIT(NL80211_IFTYPE_AP) }, +}; + +static const struct ieee80211_iface_combination wfx_iface_combinations[] = { + { + .num_different_channels = 2, + .max_interfaces = 2, + .limits = wdev_iface_limits, + .n_limits = ARRAY_SIZE(wdev_iface_limits), + } +}; + +static const struct ieee80211_ops wfx_ops = { + .start = wfx_start, + .stop = wfx_stop, + .add_interface = wfx_add_interface, + .remove_interface = wfx_remove_interface, + .config = wfx_config, + .tx = wfx_tx, + .join_ibss = wfx_join_ibss, + .leave_ibss = wfx_leave_ibss, + .conf_tx = wfx_conf_tx, + .hw_scan = wfx_hw_scan, + .cancel_hw_scan = wfx_cancel_hw_scan, + .start_ap = wfx_start_ap, + .stop_ap = wfx_stop_ap, + .sta_add = wfx_sta_add, + .sta_remove = wfx_sta_remove, + .set_tim = wfx_set_tim, + .set_key = wfx_set_key, + .set_rts_threshold = wfx_set_rts_threshold, + .set_default_unicast_key = wfx_set_default_unicast_key, + .bss_info_changed = wfx_bss_info_changed, + .configure_filter = wfx_configure_filter, + .ampdu_action = wfx_ampdu_action, + .flush = wfx_flush, + .add_chanctx = wfx_add_chanctx, + .remove_chanctx = wfx_remove_chanctx, + .change_chanctx = wfx_change_chanctx, + .assign_vif_chanctx = wfx_assign_vif_chanctx, + .unassign_vif_chanctx = wfx_unassign_vif_chanctx, +}; + +bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor) +{ + if (wdev->hw_caps.api_version_major < major) + return true; + if (wdev->hw_caps.api_version_major > major) + return false; + if (wdev->hw_caps.api_version_minor < minor) + return true; + return false; +} + +/* The device needs data about the antenna configuration. This information in provided by PDS + * (Platform Data Set, this is the wording used in WF200 documentation) files. For hardware + * integrators, the full process to create PDS files is described here: + * https://github.com/SiliconLabs/wfx-firmware/blob/master/PDS/README.md + * + * The PDS file is an array of Time-Length-Value structs. 
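+ *
+ * Each chunk starts with a 16-bit little-endian type and a 16-bit little-endian length (the
+ * length includes the 4 bytes of header), followed by the payload. Chunks of type "PD" carry a
+ * brace-delimited compressed PDS string that is forwarded to the chip. A purely illustrative
+ * chunk (the payload content is made up) could look like:
+ *
+ *   50 44 0d 00 7b 61 3a 7b 62 3a 31 7d 7d   ->   type "PD", length 13, payload "{a:{b:1}}"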
+ */ + int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len) +{ + int ret, chunk_type, chunk_len, chunk_num = 0; + + if (*buf == '{') { + dev_err(wdev->dev, "PDS: malformed file (legacy format?)\n"); + return -EINVAL; + } + while (len > 0) { + chunk_type = get_unaligned_le16(buf + 0); + chunk_len = get_unaligned_le16(buf + 2); + if (chunk_len > len) { + dev_err(wdev->dev, "PDS:%d: corrupted file\n", chunk_num); + return -EINVAL; + } + if (chunk_type != WFX_PDS_TLV_TYPE) { + dev_info(wdev->dev, "PDS:%d: skip unknown data\n", chunk_num); + goto next; + } + if (chunk_len > WFX_PDS_MAX_CHUNK_SIZE) + dev_warn(wdev->dev, "PDS:%d: unexpectedly large chunk\n", chunk_num); + if (buf[4] != '{' || buf[chunk_len - 1] != '}') + dev_warn(wdev->dev, "PDS:%d: unexpected content\n", chunk_num); + + ret = wfx_hif_configuration(wdev, buf + 4, chunk_len - 4); + if (ret > 0) { + dev_err(wdev->dev, "PDS:%d: invalid data (unsupported options?)\n", chunk_num); + return -EINVAL; + } + if (ret == -ETIMEDOUT) { + dev_err(wdev->dev, "PDS:%d: chip didn't reply (corrupted file?)\n", chunk_num); + return ret; + } + if (ret) { + dev_err(wdev->dev, "PDS:%d: chip returned an unknown error\n", chunk_num); + return -EIO; + } +next: + chunk_num++; + len -= chunk_len; + buf += chunk_len; + } + return 0; +} + +static int wfx_send_pdata_pds(struct wfx_dev *wdev) +{ + int ret = 0; + const struct firmware *pds; + u8 *tmp_buf; + + ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev); + if (ret) { + dev_err(wdev->dev, "can't load antenna parameters (PDS file %s). The device may be unstable.\n", + wdev->pdata.file_pds); + return ret; + } + tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL); + if (!tmp_buf) { + ret = -ENOMEM; + goto release_fw; + } + ret = wfx_send_pds(wdev, tmp_buf, pds->size); + kfree(tmp_buf); +release_fw: + release_firmware(pds); + return ret; +} + +static void wfx_free_common(void *data) +{ + struct wfx_dev *wdev = data; + + mutex_destroy(&wdev->tx_power_loop_info_lock); + mutex_destroy(&wdev->rx_stats_lock); + mutex_destroy(&wdev->conf_mutex); + ieee80211_free_hw(wdev->hw); +} + +struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata, + const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv) +{ + struct ieee80211_hw *hw; + struct wfx_dev *wdev; + + hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops); + if (!hw) + return NULL; + + SET_IEEE80211_DEV(hw, dev); + + ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); + ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, CONNECTION_MONITOR); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, MFP_CAPABLE); + + hw->vif_data_size = sizeof(struct wfx_vif); + hw->sta_data_size = sizeof(struct wfx_sta_priv); + hw->queues = 4; + hw->max_rates = 8; + hw->max_rate_tries = 8; + hw->extra_tx_headroom = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + + 4 /* alignment */ + 8 /* TKIP IV */; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP); + hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P | + NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U; + hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; + hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; + hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + 
hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX; + hw->wiphy->max_scan_ssids = 2; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations); + hw->wiphy->iface_combinations = wfx_iface_combinations; + hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL); + if (!hw->wiphy->bands[NL80211_BAND_2GHZ]) + goto err; + + /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */ + memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz, sizeof(wfx_band_2ghz)); + + wdev = hw->priv; + wdev->hw = hw; + wdev->dev = dev; + wdev->hwbus_ops = hwbus_ops; + wdev->hwbus_priv = hwbus_priv; + memcpy(&wdev->pdata, pdata, sizeof(*pdata)); + of_property_read_string(dev->of_node, "silabs,antenna-config-file", &wdev->pdata.file_pds); + wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW); + if (IS_ERR(wdev->pdata.gpio_wakeup)) + goto err; + + if (wdev->pdata.gpio_wakeup) + gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup"); + + mutex_init(&wdev->conf_mutex); + mutex_init(&wdev->rx_stats_lock); + mutex_init(&wdev->tx_power_loop_info_lock); + init_completion(&wdev->firmware_ready); + INIT_DELAYED_WORK(&wdev->cooling_timeout_work, wfx_cooling_timeout_work); + skb_queue_head_init(&wdev->tx_pending); + init_waitqueue_head(&wdev->tx_dequeue); + wfx_init_hif_cmd(&wdev->hif_cmd); + + if (devm_add_action_or_reset(dev, wfx_free_common, wdev)) + return NULL; + + return wdev; + +err: + ieee80211_free_hw(hw); + return NULL; +} + +int wfx_probe(struct wfx_dev *wdev) +{ + int i; + int err; + struct gpio_desc *gpio_saved; + + /* During first part of boot, gpio_wakeup cannot yet been used. So prevent bh() to touch + * it. + */ + gpio_saved = wdev->pdata.gpio_wakeup; + wdev->pdata.gpio_wakeup = NULL; + wdev->poll_irq = true; + + wfx_bh_register(wdev); + + err = wfx_init_device(wdev); + if (err) + goto bh_unregister; + + wfx_bh_poll_irq(wdev); + err = wait_for_completion_timeout(&wdev->firmware_ready, 1 * HZ); + if (err <= 0) { + if (err == 0) { + dev_err(wdev->dev, "timeout while waiting for startup indication\n"); + err = -ETIMEDOUT; + } else if (err == -ERESTARTSYS) { + dev_info(wdev->dev, "probe interrupted by user\n"); + } + goto bh_unregister; + } + + /* FIXME: fill wiphy::hw_version */ + dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n", + wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor, + wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label, + wdev->hw_caps.api_version_major, wdev->hw_caps.api_version_minor, + wdev->keyset, wdev->hw_caps.link_mode); + snprintf(wdev->hw->wiphy->fw_version, + sizeof(wdev->hw->wiphy->fw_version), + "%d.%d.%d", + wdev->hw_caps.firmware_major, + wdev->hw_caps.firmware_minor, + wdev->hw_caps.firmware_build); + + if (wfx_api_older_than(wdev, 1, 0)) { + dev_err(wdev->dev, "unsupported firmware API version (expect 1 while firmware returns %d)\n", + wdev->hw_caps.api_version_major); + err = -EOPNOTSUPP; + goto bh_unregister; + } + + if (wdev->hw_caps.link_mode == SEC_LINK_ENFORCED) { + dev_err(wdev->dev, "chip require secure_link, but can't negotiate it\n"); + goto bh_unregister; + } + + if (wdev->hw_caps.region_sel_mode) { + wdev->hw->wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS; + wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |= + IEEE80211_CHAN_NO_IR; + wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |= + IEEE80211_CHAN_NO_IR; + 
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |= + IEEE80211_CHAN_DISABLED; + } + + dev_dbg(wdev->dev, "sending configuration file %s\n", wdev->pdata.file_pds); + err = wfx_send_pdata_pds(wdev); + if (err < 0 && err != -ENOENT) + goto bh_unregister; + + wdev->poll_irq = false; + err = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv); + if (err) + goto bh_unregister; + + err = wfx_hif_use_multi_tx_conf(wdev, true); + if (err) + dev_err(wdev->dev, "misconfigured IRQ?\n"); + + wdev->pdata.gpio_wakeup = gpio_saved; + if (wdev->pdata.gpio_wakeup) { + dev_dbg(wdev->dev, "enable 'quiescent' power mode with wakeup GPIO and PDS file %s\n", + wdev->pdata.file_pds); + gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); + wfx_control_reg_write(wdev, 0); + wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT); + } else { + wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE); + } + + for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) { + eth_zero_addr(wdev->addresses[i].addr); + err = of_get_mac_address(wdev->dev->of_node, wdev->addresses[i].addr); + if (!err) + wdev->addresses[i].addr[ETH_ALEN - 1] += i; + else + ether_addr_copy(wdev->addresses[i].addr, wdev->hw_caps.mac_addr[i]); + if (!is_valid_ether_addr(wdev->addresses[i].addr)) { + dev_warn(wdev->dev, "using random MAC address\n"); + eth_random_addr(wdev->addresses[i].addr); + } + dev_info(wdev->dev, "MAC address %d: %pM\n", i, wdev->addresses[i].addr); + } + wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses); + wdev->hw->wiphy->addresses = wdev->addresses; + + if (!wfx_api_older_than(wdev, 3, 8)) + wdev->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + + err = ieee80211_register_hw(wdev->hw); + if (err) + goto irq_unsubscribe; + + err = wfx_debug_init(wdev); + if (err) + goto ieee80211_unregister; + + return 0; + +ieee80211_unregister: + ieee80211_unregister_hw(wdev->hw); +irq_unsubscribe: + wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); +bh_unregister: + wfx_bh_unregister(wdev); + return err; +} + +void wfx_release(struct wfx_dev *wdev) +{ + ieee80211_unregister_hw(wdev->hw); + wfx_hif_shutdown(wdev); + wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); + wfx_bh_unregister(wdev); +} + +static int __init wfx_core_init(void) +{ + int ret = 0; + + if (IS_ENABLED(CONFIG_SPI)) + ret = spi_register_driver(&wfx_spi_driver); + if (IS_ENABLED(CONFIG_MMC) && !ret) + ret = sdio_register_driver(&wfx_sdio_driver); + return ret; +} +module_init(wfx_core_init); + +static void __exit wfx_core_exit(void) +{ + if (IS_ENABLED(CONFIG_MMC)) + sdio_unregister_driver(&wfx_sdio_driver); + if (IS_ENABLED(CONFIG_SPI)) + spi_unregister_driver(&wfx_spi_driver); +} +module_exit(wfx_core_exit); diff --git a/drivers/net/wireless/silabs/wfx/main.h b/drivers/net/wireless/silabs/wfx/main.h new file mode 100644 index 000000000000..68c665307153 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/main.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Device probe and register. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (c) 2006, Michael Wu + * Copyright 2004-2006 Jean-Baptiste Note , et al. 
+ */ +#ifndef WFX_MAIN_H +#define WFX_MAIN_H + +#include +#include + +#include "hif_api_general.h" + +struct wfx_dev; +struct wfx_hwbus_ops; + +struct wfx_platform_data { + /* Keyset and ".sec" extension will be appended to this string */ + const char *file_fw; + const char *file_pds; + struct gpio_desc *gpio_wakeup; + /* if true HIF D_out is sampled on the rising edge of the clock (intended to be used in + * 50Mhz SDIO) + */ + bool use_rising_clk; +}; + +struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata, + const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv); + +int wfx_probe(struct wfx_dev *wdev); +void wfx_release(struct wfx_dev *wdev); + +bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor); +int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c new file mode 100644 index 000000000000..729825230db2 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/queue.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Queue between the tx operation and the bh workqueue. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "queue.h" +#include "wfx.h" +#include "sta.h" +#include "data_tx.h" +#include "traces.h" + +void wfx_tx_lock(struct wfx_dev *wdev) +{ + atomic_inc(&wdev->tx_lock); +} + +void wfx_tx_unlock(struct wfx_dev *wdev) +{ + int tx_lock = atomic_dec_return(&wdev->tx_lock); + + WARN(tx_lock < 0, "inconsistent tx_lock value"); + if (!tx_lock) + wfx_bh_request_tx(wdev); +} + +void wfx_tx_flush(struct wfx_dev *wdev) +{ + int ret; + + /* Do not wait for any reply if chip is frozen */ + if (wdev->chip_frozen) + return; + + wfx_tx_lock(wdev); + mutex_lock(&wdev->hif_cmd.lock); + ret = wait_event_timeout(wdev->hif.tx_buffers_empty, !wdev->hif.tx_buffers_used, + msecs_to_jiffies(3000)); + if (!ret) { + dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n", + wdev->hif.tx_buffers_used); + wfx_pending_dump_old_frames(wdev, 3000); + /* FIXME: drop pending frames here */ + wdev->chip_frozen = true; + } + mutex_unlock(&wdev->hif_cmd.lock); + wfx_tx_unlock(wdev); +} + +void wfx_tx_lock_flush(struct wfx_dev *wdev) +{ + wfx_tx_lock(wdev); + wfx_tx_flush(wdev); +} + +void wfx_tx_queues_init(struct wfx_vif *wvif) +{ + /* The device is in charge to respect the details of the QoS parameters. The driver just + * ensure that it roughtly respect the priorities to avoid any shortage. 
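+ *
+ * (wfx_tx_queues_get_skb() below sorts the queues by pending_frames * priority and serves the
+ * lowest weight first, so with the per-AC values {1, 2, 64, 128} a voice frame tends to be
+ * dequeued well before a background frame when both queues are similarly loaded.)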
+ */ + const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 }; + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; ++i) { + skb_queue_head_init(&wvif->tx_queue[i].normal); + skb_queue_head_init(&wvif->tx_queue[i].cab); + wvif->tx_queue[i].priority = priorities[i]; + } +} + +bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue) +{ + return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab); +} + +void wfx_tx_queues_check_empty(struct wfx_vif *wvif) +{ + int i; + + for (i = 0; i < IEEE80211_NUM_ACS; ++i) { + WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames)); + WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i])); + } +} + +static void __wfx_tx_queue_drop(struct wfx_vif *wvif, + struct sk_buff_head *skb_queue, struct sk_buff_head *dropped) +{ + struct sk_buff *skb, *tmp; + + spin_lock_bh(&skb_queue->lock); + skb_queue_walk_safe(skb_queue, skb, tmp) { + __skb_unlink(skb, skb_queue); + skb_queue_head(dropped, skb); + } + spin_unlock_bh(&skb_queue->lock); +} + +void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, + struct sk_buff_head *dropped) +{ + __wfx_tx_queue_drop(wvif, &queue->cab, dropped); + __wfx_tx_queue_drop(wvif, &queue->normal, dropped); + wake_up(&wvif->wdev->tx_dequeue); +} + +void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb) +{ + struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + skb_queue_tail(&queue->cab, skb); + else + skb_queue_tail(&queue->normal, skb); +} + +void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped) +{ + struct wfx_queue *queue; + struct wfx_vif *wvif; + struct wfx_hif_msg *hif; + struct sk_buff *skb; + + WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__); + while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) { + hif = (struct wfx_hif_msg *)skb->data; + wvif = wdev_to_wvif(wdev, hif->interface); + if (wvif) { + queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; + WARN_ON(skb_get_queue_mapping(skb) > 3); + WARN_ON(!atomic_read(&queue->pending_frames)); + atomic_dec(&queue->pending_frames); + } + skb_queue_head(dropped, skb); + } +} + +struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id) +{ + struct wfx_queue *queue; + struct wfx_hif_req_tx *req; + struct wfx_vif *wvif; + struct wfx_hif_msg *hif; + struct sk_buff *skb; + + spin_lock_bh(&wdev->tx_pending.lock); + skb_queue_walk(&wdev->tx_pending, skb) { + hif = (struct wfx_hif_msg *)skb->data; + req = (struct wfx_hif_req_tx *)hif->body; + if (req->packet_id != packet_id) + continue; + spin_unlock_bh(&wdev->tx_pending.lock); + wvif = wdev_to_wvif(wdev, hif->interface); + if (wvif) { + queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; + WARN_ON(skb_get_queue_mapping(skb) > 3); + WARN_ON(!atomic_read(&queue->pending_frames)); + atomic_dec(&queue->pending_frames); + } + skb_unlink(skb, &wdev->tx_pending); + return skb; + } + spin_unlock_bh(&wdev->tx_pending.lock); + WARN(1, "cannot find packet in pending queue"); + return NULL; +} + +void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms) +{ + ktime_t now = ktime_get(); + struct wfx_tx_priv *tx_priv; + struct wfx_hif_req_tx *req; + struct sk_buff *skb; + bool first = true; + + spin_lock_bh(&wdev->tx_pending.lock); + skb_queue_walk(&wdev->tx_pending, skb) { + tx_priv = wfx_skb_tx_priv(skb); + req = wfx_skb_txreq(skb); + if (ktime_after(now, 
ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) { + if (first) { + dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n", + limit_ms); + first = false; + } + dev_info(wdev->dev, " id %08x sent %lldms ago\n", + req->packet_id, ktime_ms_delta(now, tx_priv->xmit_timestamp)); + } + } + spin_unlock_bh(&wdev->tx_pending.lock); +} + +unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb) +{ + ktime_t now = ktime_get(); + struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb); + + return ktime_us_delta(now, tx_priv->xmit_timestamp); +} + +bool wfx_tx_queues_has_cab(struct wfx_vif *wvif) +{ + int i; + + if (wvif->vif->type != NL80211_IFTYPE_AP) + return false; + for (i = 0; i < IEEE80211_NUM_ACS; ++i) + /* Note: since only AP can have mcast frames in queue and only one vif can be AP, + * all queued frames has same interface id + */ + if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab)) + return true; + return false; +} + +static int wfx_tx_queue_get_weight(struct wfx_queue *queue) +{ + return atomic_read(&queue->pending_frames) * queue->priority; +} + +static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev) +{ + struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)]; + int i, j, num_queues = 0; + struct wfx_vif *wvif; + struct wfx_hif_msg *hif; + struct sk_buff *skb; + + /* sort the queues */ + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + WARN_ON(num_queues >= ARRAY_SIZE(queues)); + queues[num_queues] = &wvif->tx_queue[i]; + for (j = num_queues; j > 0; j--) + if (wfx_tx_queue_get_weight(queues[j]) < + wfx_tx_queue_get_weight(queues[j - 1])) + swap(queues[j - 1], queues[j]); + num_queues++; + } + } + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + if (!wvif->after_dtim_tx_allowed) + continue; + for (i = 0; i < num_queues; i++) { + skb = skb_dequeue(&queues[i]->cab); + if (!skb) + continue; + /* Note: since only AP can have mcast frames in queue and only one vif can + * be AP, all queued frames has same interface id + */ + hif = (struct wfx_hif_msg *)skb->data; + WARN_ON(hif->interface != wvif->id); + WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]); + atomic_inc(&queues[i]->pending_frames); + trace_queues_stats(wdev, queues[i]); + return skb; + } + /* No more multicast to sent */ + wvif->after_dtim_tx_allowed = false; + schedule_work(&wvif->update_tim_work); + } + + for (i = 0; i < num_queues; i++) { + skb = skb_dequeue(&queues[i]->normal); + if (skb) { + atomic_inc(&queues[i]->pending_frames); + trace_queues_stats(wdev, queues[i]); + return skb; + } + } + return NULL; +} + +struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev) +{ + struct wfx_tx_priv *tx_priv; + struct sk_buff *skb; + + if (atomic_read(&wdev->tx_lock)) + return NULL; + skb = wfx_tx_queues_get_skb(wdev); + if (!skb) + return NULL; + skb_queue_tail(&wdev->tx_pending, skb); + wake_up(&wdev->tx_dequeue); + tx_priv = wfx_skb_tx_priv(skb); + tx_priv->xmit_timestamp = ktime_get(); + return (struct wfx_hif_msg *)skb->data; +} diff --git a/drivers/net/wireless/silabs/wfx/queue.h b/drivers/net/wireless/silabs/wfx/queue.h new file mode 100644 index 000000000000..4731debca93d --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/queue.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Queue between the tx operation and the bh workqueue. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_QUEUE_H +#define WFX_QUEUE_H + +#include +#include + +struct wfx_dev; +struct wfx_vif; + +struct wfx_queue { + struct sk_buff_head normal; + struct sk_buff_head cab; /* Content After (DTIM) Beacon */ + atomic_t pending_frames; + int priority; +}; + +void wfx_tx_lock(struct wfx_dev *wdev); +void wfx_tx_unlock(struct wfx_dev *wdev); +void wfx_tx_flush(struct wfx_dev *wdev); +void wfx_tx_lock_flush(struct wfx_dev *wdev); + +void wfx_tx_queues_init(struct wfx_vif *wvif); +void wfx_tx_queues_check_empty(struct wfx_vif *wvif); +bool wfx_tx_queues_has_cab(struct wfx_vif *wvif); +void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb); +struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev); + +bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue); +void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, + struct sk_buff_head *dropped); + +struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id); +void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped); +unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb); +void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c new file mode 100644 index 000000000000..7f34f0d322f9 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/scan.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Scan related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#include + +#include "scan.h" +#include "wfx.h" +#include "sta.h" +#include "hif_tx_mib.h" + +static void wfx_ieee80211_scan_completed_compat(struct ieee80211_hw *hw, bool aborted) +{ + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + + ieee80211_scan_completed(hw, &info); +} + +static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request *req) +{ + struct sk_buff *skb; + + skb = ieee80211_probereq_get(wvif->wdev->hw, wvif->vif->addr, NULL, 0, req->ie_len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, req->ie, req->ie_len); + wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0); + dev_kfree_skb(skb); + return 0; +} + +static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int start_idx) +{ + int i, ret; + struct ieee80211_channel *ch_start, *ch_cur; + + for (i = start_idx; i < req->n_channels; i++) { + ch_start = req->channels[start_idx]; + ch_cur = req->channels[i]; + WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported"); + if (ch_cur->max_power != ch_start->max_power) + break; + if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR) + break; + } + wfx_tx_lock_flush(wvif->wdev); + wvif->scan_abort = false; + reinit_completion(&wvif->scan_complete); + ret = wfx_hif_scan(wvif, req, start_idx, i - start_idx); + if (ret) { + wfx_tx_unlock(wvif->wdev); + return -EIO; + } + ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); + if (!ret) { + wfx_hif_stop_scan(wvif); + ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); + dev_dbg(wvif->wdev->dev, "scan timeout (%d channels done)\n", + wvif->scan_nb_chan_done); + } + if (!ret) { + dev_err(wvif->wdev->dev, "scan didn't stop\n"); + ret = -ETIMEDOUT; + } else if (wvif->scan_abort) { + dev_notice(wvif->wdev->dev, "scan abort\n"); + ret = -ECONNABORTED; + } else if (wvif->scan_nb_chan_done > i - 
start_idx) { + ret = -EIO; + } else { + ret = wvif->scan_nb_chan_done; + } + if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower) + wfx_hif_set_output_power(wvif, wvif->vif->bss_conf.txpower); + wfx_tx_unlock(wvif->wdev); + return ret; +} + +/* It is not really necessary to run scan request asynchronously. However, + * there is a bug in "iw scan" when ieee80211_scan_completed() is called before + * wfx_hw_scan() return + */ +void wfx_hw_scan_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work); + struct ieee80211_scan_request *hw_req = wvif->scan_req; + int chan_cur, ret, err; + + mutex_lock(&wvif->wdev->conf_mutex); + mutex_lock(&wvif->scan_lock); + if (wvif->join_in_progress) { + dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN"); + wfx_reset(wvif); + } + update_probe_tmpl(wvif, &hw_req->req); + chan_cur = 0; + err = 0; + do { + ret = send_scan_req(wvif, &hw_req->req, chan_cur); + if (ret > 0) { + chan_cur += ret; + err = 0; + } + if (!ret) + err++; + if (err > 2) { + dev_err(wvif->wdev->dev, "scan has not been able to start\n"); + ret = -ETIMEDOUT; + } + } while (ret >= 0 && chan_cur < hw_req->req.n_channels); + mutex_unlock(&wvif->scan_lock); + mutex_unlock(&wvif->wdev->conf_mutex); + wfx_ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0); +} + +int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS); + wvif->scan_req = hw_req; + schedule_work(&wvif->scan_work); + return 0; +} + +void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wvif->scan_abort = true; + wfx_hif_stop_scan(wvif); +} + +void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done) +{ + wvif->scan_nb_chan_done = nb_chan_done; + complete(&wvif->scan_complete); +} diff --git a/drivers/net/wireless/silabs/wfx/scan.h b/drivers/net/wireless/silabs/wfx/scan.h new file mode 100644 index 000000000000..78e3b984f375 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/scan.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Scan related functions. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_SCAN_H +#define WFX_SCAN_H + +#include + +struct wfx_dev; +struct wfx_vif; + +void wfx_hw_scan_work(struct work_struct *work); +int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req); +void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c new file mode 100644 index 000000000000..b1e9fb14d2b4 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -0,0 +1,794 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of mac80211 API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
+ * Copyright (c) 2010, ST-Ericsson + */ +#include +#include + +#include "sta.h" +#include "wfx.h" +#include "fwio.h" +#include "bh.h" +#include "key.h" +#include "scan.h" +#include "debug.h" +#include "hif_tx.h" +#include "hif_tx_mib.h" + +#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2 + +u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates) +{ + int i; + u32 ret = 0; + /* The device only supports 2GHz */ + struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; + + for (i = 0; i < sband->n_bitrates; i++) { + if (rates & BIT(i)) { + if (i >= sband->n_bitrates) + dev_warn(wdev->dev, "unsupported basic rate\n"); + else + ret |= BIT(sband->bitrates[i].hw_value); + } + } + return ret; +} + +void wfx_cooling_timeout_work(struct work_struct *work) +{ + struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev, + cooling_timeout_work); + + wdev->chip_frozen = true; + wfx_tx_unlock(wdev); +} + +void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd) +{ + if (cmd == STA_NOTIFY_AWAKE) { + /* Device recover normal temperature */ + if (cancel_delayed_work(&wdev->cooling_timeout_work)) + wfx_tx_unlock(wdev); + } else { + /* Device is too hot */ + schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ); + wfx_tx_lock(wdev); + } +} + +static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon) +{ + static const struct wfx_hif_ie_table_entry filter_ies[] = { + { + .ie_id = WLAN_EID_VENDOR_SPECIFIC, + .has_changed = 1, + .no_longer = 1, + .has_appeared = 1, + .oui = { 0x50, 0x6F, 0x9A }, + }, { + .ie_id = WLAN_EID_HT_OPERATION, + .has_changed = 1, + .no_longer = 1, + .has_appeared = 1, + }, { + .ie_id = WLAN_EID_ERP_INFO, + .has_changed = 1, + .no_longer = 1, + .has_appeared = 1, + }, { + .ie_id = WLAN_EID_CHANNEL_SWITCH, + .has_changed = 1, + .no_longer = 1, + .has_appeared = 1, + } + }; + + if (!filter_beacon) { + wfx_hif_beacon_filter_control(wvif, 0, 1); + } else { + wfx_hif_set_beacon_filter_table(wvif, ARRAY_SIZE(filter_ies), filter_ies); + wfx_hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0); + } +} + +void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 unused) +{ + struct wfx_vif *wvif = NULL; + struct wfx_dev *wdev = hw->priv; + bool filter_bssid, filter_prbreq, filter_beacon; + + /* Notes: + * - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered + * - PS-Poll (FIF_PSPOLL) are never filtered + * - RTS, CTS and Ack (FIF_CONTROL) are always filtered + * - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered + * - Firmware does (yet) allow to forward unicast traffic sent to other stations (aka. 
+ * promiscuous mode) + */ + *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS | + FIF_PROBE_REQ | FIF_PSPOLL; + + mutex_lock(&wdev->conf_mutex); + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + mutex_lock(&wvif->scan_lock); + + /* Note: FIF_BCN_PRBRESP_PROMISC covers probe response and + * beacons from other BSS + */ + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + filter_beacon = false; + else + filter_beacon = true; + wfx_filter_beacon(wvif, filter_beacon); + + if (*total_flags & FIF_OTHER_BSS) + filter_bssid = false; + else + filter_bssid = true; + + /* In AP mode, chip can reply to probe request itself */ + if (*total_flags & FIF_PROBE_REQ && wvif->vif->type == NL80211_IFTYPE_AP) { + dev_dbg(wdev->dev, "do not forward probe request in AP mode\n"); + *total_flags &= ~FIF_PROBE_REQ; + } + + if (*total_flags & FIF_PROBE_REQ) + filter_prbreq = false; + else + filter_prbreq = true; + wfx_hif_set_rx_filter(wvif, filter_bssid, filter_prbreq); + + mutex_unlock(&wvif->scan_lock); + } + mutex_unlock(&wdev->conf_mutex); +} + +static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps) +{ + struct ieee80211_channel *chan0 = NULL, *chan1 = NULL; + struct ieee80211_conf *conf = &wvif->wdev->hw->conf; + + WARN(!wvif->vif->bss_conf.assoc && enable_ps, + "enable_ps is reliable only if associated"); + if (wdev_to_wvif(wvif->wdev, 0)) + chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan; + if (wdev_to_wvif(wvif->wdev, 1)) + chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan; + if (chan0 && chan1 && wvif->vif->type != NL80211_IFTYPE_AP) { + if (chan0->hw_value == chan1->hw_value) { + /* It is useless to enable PS if channels are the same. */ + if (enable_ps) + *enable_ps = false; + if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps) + dev_info(wvif->wdev->dev, "ignoring requested PS mode"); + return -1; + } + /* It is necessary to enable PS if channels are different. 
*/ + if (enable_ps) + *enable_ps = true; + if (wfx_api_older_than(wvif->wdev, 3, 2)) + return 0; + else + return 30; + } + if (enable_ps) + *enable_ps = wvif->vif->bss_conf.ps; + if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps) + return conf->dynamic_ps_timeout; + else + return -1; +} + +int wfx_update_pm(struct wfx_vif *wvif) +{ + int ps_timeout; + bool ps; + + if (!wvif->vif->bss_conf.assoc) + return 0; + ps_timeout = wfx_get_ps_timeout(wvif, &ps); + if (!ps) + ps_timeout = 0; + WARN_ON(ps_timeout < 0); + if (wvif->uapsd_mask) + ps_timeout = 0; + + if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete, TU_TO_JIFFIES(512))) + dev_warn(wvif->wdev->dev, "timeout while waiting of set_pm_mode_complete\n"); + return wfx_hif_set_pm(wvif, ps, ps_timeout); +} + +int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + int old_uapsd = wvif->uapsd_mask; + + WARN_ON(queue >= hw->queues); + + mutex_lock(&wdev->conf_mutex); + assign_bit(queue, &wvif->uapsd_mask, params->uapsd); + wfx_hif_set_edca_queue_params(wvif, queue, params); + if (wvif->vif->type == NL80211_IFTYPE_STATION && old_uapsd != wvif->uapsd_mask) { + wfx_hif_set_uapsd_info(wvif, wvif->uapsd_mask); + wfx_update_pm(wvif); + } + mutex_unlock(&wdev->conf_mutex); + return 0; +} + +int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = NULL; + + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_hif_rts_threshold(wvif, value); + return 0; +} + +void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi) +{ + /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 + * RSSI = RCPI / 2 - 110 + */ + int rcpi_rssi; + int cqm_evt; + + rcpi_rssi = raw_rcpi_rssi / 2 - 110; + if (rcpi_rssi <= wvif->vif->bss_conf.cqm_rssi_thold) + cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; + else + cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; + ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL); +} + +static void wfx_beacon_loss_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif, + beacon_loss_work); + struct ieee80211_bss_conf *bss_conf = &wvif->vif->bss_conf; + + ieee80211_beacon_loss(wvif->vif); + schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int)); +} + +void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_hif_wep_default_key_id(wvif, idx); +} + +void wfx_reset(struct wfx_vif *wvif) +{ + struct wfx_dev *wdev = wvif->wdev; + + wfx_tx_lock_flush(wdev); + wfx_hif_reset(wvif, false); + wfx_tx_policy_init(wvif); + if (wvif_count(wdev) <= 1) + wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); + wfx_tx_unlock(wdev); + wvif->join_in_progress = false; + cancel_delayed_work_sync(&wvif->beacon_loss_work); + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_update_pm(wvif); +} + +int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv; + + sta_priv->vif_id = wvif->id; + + if (vif->type == NL80211_IFTYPE_STATION) + wfx_hif_set_mfp(wvif, sta->mfp, sta->mfp); + + /* In station mode, the firmware interprets new link-id 
as a TDLS peer */ + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + return 0; + sta_priv->link_id = ffz(wvif->link_id_map); + wvif->link_id_map |= BIT(sta_priv->link_id); + WARN_ON(!sta_priv->link_id); + WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX); + wfx_hif_map_link(wvif, false, sta->addr, sta_priv->link_id, sta->mfp); + + return 0; +} + +int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv; + + /* See note in wfx_sta_add() */ + if (!sta_priv->link_id) + return 0; + /* FIXME add a mutex? */ + wfx_hif_map_link(wvif, true, sta->addr, sta_priv->link_id, false); + wvif->link_id_map &= ~BIT(sta_priv->link_id); + return 0; +} + +static int wfx_upload_ap_templates(struct wfx_vif *wvif) +{ + struct sk_buff *skb; + + skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif); + if (!skb) + return -ENOMEM; + wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS); + dev_kfree_skb(skb); + + skb = ieee80211_proberesp_get(wvif->wdev->hw, wvif->vif); + if (!skb) + return -ENOMEM; + wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES, API_RATE_INDEX_B_1MBPS); + dev_kfree_skb(skb); + return 0; +} + +static void wfx_set_mfp_ap(struct wfx_vif *wvif) +{ + struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif); + const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); + const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, + skb->len - ieoffset); + const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16); + const int pairwise_cipher_suite_size = 4 / sizeof(u16); + const int akm_suite_size = 4 / sizeof(u16); + + if (ptr) { + ptr += pairwise_cipher_suite_count_offset; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return; + ptr += 1 + pairwise_cipher_suite_size * *ptr; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return; + ptr += 1 + akm_suite_size * *ptr; + if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) + return; + wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6)); + } +} + +int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct wfx_dev *wdev = wvif->wdev; + int ret; + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) + wfx_update_pm(wvif); + wvif = (struct wfx_vif *)vif->drv_priv; + wfx_upload_ap_templates(wvif); + ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel); + if (ret > 0) + return -EIO; + wfx_set_mfp_ap(wvif); + return ret; +} + +void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_reset(wvif); +} + +static void wfx_join(struct wfx_vif *wvif) +{ + int ret; + struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf; + struct cfg80211_bss *bss = NULL; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + const u8 *ssidie = NULL; + int ssidlen = 0; + + wfx_tx_lock_flush(wvif->wdev); + + bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel, conf->bssid, NULL, 0, + IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); + if (!bss && !conf->ibss_joined) { + wfx_tx_unlock(wvif->wdev); + return; + } + + rcu_read_lock(); /* protect ssidie */ + if (bss) + ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); + if (ssidie) { + ssidlen = ssidie[1]; + if (ssidlen > IEEE80211_MAX_SSID_LEN) + ssidlen = IEEE80211_MAX_SSID_LEN; + memcpy(ssid, &ssidie[2], ssidlen); + } 
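+ /* ssidie points into the RCU-protected BSS IEs, hence the SSID copy above is done
+ * before releasing the RCU read lock
+ */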
+ rcu_read_unlock(); + + cfg80211_put_bss(wvif->wdev->hw->wiphy, bss); + + wvif->join_in_progress = true; + ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssidlen); + if (ret) { + ieee80211_connection_loss(wvif->vif); + wfx_reset(wvif); + } else { + /* Due to beacon filtering it is possible that the AP's beacon is not known for the + * mac80211 stack. Disable filtering temporary to make sure the stack receives at + * least one + */ + wfx_filter_beacon(wvif, false); + } + wfx_tx_unlock(wvif->wdev); +} + +static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *info) +{ + struct ieee80211_sta *sta = NULL; + int ampdu_density = 0; + bool greenfield = false; + + rcu_read_lock(); /* protect sta */ + if (info->bssid && !info->ibss_joined) + sta = ieee80211_find_sta(wvif->vif, info->bssid); + if (sta && sta->ht_cap.ht_supported) + ampdu_density = sta->ht_cap.ampdu_density; + if (sta && sta->ht_cap.ht_supported && + !(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)) + greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); + rcu_read_unlock(); + + wvif->join_in_progress = false; + wfx_hif_set_association_mode(wvif, ampdu_density, greenfield, info->use_short_preamble); + wfx_hif_keep_alive_period(wvif, 0); + /* beacon_loss_count is defined to 7 in net/mac80211/mlme.c. Let's use the same value. */ + wfx_hif_set_bss_params(wvif, info->aid, 7); + wfx_hif_set_beacon_wakeup_period(wvif, 1, 1); + wfx_update_pm(wvif); +} + +int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_upload_ap_templates(wvif); + wfx_join(wvif); + return 0; +} + +void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wfx_reset(wvif); +} + +static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable) +{ + /* Driver has Content After DTIM Beacon in queue. Driver is waiting for a signal from the + * firmware. Since we are going to stop to send beacons, this signal will never happens. See + * also wfx_suspend_resume_mc() + */ + if (!enable && wfx_tx_queues_has_cab(wvif)) { + wvif->after_dtim_tx_allowed = true; + wfx_bh_request_tx(wvif->wdev); + } + wfx_hif_beacon_transmit(wvif, enable); +} + +void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + int i; + + mutex_lock(&wdev->conf_mutex); + + if (changed & BSS_CHANGED_BASIC_RATES || + changed & BSS_CHANGED_BEACON_INT || + changed & BSS_CHANGED_BSSID) { + if (vif->type == NL80211_IFTYPE_STATION) + wfx_join(wvif); + } + + if (changed & BSS_CHANGED_ASSOC) { + if (info->assoc || info->ibss_joined) + wfx_join_finalize(wvif, info); + else if (!info->assoc && vif->type == NL80211_IFTYPE_STATION) + wfx_reset(wvif); + else + dev_warn(wdev->dev, "misunderstood change: ASSOC\n"); + } + + if (changed & BSS_CHANGED_BEACON_INFO) { + if (vif->type != NL80211_IFTYPE_STATION) + dev_warn(wdev->dev, "misunderstood change: BEACON_INFO\n"); + wfx_hif_set_beacon_wakeup_period(wvif, info->dtim_period, info->dtim_period); + /* We temporary forwarded beacon for join process. It is now no more necessary. 
*/ + wfx_filter_beacon(wvif, true); + } + + if (changed & BSS_CHANGED_ARP_FILTER) { + for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) { + __be32 *arp_addr = &info->arp_addr_list[i]; + + if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES) + arp_addr = NULL; + if (i >= info->arp_addr_cnt) + arp_addr = NULL; + wfx_hif_set_arp_ipv4_filter(wvif, i, arp_addr); + } + } + + if (changed & BSS_CHANGED_AP_PROBE_RESP || changed & BSS_CHANGED_BEACON) + wfx_upload_ap_templates(wvif); + + if (changed & BSS_CHANGED_BEACON_ENABLED) + wfx_enable_beacon(wvif, info->enable_beacon); + + if (changed & BSS_CHANGED_KEEP_ALIVE) + wfx_hif_keep_alive_period(wvif, + info->max_idle_period * USEC_PER_TU / USEC_PER_MSEC); + + if (changed & BSS_CHANGED_ERP_CTS_PROT) + wfx_hif_erp_use_protection(wvif, info->use_cts_prot); + + if (changed & BSS_CHANGED_ERP_SLOT) + wfx_hif_slot_time(wvif, info->use_short_slot ? 9 : 20); + + if (changed & BSS_CHANGED_CQM) + wfx_hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold, info->cqm_rssi_hyst); + + if (changed & BSS_CHANGED_TXPOWER) + wfx_hif_set_output_power(wvif, info->txpower); + + if (changed & BSS_CHANGED_PS) + wfx_update_pm(wvif); + + mutex_unlock(&wdev->conf_mutex); +} + +static int wfx_update_tim(struct wfx_vif *wvif) +{ + struct sk_buff *skb; + u16 tim_offset, tim_length; + u8 *tim_ptr; + + skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif, &tim_offset, &tim_length); + if (!skb) + return -ENOENT; + tim_ptr = skb->data + tim_offset; + + if (tim_offset && tim_length >= 6) { + /* Firmware handles DTIM counter internally */ + tim_ptr[2] = 0; + + /* Set/reset aid0 bit */ + if (wfx_tx_queues_has_cab(wvif)) + tim_ptr[4] |= 1; + else + tim_ptr[4] &= ~1; + } + + wfx_hif_update_ie_beacon(wvif, tim_ptr, tim_length); + dev_kfree_skb(skb); + + return 0; +} + +static void wfx_update_tim_work(struct work_struct *work) +{ + struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work); + + wfx_update_tim(wvif); +} + +int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv; + struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id); + + if (!wvif) { + dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); + return -EIO; + } + schedule_work(&wvif->update_tim_work); + return 0; +} + +void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd) +{ + struct wfx_vif *wvif_it; + + if (notify_cmd != STA_NOTIFY_AWAKE) + return; + + /* Device won't be able to honor CAB if a scan is in progress on any interface. Prefer to + * skip this DTIM and wait for the next one. 
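+ * (below, a held scan_lock on any interface is what indicates a scan in progress)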
+ */ + wvif_it = NULL; + while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL) + if (mutex_is_locked(&wvif_it->scan_lock)) + return; + + if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed) + dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)", + wfx_tx_queues_has_cab(wvif)); + wvif->after_dtim_tx_allowed = true; + wfx_bh_request_tx(wvif->wdev); +} + +int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + /* Aggregation is implemented fully in firmware */ + switch (params->action) { + case IEEE80211_AMPDU_RX_START: + case IEEE80211_AMPDU_RX_STOP: + /* Just acknowledge it to enable frame re-ordering */ + return 0; + default: + /* Leave the firmware doing its business for tx aggregation */ + return -EOPNOTSUPP; + } +} + +int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) +{ + return 0; +} + +void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) +{ +} + +void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed) +{ +} + +int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *conf) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct ieee80211_channel *ch = conf->def.chan; + + WARN(wvif->channel, "channel overwrite"); + wvif->channel = ch; + + return 0; +} + +void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *conf) +{ + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + struct ieee80211_channel *ch = conf->def.chan; + + WARN(wvif->channel != ch, "channel mismatch"); + wvif->channel = NULL; +} + +int wfx_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} + +int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + int i; + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_UAPSD | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + + mutex_lock(&wdev->conf_mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + break; + default: + mutex_unlock(&wdev->conf_mutex); + return -EOPNOTSUPP; + } + + /* FIXME: prefer use of container_of() to get vif */ + wvif->vif = vif; + wvif->wdev = wdev; + + wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */ + INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work); + INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work); + + init_completion(&wvif->set_pm_mode_complete); + complete(&wvif->set_pm_mode_complete); + INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); + + mutex_init(&wvif->scan_lock); + init_completion(&wvif->scan_complete); + INIT_WORK(&wvif->scan_work, wfx_hw_scan_work); + + wfx_tx_queues_init(wvif); + wfx_tx_policy_init(wvif); + + for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { + if (!wdev->vif[i]) { + wdev->vif[i] = vif; + wvif->id = i; + break; + } + } + WARN(i == ARRAY_SIZE(wdev->vif), "try to instantiate more vif than supported"); + + wfx_hif_set_macaddr(wvif, vif->addr); + + mutex_unlock(&wdev->conf_mutex); + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + /* Combo mode does not support Block Acks. 
We can re-enable them */ + if (wvif_count(wdev) == 1) + wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); + else + wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00); + } + return 0; +} + +void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; + + wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300)); + wfx_tx_queues_check_empty(wvif); + + mutex_lock(&wdev->conf_mutex); + WARN(wvif->link_id_map != 1, "corrupted state"); + + wfx_hif_reset(wvif, false); + wfx_hif_set_macaddr(wvif, NULL); + wfx_tx_policy_init(wvif); + + cancel_delayed_work_sync(&wvif->beacon_loss_work); + wdev->vif[wvif->id] = NULL; + wvif->vif = NULL; + + mutex_unlock(&wdev->conf_mutex); + + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + /* Combo mode does not support Block Acks. We can re-enable them */ + if (wvif_count(wdev) == 1) + wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); + else + wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00); + } +} + +int wfx_start(struct ieee80211_hw *hw) +{ + return 0; +} + +void wfx_stop(struct ieee80211_hw *hw) +{ + struct wfx_dev *wdev = hw->priv; + + WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending)); +} diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h new file mode 100644 index 000000000000..c69b2227e9ac --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/sta.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Implementation of mac80211 API. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + */ +#ifndef WFX_STA_H +#define WFX_STA_H + +#include + +struct wfx_dev; +struct wfx_vif; + +struct wfx_sta_priv { + int link_id; + int vif_id; +}; + +/* mac80211 interface */ +int wfx_start(struct ieee80211_hw *hw); +void wfx_stop(struct ieee80211_hw *hw); +int wfx_config(struct ieee80211_hw *hw, u32 changed); +int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value); +void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); +void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 unused); + +int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed); +int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta); +int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set); +int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params); +int wfx_add_chanctx(struct ieee80211_hw *hw, 
struct ieee80211_chanctx_conf *conf); +void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf); +void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed); +int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *conf); +void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *conf); + +/* Hardware API Callbacks */ +void wfx_cooling_timeout_work(struct work_struct *work); +void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd); +void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd); +void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi); +int wfx_update_pm(struct wfx_vif *wvif); + +/* Other Helpers */ +void wfx_reset(struct wfx_vif *wvif); +u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates); + +#endif diff --git a/drivers/net/wireless/silabs/wfx/traces.h b/drivers/net/wireless/silabs/wfx/traces.h new file mode 100644 index 000000000000..e011e8a46bd5 --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/traces.h @@ -0,0 +1,496 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tracepoints definitions. + * + * Copyright (c) 2018-2020, Silicon Laboratories, Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM wfx + +#if !defined(_WFX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _WFX_TRACE_H + +#include +#include + +#include "bus.h" +#include "hif_api_cmd.h" +#include "hif_api_mib.h" + +/* The hell below need some explanations. For each symbolic number, we need to define it with + * TRACE_DEFINE_ENUM() and in a list for __print_symbolic. + * + * 1. Define a new macro that call TRACE_DEFINE_ENUM(): + * + * #define xxx_name(sym) TRACE_DEFINE_ENUM(sym); + * + * 2. Define list of all symbols: + * + * #define list_names \ + * ... \ + * xxx_name(XXX) \ + * ... + * + * 3. Instantiate that list_names: + * + * list_names + * + * 4. Redefine xxx_name() as an entry of array for __print_symbolic() + * + * #undef xxx_name + * #define xxx_name(msg) { msg, #msg }, + * + * 5. list_name can now nearly be used with __print_symbolic() but, __print_symbolic() dislike + * last comma of list. 
So we define a new list with a dummy element: + * + * #define list_for_print_symbolic list_names { -1, NULL } + */ + +#define _hif_msg_list \ + hif_cnf_name(ADD_KEY) \ + hif_cnf_name(BEACON_TRANSMIT) \ + hif_cnf_name(EDCA_QUEUE_PARAMS) \ + hif_cnf_name(JOIN) \ + hif_cnf_name(MAP_LINK) \ + hif_cnf_name(READ_MIB) \ + hif_cnf_name(REMOVE_KEY) \ + hif_cnf_name(RESET) \ + hif_cnf_name(SET_BSS_PARAMS) \ + hif_cnf_name(SET_PM_MODE) \ + hif_cnf_name(START) \ + hif_cnf_name(START_SCAN) \ + hif_cnf_name(STOP_SCAN) \ + hif_cnf_name(TX) \ + hif_cnf_name(MULTI_TRANSMIT) \ + hif_cnf_name(UPDATE_IE) \ + hif_cnf_name(WRITE_MIB) \ + hif_cnf_name(CONFIGURATION) \ + hif_cnf_name(CONTROL_GPIO) \ + hif_cnf_name(PREVENT_ROLLBACK) \ + hif_cnf_name(SET_SL_MAC_KEY) \ + hif_cnf_name(SL_CONFIGURE) \ + hif_cnf_name(SL_EXCHANGE_PUB_KEYS) \ + hif_cnf_name(SHUT_DOWN) \ + hif_ind_name(EVENT) \ + hif_ind_name(JOIN_COMPLETE) \ + hif_ind_name(RX) \ + hif_ind_name(SCAN_CMPL) \ + hif_ind_name(SET_PM_MODE_CMPL) \ + hif_ind_name(SUSPEND_RESUME_TX) \ + hif_ind_name(SL_EXCHANGE_PUB_KEYS) \ + hif_ind_name(ERROR) \ + hif_ind_name(EXCEPTION) \ + hif_ind_name(GENERIC) \ + hif_ind_name(WAKEUP) \ + hif_ind_name(STARTUP) + +#define hif_msg_list_enum _hif_msg_list + +#undef hif_cnf_name +#undef hif_ind_name +#define hif_cnf_name(msg) TRACE_DEFINE_ENUM(HIF_CNF_ID_##msg); +#define hif_ind_name(msg) TRACE_DEFINE_ENUM(HIF_IND_ID_##msg); +hif_msg_list_enum +#undef hif_cnf_name +#undef hif_ind_name +#define hif_cnf_name(msg) { HIF_CNF_ID_##msg, #msg }, +#define hif_ind_name(msg) { HIF_IND_ID_##msg, #msg }, +#define hif_msg_list hif_msg_list_enum { -1, NULL } + +#define _hif_mib_list \ + hif_mib_name(ARP_IP_ADDRESSES_TABLE) \ + hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \ + hif_mib_name(BEACON_FILTER_ENABLE) \ + hif_mib_name(BEACON_FILTER_TABLE) \ + hif_mib_name(BEACON_STATS) \ + hif_mib_name(BEACON_WAKEUP_PERIOD) \ + hif_mib_name(BLOCK_ACK_POLICY) \ + hif_mib_name(CCA_CONFIG) \ + hif_mib_name(CONFIG_DATA_FILTER) \ + hif_mib_name(COUNTERS_TABLE) \ + hif_mib_name(CURRENT_TX_POWER_LEVEL) \ + hif_mib_name(DOT11_MAC_ADDRESS) \ + hif_mib_name(DOT11_MAX_RECEIVE_LIFETIME) \ + hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \ + hif_mib_name(DOT11_RTS_THRESHOLD) \ + hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \ + hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \ + hif_mib_name(EXTENDED_COUNTERS_TABLE) \ + hif_mib_name(GL_BLOCK_ACK_INFO) \ + hif_mib_name(GL_OPERATIONAL_POWER_MODE) \ + hif_mib_name(GL_SET_MULTI_MSG) \ + hif_mib_name(GRP_SEQ_COUNTER) \ + hif_mib_name(INACTIVITY_TIMER) \ + hif_mib_name(INTERFACE_PROTECTION) \ + hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \ + hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \ + hif_mib_name(KEEP_ALIVE_PERIOD) \ + hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \ + hif_mib_name(MAGIC_DATAFRAME_CONDITION) \ + hif_mib_name(MAX_TX_POWER_LEVEL) \ + hif_mib_name(NON_ERP_PROTECTION) \ + hif_mib_name(NS_IP_ADDRESSES_TABLE) \ + hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \ + hif_mib_name(PORT_DATAFRAME_CONDITION) \ + hif_mib_name(PROTECTED_MGMT_POLICY) \ + hif_mib_name(RCPI_RSSI_THRESHOLD) \ + hif_mib_name(RX_FILTER) \ + hif_mib_name(SET_ASSOCIATION_MODE) \ + hif_mib_name(SET_DATA_FILTERING) \ + hif_mib_name(SET_HT_PROTECTION) \ + hif_mib_name(SET_TX_RATE_RETRY_POLICY) \ + hif_mib_name(SET_UAPSD_INFORMATION) \ + hif_mib_name(SLOT_TIME) \ + hif_mib_name(STATISTICS_TABLE) \ + hif_mib_name(TEMPLATE_FRAME) \ + hif_mib_name(TSF_COUNTER) \ + hif_mib_name(UC_MC_BC_DATAFRAME_CONDITION) + +#define hif_mib_list_enum _hif_mib_list + +#undef hif_mib_name 
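+/* Same dance as above, this time for the MIB identifiers */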
+#define hif_mib_name(mib) TRACE_DEFINE_ENUM(HIF_MIB_ID_##mib); +hif_mib_list_enum +#undef hif_mib_name +#define hif_mib_name(mib) { HIF_MIB_ID_##mib, #mib }, +#define hif_mib_list hif_mib_list_enum { -1, NULL } + +DECLARE_EVENT_CLASS(hif_data, + TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), + TP_ARGS(hif, tx_fill_level, is_recv), + TP_STRUCT__entry( + __field(int, tx_fill_level) + __field(int, msg_id) + __field(const char *, msg_type) + __field(int, msg_len) + __field(int, buf_len) + __field(int, if_id) + __field(int, mib) + __array(u8, buf, 128) + ), + TP_fast_assign( + int header_len; + + __entry->tx_fill_level = tx_fill_level; + __entry->msg_len = le16_to_cpu(hif->len); + __entry->msg_id = hif->id; + __entry->if_id = hif->interface; + if (is_recv) + __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF"; + else + __entry->msg_type = "REQ"; + if (!is_recv && + (__entry->msg_id == HIF_REQ_ID_READ_MIB || + __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) { + __entry->mib = le16_to_cpup((__le16 *)hif->body); + header_len = 4; + } else { + __entry->mib = -1; + header_len = 0; + } + __entry->buf_len = min_t(int, __entry->msg_len, sizeof(__entry->buf)) + - sizeof(struct wfx_hif_msg) - header_len; + memcpy(__entry->buf, hif->body + header_len, __entry->buf_len); + ), + TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)", + __entry->tx_fill_level, + __entry->if_id, + __entry->msg_type, + __print_symbolic(__entry->msg_id, hif_msg_list), + __entry->mib != -1 ? "/" : "", + __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "", + __print_hex(__entry->buf, __entry->buf_len), + __entry->msg_len > sizeof(__entry->buf) ? " ..." : "", + __entry->msg_len + ) +); +DEFINE_EVENT(hif_data, hif_send, + TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), + TP_ARGS(hif, tx_fill_level, is_recv)); +#define _trace_hif_send(hif, tx_fill_level)\ + trace_hif_send(hif, tx_fill_level, false) +DEFINE_EVENT(hif_data, hif_recv, + TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), + TP_ARGS(hif, tx_fill_level, is_recv)); +#define _trace_hif_recv(hif, tx_fill_level)\ + trace_hif_recv(hif, tx_fill_level, true) + +#define wfx_reg_list_enum \ + wfx_reg_name(WFX_REG_CONFIG, "CONFIG") \ + wfx_reg_name(WFX_REG_CONTROL, "CONTROL") \ + wfx_reg_name(WFX_REG_IN_OUT_QUEUE, "QUEUE") \ + wfx_reg_name(WFX_REG_AHB_DPORT, "AHB") \ + wfx_reg_name(WFX_REG_BASE_ADDR, "BASE_ADDR") \ + wfx_reg_name(WFX_REG_SRAM_DPORT, "SRAM") \ + wfx_reg_name(WFX_REG_SET_GEN_R_W, "SET_GEN_R_W") \ + wfx_reg_name(WFX_REG_FRAME_OUT, "FRAME_OUT") + +#undef wfx_reg_name +#define wfx_reg_name(sym, name) TRACE_DEFINE_ENUM(sym); +wfx_reg_list_enum +#undef wfx_reg_name +#define wfx_reg_name(sym, name) { sym, name }, +#define wfx_reg_list wfx_reg_list_enum { -1, NULL } + +DECLARE_EVENT_CLASS(io_data, + TP_PROTO(int reg, int addr, const void *io_buf, size_t len), + TP_ARGS(reg, addr, io_buf, len), + TP_STRUCT__entry( + __field(int, reg) + __field(int, addr) + __field(int, msg_len) + __field(int, buf_len) + __array(u8, buf, 32) + __array(u8, addr_str, 10) + ), + TP_fast_assign( + __entry->reg = reg; + __entry->addr = addr; + __entry->msg_len = len; + __entry->buf_len = min_t(int, sizeof(__entry->buf), __entry->msg_len); + memcpy(__entry->buf, io_buf, __entry->buf_len); + if (addr >= 0) + snprintf(__entry->addr_str, 10, "/%08x", addr); + else + __entry->addr_str[0] = 0; + ), + TP_printk("%s%s: %s%s (%d bytes)", + __print_symbolic(__entry->reg, wfx_reg_list), + __entry->addr_str, + 
__print_hex(__entry->buf, __entry->buf_len), + __entry->msg_len > sizeof(__entry->buf) ? " ..." : "", + __entry->msg_len + ) +); +DEFINE_EVENT(io_data, io_write, + TP_PROTO(int reg, int addr, const void *io_buf, size_t len), + TP_ARGS(reg, addr, io_buf, len)); +#define _trace_io_ind_write(reg, addr, io_buf, len)\ + trace_io_write(reg, addr, io_buf, len) +#define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len) +DEFINE_EVENT(io_data, io_read, + TP_PROTO(int reg, int addr, const void *io_buf, size_t len), + TP_ARGS(reg, addr, io_buf, len)); +#define _trace_io_ind_read(reg, addr, io_buf, len)\ + trace_io_read(reg, addr, io_buf, len) +#define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len) + +DECLARE_EVENT_CLASS(io_data32, + TP_PROTO(int reg, int addr, u32 val), + TP_ARGS(reg, addr, val), + TP_STRUCT__entry( + __field(int, reg) + __field(int, addr) + __field(int, val) + __array(u8, addr_str, 10) + ), + TP_fast_assign( + __entry->reg = reg; + __entry->addr = addr; + __entry->val = val; + if (addr >= 0) + snprintf(__entry->addr_str, 10, "/%08x", addr); + else + __entry->addr_str[0] = 0; + ), + TP_printk("%s%s: %08x", + __print_symbolic(__entry->reg, wfx_reg_list), + __entry->addr_str, + __entry->val + ) +); +DEFINE_EVENT(io_data32, io_write32, + TP_PROTO(int reg, int addr, u32 val), + TP_ARGS(reg, addr, val)); +#define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val) +#define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val) +DEFINE_EVENT(io_data32, io_read32, + TP_PROTO(int reg, int addr, u32 val), + TP_ARGS(reg, addr, val)); +#define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val) +#define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val) + +DECLARE_EVENT_CLASS(piggyback, + TP_PROTO(u32 val, bool ignored), + TP_ARGS(val, ignored), + TP_STRUCT__entry( + __field(int, val) + __field(bool, ignored) + ), + TP_fast_assign( + __entry->val = val; + __entry->ignored = ignored; + ), + TP_printk("CONTROL: %08x%s", + __entry->val, + __entry->ignored ? " (ignored)" : "" + ) +); +DEFINE_EVENT(piggyback, piggyback, + TP_PROTO(u32 val, bool ignored), + TP_ARGS(val, ignored)); +#define _trace_piggyback(val, ignored) trace_piggyback(val, ignored) + +TRACE_EVENT(bh_stats, + TP_PROTO(int ind, int req, int cnf, int busy, bool release), + TP_ARGS(ind, req, cnf, busy, release), + TP_STRUCT__entry( + __field(int, ind) + __field(int, req) + __field(int, cnf) + __field(int, busy) + __field(bool, release) + ), + TP_fast_assign( + __entry->ind = ind; + __entry->req = req; + __entry->cnf = cnf; + __entry->busy = busy; + __entry->release = release; + ), + TP_printk("IND/REQ/CNF:%3d/%3d/%3d, REQ in progress:%3d, WUP: %s", + __entry->ind, + __entry->req, + __entry->cnf, + __entry->busy, + __entry->release ? 
"release" : "keep" + ) +); +#define _trace_bh_stats(ind, req, cnf, busy, release)\ + trace_bh_stats(ind, req, cnf, busy, release) + +TRACE_EVENT(tx_stats, + TP_PROTO(const struct wfx_hif_cnf_tx *tx_cnf, const struct sk_buff *skb, + int delay), + TP_ARGS(tx_cnf, skb, delay), + TP_STRUCT__entry( + __field(int, pkt_id) + __field(int, delay_media) + __field(int, delay_queue) + __field(int, delay_fw) + __field(int, ack_failures) + __field(int, flags) + __array(int, rate, 4) + __array(int, tx_count, 4) + ), + TP_fast_assign( + /* Keep sync with wfx_rates definition in main.c */ + static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13 }; + const struct ieee80211_tx_info *tx_info = + (const struct ieee80211_tx_info *)skb->cb; + const struct ieee80211_tx_rate *rates = tx_info->driver_rates; + int i; + + __entry->pkt_id = tx_cnf->packet_id; + __entry->delay_media = le32_to_cpu(tx_cnf->media_delay); + __entry->delay_queue = le32_to_cpu(tx_cnf->tx_queue_delay); + __entry->delay_fw = delay; + __entry->ack_failures = tx_cnf->ack_failures; + if (!tx_cnf->status || __entry->ack_failures) + __entry->ack_failures += 1; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (rates[0].flags & IEEE80211_TX_RC_MCS) + __entry->rate[i] = rates[i].idx; + else + __entry->rate[i] = hw_rate[rates[i].idx]; + __entry->tx_count[i] = rates[i].count; + } + __entry->flags = 0; + if (rates[0].flags & IEEE80211_TX_RC_MCS) + __entry->flags |= 0x01; + if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) + __entry->flags |= 0x02; + if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD) + __entry->flags |= 0x04; + if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) + __entry->flags |= 0x08; + if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) + __entry->flags |= 0x10; + if (tx_cnf->status) + __entry->flags |= 0x20; + if (tx_cnf->status == HIF_STATUS_TX_FAIL_REQUEUE) + __entry->flags |= 0x40; + ), + TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus", + __entry->pkt_id, + __print_flags(__entry->flags, NULL, + { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" }, { 0x08, "R" }, + { 0x10, "D" }, { 0x20, "F" }, { 0x40, "Q" }), + __entry->rate[0], + __entry->tx_count[0], + __entry->rate[1], + __entry->tx_count[1], + __entry->rate[2], + __entry->tx_count[2], + __entry->rate[3], + __entry->tx_count[3], + __entry->ack_failures, + __entry->delay_media, + __entry->delay_queue, + __entry->delay_fw + ) +); +#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay) + +TRACE_EVENT(queues_stats, + TP_PROTO(struct wfx_dev *wdev, const struct wfx_queue *elected_queue), + TP_ARGS(wdev, elected_queue), + TP_STRUCT__entry( + __field(int, vif_id) + __field(int, queue_id) + __array(int, hw, IEEE80211_NUM_ACS * 2) + __array(int, drv, IEEE80211_NUM_ACS * 2) + __array(int, cab, IEEE80211_NUM_ACS * 2) + ), + TP_fast_assign( + const struct wfx_queue *queue; + struct wfx_vif *wvif; + int i, j; + + for (j = 0; j < IEEE80211_NUM_ACS * 2; j++) { + __entry->hw[j] = -1; + __entry->drv[j] = -1; + __entry->cab[j] = -1; + } + __entry->vif_id = -1; + __entry->queue_id = -1; + wvif = NULL; + while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + j = wvif->id * IEEE80211_NUM_ACS + i; + WARN_ON(j >= IEEE80211_NUM_ACS * 2); + queue = &wvif->tx_queue[i]; + __entry->hw[j] = atomic_read(&queue->pending_frames); + __entry->drv[j] = skb_queue_len(&queue->normal); + __entry->cab[j] = skb_queue_len(&queue->cab); + if (queue == elected_queue) { + 
__entry->vif_id = wvif->id; + __entry->queue_id = i; + } + } + } + ), + TP_printk("got skb from %d/%d, pend. hw/norm/cab: [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ] [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ]", + __entry->vif_id, __entry->queue_id, + __entry->hw[0], __entry->drv[0], __entry->cab[0], + __entry->hw[1], __entry->drv[1], __entry->cab[1], + __entry->hw[2], __entry->drv[2], __entry->cab[2], + __entry->hw[3], __entry->drv[3], __entry->cab[3], + __entry->hw[4], __entry->drv[4], __entry->cab[4], + __entry->hw[5], __entry->drv[5], __entry->cab[5], + __entry->hw[6], __entry->drv[6], __entry->cab[6], + __entry->hw[7], __entry->drv[7], __entry->cab[7] + ) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE traces + +#include diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h new file mode 100644 index 000000000000..6594cc647c2f --- /dev/null +++ b/drivers/net/wireless/silabs/wfx/wfx.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Common private data. + * + * Copyright (c) 2017-2020, Silicon Laboratories, Inc. + * Copyright (c) 2010, ST-Ericsson + * Copyright (c) 2006, Michael Wu + * Copyright 2004-2006 Jean-Baptiste Note , et al. + */ +#ifndef WFX_H +#define WFX_H + +#include +#include +#include +#include +#include + +#include "bh.h" +#include "data_tx.h" +#include "main.h" +#include "queue.h" +#include "hif_tx.h" + +#define USEC_PER_TXOP 32 /* see struct ieee80211_tx_queue_params */ +#define USEC_PER_TU 1024 + +struct wfx_hwbus_ops; + +struct wfx_dev { + struct wfx_platform_data pdata; + struct device *dev; + struct ieee80211_hw *hw; + struct ieee80211_vif *vif[2]; + struct mac_address addresses[2]; + const struct wfx_hwbus_ops *hwbus_ops; + void *hwbus_priv; + + u8 keyset; + struct completion firmware_ready; + struct wfx_hif_ind_startup hw_caps; + struct wfx_hif hif; + struct delayed_work cooling_timeout_work; + bool poll_irq; + bool chip_frozen; + struct mutex conf_mutex; + + struct wfx_hif_cmd hif_cmd; + struct sk_buff_head tx_pending; + wait_queue_head_t tx_dequeue; + atomic_t tx_lock; + + atomic_t packet_id; + u32 key_map; + + struct wfx_hif_rx_stats rx_stats; + struct mutex rx_stats_lock; + struct wfx_hif_tx_power_loop_info tx_power_loop_info; + struct mutex tx_power_loop_info_lock; +}; + +struct wfx_vif { + struct wfx_dev *wdev; + struct ieee80211_vif *vif; + struct ieee80211_channel *channel; + int id; + + u32 link_id_map; + + bool after_dtim_tx_allowed; + bool join_in_progress; + + struct delayed_work beacon_loss_work; + + struct wfx_queue tx_queue[4]; + struct wfx_tx_policy_cache tx_policy_cache; + struct work_struct tx_policy_upload_work; + + struct work_struct update_tim_work; + + unsigned long uapsd_mask; + + /* avoid some operations in parallel with scan */ + struct mutex scan_lock; + struct work_struct scan_work; + struct completion scan_complete; + int scan_nb_chan_done; + bool scan_abort; + struct ieee80211_scan_request *scan_req; + + struct completion set_pm_mode_complete; +}; + +static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id) +{ + if (vif_id >= ARRAY_SIZE(wdev->vif)) { + dev_dbg(wdev->dev, "requesting non-existent vif: %d\n", vif_id); + return NULL; + } + vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif)); + if (!wdev->vif[vif_id]) + return NULL; + return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv; +} + +static inline struct wfx_vif 
*wvif_iterate(struct wfx_dev *wdev, struct wfx_vif *cur) +{ + int i; + int mark = 0; + struct wfx_vif *tmp; + + if (!cur) + mark = 1; + for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { + tmp = wdev_to_wvif(wdev, i); + if (mark && tmp) + return tmp; + if (tmp == cur) + mark = 1; + } + return NULL; +} + +static inline int wvif_count(struct wfx_dev *wdev) +{ + int i; + int ret = 0; + struct wfx_vif *wvif; + + for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { + wvif = wdev_to_wvif(wdev, i); + if (wvif) + ret++; + } + return ret; +} + +static inline void memreverse(u8 *src, u8 length) +{ + u8 *lo = src; + u8 *hi = src + length - 1; + u8 swap; + + while (lo < hi) { + swap = *lo; + *lo++ = *hi; + *hi-- = swap; + } +} + +static inline int memzcmp(void *src, unsigned int size) +{ + u8 *buf = src; + + if (!size) + return 0; + if (*buf) + return 1; + return memcmp(buf, buf + 1, size - 1); +} + +#endif diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 932acb4e8cbc..fc274737053d 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -86,6 +86,5 @@ source "drivers/staging/fieldbus/Kconfig" source "drivers/staging/qlge/Kconfig" -source "drivers/staging/wfx/Kconfig" endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 3ffb35ccfae2..65e317922e3f 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -33,4 +33,3 @@ obj-$(CONFIG_PI433) += pi433/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_QLGE) += qlge/ -obj-$(CONFIG_WFX) += wfx/ diff --git a/drivers/staging/wfx/Kconfig b/drivers/staging/wfx/Kconfig deleted file mode 100644 index 835a855409d8..000000000000 --- a/drivers/staging/wfx/Kconfig +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config WFX - tristate "Silicon Labs wireless chips WF200 and further" - depends on MAC80211 - depends on MMC || !MMC # do not allow WFX=y if MMC=m - depends on (SPI || MMC) - help - This is a driver for Silicons Labs WFxxx series (WF200 and further) - chipsets. This chip can be found on SPI or SDIO buses. - - Silabs does not use a reliable SDIO vendor ID. So, to avoid conflicts, - the driver won't probe the device if it is not also declared in the - Device Tree. diff --git a/drivers/staging/wfx/Makefile b/drivers/staging/wfx/Makefile deleted file mode 100644 index c8b356f71c99..000000000000 --- a/drivers/staging/wfx/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -# Necessary for CREATE_TRACE_POINTS -CFLAGS_debug.o = -I$(src) - -wfx-y := \ - bh.o \ - hwio.o \ - fwio.o \ - hif_tx_mib.o \ - hif_tx.o \ - hif_rx.o \ - queue.o \ - data_tx.o \ - data_rx.o \ - scan.o \ - sta.o \ - key.o \ - main.o \ - debug.o -wfx-$(CONFIG_SPI) += bus_spi.o -# When CONFIG_MMC == m, append to 'wfx-y' (and not to 'wfx-m') -wfx-$(subst m,y,$(CONFIG_MMC)) += bus_sdio.o - -obj-$(CONFIG_WFX) += wfx.o diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO deleted file mode 100644 index 1b4bc2af94b6..000000000000 --- a/drivers/staging/wfx/TODO +++ /dev/null @@ -1,6 +0,0 @@ -This is a list of things that need to be done to get this driver out of the -staging directory. 
- - - As suggested by Felix, rate control could be improved following this idea: - https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/ - diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c deleted file mode 100644 index bcea9d5b119c..000000000000 --- a/drivers/staging/wfx/bh.c +++ /dev/null @@ -1,324 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Interrupt bottom half (BH). - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#include -#include - -#include "bh.h" -#include "wfx.h" -#include "hwio.h" -#include "traces.h" -#include "hif_rx.h" -#include "hif_api_cmd.h" - -static void device_wakeup(struct wfx_dev *wdev) -{ - int max_retry = 3; - - if (!wdev->pdata.gpio_wakeup) - return; - if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0) - return; - - if (wfx_api_older_than(wdev, 1, 4)) { - gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); - if (!completion_done(&wdev->hif.ctrl_ready)) - usleep_range(2000, 2500); - return; - } - for (;;) { - gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); - /* completion.h does not provide any function to wait completion without consume it - * (a kind of wait_for_completion_done_timeout()). So we have to emulate it. - */ - if (wait_for_completion_timeout(&wdev->hif.ctrl_ready, msecs_to_jiffies(2))) { - complete(&wdev->hif.ctrl_ready); - return; - } else if (max_retry-- > 0) { - /* Older firmwares have a race in sleep/wake-up process. Redo the process - * is sufficient to unfreeze the chip. - */ - dev_err(wdev->dev, "timeout while wake up chip\n"); - gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0); - usleep_range(2000, 2500); - } else { - dev_err(wdev->dev, "max wake-up retries reached\n"); - return; - } - } -} - -static void device_release(struct wfx_dev *wdev) -{ - if (!wdev->pdata.gpio_wakeup) - return; - - gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0); -} - -static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf) -{ - struct sk_buff *skb; - struct wfx_hif_msg *hif; - size_t alloc_len; - size_t computed_len; - int release_count; - int piggyback = 0; - - WARN(read_len > round_down(0xFFF, 2) * sizeof(u16), "request exceed the chip capability"); - - /* Add 2 to take into account piggyback size */ - alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2); - skb = dev_alloc_skb(alloc_len); - if (!skb) - return -ENOMEM; - - if (wfx_data_read(wdev, skb->data, alloc_len)) - goto err; - - piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2)); - _trace_piggyback(piggyback, false); - - hif = (struct wfx_hif_msg *)skb->data; - WARN(hif->encrypted & 0x3, "encryption is unsupported"); - if (WARN(read_len < sizeof(struct wfx_hif_msg), "corrupted read")) - goto err; - computed_len = le16_to_cpu(hif->len); - computed_len = round_up(computed_len, 2); - if (computed_len != read_len) { - dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n", - computed_len, read_len); - print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1, - hif, read_len, true); - goto err; - } - - if (!(hif->id & HIF_ID_IS_INDICATION)) { - (*is_cnf)++; - if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT) - release_count = - ((struct wfx_hif_cnf_multi_transmit *)hif->body)->num_tx_confs; - else - release_count = 1; - WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter"); - wdev->hif.tx_buffers_used -= release_count; - } - _trace_hif_recv(hif, wdev->hif.tx_buffers_used); - - if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != 
HIF_IND_ID_ERROR) { - if (hif->seqnum != wdev->hif.rx_seqnum) - dev_warn(wdev->dev, "wrong message sequence: %d != %d\n", - hif->seqnum, wdev->hif.rx_seqnum); - wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1); - } - - skb_put(skb, le16_to_cpu(hif->len)); - /* wfx_handle_rx takes care on SKB livetime */ - wfx_handle_rx(wdev, skb); - if (!wdev->hif.tx_buffers_used) - wake_up(&wdev->hif.tx_buffers_empty); - - return piggyback; - -err: - if (skb) - dev_kfree_skb(skb); - return -EIO; -} - -static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf) -{ - size_t len; - int i; - int ctrl_reg, piggyback; - - piggyback = 0; - for (i = 0; i < max_msg; i++) { - if (piggyback & CTRL_NEXT_LEN_MASK) - ctrl_reg = piggyback; - else if (try_wait_for_completion(&wdev->hif.ctrl_ready)) - ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0); - else - ctrl_reg = 0; - if (!(ctrl_reg & CTRL_NEXT_LEN_MASK)) - return i; - /* ctrl_reg units are 16bits words */ - len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2; - piggyback = rx_helper(wdev, len, num_cnf); - if (piggyback < 0) - return i; - if (!(piggyback & CTRL_WLAN_READY)) - dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n", - piggyback); - } - if (piggyback & CTRL_NEXT_LEN_MASK) { - ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback); - complete(&wdev->hif.ctrl_ready); - if (ctrl_reg) - dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n", - ctrl_reg, piggyback); - } - return i; -} - -static void tx_helper(struct wfx_dev *wdev, struct wfx_hif_msg *hif) -{ - int ret; - void *data; - bool is_encrypted = false; - size_t len = le16_to_cpu(hif->len); - - WARN(len < sizeof(*hif), "try to send corrupted data"); - - hif->seqnum = wdev->hif.tx_seqnum; - wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1); - - data = hif; - WARN(len > le16_to_cpu(wdev->hw_caps.size_inp_ch_buf), - "request exceed the chip capability: %zu > %d\n", - len, le16_to_cpu(wdev->hw_caps.size_inp_ch_buf)); - len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len); - ret = wfx_data_write(wdev, data, len); - if (ret) - goto end; - - wdev->hif.tx_buffers_used++; - _trace_hif_send(hif, wdev->hif.tx_buffers_used); -end: - if (is_encrypted) - kfree(data); -} - -static int bh_work_tx(struct wfx_dev *wdev, int max_msg) -{ - struct wfx_hif_msg *hif; - int i; - - for (i = 0; i < max_msg; i++) { - hif = NULL; - if (wdev->hif.tx_buffers_used < le16_to_cpu(wdev->hw_caps.num_inp_ch_bufs)) { - if (try_wait_for_completion(&wdev->hif_cmd.ready)) { - WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error"); - hif = wdev->hif_cmd.buf_send; - } else { - hif = wfx_tx_queues_get(wdev); - } - } - if (!hif) - return i; - tx_helper(wdev, hif); - } - return i; -} - -/* In SDIO mode, it is necessary to make an access to a register to acknowledge last received - * message. It could be possible to restrict this acknowledge to SDIO mode and only if last - * operation was rx. 
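In bh_work_rx() and rx_helper() above, the length advertised by the control register is counted in 16-bit words, and each read is over-allocated by two bytes so the chip can append ("piggyback") the next control-register value at the end of the transfer, saving one register access per received frame. A stand-alone sketch of that arithmetic; the 12-bit CTRL_NEXT_LEN_MASK value is assumed from the surrounding code rather than quoted from the datasheet, and the helper names are invented:

#include <stdint.h>
#include <stdio.h>

#define CTRL_NEXT_LEN_MASK 0x0FFF	/* assumed: low 12 bits carry the next length */

/* The chip reports the next message length in 16-bit words; convert to bytes. */
static size_t next_read_len(uint16_t ctrl_reg)
{
	return (size_t)(ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
}

/* The last two bytes of every read hold the piggybacked control value (little endian). */
static unsigned int piggyback(const uint8_t *buf, size_t alloc_len)
{
	return buf[alloc_len - 2] | (buf[alloc_len - 1] << 8);
}

int main(void)
{
	uint8_t buf[6] = { 0x11, 0x22, 0x33, 0x44, 0x05, 0x30 };

	printf("next read: %zu bytes\n", next_read_len(0x0002));	/* 4 bytes */
	printf("next ctrl: 0x%04x\n", piggyback(buf, sizeof(buf)));	/* 0x3005 */
	return 0;
}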
- */ -static void ack_sdio_data(struct wfx_dev *wdev) -{ - u32 cfg_reg; - - wfx_config_reg_read(wdev, &cfg_reg); - if (cfg_reg & 0xFF) { - dev_warn(wdev->dev, "chip reports errors: %02x\n", cfg_reg & 0xFF); - wfx_config_reg_write_bits(wdev, 0xFF, 0x00); - } -} - -static void bh_work(struct work_struct *work) -{ - struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh); - int stats_req = 0, stats_cnf = 0, stats_ind = 0; - bool release_chip = false, last_op_is_rx = false; - int num_tx, num_rx; - - device_wakeup(wdev); - do { - num_tx = bh_work_tx(wdev, 32); - stats_req += num_tx; - if (num_tx) - last_op_is_rx = false; - num_rx = bh_work_rx(wdev, 32, &stats_cnf); - stats_ind += num_rx; - if (num_rx) - last_op_is_rx = true; - } while (num_rx || num_tx); - stats_ind -= stats_cnf; - - if (last_op_is_rx) - ack_sdio_data(wdev); - if (!wdev->hif.tx_buffers_used && !work_pending(work)) { - device_release(wdev); - release_chip = true; - } - _trace_bh_stats(stats_ind, stats_req, stats_cnf, wdev->hif.tx_buffers_used, release_chip); -} - -/* An IRQ from chip did occur */ -void wfx_bh_request_rx(struct wfx_dev *wdev) -{ - u32 cur, prev; - - wfx_control_reg_read(wdev, &cur); - prev = atomic_xchg(&wdev->hif.ctrl_reg, cur); - complete(&wdev->hif.ctrl_ready); - queue_work(system_highpri_wq, &wdev->hif.bh); - - if (!(cur & CTRL_NEXT_LEN_MASK)) - dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n", - cur); - if (prev != 0) - dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n", - prev, cur); -} - -/* Driver want to send data */ -void wfx_bh_request_tx(struct wfx_dev *wdev) -{ - queue_work(system_highpri_wq, &wdev->hif.bh); -} - -/* If IRQ is not available, this function allow to manually poll the control register and simulate - * an IRQ ahen an event happened. - * - * Note that the device has a bug: If an IRQ raise while host read control register, the IRQ is - * lost. So, use this function carefully (only duing device initialisation). - */ -void wfx_bh_poll_irq(struct wfx_dev *wdev) -{ - ktime_t now, start; - u32 reg; - - WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ"); - flush_workqueue(system_highpri_wq); - start = ktime_get(); - for (;;) { - wfx_control_reg_read(wdev, ®); - now = ktime_get(); - if (reg & 0xFFF) - break; - if (ktime_after(now, ktime_add_ms(start, 1000))) { - dev_err(wdev->dev, "time out while polling control register\n"); - return; - } - udelay(200); - } - wfx_bh_request_rx(wdev); -} - -void wfx_bh_register(struct wfx_dev *wdev) -{ - INIT_WORK(&wdev->hif.bh, bh_work); - init_completion(&wdev->hif.ctrl_ready); - init_waitqueue_head(&wdev->hif.tx_buffers_empty); -} - -void wfx_bh_unregister(struct wfx_dev *wdev) -{ - flush_work(&wdev->hif.bh); -} diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h deleted file mode 100644 index a44c8b421b7c..000000000000 --- a/drivers/staging/wfx/bh.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Interrupt bottom half (BH). - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_BH_H -#define WFX_BH_H - -#include -#include -#include -#include - -struct wfx_dev; - -struct wfx_hif { - struct work_struct bh; - struct completion ctrl_ready; - wait_queue_head_t tx_buffers_empty; - atomic_t ctrl_reg; - int rx_seqnum; - int tx_seqnum; - int tx_buffers_used; -}; - -void wfx_bh_register(struct wfx_dev *wdev); -void wfx_bh_unregister(struct wfx_dev *wdev); -void wfx_bh_request_rx(struct wfx_dev *wdev); -void wfx_bh_request_tx(struct wfx_dev *wdev); -void wfx_bh_poll_irq(struct wfx_dev *wdev); - -#endif diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h deleted file mode 100644 index ccadfdd6873c..000000000000 --- a/drivers/staging/wfx/bus.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Common bus abstraction layer. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_BUS_H -#define WFX_BUS_H - -#include -#include - -#define WFX_REG_CONFIG 0x0 -#define WFX_REG_CONTROL 0x1 -#define WFX_REG_IN_OUT_QUEUE 0x2 -#define WFX_REG_AHB_DPORT 0x3 -#define WFX_REG_BASE_ADDR 0x4 -#define WFX_REG_SRAM_DPORT 0x5 -#define WFX_REG_SET_GEN_R_W 0x6 -#define WFX_REG_FRAME_OUT 0x7 - -struct wfx_hwbus_ops { - int (*copy_from_io)(void *bus_priv, unsigned int addr, void *dst, size_t count); - int (*copy_to_io)(void *bus_priv, unsigned int addr, const void *src, size_t count); - int (*irq_subscribe)(void *bus_priv); - int (*irq_unsubscribe)(void *bus_priv); - void (*lock)(void *bus_priv); - void (*unlock)(void *bus_priv); - size_t (*align_size)(void *bus_priv, size_t size); -}; - -extern struct sdio_driver wfx_sdio_driver; -extern struct spi_driver wfx_spi_driver; - -#endif diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c deleted file mode 100644 index 51a0d58a9070..000000000000 --- a/drivers/staging/wfx/bus_sdio.c +++ /dev/null @@ -1,273 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * SDIO interface. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bus.h" -#include "wfx.h" -#include "hwio.h" -#include "main.h" -#include "bh.h" - -static const struct wfx_platform_data pdata_wf200 = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/wf200.pds", -}; - -static const struct wfx_platform_data pdata_brd4001a = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/brd4001a.pds", -}; - -static const struct wfx_platform_data pdata_brd8022a = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/brd8022a.pds", -}; - -static const struct wfx_platform_data pdata_brd8023a = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/brd8023a.pds", -}; - -struct wfx_sdio_priv { - struct sdio_func *func; - struct wfx_dev *core; - u8 buf_id_tx; - u8 buf_id_rx; - int of_irq; -}; - -static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id, void *dst, size_t count) -{ - struct wfx_sdio_priv *bus = priv; - unsigned int sdio_addr = reg_id << 2; - int ret; - - WARN(reg_id > 7, "chip only has 7 registers"); - WARN(!IS_ALIGNED((uintptr_t)dst, 4), "unaligned buffer address"); - WARN(!IS_ALIGNED(count, 4), "unaligned buffer size"); - - /* Use queue mode buffers */ - if (reg_id == WFX_REG_IN_OUT_QUEUE) - sdio_addr |= (bus->buf_id_rx + 1) << 7; - ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count); - if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE) - bus->buf_id_rx = (bus->buf_id_rx + 1) % 4; - - return ret; -} - -static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id, const void *src, size_t count) -{ - struct wfx_sdio_priv *bus = priv; - unsigned int sdio_addr = reg_id << 2; - int ret; - - WARN(reg_id > 7, "chip only has 7 registers"); - WARN(!IS_ALIGNED((uintptr_t)src, 4), "unaligned buffer address"); - WARN(!IS_ALIGNED(count, 4), "unaligned buffer size"); - - /* Use queue mode buffers */ - if (reg_id == WFX_REG_IN_OUT_QUEUE) - sdio_addr |= bus->buf_id_tx << 7; - /* FIXME: discards 'const' qualifier for src */ - ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count); - if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE) - bus->buf_id_tx = (bus->buf_id_tx + 1) % 32; - - return ret; -} - -static void wfx_sdio_lock(void *priv) -{ - struct wfx_sdio_priv *bus = priv; - - sdio_claim_host(bus->func); -} - -static void wfx_sdio_unlock(void *priv) -{ - struct wfx_sdio_priv *bus = priv; - - sdio_release_host(bus->func); -} - -static void wfx_sdio_irq_handler(struct sdio_func *func) -{ - struct wfx_sdio_priv *bus = sdio_get_drvdata(func); - - wfx_bh_request_rx(bus->core); -} - -static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv) -{ - struct wfx_sdio_priv *bus = priv; - - sdio_claim_host(bus->func); - wfx_bh_request_rx(bus->core); - sdio_release_host(bus->func); - return IRQ_HANDLED; -} - -static int wfx_sdio_irq_subscribe(void *priv) -{ - struct wfx_sdio_priv *bus = priv; - u32 flags; - int ret; - u8 cccr; - - if (!bus->of_irq) { - sdio_claim_host(bus->func); - ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler); - sdio_release_host(bus->func); - return ret; - } - - flags = irq_get_trigger_type(bus->of_irq); - if (!flags) - flags = IRQF_TRIGGER_HIGH; - flags |= IRQF_ONESHOT; - ret = devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL, - wfx_sdio_irq_handler_ext, flags, "wfx", bus); - if (ret) - return ret; - sdio_claim_host(bus->func); - cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL); - cccr |= BIT(0); - cccr |= BIT(bus->func->num); - sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL); - 
sdio_release_host(bus->func); - return 0; -} - -static int wfx_sdio_irq_unsubscribe(void *priv) -{ - struct wfx_sdio_priv *bus = priv; - int ret; - - if (bus->of_irq) - devm_free_irq(&bus->func->dev, bus->of_irq, bus); - sdio_claim_host(bus->func); - ret = sdio_release_irq(bus->func); - sdio_release_host(bus->func); - return ret; -} - -static size_t wfx_sdio_align_size(void *priv, size_t size) -{ - struct wfx_sdio_priv *bus = priv; - - return sdio_align_size(bus->func, size); -} - -static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = { - .copy_from_io = wfx_sdio_copy_from_io, - .copy_to_io = wfx_sdio_copy_to_io, - .irq_subscribe = wfx_sdio_irq_subscribe, - .irq_unsubscribe = wfx_sdio_irq_unsubscribe, - .lock = wfx_sdio_lock, - .unlock = wfx_sdio_unlock, - .align_size = wfx_sdio_align_size, -}; - -static const struct of_device_id wfx_sdio_of_match[] = { - { .compatible = "silabs,wf200", .data = &pdata_wf200 }, - { .compatible = "silabs,brd4001a", .data = &pdata_brd4001a }, - { .compatible = "silabs,brd8022a", .data = &pdata_brd8022a }, - { .compatible = "silabs,brd8023a", .data = &pdata_brd8023a }, - { }, -}; -MODULE_DEVICE_TABLE(of, wfx_sdio_of_match); - -static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) -{ - const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev); - struct device_node *np = func->dev.of_node; - struct wfx_sdio_priv *bus; - int ret; - - if (func->num != 1) { - dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", - func->num); - return -ENODEV; - } - - if (!pdata) { - dev_warn(&func->dev, "no compatible device found in DT\n"); - return -ENODEV; - } - - bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL); - if (!bus) - return -ENOMEM; - - bus->func = func; - bus->of_irq = irq_of_parse_and_map(np, 0); - sdio_set_drvdata(func, bus); - - sdio_claim_host(func); - ret = sdio_enable_func(func); - /* Block of 64 bytes is more efficient than 512B for frame sizes < 4k */ - sdio_set_block_size(func, 64); - sdio_release_host(func); - if (ret) - return ret; - - bus->core = wfx_init_common(&func->dev, pdata, &wfx_sdio_hwbus_ops, bus); - if (!bus->core) { - ret = -EIO; - goto sdio_release; - } - - ret = wfx_probe(bus->core); - if (ret) - goto sdio_release; - - return 0; - -sdio_release: - sdio_claim_host(func); - sdio_disable_func(func); - sdio_release_host(func); - return ret; -} - -static void wfx_sdio_remove(struct sdio_func *func) -{ - struct wfx_sdio_priv *bus = sdio_get_drvdata(func); - - wfx_release(bus->core); - sdio_claim_host(func); - sdio_disable_func(func); - sdio_release_host(func); -} - -static const struct sdio_device_id wfx_sdio_ids[] = { - /* WF200 does not have official VID/PID */ - { SDIO_DEVICE(0x0000, 0x1000) }, - { }, -}; -MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids); - -struct sdio_driver wfx_sdio_driver = { - .name = "wfx-sdio", - .id_table = wfx_sdio_ids, - .probe = wfx_sdio_probe, - .remove = wfx_sdio_remove, - .drv = { - .owner = THIS_MODULE, - .of_match_table = wfx_sdio_of_match, - } -}; diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c deleted file mode 100644 index 7fb1afb8ed31..000000000000 --- a/drivers/staging/wfx/bus_spi.c +++ /dev/null @@ -1,284 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * SPI interface. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2011, Sagrad Inc. 
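In the removed bus_sdio.c above, wfx_sdio_copy_from_io() and wfx_sdio_copy_to_io() build the SDIO address from the register number shifted left by two and, for WFX_REG_IN_OUT_QUEUE, a rotating buffer index starting at bit 7 (the receive index cycles through 1..4, the transmit index through 0..31), which lets the chip detect repeated or missed transfers. A small sketch of that address construction; the bit layout is inferred from the code above and sdio_queue_addr() is an invented name:

#include <stdio.h>

#define WFX_REG_IN_OUT_QUEUE 0x2

/* Illustration only: register id in bits 2..4, queue buffer index from bit 7 up. */
static unsigned int sdio_queue_addr(unsigned int reg_id, unsigned int buf_id)
{
	unsigned int addr = reg_id << 2;

	if (reg_id == WFX_REG_IN_OUT_QUEUE)
		addr |= buf_id << 7;
	return addr;
}

int main(void)
{
	printf("rx addr: 0x%x\n", sdio_queue_addr(WFX_REG_IN_OUT_QUEUE, 1));	/* 0x88 */
	printf("tx addr: 0x%x\n", sdio_queue_addr(WFX_REG_IN_OUT_QUEUE, 31));	/* 0xf88 */
	return 0;
}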
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include -#include -#include -#include -#include -#include - -#include "bus.h" -#include "wfx.h" -#include "hwio.h" -#include "main.h" -#include "bh.h" - -#define SET_WRITE 0x7FFF /* usage: and operation */ -#define SET_READ 0x8000 /* usage: or operation */ - -static const struct wfx_platform_data pdata_wf200 = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/wf200.pds", - .use_rising_clk = true, -}; - -static const struct wfx_platform_data pdata_brd4001a = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/brd4001a.pds", - .use_rising_clk = true, -}; - -static const struct wfx_platform_data pdata_brd8022a = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/brd8022a.pds", - .use_rising_clk = true, -}; - -static const struct wfx_platform_data pdata_brd8023a = { - .file_fw = "wfx/wfm_wf200", - .file_pds = "wfx/brd8023a.pds", - .use_rising_clk = true, -}; - -struct wfx_spi_priv { - struct spi_device *func; - struct wfx_dev *core; - struct gpio_desc *gpio_reset; - bool need_swab; -}; - -/* The chip reads 16bits of data at time and place them directly into (little endian) CPU register. - * So, the chip expects bytes order to be "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3" and BE is - * "B3 B2 B1 B0") - * - * A little endian host with bits_per_word == 16 should do the right job natively. The code below to - * support big endian host and commonly used SPI 8bits. - */ -static int wfx_spi_copy_from_io(void *priv, unsigned int addr, void *dst, size_t count) -{ - struct wfx_spi_priv *bus = priv; - u16 regaddr = (addr << 12) | (count / 2) | SET_READ; - struct spi_message m; - struct spi_transfer t_addr = { - .tx_buf = ®addr, - .len = sizeof(regaddr), - }; - struct spi_transfer t_msg = { - .rx_buf = dst, - .len = count, - }; - u16 *dst16 = dst; - int ret, i; - - WARN(count % 2, "buffer size must be a multiple of 2"); - - cpu_to_le16s(®addr); - if (bus->need_swab) - swab16s(®addr); - - spi_message_init(&m); - spi_message_add_tail(&t_addr, &m); - spi_message_add_tail(&t_msg, &m); - ret = spi_sync(bus->func, &m); - - if (bus->need_swab && addr == WFX_REG_CONFIG) - for (i = 0; i < count / 2; i++) - swab16s(&dst16[i]); - return ret; -} - -static int wfx_spi_copy_to_io(void *priv, unsigned int addr, const void *src, size_t count) -{ - struct wfx_spi_priv *bus = priv; - u16 regaddr = (addr << 12) | (count / 2); - /* FIXME: use a bounce buffer */ - u16 *src16 = (void *)src; - int ret, i; - struct spi_message m; - struct spi_transfer t_addr = { - .tx_buf = ®addr, - .len = sizeof(regaddr), - }; - struct spi_transfer t_msg = { - .tx_buf = src, - .len = count, - }; - - WARN(count % 2, "buffer size must be a multiple of 2"); - WARN(regaddr & SET_READ, "bad addr or size overflow"); - - cpu_to_le16s(®addr); - - /* Register address and CONFIG content always use 16bit big endian - * ("BADC" order) - */ - if (bus->need_swab) - swab16s(®addr); - if (bus->need_swab && addr == WFX_REG_CONFIG) - for (i = 0; i < count / 2; i++) - swab16s(&src16[i]); - - spi_message_init(&m); - spi_message_add_tail(&t_addr, &m); - spi_message_add_tail(&t_msg, &m); - ret = spi_sync(bus->func, &m); - - if (bus->need_swab && addr == WFX_REG_CONFIG) - for (i = 0; i < count / 2; i++) - swab16s(&src16[i]); - return ret; -} - -static void wfx_spi_lock(void *priv) -{ -} - -static void wfx_spi_unlock(void *priv) -{ -} - -static irqreturn_t wfx_spi_irq_handler(int irq, void *priv) -{ - struct wfx_spi_priv *bus = priv; - - wfx_bh_request_rx(bus->core); - return IRQ_HANDLED; -} - -static int 
wfx_spi_irq_subscribe(void *priv) -{ - struct wfx_spi_priv *bus = priv; - u32 flags; - - flags = irq_get_trigger_type(bus->func->irq); - if (!flags) - flags = IRQF_TRIGGER_HIGH; - flags |= IRQF_ONESHOT; - return devm_request_threaded_irq(&bus->func->dev, bus->func->irq, NULL, - wfx_spi_irq_handler, flags, "wfx", bus); -} - -static int wfx_spi_irq_unsubscribe(void *priv) -{ - struct wfx_spi_priv *bus = priv; - - devm_free_irq(&bus->func->dev, bus->func->irq, bus); - return 0; -} - -static size_t wfx_spi_align_size(void *priv, size_t size) -{ - /* Most of SPI controllers avoid DMA if buffer size is not 32bit aligned */ - return ALIGN(size, 4); -} - -static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = { - .copy_from_io = wfx_spi_copy_from_io, - .copy_to_io = wfx_spi_copy_to_io, - .irq_subscribe = wfx_spi_irq_subscribe, - .irq_unsubscribe = wfx_spi_irq_unsubscribe, - .lock = wfx_spi_lock, - .unlock = wfx_spi_unlock, - .align_size = wfx_spi_align_size, -}; - -static int wfx_spi_probe(struct spi_device *func) -{ - struct wfx_platform_data *pdata; - struct wfx_spi_priv *bus; - int ret; - - if (!func->bits_per_word) - func->bits_per_word = 16; - ret = spi_setup(func); - if (ret) - return ret; - pdata = (struct wfx_platform_data *)spi_get_device_id(func)->driver_data; - if (!pdata) { - dev_err(&func->dev, "unable to retrieve driver data (please report)\n"); - return -ENODEV; - } - - /* Trace below is also displayed by spi_setup() if compiled with DEBUG */ - dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n", - func->chip_select, func->mode, func->bits_per_word, func->max_speed_hz); - if (func->bits_per_word != 16 && func->bits_per_word != 8) - dev_warn(&func->dev, "unusual bits/word value: %d\n", func->bits_per_word); - if (func->max_speed_hz > 50000000) - dev_warn(&func->dev, "%dHz is a very high speed\n", func->max_speed_hz); - - bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL); - if (!bus) - return -ENOMEM; - bus->func = func; - if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) - bus->need_swab = true; - spi_set_drvdata(func, bus); - - bus->gpio_reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(bus->gpio_reset)) - return PTR_ERR(bus->gpio_reset); - if (!bus->gpio_reset) { - dev_warn(&func->dev, "gpio reset is not defined, trying to load firmware anyway\n"); - } else { - gpiod_set_consumer_name(bus->gpio_reset, "wfx reset"); - gpiod_set_value_cansleep(bus->gpio_reset, 1); - usleep_range(100, 150); - gpiod_set_value_cansleep(bus->gpio_reset, 0); - usleep_range(2000, 2500); - } - - bus->core = wfx_init_common(&func->dev, pdata, &wfx_spi_hwbus_ops, bus); - if (!bus->core) - return -EIO; - - return wfx_probe(bus->core); -} - -static void wfx_spi_remove(struct spi_device *func) -{ - struct wfx_spi_priv *bus = spi_get_drvdata(func); - - wfx_release(bus->core); -} - -/* For dynamic driver binding, kernel does not use OF to match driver. It only - * use modalias and modalias is a copy of 'compatible' DT node with vendor - * stripped. 
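The need_swab handling in wfx_spi_copy_from_io()/wfx_spi_copy_to_io() above follows the comment in that file: the chip reads the SPI stream 16 bits at a time straight into a little-endian register, so it expects each word on the wire as "B1 B0 B3 B2"; a little-endian host using 16 bits per word produces that natively, while 8-bit words or a big-endian host must swap each 16-bit word first. A stand-alone illustration of that swap, with swab16_buf() as an invented stand-in for swab16s():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Swap the two bytes of each 16-bit word in place. */
static void swab16_buf(uint16_t *buf, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		buf[i] = (uint16_t)((buf[i] << 8) | (buf[i] >> 8));
}

int main(void)
{
	uint8_t raw[4] = { 0xB0, 0xB1, 0xB2, 0xB3 };	/* bytes in host memory order */
	uint16_t words[2];

	memcpy(words, raw, sizeof(raw));
	swab16_buf(words, 2);
	memcpy(raw, words, sizeof(raw));
	/* Prints "b1 b0 b3 b2", the order the chip expects on the wire. */
	printf("%02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]);
	return 0;
}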
- */ -static const struct spi_device_id wfx_spi_id[] = { - { "wf200", (kernel_ulong_t)&pdata_wf200 }, - { "brd4001a", (kernel_ulong_t)&pdata_brd4001a }, - { "brd8022a", (kernel_ulong_t)&pdata_brd8022a }, - { "brd8023a", (kernel_ulong_t)&pdata_brd8023a }, - { }, -}; -MODULE_DEVICE_TABLE(spi, wfx_spi_id); - -#ifdef CONFIG_OF -static const struct of_device_id wfx_spi_of_match[] = { - { .compatible = "silabs,wf200" }, - { .compatible = "silabs,brd4001a" }, - { .compatible = "silabs,brd8022a" }, - { .compatible = "silabs,brd8023a" }, - { }, -}; -MODULE_DEVICE_TABLE(of, wfx_spi_of_match); -#endif - -struct spi_driver wfx_spi_driver = { - .driver = { - .name = "wfx-spi", - .of_match_table = of_match_ptr(wfx_spi_of_match), - }, - .id_table = wfx_spi_id, - .probe = wfx_spi_probe, - .remove = wfx_spi_remove, -}; diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c deleted file mode 100644 index a4b5ffe158e4..000000000000 --- a/drivers/staging/wfx/data_rx.c +++ /dev/null @@ -1,92 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Data receiving implementation. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#include -#include - -#include "data_rx.h" -#include "wfx.h" -#include "bh.h" -#include "sta.h" - -static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt) -{ - int params, tid; - - if (wfx_api_older_than(wvif->wdev, 3, 6)) - return; - - switch (mgmt->u.action.u.addba_req.action_code) { - case WLAN_ACTION_ADDBA_REQ: - params = le16_to_cpu(mgmt->u.action.u.addba_req.capab); - tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; - ieee80211_start_rx_ba_session_offl(wvif->vif, mgmt->sa, tid); - break; - case WLAN_ACTION_DELBA: - params = le16_to_cpu(mgmt->u.action.u.delba.params); - tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; - ieee80211_stop_rx_ba_session_offl(wvif->vif, mgmt->sa, tid); - break; - } -} - -void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb) -{ - struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb); - struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data; - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; - - memset(hdr, 0, sizeof(*hdr)); - - if (arg->status == HIF_STATUS_RX_FAIL_MIC) - hdr->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_IV_STRIPPED; - else if (arg->status) - goto drop; - - if (skb->len < sizeof(struct ieee80211_pspoll)) { - dev_warn(wvif->wdev->dev, "malformed SDU received\n"); - goto drop; - } - - hdr->band = NL80211_BAND_2GHZ; - hdr->freq = ieee80211_channel_to_frequency(arg->channel_number, hdr->band); - - if (arg->rxed_rate >= 14) { - hdr->encoding = RX_ENC_HT; - hdr->rate_idx = arg->rxed_rate - 14; - } else if (arg->rxed_rate >= 4) { - hdr->rate_idx = arg->rxed_rate - 2; - } else { - hdr->rate_idx = arg->rxed_rate; - } - - if (!arg->rcpi_rssi) { - hdr->flag |= RX_FLAG_NO_SIGNAL_VAL; - dev_info(wvif->wdev->dev, "received frame without RSSI data\n"); - } - hdr->signal = arg->rcpi_rssi / 2 - 110; - hdr->antenna = 0; - - if (arg->encryp) - hdr->flag |= RX_FLAG_DECRYPTED; - - /* Block ack negotiation is offloaded by the firmware. However, re-ordering must be done by - * the mac80211. 
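Two conversions in wfx_rx_cb() above are compact enough to deserve a worked example: the received signal is reported as RCPI and converted with rcpi / 2 - 110, i.e. half-dB steps offset by 110 dB, and the hardware rate index maps onto mac80211's 2.4 GHz rate table with HT rates starting at index 14 and the OFDM rates shifted down by two because hardware slots 4 and 5 are unused. The helper names below are invented for illustration:

#include <assert.h>
#include <stdint.h>

/* RCPI in half-dB steps, offset by 110 dB (from signal = rcpi / 2 - 110 above). */
static int rcpi_to_dbm(uint8_t rcpi)
{
	return rcpi / 2 - 110;
}

/* Hardware rate index -> mac80211 rate_idx, mirroring wfx_rx_cb():
 *   14..  : HT, return value is the MCS number
 *   6..13 : OFDM 6M..54M (hardware slots 4 and 5 do not exist)
 *   0..3  : CCK 1M..11M, used as-is
 */
static int hw_rate_to_idx(uint8_t hw_rate)
{
	if (hw_rate >= 14)
		return hw_rate - 14;
	if (hw_rate >= 4)
		return hw_rate - 2;
	return hw_rate;
}

int main(void)
{
	assert(rcpi_to_dbm(80) == -70);		/* mid-range signal */
	assert(hw_rate_to_idx(6) == 4);		/* 6 Mb/s, first OFDM entry */
	assert(hw_rate_to_idx(14) == 0);	/* MCS0 */
	return 0;
}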
- */ - if (ieee80211_is_action(frame->frame_control) && - mgmt->u.action.category == WLAN_CATEGORY_BACK && - skb->len > IEEE80211_MIN_ACTION_SIZE) { - wfx_rx_handle_ba(wvif, mgmt); - goto drop; - } - - ieee80211_rx_irqsafe(wvif->wdev->hw, skb); - return; - -drop: - dev_kfree_skb(skb); -} diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h deleted file mode 100644 index cf708f16d602..000000000000 --- a/drivers/staging/wfx/data_rx.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Data receiving implementation. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_DATA_RX_H -#define WFX_DATA_RX_H - -struct wfx_vif; -struct sk_buff; -struct wfx_hif_ind_rx; - -void wfx_rx_cb(struct wfx_vif *wvif, const struct wfx_hif_ind_rx *arg, struct sk_buff *skb); - -#endif diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c deleted file mode 100644 index e07381b2ff4d..000000000000 --- a/drivers/staging/wfx/data_tx.c +++ /dev/null @@ -1,568 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Data transmitting implementation. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#include -#include - -#include "data_tx.h" -#include "wfx.h" -#include "bh.h" -#include "sta.h" -#include "queue.h" -#include "debug.h" -#include "traces.h" -#include "hif_tx_mib.h" - -static int wfx_get_hw_rate(struct wfx_dev *wdev, const struct ieee80211_tx_rate *rate) -{ - struct ieee80211_supported_band *band; - - if (rate->idx < 0) - return -1; - if (rate->flags & IEEE80211_TX_RC_MCS) { - if (rate->idx > 7) { - WARN(1, "wrong rate->idx value: %d", rate->idx); - return -1; - } - return rate->idx + 14; - } - /* The device only support 2GHz, else band information should be retrieved from - * ieee80211_tx_info - */ - band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; - if (rate->idx >= band->n_bitrates) { - WARN(1, "wrong rate->idx value: %d", rate->idx); - return -1; - } - return band->bitrates[rate->idx].hw_value; -} - -/* TX policy cache implementation */ - -static void wfx_tx_policy_build(struct wfx_vif *wvif, struct wfx_tx_policy *policy, - struct ieee80211_tx_rate *rates) -{ - struct wfx_dev *wdev = wvif->wdev; - int i, rateid; - u8 count; - - WARN(rates[0].idx < 0, "invalid rate policy"); - memset(policy, 0, sizeof(*policy)); - for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) { - if (rates[i].idx < 0) - break; - WARN_ON(rates[i].count > 15); - rateid = wfx_get_hw_rate(wdev, &rates[i]); - /* Pack two values in each byte of policy->rates */ - count = rates[i].count; - if (rateid % 2) - count <<= 4; - policy->rates[rateid / 2] |= count; - } -} - -static bool wfx_tx_policy_is_equal(const struct wfx_tx_policy *a, const struct wfx_tx_policy *b) -{ - return !memcmp(a->rates, b->rates, sizeof(a->rates)); -} - -static int wfx_tx_policy_find(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *wanted) -{ - struct wfx_tx_policy *it; - - list_for_each_entry(it, &cache->used, link) - if (wfx_tx_policy_is_equal(wanted, it)) - return it - cache->cache; - list_for_each_entry(it, &cache->free, link) - if (wfx_tx_policy_is_equal(wanted, it)) - return it - cache->cache; - return -1; -} - -static void wfx_tx_policy_use(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry) -{ - ++entry->usage_count; - list_move(&entry->link, &cache->used); -} - -static int wfx_tx_policy_release(struct wfx_tx_policy_cache *cache, struct wfx_tx_policy *entry) -{ - 
int ret = --entry->usage_count; - - if (!ret) - list_move(&entry->link, &cache->free); - return ret; -} - -static int wfx_tx_policy_get(struct wfx_vif *wvif, struct ieee80211_tx_rate *rates, bool *renew) -{ - int idx; - struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; - struct wfx_tx_policy wanted; - struct wfx_tx_policy *entry; - - wfx_tx_policy_build(wvif, &wanted, rates); - - spin_lock_bh(&cache->lock); - if (list_empty(&cache->free)) { - WARN(1, "unable to get a valid Tx policy"); - spin_unlock_bh(&cache->lock); - return HIF_TX_RETRY_POLICY_INVALID; - } - idx = wfx_tx_policy_find(cache, &wanted); - if (idx >= 0) { - *renew = false; - } else { - /* If policy is not found create a new one using the oldest entry in "free" list */ - *renew = true; - entry = list_entry(cache->free.prev, struct wfx_tx_policy, link); - memcpy(entry->rates, wanted.rates, sizeof(entry->rates)); - entry->uploaded = false; - entry->usage_count = 0; - idx = entry - cache->cache; - } - wfx_tx_policy_use(cache, &cache->cache[idx]); - if (list_empty(&cache->free)) - ieee80211_stop_queues(wvif->wdev->hw); - spin_unlock_bh(&cache->lock); - return idx; -} - -static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx) -{ - int usage, locked; - struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; - - if (idx == HIF_TX_RETRY_POLICY_INVALID) - return; - spin_lock_bh(&cache->lock); - locked = list_empty(&cache->free); - usage = wfx_tx_policy_release(cache, &cache->cache[idx]); - if (locked && !usage) - ieee80211_wake_queues(wvif->wdev->hw); - spin_unlock_bh(&cache->lock); -} - -static int wfx_tx_policy_upload(struct wfx_vif *wvif) -{ - struct wfx_tx_policy *policies = wvif->tx_policy_cache.cache; - u8 tmp_rates[12]; - int i, is_used; - - do { - spin_lock_bh(&wvif->tx_policy_cache.lock); - for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) { - is_used = memzcmp(policies[i].rates, sizeof(policies[i].rates)); - if (!policies[i].uploaded && is_used) - break; - } - if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) { - policies[i].uploaded = true; - memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates)); - spin_unlock_bh(&wvif->tx_policy_cache.lock); - wfx_hif_set_tx_rate_retry_policy(wvif, i, tmp_rates); - } else { - spin_unlock_bh(&wvif->tx_policy_cache.lock); - } - } while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)); - return 0; -} - -void wfx_tx_policy_upload_work(struct work_struct *work) -{ - struct wfx_vif *wvif = container_of(work, struct wfx_vif, tx_policy_upload_work); - - wfx_tx_policy_upload(wvif); - wfx_tx_unlock(wvif->wdev); -} - -void wfx_tx_policy_init(struct wfx_vif *wvif) -{ - struct wfx_tx_policy_cache *cache = &wvif->tx_policy_cache; - int i; - - memset(cache, 0, sizeof(*cache)); - - spin_lock_init(&cache->lock); - INIT_LIST_HEAD(&cache->used); - INIT_LIST_HEAD(&cache->free); - - for (i = 0; i < ARRAY_SIZE(cache->cache); ++i) - list_add(&cache->cache[i].link, &cache->free); -} - -/* Tx implementation */ - -static bool wfx_is_action_back(struct ieee80211_hdr *hdr) -{ - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr; - - if (!ieee80211_is_action(mgmt->frame_control)) - return false; - if (mgmt->u.action.category != WLAN_CATEGORY_BACK) - return false; - return true; -} - -static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, - struct ieee80211_hdr *hdr) -{ - struct wfx_sta_priv *sta_priv = sta ? 
(struct wfx_sta_priv *)&sta->drv_priv : NULL; - const u8 *da = ieee80211_get_DA(hdr); - - if (sta_priv && sta_priv->link_id) - return sta_priv->link_id; - if (wvif->vif->type != NL80211_IFTYPE_AP) - return 0; - if (is_multicast_ether_addr(da)) - return 0; - return HIF_LINK_ID_NOT_ASSOCIATED; -} - -static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates) -{ - int i; - bool finished; - - /* Firmware is not able to mix rates with different flags */ - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - rates[i].flags |= IEEE80211_TX_RC_SHORT_GI; - if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI)) - rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; - if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)) - rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; - } - - /* Sort rates and remove duplicates */ - do { - finished = true; - for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) { - if (rates[i + 1].idx == rates[i].idx && - rates[i].idx != -1) { - rates[i].count += rates[i + 1].count; - if (rates[i].count > 15) - rates[i].count = 15; - rates[i + 1].idx = -1; - rates[i + 1].count = 0; - - finished = false; - } - if (rates[i + 1].idx > rates[i].idx) { - swap(rates[i + 1], rates[i]); - finished = false; - } - } - } while (!finished); - /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */ - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - if (rates[i].idx == 0) - break; - if (rates[i].idx == -1) { - rates[i].idx = 0; - rates[i].count = 8; /* == hw->max_rate_tries */ - rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS; - break; - } - } - /* All retries use long GI */ - for (i = 1; i < IEEE80211_TX_MAX_RATES; i++) - rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI; -} - -static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif, struct ieee80211_tx_info *tx_info) -{ - bool tx_policy_renew = false; - u8 ret; - - ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew); - if (ret == HIF_TX_RETRY_POLICY_INVALID) - dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy"); - - if (tx_policy_renew) { - wfx_tx_lock(wvif->wdev); - if (!schedule_work(&wvif->tx_policy_upload_work)) - wfx_tx_unlock(wvif->wdev); - } - return ret; -} - -static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info) -{ - if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_MCS)) - return HIF_FRAME_FORMAT_NON_HT; - else if (!(tx_info->driver_rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)) - return HIF_FRAME_FORMAT_MIXED_FORMAT_HT; - else - return HIF_FRAME_FORMAT_GF_HT_11N; -} - -static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key) -{ - int mic_space; - - if (!hw_key) - return 0; - if (hw_key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) - return 0; - mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 
8 : 0; - return hw_key->icv_len + mic_space; -} - -static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct sk_buff *skb) -{ - struct wfx_hif_msg *hif_msg; - struct wfx_hif_req_tx *req; - struct wfx_tx_priv *tx_priv; - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int queue_id = skb_get_queue_mapping(skb); - size_t offset = (size_t)skb->data & 3; - int wmsg_len = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + offset; - - WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id"); - wfx_tx_fixup_rates(tx_info->driver_rates); - - /* From now tx_info->control is unusable */ - memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv)); - /* Fill tx_priv */ - tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data; - tx_priv->icv_size = wfx_tx_get_icv_len(hw_key); - - /* Fill hif_msg */ - WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb"); - WARN(offset & 1, "attempt to transmit an unaligned frame"); - skb_put(skb, tx_priv->icv_size); - skb_push(skb, wmsg_len); - memset(skb->data, 0, wmsg_len); - hif_msg = (struct wfx_hif_msg *)skb->data; - hif_msg->len = cpu_to_le16(skb->len); - hif_msg->id = HIF_REQ_ID_TX; - hif_msg->interface = wvif->id; - if (skb->len > le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)) { - dev_warn(wvif->wdev->dev, - "requested frame size (%d) is larger than maximum supported (%d)\n", - skb->len, le16_to_cpu(wvif->wdev->hw_caps.size_inp_ch_buf)); - skb_pull(skb, wmsg_len); - return -EIO; - } - - /* Fill tx request */ - req = (struct wfx_hif_req_tx *)hif_msg->body; - /* packet_id just need to be unique on device. 32bits are more than necessary for that task, - * so we take advantage of it to add some extra data for debug. - */ - req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF; - req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16; - req->packet_id |= queue_id << 28; - - req->fc_offset = offset; - /* Queue index are inverted between firmware and Linux */ - req->queue_id = 3 - queue_id; - req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr); - req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info); - req->frame_format = wfx_tx_get_frame_format(tx_info); - if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - req->short_gi = 1; - if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) - req->after_dtim = 1; - - /* Auxiliary operations */ - wfx_tx_queues_put(wvif, skb); - if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) - schedule_work(&wvif->update_tim_work); - wfx_bh_request_tx(wvif->wdev); - return 0; -} - -void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) -{ - struct wfx_dev *wdev = hw->priv; - struct wfx_vif *wvif; - struct ieee80211_sta *sta = control ? 
control->sta : NULL; - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, rate_driver_data); - - BUILD_BUG_ON_MSG(sizeof(struct wfx_tx_priv) > driver_data_room, - "struct tx_priv is too large"); - WARN(skb->next || skb->prev, "skb is already member of a list"); - /* control.vif can be NULL for injected frames */ - if (tx_info->control.vif) - wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv; - else - wvif = wvif_iterate(wdev, NULL); - if (WARN_ON(!wvif)) - goto drop; - /* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any BlockAck session - * management frame. The check below exist just in case. - */ - if (wfx_is_action_back(hdr)) { - dev_info(wdev->dev, "drop BA action\n"); - goto drop; - } - if (wfx_tx_inner(wvif, sta, skb)) - goto drop; - - return; - -drop: - ieee80211_tx_status_irqsafe(wdev->hw, skb); -} - -static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb) -{ - struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; - struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; - unsigned int offset = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + - req->fc_offset; - - if (!wvif) { - pr_warn("vif associated with the skb does not exist anymore\n"); - return; - } - wfx_tx_policy_put(wvif, req->retry_policy_index); - skb_pull(skb, offset); - ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb); -} - -static void wfx_tx_fill_rates(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info, - const struct wfx_hif_cnf_tx *arg) -{ - struct ieee80211_tx_rate *rate; - int tx_count; - int i; - - tx_count = arg->ack_failures; - if (!arg->status || arg->ack_failures) - tx_count += 1; /* Also report success */ - for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { - rate = &tx_info->status.rates[i]; - if (rate->idx < 0) - break; - if (tx_count < rate->count && arg->status == HIF_STATUS_TX_FAIL_RETRIES && - arg->ack_failures) - dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n", - rate->count, tx_count); - if (tx_count <= rate->count && tx_count && - arg->txed_rate != wfx_get_hw_rate(wdev, rate)) - dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n", - arg->txed_rate, wfx_get_hw_rate(wdev, rate)); - if (tx_count > rate->count) { - tx_count -= rate->count; - } else if (!tx_count) { - rate->count = 0; - rate->idx = -1; - } else { - rate->count = tx_count; - tx_count = 0; - } - } - if (tx_count) - dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count); -} - -void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg) -{ - const struct wfx_tx_priv *tx_priv; - struct ieee80211_tx_info *tx_info; - struct wfx_vif *wvif; - struct sk_buff *skb; - - skb = wfx_pending_get(wdev, arg->packet_id); - if (!skb) { - dev_warn(wdev->dev, "received unknown packet_id (%#.8x) from chip\n", - arg->packet_id); - return; - } - tx_info = IEEE80211_SKB_CB(skb); - tx_priv = wfx_skb_tx_priv(skb); - wvif = wdev_to_wvif(wdev, ((struct wfx_hif_msg *)skb->data)->interface); - WARN_ON(!wvif); - if (!wvif) - return; - - /* Note that wfx_pending_get_pkt_us_delay() get data from tx_info */ - _trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb)); - wfx_tx_fill_rates(wdev, tx_info, arg); - skb_trim(skb, skb->len - tx_priv->icv_size); - - /* From now, you can touch to tx_info->status, but do not touch to tx_priv anymore */ - /* FIXME: use ieee80211_tx_info_clear_status() */ - 
memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data)); - memset(tx_info->pad, 0, sizeof(tx_info->pad)); - - if (!arg->status) { - tx_info->status.tx_time = le32_to_cpu(arg->media_delay) - - le32_to_cpu(arg->tx_queue_delay); - if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) - tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; - else - tx_info->flags |= IEEE80211_TX_STAT_ACK; - } else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) { - WARN(!arg->requeue, "incoherent status and result_flags"); - if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { - wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */ - schedule_work(&wvif->update_tim_work); - } - tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; - } - wfx_skb_dtor(wvif, skb); -} - -static void wfx_flush_vif(struct wfx_vif *wvif, u32 queues, struct sk_buff_head *dropped) -{ - struct wfx_queue *queue; - int i; - - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - if (!(BIT(i) & queues)) - continue; - queue = &wvif->tx_queue[i]; - if (dropped) - wfx_tx_queue_drop(wvif, queue, dropped); - } - if (wvif->wdev->chip_frozen) - return; - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - if (!(BIT(i) & queues)) - continue; - queue = &wvif->tx_queue[i]; - if (wait_event_timeout(wvif->wdev->tx_dequeue, wfx_tx_queue_empty(wvif, queue), - msecs_to_jiffies(1000)) <= 0) - dev_warn(wvif->wdev->dev, "frames queued while flushing tx queues?"); - } -} - -void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) -{ - struct wfx_dev *wdev = hw->priv; - struct sk_buff_head dropped; - struct wfx_vif *wvif; - struct wfx_hif_msg *hif; - struct sk_buff *skb; - - skb_queue_head_init(&dropped); - if (vif) { - wvif = (struct wfx_vif *)vif->drv_priv; - wfx_flush_vif(wvif, queues, drop ? &dropped : NULL); - } else { - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) - wfx_flush_vif(wvif, queues, drop ? &dropped : NULL); - } - wfx_tx_flush(wdev); - if (wdev->chip_frozen) - wfx_pending_drop(wdev, &dropped); - while ((skb = skb_dequeue(&dropped)) != NULL) { - hif = (struct wfx_hif_msg *)skb->data; - wvif = wdev_to_wvif(wdev, hif->interface); - ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb)); - wfx_skb_dtor(wvif, skb); - } -} diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h deleted file mode 100644 index 983470705e4b..000000000000 --- a/drivers/staging/wfx/data_tx.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Data transmitting implementation. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
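The packet_id built in wfx_tx_inner() above only needs to be unique per device, so the remaining bits are reused for debug information: bits 0..15 hold a wrapping counter, bits 16..27 the 802.11 sequence number and bits 28..31 the queue index. A stand-alone sketch of that layout, with build_packet_id() as an invented name and the field positions taken from the code above:

#include <stdint.h>
#include <stdio.h>

static uint32_t build_packet_id(uint16_t counter, uint16_t seq, uint8_t queue_id)
{
	uint32_t id = counter;			/* bits  0..15: wrapping counter */

	id |= (uint32_t)(seq & 0xFFF) << 16;	/* bits 16..27: 802.11 sequence number */
	id |= (uint32_t)(queue_id & 0xF) << 28;	/* bits 28..31: queue index */
	return id;
}

int main(void)
{
	/* counter 0x0042, sequence number 0x123, queue 2 */
	printf("packet_id = 0x%08x\n", (unsigned int)build_packet_id(0x0042, 0x123, 2));
	return 0;	/* prints 0x21230042 */
}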
- * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_DATA_TX_H -#define WFX_DATA_TX_H - -#include -#include - -#include "hif_api_cmd.h" -#include "hif_api_mib.h" - -struct wfx_tx_priv; -struct wfx_dev; -struct wfx_vif; - -struct wfx_tx_policy { - struct list_head link; - int usage_count; - u8 rates[12]; - bool uploaded; -}; - -struct wfx_tx_policy_cache { - struct wfx_tx_policy cache[HIF_TX_RETRY_POLICY_MAX]; - /* FIXME: use a trees and drop hash from tx_policy */ - struct list_head used; - struct list_head free; - spinlock_t lock; -}; - -struct wfx_tx_priv { - ktime_t xmit_timestamp; - unsigned char icv_size; -}; - -void wfx_tx_policy_init(struct wfx_vif *wvif); -void wfx_tx_policy_upload_work(struct work_struct *work); - -void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); -void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct wfx_hif_cnf_tx *arg); -void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); - -static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb) -{ - struct ieee80211_tx_info *tx_info; - - if (!skb) - return NULL; - tx_info = IEEE80211_SKB_CB(skb); - return (struct wfx_tx_priv *)tx_info->rate_driver_data; -} - -static inline struct wfx_hif_req_tx *wfx_skb_txreq(struct sk_buff *skb) -{ - struct wfx_hif_msg *hif = (struct wfx_hif_msg *)skb->data; - struct wfx_hif_req_tx *req = (struct wfx_hif_req_tx *)hif->body; - - return req; -} - -#endif diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c deleted file mode 100644 index e8265208f9a5..000000000000 --- a/drivers/staging/wfx/debug.c +++ /dev/null @@ -1,331 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Debugfs interface. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include -#include - -#include "debug.h" -#include "wfx.h" -#include "sta.h" -#include "main.h" -#include "hif_tx.h" -#include "hif_tx_mib.h" - -#define CREATE_TRACE_POINTS -#include "traces.h" - -static const struct trace_print_flags hif_msg_print_map[] = { - hif_msg_list, -}; - -static const struct trace_print_flags hif_mib_print_map[] = { - hif_mib_list, -}; - -static const struct trace_print_flags wfx_reg_print_map[] = { - wfx_reg_list, -}; - -static const char *get_symbol(unsigned long val, const struct trace_print_flags *symbol_array) -{ - int i; - - for (i = 0; symbol_array[i].mask != -1; i++) { - if (val == symbol_array[i].mask) - return symbol_array[i].name; - } - - return "unknown"; -} - -const char *wfx_get_hif_name(unsigned long id) -{ - return get_symbol(id, hif_msg_print_map); -} - -const char *wfx_get_mib_name(unsigned long id) -{ - return get_symbol(id, hif_mib_print_map); -} - -const char *wfx_get_reg_name(unsigned long id) -{ - return get_symbol(id, wfx_reg_print_map); -} - -static int wfx_counters_show(struct seq_file *seq, void *v) -{ - int ret, i; - struct wfx_dev *wdev = seq->private; - struct wfx_hif_mib_extended_count_table counters[3]; - - for (i = 0; i < ARRAY_SIZE(counters); i++) { - ret = wfx_hif_get_counters_table(wdev, i, counters + i); - if (ret < 0) - return ret; - if (ret > 0) - return -EIO; - } - - seq_printf(seq, "%-24s %12s %12s %12s\n", "", "global", "iface 0", "iface 1"); - -#define PUT_COUNTER(name) \ - seq_printf(seq, "%-24s %12d %12d %12d\n", #name, \ - le32_to_cpu(counters[2].count_##name), \ - le32_to_cpu(counters[0].count_##name), \ - le32_to_cpu(counters[1].count_##name)) - - PUT_COUNTER(tx_frames); - PUT_COUNTER(tx_frames_multicast); - PUT_COUNTER(tx_frames_success); - PUT_COUNTER(tx_frames_retried); - PUT_COUNTER(tx_frames_multi_retried); - PUT_COUNTER(tx_frames_failed); - - PUT_COUNTER(ack_failed); - PUT_COUNTER(rts_success); - PUT_COUNTER(rts_failed); - - PUT_COUNTER(rx_frames); - PUT_COUNTER(rx_frames_multicast); - PUT_COUNTER(rx_frames_success); - PUT_COUNTER(rx_frames_failed); - PUT_COUNTER(drop_plcp); - PUT_COUNTER(drop_fcs); - PUT_COUNTER(drop_no_key); - PUT_COUNTER(drop_decryption); - PUT_COUNTER(drop_tkip_mic); - PUT_COUNTER(drop_bip_mic); - PUT_COUNTER(drop_cmac_icv); - PUT_COUNTER(drop_cmac_replay); - PUT_COUNTER(drop_ccmp_replay); - PUT_COUNTER(drop_duplicate); - - PUT_COUNTER(rx_bcn_miss); - PUT_COUNTER(rx_bcn_success); - PUT_COUNTER(rx_bcn_dtim); - PUT_COUNTER(rx_bcn_dtim_aid0_clr); - PUT_COUNTER(rx_bcn_dtim_aid0_set); - -#undef PUT_COUNTER - - for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++) - seq_printf(seq, "reserved[%02d]%12s %12d %12d %12d\n", i, "", - le32_to_cpu(counters[2].reserved[i]), - le32_to_cpu(counters[0].reserved[i]), - le32_to_cpu(counters[1].reserved[i])); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(wfx_counters); - -static const char * const channel_names[] = { - [0] = "1M", - [1] = "2M", - [2] = "5.5M", - [3] = "11M", - /* Entries 4 and 5 does not exist */ - [6] = "6M", - [7] = "9M", - [8] = "12M", - [9] = "18M", - [10] = "24M", - [11] = "36M", - [12] = "48M", - [13] = "54M", - [14] = "MCS0", - [15] = "MCS1", - [16] = "MCS2", - [17] = "MCS3", - [18] = "MCS4", - [19] = "MCS5", - [20] = "MCS6", - [21] = "MCS7", -}; - -static int wfx_rx_stats_show(struct seq_file *seq, void *v) -{ - struct wfx_dev *wdev = seq->private; - struct wfx_hif_rx_stats *st = &wdev->rx_stats; - int i; - - mutex_lock(&wdev->rx_stats_lock); - seq_printf(seq, "Timestamp: %dus\n", 
st->date); - seq_printf(seq, "Low power clock: frequency %uHz, external %s\n", - le32_to_cpu(st->pwr_clk_freq), st->is_ext_pwr_clk ? "yes" : "no"); - seq_printf(seq, "Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n", - st->nb_rx_frame, st->per_total, st->throughput); - seq_puts(seq, " Num. of PER RSSI SNR CFO\n"); - seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n"); - for (i = 0; i < ARRAY_SIZE(channel_names); i++) { - if (channel_names[i]) - seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n", - channel_names[i], - le32_to_cpu(st->nb_rx_by_rate[i]), - le16_to_cpu(st->per[i]), - (s16)le16_to_cpu(st->rssi[i]) / 100, - (s16)le16_to_cpu(st->snr[i]) / 100, - (s16)le16_to_cpu(st->cfo[i])); - } - mutex_unlock(&wdev->rx_stats_lock); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats); - -static int wfx_tx_power_loop_show(struct seq_file *seq, void *v) -{ - struct wfx_dev *wdev = seq->private; - struct wfx_hif_tx_power_loop_info *st = &wdev->tx_power_loop_info; - int tmp; - - mutex_lock(&wdev->tx_power_loop_info_lock); - tmp = le16_to_cpu(st->tx_gain_dig); - seq_printf(seq, "Tx gain digital: %d\n", tmp); - tmp = le16_to_cpu(st->tx_gain_pa); - seq_printf(seq, "Tx gain PA: %d\n", tmp); - tmp = (s16)le16_to_cpu(st->target_pout); - seq_printf(seq, "Target Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25); - tmp = (s16)le16_to_cpu(st->p_estimation); - seq_printf(seq, "FEM Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25); - tmp = le16_to_cpu(st->vpdet); - seq_printf(seq, "Vpdet: %d mV\n", tmp); - seq_printf(seq, "Measure index: %d\n", st->measurement_index); - mutex_unlock(&wdev->tx_power_loop_info_lock); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(wfx_tx_power_loop); - -static ssize_t wfx_send_pds_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct wfx_dev *wdev = file->private_data; - char *buf; - int ret; - - if (*ppos != 0) { - dev_dbg(wdev->dev, "PDS data must be written in one transaction"); - return -EBUSY; - } - buf = memdup_user(user_buf, count); - if (IS_ERR(buf)) - return PTR_ERR(buf); - *ppos = *ppos + count; - ret = wfx_send_pds(wdev, buf, count); - kfree(buf); - if (ret < 0) - return ret; - return count; -} - -static const struct file_operations wfx_send_pds_fops = { - .open = simple_open, - .write = wfx_send_pds_write, -}; - -struct dbgfs_hif_msg { - struct wfx_dev *wdev; - struct completion complete; - u8 reply[1024]; - int ret; -}; - -static ssize_t wfx_send_hif_msg_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dbgfs_hif_msg *context = file->private_data; - struct wfx_dev *wdev = context->wdev; - struct wfx_hif_msg *request; - - if (completion_done(&context->complete)) { - dev_dbg(wdev->dev, "read previous result before start a new one\n"); - return -EBUSY; - } - if (count < sizeof(struct wfx_hif_msg)) - return -EINVAL; - - /* wfx_cmd_send() checks that reply buffer is wide enough, but does not return precise - * length read. User have to know how many bytes should be read. Filling reply buffer with a - * memory pattern may help user. 
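wfx_tx_power_loop_show() above prints power values that the firmware reports in quarter-dBm fixed point, hence the tmp / 4 and (tmp % 4) * 25 split into an integer part and a two-digit fraction. A minimal worked example with positive values; print_quarter_dbm() is an invented name:

#include <stdint.h>
#include <stdio.h>

/* Print a quarter-dBm value as "X.YY dBm", as the debugfs code above does. */
static void print_quarter_dbm(const char *label, int16_t raw)
{
	printf("%s: %d.%02d dBm\n", label, raw / 4, (raw % 4) * 25);
}

int main(void)
{
	print_quarter_dbm("Target Pout", 42);	/* 42 quarter-dBm -> 10.50 dBm */
	print_quarter_dbm("FEM Pout", 80);	/* 80 quarter-dBm -> 20.00 dBm */
	return 0;
}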
- */ - memset(context->reply, 0xFF, sizeof(context->reply)); - request = memdup_user(user_buf, count); - if (IS_ERR(request)) - return PTR_ERR(request); - if (le16_to_cpu(request->len) != count) { - kfree(request); - return -EINVAL; - } - context->ret = wfx_cmd_send(wdev, request, context->reply, sizeof(context->reply), false); - - kfree(request); - complete(&context->complete); - return count; -} - -static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct dbgfs_hif_msg *context = file->private_data; - int ret; - - if (count > sizeof(context->reply)) - return -EINVAL; - ret = wait_for_completion_interruptible(&context->complete); - if (ret) - return ret; - if (context->ret < 0) - return context->ret; - /* Be careful, write() is waiting for a full message while read() only returns a payload */ - if (copy_to_user(user_buf, context->reply, count)) - return -EFAULT; - - return count; -} - -static int wfx_send_hif_msg_open(struct inode *inode, struct file *file) -{ - struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL); - - if (!context) - return -ENOMEM; - context->wdev = inode->i_private; - init_completion(&context->complete); - file->private_data = context; - return 0; -} - -static int wfx_send_hif_msg_release(struct inode *inode, struct file *file) -{ - struct dbgfs_hif_msg *context = file->private_data; - - kfree(context); - return 0; -} - -static const struct file_operations wfx_send_hif_msg_fops = { - .open = wfx_send_hif_msg_open, - .release = wfx_send_hif_msg_release, - .write = wfx_send_hif_msg_write, - .read = wfx_send_hif_msg_read, -}; - -int wfx_debug_init(struct wfx_dev *wdev) -{ - struct dentry *d; - - d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir); - debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops); - debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops); - debugfs_create_file("tx_power_loop", 0444, d, wdev, &wfx_tx_power_loop_fops); - debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops); - debugfs_create_file("send_hif_msg", 0600, d, wdev, &wfx_send_hif_msg_fops); - - return 0; -} diff --git a/drivers/staging/wfx/debug.h b/drivers/staging/wfx/debug.h deleted file mode 100644 index 3840575e5e28..000000000000 --- a/drivers/staging/wfx/debug.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Debugfs interface. - * - * Copyright (c) 2017-2019, Silicon Laboratories, Inc. - * Copyright (c) 2011, ST-Ericsson - */ -#ifndef WFX_DEBUG_H -#define WFX_DEBUG_H - -struct wfx_dev; - -int wfx_debug_init(struct wfx_dev *wdev); - -const char *wfx_get_hif_name(unsigned long id); -const char *wfx_get_mib_name(unsigned long id); -const char *wfx_get_reg_name(unsigned long id); - -#endif diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c deleted file mode 100644 index 3d1b8a135dc0..000000000000 --- a/drivers/staging/wfx/fwio.c +++ /dev/null @@ -1,389 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Firmware loading. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include -#include -#include - -#include "fwio.h" -#include "wfx.h" -#include "hwio.h" - -/* Addresses below are in SRAM area */ -#define WFX_DNLD_FIFO 0x09004000 -#define DNLD_BLOCK_SIZE 0x0400 -#define DNLD_FIFO_SIZE 0x8000 /* (32 * DNLD_BLOCK_SIZE) */ -/* Download Control Area (DCA) */ -#define WFX_DCA_IMAGE_SIZE 0x0900C000 -#define WFX_DCA_PUT 0x0900C004 -#define WFX_DCA_GET 0x0900C008 -#define WFX_DCA_HOST_STATUS 0x0900C00C -#define HOST_READY 0x87654321 -#define HOST_INFO_READ 0xA753BD99 -#define HOST_UPLOAD_PENDING 0xABCDDCBA -#define HOST_UPLOAD_COMPLETE 0xD4C64A99 -#define HOST_OK_TO_JUMP 0x174FC882 -#define WFX_DCA_NCP_STATUS 0x0900C010 -#define NCP_NOT_READY 0x12345678 -#define NCP_READY 0x87654321 -#define NCP_INFO_READY 0xBD53EF99 -#define NCP_DOWNLOAD_PENDING 0xABCDDCBA -#define NCP_DOWNLOAD_COMPLETE 0xCAFEFECA -#define NCP_AUTH_OK 0xD4C64A99 -#define NCP_AUTH_FAIL 0x174FC882 -#define NCP_PUB_KEY_RDY 0x7AB41D19 -#define WFX_DCA_FW_SIGNATURE 0x0900C014 -#define FW_SIGNATURE_SIZE 0x40 -#define WFX_DCA_FW_HASH 0x0900C054 -#define FW_HASH_SIZE 0x08 -#define WFX_DCA_FW_VERSION 0x0900C05C -#define FW_VERSION_SIZE 0x04 -#define WFX_DCA_RESERVED 0x0900C060 -#define DCA_RESERVED_SIZE 0x20 -#define WFX_STATUS_INFO 0x0900C080 -#define WFX_BOOTLOADER_LABEL 0x0900C084 -#define BOOTLOADER_LABEL_SIZE 0x3C -#define WFX_PTE_INFO 0x0900C0C0 -#define PTE_INFO_KEYSET_IDX 0x0D -#define PTE_INFO_SIZE 0x10 -#define WFX_ERR_INFO 0x0900C0D0 -#define ERR_INVALID_SEC_TYPE 0x05 -#define ERR_SIG_VERIF_FAILED 0x0F -#define ERR_AES_CTRL_KEY 0x10 -#define ERR_ECC_PUB_KEY 0x11 -#define ERR_MAC_KEY 0x18 - -#define DCA_TIMEOUT 50 /* milliseconds */ -#define WAKEUP_TIMEOUT 200 /* milliseconds */ - -static const char * const fwio_errors[] = { - [ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption", - [ERR_SIG_VERIF_FAILED] = "Signature verification failed", - [ERR_AES_CTRL_KEY] = "AES control key not initialized", - [ERR_ECC_PUB_KEY] = "ECC public key not initialized", - [ERR_MAC_KEY] = "MAC key not initialized", -}; - -/* request_firmware() allocate data using vmalloc(). It is not compatible with underlying hardware - * that use DMA. Function below detect this case and allocate a bounce buffer if necessary. - * - * Notice that, in doubt, you can enable CONFIG_DEBUG_SG to ask kernel to detect this problem at - * runtime (else, kernel silently fail). 
- * - * NOTE: it may also be possible to use 'pages' from struct firmware and avoid bounce buffer - */ -static int wfx_sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf, size_t len) -{ - int ret; - const u8 *tmp; - - if (!virt_addr_valid(buf)) { - tmp = kmemdup(buf, len, GFP_KERNEL); - if (!tmp) - return -ENOMEM; - } else { - tmp = buf; - } - ret = wfx_sram_buf_write(wdev, addr, tmp, len); - if (tmp != buf) - kfree(tmp); - return ret; -} - -static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip, - const struct firmware **fw, int *file_offset) -{ - int keyset_file; - char filename[256]; - const char *data; - int ret; - - snprintf(filename, sizeof(filename), "%s_%02X.sec", - wdev->pdata.file_fw, keyset_chip); - ret = firmware_request_nowarn(fw, filename, wdev->dev); - if (ret) { - dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n", - filename, wdev->pdata.file_fw); - snprintf(filename, sizeof(filename), "%s.sec", wdev->pdata.file_fw); - ret = request_firmware(fw, filename, wdev->dev); - if (ret) { - dev_err(wdev->dev, "can't load %s\n", filename); - *fw = NULL; - return ret; - } - } - - data = (*fw)->data; - if (memcmp(data, "KEYSET", 6) != 0) { - /* Legacy firmware format */ - *file_offset = 0; - keyset_file = 0x90; - } else { - *file_offset = 8; - keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]); - if (keyset_file < 0) { - dev_err(wdev->dev, "%s corrupted\n", filename); - release_firmware(*fw); - *fw = NULL; - return -EINVAL; - } - } - if (keyset_file != keyset_chip) { - dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n", - keyset_file, keyset_chip); - release_firmware(*fw); - *fw = NULL; - return -ENODEV; - } - wdev->keyset = keyset_file; - return 0; -} - -static int wait_ncp_status(struct wfx_dev *wdev, u32 status) -{ - ktime_t now, start; - u32 reg; - int ret; - - start = ktime_get(); - for (;;) { - ret = wfx_sram_reg_read(wdev, WFX_DCA_NCP_STATUS, ®); - if (ret < 0) - return -EIO; - now = ktime_get(); - if (reg == status) - break; - if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT))) - return -ETIMEDOUT; - } - if (ktime_compare(now, start)) - dev_dbg(wdev->dev, "chip answer after %lldus\n", ktime_us_delta(now, start)); - else - dev_dbg(wdev->dev, "chip answer immediately\n"); - return 0; -} - -static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len) -{ - int ret; - u32 offs, bytes_done = 0; - ktime_t now, start; - - if (len % DNLD_BLOCK_SIZE) { - dev_err(wdev->dev, "firmware size is not aligned. 
Buffer overrun will occur\n"); - return -EIO; - } - offs = 0; - while (offs < len) { - start = ktime_get(); - for (;;) { - now = ktime_get(); - if (offs + DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE) - break; - if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT))) - return -ETIMEDOUT; - ret = wfx_sram_reg_read(wdev, WFX_DCA_GET, &bytes_done); - if (ret < 0) - return ret; - } - if (ktime_compare(now, start)) - dev_dbg(wdev->dev, "answer after %lldus\n", ktime_us_delta(now, start)); - - ret = wfx_sram_write_dma_safe(wdev, WFX_DNLD_FIFO + (offs % DNLD_FIFO_SIZE), - data + offs, DNLD_BLOCK_SIZE); - if (ret < 0) - return ret; - - /* The device seems to not support writing 0 in this register during first loop */ - offs += DNLD_BLOCK_SIZE; - ret = wfx_sram_reg_write(wdev, WFX_DCA_PUT, offs); - if (ret < 0) - return ret; - } - return 0; -} - -static void print_boot_status(struct wfx_dev *wdev) -{ - u32 reg; - - wfx_sram_reg_read(wdev, WFX_STATUS_INFO, ®); - if (reg == 0x12345678) - return; - wfx_sram_reg_read(wdev, WFX_ERR_INFO, ®); - if (reg < ARRAY_SIZE(fwio_errors) && fwio_errors[reg]) - dev_info(wdev->dev, "secure boot: %s\n", fwio_errors[reg]); - else - dev_info(wdev->dev, "secure boot: Error %#02x\n", reg); -} - -static int load_firmware_secure(struct wfx_dev *wdev) -{ - const struct firmware *fw = NULL; - int header_size; - int fw_offset; - ktime_t start; - u8 *buf; - int ret; - - BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE); - buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY); - ret = wait_ncp_status(wdev, NCP_INFO_READY); - if (ret) - goto error; - - wfx_sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE); - buf[BOOTLOADER_LABEL_SIZE] = 0; - dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf); - - wfx_sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE); - ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset); - if (ret) - goto error; - header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE; - - wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ); - ret = wait_ncp_status(wdev, NCP_READY); - if (ret) - goto error; - - wfx_sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); /* Fifo init */ - wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00", FW_VERSION_SIZE); - wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset, - FW_SIGNATURE_SIZE); - wfx_sram_write_dma_safe(wdev, WFX_DCA_FW_HASH, fw->data + fw_offset + FW_SIGNATURE_SIZE, - FW_HASH_SIZE); - wfx_sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size); - wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING); - ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING); - if (ret) - goto error; - - start = ktime_get(); - ret = upload_firmware(wdev, fw->data + header_size, fw->size - header_size); - if (ret) - goto error; - dev_dbg(wdev->dev, "firmware load after %lldus\n", - ktime_us_delta(ktime_get(), start)); - - wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE); - ret = wait_ncp_status(wdev, NCP_AUTH_OK); - /* Legacy ROM support */ - if (ret < 0) - ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY); - if (ret < 0) - goto error; - wfx_sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP); - -error: - kfree(buf); - if (fw) - release_firmware(fw); - if (ret) - print_boot_status(wdev); - return ret; -} - -static int init_gpr(struct wfx_dev *wdev) -{ - int ret, i; - static const struct { - int index; - u32 value; - } gpr_init[] = 
{ - { 0x07, 0x208775 }, - { 0x08, 0x2EC020 }, - { 0x09, 0x3C3C3C }, - { 0x0B, 0x322C44 }, - { 0x0C, 0xA06497 }, - }; - - for (i = 0; i < ARRAY_SIZE(gpr_init); i++) { - ret = wfx_igpr_reg_write(wdev, gpr_init[i].index, gpr_init[i].value); - if (ret < 0) - return ret; - dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index, gpr_init[i].value); - } - return 0; -} - -int wfx_init_device(struct wfx_dev *wdev) -{ - int ret; - int hw_revision, hw_type; - int wakeup_timeout = 50; /* ms */ - ktime_t now, start; - u32 reg; - - reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_BYTE_ORDER_ABCD; - if (wdev->pdata.use_rising_clk) - reg |= CFG_CLK_RISE_EDGE; - ret = wfx_config_reg_write(wdev, reg); - if (ret < 0) { - dev_err(wdev->dev, "bus returned an error during first write access. Host configuration error?\n"); - return -EIO; - } - - ret = wfx_config_reg_read(wdev, ®); - if (ret < 0) { - dev_err(wdev->dev, "bus returned an error during first read access. Bus configuration error?\n"); - return -EIO; - } - if (reg == 0 || reg == ~0) { - dev_err(wdev->dev, "chip mute. Bus configuration error or chip wasn't reset?\n"); - return -EIO; - } - dev_dbg(wdev->dev, "initial config register value: %08x\n", reg); - - hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg); - if (hw_revision == 0) { - dev_err(wdev->dev, "bad hardware revision number: %d\n", hw_revision); - return -ENODEV; - } - hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg); - if (hw_type == 1) { - dev_notice(wdev->dev, "development hardware detected\n"); - wakeup_timeout = 2000; - } - - ret = init_gpr(wdev); - if (ret < 0) - return ret; - - ret = wfx_control_reg_write(wdev, CTRL_WLAN_WAKEUP); - if (ret < 0) - return -EIO; - start = ktime_get(); - for (;;) { - ret = wfx_control_reg_read(wdev, ®); - now = ktime_get(); - if (reg & CTRL_WLAN_READY) - break; - if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) { - dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n"); - return -ETIMEDOUT; - } - } - dev_dbg(wdev->dev, "chip wake up after %lldus\n", ktime_us_delta(now, start)); - - ret = wfx_config_reg_write_bits(wdev, CFG_CPU_RESET, 0); - if (ret < 0) - return ret; - ret = load_firmware_secure(wdev); - if (ret < 0) - return ret; - return wfx_config_reg_write_bits(wdev, - CFG_DIRECT_ACCESS_MODE | - CFG_IRQ_ENABLE_DATA | - CFG_IRQ_ENABLE_WRDY, - CFG_IRQ_ENABLE_DATA); -} diff --git a/drivers/staging/wfx/fwio.h b/drivers/staging/wfx/fwio.h deleted file mode 100644 index eeea61210eca..000000000000 --- a/drivers/staging/wfx/fwio.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Firmware loading. - * - * Copyright (c) 2017-2019, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_FWIO_H -#define WFX_FWIO_H - -struct wfx_dev; - -int wfx_init_device(struct wfx_dev *wdev); - -#endif diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h deleted file mode 100644 index 8b91b1d4a46b..000000000000 --- a/drivers/staging/wfx/hif_api_cmd.h +++ /dev/null @@ -1,553 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ -/* - * WF200 hardware interface definitions - * - * Copyright (c) 2018-2020, Silicon Laboratories Inc. 
- */ - -#ifndef WFX_HIF_API_CMD_H -#define WFX_HIF_API_CMD_H - -#include "hif_api_general.h" - -enum wfx_hif_requests_ids { - HIF_REQ_ID_RESET = 0x0a, - HIF_REQ_ID_READ_MIB = 0x05, - HIF_REQ_ID_WRITE_MIB = 0x06, - HIF_REQ_ID_START_SCAN = 0x07, - HIF_REQ_ID_STOP_SCAN = 0x08, - HIF_REQ_ID_TX = 0x04, - HIF_REQ_ID_JOIN = 0x0b, - HIF_REQ_ID_SET_PM_MODE = 0x10, - HIF_REQ_ID_SET_BSS_PARAMS = 0x11, - HIF_REQ_ID_ADD_KEY = 0x0c, - HIF_REQ_ID_REMOVE_KEY = 0x0d, - HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13, - HIF_REQ_ID_START = 0x17, - HIF_REQ_ID_BEACON_TRANSMIT = 0x18, - HIF_REQ_ID_UPDATE_IE = 0x1b, - HIF_REQ_ID_MAP_LINK = 0x1c, -}; - -enum wfx_hif_confirmations_ids { - HIF_CNF_ID_RESET = 0x0a, - HIF_CNF_ID_READ_MIB = 0x05, - HIF_CNF_ID_WRITE_MIB = 0x06, - HIF_CNF_ID_START_SCAN = 0x07, - HIF_CNF_ID_STOP_SCAN = 0x08, - HIF_CNF_ID_TX = 0x04, - HIF_CNF_ID_MULTI_TRANSMIT = 0x1e, - HIF_CNF_ID_JOIN = 0x0b, - HIF_CNF_ID_SET_PM_MODE = 0x10, - HIF_CNF_ID_SET_BSS_PARAMS = 0x11, - HIF_CNF_ID_ADD_KEY = 0x0c, - HIF_CNF_ID_REMOVE_KEY = 0x0d, - HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13, - HIF_CNF_ID_START = 0x17, - HIF_CNF_ID_BEACON_TRANSMIT = 0x18, - HIF_CNF_ID_UPDATE_IE = 0x1b, - HIF_CNF_ID_MAP_LINK = 0x1c, -}; - -enum wfx_hif_indications_ids { - HIF_IND_ID_RX = 0x84, - HIF_IND_ID_SCAN_CMPL = 0x86, - HIF_IND_ID_JOIN_COMPLETE = 0x8f, - HIF_IND_ID_SET_PM_MODE_CMPL = 0x89, - HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c, - HIF_IND_ID_EVENT = 0x85 -}; - -struct wfx_hif_req_reset { - u8 reset_stat:1; - u8 reset_all_int:1; - u8 reserved1:6; - u8 reserved2[3]; -} __packed; - -struct wfx_hif_cnf_reset { - __le32 status; -} __packed; - -struct wfx_hif_req_read_mib { - __le16 mib_id; - __le16 reserved; -} __packed; - -struct wfx_hif_cnf_read_mib { - __le32 status; - __le16 mib_id; - __le16 length; - u8 mib_data[]; -} __packed; - -struct wfx_hif_req_write_mib { - __le16 mib_id; - __le16 length; - u8 mib_data[]; -} __packed; - -struct wfx_hif_cnf_write_mib { - __le32 status; -} __packed; - -struct wfx_hif_req_update_ie { - u8 beacon:1; - u8 probe_resp:1; - u8 probe_req:1; - u8 reserved1:5; - u8 reserved2; - __le16 num_ies; - u8 ie[]; -} __packed; - -struct wfx_hif_cnf_update_ie { - __le32 status; -} __packed; - -struct wfx_hif_ssid_def { - __le32 ssid_length; - u8 ssid[IEEE80211_MAX_SSID_LEN]; -} __packed; - -#define HIF_API_MAX_NB_SSIDS 2 -#define HIF_API_MAX_NB_CHANNELS 14 - -struct wfx_hif_req_start_scan_alt { - u8 band; - u8 maintain_current_bss:1; - u8 periodic:1; - u8 reserved1:6; - u8 disallow_ps:1; - u8 reserved2:1; - u8 short_preamble:1; - u8 reserved3:5; - u8 max_transmit_rate; - __le16 periodic_interval; - u8 reserved4; - s8 periodic_rssi_thr; - u8 num_of_probe_requests; - u8 probe_delay; - u8 num_of_ssids; - u8 num_of_channels; - __le32 min_channel_time; - __le32 max_channel_time; - __le32 tx_power_level; /* signed value */ - struct wfx_hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS]; - u8 channel_list[]; -} __packed; - -struct wfx_hif_cnf_start_scan { - __le32 status; -} __packed; - -struct wfx_hif_cnf_stop_scan { - __le32 status; -} __packed; - -enum wfx_hif_pm_mode_status { - HIF_PM_MODE_ACTIVE = 0x0, - HIF_PM_MODE_PS = 0x1, - HIF_PM_MODE_UNDETERMINED = 0x2 -}; - -struct wfx_hif_ind_scan_cmpl { - __le32 status; - u8 pm_mode; - u8 num_channels_completed; - __le16 reserved; -} __packed; - -enum wfx_hif_queue_id { - HIF_QUEUE_ID_BACKGROUND = 0x0, - HIF_QUEUE_ID_BESTEFFORT = 0x1, - HIF_QUEUE_ID_VIDEO = 0x2, - HIF_QUEUE_ID_VOICE = 0x3 -}; - -enum wfx_hif_frame_format { - HIF_FRAME_FORMAT_NON_HT = 0x0, - HIF_FRAME_FORMAT_MIXED_FORMAT_HT = 
0x1, - HIF_FRAME_FORMAT_GF_HT_11N = 0x2 -}; - -struct wfx_hif_req_tx { - /* packet_id is not interpreted by the device, so it is not necessary to declare it little - * endian - */ - u32 packet_id; - u8 max_tx_rate; - u8 queue_id:2; - u8 peer_sta_id:4; - u8 reserved1:2; - u8 more:1; - u8 fc_offset:3; - u8 after_dtim:1; - u8 reserved2:3; - u8 start_exp:1; - u8 reserved3:3; - u8 retry_policy_index:4; - __le32 reserved4; - __le32 expire_time; - u8 frame_format:4; - u8 fec_coding:1; - u8 short_gi:1; - u8 reserved5:1; - u8 stbc:1; - u8 reserved6; - u8 aggregation:1; - u8 reserved7:7; - u8 reserved8; - u8 frame[]; -} __packed; - -enum wfx_hif_qos_ackplcy { - HIF_QOS_ACKPLCY_NORMAL = 0x0, - HIF_QOS_ACKPLCY_TXNOACK = 0x1, - HIF_QOS_ACKPLCY_NOEXPACK = 0x2, - HIF_QOS_ACKPLCY_BLCKACK = 0x3 -}; - -struct wfx_hif_cnf_tx { - __le32 status; - /* packet_id is copied from struct wfx_hif_req_tx without been interpreted by the device, so - * it is not necessary to declare it little endian - */ - u32 packet_id; - u8 txed_rate; - u8 ack_failures; - u8 aggr:1; - u8 requeue:1; - u8 ack_policy:2; - u8 txop_limit:1; - u8 reserved1:3; - u8 reserved2; - __le32 media_delay; - __le32 tx_queue_delay; -} __packed; - -struct wfx_hif_cnf_multi_transmit { - u8 num_tx_confs; - u8 reserved[3]; - struct wfx_hif_cnf_tx tx_conf_payload[]; -} __packed; - -enum wfx_hif_ri_flags_encrypt { - HIF_RI_FLAGS_UNENCRYPTED = 0x0, - HIF_RI_FLAGS_WEP_ENCRYPTED = 0x1, - HIF_RI_FLAGS_TKIP_ENCRYPTED = 0x2, - HIF_RI_FLAGS_AES_ENCRYPTED = 0x3, - HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4 -}; - -struct wfx_hif_ind_rx { - __le32 status; - u8 channel_number; - u8 reserved1; - u8 rxed_rate; - u8 rcpi_rssi; - u8 encryp:3; - u8 in_aggr:1; - u8 first_aggr:1; - u8 last_aggr:1; - u8 defrag:1; - u8 beacon:1; - u8 tim:1; - u8 bitmap:1; - u8 match_ssid:1; - u8 match_bssid:1; - u8 more:1; - u8 reserved2:1; - u8 ht:1; - u8 stbc:1; - u8 match_uc_addr:1; - u8 match_mc_addr:1; - u8 match_bc_addr:1; - u8 key_type:1; - u8 key_index:4; - u8 reserved3:1; - u8 peer_sta_id:4; - u8 reserved4:2; - u8 reserved5:1; - u8 frame[]; -} __packed; - -struct wfx_hif_req_edca_queue_params { - u8 queue_id; - u8 reserved1; - u8 aifsn; - u8 reserved2; - __le16 cw_min; - __le16 cw_max; - __le16 tx_op_limit; - __le16 allowed_medium_time; - __le32 reserved3; -} __packed; - -struct wfx_hif_cnf_edca_queue_params { - __le32 status; -} __packed; - -struct wfx_hif_req_join { - u8 infrastructure_bss_mode:1; - u8 reserved1:7; - u8 band; - u8 channel_number; - u8 reserved2; - u8 bssid[ETH_ALEN]; - __le16 atim_window; - u8 short_preamble:1; - u8 reserved3:7; - u8 probe_for_join; - u8 reserved4; - u8 reserved5:2; - u8 force_no_beacon:1; - u8 force_with_ind:1; - u8 reserved6:4; - __le32 ssid_length; - u8 ssid[IEEE80211_MAX_SSID_LEN]; - __le32 beacon_interval; - __le32 basic_rate_set; -} __packed; - -struct wfx_hif_cnf_join { - __le32 status; -} __packed; - -struct wfx_hif_ind_join_complete { - __le32 status; -} __packed; - -struct wfx_hif_req_set_bss_params { - u8 lost_count_only:1; - u8 reserved:7; - u8 beacon_lost_count; - __le16 aid; - __le32 operational_rate_set; -} __packed; - -struct wfx_hif_cnf_set_bss_params { - __le32 status; -} __packed; - -struct wfx_hif_req_set_pm_mode { - u8 enter_psm:1; - u8 reserved:6; - u8 fast_psm:1; - u8 fast_psm_idle_period; - u8 ap_psm_change_period; - u8 min_auto_ps_poll_period; -} __packed; - -struct wfx_hif_cnf_set_pm_mode { - __le32 status; -} __packed; - -struct wfx_hif_ind_set_pm_mode_cmpl { - __le32 status; - u8 pm_mode; - u8 reserved[3]; -} __packed; - -struct 
wfx_hif_req_start { - u8 mode; - u8 band; - u8 channel_number; - u8 reserved1; - __le32 reserved2; - __le32 beacon_interval; - u8 dtim_period; - u8 short_preamble:1; - u8 reserved3:7; - u8 reserved4; - u8 ssid_length; - u8 ssid[IEEE80211_MAX_SSID_LEN]; - __le32 basic_rate_set; -} __packed; - -struct wfx_hif_cnf_start { - __le32 status; -} __packed; - -struct wfx_hif_req_beacon_transmit { - u8 enable_beaconing; - u8 reserved[3]; -} __packed; - -struct wfx_hif_cnf_beacon_transmit { - __le32 status; -} __packed; - -#define HIF_LINK_ID_MAX 14 -#define HIF_LINK_ID_NOT_ASSOCIATED (HIF_LINK_ID_MAX + 1) - -struct wfx_hif_req_map_link { - u8 mac_addr[ETH_ALEN]; - u8 unmap:1; - u8 mfpc:1; - u8 reserved:6; - u8 peer_sta_id; -} __packed; - -struct wfx_hif_cnf_map_link { - __le32 status; -} __packed; - -struct wfx_hif_ind_suspend_resume_tx { - u8 resume:1; - u8 reserved1:2; - u8 bc_mc_only:1; - u8 reserved2:4; - u8 reserved3; - __le16 peer_sta_set; -} __packed; - - -#define MAX_KEY_ENTRIES 24 -#define HIF_API_WEP_KEY_DATA_SIZE 16 -#define HIF_API_TKIP_KEY_DATA_SIZE 16 -#define HIF_API_RX_MIC_KEY_SIZE 8 -#define HIF_API_TX_MIC_KEY_SIZE 8 -#define HIF_API_AES_KEY_DATA_SIZE 16 -#define HIF_API_WAPI_KEY_DATA_SIZE 16 -#define HIF_API_MIC_KEY_DATA_SIZE 16 -#define HIF_API_IGTK_KEY_DATA_SIZE 16 -#define HIF_API_RX_SEQUENCE_COUNTER_SIZE 8 -#define HIF_API_IPN_SIZE 8 - -enum wfx_hif_key_type { - HIF_KEY_TYPE_WEP_DEFAULT = 0x0, - HIF_KEY_TYPE_WEP_PAIRWISE = 0x1, - HIF_KEY_TYPE_TKIP_GROUP = 0x2, - HIF_KEY_TYPE_TKIP_PAIRWISE = 0x3, - HIF_KEY_TYPE_AES_GROUP = 0x4, - HIF_KEY_TYPE_AES_PAIRWISE = 0x5, - HIF_KEY_TYPE_WAPI_GROUP = 0x6, - HIF_KEY_TYPE_WAPI_PAIRWISE = 0x7, - HIF_KEY_TYPE_IGTK_GROUP = 0x8, - HIF_KEY_TYPE_NONE = 0x9 -}; - -struct wfx_hif_wep_pairwise_key { - u8 peer_address[ETH_ALEN]; - u8 reserved; - u8 key_length; - u8 key_data[HIF_API_WEP_KEY_DATA_SIZE]; -} __packed; - -struct wfx_hif_wep_group_key { - u8 key_id; - u8 key_length; - u8 reserved[2]; - u8 key_data[HIF_API_WEP_KEY_DATA_SIZE]; -} __packed; - -struct wfx_hif_tkip_pairwise_key { - u8 peer_address[ETH_ALEN]; - u8 reserved[2]; - u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE]; - u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE]; - u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE]; -} __packed; - -struct wfx_hif_tkip_group_key { - u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE]; - u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE]; - u8 key_id; - u8 reserved[3]; - u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE]; -} __packed; - -struct wfx_hif_aes_pairwise_key { - u8 peer_address[ETH_ALEN]; - u8 reserved[2]; - u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE]; -} __packed; - -struct wfx_hif_aes_group_key { - u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE]; - u8 key_id; - u8 reserved[3]; - u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE]; -} __packed; - -struct wfx_hif_wapi_pairwise_key { - u8 peer_address[ETH_ALEN]; - u8 key_id; - u8 reserved; - u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE]; - u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE]; -} __packed; - -struct wfx_hif_wapi_group_key { - u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE]; - u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE]; - u8 key_id; - u8 reserved[3]; -} __packed; - -struct wfx_hif_igtk_group_key { - u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE]; - u8 key_id; - u8 reserved[3]; - u8 ipn[HIF_API_IPN_SIZE]; -} __packed; - -struct wfx_hif_req_add_key { - u8 type; - u8 entry_index; - u8 int_id:2; - u8 reserved1:6; - u8 reserved2; - union { - struct wfx_hif_wep_pairwise_key wep_pairwise_key; - struct wfx_hif_wep_group_key 
wep_group_key; - struct wfx_hif_tkip_pairwise_key tkip_pairwise_key; - struct wfx_hif_tkip_group_key tkip_group_key; - struct wfx_hif_aes_pairwise_key aes_pairwise_key; - struct wfx_hif_aes_group_key aes_group_key; - struct wfx_hif_wapi_pairwise_key wapi_pairwise_key; - struct wfx_hif_wapi_group_key wapi_group_key; - struct wfx_hif_igtk_group_key igtk_group_key; - } key; -} __packed; - -struct wfx_hif_cnf_add_key { - __le32 status; -} __packed; - -struct wfx_hif_req_remove_key { - u8 entry_index; - u8 reserved[3]; -} __packed; - -struct wfx_hif_cnf_remove_key { - __le32 status; -} __packed; - -enum wfx_hif_event_ind { - HIF_EVENT_IND_BSSLOST = 0x1, - HIF_EVENT_IND_BSSREGAINED = 0x2, - HIF_EVENT_IND_RCPI_RSSI = 0x3, - HIF_EVENT_IND_PS_MODE_ERROR = 0x4, - HIF_EVENT_IND_INACTIVITY = 0x5 -}; - -enum wfx_hif_ps_mode_error { - HIF_PS_ERROR_NO_ERROR = 0, - HIF_PS_ERROR_AP_NOT_RESP_TO_POLL = 1, - HIF_PS_ERROR_AP_NOT_RESP_TO_UAPSD_TRIGGER = 2, - HIF_PS_ERROR_AP_SENT_UNICAST_IN_DOZE = 3, - HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4 -}; - -struct wfx_hif_ind_event { - __le32 event_id; - union { - u8 rcpi_rssi; - __le32 ps_mode_error; - __le32 peer_sta_set; - } event_data; -} __packed; - -#endif diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h deleted file mode 100644 index 4d400fdc2252..000000000000 --- a/drivers/staging/wfx/hif_api_general.h +++ /dev/null @@ -1,252 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ -/* - * WF200 hardware interface definitions - * - * Copyright (c) 2018-2020, Silicon Laboratories Inc. - */ - -#ifndef WFX_HIF_API_GENERAL_H -#define WFX_HIF_API_GENERAL_H - -#include -#include - -#define HIF_ID_IS_INDICATION 0x80 -#define HIF_COUNTER_MAX 7 - -struct wfx_hif_msg { - __le16 len; - u8 id; - u8 reserved:1; - u8 interface:2; - u8 seqnum:3; - u8 encrypted:2; - u8 body[]; -} __packed; - -enum wfx_hif_general_requests_ids { - HIF_REQ_ID_CONFIGURATION = 0x09, - HIF_REQ_ID_CONTROL_GPIO = 0x26, - HIF_REQ_ID_SET_SL_MAC_KEY = 0x27, - HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28, - HIF_REQ_ID_SL_CONFIGURE = 0x29, - HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a, - HIF_REQ_ID_PTA_SETTINGS = 0x2b, - HIF_REQ_ID_PTA_PRIORITY = 0x2c, - HIF_REQ_ID_PTA_STATE = 0x2d, - HIF_REQ_ID_SHUT_DOWN = 0x32, -}; - -enum wfx_hif_general_confirmations_ids { - HIF_CNF_ID_CONFIGURATION = 0x09, - HIF_CNF_ID_CONTROL_GPIO = 0x26, - HIF_CNF_ID_SET_SL_MAC_KEY = 0x27, - HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28, - HIF_CNF_ID_SL_CONFIGURE = 0x29, - HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a, - HIF_CNF_ID_PTA_SETTINGS = 0x2b, - HIF_CNF_ID_PTA_PRIORITY = 0x2c, - HIF_CNF_ID_PTA_STATE = 0x2d, - HIF_CNF_ID_SHUT_DOWN = 0x32, -}; - -enum wfx_hif_general_indications_ids { - HIF_IND_ID_EXCEPTION = 0xe0, - HIF_IND_ID_STARTUP = 0xe1, - HIF_IND_ID_WAKEUP = 0xe2, - HIF_IND_ID_GENERIC = 0xe3, - HIF_IND_ID_ERROR = 0xe4, - HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5 -}; - -#define HIF_STATUS_SUCCESS (cpu_to_le32(0x0000)) -#define HIF_STATUS_FAIL (cpu_to_le32(0x0001)) -#define HIF_STATUS_INVALID_PARAMETER (cpu_to_le32(0x0002)) -#define HIF_STATUS_WARNING (cpu_to_le32(0x0003)) -#define HIF_STATUS_UNKNOWN_REQUEST (cpu_to_le32(0x0004)) -#define HIF_STATUS_RX_FAIL_DECRYPT (cpu_to_le32(0x0010)) -#define HIF_STATUS_RX_FAIL_MIC (cpu_to_le32(0x0011)) -#define HIF_STATUS_RX_FAIL_NO_KEY (cpu_to_le32(0x0012)) -#define HIF_STATUS_TX_FAIL_RETRIES (cpu_to_le32(0x0013)) -#define HIF_STATUS_TX_FAIL_TIMEOUT (cpu_to_le32(0x0014)) -#define HIF_STATUS_TX_FAIL_REQUEUE (cpu_to_le32(0x0015)) -#define HIF_STATUS_REFUSED 
(cpu_to_le32(0x0016)) -#define HIF_STATUS_BUSY (cpu_to_le32(0x0017)) -#define HIF_STATUS_SLK_SET_KEY_SUCCESS (cpu_to_le32(0x005A)) -#define HIF_STATUS_SLK_SET_KEY_ALREADY_BURNED (cpu_to_le32(0x006B)) -#define HIF_STATUS_SLK_SET_KEY_DISALLOWED_MODE (cpu_to_le32(0x007C)) -#define HIF_STATUS_SLK_SET_KEY_UNKNOWN_MODE (cpu_to_le32(0x008D)) -#define HIF_STATUS_SLK_NEGO_SUCCESS (cpu_to_le32(0x009E)) -#define HIF_STATUS_SLK_NEGO_FAILED (cpu_to_le32(0x00AF)) -#define HIF_STATUS_ROLLBACK_SUCCESS (cpu_to_le32(0x1234)) -#define HIF_STATUS_ROLLBACK_FAIL (cpu_to_le32(0x1256)) - -enum wfx_hif_api_rate_index { - API_RATE_INDEX_B_1MBPS = 0, - API_RATE_INDEX_B_2MBPS = 1, - API_RATE_INDEX_B_5P5MBPS = 2, - API_RATE_INDEX_B_11MBPS = 3, - API_RATE_INDEX_PBCC_22MBPS = 4, - API_RATE_INDEX_PBCC_33MBPS = 5, - API_RATE_INDEX_G_6MBPS = 6, - API_RATE_INDEX_G_9MBPS = 7, - API_RATE_INDEX_G_12MBPS = 8, - API_RATE_INDEX_G_18MBPS = 9, - API_RATE_INDEX_G_24MBPS = 10, - API_RATE_INDEX_G_36MBPS = 11, - API_RATE_INDEX_G_48MBPS = 12, - API_RATE_INDEX_G_54MBPS = 13, - API_RATE_INDEX_N_6P5MBPS = 14, - API_RATE_INDEX_N_13MBPS = 15, - API_RATE_INDEX_N_19P5MBPS = 16, - API_RATE_INDEX_N_26MBPS = 17, - API_RATE_INDEX_N_39MBPS = 18, - API_RATE_INDEX_N_52MBPS = 19, - API_RATE_INDEX_N_58P5MBPS = 20, - API_RATE_INDEX_N_65MBPS = 21, - API_RATE_NUM_ENTRIES = 22 -}; - -struct wfx_hif_ind_startup { - __le32 status; - __le16 hardware_id; - u8 opn[14]; - u8 uid[8]; - __le16 num_inp_ch_bufs; - __le16 size_inp_ch_buf; - u8 num_links_ap; - u8 num_interfaces; - u8 mac_addr[2][ETH_ALEN]; - u8 api_version_minor; - u8 api_version_major; - u8 link_mode:2; - u8 reserved1:6; - u8 reserved2; - u8 reserved3; - u8 reserved4; - u8 firmware_build; - u8 firmware_minor; - u8 firmware_major; - u8 firmware_type; - u8 disabled_channel_list[2]; - u8 region_sel_mode:4; - u8 reserved5:4; - u8 phy1_region:3; - u8 phy0_region:3; - u8 otp_phy_ver:2; - __le32 supported_rate_mask; - u8 firmware_label[128]; -} __packed; - -struct wfx_hif_ind_wakeup { -} __packed; - -struct wfx_hif_req_configuration { - __le16 length; - u8 pds_data[]; -} __packed; - -struct wfx_hif_cnf_configuration { - __le32 status; -} __packed; - -enum wfx_hif_gpio_mode { - HIF_GPIO_MODE_D0 = 0x0, - HIF_GPIO_MODE_D1 = 0x1, - HIF_GPIO_MODE_OD0 = 0x2, - HIF_GPIO_MODE_OD1 = 0x3, - HIF_GPIO_MODE_TRISTATE = 0x4, - HIF_GPIO_MODE_TOGGLE = 0x5, - HIF_GPIO_MODE_READ = 0x6 -}; - -struct wfx_hif_req_control_gpio { - u8 gpio_label; - u8 gpio_mode; -} __packed; - -struct wfx_hif_cnf_control_gpio { - __le32 status; - __le32 value; -} __packed; - -enum wfx_hif_generic_indication_type { - HIF_GENERIC_INDICATION_TYPE_RAW = 0x0, - HIF_GENERIC_INDICATION_TYPE_STRING = 0x1, - HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2, - HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO = 0x3, -}; - -struct wfx_hif_rx_stats { - __le32 nb_rx_frame; - __le32 nb_crc_frame; - __le32 per_total; - __le32 throughput; - __le32 nb_rx_by_rate[API_RATE_NUM_ENTRIES]; - __le16 per[API_RATE_NUM_ENTRIES]; - __le16 snr[API_RATE_NUM_ENTRIES]; /* signed value */ - __le16 rssi[API_RATE_NUM_ENTRIES]; /* signed value */ - __le16 cfo[API_RATE_NUM_ENTRIES]; /* signed value */ - __le32 date; - __le32 pwr_clk_freq; - u8 is_ext_pwr_clk; - s8 current_temp; -} __packed; - -struct wfx_hif_tx_power_loop_info { - __le16 tx_gain_dig; - __le16 tx_gain_pa; - __le16 target_pout; /* signed value */ - __le16 p_estimation; /* signed value */ - __le16 vpdet; - u8 measurement_index; - u8 reserved; -} __packed; - -struct wfx_hif_ind_generic { - __le32 type; - union { - struct 
wfx_hif_rx_stats rx_stats; - struct wfx_hif_tx_power_loop_info tx_power_loop_info; - } data; -} __packed; - -enum wfx_hif_error { - HIF_ERROR_FIRMWARE_ROLLBACK = 0x00, - HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x01, - HIF_ERROR_SLK_OUTDATED_SESSION_KEY = 0x02, - HIF_ERROR_SLK_SESSION_KEY = 0x03, - HIF_ERROR_OOR_VOLTAGE = 0x04, - HIF_ERROR_PDS_PAYLOAD = 0x05, - HIF_ERROR_OOR_TEMPERATURE = 0x06, - HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE = 0x07, - HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED = 0x08, - HIF_ERROR_SLK_OVERFLOW = 0x09, - HIF_ERROR_SLK_DECRYPTION = 0x0a, - HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE = 0x0b, - HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW = 0x0c, - HIF_ERROR_HIF_RX_DATA_TOO_LARGE = 0x0e, - HIF_ERROR_HIF_TX_QUEUE_FULL = 0x0d, - HIF_ERROR_HIF_BUS = 0x0f, - HIF_ERROR_PDS_TESTFEATURE = 0x10, - HIF_ERROR_SLK_UNCONFIGURED = 0x11, -}; - -struct wfx_hif_ind_error { - __le32 type; - u8 data[]; -} __packed; - -struct wfx_hif_ind_exception { - __le32 type; - u8 data[]; -} __packed; - -enum wfx_hif_secure_link_state { - SEC_LINK_UNAVAILABLE = 0x0, - SEC_LINK_RESERVED = 0x1, - SEC_LINK_EVAL = 0x2, - SEC_LINK_ENFORCED = 0x3 -}; - -#endif diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h deleted file mode 100644 index 7b68b83866c9..000000000000 --- a/drivers/staging/wfx/hif_api_mib.h +++ /dev/null @@ -1,346 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only or Apache-2.0 */ -/* - * WF200 hardware interface definitions - * - * Copyright (c) 2018-2020, Silicon Laboratories Inc. - */ - -#ifndef WFX_HIF_API_MIB_H -#define WFX_HIF_API_MIB_H - -#include "hif_api_general.h" - -#define HIF_API_IPV4_ADDRESS_SIZE 4 -#define HIF_API_IPV6_ADDRESS_SIZE 16 - -enum wfx_hif_mib_ids { - HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000, - HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001, - HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002, - HIF_MIB_ID_CCA_CONFIG = 0x2003, - HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010, - HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011, - HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012, - HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013, - HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014, - HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015, - HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016, - HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017, - HIF_MIB_ID_SET_DATA_FILTERING = 0x2018, - HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019, - HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A, - HIF_MIB_ID_RX_FILTER = 0x201B, - HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C, - HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D, - HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030, - HIF_MIB_ID_TSF_COUNTER = 0x2031, - HIF_MIB_ID_STATISTICS_TABLE = 0x2032, - HIF_MIB_ID_COUNTERS_TABLE = 0x2033, - HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034, - HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035, - HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040, - HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041, - HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042, - HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043, - HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044, - HIF_MIB_ID_SLOT_TIME = 0x2045, - HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046, - HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047, - HIF_MIB_ID_TEMPLATE_FRAME = 0x2048, - HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049, - HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A, - HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B, - HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C, - HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D, - HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E, - HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F, - HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050, - HIF_MIB_ID_SET_HT_PROTECTION = 
0x2051, - HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052, - HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053, - HIF_MIB_ID_INACTIVITY_TIMER = 0x2054, - HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055, - HIF_MIB_ID_BEACON_STATS = 0x2056, -}; - -enum wfx_hif_op_power_mode { - HIF_OP_POWER_MODE_ACTIVE = 0x0, - HIF_OP_POWER_MODE_DOZE = 0x1, - HIF_OP_POWER_MODE_QUIESCENT = 0x2 -}; - -struct wfx_hif_mib_gl_operational_power_mode { - u8 power_mode:4; - u8 reserved1:3; - u8 wup_ind_activation:1; - u8 reserved2[3]; -} __packed; - -struct wfx_hif_mib_gl_set_multi_msg { - u8 enable_multi_tx_conf:1; - u8 reserved1:7; - u8 reserved2[3]; -} __packed; - -enum wfx_hif_arp_ns_frame_treatment { - HIF_ARP_NS_FILTERING_DISABLE = 0x0, - HIF_ARP_NS_FILTERING_ENABLE = 0x1, - HIF_ARP_NS_REPLY_ENABLE = 0x2 -}; - -struct wfx_hif_mib_arp_ip_addr_table { - u8 condition_idx; - u8 arp_enable; - u8 reserved[2]; - u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE]; -} __packed; - -struct wfx_hif_mib_rx_filter { - u8 reserved1:1; - u8 bssid_filter:1; - u8 reserved2:1; - u8 fwd_probe_req:1; - u8 keep_alive_filter:1; - u8 reserved3:3; - u8 reserved4[3]; -} __packed; - -struct wfx_hif_ie_table_entry { - u8 ie_id; - u8 has_changed:1; - u8 no_longer:1; - u8 has_appeared:1; - u8 reserved:1; - u8 num_match_data:4; - u8 oui[3]; - u8 match_data[3]; -} __packed; - -struct wfx_hif_mib_bcn_filter_table { - __le32 num_of_info_elmts; - struct wfx_hif_ie_table_entry ie_table[]; -} __packed; - -enum wfx_hif_beacon_filter { - HIF_BEACON_FILTER_DISABLE = 0x0, - HIF_BEACON_FILTER_ENABLE = 0x1, - HIF_BEACON_FILTER_AUTO_ERP = 0x2 -}; - -struct wfx_hif_mib_bcn_filter_enable { - __le32 enable; - __le32 bcn_count; -} __packed; - -struct wfx_hif_mib_extended_count_table { - __le32 count_drop_plcp; - __le32 count_drop_fcs; - __le32 count_tx_frames; - __le32 count_rx_frames; - __le32 count_rx_frames_failed; - __le32 count_drop_decryption; - __le32 count_drop_tkip_mic; - __le32 count_drop_no_key; - __le32 count_tx_frames_multicast; - __le32 count_tx_frames_success; - __le32 count_tx_frames_failed; - __le32 count_tx_frames_retried; - __le32 count_tx_frames_multi_retried; - __le32 count_drop_duplicate; - __le32 count_rts_success; - __le32 count_rts_failed; - __le32 count_ack_failed; - __le32 count_rx_frames_multicast; - __le32 count_rx_frames_success; - __le32 count_drop_cmac_icv; - __le32 count_drop_cmac_replay; - __le32 count_drop_ccmp_replay; - __le32 count_drop_bip_mic; - __le32 count_rx_bcn_success; - __le32 count_rx_bcn_miss; - __le32 count_rx_bcn_dtim; - __le32 count_rx_bcn_dtim_aid0_clr; - __le32 count_rx_bcn_dtim_aid0_set; - __le32 reserved[12]; -} __packed; - -struct wfx_hif_mib_count_table { - __le32 count_drop_plcp; - __le32 count_drop_fcs; - __le32 count_tx_frames; - __le32 count_rx_frames; - __le32 count_rx_frames_failed; - __le32 count_drop_decryption; - __le32 count_drop_tkip_mic; - __le32 count_drop_no_key; - __le32 count_tx_frames_multicast; - __le32 count_tx_frames_success; - __le32 count_tx_frames_failed; - __le32 count_tx_frames_retried; - __le32 count_tx_frames_multi_retried; - __le32 count_drop_duplicate; - __le32 count_rts_success; - __le32 count_rts_failed; - __le32 count_ack_failed; - __le32 count_rx_frames_multicast; - __le32 count_rx_frames_success; - __le32 count_drop_cmac_icv; - __le32 count_drop_cmac_replay; - __le32 count_drop_ccmp_replay; - __le32 count_drop_bip_mic; -} __packed; - -struct wfx_hif_mib_mac_address { - u8 mac_addr[ETH_ALEN]; - __le16 reserved; -} __packed; - -struct wfx_hif_mib_wep_default_key_id { - u8 wep_default_key_id; - u8 
reserved[3]; -} __packed; - -struct wfx_hif_mib_dot11_rts_threshold { - __le32 threshold; -} __packed; - -struct wfx_hif_mib_slot_time { - __le32 slot_time; -} __packed; - -struct wfx_hif_mib_current_tx_power_level { - __le32 power_level; /* signed value */ -} __packed; - -struct wfx_hif_mib_non_erp_protection { - u8 use_cts_to_self:1; - u8 reserved1:7; - u8 reserved2[3]; -} __packed; - -enum wfx_hif_tmplt { - HIF_TMPLT_PRBREQ = 0x0, - HIF_TMPLT_BCN = 0x1, - HIF_TMPLT_NULL = 0x2, - HIF_TMPLT_QOSNUL = 0x3, - HIF_TMPLT_PSPOLL = 0x4, - HIF_TMPLT_PRBRES = 0x5, - HIF_TMPLT_ARP = 0x6, - HIF_TMPLT_NA = 0x7 -}; - -#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700 - -struct wfx_hif_mib_template_frame { - u8 frame_type; - u8 init_rate:7; - u8 mode:1; - __le16 frame_length; - u8 frame[]; -} __packed; - -struct wfx_hif_mib_beacon_wake_up_period { - u8 wakeup_period_min; - u8 receive_dtim:1; - u8 reserved1:7; - u8 wakeup_period_max; - u8 reserved2; -} __packed; - -struct wfx_hif_mib_rcpi_rssi_threshold { - u8 detection:1; - u8 rcpi_rssi:1; - u8 upperthresh:1; - u8 lowerthresh:1; - u8 reserved:4; - u8 lower_threshold; - u8 upper_threshold; - u8 rolling_average_count; -} __packed; - -#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16 - -struct wfx_hif_mib_block_ack_policy { - u8 block_ack_tx_tid_policy; - u8 reserved1; - u8 block_ack_rx_tid_policy; - u8 block_ack_rx_max_buffer_size; -} __packed; - -enum wfx_hif_mpdu_start_spacing { - HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0, - HIF_MPDU_START_SPACING_QUARTER = 0x1, - HIF_MPDU_START_SPACING_HALF = 0x2, - HIF_MPDU_START_SPACING_ONE = 0x3, - HIF_MPDU_START_SPACING_TWO = 0x4, - HIF_MPDU_START_SPACING_FOUR = 0x5, - HIF_MPDU_START_SPACING_EIGHT = 0x6, - HIF_MPDU_START_SPACING_SIXTEEN = 0x7 -}; - -struct wfx_hif_mib_set_association_mode { - u8 preambtype_use:1; - u8 mode:1; - u8 rateset:1; - u8 spacing:1; - u8 reserved1:4; - u8 short_preamble:1; - u8 reserved2:7; - u8 greenfield:1; - u8 reserved3:7; - u8 mpdu_start_spacing; - __le32 basic_rate_set; -} __packed; - -struct wfx_hif_mib_set_uapsd_information { - u8 trig_bckgrnd:1; - u8 trig_be:1; - u8 trig_video:1; - u8 trig_voice:1; - u8 reserved1:4; - u8 deliv_bckgrnd:1; - u8 deliv_be:1; - u8 deliv_video:1; - u8 deliv_voice:1; - u8 reserved2:4; - __le16 min_auto_trigger_interval; - __le16 max_auto_trigger_interval; - __le16 auto_trigger_step; -} __packed; - -struct wfx_hif_tx_rate_retry_policy { - u8 policy_index; - u8 short_retry_count; - u8 long_retry_count; - u8 first_rate_sel:2; - u8 terminate:1; - u8 count_init:1; - u8 reserved1:4; - u8 rate_recovery_count; - u8 reserved2[3]; - u8 rates[12]; -} __packed; - -#define HIF_TX_RETRY_POLICY_MAX 15 -#define HIF_TX_RETRY_POLICY_INVALID HIF_TX_RETRY_POLICY_MAX - -struct wfx_hif_mib_set_tx_rate_retry_policy { - u8 num_tx_rate_policies; - u8 reserved[3]; - struct wfx_hif_tx_rate_retry_policy tx_rate_retry_policy[]; -} __packed; - -struct wfx_hif_mib_protected_mgmt_policy { - u8 pmf_enable:1; - u8 unpmf_allowed:1; - u8 host_enc_auth_frames:1; - u8 reserved1:5; - u8 reserved2[3]; -} __packed; - -struct wfx_hif_mib_keep_alive_period { - __le16 keep_alive_period; - u8 reserved[2]; -} __packed; - -#endif diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c deleted file mode 100644 index 64ca8acb8e4f..000000000000 --- a/drivers/staging/wfx/hif_rx.c +++ /dev/null @@ -1,391 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Handling of the chip-to-host events (aka indications) of the hardware API. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include - -#include "hif_rx.h" -#include "wfx.h" -#include "scan.h" -#include "bh.h" -#include "sta.h" -#include "data_rx.h" -#include "hif_api_cmd.h" - -static int wfx_hif_generic_confirm(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - /* All confirm messages start with status */ - int status = le32_to_cpup((__le32 *)buf); - int cmd = hif->id; - int len = le16_to_cpu(hif->len) - 4; /* drop header */ - - WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error"); - - if (!wdev->hif_cmd.buf_send) { - dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd); - return -EINVAL; - } - - if (cmd != wdev->hif_cmd.buf_send->id) { - dev_warn(wdev->dev, "chip response mismatch request: 0x%.2x vs 0x%.2x\n", - cmd, wdev->hif_cmd.buf_send->id); - return -EINVAL; - } - - if (wdev->hif_cmd.buf_recv) { - if (wdev->hif_cmd.len_recv >= len && len > 0) - memcpy(wdev->hif_cmd.buf_recv, buf, len); - else - status = -EIO; - } - wdev->hif_cmd.ret = status; - - complete(&wdev->hif_cmd.done); - return status; -} - -static int wfx_hif_tx_confirm(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - const struct wfx_hif_cnf_tx *body = buf; - - wfx_tx_confirm_cb(wdev, body); - return 0; -} - -static int wfx_hif_multi_tx_confirm(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - const struct wfx_hif_cnf_multi_transmit *body = buf; - int i; - - WARN(body->num_tx_confs <= 0, "corrupted message"); - for (i = 0; i < body->num_tx_confs; i++) - wfx_tx_confirm_cb(wdev, &body->tx_conf_payload[i]); - return 0; -} - -static int wfx_hif_startup_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - const struct wfx_hif_ind_startup *body = buf; - - if (body->status || body->firmware_type > 4) { - dev_err(wdev->dev, "received invalid startup indication"); - return -EINVAL; - } - memcpy(&wdev->hw_caps, body, sizeof(struct wfx_hif_ind_startup)); - complete(&wdev->firmware_ready); - return 0; -} - -static int wfx_hif_wakeup_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - if (!wdev->pdata.gpio_wakeup || gpiod_get_value(wdev->pdata.gpio_wakeup) == 0) { - dev_warn(wdev->dev, "unexpected wake-up indication\n"); - return -EIO; - } - return 0; -} - -static int wfx_hif_receive_indication(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, - const void *buf, struct sk_buff *skb) -{ - struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); - const struct wfx_hif_ind_rx *body = buf; - - if (!wvif) { - dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); - return -EIO; - } - skb_pull(skb, sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_ind_rx)); - wfx_rx_cb(wvif, body, skb); - - return 0; -} - -static int wfx_hif_event_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); - const struct wfx_hif_ind_event *body = buf; - int type = le32_to_cpu(body->event_id); - - if (!wvif) { - dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); - return -EIO; - } - - switch (type) { - case HIF_EVENT_IND_RCPI_RSSI: - wfx_event_report_rssi(wvif, body->event_data.rcpi_rssi); - break; - case HIF_EVENT_IND_BSSLOST: - schedule_delayed_work(&wvif->beacon_loss_work, 0); - break; - case HIF_EVENT_IND_BSSREGAINED: - cancel_delayed_work(&wvif->beacon_loss_work); - dev_dbg(wdev->dev, "ignore BSSREGAINED 
indication\n"); - break; - case HIF_EVENT_IND_PS_MODE_ERROR: - dev_warn(wdev->dev, "error while processing power save request: %d\n", - le32_to_cpu(body->event_data.ps_mode_error)); - break; - default: - dev_warn(wdev->dev, "unhandled event indication: %.2x\n", type); - break; - } - return 0; -} - -static int wfx_hif_pm_mode_complete_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); - - if (!wvif) { - dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); - return -EIO; - } - complete(&wvif->set_pm_mode_complete); - - return 0; -} - -static int wfx_hif_scan_complete_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); - const struct wfx_hif_ind_scan_cmpl *body = buf; - - if (!wvif) { - dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); - return -EIO; - } - - wfx_scan_complete(wvif, body->num_channels_completed); - - return 0; -} - -static int wfx_hif_join_complete_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface); - - if (!wvif) { - dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); - return -EIO; - } - dev_warn(wdev->dev, "unattended JoinCompleteInd\n"); - - return 0; -} - -static int wfx_hif_suspend_resume_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - const struct wfx_hif_ind_suspend_resume_tx *body = buf; - struct wfx_vif *wvif; - - if (body->bc_mc_only) { - wvif = wdev_to_wvif(wdev, hif->interface); - if (!wvif) { - dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); - return -EIO; - } - if (body->resume) - wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE); - else - wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP); - } else { - WARN(body->peer_sta_set, "misunderstood indication"); - WARN(hif->interface != 2, "misunderstood indication"); - if (body->resume) - wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE); - else - wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP); - } - - return 0; -} - -static int wfx_hif_generic_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - const struct wfx_hif_ind_generic *body = buf; - int type = le32_to_cpu(body->type); - - switch (type) { - case HIF_GENERIC_INDICATION_TYPE_RAW: - return 0; - case HIF_GENERIC_INDICATION_TYPE_STRING: - dev_info(wdev->dev, "firmware says: %s\n", (char *)&body->data); - return 0; - case HIF_GENERIC_INDICATION_TYPE_RX_STATS: - mutex_lock(&wdev->rx_stats_lock); - /* Older firmware send a generic indication beside RxStats */ - if (!wfx_api_older_than(wdev, 1, 4)) - dev_info(wdev->dev, "Rx test ongoing. 
Temperature: %d degrees C\n", - body->data.rx_stats.current_temp); - memcpy(&wdev->rx_stats, &body->data.rx_stats, sizeof(wdev->rx_stats)); - mutex_unlock(&wdev->rx_stats_lock); - return 0; - case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO: - mutex_lock(&wdev->tx_power_loop_info_lock); - memcpy(&wdev->tx_power_loop_info, &body->data.tx_power_loop_info, - sizeof(wdev->tx_power_loop_info)); - mutex_unlock(&wdev->tx_power_loop_info_lock); - return 0; - default: - dev_err(wdev->dev, "generic_indication: unknown indication type: %#.8x\n", type); - return -EIO; - } -} - -static const struct { - int val; - const char *str; - bool has_param; -} hif_errors[] = { - { HIF_ERROR_FIRMWARE_ROLLBACK, - "rollback status" }, - { HIF_ERROR_FIRMWARE_DEBUG_ENABLED, - "debug feature enabled" }, - { HIF_ERROR_PDS_PAYLOAD, - "PDS version is not supported" }, - { HIF_ERROR_PDS_TESTFEATURE, - "PDS ask for an unknown test mode" }, - { HIF_ERROR_OOR_VOLTAGE, - "out-of-range power supply voltage", true }, - { HIF_ERROR_OOR_TEMPERATURE, - "out-of-range temperature", true }, - { HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE, - "secure link does not expect request during key exchange" }, - { HIF_ERROR_SLK_SESSION_KEY, - "secure link session key is invalid" }, - { HIF_ERROR_SLK_OVERFLOW, - "secure link overflow" }, - { HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE, - "secure link messages list does not match message encryption" }, - { HIF_ERROR_SLK_UNCONFIGURED, - "secure link not yet configured" }, - { HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW, - "bus clock is too slow (<1kHz)" }, - { HIF_ERROR_HIF_RX_DATA_TOO_LARGE, - "HIF message too large" }, - /* Following errors only exists in old firmware versions: */ - { HIF_ERROR_HIF_TX_QUEUE_FULL, - "HIF messages queue is full" }, - { HIF_ERROR_HIF_BUS, - "HIF bus" }, - { HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED, - "secure link does not support multi-tx confirmations" }, - { HIF_ERROR_SLK_OUTDATED_SESSION_KEY, - "secure link session key is outdated" }, - { HIF_ERROR_SLK_DECRYPTION, - "secure link params (nonce or tag) mismatch" }, -}; - -static int wfx_hif_error_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - const struct wfx_hif_ind_error *body = buf; - int type = le32_to_cpu(body->type); - int param = (s8)body->data[0]; - int i; - - for (i = 0; i < ARRAY_SIZE(hif_errors); i++) - if (type == hif_errors[i].val) - break; - if (i < ARRAY_SIZE(hif_errors)) - if (hif_errors[i].has_param) - dev_err(wdev->dev, "asynchronous error: %s: %d\n", - hif_errors[i].str, param); - else - dev_err(wdev->dev, "asynchronous error: %s\n", hif_errors[i].str); - else - dev_err(wdev->dev, "asynchronous error: unknown: %08x\n", type); - print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, - 16, 1, hif, le16_to_cpu(hif->len), false); - wdev->chip_frozen = true; - - return 0; -}; - -static int wfx_hif_exception_indication(struct wfx_dev *wdev, - const struct wfx_hif_msg *hif, const void *buf) -{ - const struct wfx_hif_ind_exception *body = buf; - int type = le32_to_cpu(body->type); - - if (type == 4) - dev_err(wdev->dev, "firmware assert %d\n", le32_to_cpup((__le32 *)body->data)); - else - dev_err(wdev->dev, "firmware exception\n"); - print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, - 16, 1, hif, le16_to_cpu(hif->len), false); - wdev->chip_frozen = true; - - return -1; -} - -static const struct { - int msg_id; - int (*handler)(struct wfx_dev *wdev, const struct wfx_hif_msg *hif, const void *buf); -} hif_handlers[] = { - /* Confirmations */ - { HIF_CNF_ID_TX, wfx_hif_tx_confirm }, 
- { HIF_CNF_ID_MULTI_TRANSMIT, wfx_hif_multi_tx_confirm }, - /* Indications */ - { HIF_IND_ID_STARTUP, wfx_hif_startup_indication }, - { HIF_IND_ID_WAKEUP, wfx_hif_wakeup_indication }, - { HIF_IND_ID_JOIN_COMPLETE, wfx_hif_join_complete_indication }, - { HIF_IND_ID_SET_PM_MODE_CMPL, wfx_hif_pm_mode_complete_indication }, - { HIF_IND_ID_SCAN_CMPL, wfx_hif_scan_complete_indication }, - { HIF_IND_ID_SUSPEND_RESUME_TX, wfx_hif_suspend_resume_indication }, - { HIF_IND_ID_EVENT, wfx_hif_event_indication }, - { HIF_IND_ID_GENERIC, wfx_hif_generic_indication }, - { HIF_IND_ID_ERROR, wfx_hif_error_indication }, - { HIF_IND_ID_EXCEPTION, wfx_hif_exception_indication }, - /* FIXME: allocate skb_p from wfx_hif_receive_indication and make it generic */ - //{ HIF_IND_ID_RX, wfx_hif_receive_indication }, -}; - -void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb) -{ - int i; - const struct wfx_hif_msg *hif = (const struct wfx_hif_msg *)skb->data; - int hif_id = hif->id; - - if (hif_id == HIF_IND_ID_RX) { - /* wfx_hif_receive_indication take care of skb lifetime */ - wfx_hif_receive_indication(wdev, hif, hif->body, skb); - return; - } - /* Note: mutex_is_lock cause an implicit memory barrier that protect buf_send */ - if (mutex_is_locked(&wdev->hif_cmd.lock) && - wdev->hif_cmd.buf_send && wdev->hif_cmd.buf_send->id == hif_id) { - wfx_hif_generic_confirm(wdev, hif, hif->body); - goto free; - } - for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) { - if (hif_handlers[i].msg_id == hif_id) { - if (hif_handlers[i].handler) - hif_handlers[i].handler(wdev, hif, hif->body); - goto free; - } - } - if (hif_id & HIF_ID_IS_INDICATION) - dev_err(wdev->dev, "unsupported HIF indication: ID %02x\n", hif_id); - else - dev_err(wdev->dev, "unexpected HIF confirmation: ID %02x\n", hif_id); -free: - dev_kfree_skb(skb); -} diff --git a/drivers/staging/wfx/hif_rx.h b/drivers/staging/wfx/hif_rx.h deleted file mode 100644 index 96543b81fa77..000000000000 --- a/drivers/staging/wfx/hif_rx.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Handling of the chip-to-host events (aka indications) of the hardware API. - * - * Copyright (c) 2017-2019, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - * Copyright (C) 2010, ST-Ericsson SA - */ -#ifndef WFX_HIF_RX_H -#define WFX_HIF_RX_H - -struct wfx_dev; -struct sk_buff; - -void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb); - -#endif diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c deleted file mode 100644 index ae3cc5919dcd..000000000000 --- a/drivers/staging/wfx/hif_tx.c +++ /dev/null @@ -1,490 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Implementation of the host-to-chip commands (aka request/confirmation) of the - * hardware API. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include - -#include "hif_tx.h" -#include "wfx.h" -#include "bh.h" -#include "hwio.h" -#include "debug.h" -#include "sta.h" - -void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd) -{ - init_completion(&hif_cmd->ready); - init_completion(&hif_cmd->done); - mutex_init(&hif_cmd->lock); -} - -static void wfx_fill_header(struct wfx_hif_msg *hif, int if_id, unsigned int cmd, size_t size) -{ - if (if_id == -1) - if_id = 2; - - WARN(cmd > 0x3f, "invalid hardware command %#.2x", cmd); - WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size); - WARN(if_id > 0x3, "invalid interface ID %d", if_id); - - hif->len = cpu_to_le16(size + 4); - hif->id = cmd; - hif->interface = if_id; -} - -static void *wfx_alloc_hif(size_t body_len, struct wfx_hif_msg **hif) -{ - *hif = kzalloc(sizeof(struct wfx_hif_msg) + body_len, GFP_KERNEL); - if (*hif) - return (*hif)->body; - else - return NULL; -} - -int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, - void *reply, size_t reply_len, bool no_reply) -{ - const char *mib_name = ""; - const char *mib_sep = ""; - int cmd = request->id; - int vif = request->interface; - int ret; - - /* Do not wait for any reply if chip is frozen */ - if (wdev->chip_frozen) - return -ETIMEDOUT; - - mutex_lock(&wdev->hif_cmd.lock); - WARN(wdev->hif_cmd.buf_send, "data locking error"); - - /* Note: call to complete() below has an implicit memory barrier that hopefully protect - * buf_send - */ - wdev->hif_cmd.buf_send = request; - wdev->hif_cmd.buf_recv = reply; - wdev->hif_cmd.len_recv = reply_len; - complete(&wdev->hif_cmd.ready); - - wfx_bh_request_tx(wdev); - - if (no_reply) { - /* Chip won't reply. Ensure the wq has send the buffer before to continue. */ - flush_workqueue(system_highpri_wq); - ret = 0; - goto end; - } - - if (wdev->poll_irq) - wfx_bh_poll_irq(wdev); - - ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ); - if (!ret) { - dev_err(wdev->dev, "chip is abnormally long to answer\n"); - reinit_completion(&wdev->hif_cmd.ready); - ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ); - } - if (!ret) { - dev_err(wdev->dev, "chip did not answer\n"); - wfx_pending_dump_old_frames(wdev, 3000); - wdev->chip_frozen = true; - reinit_completion(&wdev->hif_cmd.done); - ret = -ETIMEDOUT; - } else { - ret = wdev->hif_cmd.ret; - } - -end: - wdev->hif_cmd.buf_send = NULL; - mutex_unlock(&wdev->hif_cmd.lock); - - if (ret && - (cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) { - mib_name = wfx_get_mib_name(((u16 *)request)[2]); - mib_sep = "/"; - } - if (ret < 0) - dev_err(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned error %d\n", - wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret); - if (ret > 0) - dev_warn(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned status %d\n", - wfx_get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret); - - return ret; -} - -/* This function is special. After HIF_REQ_ID_SHUT_DOWN, chip won't reply to any request anymore. - * Obviously, only call this function during device unregister. 
- */ -int wfx_hif_shutdown(struct wfx_dev *wdev) -{ - int ret; - struct wfx_hif_msg *hif; - - wfx_alloc_hif(0, &hif); - if (!hif) - return -ENOMEM; - wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0); - ret = wfx_cmd_send(wdev, hif, NULL, 0, true); - if (wdev->pdata.gpio_wakeup) - gpiod_set_value(wdev->pdata.gpio_wakeup, 0); - else - wfx_control_reg_write(wdev, 0); - kfree(hif); - return ret; -} - -int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len) -{ - int ret; - size_t buf_len = sizeof(struct wfx_hif_req_configuration) + len; - struct wfx_hif_msg *hif; - struct wfx_hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif); - - if (!hif) - return -ENOMEM; - body->length = cpu_to_le16(len); - memcpy(body->pds_data, conf, len); - wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len); - ret = wfx_cmd_send(wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!hif) - return -ENOMEM; - body->reset_stat = reset_stat; - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len) -{ - int ret; - struct wfx_hif_msg *hif; - int buf_len = sizeof(struct wfx_hif_cnf_read_mib) + val_len; - struct wfx_hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif); - struct wfx_hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL); - - if (!body || !reply) { - ret = -ENOMEM; - goto out; - } - body->mib_id = cpu_to_le16(mib_id); - wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body)); - ret = wfx_cmd_send(wdev, hif, reply, buf_len, false); - - if (!ret && mib_id != le16_to_cpu(reply->mib_id)) { - dev_warn(wdev->dev, "%s: confirmation mismatch request\n", __func__); - ret = -EIO; - } - if (ret == -ENOMEM) - dev_err(wdev->dev, "buffer is too small to receive %s (%zu < %d)\n", - wfx_get_mib_name(mib_id), val_len, le16_to_cpu(reply->length)); - if (!ret) - memcpy(val, &reply->mib_data, le16_to_cpu(reply->length)); - else - memset(val, 0xFF, val_len); -out: - kfree(hif); - kfree(reply); - return ret; -} - -int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val, size_t val_len) -{ - int ret; - struct wfx_hif_msg *hif; - int buf_len = sizeof(struct wfx_hif_req_write_mib) + val_len; - struct wfx_hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif); - - if (!hif) - return -ENOMEM; - body->mib_id = cpu_to_le16(mib_id); - body->length = cpu_to_le16(val_len); - memcpy(&body->mib_data, val, val_len); - wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len); - ret = wfx_cmd_send(wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, - int chan_start_idx, int chan_num) -{ - int ret, i; - struct wfx_hif_msg *hif; - size_t buf_len = sizeof(struct wfx_hif_req_start_scan_alt) + chan_num * sizeof(u8); - struct wfx_hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif); - - WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params"); - WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params"); - - if (!hif) - return -ENOMEM; - for (i = 0; i < req->n_ssids; i++) { - memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid, IEEE80211_MAX_SSID_LEN); - body->ssid_def[i].ssid_length = 
cpu_to_le32(req->ssids[i].ssid_len); - } - body->num_of_ssids = HIF_API_MAX_NB_SSIDS; - body->maintain_current_bss = 1; - body->disallow_ps = 1; - body->tx_power_level = cpu_to_le32(req->channels[chan_start_idx]->max_power); - body->num_of_channels = chan_num; - for (i = 0; i < chan_num; i++) - body->channel_list[i] = req->channels[i + chan_start_idx]->hw_value; - if (req->no_cck) - body->max_transmit_rate = API_RATE_INDEX_G_6MBPS; - else - body->max_transmit_rate = API_RATE_INDEX_B_1MBPS; - if (req->channels[chan_start_idx]->flags & IEEE80211_CHAN_NO_IR) { - body->min_channel_time = cpu_to_le32(50); - body->max_channel_time = cpu_to_le32(150); - } else { - body->min_channel_time = cpu_to_le32(10); - body->max_channel_time = cpu_to_le32(50); - body->num_of_probe_requests = 2; - body->probe_delay = 100; - } - - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_stop_scan(struct wfx_vif *wvif) -{ - int ret; - struct wfx_hif_msg *hif; - /* body associated to HIF_REQ_ID_STOP_SCAN is empty */ - wfx_alloc_hif(0, &hif); - - if (!hif) - return -ENOMEM; - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, - struct ieee80211_channel *channel, const u8 *ssid, int ssidlen) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif); - - WARN_ON(!conf->beacon_int); - WARN_ON(!conf->basic_rates); - WARN_ON(sizeof(body->ssid) < ssidlen); - WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS"); - if (!hif) - return -ENOMEM; - body->infrastructure_bss_mode = !conf->ibss_joined; - body->short_preamble = conf->use_short_preamble; - body->probe_for_join = !(channel->flags & IEEE80211_CHAN_NO_IR); - body->channel_number = channel->hw_value; - body->beacon_interval = cpu_to_le32(conf->beacon_int); - body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); - memcpy(body->bssid, conf->bssid, sizeof(body->bssid)); - if (ssid) { - body->ssid_length = cpu_to_le32(ssidlen); - memcpy(body->ssid, ssid, ssidlen); - } - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!hif) - return -ENOMEM; - body->aid = cpu_to_le16(aid); - body->beacon_lost_count = beacon_lost_count; - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg) -{ - int ret; - struct wfx_hif_msg *hif; - /* FIXME: only send necessary bits */ - struct wfx_hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!hif) - return -ENOMEM; - /* FIXME: swap bytes as necessary in body */ - memcpy(body, arg, sizeof(*body)); - if (wfx_api_older_than(wdev, 1, 5)) - /* Legacy firmwares expect that add_key to be sent on right interface. 
*/ - wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY, sizeof(*body)); - else - wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body)); - ret = wfx_cmd_send(wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_remove_key(struct wfx_dev *wdev, int idx) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!hif) - return -ENOMEM; - body->entry_index = idx; - wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body)); - ret = wfx_cmd_send(wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue, - const struct ieee80211_tx_queue_params *arg) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!body) - return -ENOMEM; - - WARN_ON(arg->aifs > 255); - if (!hif) - return -ENOMEM; - body->aifsn = arg->aifs; - body->cw_min = cpu_to_le16(arg->cw_min); - body->cw_max = cpu_to_le16(arg->cw_max); - body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP); - body->queue_id = 3 - queue; - /* API 2.0 has changed queue IDs values */ - if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE) - body->queue_id = HIF_QUEUE_ID_BACKGROUND; - if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK) - body->queue_id = HIF_QUEUE_ID_BESTEFFORT; - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!body) - return -ENOMEM; - - if (!hif) - return -ENOMEM; - if (ps) { - body->enter_psm = 1; - /* Firmware does not support more than 128ms */ - body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255); - if (body->fast_psm_idle_period) - body->fast_psm = 1; - } - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, - const struct ieee80211_channel *channel) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif); - - WARN_ON(!conf->beacon_int); - if (!hif) - return -ENOMEM; - body->dtim_period = conf->dtim_period; - body->short_preamble = conf->use_short_preamble; - body->channel_number = channel->hw_value; - body->beacon_interval = cpu_to_le32(conf->beacon_int); - body->basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates)); - body->ssid_length = conf->ssid_len; - memcpy(body->ssid, conf->ssid, conf->ssid_len); - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!hif) - return -ENOMEM; - body->enable_beaconing = enable ? 
1 : 0; - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp) -{ - int ret; - struct wfx_hif_msg *hif; - struct wfx_hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif); - - if (!hif) - return -ENOMEM; - if (mac_addr) - ether_addr_copy(body->mac_addr, mac_addr); - body->mfpc = mfp ? 1 : 0; - body->unmap = unmap ? 1 : 0; - body->peer_sta_id = sta_id; - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body)); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} - -int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len) -{ - int ret; - struct wfx_hif_msg *hif; - int buf_len = sizeof(struct wfx_hif_req_update_ie) + ies_len; - struct wfx_hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif); - - if (!hif) - return -ENOMEM; - body->beacon = 1; - body->num_ies = cpu_to_le16(1); - memcpy(body->ie, ies, ies_len); - wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len); - ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); - kfree(hif); - return ret; -} diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h deleted file mode 100644 index 71817a6571f0..000000000000 --- a/drivers/staging/wfx/hif_tx.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Implementation of the host-to-chip commands (aka request/confirmation) of the - * hardware API. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - * Copyright (C) 2010, ST-Ericsson SA - */ -#ifndef WFX_HIF_TX_H -#define WFX_HIF_TX_H - -#include -#include -#include - -struct ieee80211_channel; -struct ieee80211_bss_conf; -struct ieee80211_tx_queue_params; -struct cfg80211_scan_request; -struct wfx_hif_req_add_key; -struct wfx_dev; -struct wfx_vif; - -struct wfx_hif_cmd { - struct mutex lock; - struct completion ready; - struct completion done; - struct wfx_hif_msg *buf_send; - void *buf_recv; - size_t len_recv; - int ret; -}; - -void wfx_init_hif_cmd(struct wfx_hif_cmd *wfx_hif_cmd); -int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, - void *reply, size_t reply_len, bool async); - -int wfx_hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); -int wfx_hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); -int wfx_hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, - const struct ieee80211_channel *channel); -int wfx_hif_reset(struct wfx_vif *wvif, bool reset_stat); -int wfx_hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, - struct ieee80211_channel *channel, const u8 *ssid, int ssidlen); -int wfx_hif_map_link(struct wfx_vif *wvif, bool unmap, u8 *mac_addr, int sta_id, bool mfp); -int wfx_hif_add_key(struct wfx_dev *wdev, const struct wfx_hif_req_add_key *arg); -int wfx_hif_remove_key(struct wfx_dev *wdev, int idx); -int wfx_hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout); -int wfx_hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count); -int wfx_hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue, - const struct ieee80211_tx_queue_params *arg); -int wfx_hif_beacon_transmit(struct wfx_vif *wvif, bool enable); -int wfx_hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len); -int 
wfx_hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, - int chan_start, int chan_num); -int wfx_hif_stop_scan(struct wfx_vif *wvif); -int wfx_hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len); -int wfx_hif_shutdown(struct wfx_dev *wdev); - -#endif diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c deleted file mode 100644 index df1bcb1e2c02..000000000000 --- a/drivers/staging/wfx/hif_tx_mib.c +++ /dev/null @@ -1,307 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Implementation of the host-to-chip MIBs of the hardware API. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - * Copyright (C) 2010, ST-Ericsson SA - */ - -#include - -#include "wfx.h" -#include "hif_tx.h" -#include "hif_tx_mib.h" -#include "hif_api_mib.h" - -int wfx_hif_set_output_power(struct wfx_vif *wvif, int val) -{ - struct wfx_hif_mib_current_tx_power_level arg = { - .power_level = cpu_to_le32(val * 10), - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_CURRENT_TX_POWER_LEVEL, - &arg, sizeof(arg)); -} - -int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif, - unsigned int dtim_interval, unsigned int listen_interval) -{ - struct wfx_hif_mib_beacon_wake_up_period arg = { - .wakeup_period_min = dtim_interval, - .receive_dtim = 0, - .wakeup_period_max = listen_interval, - }; - - if (dtim_interval > 0xFF || listen_interval > 0xFFFF) - return -EINVAL; - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_WAKEUP_PERIOD, - &arg, sizeof(arg)); -} - -int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst) -{ - struct wfx_hif_mib_rcpi_rssi_threshold arg = { - .rolling_average_count = 8, - .detection = 1, - }; - - if (!rssi_thold && !rssi_hyst) { - arg.upperthresh = 1; - arg.lowerthresh = 1; - } else { - arg.upper_threshold = rssi_thold + rssi_hyst; - arg.upper_threshold = (arg.upper_threshold + 110) * 2; - arg.lower_threshold = rssi_thold; - arg.lower_threshold = (arg.lower_threshold + 110) * 2; - } - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RCPI_RSSI_THRESHOLD, - &arg, sizeof(arg)); -} - -int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id, - struct wfx_hif_mib_extended_count_table *arg) -{ - if (wfx_api_older_than(wdev, 1, 3)) { - /* extended_count_table is wider than count_table */ - memset(arg, 0xFF, sizeof(*arg)); - return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE, - arg, sizeof(struct wfx_hif_mib_count_table)); - } else { - return wfx_hif_read_mib(wdev, vif_id, HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, - arg, sizeof(struct wfx_hif_mib_extended_count_table)); - } -} - -int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac) -{ - struct wfx_hif_mib_mac_address arg = { }; - - if (mac) - ether_addr_copy(arg.mac_addr, mac); - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS, - &arg, sizeof(arg)); -} - -int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool filter_prbreq) -{ - struct wfx_hif_mib_rx_filter arg = { }; - - if (filter_bssid) - arg.bssid_filter = 1; - if (!filter_prbreq) - arg.fwd_probe_req = 1; - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER, &arg, sizeof(arg)); -} - -int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len, - const struct wfx_hif_ie_table_entry *tbl) -{ - int ret; - struct wfx_hif_mib_bcn_filter_table *arg; - int buf_len = struct_size(arg, ie_table, tbl_len); - - arg = kzalloc(buf_len, 
GFP_KERNEL); - if (!arg) - return -ENOMEM; - arg->num_of_info_elmts = cpu_to_le32(tbl_len); - memcpy(arg->ie_table, tbl, flex_array_size(arg, ie_table, tbl_len)); - ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_TABLE, - arg, buf_len); - kfree(arg); - return ret; -} - -int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count) -{ - struct wfx_hif_mib_bcn_filter_enable arg = { - .enable = cpu_to_le32(enable), - .bcn_count = cpu_to_le32(beacon_count), - }; - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BEACON_FILTER_ENABLE, - &arg, sizeof(arg)); -} - -int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode) -{ - struct wfx_hif_mib_gl_operational_power_mode arg = { - .power_mode = mode, - .wup_ind_activation = 1, - }; - - return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE, - &arg, sizeof(arg)); -} - -int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb, - u8 frame_type, int init_rate) -{ - struct wfx_hif_mib_template_frame *arg; - - WARN(skb->len > HIF_API_MAX_TEMPLATE_FRAME_SIZE, "frame is too big"); - skb_push(skb, 4); - arg = (struct wfx_hif_mib_template_frame *)skb->data; - skb_pull(skb, 4); - arg->init_rate = init_rate; - arg->frame_type = frame_type; - arg->frame_length = cpu_to_le16(skb->len); - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME, - arg, sizeof(*arg) + skb->len); -} - -int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required) -{ - struct wfx_hif_mib_protected_mgmt_policy arg = { }; - - WARN(required && !capable, "incoherent arguments"); - if (capable) { - arg.pmf_enable = 1; - arg.host_enc_auth_frames = 1; - } - if (!required) - arg.unpmf_allowed = 1; - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_PROTECTED_MGMT_POLICY, - &arg, sizeof(arg)); -} - -int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy) -{ - struct wfx_hif_mib_block_ack_policy arg = { - .block_ack_tx_tid_policy = tx_tid_policy, - .block_ack_rx_tid_policy = rx_tid_policy, - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY, - &arg, sizeof(arg)); -} - -int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density, - bool greenfield, bool short_preamble) -{ - struct wfx_hif_mib_set_association_mode arg = { - .preambtype_use = 1, - .mode = 1, - .spacing = 1, - .short_preamble = short_preamble, - .greenfield = greenfield, - .mpdu_start_spacing = ampdu_density, - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_ASSOCIATION_MODE, - &arg, sizeof(arg)); -} - -int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates) -{ - struct wfx_hif_mib_set_tx_rate_retry_policy *arg; - size_t size = struct_size(arg, tx_rate_retry_policy, 1); - int ret; - - arg = kzalloc(size, GFP_KERNEL); - if (!arg) - return -ENOMEM; - arg->num_tx_rate_policies = 1; - arg->tx_rate_retry_policy[0].policy_index = policy_index; - arg->tx_rate_retry_policy[0].short_retry_count = 255; - arg->tx_rate_retry_policy[0].long_retry_count = 255; - arg->tx_rate_retry_policy[0].first_rate_sel = 1; - arg->tx_rate_retry_policy[0].terminate = 1; - arg->tx_rate_retry_policy[0].count_init = 1; - memcpy(&arg->tx_rate_retry_policy[0].rates, rates, - sizeof(arg->tx_rate_retry_policy[0].rates)); - ret = wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, - arg, size); - kfree(arg); - return ret; -} - -int 
wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period) -{ - struct wfx_hif_mib_keep_alive_period arg = { - .keep_alive_period = cpu_to_le16(period), - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD, - &arg, sizeof(arg)); -}; - -int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr) -{ - struct wfx_hif_mib_arp_ip_addr_table arg = { - .condition_idx = idx, - .arp_enable = HIF_ARP_NS_FILTERING_DISABLE, - }; - - if (addr) { - /* Caution: type of addr is __be32 */ - memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address)); - arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE; - } - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE, - &arg, sizeof(arg)); -} - -int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable) -{ - struct wfx_hif_mib_gl_set_multi_msg arg = { - .enable_multi_tx_conf = enable, - }; - - return wfx_hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG, &arg, sizeof(arg)); -} - -int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val) -{ - struct wfx_hif_mib_set_uapsd_information arg = { }; - - if (val & BIT(IEEE80211_AC_VO)) - arg.trig_voice = 1; - if (val & BIT(IEEE80211_AC_VI)) - arg.trig_video = 1; - if (val & BIT(IEEE80211_AC_BE)) - arg.trig_be = 1; - if (val & BIT(IEEE80211_AC_BK)) - arg.trig_bckgrnd = 1; - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_UAPSD_INFORMATION, - &arg, sizeof(arg)); -} - -int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable) -{ - struct wfx_hif_mib_non_erp_protection arg = { - .use_cts_to_self = enable, - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_NON_ERP_PROTECTION, - &arg, sizeof(arg)); -} - -int wfx_hif_slot_time(struct wfx_vif *wvif, int val) -{ - struct wfx_hif_mib_slot_time arg = { - .slot_time = cpu_to_le32(val), - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME, &arg, sizeof(arg)); -} - -int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val) -{ - struct wfx_hif_mib_wep_default_key_id arg = { - .wep_default_key_id = val, - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID, - &arg, sizeof(arg)); -} - -int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val) -{ - struct wfx_hif_mib_dot11_rts_threshold arg = { - .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF), - }; - - return wfx_hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_RTS_THRESHOLD, - &arg, sizeof(arg)); -} diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h deleted file mode 100644 index bcd4ef6a8497..000000000000 --- a/drivers/staging/wfx/hif_tx_mib.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Implementation of the host-to-chip MIBs of the hardware API. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - * Copyright (C) 2010, ST-Ericsson SA - */ -#ifndef WFX_HIF_TX_MIB_H -#define WFX_HIF_TX_MIB_H - -#include - -struct sk_buff; -struct wfx_vif; -struct wfx_dev; -struct wfx_hif_ie_table_entry; -struct wfx_hif_mib_extended_count_table; - -int wfx_hif_set_output_power(struct wfx_vif *wvif, int val); -int wfx_hif_set_beacon_wakeup_period(struct wfx_vif *wvif, - unsigned int dtim_interval, unsigned int listen_interval); -int wfx_hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif, int rssi_thold, int rssi_hyst); -int wfx_hif_get_counters_table(struct wfx_dev *wdev, int vif_id, - struct wfx_hif_mib_extended_count_table *arg); -int wfx_hif_set_macaddr(struct wfx_vif *wvif, u8 *mac); -int wfx_hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid, bool fwd_probe_req); -int wfx_hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len, - const struct wfx_hif_ie_table_entry *tbl); -int wfx_hif_beacon_filter_control(struct wfx_vif *wvif, int enable, int beacon_count); -int wfx_hif_set_operational_mode(struct wfx_dev *wdev, enum wfx_hif_op_power_mode mode); -int wfx_hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb, - u8 frame_type, int init_rate); -int wfx_hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required); -int wfx_hif_set_block_ack_policy(struct wfx_vif *wvif, u8 tx_tid_policy, u8 rx_tid_policy); -int wfx_hif_set_association_mode(struct wfx_vif *wvif, int ampdu_density, - bool greenfield, bool short_preamble); -int wfx_hif_set_tx_rate_retry_policy(struct wfx_vif *wvif, int policy_index, u8 *rates); -int wfx_hif_keep_alive_period(struct wfx_vif *wvif, int period); -int wfx_hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr); -int wfx_hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable); -int wfx_hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val); -int wfx_hif_erp_use_protection(struct wfx_vif *wvif, bool enable); -int wfx_hif_slot_time(struct wfx_vif *wvif, int val); -int wfx_hif_wep_default_key_id(struct wfx_vif *wvif, int val); -int wfx_hif_rts_threshold(struct wfx_vif *wvif, int val); - -#endif diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c deleted file mode 100644 index 3f9750b470be..000000000000 --- a/drivers/staging/wfx/hwio.c +++ /dev/null @@ -1,332 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Low-level I/O functions. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include -#include -#include - -#include "hwio.h" -#include "wfx.h" -#include "bus.h" -#include "traces.h" - -#define WFX_HIF_BUFFER_SIZE 0x2000 - -static int wfx_read32(struct wfx_dev *wdev, int reg, u32 *val) -{ - int ret; - __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); - - *val = ~0; /* Never return undefined value */ - if (!tmp) - return -ENOMEM; - ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp, sizeof(u32)); - if (ret >= 0) - *val = le32_to_cpu(*tmp); - kfree(tmp); - if (ret) - dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); - return ret; -} - -static int wfx_write32(struct wfx_dev *wdev, int reg, u32 val) -{ - int ret; - __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); - - if (!tmp) - return -ENOMEM; - *tmp = cpu_to_le32(val); - ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp, sizeof(u32)); - kfree(tmp); - if (ret) - dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); - return ret; -} - -static int wfx_read32_locked(struct wfx_dev *wdev, int reg, u32 *val) -{ - int ret; - - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wfx_read32(wdev, reg, val); - _trace_io_read32(reg, *val); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - return ret; -} - -static int wfx_write32_locked(struct wfx_dev *wdev, int reg, u32 val) -{ - int ret; - - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wfx_write32(wdev, reg, val); - _trace_io_write32(reg, val); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - return ret; -} - -static int wfx_write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val) -{ - int ret; - u32 val_r, val_w; - - WARN_ON(~mask & val); - val &= mask; - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wfx_read32(wdev, reg, &val_r); - _trace_io_read32(reg, val_r); - if (ret < 0) - goto err; - val_w = (val_r & ~mask) | val; - if (val_w != val_r) { - ret = wfx_write32(wdev, reg, val_w); - _trace_io_write32(reg, val_w); - } -err: - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - return ret; -} - -static int wfx_indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf, size_t len) -{ - int ret; - int i; - u32 cfg; - u32 prefetch; - - WARN_ON(len >= WFX_HIF_BUFFER_SIZE); - WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT); - - if (reg == WFX_REG_AHB_DPORT) - prefetch = CFG_PREFETCH_AHB; - else if (reg == WFX_REG_SRAM_DPORT) - prefetch = CFG_PREFETCH_SRAM; - else - return -ENODEV; - - ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr); - if (ret < 0) - goto err; - - ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg); - if (ret < 0) - goto err; - - ret = wfx_write32(wdev, WFX_REG_CONFIG, cfg | prefetch); - if (ret < 0) - goto err; - - for (i = 0; i < 20; i++) { - ret = wfx_read32(wdev, WFX_REG_CONFIG, &cfg); - if (ret < 0) - goto err; - if (!(cfg & prefetch)) - break; - usleep_range(200, 250); - } - if (i == 20) { - ret = -ETIMEDOUT; - goto err; - } - - ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len); - -err: - if (ret < 0) - memset(buf, 0xFF, len); /* Never return undefined value */ - return ret; -} - -static int wfx_indirect_write(struct wfx_dev *wdev, int reg, u32 addr, - const void *buf, size_t len) -{ - int ret; - - WARN_ON(len >= WFX_HIF_BUFFER_SIZE); - WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT); - ret = wfx_write32(wdev, WFX_REG_BASE_ADDR, addr); - if (ret < 0) - return ret; - - return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len); -} - -static int wfx_indirect_read_locked(struct wfx_dev 
*wdev, int reg, u32 addr, - void *buf, size_t len) -{ - int ret; - - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wfx_indirect_read(wdev, reg, addr, buf, len); - _trace_io_ind_read(reg, addr, buf, len); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - return ret; -} - -static int wfx_indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr, - const void *buf, size_t len) -{ - int ret; - - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wfx_indirect_write(wdev, reg, addr, buf, len); - _trace_io_ind_write(reg, addr, buf, len); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - return ret; -} - -static int wfx_indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 *val) -{ - int ret; - __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); - - if (!tmp) - return -ENOMEM; - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wfx_indirect_read(wdev, reg, addr, tmp, sizeof(u32)); - *val = le32_to_cpu(*tmp); - _trace_io_ind_read32(reg, addr, *val); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - kfree(tmp); - return ret; -} - -static int wfx_indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr, u32 val) -{ - int ret; - __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL); - - if (!tmp) - return -ENOMEM; - *tmp = cpu_to_le32(val); - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wfx_indirect_write(wdev, reg, addr, tmp, sizeof(u32)); - _trace_io_ind_write32(reg, addr, val); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - kfree(tmp); - return ret; -} - -int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len) -{ - int ret; - - WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer"); - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len); - _trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - if (ret) - dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); - return ret; -} - -int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len) -{ - int ret; - - WARN(!IS_ALIGNED((uintptr_t)buf, 4), "unaligned buffer"); - wdev->hwbus_ops->lock(wdev->hwbus_priv); - ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, WFX_REG_IN_OUT_QUEUE, buf, len); - _trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len); - wdev->hwbus_ops->unlock(wdev->hwbus_priv); - if (ret) - dev_err(wdev->dev, "%s: bus communication error: %d\n", __func__, ret); - return ret; -} - -int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len) -{ - return wfx_indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len); -} - -int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len) -{ - return wfx_indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len); -} - -int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len) -{ - return wfx_indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len); -} - -int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len) -{ - return wfx_indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len); -} - -int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val) -{ - return wfx_indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val); -} - -int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val) -{ - return wfx_indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val); -} - -int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val) -{ - return wfx_indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val); -} - -int 
wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val) -{ - return wfx_indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val); -} - -int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val) -{ - return wfx_read32_locked(wdev, WFX_REG_CONFIG, val); -} - -int wfx_config_reg_write(struct wfx_dev *wdev, u32 val) -{ - return wfx_write32_locked(wdev, WFX_REG_CONFIG, val); -} - -int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val) -{ - return wfx_write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val); -} - -int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val) -{ - return wfx_read32_locked(wdev, WFX_REG_CONTROL, val); -} - -int wfx_control_reg_write(struct wfx_dev *wdev, u32 val) -{ - return wfx_write32_locked(wdev, WFX_REG_CONTROL, val); -} - -int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val) -{ - return wfx_write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val); -} - -int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val) -{ - int ret; - - *val = ~0; /* Never return undefined value */ - ret = wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24); - if (ret) - return ret; - ret = wfx_read32_locked(wdev, WFX_REG_SET_GEN_R_W, val); - if (ret) - return ret; - *val &= IGPR_VALUE; - return ret; -} - -int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val) -{ - return wfx_write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val); -} diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h deleted file mode 100644 index c6e7b065b7ff..000000000000 --- a/drivers/staging/wfx/hwio.h +++ /dev/null @@ -1,78 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Low-level I/O functions. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_HWIO_H -#define WFX_HWIO_H - -#include - -struct wfx_dev; - -/* Caution: in the functions below, 'buf' will used with a DMA. So, it must be kmalloc'd (do not use - * stack allocated buffers). In doubt, enable CONFIG_DEBUG_SG to detect badly located buffer. 
- */ -int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len); -int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t buf_len); - -int wfx_sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len); -int wfx_sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len); - -int wfx_ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len); -int wfx_ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len); - -int wfx_sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val); -int wfx_sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val); - -int wfx_ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val); -int wfx_ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val); - -#define CFG_ERR_SPI_FRAME 0x00000001 /* only with SPI */ -#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 /* only with SDIO */ -#define CFG_ERR_BUF_UNDERRUN 0x00000002 -#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004 -#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008 -#define CFG_ERR_BUF_OVERRUN 0x00000010 -#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020 -#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040 -#define CFG_ERR_HOST_CRC_MISS 0x00000080 /* only with SDIO */ -#define CFG_SPI_IGNORE_CS 0x00000080 /* only with SPI */ -#define CFG_BYTE_ORDER_MASK 0x00000300 /* only writable with SPI */ -#define CFG_BYTE_ORDER_BADC 0x00000000 -#define CFG_BYTE_ORDER_DCBA 0x00000100 -#define CFG_BYTE_ORDER_ABCD 0x00000200 /* SDIO always use this value */ -#define CFG_DIRECT_ACCESS_MODE 0x00000400 -#define CFG_PREFETCH_AHB 0x00000800 -#define CFG_DISABLE_CPU_CLK 0x00001000 -#define CFG_PREFETCH_SRAM 0x00002000 -#define CFG_CPU_RESET 0x00004000 -#define CFG_SDIO_DISABLE_IRQ 0x00008000 /* only with SDIO */ -#define CFG_IRQ_ENABLE_DATA 0x00010000 -#define CFG_IRQ_ENABLE_WRDY 0x00020000 -#define CFG_CLK_RISE_EDGE 0x00040000 -#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 /* only with SDIO */ -#define CFG_RESERVED 0x00F00000 -#define CFG_DEVICE_ID_MAJOR 0x07000000 -#define CFG_DEVICE_ID_RESERVED 0x78000000 -#define CFG_DEVICE_ID_TYPE 0x80000000 -int wfx_config_reg_read(struct wfx_dev *wdev, u32 *val); -int wfx_config_reg_write(struct wfx_dev *wdev, u32 val); -int wfx_config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val); - -#define CTRL_NEXT_LEN_MASK 0x00000FFF -#define CTRL_WLAN_WAKEUP 0x00001000 -#define CTRL_WLAN_READY 0x00002000 -int wfx_control_reg_read(struct wfx_dev *wdev, u32 *val); -int wfx_control_reg_write(struct wfx_dev *wdev, u32 val); -int wfx_control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val); - -#define IGPR_RW 0x80000000 -#define IGPR_INDEX 0x7F000000 -#define IGPR_VALUE 0x00FFFFFF -int wfx_igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val); -int wfx_igpr_reg_write(struct wfx_dev *wdev, int index, u32 val); - -#endif diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c deleted file mode 100644 index 8f23e8d42bd4..000000000000 --- a/drivers/staging/wfx/key.c +++ /dev/null @@ -1,227 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Key management related functions. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include - -#include "key.h" -#include "wfx.h" -#include "hif_tx_mib.h" - -static int wfx_alloc_key(struct wfx_dev *wdev) -{ - int idx; - - idx = ffs(~wdev->key_map) - 1; - if (idx < 0 || idx >= MAX_KEY_ENTRIES) - return -1; - - wdev->key_map |= BIT(idx); - return idx; -} - -static void wfx_free_key(struct wfx_dev *wdev, int idx) -{ - WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation"); - wdev->key_map &= ~BIT(idx); -} - -static u8 fill_wep_pair(struct wfx_hif_wep_pairwise_key *msg, - struct ieee80211_key_conf *key, u8 *peer_addr) -{ - WARN(key->keylen > sizeof(msg->key_data), "inconsistent data"); - msg->key_length = key->keylen; - memcpy(msg->key_data, key->key, key->keylen); - ether_addr_copy(msg->peer_address, peer_addr); - return HIF_KEY_TYPE_WEP_PAIRWISE; -} - -static u8 fill_wep_group(struct wfx_hif_wep_group_key *msg, - struct ieee80211_key_conf *key) -{ - WARN(key->keylen > sizeof(msg->key_data), "inconsistent data"); - msg->key_id = key->keyidx; - msg->key_length = key->keylen; - memcpy(msg->key_data, key->key, key->keylen); - return HIF_KEY_TYPE_WEP_DEFAULT; -} - -static u8 fill_tkip_pair(struct wfx_hif_tkip_pairwise_key *msg, - struct ieee80211_key_conf *key, u8 *peer_addr) -{ - u8 *keybuf = key->key; - - WARN(key->keylen != sizeof(msg->tkip_key_data) + sizeof(msg->tx_mic_key) + - sizeof(msg->rx_mic_key), "inconsistent data"); - memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data)); - keybuf += sizeof(msg->tkip_key_data); - memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key)); - keybuf += sizeof(msg->tx_mic_key); - memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key)); - ether_addr_copy(msg->peer_address, peer_addr); - return HIF_KEY_TYPE_TKIP_PAIRWISE; -} - -static u8 fill_tkip_group(struct wfx_hif_tkip_group_key *msg, struct ieee80211_key_conf *key, - struct ieee80211_key_seq *seq, enum nl80211_iftype iftype) -{ - u8 *keybuf = key->key; - - WARN(key->keylen != sizeof(msg->tkip_key_data) + 2 * sizeof(msg->rx_mic_key), - "inconsistent data"); - msg->key_id = key->keyidx; - memcpy(msg->rx_sequence_counter, &seq->tkip.iv16, sizeof(seq->tkip.iv16)); - memcpy(msg->rx_sequence_counter + sizeof(u16), &seq->tkip.iv32, sizeof(seq->tkip.iv32)); - memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data)); - keybuf += sizeof(msg->tkip_key_data); - if (iftype == NL80211_IFTYPE_AP) - /* Use Tx MIC Key */ - memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key)); - else - /* Use Rx MIC Key */ - memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key)); - return HIF_KEY_TYPE_TKIP_GROUP; -} - -static u8 fill_ccmp_pair(struct wfx_hif_aes_pairwise_key *msg, - struct ieee80211_key_conf *key, u8 *peer_addr) -{ - WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data"); - ether_addr_copy(msg->peer_address, peer_addr); - memcpy(msg->aes_key_data, key->key, key->keylen); - return HIF_KEY_TYPE_AES_PAIRWISE; -} - -static u8 fill_ccmp_group(struct wfx_hif_aes_group_key *msg, - struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) -{ - WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data"); - memcpy(msg->aes_key_data, key->key, key->keylen); - memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn)); - memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn)); - msg->key_id = key->keyidx; - return HIF_KEY_TYPE_AES_GROUP; -} - -static u8 fill_sms4_pair(struct wfx_hif_wapi_pairwise_key *msg, - struct ieee80211_key_conf *key, u8 *peer_addr) -{ - u8 *keybuf = key->key; - 
- WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data), - "inconsistent data"); - ether_addr_copy(msg->peer_address, peer_addr); - memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data)); - keybuf += sizeof(msg->wapi_key_data); - memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data)); - msg->key_id = key->keyidx; - return HIF_KEY_TYPE_WAPI_PAIRWISE; -} - -static u8 fill_sms4_group(struct wfx_hif_wapi_group_key *msg, - struct ieee80211_key_conf *key) -{ - u8 *keybuf = key->key; - - WARN(key->keylen != sizeof(msg->wapi_key_data) + sizeof(msg->mic_key_data), - "inconsistent data"); - memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data)); - keybuf += sizeof(msg->wapi_key_data); - memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data)); - msg->key_id = key->keyidx; - return HIF_KEY_TYPE_WAPI_GROUP; -} - -static u8 fill_aes_cmac_group(struct wfx_hif_igtk_group_key *msg, - struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) -{ - WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data"); - memcpy(msg->igtk_key_data, key->key, key->keylen); - memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn)); - memreverse(msg->ipn, sizeof(seq->aes_cmac.pn)); - msg->key_id = key->keyidx; - return HIF_KEY_TYPE_IGTK_GROUP; -} - -static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) -{ - int ret; - struct wfx_hif_req_add_key k = { }; - struct ieee80211_key_seq seq; - struct wfx_dev *wdev = wvif->wdev; - int idx = wfx_alloc_key(wvif->wdev); - bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE; - - WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data"); - ieee80211_get_key_rx_seq(key, 0, &seq); - if (idx < 0) - return -EINVAL; - k.int_id = wvif->id; - k.entry_index = idx; - if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || - key->cipher == WLAN_CIPHER_SUITE_WEP104) { - if (pairwise) - k.type = fill_wep_pair(&k.key.wep_pairwise_key, key, sta->addr); - else - k.type = fill_wep_group(&k.key.wep_group_key, key); - } else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { - if (pairwise) - k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key, sta->addr); - else - k.type = fill_tkip_group(&k.key.tkip_group_key, key, &seq, - wvif->vif->type); - } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) { - if (pairwise) - k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key, sta->addr); - else - k.type = fill_ccmp_group(&k.key.aes_group_key, key, &seq); - } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) { - if (pairwise) - k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key, sta->addr); - else - k.type = fill_sms4_group(&k.key.wapi_group_key, key); - } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { - k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key, &seq); - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; - } else { - dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher); - wfx_free_key(wdev, idx); - return -EOPNOTSUPP; - } - ret = wfx_hif_add_key(wdev, &k); - if (ret) { - wfx_free_key(wdev, idx); - return -EOPNOTSUPP; - } - key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM; - key->hw_key_idx = idx; - return 0; -} - -static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key) -{ - WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx"); - wfx_free_key(wvif->wdev, key->hw_key_idx); - return wfx_hif_remove_key(wvif->wdev, key->hw_key_idx); -} - -int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd 
cmd, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct ieee80211_key_conf *key) -{ - int ret = -EOPNOTSUPP; - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - mutex_lock(&wvif->wdev->conf_mutex); - if (cmd == SET_KEY) - ret = wfx_add_key(wvif, sta, key); - if (cmd == DISABLE_KEY) - ret = wfx_remove_key(wvif, key); - mutex_unlock(&wvif->wdev->conf_mutex); - return ret; -} - diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h deleted file mode 100644 index 2234e36dbbcd..000000000000 --- a/drivers/staging/wfx/key.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Key management related functions. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_KEY_H -#define WFX_KEY_H - -#include - -struct wfx_dev; -struct wfx_vif; - -int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct ieee80211_key_conf *key); - -#endif diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c deleted file mode 100644 index b93b16b900c8..000000000000 --- a/drivers/staging/wfx/main.c +++ /dev/null @@ -1,491 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Device probe and register. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - * Copyright (c) 2008, Johannes Berg - * Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). - * Copyright (c) 2007-2009, Christian Lamparter - * Copyright (c) 2006, Michael Wu - * Copyright (c) 2004-2006 Jean-Baptiste Note , et al. - */ -#include -#include -#include -#include -#include -#include -#include -#include - -#include "main.h" -#include "wfx.h" -#include "fwio.h" -#include "hwio.h" -#include "bus.h" -#include "bh.h" -#include "sta.h" -#include "key.h" -#include "scan.h" -#include "debug.h" -#include "data_tx.h" -#include "hif_tx_mib.h" -#include "hif_api_cmd.h" - -#define WFX_PDS_TLV_TYPE 0x4450 // "PD" (Platform Data) in ascii little-endian -#define WFX_PDS_MAX_CHUNK_SIZE 1500 - -MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WF200"); -MODULE_AUTHOR("Jérôme Pouiller "); -MODULE_LICENSE("GPL"); - -#define RATETAB_ENT(_rate, _rateid, _flags) { \ - .bitrate = (_rate), \ - .hw_value = (_rateid), \ - .flags = (_flags), \ -} - -static struct ieee80211_rate wfx_rates[] = { - RATETAB_ENT(10, 0, 0), - RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE), - RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE), - RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE), - RATETAB_ENT(60, 6, 0), - RATETAB_ENT(90, 7, 0), - RATETAB_ENT(120, 8, 0), - RATETAB_ENT(180, 9, 0), - RATETAB_ENT(240, 10, 0), - RATETAB_ENT(360, 11, 0), - RATETAB_ENT(480, 12, 0), - RATETAB_ENT(540, 13, 0), -}; - -#define CHAN2G(_channel, _freq, _flags) { \ - .band = NL80211_BAND_2GHZ, \ - .center_freq = (_freq), \ - .hw_value = (_channel), \ - .flags = (_flags), \ - .max_antenna_gain = 0, \ - .max_power = 30, \ -} - -static struct ieee80211_channel wfx_2ghz_chantable[] = { - CHAN2G(1, 2412, 0), - CHAN2G(2, 2417, 0), - CHAN2G(3, 2422, 0), - CHAN2G(4, 2427, 0), - CHAN2G(5, 2432, 0), - CHAN2G(6, 2437, 0), - CHAN2G(7, 2442, 0), - CHAN2G(8, 2447, 0), - CHAN2G(9, 2452, 0), - CHAN2G(10, 2457, 0), - CHAN2G(11, 2462, 0), - CHAN2G(12, 2467, 0), - CHAN2G(13, 2472, 0), - CHAN2G(14, 2484, 0), -}; - -static const struct ieee80211_supported_band wfx_band_2ghz = { - .channels = wfx_2ghz_chantable, - .n_channels = ARRAY_SIZE(wfx_2ghz_chantable), - 
.bitrates = wfx_rates, - .n_bitrates = ARRAY_SIZE(wfx_rates), - .ht_cap = { - /* Receive caps */ - .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | - IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), - .ht_supported = 1, - .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K, - .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, - .mcs = { - .rx_mask = { 0xFF }, /* MCS0 to MCS7 */ - .rx_highest = cpu_to_le16(72), - .tx_params = IEEE80211_HT_MCS_TX_DEFINED, - }, - }, -}; - -static const struct ieee80211_iface_limit wdev_iface_limits[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) }, - { .max = 1, .types = BIT(NL80211_IFTYPE_AP) }, -}; - -static const struct ieee80211_iface_combination wfx_iface_combinations[] = { - { - .num_different_channels = 2, - .max_interfaces = 2, - .limits = wdev_iface_limits, - .n_limits = ARRAY_SIZE(wdev_iface_limits), - } -}; - -static const struct ieee80211_ops wfx_ops = { - .start = wfx_start, - .stop = wfx_stop, - .add_interface = wfx_add_interface, - .remove_interface = wfx_remove_interface, - .config = wfx_config, - .tx = wfx_tx, - .join_ibss = wfx_join_ibss, - .leave_ibss = wfx_leave_ibss, - .conf_tx = wfx_conf_tx, - .hw_scan = wfx_hw_scan, - .cancel_hw_scan = wfx_cancel_hw_scan, - .start_ap = wfx_start_ap, - .stop_ap = wfx_stop_ap, - .sta_add = wfx_sta_add, - .sta_remove = wfx_sta_remove, - .set_tim = wfx_set_tim, - .set_key = wfx_set_key, - .set_rts_threshold = wfx_set_rts_threshold, - .set_default_unicast_key = wfx_set_default_unicast_key, - .bss_info_changed = wfx_bss_info_changed, - .configure_filter = wfx_configure_filter, - .ampdu_action = wfx_ampdu_action, - .flush = wfx_flush, - .add_chanctx = wfx_add_chanctx, - .remove_chanctx = wfx_remove_chanctx, - .change_chanctx = wfx_change_chanctx, - .assign_vif_chanctx = wfx_assign_vif_chanctx, - .unassign_vif_chanctx = wfx_unassign_vif_chanctx, -}; - -bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor) -{ - if (wdev->hw_caps.api_version_major < major) - return true; - if (wdev->hw_caps.api_version_major > major) - return false; - if (wdev->hw_caps.api_version_minor < minor) - return true; - return false; -} - -/* The device needs data about the antenna configuration. This information in provided by PDS - * (Platform Data Set, this is the wording used in WF200 documentation) files. For hardware - * integrators, the full process to create PDS files is described here: - * https://github.com/SiliconLabs/wfx-firmware/blob/master/PDS/README.md - * - * The PDS file is an array of Time-Length-Value structs. 
- */ - int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len) -{ - int ret, chunk_type, chunk_len, chunk_num = 0; - - if (*buf == '{') { - dev_err(wdev->dev, "PDS: malformed file (legacy format?)\n"); - return -EINVAL; - } - while (len > 0) { - chunk_type = get_unaligned_le16(buf + 0); - chunk_len = get_unaligned_le16(buf + 2); - if (chunk_len > len) { - dev_err(wdev->dev, "PDS:%d: corrupted file\n", chunk_num); - return -EINVAL; - } - if (chunk_type != WFX_PDS_TLV_TYPE) { - dev_info(wdev->dev, "PDS:%d: skip unknown data\n", chunk_num); - goto next; - } - if (chunk_len > WFX_PDS_MAX_CHUNK_SIZE) - dev_warn(wdev->dev, "PDS:%d: unexpectedly large chunk\n", chunk_num); - if (buf[4] != '{' || buf[chunk_len - 1] != '}') - dev_warn(wdev->dev, "PDS:%d: unexpected content\n", chunk_num); - - ret = wfx_hif_configuration(wdev, buf + 4, chunk_len - 4); - if (ret > 0) { - dev_err(wdev->dev, "PDS:%d: invalid data (unsupported options?)\n", chunk_num); - return -EINVAL; - } - if (ret == -ETIMEDOUT) { - dev_err(wdev->dev, "PDS:%d: chip didn't reply (corrupted file?)\n", chunk_num); - return ret; - } - if (ret) { - dev_err(wdev->dev, "PDS:%d: chip returned an unknown error\n", chunk_num); - return -EIO; - } -next: - chunk_num++; - len -= chunk_len; - buf += chunk_len; - } - return 0; -} - -static int wfx_send_pdata_pds(struct wfx_dev *wdev) -{ - int ret = 0; - const struct firmware *pds; - u8 *tmp_buf; - - ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev); - if (ret) { - dev_err(wdev->dev, "can't load antenna parameters (PDS file %s). The device may be unstable.\n", - wdev->pdata.file_pds); - return ret; - } - tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL); - if (!tmp_buf) { - ret = -ENOMEM; - goto release_fw; - } - ret = wfx_send_pds(wdev, tmp_buf, pds->size); - kfree(tmp_buf); -release_fw: - release_firmware(pds); - return ret; -} - -static void wfx_free_common(void *data) -{ - struct wfx_dev *wdev = data; - - mutex_destroy(&wdev->tx_power_loop_info_lock); - mutex_destroy(&wdev->rx_stats_lock); - mutex_destroy(&wdev->conf_mutex); - ieee80211_free_hw(wdev->hw); -} - -struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata, - const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv) -{ - struct ieee80211_hw *hw; - struct wfx_dev *wdev; - - hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops); - if (!hw) - return NULL; - - SET_IEEE80211_DEV(hw, dev); - - ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); - ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, CONNECTION_MONITOR); - ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); - ieee80211_hw_set(hw, SIGNAL_DBM); - ieee80211_hw_set(hw, SUPPORTS_PS); - ieee80211_hw_set(hw, MFP_CAPABLE); - - hw->vif_data_size = sizeof(struct wfx_vif); - hw->sta_data_size = sizeof(struct wfx_sta_priv); - hw->queues = 4; - hw->max_rates = 8; - hw->max_rate_tries = 8; - hw->extra_tx_headroom = sizeof(struct wfx_hif_msg) + sizeof(struct wfx_hif_req_tx) + - 4 /* alignment */ + 8 /* TKIP IV */; - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_AP); - hw->wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P | - NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U; - hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; - hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; - hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - 
hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX; - hw->wiphy->max_scan_ssids = 2; - hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; - hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations); - hw->wiphy->iface_combinations = wfx_iface_combinations; - hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL); - if (!hw->wiphy->bands[NL80211_BAND_2GHZ]) - goto err; - - /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */ - memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz, sizeof(wfx_band_2ghz)); - - wdev = hw->priv; - wdev->hw = hw; - wdev->dev = dev; - wdev->hwbus_ops = hwbus_ops; - wdev->hwbus_priv = hwbus_priv; - memcpy(&wdev->pdata, pdata, sizeof(*pdata)); - of_property_read_string(dev->of_node, "silabs,antenna-config-file", &wdev->pdata.file_pds); - wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW); - if (IS_ERR(wdev->pdata.gpio_wakeup)) - goto err; - - if (wdev->pdata.gpio_wakeup) - gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup"); - - mutex_init(&wdev->conf_mutex); - mutex_init(&wdev->rx_stats_lock); - mutex_init(&wdev->tx_power_loop_info_lock); - init_completion(&wdev->firmware_ready); - INIT_DELAYED_WORK(&wdev->cooling_timeout_work, wfx_cooling_timeout_work); - skb_queue_head_init(&wdev->tx_pending); - init_waitqueue_head(&wdev->tx_dequeue); - wfx_init_hif_cmd(&wdev->hif_cmd); - - if (devm_add_action_or_reset(dev, wfx_free_common, wdev)) - return NULL; - - return wdev; - -err: - ieee80211_free_hw(hw); - return NULL; -} - -int wfx_probe(struct wfx_dev *wdev) -{ - int i; - int err; - struct gpio_desc *gpio_saved; - - /* During first part of boot, gpio_wakeup cannot yet been used. So prevent bh() to touch - * it. - */ - gpio_saved = wdev->pdata.gpio_wakeup; - wdev->pdata.gpio_wakeup = NULL; - wdev->poll_irq = true; - - wfx_bh_register(wdev); - - err = wfx_init_device(wdev); - if (err) - goto bh_unregister; - - wfx_bh_poll_irq(wdev); - err = wait_for_completion_timeout(&wdev->firmware_ready, 1 * HZ); - if (err <= 0) { - if (err == 0) { - dev_err(wdev->dev, "timeout while waiting for startup indication\n"); - err = -ETIMEDOUT; - } else if (err == -ERESTARTSYS) { - dev_info(wdev->dev, "probe interrupted by user\n"); - } - goto bh_unregister; - } - - /* FIXME: fill wiphy::hw_version */ - dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n", - wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor, - wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label, - wdev->hw_caps.api_version_major, wdev->hw_caps.api_version_minor, - wdev->keyset, wdev->hw_caps.link_mode); - snprintf(wdev->hw->wiphy->fw_version, - sizeof(wdev->hw->wiphy->fw_version), - "%d.%d.%d", - wdev->hw_caps.firmware_major, - wdev->hw_caps.firmware_minor, - wdev->hw_caps.firmware_build); - - if (wfx_api_older_than(wdev, 1, 0)) { - dev_err(wdev->dev, "unsupported firmware API version (expect 1 while firmware returns %d)\n", - wdev->hw_caps.api_version_major); - err = -EOPNOTSUPP; - goto bh_unregister; - } - - if (wdev->hw_caps.link_mode == SEC_LINK_ENFORCED) { - dev_err(wdev->dev, "chip require secure_link, but can't negotiate it\n"); - goto bh_unregister; - } - - if (wdev->hw_caps.region_sel_mode) { - wdev->hw->wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS; - wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |= - IEEE80211_CHAN_NO_IR; - wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |= - IEEE80211_CHAN_NO_IR; - 
wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |= - IEEE80211_CHAN_DISABLED; - } - - dev_dbg(wdev->dev, "sending configuration file %s\n", wdev->pdata.file_pds); - err = wfx_send_pdata_pds(wdev); - if (err < 0 && err != -ENOENT) - goto bh_unregister; - - wdev->poll_irq = false; - err = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv); - if (err) - goto bh_unregister; - - err = wfx_hif_use_multi_tx_conf(wdev, true); - if (err) - dev_err(wdev->dev, "misconfigured IRQ?\n"); - - wdev->pdata.gpio_wakeup = gpio_saved; - if (wdev->pdata.gpio_wakeup) { - dev_dbg(wdev->dev, "enable 'quiescent' power mode with wakeup GPIO and PDS file %s\n", - wdev->pdata.file_pds); - gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); - wfx_control_reg_write(wdev, 0); - wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT); - } else { - wfx_hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE); - } - - for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) { - eth_zero_addr(wdev->addresses[i].addr); - err = of_get_mac_address(wdev->dev->of_node, wdev->addresses[i].addr); - if (!err) - wdev->addresses[i].addr[ETH_ALEN - 1] += i; - else - ether_addr_copy(wdev->addresses[i].addr, wdev->hw_caps.mac_addr[i]); - if (!is_valid_ether_addr(wdev->addresses[i].addr)) { - dev_warn(wdev->dev, "using random MAC address\n"); - eth_random_addr(wdev->addresses[i].addr); - } - dev_info(wdev->dev, "MAC address %d: %pM\n", i, wdev->addresses[i].addr); - } - wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses); - wdev->hw->wiphy->addresses = wdev->addresses; - - if (!wfx_api_older_than(wdev, 3, 8)) - wdev->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; - - err = ieee80211_register_hw(wdev->hw); - if (err) - goto irq_unsubscribe; - - err = wfx_debug_init(wdev); - if (err) - goto ieee80211_unregister; - - return 0; - -ieee80211_unregister: - ieee80211_unregister_hw(wdev->hw); -irq_unsubscribe: - wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); -bh_unregister: - wfx_bh_unregister(wdev); - return err; -} - -void wfx_release(struct wfx_dev *wdev) -{ - ieee80211_unregister_hw(wdev->hw); - wfx_hif_shutdown(wdev); - wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); - wfx_bh_unregister(wdev); -} - -static int __init wfx_core_init(void) -{ - int ret = 0; - - if (IS_ENABLED(CONFIG_SPI)) - ret = spi_register_driver(&wfx_spi_driver); - if (IS_ENABLED(CONFIG_MMC) && !ret) - ret = sdio_register_driver(&wfx_sdio_driver); - return ret; -} -module_init(wfx_core_init); - -static void __exit wfx_core_exit(void) -{ - if (IS_ENABLED(CONFIG_MMC)) - sdio_unregister_driver(&wfx_sdio_driver); - if (IS_ENABLED(CONFIG_SPI)) - spi_unregister_driver(&wfx_spi_driver); -} -module_exit(wfx_core_exit); diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h deleted file mode 100644 index 68c665307153..000000000000 --- a/drivers/staging/wfx/main.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Device probe and register. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - * Copyright (c) 2006, Michael Wu - * Copyright 2004-2006 Jean-Baptiste Note , et al. 
- */ -#ifndef WFX_MAIN_H -#define WFX_MAIN_H - -#include -#include - -#include "hif_api_general.h" - -struct wfx_dev; -struct wfx_hwbus_ops; - -struct wfx_platform_data { - /* Keyset and ".sec" extension will be appended to this string */ - const char *file_fw; - const char *file_pds; - struct gpio_desc *gpio_wakeup; - /* if true HIF D_out is sampled on the rising edge of the clock (intended to be used in - * 50Mhz SDIO) - */ - bool use_rising_clk; -}; - -struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_data *pdata, - const struct wfx_hwbus_ops *hwbus_ops, void *hwbus_priv); - -int wfx_probe(struct wfx_dev *wdev); -void wfx_release(struct wfx_dev *wdev); - -bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor); -int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len); - -#endif diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c deleted file mode 100644 index 729825230db2..000000000000 --- a/drivers/staging/wfx/queue.c +++ /dev/null @@ -1,297 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Queue between the tx operation and the bh workqueue. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#include -#include - -#include "queue.h" -#include "wfx.h" -#include "sta.h" -#include "data_tx.h" -#include "traces.h" - -void wfx_tx_lock(struct wfx_dev *wdev) -{ - atomic_inc(&wdev->tx_lock); -} - -void wfx_tx_unlock(struct wfx_dev *wdev) -{ - int tx_lock = atomic_dec_return(&wdev->tx_lock); - - WARN(tx_lock < 0, "inconsistent tx_lock value"); - if (!tx_lock) - wfx_bh_request_tx(wdev); -} - -void wfx_tx_flush(struct wfx_dev *wdev) -{ - int ret; - - /* Do not wait for any reply if chip is frozen */ - if (wdev->chip_frozen) - return; - - wfx_tx_lock(wdev); - mutex_lock(&wdev->hif_cmd.lock); - ret = wait_event_timeout(wdev->hif.tx_buffers_empty, !wdev->hif.tx_buffers_used, - msecs_to_jiffies(3000)); - if (!ret) { - dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n", - wdev->hif.tx_buffers_used); - wfx_pending_dump_old_frames(wdev, 3000); - /* FIXME: drop pending frames here */ - wdev->chip_frozen = true; - } - mutex_unlock(&wdev->hif_cmd.lock); - wfx_tx_unlock(wdev); -} - -void wfx_tx_lock_flush(struct wfx_dev *wdev) -{ - wfx_tx_lock(wdev); - wfx_tx_flush(wdev); -} - -void wfx_tx_queues_init(struct wfx_vif *wvif) -{ - /* The device is in charge to respect the details of the QoS parameters. The driver just - * ensure that it roughtly respect the priorities to avoid any shortage. 
- */ - const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 }; - int i; - - for (i = 0; i < IEEE80211_NUM_ACS; ++i) { - skb_queue_head_init(&wvif->tx_queue[i].normal); - skb_queue_head_init(&wvif->tx_queue[i].cab); - wvif->tx_queue[i].priority = priorities[i]; - } -} - -bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue) -{ - return skb_queue_empty_lockless(&queue->normal) && skb_queue_empty_lockless(&queue->cab); -} - -void wfx_tx_queues_check_empty(struct wfx_vif *wvif) -{ - int i; - - for (i = 0; i < IEEE80211_NUM_ACS; ++i) { - WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames)); - WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i])); - } -} - -static void __wfx_tx_queue_drop(struct wfx_vif *wvif, - struct sk_buff_head *skb_queue, struct sk_buff_head *dropped) -{ - struct sk_buff *skb, *tmp; - - spin_lock_bh(&skb_queue->lock); - skb_queue_walk_safe(skb_queue, skb, tmp) { - __skb_unlink(skb, skb_queue); - skb_queue_head(dropped, skb); - } - spin_unlock_bh(&skb_queue->lock); -} - -void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, - struct sk_buff_head *dropped) -{ - __wfx_tx_queue_drop(wvif, &queue->cab, dropped); - __wfx_tx_queue_drop(wvif, &queue->normal, dropped); - wake_up(&wvif->wdev->tx_dequeue); -} - -void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb) -{ - struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - - if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) - skb_queue_tail(&queue->cab, skb); - else - skb_queue_tail(&queue->normal, skb); -} - -void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped) -{ - struct wfx_queue *queue; - struct wfx_vif *wvif; - struct wfx_hif_msg *hif; - struct sk_buff *skb; - - WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device", __func__); - while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) { - hif = (struct wfx_hif_msg *)skb->data; - wvif = wdev_to_wvif(wdev, hif->interface); - if (wvif) { - queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; - WARN_ON(skb_get_queue_mapping(skb) > 3); - WARN_ON(!atomic_read(&queue->pending_frames)); - atomic_dec(&queue->pending_frames); - } - skb_queue_head(dropped, skb); - } -} - -struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id) -{ - struct wfx_queue *queue; - struct wfx_hif_req_tx *req; - struct wfx_vif *wvif; - struct wfx_hif_msg *hif; - struct sk_buff *skb; - - spin_lock_bh(&wdev->tx_pending.lock); - skb_queue_walk(&wdev->tx_pending, skb) { - hif = (struct wfx_hif_msg *)skb->data; - req = (struct wfx_hif_req_tx *)hif->body; - if (req->packet_id != packet_id) - continue; - spin_unlock_bh(&wdev->tx_pending.lock); - wvif = wdev_to_wvif(wdev, hif->interface); - if (wvif) { - queue = &wvif->tx_queue[skb_get_queue_mapping(skb)]; - WARN_ON(skb_get_queue_mapping(skb) > 3); - WARN_ON(!atomic_read(&queue->pending_frames)); - atomic_dec(&queue->pending_frames); - } - skb_unlink(skb, &wdev->tx_pending); - return skb; - } - spin_unlock_bh(&wdev->tx_pending.lock); - WARN(1, "cannot find packet in pending queue"); - return NULL; -} - -void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms) -{ - ktime_t now = ktime_get(); - struct wfx_tx_priv *tx_priv; - struct wfx_hif_req_tx *req; - struct sk_buff *skb; - bool first = true; - - spin_lock_bh(&wdev->tx_pending.lock); - skb_queue_walk(&wdev->tx_pending, skb) { - tx_priv = wfx_skb_tx_priv(skb); - req = wfx_skb_txreq(skb); - if (ktime_after(now, 
ktime_add_ms(tx_priv->xmit_timestamp, limit_ms))) { - if (first) { - dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n", - limit_ms); - first = false; - } - dev_info(wdev->dev, " id %08x sent %lldms ago\n", - req->packet_id, ktime_ms_delta(now, tx_priv->xmit_timestamp)); - } - } - spin_unlock_bh(&wdev->tx_pending.lock); -} - -unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb) -{ - ktime_t now = ktime_get(); - struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb); - - return ktime_us_delta(now, tx_priv->xmit_timestamp); -} - -bool wfx_tx_queues_has_cab(struct wfx_vif *wvif) -{ - int i; - - if (wvif->vif->type != NL80211_IFTYPE_AP) - return false; - for (i = 0; i < IEEE80211_NUM_ACS; ++i) - /* Note: since only AP can have mcast frames in queue and only one vif can be AP, - * all queued frames has same interface id - */ - if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab)) - return true; - return false; -} - -static int wfx_tx_queue_get_weight(struct wfx_queue *queue) -{ - return atomic_read(&queue->pending_frames) * queue->priority; -} - -static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev) -{ - struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)]; - int i, j, num_queues = 0; - struct wfx_vif *wvif; - struct wfx_hif_msg *hif; - struct sk_buff *skb; - - /* sort the queues */ - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - WARN_ON(num_queues >= ARRAY_SIZE(queues)); - queues[num_queues] = &wvif->tx_queue[i]; - for (j = num_queues; j > 0; j--) - if (wfx_tx_queue_get_weight(queues[j]) < - wfx_tx_queue_get_weight(queues[j - 1])) - swap(queues[j - 1], queues[j]); - num_queues++; - } - } - - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - if (!wvif->after_dtim_tx_allowed) - continue; - for (i = 0; i < num_queues; i++) { - skb = skb_dequeue(&queues[i]->cab); - if (!skb) - continue; - /* Note: since only AP can have mcast frames in queue and only one vif can - * be AP, all queued frames has same interface id - */ - hif = (struct wfx_hif_msg *)skb->data; - WARN_ON(hif->interface != wvif->id); - WARN_ON(queues[i] != &wvif->tx_queue[skb_get_queue_mapping(skb)]); - atomic_inc(&queues[i]->pending_frames); - trace_queues_stats(wdev, queues[i]); - return skb; - } - /* No more multicast to sent */ - wvif->after_dtim_tx_allowed = false; - schedule_work(&wvif->update_tim_work); - } - - for (i = 0; i < num_queues; i++) { - skb = skb_dequeue(&queues[i]->normal); - if (skb) { - atomic_inc(&queues[i]->pending_frames); - trace_queues_stats(wdev, queues[i]); - return skb; - } - } - return NULL; -} - -struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev) -{ - struct wfx_tx_priv *tx_priv; - struct sk_buff *skb; - - if (atomic_read(&wdev->tx_lock)) - return NULL; - skb = wfx_tx_queues_get_skb(wdev); - if (!skb) - return NULL; - skb_queue_tail(&wdev->tx_pending, skb); - wake_up(&wdev->tx_dequeue); - tx_priv = wfx_skb_tx_priv(skb); - tx_priv->xmit_timestamp = ktime_get(); - return (struct wfx_hif_msg *)skb->data; -} diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h deleted file mode 100644 index 4731debca93d..000000000000 --- a/drivers/staging/wfx/queue.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Queue between the tx operation and the bh workqueue. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_QUEUE_H -#define WFX_QUEUE_H - -#include -#include - -struct wfx_dev; -struct wfx_vif; - -struct wfx_queue { - struct sk_buff_head normal; - struct sk_buff_head cab; /* Content After (DTIM) Beacon */ - atomic_t pending_frames; - int priority; -}; - -void wfx_tx_lock(struct wfx_dev *wdev); -void wfx_tx_unlock(struct wfx_dev *wdev); -void wfx_tx_flush(struct wfx_dev *wdev); -void wfx_tx_lock_flush(struct wfx_dev *wdev); - -void wfx_tx_queues_init(struct wfx_vif *wvif); -void wfx_tx_queues_check_empty(struct wfx_vif *wvif); -bool wfx_tx_queues_has_cab(struct wfx_vif *wvif); -void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb); -struct wfx_hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev); - -bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue); -void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, - struct sk_buff_head *dropped); - -struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id); -void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped); -unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff *skb); -void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms); - -#endif diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c deleted file mode 100644 index 7f34f0d322f9..000000000000 --- a/drivers/staging/wfx/scan.c +++ /dev/null @@ -1,144 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Scan related functions. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#include - -#include "scan.h" -#include "wfx.h" -#include "sta.h" -#include "hif_tx_mib.h" - -static void wfx_ieee80211_scan_completed_compat(struct ieee80211_hw *hw, bool aborted) -{ - struct cfg80211_scan_info info = { - .aborted = aborted, - }; - - ieee80211_scan_completed(hw, &info); -} - -static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request *req) -{ - struct sk_buff *skb; - - skb = ieee80211_probereq_get(wvif->wdev->hw, wvif->vif->addr, NULL, 0, req->ie_len); - if (!skb) - return -ENOMEM; - - skb_put_data(skb, req->ie, req->ie_len); - wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBREQ, 0); - dev_kfree_skb(skb); - return 0; -} - -static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int start_idx) -{ - int i, ret; - struct ieee80211_channel *ch_start, *ch_cur; - - for (i = start_idx; i < req->n_channels; i++) { - ch_start = req->channels[start_idx]; - ch_cur = req->channels[i]; - WARN(ch_cur->band != NL80211_BAND_2GHZ, "band not supported"); - if (ch_cur->max_power != ch_start->max_power) - break; - if ((ch_cur->flags ^ ch_start->flags) & IEEE80211_CHAN_NO_IR) - break; - } - wfx_tx_lock_flush(wvif->wdev); - wvif->scan_abort = false; - reinit_completion(&wvif->scan_complete); - ret = wfx_hif_scan(wvif, req, start_idx, i - start_idx); - if (ret) { - wfx_tx_unlock(wvif->wdev); - return -EIO; - } - ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); - if (!ret) { - wfx_hif_stop_scan(wvif); - ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ); - dev_dbg(wvif->wdev->dev, "scan timeout (%d channels done)\n", - wvif->scan_nb_chan_done); - } - if (!ret) { - dev_err(wvif->wdev->dev, "scan didn't stop\n"); - ret = -ETIMEDOUT; - } else if (wvif->scan_abort) { - dev_notice(wvif->wdev->dev, "scan abort\n"); - ret = -ECONNABORTED; - } else if (wvif->scan_nb_chan_done > i - start_idx) { - ret = -EIO; - } else { - 
ret = wvif->scan_nb_chan_done; - } - if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower) - wfx_hif_set_output_power(wvif, wvif->vif->bss_conf.txpower); - wfx_tx_unlock(wvif->wdev); - return ret; -} - -/* It is not really necessary to run scan request asynchronously. However, - * there is a bug in "iw scan" when ieee80211_scan_completed() is called before - * wfx_hw_scan() return - */ -void wfx_hw_scan_work(struct work_struct *work) -{ - struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work); - struct ieee80211_scan_request *hw_req = wvif->scan_req; - int chan_cur, ret, err; - - mutex_lock(&wvif->wdev->conf_mutex); - mutex_lock(&wvif->scan_lock); - if (wvif->join_in_progress) { - dev_info(wvif->wdev->dev, "abort in-progress REQ_JOIN"); - wfx_reset(wvif); - } - update_probe_tmpl(wvif, &hw_req->req); - chan_cur = 0; - err = 0; - do { - ret = send_scan_req(wvif, &hw_req->req, chan_cur); - if (ret > 0) { - chan_cur += ret; - err = 0; - } - if (!ret) - err++; - if (err > 2) { - dev_err(wvif->wdev->dev, "scan has not been able to start\n"); - ret = -ETIMEDOUT; - } - } while (ret >= 0 && chan_cur < hw_req->req.n_channels); - mutex_unlock(&wvif->scan_lock); - mutex_unlock(&wvif->wdev->conf_mutex); - wfx_ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0); -} - -int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_scan_request *hw_req) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - WARN_ON(hw_req->req.n_channels > HIF_API_MAX_NB_CHANNELS); - wvif->scan_req = hw_req; - schedule_work(&wvif->scan_work); - return 0; -} - -void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - wvif->scan_abort = true; - wfx_hif_stop_scan(wvif); -} - -void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done) -{ - wvif->scan_nb_chan_done = nb_chan_done; - complete(&wvif->scan_complete); -} diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h deleted file mode 100644 index 78e3b984f375..000000000000 --- a/drivers/staging/wfx/scan.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Scan related functions. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_SCAN_H -#define WFX_SCAN_H - -#include - -struct wfx_dev; -struct wfx_vif; - -void wfx_hw_scan_work(struct work_struct *work); -int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_scan_request *req); -void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done); - -#endif diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c deleted file mode 100644 index b1e9fb14d2b4..000000000000 --- a/drivers/staging/wfx/sta.c +++ /dev/null @@ -1,794 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Implementation of mac80211 API. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. 
- * Copyright (c) 2010, ST-Ericsson - */ -#include -#include - -#include "sta.h" -#include "wfx.h" -#include "fwio.h" -#include "bh.h" -#include "key.h" -#include "scan.h" -#include "debug.h" -#include "hif_tx.h" -#include "hif_tx_mib.h" - -#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2 - -u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates) -{ - int i; - u32 ret = 0; - /* The device only supports 2GHz */ - struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]; - - for (i = 0; i < sband->n_bitrates; i++) { - if (rates & BIT(i)) { - if (i >= sband->n_bitrates) - dev_warn(wdev->dev, "unsupported basic rate\n"); - else - ret |= BIT(sband->bitrates[i].hw_value); - } - } - return ret; -} - -void wfx_cooling_timeout_work(struct work_struct *work) -{ - struct wfx_dev *wdev = container_of(to_delayed_work(work), struct wfx_dev, - cooling_timeout_work); - - wdev->chip_frozen = true; - wfx_tx_unlock(wdev); -} - -void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd) -{ - if (cmd == STA_NOTIFY_AWAKE) { - /* Device recover normal temperature */ - if (cancel_delayed_work(&wdev->cooling_timeout_work)) - wfx_tx_unlock(wdev); - } else { - /* Device is too hot */ - schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ); - wfx_tx_lock(wdev); - } -} - -static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon) -{ - static const struct wfx_hif_ie_table_entry filter_ies[] = { - { - .ie_id = WLAN_EID_VENDOR_SPECIFIC, - .has_changed = 1, - .no_longer = 1, - .has_appeared = 1, - .oui = { 0x50, 0x6F, 0x9A }, - }, { - .ie_id = WLAN_EID_HT_OPERATION, - .has_changed = 1, - .no_longer = 1, - .has_appeared = 1, - }, { - .ie_id = WLAN_EID_ERP_INFO, - .has_changed = 1, - .no_longer = 1, - .has_appeared = 1, - }, { - .ie_id = WLAN_EID_CHANNEL_SWITCH, - .has_changed = 1, - .no_longer = 1, - .has_appeared = 1, - } - }; - - if (!filter_beacon) { - wfx_hif_beacon_filter_control(wvif, 0, 1); - } else { - wfx_hif_set_beacon_filter_table(wvif, ARRAY_SIZE(filter_ies), filter_ies); - wfx_hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0); - } -} - -void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, - unsigned int *total_flags, u64 unused) -{ - struct wfx_vif *wvif = NULL; - struct wfx_dev *wdev = hw->priv; - bool filter_bssid, filter_prbreq, filter_beacon; - - /* Notes: - * - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered - * - PS-Poll (FIF_PSPOLL) are never filtered - * - RTS, CTS and Ack (FIF_CONTROL) are always filtered - * - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered - * - Firmware does (yet) allow to forward unicast traffic sent to other stations (aka. 
- * promiscuous mode) - */ - *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS | - FIF_PROBE_REQ | FIF_PSPOLL; - - mutex_lock(&wdev->conf_mutex); - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - mutex_lock(&wvif->scan_lock); - - /* Note: FIF_BCN_PRBRESP_PROMISC covers probe response and - * beacons from other BSS - */ - if (*total_flags & FIF_BCN_PRBRESP_PROMISC) - filter_beacon = false; - else - filter_beacon = true; - wfx_filter_beacon(wvif, filter_beacon); - - if (*total_flags & FIF_OTHER_BSS) - filter_bssid = false; - else - filter_bssid = true; - - /* In AP mode, chip can reply to probe request itself */ - if (*total_flags & FIF_PROBE_REQ && wvif->vif->type == NL80211_IFTYPE_AP) { - dev_dbg(wdev->dev, "do not forward probe request in AP mode\n"); - *total_flags &= ~FIF_PROBE_REQ; - } - - if (*total_flags & FIF_PROBE_REQ) - filter_prbreq = false; - else - filter_prbreq = true; - wfx_hif_set_rx_filter(wvif, filter_bssid, filter_prbreq); - - mutex_unlock(&wvif->scan_lock); - } - mutex_unlock(&wdev->conf_mutex); -} - -static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps) -{ - struct ieee80211_channel *chan0 = NULL, *chan1 = NULL; - struct ieee80211_conf *conf = &wvif->wdev->hw->conf; - - WARN(!wvif->vif->bss_conf.assoc && enable_ps, - "enable_ps is reliable only if associated"); - if (wdev_to_wvif(wvif->wdev, 0)) - chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan; - if (wdev_to_wvif(wvif->wdev, 1)) - chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan; - if (chan0 && chan1 && wvif->vif->type != NL80211_IFTYPE_AP) { - if (chan0->hw_value == chan1->hw_value) { - /* It is useless to enable PS if channels are the same. */ - if (enable_ps) - *enable_ps = false; - if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps) - dev_info(wvif->wdev->dev, "ignoring requested PS mode"); - return -1; - } - /* It is necessary to enable PS if channels are different. 
*/ - if (enable_ps) - *enable_ps = true; - if (wfx_api_older_than(wvif->wdev, 3, 2)) - return 0; - else - return 30; - } - if (enable_ps) - *enable_ps = wvif->vif->bss_conf.ps; - if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps) - return conf->dynamic_ps_timeout; - else - return -1; -} - -int wfx_update_pm(struct wfx_vif *wvif) -{ - int ps_timeout; - bool ps; - - if (!wvif->vif->bss_conf.assoc) - return 0; - ps_timeout = wfx_get_ps_timeout(wvif, &ps); - if (!ps) - ps_timeout = 0; - WARN_ON(ps_timeout < 0); - if (wvif->uapsd_mask) - ps_timeout = 0; - - if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete, TU_TO_JIFFIES(512))) - dev_warn(wvif->wdev->dev, "timeout while waiting of set_pm_mode_complete\n"); - return wfx_hif_set_pm(wvif, ps, ps_timeout); -} - -int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params) -{ - struct wfx_dev *wdev = hw->priv; - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - int old_uapsd = wvif->uapsd_mask; - - WARN_ON(queue >= hw->queues); - - mutex_lock(&wdev->conf_mutex); - assign_bit(queue, &wvif->uapsd_mask, params->uapsd); - wfx_hif_set_edca_queue_params(wvif, queue, params); - if (wvif->vif->type == NL80211_IFTYPE_STATION && old_uapsd != wvif->uapsd_mask) { - wfx_hif_set_uapsd_info(wvif, wvif->uapsd_mask); - wfx_update_pm(wvif); - } - mutex_unlock(&wdev->conf_mutex); - return 0; -} - -int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) -{ - struct wfx_dev *wdev = hw->priv; - struct wfx_vif *wvif = NULL; - - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) - wfx_hif_rts_threshold(wvif, value); - return 0; -} - -void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi) -{ - /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 - * RSSI = RCPI / 2 - 110 - */ - int rcpi_rssi; - int cqm_evt; - - rcpi_rssi = raw_rcpi_rssi / 2 - 110; - if (rcpi_rssi <= wvif->vif->bss_conf.cqm_rssi_thold) - cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; - else - cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; - ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL); -} - -static void wfx_beacon_loss_work(struct work_struct *work) -{ - struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif, - beacon_loss_work); - struct ieee80211_bss_conf *bss_conf = &wvif->vif->bss_conf; - - ieee80211_beacon_loss(wvif->vif); - schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int)); -} - -void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - wfx_hif_wep_default_key_id(wvif, idx); -} - -void wfx_reset(struct wfx_vif *wvif) -{ - struct wfx_dev *wdev = wvif->wdev; - - wfx_tx_lock_flush(wdev); - wfx_hif_reset(wvif, false); - wfx_tx_policy_init(wvif); - if (wvif_count(wdev) <= 1) - wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); - wfx_tx_unlock(wdev); - wvif->join_in_progress = false; - cancel_delayed_work_sync(&wvif->beacon_loss_work); - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) - wfx_update_pm(wvif); -} - -int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv; - - sta_priv->vif_id = wvif->id; - - if (vif->type == NL80211_IFTYPE_STATION) - wfx_hif_set_mfp(wvif, sta->mfp, sta->mfp); - - /* In station mode, the firmware interprets new link-id 
as a TDLS peer */ - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - return 0; - sta_priv->link_id = ffz(wvif->link_id_map); - wvif->link_id_map |= BIT(sta_priv->link_id); - WARN_ON(!sta_priv->link_id); - WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX); - wfx_hif_map_link(wvif, false, sta->addr, sta_priv->link_id, sta->mfp); - - return 0; -} - -int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv; - - /* See note in wfx_sta_add() */ - if (!sta_priv->link_id) - return 0; - /* FIXME add a mutex? */ - wfx_hif_map_link(wvif, true, sta->addr, sta_priv->link_id, false); - wvif->link_id_map &= ~BIT(sta_priv->link_id); - return 0; -} - -static int wfx_upload_ap_templates(struct wfx_vif *wvif) -{ - struct sk_buff *skb; - - skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif); - if (!skb) - return -ENOMEM; - wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS); - dev_kfree_skb(skb); - - skb = ieee80211_proberesp_get(wvif->wdev->hw, wvif->vif); - if (!skb) - return -ENOMEM; - wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES, API_RATE_INDEX_B_1MBPS); - dev_kfree_skb(skb); - return 0; -} - -static void wfx_set_mfp_ap(struct wfx_vif *wvif) -{ - struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif); - const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); - const u16 *ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, - skb->len - ieoffset); - const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16); - const int pairwise_cipher_suite_size = 4 / sizeof(u16); - const int akm_suite_size = 4 / sizeof(u16); - - if (ptr) { - ptr += pairwise_cipher_suite_count_offset; - if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return; - ptr += 1 + pairwise_cipher_suite_size * *ptr; - if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return; - ptr += 1 + akm_suite_size * *ptr; - if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb))) - return; - wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6)); - } -} - -int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - struct wfx_dev *wdev = wvif->wdev; - int ret; - - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) - wfx_update_pm(wvif); - wvif = (struct wfx_vif *)vif->drv_priv; - wfx_upload_ap_templates(wvif); - ret = wfx_hif_start(wvif, &vif->bss_conf, wvif->channel); - if (ret > 0) - return -EIO; - wfx_set_mfp_ap(wvif); - return ret; -} - -void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - wfx_reset(wvif); -} - -static void wfx_join(struct wfx_vif *wvif) -{ - int ret; - struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf; - struct cfg80211_bss *bss = NULL; - u8 ssid[IEEE80211_MAX_SSID_LEN]; - const u8 *ssidie = NULL; - int ssidlen = 0; - - wfx_tx_lock_flush(wvif->wdev); - - bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel, conf->bssid, NULL, 0, - IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); - if (!bss && !conf->ibss_joined) { - wfx_tx_unlock(wvif->wdev); - return; - } - - rcu_read_lock(); /* protect ssidie */ - if (bss) - ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); - if (ssidie) { - ssidlen = ssidie[1]; - if (ssidlen > IEEE80211_MAX_SSID_LEN) - ssidlen = IEEE80211_MAX_SSID_LEN; - memcpy(ssid, &ssidie[2], ssidlen); - } 
- rcu_read_unlock(); - - cfg80211_put_bss(wvif->wdev->hw->wiphy, bss); - - wvif->join_in_progress = true; - ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssidlen); - if (ret) { - ieee80211_connection_loss(wvif->vif); - wfx_reset(wvif); - } else { - /* Due to beacon filtering it is possible that the AP's beacon is not known for the - * mac80211 stack. Disable filtering temporary to make sure the stack receives at - * least one - */ - wfx_filter_beacon(wvif, false); - } - wfx_tx_unlock(wvif->wdev); -} - -static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *info) -{ - struct ieee80211_sta *sta = NULL; - int ampdu_density = 0; - bool greenfield = false; - - rcu_read_lock(); /* protect sta */ - if (info->bssid && !info->ibss_joined) - sta = ieee80211_find_sta(wvif->vif, info->bssid); - if (sta && sta->ht_cap.ht_supported) - ampdu_density = sta->ht_cap.ampdu_density; - if (sta && sta->ht_cap.ht_supported && - !(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)) - greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); - rcu_read_unlock(); - - wvif->join_in_progress = false; - wfx_hif_set_association_mode(wvif, ampdu_density, greenfield, info->use_short_preamble); - wfx_hif_keep_alive_period(wvif, 0); - /* beacon_loss_count is defined to 7 in net/mac80211/mlme.c. Let's use the same value. */ - wfx_hif_set_bss_params(wvif, info->aid, 7); - wfx_hif_set_beacon_wakeup_period(wvif, 1, 1); - wfx_update_pm(wvif); -} - -int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - wfx_upload_ap_templates(wvif); - wfx_join(wvif); - return 0; -} - -void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - wfx_reset(wvif); -} - -static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable) -{ - /* Driver has Content After DTIM Beacon in queue. Driver is waiting for a signal from the - * firmware. Since we are going to stop to send beacons, this signal will never happens. See - * also wfx_suspend_resume_mc() - */ - if (!enable && wfx_tx_queues_has_cab(wvif)) { - wvif->after_dtim_tx_allowed = true; - wfx_bh_request_tx(wvif->wdev); - } - wfx_hif_beacon_transmit(wvif, enable); -} - -void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, u32 changed) -{ - struct wfx_dev *wdev = hw->priv; - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - int i; - - mutex_lock(&wdev->conf_mutex); - - if (changed & BSS_CHANGED_BASIC_RATES || - changed & BSS_CHANGED_BEACON_INT || - changed & BSS_CHANGED_BSSID) { - if (vif->type == NL80211_IFTYPE_STATION) - wfx_join(wvif); - } - - if (changed & BSS_CHANGED_ASSOC) { - if (info->assoc || info->ibss_joined) - wfx_join_finalize(wvif, info); - else if (!info->assoc && vif->type == NL80211_IFTYPE_STATION) - wfx_reset(wvif); - else - dev_warn(wdev->dev, "misunderstood change: ASSOC\n"); - } - - if (changed & BSS_CHANGED_BEACON_INFO) { - if (vif->type != NL80211_IFTYPE_STATION) - dev_warn(wdev->dev, "misunderstood change: BEACON_INFO\n"); - wfx_hif_set_beacon_wakeup_period(wvif, info->dtim_period, info->dtim_period); - /* We temporary forwarded beacon for join process. It is now no more necessary. 
*/ - wfx_filter_beacon(wvif, true); - } - - if (changed & BSS_CHANGED_ARP_FILTER) { - for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) { - __be32 *arp_addr = &info->arp_addr_list[i]; - - if (info->arp_addr_cnt > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES) - arp_addr = NULL; - if (i >= info->arp_addr_cnt) - arp_addr = NULL; - wfx_hif_set_arp_ipv4_filter(wvif, i, arp_addr); - } - } - - if (changed & BSS_CHANGED_AP_PROBE_RESP || changed & BSS_CHANGED_BEACON) - wfx_upload_ap_templates(wvif); - - if (changed & BSS_CHANGED_BEACON_ENABLED) - wfx_enable_beacon(wvif, info->enable_beacon); - - if (changed & BSS_CHANGED_KEEP_ALIVE) - wfx_hif_keep_alive_period(wvif, - info->max_idle_period * USEC_PER_TU / USEC_PER_MSEC); - - if (changed & BSS_CHANGED_ERP_CTS_PROT) - wfx_hif_erp_use_protection(wvif, info->use_cts_prot); - - if (changed & BSS_CHANGED_ERP_SLOT) - wfx_hif_slot_time(wvif, info->use_short_slot ? 9 : 20); - - if (changed & BSS_CHANGED_CQM) - wfx_hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold, info->cqm_rssi_hyst); - - if (changed & BSS_CHANGED_TXPOWER) - wfx_hif_set_output_power(wvif, info->txpower); - - if (changed & BSS_CHANGED_PS) - wfx_update_pm(wvif); - - mutex_unlock(&wdev->conf_mutex); -} - -static int wfx_update_tim(struct wfx_vif *wvif) -{ - struct sk_buff *skb; - u16 tim_offset, tim_length; - u8 *tim_ptr; - - skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif, &tim_offset, &tim_length); - if (!skb) - return -ENOENT; - tim_ptr = skb->data + tim_offset; - - if (tim_offset && tim_length >= 6) { - /* Firmware handles DTIM counter internally */ - tim_ptr[2] = 0; - - /* Set/reset aid0 bit */ - if (wfx_tx_queues_has_cab(wvif)) - tim_ptr[4] |= 1; - else - tim_ptr[4] &= ~1; - } - - wfx_hif_update_ie_beacon(wvif, tim_ptr, tim_length); - dev_kfree_skb(skb); - - return 0; -} - -static void wfx_update_tim_work(struct work_struct *work) -{ - struct wfx_vif *wvif = container_of(work, struct wfx_vif, update_tim_work); - - wfx_update_tim(wvif); -} - -int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) -{ - struct wfx_dev *wdev = hw->priv; - struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *)&sta->drv_priv; - struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id); - - if (!wvif) { - dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__); - return -EIO; - } - schedule_work(&wvif->update_tim_work); - return 0; -} - -void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd) -{ - struct wfx_vif *wvif_it; - - if (notify_cmd != STA_NOTIFY_AWAKE) - return; - - /* Device won't be able to honor CAB if a scan is in progress on any interface. Prefer to - * skip this DTIM and wait for the next one. 
- */ - wvif_it = NULL; - while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL) - if (mutex_is_locked(&wvif_it->scan_lock)) - return; - - if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed) - dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)", - wfx_tx_queues_has_cab(wvif)); - wvif->after_dtim_tx_allowed = true; - wfx_bh_request_tx(wvif->wdev); -} - -int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) -{ - /* Aggregation is implemented fully in firmware */ - switch (params->action) { - case IEEE80211_AMPDU_RX_START: - case IEEE80211_AMPDU_RX_STOP: - /* Just acknowledge it to enable frame re-ordering */ - return 0; - default: - /* Leave the firmware doing its business for tx aggregation */ - return -EOPNOTSUPP; - } -} - -int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) -{ - return 0; -} - -void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) -{ -} - -void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed) -{ -} - -int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *conf) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - struct ieee80211_channel *ch = conf->def.chan; - - WARN(wvif->channel, "channel overwrite"); - wvif->channel = ch; - - return 0; -} - -void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *conf) -{ - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - struct ieee80211_channel *ch = conf->def.chan; - - WARN(wvif->channel != ch, "channel mismatch"); - wvif->channel = NULL; -} - -int wfx_config(struct ieee80211_hw *hw, u32 changed) -{ - return 0; -} - -int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - int i; - struct wfx_dev *wdev = hw->priv; - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | - IEEE80211_VIF_SUPPORTS_UAPSD | - IEEE80211_VIF_SUPPORTS_CQM_RSSI; - - mutex_lock(&wdev->conf_mutex); - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_AP: - break; - default: - mutex_unlock(&wdev->conf_mutex); - return -EOPNOTSUPP; - } - - /* FIXME: prefer use of container_of() to get vif */ - wvif->vif = vif; - wvif->wdev = wdev; - - wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */ - INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work); - INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work); - - init_completion(&wvif->set_pm_mode_complete); - complete(&wvif->set_pm_mode_complete); - INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work); - - mutex_init(&wvif->scan_lock); - init_completion(&wvif->scan_complete); - INIT_WORK(&wvif->scan_work, wfx_hw_scan_work); - - wfx_tx_queues_init(wvif); - wfx_tx_policy_init(wvif); - - for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { - if (!wdev->vif[i]) { - wdev->vif[i] = vif; - wvif->id = i; - break; - } - } - WARN(i == ARRAY_SIZE(wdev->vif), "try to instantiate more vif than supported"); - - wfx_hif_set_macaddr(wvif, vif->addr); - - mutex_unlock(&wdev->conf_mutex); - - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - /* Combo mode does not support Block Acks. 
We can re-enable them */ - if (wvif_count(wdev) == 1) - wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); - else - wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00); - } - return 0; -} - -void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct wfx_dev *wdev = hw->priv; - struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv; - - wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300)); - wfx_tx_queues_check_empty(wvif); - - mutex_lock(&wdev->conf_mutex); - WARN(wvif->link_id_map != 1, "corrupted state"); - - wfx_hif_reset(wvif, false); - wfx_hif_set_macaddr(wvif, NULL); - wfx_tx_policy_init(wvif); - - cancel_delayed_work_sync(&wvif->beacon_loss_work); - wdev->vif[wvif->id] = NULL; - wvif->vif = NULL; - - mutex_unlock(&wdev->conf_mutex); - - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - /* Combo mode does not support Block Acks. We can re-enable them */ - if (wvif_count(wdev) == 1) - wfx_hif_set_block_ack_policy(wvif, 0xFF, 0xFF); - else - wfx_hif_set_block_ack_policy(wvif, 0x00, 0x00); - } -} - -int wfx_start(struct ieee80211_hw *hw) -{ - return 0; -} - -void wfx_stop(struct ieee80211_hw *hw) -{ - struct wfx_dev *wdev = hw->priv; - - WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending)); -} diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h deleted file mode 100644 index c69b2227e9ac..000000000000 --- a/drivers/staging/wfx/sta.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Implementation of mac80211 API. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - */ -#ifndef WFX_STA_H -#define WFX_STA_H - -#include - -struct wfx_dev; -struct wfx_vif; - -struct wfx_sta_priv { - int link_id; - int vif_id; -}; - -/* mac80211 interface */ -int wfx_start(struct ieee80211_hw *hw); -void wfx_stop(struct ieee80211_hw *hw); -int wfx_config(struct ieee80211_hw *hw, u32 changed); -int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value); -void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); -void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, - unsigned int *total_flags, u64 unused); - -int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params); -void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, u32 changed); -int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, struct ieee80211_sta *sta); -int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set); -int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params); -int wfx_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf 
*conf); -void wfx_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf); -void wfx_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, u32 changed); -int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *conf); -void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *conf); - -/* Hardware API Callbacks */ -void wfx_cooling_timeout_work(struct work_struct *work); -void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd); -void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd); -void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi); -int wfx_update_pm(struct wfx_vif *wvif); - -/* Other Helpers */ -void wfx_reset(struct wfx_vif *wvif); -u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates); - -#endif diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h deleted file mode 100644 index e011e8a46bd5..000000000000 --- a/drivers/staging/wfx/traces.h +++ /dev/null @@ -1,496 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Tracepoints definitions. - * - * Copyright (c) 2018-2020, Silicon Laboratories, Inc. - */ - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM wfx - -#if !defined(_WFX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _WFX_TRACE_H - -#include -#include - -#include "bus.h" -#include "hif_api_cmd.h" -#include "hif_api_mib.h" - -/* The hell below need some explanations. For each symbolic number, we need to define it with - * TRACE_DEFINE_ENUM() and in a list for __print_symbolic. - * - * 1. Define a new macro that call TRACE_DEFINE_ENUM(): - * - * #define xxx_name(sym) TRACE_DEFINE_ENUM(sym); - * - * 2. Define list of all symbols: - * - * #define list_names \ - * ... \ - * xxx_name(XXX) \ - * ... - * - * 3. Instantiate that list_names: - * - * list_names - * - * 4. Redefine xxx_name() as an entry of array for __print_symbolic() - * - * #undef xxx_name - * #define xxx_name(msg) { msg, #msg }, - * - * 5. list_name can now nearly be used with __print_symbolic() but, __print_symbolic() dislike - * last comma of list. 
So we define a new list with a dummy element: - * - * #define list_for_print_symbolic list_names { -1, NULL } - */ - -#define _hif_msg_list \ - hif_cnf_name(ADD_KEY) \ - hif_cnf_name(BEACON_TRANSMIT) \ - hif_cnf_name(EDCA_QUEUE_PARAMS) \ - hif_cnf_name(JOIN) \ - hif_cnf_name(MAP_LINK) \ - hif_cnf_name(READ_MIB) \ - hif_cnf_name(REMOVE_KEY) \ - hif_cnf_name(RESET) \ - hif_cnf_name(SET_BSS_PARAMS) \ - hif_cnf_name(SET_PM_MODE) \ - hif_cnf_name(START) \ - hif_cnf_name(START_SCAN) \ - hif_cnf_name(STOP_SCAN) \ - hif_cnf_name(TX) \ - hif_cnf_name(MULTI_TRANSMIT) \ - hif_cnf_name(UPDATE_IE) \ - hif_cnf_name(WRITE_MIB) \ - hif_cnf_name(CONFIGURATION) \ - hif_cnf_name(CONTROL_GPIO) \ - hif_cnf_name(PREVENT_ROLLBACK) \ - hif_cnf_name(SET_SL_MAC_KEY) \ - hif_cnf_name(SL_CONFIGURE) \ - hif_cnf_name(SL_EXCHANGE_PUB_KEYS) \ - hif_cnf_name(SHUT_DOWN) \ - hif_ind_name(EVENT) \ - hif_ind_name(JOIN_COMPLETE) \ - hif_ind_name(RX) \ - hif_ind_name(SCAN_CMPL) \ - hif_ind_name(SET_PM_MODE_CMPL) \ - hif_ind_name(SUSPEND_RESUME_TX) \ - hif_ind_name(SL_EXCHANGE_PUB_KEYS) \ - hif_ind_name(ERROR) \ - hif_ind_name(EXCEPTION) \ - hif_ind_name(GENERIC) \ - hif_ind_name(WAKEUP) \ - hif_ind_name(STARTUP) - -#define hif_msg_list_enum _hif_msg_list - -#undef hif_cnf_name -#undef hif_ind_name -#define hif_cnf_name(msg) TRACE_DEFINE_ENUM(HIF_CNF_ID_##msg); -#define hif_ind_name(msg) TRACE_DEFINE_ENUM(HIF_IND_ID_##msg); -hif_msg_list_enum -#undef hif_cnf_name -#undef hif_ind_name -#define hif_cnf_name(msg) { HIF_CNF_ID_##msg, #msg }, -#define hif_ind_name(msg) { HIF_IND_ID_##msg, #msg }, -#define hif_msg_list hif_msg_list_enum { -1, NULL } - -#define _hif_mib_list \ - hif_mib_name(ARP_IP_ADDRESSES_TABLE) \ - hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \ - hif_mib_name(BEACON_FILTER_ENABLE) \ - hif_mib_name(BEACON_FILTER_TABLE) \ - hif_mib_name(BEACON_STATS) \ - hif_mib_name(BEACON_WAKEUP_PERIOD) \ - hif_mib_name(BLOCK_ACK_POLICY) \ - hif_mib_name(CCA_CONFIG) \ - hif_mib_name(CONFIG_DATA_FILTER) \ - hif_mib_name(COUNTERS_TABLE) \ - hif_mib_name(CURRENT_TX_POWER_LEVEL) \ - hif_mib_name(DOT11_MAC_ADDRESS) \ - hif_mib_name(DOT11_MAX_RECEIVE_LIFETIME) \ - hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \ - hif_mib_name(DOT11_RTS_THRESHOLD) \ - hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \ - hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \ - hif_mib_name(EXTENDED_COUNTERS_TABLE) \ - hif_mib_name(GL_BLOCK_ACK_INFO) \ - hif_mib_name(GL_OPERATIONAL_POWER_MODE) \ - hif_mib_name(GL_SET_MULTI_MSG) \ - hif_mib_name(GRP_SEQ_COUNTER) \ - hif_mib_name(INACTIVITY_TIMER) \ - hif_mib_name(INTERFACE_PROTECTION) \ - hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \ - hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \ - hif_mib_name(KEEP_ALIVE_PERIOD) \ - hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \ - hif_mib_name(MAGIC_DATAFRAME_CONDITION) \ - hif_mib_name(MAX_TX_POWER_LEVEL) \ - hif_mib_name(NON_ERP_PROTECTION) \ - hif_mib_name(NS_IP_ADDRESSES_TABLE) \ - hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \ - hif_mib_name(PORT_DATAFRAME_CONDITION) \ - hif_mib_name(PROTECTED_MGMT_POLICY) \ - hif_mib_name(RCPI_RSSI_THRESHOLD) \ - hif_mib_name(RX_FILTER) \ - hif_mib_name(SET_ASSOCIATION_MODE) \ - hif_mib_name(SET_DATA_FILTERING) \ - hif_mib_name(SET_HT_PROTECTION) \ - hif_mib_name(SET_TX_RATE_RETRY_POLICY) \ - hif_mib_name(SET_UAPSD_INFORMATION) \ - hif_mib_name(SLOT_TIME) \ - hif_mib_name(STATISTICS_TABLE) \ - hif_mib_name(TEMPLATE_FRAME) \ - hif_mib_name(TSF_COUNTER) \ - hif_mib_name(UC_MC_BC_DATAFRAME_CONDITION) - -#define hif_mib_list_enum _hif_mib_list - -#undef hif_mib_name 
-#define hif_mib_name(mib) TRACE_DEFINE_ENUM(HIF_MIB_ID_##mib); -hif_mib_list_enum -#undef hif_mib_name -#define hif_mib_name(mib) { HIF_MIB_ID_##mib, #mib }, -#define hif_mib_list hif_mib_list_enum { -1, NULL } - -DECLARE_EVENT_CLASS(hif_data, - TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), - TP_ARGS(hif, tx_fill_level, is_recv), - TP_STRUCT__entry( - __field(int, tx_fill_level) - __field(int, msg_id) - __field(const char *, msg_type) - __field(int, msg_len) - __field(int, buf_len) - __field(int, if_id) - __field(int, mib) - __array(u8, buf, 128) - ), - TP_fast_assign( - int header_len; - - __entry->tx_fill_level = tx_fill_level; - __entry->msg_len = le16_to_cpu(hif->len); - __entry->msg_id = hif->id; - __entry->if_id = hif->interface; - if (is_recv) - __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF"; - else - __entry->msg_type = "REQ"; - if (!is_recv && - (__entry->msg_id == HIF_REQ_ID_READ_MIB || - __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) { - __entry->mib = le16_to_cpup((__le16 *)hif->body); - header_len = 4; - } else { - __entry->mib = -1; - header_len = 0; - } - __entry->buf_len = min_t(int, __entry->msg_len, sizeof(__entry->buf)) - - sizeof(struct wfx_hif_msg) - header_len; - memcpy(__entry->buf, hif->body + header_len, __entry->buf_len); - ), - TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)", - __entry->tx_fill_level, - __entry->if_id, - __entry->msg_type, - __print_symbolic(__entry->msg_id, hif_msg_list), - __entry->mib != -1 ? "/" : "", - __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "", - __print_hex(__entry->buf, __entry->buf_len), - __entry->msg_len > sizeof(__entry->buf) ? " ..." : "", - __entry->msg_len - ) -); -DEFINE_EVENT(hif_data, hif_send, - TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), - TP_ARGS(hif, tx_fill_level, is_recv)); -#define _trace_hif_send(hif, tx_fill_level)\ - trace_hif_send(hif, tx_fill_level, false) -DEFINE_EVENT(hif_data, hif_recv, - TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level, bool is_recv), - TP_ARGS(hif, tx_fill_level, is_recv)); -#define _trace_hif_recv(hif, tx_fill_level)\ - trace_hif_recv(hif, tx_fill_level, true) - -#define wfx_reg_list_enum \ - wfx_reg_name(WFX_REG_CONFIG, "CONFIG") \ - wfx_reg_name(WFX_REG_CONTROL, "CONTROL") \ - wfx_reg_name(WFX_REG_IN_OUT_QUEUE, "QUEUE") \ - wfx_reg_name(WFX_REG_AHB_DPORT, "AHB") \ - wfx_reg_name(WFX_REG_BASE_ADDR, "BASE_ADDR") \ - wfx_reg_name(WFX_REG_SRAM_DPORT, "SRAM") \ - wfx_reg_name(WFX_REG_SET_GEN_R_W, "SET_GEN_R_W") \ - wfx_reg_name(WFX_REG_FRAME_OUT, "FRAME_OUT") - -#undef wfx_reg_name -#define wfx_reg_name(sym, name) TRACE_DEFINE_ENUM(sym); -wfx_reg_list_enum -#undef wfx_reg_name -#define wfx_reg_name(sym, name) { sym, name }, -#define wfx_reg_list wfx_reg_list_enum { -1, NULL } - -DECLARE_EVENT_CLASS(io_data, - TP_PROTO(int reg, int addr, const void *io_buf, size_t len), - TP_ARGS(reg, addr, io_buf, len), - TP_STRUCT__entry( - __field(int, reg) - __field(int, addr) - __field(int, msg_len) - __field(int, buf_len) - __array(u8, buf, 32) - __array(u8, addr_str, 10) - ), - TP_fast_assign( - __entry->reg = reg; - __entry->addr = addr; - __entry->msg_len = len; - __entry->buf_len = min_t(int, sizeof(__entry->buf), __entry->msg_len); - memcpy(__entry->buf, io_buf, __entry->buf_len); - if (addr >= 0) - snprintf(__entry->addr_str, 10, "/%08x", addr); - else - __entry->addr_str[0] = 0; - ), - TP_printk("%s%s: %s%s (%d bytes)", - __print_symbolic(__entry->reg, wfx_reg_list), - __entry->addr_str, - 
__print_hex(__entry->buf, __entry->buf_len), - __entry->msg_len > sizeof(__entry->buf) ? " ..." : "", - __entry->msg_len - ) -); -DEFINE_EVENT(io_data, io_write, - TP_PROTO(int reg, int addr, const void *io_buf, size_t len), - TP_ARGS(reg, addr, io_buf, len)); -#define _trace_io_ind_write(reg, addr, io_buf, len)\ - trace_io_write(reg, addr, io_buf, len) -#define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len) -DEFINE_EVENT(io_data, io_read, - TP_PROTO(int reg, int addr, const void *io_buf, size_t len), - TP_ARGS(reg, addr, io_buf, len)); -#define _trace_io_ind_read(reg, addr, io_buf, len)\ - trace_io_read(reg, addr, io_buf, len) -#define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len) - -DECLARE_EVENT_CLASS(io_data32, - TP_PROTO(int reg, int addr, u32 val), - TP_ARGS(reg, addr, val), - TP_STRUCT__entry( - __field(int, reg) - __field(int, addr) - __field(int, val) - __array(u8, addr_str, 10) - ), - TP_fast_assign( - __entry->reg = reg; - __entry->addr = addr; - __entry->val = val; - if (addr >= 0) - snprintf(__entry->addr_str, 10, "/%08x", addr); - else - __entry->addr_str[0] = 0; - ), - TP_printk("%s%s: %08x", - __print_symbolic(__entry->reg, wfx_reg_list), - __entry->addr_str, - __entry->val - ) -); -DEFINE_EVENT(io_data32, io_write32, - TP_PROTO(int reg, int addr, u32 val), - TP_ARGS(reg, addr, val)); -#define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val) -#define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val) -DEFINE_EVENT(io_data32, io_read32, - TP_PROTO(int reg, int addr, u32 val), - TP_ARGS(reg, addr, val)); -#define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val) -#define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val) - -DECLARE_EVENT_CLASS(piggyback, - TP_PROTO(u32 val, bool ignored), - TP_ARGS(val, ignored), - TP_STRUCT__entry( - __field(int, val) - __field(bool, ignored) - ), - TP_fast_assign( - __entry->val = val; - __entry->ignored = ignored; - ), - TP_printk("CONTROL: %08x%s", - __entry->val, - __entry->ignored ? " (ignored)" : "" - ) -); -DEFINE_EVENT(piggyback, piggyback, - TP_PROTO(u32 val, bool ignored), - TP_ARGS(val, ignored)); -#define _trace_piggyback(val, ignored) trace_piggyback(val, ignored) - -TRACE_EVENT(bh_stats, - TP_PROTO(int ind, int req, int cnf, int busy, bool release), - TP_ARGS(ind, req, cnf, busy, release), - TP_STRUCT__entry( - __field(int, ind) - __field(int, req) - __field(int, cnf) - __field(int, busy) - __field(bool, release) - ), - TP_fast_assign( - __entry->ind = ind; - __entry->req = req; - __entry->cnf = cnf; - __entry->busy = busy; - __entry->release = release; - ), - TP_printk("IND/REQ/CNF:%3d/%3d/%3d, REQ in progress:%3d, WUP: %s", - __entry->ind, - __entry->req, - __entry->cnf, - __entry->busy, - __entry->release ? 
"release" : "keep" - ) -); -#define _trace_bh_stats(ind, req, cnf, busy, release)\ - trace_bh_stats(ind, req, cnf, busy, release) - -TRACE_EVENT(tx_stats, - TP_PROTO(const struct wfx_hif_cnf_tx *tx_cnf, const struct sk_buff *skb, - int delay), - TP_ARGS(tx_cnf, skb, delay), - TP_STRUCT__entry( - __field(int, pkt_id) - __field(int, delay_media) - __field(int, delay_queue) - __field(int, delay_fw) - __field(int, ack_failures) - __field(int, flags) - __array(int, rate, 4) - __array(int, tx_count, 4) - ), - TP_fast_assign( - /* Keep sync with wfx_rates definition in main.c */ - static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13 }; - const struct ieee80211_tx_info *tx_info = - (const struct ieee80211_tx_info *)skb->cb; - const struct ieee80211_tx_rate *rates = tx_info->driver_rates; - int i; - - __entry->pkt_id = tx_cnf->packet_id; - __entry->delay_media = le32_to_cpu(tx_cnf->media_delay); - __entry->delay_queue = le32_to_cpu(tx_cnf->tx_queue_delay); - __entry->delay_fw = delay; - __entry->ack_failures = tx_cnf->ack_failures; - if (!tx_cnf->status || __entry->ack_failures) - __entry->ack_failures += 1; - - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - if (rates[0].flags & IEEE80211_TX_RC_MCS) - __entry->rate[i] = rates[i].idx; - else - __entry->rate[i] = hw_rate[rates[i].idx]; - __entry->tx_count[i] = rates[i].count; - } - __entry->flags = 0; - if (rates[0].flags & IEEE80211_TX_RC_MCS) - __entry->flags |= 0x01; - if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI) - __entry->flags |= 0x02; - if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD) - __entry->flags |= 0x04; - if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) - __entry->flags |= 0x08; - if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) - __entry->flags |= 0x10; - if (tx_cnf->status) - __entry->flags |= 0x20; - if (tx_cnf->status == HIF_STATUS_TX_FAIL_REQUEUE) - __entry->flags |= 0x40; - ), - TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus", - __entry->pkt_id, - __print_flags(__entry->flags, NULL, - { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" }, { 0x08, "R" }, - { 0x10, "D" }, { 0x20, "F" }, { 0x40, "Q" }), - __entry->rate[0], - __entry->tx_count[0], - __entry->rate[1], - __entry->tx_count[1], - __entry->rate[2], - __entry->tx_count[2], - __entry->rate[3], - __entry->tx_count[3], - __entry->ack_failures, - __entry->delay_media, - __entry->delay_queue, - __entry->delay_fw - ) -); -#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay) - -TRACE_EVENT(queues_stats, - TP_PROTO(struct wfx_dev *wdev, const struct wfx_queue *elected_queue), - TP_ARGS(wdev, elected_queue), - TP_STRUCT__entry( - __field(int, vif_id) - __field(int, queue_id) - __array(int, hw, IEEE80211_NUM_ACS * 2) - __array(int, drv, IEEE80211_NUM_ACS * 2) - __array(int, cab, IEEE80211_NUM_ACS * 2) - ), - TP_fast_assign( - const struct wfx_queue *queue; - struct wfx_vif *wvif; - int i, j; - - for (j = 0; j < IEEE80211_NUM_ACS * 2; j++) { - __entry->hw[j] = -1; - __entry->drv[j] = -1; - __entry->cab[j] = -1; - } - __entry->vif_id = -1; - __entry->queue_id = -1; - wvif = NULL; - while ((wvif = wvif_iterate(wdev, wvif)) != NULL) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - j = wvif->id * IEEE80211_NUM_ACS + i; - WARN_ON(j >= IEEE80211_NUM_ACS * 2); - queue = &wvif->tx_queue[i]; - __entry->hw[j] = atomic_read(&queue->pending_frames); - __entry->drv[j] = skb_queue_len(&queue->normal); - __entry->cab[j] = skb_queue_len(&queue->cab); - if (queue == elected_queue) { - 
__entry->vif_id = wvif->id; - __entry->queue_id = i; - } - } - } - ), - TP_printk("got skb from %d/%d, pend. hw/norm/cab: [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ] [ %d/%d/%d %d/%d/%d %d/%d/%d %d/%d/%d ]", - __entry->vif_id, __entry->queue_id, - __entry->hw[0], __entry->drv[0], __entry->cab[0], - __entry->hw[1], __entry->drv[1], __entry->cab[1], - __entry->hw[2], __entry->drv[2], __entry->cab[2], - __entry->hw[3], __entry->drv[3], __entry->cab[3], - __entry->hw[4], __entry->drv[4], __entry->cab[4], - __entry->hw[5], __entry->drv[5], __entry->cab[5], - __entry->hw[6], __entry->drv[6], __entry->cab[6], - __entry->hw[7], __entry->drv[7], __entry->cab[7] - ) -); - -#endif - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE traces - -#include diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h deleted file mode 100644 index 6594cc647c2f..000000000000 --- a/drivers/staging/wfx/wfx.h +++ /dev/null @@ -1,162 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Common private data. - * - * Copyright (c) 2017-2020, Silicon Laboratories, Inc. - * Copyright (c) 2010, ST-Ericsson - * Copyright (c) 2006, Michael Wu - * Copyright 2004-2006 Jean-Baptiste Note , et al. - */ -#ifndef WFX_H -#define WFX_H - -#include -#include -#include -#include -#include - -#include "bh.h" -#include "data_tx.h" -#include "main.h" -#include "queue.h" -#include "hif_tx.h" - -#define USEC_PER_TXOP 32 /* see struct ieee80211_tx_queue_params */ -#define USEC_PER_TU 1024 - -struct wfx_hwbus_ops; - -struct wfx_dev { - struct wfx_platform_data pdata; - struct device *dev; - struct ieee80211_hw *hw; - struct ieee80211_vif *vif[2]; - struct mac_address addresses[2]; - const struct wfx_hwbus_ops *hwbus_ops; - void *hwbus_priv; - - u8 keyset; - struct completion firmware_ready; - struct wfx_hif_ind_startup hw_caps; - struct wfx_hif hif; - struct delayed_work cooling_timeout_work; - bool poll_irq; - bool chip_frozen; - struct mutex conf_mutex; - - struct wfx_hif_cmd hif_cmd; - struct sk_buff_head tx_pending; - wait_queue_head_t tx_dequeue; - atomic_t tx_lock; - - atomic_t packet_id; - u32 key_map; - - struct wfx_hif_rx_stats rx_stats; - struct mutex rx_stats_lock; - struct wfx_hif_tx_power_loop_info tx_power_loop_info; - struct mutex tx_power_loop_info_lock; -}; - -struct wfx_vif { - struct wfx_dev *wdev; - struct ieee80211_vif *vif; - struct ieee80211_channel *channel; - int id; - - u32 link_id_map; - - bool after_dtim_tx_allowed; - bool join_in_progress; - - struct delayed_work beacon_loss_work; - - struct wfx_queue tx_queue[4]; - struct wfx_tx_policy_cache tx_policy_cache; - struct work_struct tx_policy_upload_work; - - struct work_struct update_tim_work; - - unsigned long uapsd_mask; - - /* avoid some operations in parallel with scan */ - struct mutex scan_lock; - struct work_struct scan_work; - struct completion scan_complete; - int scan_nb_chan_done; - bool scan_abort; - struct ieee80211_scan_request *scan_req; - - struct completion set_pm_mode_complete; -}; - -static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id) -{ - if (vif_id >= ARRAY_SIZE(wdev->vif)) { - dev_dbg(wdev->dev, "requesting non-existent vif: %d\n", vif_id); - return NULL; - } - vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif)); - if (!wdev->vif[vif_id]) - return NULL; - return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv; -} - -static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev, struct wfx_vif 
*cur) -{ - int i; - int mark = 0; - struct wfx_vif *tmp; - - if (!cur) - mark = 1; - for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { - tmp = wdev_to_wvif(wdev, i); - if (mark && tmp) - return tmp; - if (tmp == cur) - mark = 1; - } - return NULL; -} - -static inline int wvif_count(struct wfx_dev *wdev) -{ - int i; - int ret = 0; - struct wfx_vif *wvif; - - for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) { - wvif = wdev_to_wvif(wdev, i); - if (wvif) - ret++; - } - return ret; -} - -static inline void memreverse(u8 *src, u8 length) -{ - u8 *lo = src; - u8 *hi = src + length - 1; - u8 swap; - - while (lo < hi) { - swap = *lo; - *lo++ = *hi; - *hi-- = swap; - } -} - -static inline int memzcmp(void *src, unsigned int size) -{ - u8 *buf = src; - - if (!size) - return 0; - if (*buf) - return 1; - return memcmp(buf, buf + 1, size - 1); -} - -#endif -- cgit v1.2.3 From 40379a0084c2f65eb62c102f5bbf5cdc14a50410 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 4 Apr 2022 15:08:15 +0300 Subject: net/mlx5_fpga: Drop INNOVA TLS support Mellanox INNOVA TLS cards are EOL in May, 2018 [1]. As such, the code is unmaintained, untested and not in-use by any upstream/distro oriented customers. In order to reduce code complexity, drop the kernel code. [1] https://network.nvidia.com/related-docs/eol/LCR-000286.pdf Link: https://lore.kernel.org/r/b88add368def721ea9d054cb69def72d9e3f67aa.1649073691.git.leonro@nvidia.com Reviewed-by: Tariq Toukan Reviewed-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 14 - drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1 - .../net/ethernet/mellanox/mlx5/core/accel/tls.c | 47 -- .../net/ethernet/mellanox/mlx5/core/accel/tls.h | 56 -- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 180 +----- .../net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 48 +- .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 324 +---------- .../mellanox/mlx5/core/en_accel/tls_rxtx.h | 8 +- .../mellanox/mlx5/core/en_accel/tls_stats.c | 17 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 - .../net/ethernet/mellanox/mlx5/core/fpga/core.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 622 --------------------- drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h | 74 --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 10 - include/linux/mlx5/mlx5_ifc_fpga.h | 63 --- 16 files changed, 10 insertions(+), 1459 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 4ba1a78c6515..21df10bb14c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -177,20 +177,6 @@ config MLX5_EN_IPSEC Note: Support for hardware with this capability needs to be selected for this option to become available. -config MLX5_FPGA_TLS - bool "Mellanox Technologies TLS Innova support" - depends on TLS_DEVICE - depends on TLS=y || MLX5_CORE=m - depends on MLX5_CORE_EN - depends on MLX5_FPGA - select MLX5_EN_TLS - help - Build TLS support for the Innova family of network cards by Mellanox - Technologies. Innova network cards are comprised of a ConnectX chip - and an FPGA chip on one board. 
If you select this option, the - mlx5_core driver will include the Innova FPGA core and allow building - sandbox-specific client drivers. - config MLX5_TLS bool "Mellanox Technologies TLS Connect-X support" depends on TLS_DEVICE diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 4bc666714a35..33525f7d0aa0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -90,7 +90,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o -mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c index 6c2b86a26863..fe82c4140d85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -37,53 +37,6 @@ #include "mlx5_core.h" #include "lib/mlx5.h" -#ifdef CONFIG_MLX5_FPGA_TLS -#include "fpga/tls.h" - -int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx) -{ - return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, p_swid, - direction_sx); -} - -void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - bool direction_sx) -{ - mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx); -} - -int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn) -{ - return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn); -} - -bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_is_tls_device(mdev) || - mlx5_accel_is_ktls_device(mdev); -} - -u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_tls_device_caps(mdev); -} - -int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_tls_init(mdev); -} - -void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) -{ - mlx5_fpga_tls_cleanup(mdev); -} -#endif - #ifdef CONFIG_MLX5_TLS int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, struct tls_crypto_info *crypto_info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index fd874f0c380a..6f92ebe3cc82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -97,60 +97,4 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, struct tls_crypto_info *crypto_info) { return false; } #endif - -enum { - MLX5_ACCEL_TLS_TX = BIT(0), - MLX5_ACCEL_TLS_RX = BIT(1), - MLX5_ACCEL_TLS_V12 = BIT(2), - MLX5_ACCEL_TLS_V13 = BIT(3), - MLX5_ACCEL_TLS_LRO = BIT(4), - MLX5_ACCEL_TLS_IPV6 = BIT(5), - MLX5_ACCEL_TLS_AES_GCM128 = BIT(30), - MLX5_ACCEL_TLS_AES_GCM256 = BIT(31), -}; - -struct mlx5_ifc_tls_flow_bits { - u8 src_port[0x10]; - u8 dst_port[0x10]; - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; - u8 ipv6[0x1]; - u8 direction_sx[0x1]; - u8 reserved_at_2[0x1e]; -}; - -#ifdef CONFIG_MLX5_FPGA_TLS -int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, 
void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx); -void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - bool direction_sx); -int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn); -bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); -u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); -int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); -void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); - -#else - -static inline int -mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx) { return -ENOTSUPP; } -static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - bool direction_sx) { } -static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn) { return 0; } -static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) -{ - return mlx5_accel_is_ktls_device(mdev); -} -static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } -static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } -static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { } -#endif - #endif /* __MLX5_ACCEL_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8653ac0fd865..50818081bdc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -354,7 +354,6 @@ enum { MLX5E_RQ_STATE_AM, MLX5E_RQ_STATE_NO_CSUM_COMPLETE, MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ - MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */ MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */ }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index b8fc863aa68d..0c6e165c154e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -36,188 +36,12 @@ #include "en_accel/tls.h" #include "accel/tls.h" -static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk) -{ - struct inet_sock *inet = inet_sk(sk); - - MLX5_SET(tls_flow, flow, ipv6, 0); - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4), - &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); -} - -#if IS_ENABLED(CONFIG_IPV6) -static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk) -{ - struct ipv6_pinfo *np = inet6_sk(sk); - - MLX5_SET(tls_flow, flow, ipv6, 1); - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); -} -#endif - -static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk) -{ - struct inet_sock *inet = inet_sk(sk); - - memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport, - MLX5_FLD_SZ_BYTES(tls_flow, src_port)); - memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport, - MLX5_FLD_SZ_BYTES(tls_flow, dst_port)); 
-} - -static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps) -{ - switch (sk->sk_family) { - case AF_INET: - mlx5e_tls_set_ipv4_flow(flow, sk); - break; -#if IS_ENABLED(CONFIG_IPV6) - case AF_INET6: - if (!sk->sk_ipv6only && - ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { - mlx5e_tls_set_ipv4_flow(flow, sk); - break; - } - if (!(caps & MLX5_ACCEL_TLS_IPV6)) - goto error_out; - - mlx5e_tls_set_ipv6_flow(flow, sk); - break; -#endif - default: - goto error_out; - } - - mlx5e_tls_set_flow_tcp_ports(flow, sk); - return 0; -error_out: - return -EINVAL; -} - -static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, - enum tls_offload_ctx_dir direction, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct mlx5_core_dev *mdev = priv->mdev; - u32 caps = mlx5_accel_tls_device_caps(mdev); - int ret = -ENOMEM; - void *flow; - u32 swid; - - flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); - if (!flow) - return ret; - - ret = mlx5e_tls_set_flow(flow, sk, caps); - if (ret) - goto free_flow; - - ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info, - start_offload_tcp_sn, &swid, - direction == TLS_OFFLOAD_CTX_DIR_TX); - if (ret < 0) - goto free_flow; - - if (direction == TLS_OFFLOAD_CTX_DIR_TX) { - struct mlx5e_tls_offload_context_tx *tx_ctx = - mlx5e_get_tls_tx_context(tls_ctx); - - tx_ctx->swid = htonl(swid); - tx_ctx->expected_seq = start_offload_tcp_sn; - } else { - struct mlx5e_tls_offload_context_rx *rx_ctx = - mlx5e_get_tls_rx_context(tls_ctx); - - rx_ctx->handle = htonl(swid); - } - - return 0; -free_flow: - kfree(flow); - return ret; -} - -static void mlx5e_tls_del(struct net_device *netdev, - struct tls_context *tls_ctx, - enum tls_offload_ctx_dir direction) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - unsigned int handle; - - handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ? 
- mlx5e_get_tls_tx_context(tls_ctx)->swid : - mlx5e_get_tls_rx_context(tls_ctx)->handle); - - mlx5_accel_tls_del_flow(priv->mdev, handle, - direction == TLS_OFFLOAD_CTX_DIR_TX); -} - -static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk, - u32 seq, u8 *rcd_sn_data, - enum tls_offload_ctx_dir direction) -{ - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_tls_offload_context_rx *rx_ctx; - __be64 rcd_sn = *(__be64 *)rcd_sn_data; - - if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX)) - return -EINVAL; - rx_ctx = mlx5e_get_tls_rx_context(tls_ctx); - - netdev_info(netdev, "resyncing seq %d rcd %lld\n", seq, - be64_to_cpu(rcd_sn)); - mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn); - atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply); - - return 0; -} - -static const struct tlsdev_ops mlx5e_tls_ops = { - .tls_dev_add = mlx5e_tls_add, - .tls_dev_del = mlx5e_tls_del, - .tls_dev_resync = mlx5e_tls_resync, -}; - void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { - struct net_device *netdev = priv->netdev; - u32 caps; - - if (mlx5e_accel_is_ktls_device(priv->mdev)) { - mlx5e_ktls_build_netdev(priv); + if (!mlx5e_accel_is_ktls_device(priv->mdev)) return; - } - - /* FPGA */ - if (!mlx5e_accel_is_tls_device(priv->mdev)) - return; - - caps = mlx5_accel_tls_device_caps(priv->mdev); - if (caps & MLX5_ACCEL_TLS_TX) { - netdev->features |= NETIF_F_HW_TLS_TX; - netdev->hw_features |= NETIF_F_HW_TLS_TX; - } - - if (caps & MLX5_ACCEL_TLS_RX) { - netdev->features |= NETIF_F_HW_TLS_RX; - netdev->hw_features |= NETIF_F_HW_TLS_RX; - } - - if (!(caps & MLX5_ACCEL_TLS_LRO)) { - netdev->features &= ~NETIF_F_LRO; - netdev->hw_features &= ~NETIF_F_LRO; - } - netdev->tlsdev_ops = &mlx5e_tls_ops; + mlx5e_ktls_build_netdev(priv); } int mlx5e_tls_init(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index 62ecf14bf86a..fc13b2ebe6b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -43,16 +43,8 @@ struct mlx5e_tls_sw_stats { atomic64_t tx_tls_ctx; atomic64_t tx_tls_del; - atomic64_t tx_tls_drop_metadata; - atomic64_t tx_tls_drop_resync_alloc; - atomic64_t tx_tls_drop_no_sync_data; - atomic64_t tx_tls_drop_bypass_required; atomic64_t rx_tls_ctx; atomic64_t rx_tls_del; - atomic64_t rx_tls_drop_resync_request; - atomic64_t rx_tls_resync_request; - atomic64_t rx_tls_resync_reply; - atomic64_t rx_tls_auth_fail; }; struct mlx5e_tls { @@ -60,42 +52,6 @@ struct mlx5e_tls { struct workqueue_struct *rx_wq; }; -struct mlx5e_tls_offload_context_tx { - struct tls_offload_context_tx base; - u32 expected_seq; - __be32 swid; -}; - -static inline struct mlx5e_tls_offload_context_tx * -mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) -{ - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_tx) > - TLS_OFFLOAD_CONTEXT_SIZE_TX); - return container_of(tls_offload_ctx_tx(tls_ctx), - struct mlx5e_tls_offload_context_tx, - base); -} - -struct mlx5e_tls_offload_context_rx { - struct tls_offload_context_rx base; - __be32 handle; -}; - -static inline struct mlx5e_tls_offload_context_rx * -mlx5e_get_tls_rx_context(struct tls_context *tls_ctx) -{ - BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context_rx) > - TLS_OFFLOAD_CONTEXT_SIZE_RX); - return container_of(tls_offload_ctx_rx(tls_ctx), - struct mlx5e_tls_offload_context_rx, - base); -} - -static inline bool 
mlx5e_is_tls_on(struct mlx5e_priv *priv) -{ - return priv->tls; -} - void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); int mlx5e_tls_init(struct mlx5e_priv *priv); void mlx5e_tls_cleanup(struct mlx5e_priv *priv); @@ -106,8 +62,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data); static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { - return !is_kdump_kernel() && - mlx5_accel_is_tls_device(mdev); + return !is_kdump_kernel() && mlx5_accel_is_ktls_device(mdev); } #else @@ -119,7 +74,6 @@ static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) mlx5e_ktls_build_netdev(priv); } -static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; } static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { } static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index a05580cea481..39412fafa860 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -38,230 +38,11 @@ #include #include -#define SYNDROM_DECRYPTED 0x30 -#define SYNDROM_RESYNC_REQUEST 0x31 -#define SYNDROM_AUTH_FAILED 0x32 - -#define SYNDROME_OFFLOAD_REQUIRED 32 -#define SYNDROME_SYNC 33 - -struct sync_info { - u64 rcd_sn; - s32 sync_len; - int nr_frags; - skb_frag_t frags[MAX_SKB_FRAGS]; -}; - -struct recv_metadata_content { - u8 syndrome; - u8 reserved; - __be32 sync_seq; -} __packed; - -struct send_metadata_content { - /* One byte of syndrome followed by 3 bytes of swid */ - __be32 syndrome_swid; - __be16 first_seq; -} __packed; - -struct mlx5e_tls_metadata { - union { - /* from fpga to host */ - struct recv_metadata_content recv; - /* from host to fpga */ - struct send_metadata_content send; - unsigned char raw[6]; - } __packed content; - /* packet type ID field */ - __be16 ethertype; -} __packed; - -static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) -{ - struct mlx5e_tls_metadata *pet; - struct ethhdr *eth; - - if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata))) - return -ENOMEM; - - eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata)); - skb->mac_header -= sizeof(struct mlx5e_tls_metadata); - pet = (struct mlx5e_tls_metadata *)(eth + 1); - - memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata), - 2 * ETH_ALEN); - - eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); - pet->content.send.syndrome_swid = - htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; - - return 0; -} - -static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context, - u32 tcp_seq, struct sync_info *info) -{ - int remaining, i = 0, ret = -EINVAL; - struct tls_record_info *record; - unsigned long flags; - s32 sync_size; - - spin_lock_irqsave(&context->base.lock, flags); - record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn); - - if (unlikely(!record)) - goto out; - - sync_size = tcp_seq - tls_record_start_seq(record); - info->sync_len = sync_size; - if (unlikely(sync_size < 0)) { - if (tls_record_is_start_marker(record)) - goto done; - - goto out; - } - - remaining = sync_size; - while (remaining > 0) { - info->frags[i] = record->frags[i]; - __skb_frag_ref(&info->frags[i]); - remaining -= skb_frag_size(&info->frags[i]); - - if (remaining < 0) - skb_frag_size_add(&info->frags[i], remaining); - - i++; - } - 
info->nr_frags = i; -done: - ret = 0; -out: - spin_unlock_irqrestore(&context->base.lock, flags); - return ret; -} - -static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, - struct sk_buff *nskb, u32 tcp_seq, - int headln, __be64 rcd_sn) -{ - struct mlx5e_tls_metadata *pet; - u8 syndrome = SYNDROME_SYNC; - struct iphdr *iph; - struct tcphdr *th; - int data_len, mss; - - nskb->dev = skb->dev; - skb_reset_mac_header(nskb); - skb_set_network_header(nskb, skb_network_offset(skb)); - skb_set_transport_header(nskb, skb_transport_offset(skb)); - memcpy(nskb->data, skb->data, headln); - memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn)); - - iph = ip_hdr(nskb); - iph->tot_len = htons(nskb->len - skb_network_offset(nskb)); - th = tcp_hdr(nskb); - data_len = nskb->len - headln; - tcp_seq -= data_len; - th->seq = htonl(tcp_seq); - - mss = nskb->dev->mtu - (headln - skb_network_offset(nskb)); - skb_shinfo(nskb)->gso_size = 0; - if (data_len > mss) { - skb_shinfo(nskb)->gso_size = mss; - skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss); - } - skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; - - pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); - memcpy(pet, &syndrome, sizeof(syndrome)); - pet->content.send.first_seq = htons(tcp_seq); - - /* MLX5 devices don't care about the checksum partial start, offset - * and pseudo header - */ - nskb->ip_summed = CHECKSUM_PARTIAL; - - nskb->queue_mapping = skb->queue_mapping; -} - -static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context, - struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5e_tls *tls) -{ - u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); - struct sync_info info; - struct sk_buff *nskb; - int linear_len = 0; - int headln; - int i; - - sq->stats->tls_ooo++; - - if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) { - /* We might get here if a retransmission reaches the driver - * after the relevant record is acked. 
- * It should be safe to drop the packet in this case - */ - atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data); - goto err_out; - } - - if (unlikely(info.sync_len < 0)) { - u32 payload; - - headln = skb_transport_offset(skb) + tcp_hdrlen(skb); - payload = skb->len - headln; - if (likely(payload <= -info.sync_len)) - /* SKB payload doesn't require offload - */ - return true; - - atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required); - goto err_out; - } - - if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { - atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata); - goto err_out; - } - - headln = skb_transport_offset(skb) + tcp_hdrlen(skb); - linear_len += headln + sizeof(info.rcd_sn); - nskb = alloc_skb(linear_len, GFP_ATOMIC); - if (unlikely(!nskb)) { - atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc); - goto err_out; - } - - context->expected_seq = tcp_seq + skb->len - headln; - skb_put(nskb, linear_len); - for (i = 0; i < info.nr_frags; i++) - skb_shinfo(nskb)->frags[i] = info.frags[i]; - - skb_shinfo(nskb)->nr_frags = info.nr_frags; - nskb->data_len = info.sync_len; - nskb->len += info.sync_len; - sq->stats->tls_resync_bytes += nskb->len; - mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, - cpu_to_be64(info.rcd_sn)); - mlx5e_sq_xmit_simple(sq, nskb, true); - - return true; - -err_out: - dev_kfree_skb_any(skb); - return false; -} - bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_tls_offload_context_tx *context; struct tls_context *tls_ctx; - u32 expected_seq; int datalen; - u32 skb_seq; datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (!datalen) @@ -273,118 +54,17 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) goto err_out; - if (mlx5e_accel_is_ktls_tx(sq->mdev)) - return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); - - /* FPGA */ - skb_seq = ntohl(tcp_hdr(skb)->seq); - context = mlx5e_get_tls_tx_context(tls_ctx); - expected_seq = context->expected_seq; - - if (unlikely(expected_seq != skb_seq)) - return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls); - - if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { - atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata); - dev_kfree_skb_any(skb); - return false; - } - - context->expected_seq = skb_seq + datalen; - return true; + return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); err_out: dev_kfree_skb_any(skb); return false; } -static int tls_update_resync_sn(struct net_device *netdev, - struct sk_buff *skb, - struct mlx5e_tls_metadata *mdata) -{ - struct sock *sk = NULL; - struct iphdr *iph; - struct tcphdr *th; - __be32 seq; - - if (mdata->ethertype != htons(ETH_P_IP)) - return -EINVAL; - - iph = (struct iphdr *)(mdata + 1); - - th = ((void *)iph) + iph->ihl * 4; - - if (iph->version == 4) { - sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, - iph->saddr, th->source, iph->daddr, - th->dest, netdev->ifindex); -#if IS_ENABLED(CONFIG_IPV6) - } else { - struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; - - sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, - &ipv6h->saddr, th->source, - &ipv6h->daddr, ntohs(th->dest), - netdev->ifindex, 0); -#endif - } - if (!sk || sk->sk_state == TCP_TIME_WAIT) { - struct mlx5e_priv *priv = netdev_priv(netdev); - - 
atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request); - goto out; - } - - skb->sk = sk; - skb->destructor = sock_edemux; - - memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq)); - tls_offload_rx_resync_request(sk, seq); -out: - return 0; -} - -/* FPGA tls rx handler */ -void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, - u32 *cqe_bcnt) -{ - struct mlx5e_tls_metadata *mdata; - struct mlx5e_priv *priv; - - /* Use the metadata */ - mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN); - switch (mdata->content.recv.syndrome) { - case SYNDROM_DECRYPTED: - skb->decrypted = 1; - break; - case SYNDROM_RESYNC_REQUEST: - tls_update_resync_sn(rq->netdev, skb, mdata); - priv = netdev_priv(rq->netdev); - atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); - break; - case SYNDROM_AUTH_FAILED: - /* Authentication failure will be observed and verified by kTLS */ - priv = netdev_priv(rq->netdev); - atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); - break; - default: - /* Bypass the metadata header to others */ - return; - } - - remove_metadata_hdr(skb); - *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; -} - u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { if (!mlx5e_accel_is_tls_device(mdev)) return 0; - if (mlx5e_accel_is_ktls_device(mdev)) - return mlx5e_ktls_get_stop_room(mdev, params); - - /* FPGA */ - /* Resync SKB. */ - return mlx5e_stop_room_for_max_wqe(mdev); + return mlx5e_ktls_get_stop_room(mdev, params); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 0ca0a023fb8d..168acceb0d28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -60,18 +60,12 @@ mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); } -void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, - u32 *cqe_bcnt); - static inline void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) { if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */ - return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); - - if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb))) - return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt); + mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); } #else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c index 56e7b2aee85f..c25e8742bbbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c @@ -38,13 +38,6 @@ #include "fpga/sdk.h" #include "en_accel/tls.h" -static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) }, -}; - static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) }, { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) }, @@ -59,18 +52,16 @@ static const struct counter_desc 
*get_tls_atomic_stats(struct mlx5e_priv *priv) { if (!priv->tls) return NULL; - if (mlx5e_accel_is_ktls_device(priv->mdev)) - return mlx5e_ktls_sw_stats_desc; - return mlx5e_tls_sw_stats_desc; + + return mlx5e_ktls_sw_stats_desc; } int mlx5e_tls_get_count(struct mlx5e_priv *priv) { if (!priv->tls) return 0; - if (mlx5e_accel_is_ktls_device(priv->mdev)) - return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); - return ARRAY_SIZE(mlx5e_tls_sw_stats_desc); + + return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); } int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2f1dedc721d1..7423c6830c4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1036,9 +1036,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, if (err) goto err_destroy_rq; - if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev)) - __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */ - if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index 2a984e82ae16..e9e72d260681 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -59,7 +59,6 @@ struct mlx5_fpga_device { } conn_res; struct mlx5_fpga_ipsec *ipsec; - struct mlx5_fpga_tls *tls; }; #define mlx5_fpga_dbg(__adev, format, ...) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c deleted file mode 100644 index 29b7339ebfa3..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c +++ /dev/null @@ -1,622 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include -#include "fpga/tls.h" -#include "fpga/cmd.h" -#include "fpga/sdk.h" -#include "fpga/core.h" -#include "accel/tls.h" - -struct mlx5_fpga_tls_command_context; - -typedef void (*mlx5_fpga_tls_command_complete) - (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *ctx, - struct mlx5_fpga_dma_buf *resp); - -struct mlx5_fpga_tls_command_context { - struct list_head list; - /* There is no guarantee on the order between the TX completion - * and the command response. - * The TX completion is going to touch cmd->buf even in - * the case of successful transmission. - * So instead of requiring separate allocations for cmd - * and cmd->buf we've decided to use a reference counter - */ - refcount_t ref; - struct mlx5_fpga_dma_buf buf; - mlx5_fpga_tls_command_complete complete; -}; - -static void -mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx) -{ - if (refcount_dec_and_test(&ctx->ref)) - kfree(ctx); -} - -static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *resp) -{ - struct mlx5_fpga_conn *conn = fdev->tls->conn; - struct mlx5_fpga_tls_command_context *ctx; - struct mlx5_fpga_tls *tls = fdev->tls; - unsigned long flags; - - spin_lock_irqsave(&tls->pending_cmds_lock, flags); - ctx = list_first_entry(&tls->pending_cmds, - struct mlx5_fpga_tls_command_context, list); - list_del(&ctx->list); - spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); - ctx->complete(conn, fdev, ctx, resp); -} - -static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *buf, - u8 status) -{ - struct mlx5_fpga_tls_command_context *ctx = - container_of(buf, struct mlx5_fpga_tls_command_context, buf); - - mlx5_fpga_tls_put_command_ctx(ctx); - - if (unlikely(status)) - mlx5_fpga_tls_cmd_complete(fdev, NULL); -} - -static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *cmd, - mlx5_fpga_tls_command_complete complete) -{ - struct mlx5_fpga_tls *tls = fdev->tls; - unsigned long flags; - int ret; - - refcount_set(&cmd->ref, 2); - cmd->complete = complete; - cmd->buf.complete = mlx5_fpga_cmd_send_complete; - - spin_lock_irqsave(&tls->pending_cmds_lock, flags); - /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock - * to make sure commands are inserted to the tls->pending_cmds list - * and the command QP in the same order. - */ - ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf); - if (likely(!ret)) - list_add_tail(&cmd->list, &tls->pending_cmds); - else - complete(tls->conn, fdev, cmd, NULL); - spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); -} - -/* Start of context identifiers range (inclusive) */ -#define SWID_START 0 -/* End of context identifiers range (exclusive) */ -#define SWID_END BIT(24) - -static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, - void *ptr) -{ - unsigned long flags; - int ret; - - /* TLS metadata format is 1 byte for syndrome followed - * by 3 bytes of swid (software ID) - * swid must not exceed 3 bytes. 
- * See tls_rxtx.c:insert_pet() for details - */ - BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); - - idr_preload(GFP_KERNEL); - spin_lock_irqsave(idr_spinlock, flags); - ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); - spin_unlock_irqrestore(idr_spinlock, flags); - idr_preload_end(); - - return ret; -} - -static void *mlx5_fpga_tls_release_swid(struct idr *idr, - spinlock_t *idr_spinlock, u32 swid) -{ - unsigned long flags; - void *ptr; - - spin_lock_irqsave(idr_spinlock, flags); - ptr = idr_remove(idr, swid); - spin_unlock_irqrestore(idr_spinlock, flags); - return ptr; -} - -static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *buf, u8 status) -{ - kfree(buf); -} - -static void -mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *cmd, - struct mlx5_fpga_dma_buf *resp) -{ - if (resp) { - u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); - - if (syndrome) - mlx5_fpga_err(fdev, - "Teardown stream failed with syndrome = %d", - syndrome); - } - mlx5_fpga_tls_put_command_ctx(cmd); -} - -static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) -{ - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow, - MLX5_BYTE_OFF(tls_flow, ipv6)); - - MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6)); - MLX5_SET(tls_cmd, cmd, direction_sx, - MLX5_GET(tls_flow, flow, direction_sx)); -} - -int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn) -{ - struct mlx5_fpga_dma_buf *buf; - int size = sizeof(*buf) + MLX5_TLS_COMMAND_SIZE; - void *flow; - void *cmd; - int ret; - - buf = kzalloc(size, GFP_ATOMIC); - if (!buf) - return -ENOMEM; - - cmd = (buf + 1); - - rcu_read_lock(); - flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle)); - if (unlikely(!flow)) { - rcu_read_unlock(); - WARN_ONCE(1, "Received NULL pointer for handle\n"); - kfree(buf); - return -EINVAL; - } - mlx5_fpga_tls_flow_to_cmd(flow, cmd); - rcu_read_unlock(); - - MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); - MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); - MLX5_SET(tls_cmd, cmd, tcp_sn, seq); - MLX5_SET(tls_cmd, cmd, command_type, CMD_RESYNC_RX); - - buf->sg[0].data = cmd; - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - buf->complete = mlx_tls_kfree_complete; - - ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); - if (ret < 0) - kfree(buf); - - return ret; -} - -static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, - void *flow, u32 swid, gfp_t flags) -{ - struct mlx5_fpga_tls_command_context *ctx; - struct mlx5_fpga_dma_buf *buf; - void *cmd; - - ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags); - if (!ctx) - return; - - buf = &ctx->buf; - cmd = (ctx + 1); - MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); - MLX5_SET(tls_cmd, cmd, swid, swid); - - mlx5_fpga_tls_flow_to_cmd(flow, cmd); - kfree(flow); - - buf->sg[0].data = cmd; - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - - mlx5_fpga_tls_cmd_send(mdev->fpga, ctx, - mlx5_fpga_tls_teardown_completion); -} - -void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags, bool direction_sx) -{ - struct mlx5_fpga_tls *tls = mdev->fpga->tls; - void *flow; - - if (direction_sx) - flow = mlx5_fpga_tls_release_swid(&tls->tx_idr, - &tls->tx_idr_spinlock, - swid); - else - flow = mlx5_fpga_tls_release_swid(&tls->rx_idr, - &tls->rx_idr_spinlock, - swid); - - if (!flow) { - mlx5_fpga_err(mdev->fpga, "No 
flow information for swid %u\n", - swid); - return; - } - - synchronize_rcu(); /* before kfree(flow) */ - mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); -} - -enum mlx5_fpga_setup_stream_status { - MLX5_FPGA_CMD_PENDING, - MLX5_FPGA_CMD_SEND_FAILED, - MLX5_FPGA_CMD_RESPONSE_RECEIVED, - MLX5_FPGA_CMD_ABANDONED, -}; - -struct mlx5_setup_stream_context { - struct mlx5_fpga_tls_command_context cmd; - atomic_t status; - u32 syndrome; - struct completion comp; -}; - -static void -mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_tls_command_context *cmd, - struct mlx5_fpga_dma_buf *resp) -{ - struct mlx5_setup_stream_context *ctx = - container_of(cmd, struct mlx5_setup_stream_context, cmd); - int status = MLX5_FPGA_CMD_SEND_FAILED; - void *tls_cmd = ctx + 1; - - /* If we failed to send to command resp == NULL */ - if (resp) { - ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); - status = MLX5_FPGA_CMD_RESPONSE_RECEIVED; - } - - status = atomic_xchg_release(&ctx->status, status); - if (likely(status != MLX5_FPGA_CMD_ABANDONED)) { - complete(&ctx->comp); - return; - } - - mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n", - ctx->syndrome); - - if (!ctx->syndrome) { - /* The process was killed while waiting for the context to be - * added, and the add completed successfully. - * We need to destroy the HW context, and we can't can't reuse - * the command context because we might not have received - * the tx completion yet. - */ - mlx5_fpga_tls_del_flow(fdev->mdev, - MLX5_GET(tls_cmd, tls_cmd, swid), - GFP_ATOMIC, - MLX5_GET(tls_cmd, tls_cmd, - direction_sx)); - } - - mlx5_fpga_tls_put_command_ctx(cmd); -} - -static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev, - struct mlx5_setup_stream_context *ctx) -{ - struct mlx5_fpga_dma_buf *buf; - void *cmd = ctx + 1; - int status, ret = 0; - - buf = &ctx->cmd.buf; - buf->sg[0].data = cmd; - buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; - MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM); - - init_completion(&ctx->comp); - atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING); - ctx->syndrome = -1; - - mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, - mlx5_fpga_tls_setup_completion); - wait_for_completion_killable(&ctx->comp); - - status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED); - if (unlikely(status == MLX5_FPGA_CMD_PENDING)) - /* ctx is going to be released in mlx5_fpga_tls_setup_completion */ - return -EINTR; - - if (unlikely(ctx->syndrome)) - ret = -ENOMEM; - - mlx5_fpga_tls_put_command_ctx(&ctx->cmd); - return ret; -} - -static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg, - struct mlx5_fpga_dma_buf *buf) -{ - struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg; - - mlx5_fpga_tls_cmd_complete(fdev, buf); -} - -bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev) -{ - if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) - return false; - - if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != - MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) - return false; - - if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS) - return false; - - if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0) - return false; - - return true; -} - -static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev, - u32 *p_caps) -{ - int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap); - u32 caps = 0; - void *buf; - - buf = kzalloc(cap_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - err = mlx5_fpga_get_sbu_caps(fdev, 
cap_size, buf); - if (err) - goto out; - - if (MLX5_GET(tls_extended_cap, buf, tx)) - caps |= MLX5_ACCEL_TLS_TX; - if (MLX5_GET(tls_extended_cap, buf, rx)) - caps |= MLX5_ACCEL_TLS_RX; - if (MLX5_GET(tls_extended_cap, buf, tls_v12)) - caps |= MLX5_ACCEL_TLS_V12; - if (MLX5_GET(tls_extended_cap, buf, tls_v13)) - caps |= MLX5_ACCEL_TLS_V13; - if (MLX5_GET(tls_extended_cap, buf, lro)) - caps |= MLX5_ACCEL_TLS_LRO; - if (MLX5_GET(tls_extended_cap, buf, ipv6)) - caps |= MLX5_ACCEL_TLS_IPV6; - - if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128)) - caps |= MLX5_ACCEL_TLS_AES_GCM128; - if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256)) - caps |= MLX5_ACCEL_TLS_AES_GCM256; - - *p_caps = caps; - err = 0; -out: - kfree(buf); - return err; -} - -int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - struct mlx5_fpga_conn_attr init_attr = {0}; - struct mlx5_fpga_conn *conn; - struct mlx5_fpga_tls *tls; - int err = 0; - - if (!mlx5_fpga_is_tls_device(mdev) || !fdev) - return 0; - - tls = kzalloc(sizeof(*tls), GFP_KERNEL); - if (!tls) - return -ENOMEM; - - err = mlx5_fpga_tls_get_caps(fdev, &tls->caps); - if (err) - goto error; - - if (!(tls->caps & (MLX5_ACCEL_TLS_V12 | MLX5_ACCEL_TLS_AES_GCM128))) { - err = -ENOTSUPP; - goto error; - } - - init_attr.rx_size = SBU_QP_QUEUE_SIZE; - init_attr.tx_size = SBU_QP_QUEUE_SIZE; - init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb; - init_attr.cb_arg = fdev; - conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); - if (IS_ERR(conn)) { - err = PTR_ERR(conn); - mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n", - err); - goto error; - } - - tls->conn = conn; - spin_lock_init(&tls->pending_cmds_lock); - INIT_LIST_HEAD(&tls->pending_cmds); - - idr_init(&tls->tx_idr); - idr_init(&tls->rx_idr); - spin_lock_init(&tls->tx_idr_spinlock); - spin_lock_init(&tls->rx_idr_spinlock); - fdev->tls = tls; - return 0; - -error: - kfree(tls); - return err; -} - -void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - - if (!fdev || !fdev->tls) - return; - - mlx5_fpga_sbu_conn_destroy(fdev->tls->conn); - kfree(fdev->tls); - fdev->tls = NULL; -} - -static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd, - struct tls_crypto_info *info, - __be64 *rcd_sn) -{ - struct tls12_crypto_info_aes_gcm_128 *crypto_info = - (struct tls12_crypto_info_aes_gcm_128 *)info; - - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq, - TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); - - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv), - crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key), - crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); - - /* in AES-GCM 128 we need to write the key twice */ - memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) + - TLS_CIPHER_AES_GCM_128_KEY_SIZE, - crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); - - MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128); -} - -static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, - struct tls_crypto_info *crypto_info) -{ - __be64 rcd_sn; - - switch (crypto_info->cipher_type) { - case TLS_CIPHER_AES_GCM_128: - if (!(caps & MLX5_ACCEL_TLS_AES_GCM128)) - return -EINVAL; - mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn); - break; - default: - return -EINVAL; - } - - return 0; -} - -static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 swid, u32 tcp_sn) -{ - u32 caps = 
mlx5_fpga_tls_device_caps(mdev); - struct mlx5_setup_stream_context *ctx; - int ret = -ENOMEM; - size_t cmd_size; - void *cmd; - - cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx); - ctx = kzalloc(cmd_size, GFP_KERNEL); - if (!ctx) - goto out; - - cmd = ctx + 1; - ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info); - if (ret) - goto free_ctx; - - mlx5_fpga_tls_flow_to_cmd(flow, cmd); - - MLX5_SET(tls_cmd, cmd, swid, swid); - MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn); - - return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx); - -free_ctx: - kfree(ctx); -out: - return ret; -} - -int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx) -{ - struct mlx5_fpga_tls *tls = mdev->fpga->tls; - int ret = -ENOMEM; - u32 swid; - - if (direction_sx) - ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, - &tls->tx_idr_spinlock, flow); - else - ret = mlx5_fpga_tls_alloc_swid(&tls->rx_idr, - &tls->rx_idr_spinlock, flow); - - if (ret < 0) - return ret; - - swid = ret; - MLX5_SET(tls_flow, flow, direction_sx, direction_sx ? 1 : 0); - - ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, - start_offload_tcp_sn); - if (ret && ret != -EINTR) - goto free_swid; - - *p_swid = swid; - return 0; -free_swid: - if (direction_sx) - mlx5_fpga_tls_release_swid(&tls->tx_idr, - &tls->tx_idr_spinlock, swid); - else - mlx5_fpga_tls_release_swid(&tls->rx_idr, - &tls->rx_idr_spinlock, swid); - - return ret; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h deleted file mode 100644 index 5714cf391d1b..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#ifndef __MLX5_FPGA_TLS_H__ -#define __MLX5_FPGA_TLS_H__ - -#include - -#include -#include "fpga/core.h" - -struct mlx5_fpga_tls { - struct list_head pending_cmds; - spinlock_t pending_cmds_lock; /* Protects pending_cmds */ - u32 caps; - struct mlx5_fpga_conn *conn; - - struct idr tx_idr; - struct idr rx_idr; - spinlock_t tx_idr_spinlock; /* protects the IDR */ - spinlock_t rx_idr_spinlock; /* protects the IDR */ -}; - -int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, - struct tls_crypto_info *crypto_info, - u32 start_offload_tcp_sn, u32 *p_swid, - bool direction_sx); - -void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid, - gfp_t flags, bool direction_sx); - -bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); -int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); -void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev); - -static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) -{ - return mdev->fpga->tls->caps; -} - -int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle, - u32 seq, __be64 rcd_sn); - -#endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2589e39eb9c7..7f287e300fb4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -64,7 +64,6 @@ #include "fpga/core.h" #include "fpga/ipsec.h" #include "accel/ipsec.h" -#include "accel/tls.h" #include "lib/clock.h" #include "lib/vxlan.h" #include "lib/geneve.h" @@ -1185,12 +1184,6 @@ static int mlx5_load(struct mlx5_core_dev *dev) mlx5_accel_ipsec_init(dev); - err = mlx5_accel_tls_init(dev); - if (err) { - mlx5_core_err(dev, "TLS device start failed %d\n", err); - goto err_tls_start; - } - err = mlx5_init_fs(dev); if (err) { mlx5_core_err(dev, "Failed to init flow steering\n"); @@ -1238,8 +1231,6 @@ err_vhca: err_set_hca: mlx5_cleanup_fs(dev); err_fs: - mlx5_accel_tls_cleanup(dev); -err_tls_start: mlx5_accel_ipsec_cleanup(dev); mlx5_fpga_device_stop(dev); err_fpga_start: @@ -1267,7 +1258,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_vhca_event_stop(dev); mlx5_cleanup_fs(dev); mlx5_accel_ipsec_cleanup(dev); - mlx5_accel_tls_cleanup(dev); mlx5_fpga_device_stop(dev); mlx5_rsc_dump_cleanup(dev); mlx5_hv_vhca_cleanup(dev->hv_vhca); diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index 07d77323f78a..e3d824f6a309 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -54,7 +54,6 @@ enum { enum { MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3, }; struct mlx5_ifc_fpga_shell_caps_bits { @@ -387,27 +386,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_tls_extended_cap_bits { - u8 aes_gcm_128[0x1]; - u8 aes_gcm_256[0x1]; - u8 reserved_at_2[0x1e]; - u8 reserved_at_20[0x20]; - u8 context_capacity_total[0x20]; - u8 context_capacity_rx[0x20]; - u8 context_capacity_tx[0x20]; - u8 reserved_at_a0[0x10]; - u8 tls_counter_size[0x10]; - u8 tls_counters_addr_low[0x20]; - u8 tls_counters_addr_high[0x20]; - u8 rx[0x1]; - u8 tx[0x1]; - u8 tls_v12[0x1]; - u8 tls_v13[0x1]; - u8 lro[0x1]; - u8 ipv6[0x1]; - u8 reserved_at_106[0x1a]; -}; - struct mlx5_ifc_ipsec_extended_cap_bits { u8 encapsulation[0x20]; @@ -572,45 +550,4 @@ struct mlx5_ifc_fpga_ipsec_sa { __be16 vid; /* only 12 bits, rest is reserved */ __be16 reserved2; } __packed; - -enum fpga_tls_cmds { - 
CMD_SETUP_STREAM = 0x1001, - CMD_TEARDOWN_STREAM = 0x1002, - CMD_RESYNC_RX = 0x1003, -}; - -#define MLX5_TLS_1_2 (0) - -#define MLX5_TLS_ALG_AES_GCM_128 (0) -#define MLX5_TLS_ALG_AES_GCM_256 (1) - -struct mlx5_ifc_tls_cmd_bits { - u8 command_type[0x20]; - u8 ipv6[0x1]; - u8 direction_sx[0x1]; - u8 tls_version[0x2]; - u8 reserved[0x1c]; - u8 swid[0x20]; - u8 src_port[0x10]; - u8 dst_port[0x10]; - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; - u8 tls_rcd_sn[0x40]; - u8 tcp_sn[0x20]; - u8 tls_implicit_iv[0x20]; - u8 tls_xor_iv[0x40]; - u8 encryption_key[0x100]; - u8 alg[4]; - u8 reserved2[0x1c]; - u8 reserved3[0x4a0]; -}; - -struct mlx5_ifc_tls_resp_bits { - u8 syndrome[0x20]; - u8 stream_id[0x20]; - u8 reserved[0x40]; -}; - -#define MLX5_TLS_COMMAND_SIZE (0x100) - #endif /* MLX5_IFC_FPGA_H */ -- cgit v1.2.3 From e59437aa7ae6757111e633d02acb9868c1a7ec03 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 4 Apr 2022 15:08:16 +0300 Subject: net/mlx5: Reliably return TLS device capabilities The capabilities returned from the FW are independent to the compiled kernel and traditionally rely on the relevant CAPs bit only. The mlx5_accel_is_ktls_*() functions are compiled out if CONFIG_MLX5_TLS is not set, which "hides" from the user the information that TLS can be enabled on this device. Link: https://lore.kernel.org/r/a333ce541fb9497d04126b11c4a0052f9807d141.1649073691.git.leonro@nvidia.com Reviewed-by: Tariq Toukan Reviewed-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 614687e0e3d9..cfb8bedba512 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,7 +35,6 @@ #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" #include "lib/tout.h" -#include "accel/tls.h" enum { MCQS_IDENTIFIER_BOOT_IMG = 0x1, @@ -249,7 +248,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } - if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) { + if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) { err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); if (err) return err; -- cgit v1.2.3 From 691f17b980d028e74ae168ef9e5be7d6021dbf23 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 4 Apr 2022 15:08:17 +0300 Subject: net/mlx5: Remove indirection in TLS build The dream described in the commit 1ae173228489 ("net/mlx5: Accel, Add TLS tx offload interface") never came true, even an opposite happened when FPGA TLS support was dropped. Such removal revealed the problematic flow in the build process: build of unrelated files in case of TLS or IPsec are enabled. In both cases, the MLX5_ACCEL is enabled, which built both TLS and IPsec. As a solution, simply merge MLX5_TLS and MLX5_EN_TLS options and move TLS related files to the eth part of the mlx5_core. 
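For illustration, this is how the merged option reads once MLX5_TLS is folded into MLX5_EN_TLS — a sketch reconstructed from the Kconfig hunk below, not an additional change:

# Illustrative end state: single option replacing the former MLX5_TLS/MLX5_EN_TLS pair
config MLX5_EN_TLS
	bool "Mellanox Technologies TLS Connect-X support"
	depends on TLS_DEVICE
	depends on TLS=y || MLX5_CORE=m
	depends on MLX5_CORE_EN
	select MLX5_ACCEL
	help
	  Build support for TLS cryptography-offload acceleration in the NIC.

The previously hidden MLX5_EN_TLS bool and the user-visible MLX5_TLS prompt collapse into this single user-visible option, and the TLS helpers move from the generic accel/ code into the eth (en_accel/) side of mlx5_core, as the diffstat below shows.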
Link: https://lore.kernel.org/r/0d1ea8cdc3a15922640b8b764d2bdb8f587b52c2.1649073691.git.leonro@nvidia.com Reviewed-by: Tariq Toukan Reviewed-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 11 +-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/accel/tls.c | 78 ---------------- .../net/ethernet/mellanox/mlx5/core/accel/tls.h | 100 --------------------- .../ethernet/mellanox/mlx5/core/en_accel/ktls.c | 39 ++++++++ .../ethernet/mellanox/mlx5/core/en_accel/ktls.h | 47 +++++++++- .../mellanox/mlx5/core/en_accel/ktls_utils.h | 1 - .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 1 - .../net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 - 10 files changed, 84 insertions(+), 197 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 21df10bb14c3..0c82b376416b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -177,23 +177,14 @@ config MLX5_EN_IPSEC Note: Support for hardware with this capability needs to be selected for this option to become available. -config MLX5_TLS +config MLX5_EN_TLS bool "Mellanox Technologies TLS Connect-X support" depends on TLS_DEVICE depends on TLS=y || MLX5_CORE=m depends on MLX5_CORE_EN select MLX5_ACCEL - select MLX5_EN_TLS - help - Build TLS support for the Connect-X family of network cards by Mellanox - Technologies. - -config MLX5_EN_TLS - bool help Build support for TLS cryptography-offload acceleration in the NIC. - Note: Support for hardware with this capability needs to be selected - for this option to become available. config MLX5_SW_STEERING bool "Mellanox Technologies software-managed steering" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 33525f7d0aa0..b7e3bcb5e6c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -90,7 +90,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o -mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o +mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c deleted file mode 100644 index fe82c4140d85..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include - -#include "accel/tls.h" -#include "mlx5_core.h" -#include "lib/mlx5.h" - -#ifdef CONFIG_MLX5_TLS -int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info, - u32 *p_key_id) -{ - u32 sz_bytes; - void *key; - - switch (crypto_info->cipher_type) { - case TLS_CIPHER_AES_GCM_128: { - struct tls12_crypto_info_aes_gcm_128 *info = - (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; - - key = info->key; - sz_bytes = sizeof(info->key); - break; - } - case TLS_CIPHER_AES_GCM_256: { - struct tls12_crypto_info_aes_gcm_256 *info = - (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; - - key = info->key; - sz_bytes = sizeof(info->key); - break; - } - default: - return -EINVAL; - } - - return mlx5_create_encryption_key(mdev, key, sz_bytes, - MLX5_ACCEL_OBJ_TLS_KEY, - p_key_id); -} - -void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) -{ - mlx5_destroy_encryption_key(mdev, key_id); -} -#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h deleted file mode 100644 index 6f92ebe3cc82..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef __MLX5_ACCEL_TLS_H__ -#define __MLX5_ACCEL_TLS_H__ - -#include -#include - -#ifdef CONFIG_MLX5_TLS -int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info, - u32 *p_key_id); -void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); - -static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) -{ - return MLX5_CAP_GEN(mdev, tls_tx); -} - -static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) -{ - return MLX5_CAP_GEN(mdev, tls_rx); -} - -static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) -{ - if (!mlx5_accel_is_ktls_tx(mdev) && - !mlx5_accel_is_ktls_rx(mdev)) - return false; - - if (!MLX5_CAP_GEN(mdev, log_max_dek)) - return false; - - return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); -} - -static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info) -{ - switch (crypto_info->cipher_type) { - case TLS_CIPHER_AES_GCM_128: - if (crypto_info->version == TLS_1_2_VERSION) - return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); - break; - } - - return false; -} -#else -static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) -{ return false; } - -static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) -{ return false; } - -static inline int -mlx5_ktls_create_key(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info, - u32 *p_key_id) { return -ENOTSUPP; } -static inline void -mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {} - -static inline bool -mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } -static inline bool -mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info) { return false; } -#endif -#endif /* __MLX5_ACCEL_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index d93aadbf10da..160aa44bcece 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -2,11 +2,50 @@ // Copyright (c) 2019 Mellanox Technologies. 
#include "en.h" +#include "lib/mlx5.h" #include "en_accel/tls.h" #include "en_accel/ktls.h" #include "en_accel/ktls_utils.h" #include "en_accel/fs_tcp.h" +int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info, + u32 *p_key_id) +{ + u32 sz_bytes; + void *key; + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: { + struct tls12_crypto_info_aes_gcm_128 *info = + (struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + + key = info->key; + sz_bytes = sizeof(info->key); + break; + } + case TLS_CIPHER_AES_GCM_256: { + struct tls12_crypto_info_aes_gcm_256 *info = + (struct tls12_crypto_info_aes_gcm_256 *)crypto_info; + + key = info->key; + sz_bytes = sizeof(info->key); + break; + } + default: + return -EINVAL; + } + + return mlx5_create_encryption_key(mdev, key, sz_bytes, + MLX5_ACCEL_OBJ_TLS_KEY, + p_key_id); +} + +void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) +{ + mlx5_destroy_encryption_key(mdev, key_id); +} + static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, enum tls_offload_ctx_dir direction, struct tls_crypto_info *crypto_info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 5833deb2354c..82259d25a516 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -4,9 +4,38 @@ #ifndef __MLX5E_KTLS_H__ #define __MLX5E_KTLS_H__ +#include #include "en.h" #ifdef CONFIG_MLX5_EN_TLS +int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info, + u32 *p_key_id); +void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); + +static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) +{ + if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return false; + + return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); +} + +static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info) +{ + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: + if (crypto_info->version == TLS_1_2_VERSION) + return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128); + break; + } + + return false; +} void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); int mlx5e_ktls_init_rx(struct mlx5e_priv *priv); @@ -18,14 +47,12 @@ void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_ static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { - return !is_kdump_kernel() && - mlx5_accel_is_ktls_tx(mdev); + return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx); } static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { - return !is_kdump_kernel() && - mlx5_accel_is_ktls_rx(mdev); + return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx); } static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) @@ -35,6 +62,18 @@ static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) } #else +static inline int +mlx5_ktls_create_key(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info, + u32 *p_key_id) { return -EOPNOTSUPP; } +static inline void +mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {} + +static inline bool +mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } +static inline bool +mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, + struct tls_crypto_info *crypto_info) { return false; } static 
inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h index e5c180f2403b..0dc715c4c10d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h @@ -6,7 +6,6 @@ #include #include "en.h" -#include "accel/tls.h" enum { MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index 0c6e165c154e..0609828bb973 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -34,7 +34,6 @@ #include #include #include "en_accel/tls.h" -#include "accel/tls.h" void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index fc13b2ebe6b8..a9776fd04b20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -33,7 +33,6 @@ #ifndef __MLX5E_TLS_H__ #define __MLX5E_TLS_H__ -#include "accel/tls.h" #include "en_accel/ktls.h" #ifdef CONFIG_MLX5_EN_TLS diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7423c6830c4f..7ef1ee7e7572 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -49,7 +49,6 @@ #include "en_accel/en_accel.h" #include "en_accel/tls.h" #include "accel/ipsec.h" -#include "accel/tls.h" #include "lib/vxlan.h" #include "lib/clock.h" #include "en/port.h" -- cgit v1.2.3 From 943aa7bda37301eefc363e4940c4805d8e166478 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 4 Apr 2022 15:08:18 +0300 Subject: net/mlx5: Remove tls vs. ktls separation as it is the same After removal FPGA TLS, we can remove tls->ktls indirection too, as it is the same thing. 
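The shape of that indirection removal shows up in the ktls_tx hunks below: the thin tls_* wrapper, which only derived the TLS payload length and then called the ktls_* implementation, is folded into a single entry point. A minimal, self-contained C sketch of the same folding follows; pkt, payload_len, tls_handle_tx and ktls_handle_tx are hypothetical simplified names, not the driver's functions.

/* Illustration only -- hypothetical names, not the mlx5 TX entry points. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pkt {
	size_t len;	/* total frame length */
	size_t hdr_len;	/* bytes of headers preceding the TLS payload */
};

static size_t payload_len(const struct pkt *p)
{
	return p->len - p->hdr_len;
}

/* Before: a pass-through wrapper in front of the real implementation. */
static bool ktls_handle_tx(size_t datalen)
{
	printf("offloading %zu bytes\n", datalen);
	return true;
}

static bool tls_handle_tx(const struct pkt *p)
{
	size_t datalen = payload_len(p);

	if (!datalen)
		return true;	/* nothing to offload */
	return ktls_handle_tx(datalen);
}

/* After: the wrapper's only work moves into the one remaining function. */
static bool ktls_handle_tx_folded(const struct pkt *p)
{
	size_t datalen = payload_len(p);

	if (!datalen)
		return true;
	printf("offloading %zu bytes\n", datalen);
	return true;
}

int main(void)
{
	struct pkt p = { .len = 1500, .hdr_len = 66 };

	tls_handle_tx(&p);		/* old two-layer call chain */
	ktls_handle_tx_folded(&p);	/* new single entry point */
	return 0;
}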
Link: https://lore.kernel.org/r/67e596599edcffb0de43f26551208dfd34ac777e.1649073691.git.leonro@nvidia.com Reviewed-by: Tariq Toukan Reviewed-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/en/params.c | 2 +- .../mellanox/mlx5/core/en_accel/en_accel.h | 11 +-- .../ethernet/mellanox/mlx5/core/en_accel/ktls.c | 22 ++++- .../ethernet/mellanox/mlx5/core/en_accel/ktls.h | 32 ++++++++ .../ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 2 +- .../mellanox/mlx5/core/en_accel/ktls_stats.c | 90 ++++++++++++++++++++ .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 18 +++- .../mellanox/mlx5/core/en_accel/ktls_txrx.h | 28 ++++++- .../net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 70 ---------------- .../net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 85 ------------------- .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 70 ---------------- .../mellanox/mlx5/core/en_accel/tls_rxtx.h | 85 ------------------- .../mellanox/mlx5/core/en_accel/tls_stats.c | 96 ---------------------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 5 +- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 8 +- 17 files changed, 204 insertions(+), 430 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index b7e3bcb5e6c7..44ff1623707a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -97,7 +97,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o en_accel/ipsec_fs.o -mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ +mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \ en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ en_accel/ktls_tx.o en_accel/ktls_rx.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 08fd1370a8b0..458a75607ca8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -207,7 +207,7 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); u16 stop_room; - stop_room = mlx5e_tls_get_stop_room(mdev, params); + stop_room = mlx5e_ktls_get_stop_room(mdev, params); stop_room += mlx5e_stop_room_for_max_wqe(mdev); if (is_mpwqe) /* A MPWQE can take up to the maximum-sized WQE + all the normal diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 62cde3e87c2e..04c0a5e1c89a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -37,8 +37,8 @@ #include 
#include #include "en_accel/ipsec_rxtx.h" -#include "en_accel/tls.h" -#include "en_accel/tls_rxtx.h" +#include "en_accel/ktls.h" +#include "en_accel/ktls_txrx.h" #include "en.h" #include "en/txrx.h" @@ -124,8 +124,9 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, #ifdef CONFIG_MLX5_EN_TLS /* May send SKBs and WQEs. */ - if (mlx5e_tls_skb_offloaded(skb)) - if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls))) + if (mlx5e_ktls_skb_offloaded(skb)) + if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb, + &state->tls))) return false; #endif @@ -174,7 +175,7 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq, struct mlx5_wqe_inline_seg *inlseg) { #ifdef CONFIG_MLX5_EN_TLS - mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls); + mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls); #endif #ifdef CONFIG_MLX5_EN_IPSEC diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 160aa44bcece..7f95b833f2ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -3,7 +3,6 @@ #include "en.h" #include "lib/mlx5.h" -#include "en_accel/tls.h" #include "en_accel/ktls.h" #include "en_accel/ktls_utils.h" #include "en_accel/fs_tcp.h" @@ -159,3 +158,24 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) destroy_workqueue(priv->tls->rx_wq); } + +int mlx5e_ktls_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tls *tls; + + if (!mlx5e_accel_is_ktls_device(priv->mdev)) + return 0; + + tls = kzalloc(sizeof(*tls), GFP_KERNEL); + if (!tls) + return -ENOMEM; + + priv->tls = tls; + return 0; +} + +void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) +{ + kfree(priv->tls); + priv->tls = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 82259d25a516..50e720966358 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -5,6 +5,7 @@ #define __MLX5E_KTLS_H__ #include +#include #include "en.h" #ifdef CONFIG_MLX5_EN_TLS @@ -61,6 +62,25 @@ static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) mlx5_accel_is_ktls_device(mdev); } +struct mlx5e_tls_sw_stats { + atomic64_t tx_tls_ctx; + atomic64_t tx_tls_del; + atomic64_t rx_tls_ctx; + atomic64_t rx_tls_del; +}; + +struct mlx5e_tls { + struct mlx5e_tls_sw_stats sw_stats; + struct workqueue_struct *rx_wq; +}; + +int mlx5e_ktls_init(struct mlx5e_priv *priv); +void mlx5e_ktls_cleanup(struct mlx5e_priv *priv); + +int mlx5e_ktls_get_count(struct mlx5e_priv *priv); +int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data); +int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data); + #else static inline int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, @@ -107,6 +127,18 @@ static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return f static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; } static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } +static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { } +static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; } +static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) +{ + return 0; +} + +static inline int mlx5e_ktls_get_stats(struct 
mlx5e_priv *priv, u64 *data) +{ + return 0; +} #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 96064a2033f7..0bb0633b7542 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -3,7 +3,7 @@ #include #include "en_accel/en_accel.h" -#include "en_accel/tls.h" +#include "en_accel/ktls.h" #include "en_accel/ktls_txrx.h" #include "en_accel/ktls_utils.h" #include "en_accel/fs_tcp.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c new file mode 100644 index 000000000000..2ab46c4247ff --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include + +#include "en.h" +#include "fpga/sdk.h" +#include "en_accel/ktls.h" + +static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) }, +}; + +#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ + atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) + +int mlx5e_ktls_get_count(struct mlx5e_priv *priv) +{ + if (!priv->tls) + return 0; + + return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); +} + +int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) +{ + unsigned int i, n, idx = 0; + + if (!priv->tls) + return 0; + + n = mlx5e_ktls_get_count(priv); + + for (i = 0; i < n; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + mlx5e_ktls_sw_stats_desc[i].format); + + return n; +} + +int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data) +{ + unsigned int i, n, idx = 0; + + if (!priv->tls) + return 0; + + n = mlx5e_ktls_get_count(priv); + + for (i = 0; i < n; i++) + data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, + mlx5e_ktls_sw_stats_desc, + i); + + return n; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index aaf11c66bf4c..cadf322f9d9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2019 Mellanox Technologies. -#include "en_accel/tls.h" +#include "en_accel/ktls.h" #include "en_accel/ktls_txrx.h" #include "en_accel/ktls_utils.h" @@ -448,14 +448,26 @@ err_out: return MLX5E_KTLS_SYNC_FAIL; } -bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, - struct sk_buff *skb, int datalen, +bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, + struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state) { struct mlx5e_ktls_offload_context_tx *priv_tx; struct mlx5e_sq_stats *stats = sq->stats; + struct tls_context *tls_ctx; + int datalen; u32 seq; + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (!datalen) + return true; + + mlx5e_tx_mpwqe_ensure_complete(sq); + + tls_ctx = tls_get_ctx(skb->sk); + if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) + goto err_out; + priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index 08c9d5134479..2dd78dd4ad65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -16,8 +16,8 @@ struct mlx5e_accel_tx_tls_state { u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); -bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, - struct sk_buff *skb, int datalen, +bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, + struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt); @@ -48,6 +48,18 @@ mlx5e_ktls_rx_pending_resync_list(struct 
mlx5e_channel *c, int budget) { return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state); } + +static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb) +{ + return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); +} + +static inline void +mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, + struct mlx5e_accel_tx_tls_state *state) +{ + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); +} #else static inline bool mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, @@ -69,6 +81,18 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) return false; } +static inline u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + return 0; +} + +static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, + struct sk_buff *skb, + struct mlx5_cqe64 *cqe, + u32 *cqe_bcnt) +{ +} #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_TXRX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c deleted file mode 100644 index 0609828bb973..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include -#include -#include "en_accel/tls.h" - -void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) -{ - if (!mlx5e_accel_is_ktls_device(priv->mdev)) - return; - - mlx5e_ktls_build_netdev(priv); -} - -int mlx5e_tls_init(struct mlx5e_priv *priv) -{ - struct mlx5e_tls *tls; - - if (!mlx5e_accel_is_tls_device(priv->mdev)) - return 0; - - tls = kzalloc(sizeof(*tls), GFP_KERNEL); - if (!tls) - return -ENOMEM; - - priv->tls = tls; - return 0; -} - -void mlx5e_tls_cleanup(struct mlx5e_priv *priv) -{ - struct mlx5e_tls *tls = priv->tls; - - if (!tls) - return; - - kfree(tls); - priv->tls = NULL; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h deleted file mode 100644 index a9776fd04b20..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ -#ifndef __MLX5E_TLS_H__ -#define __MLX5E_TLS_H__ - -#include "en_accel/ktls.h" - -#ifdef CONFIG_MLX5_EN_TLS -#include -#include "en.h" - -struct mlx5e_tls_sw_stats { - atomic64_t tx_tls_ctx; - atomic64_t tx_tls_del; - atomic64_t rx_tls_ctx; - atomic64_t rx_tls_del; -}; - -struct mlx5e_tls { - struct mlx5e_tls_sw_stats sw_stats; - struct workqueue_struct *rx_wq; -}; - -void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); -int mlx5e_tls_init(struct mlx5e_priv *priv); -void mlx5e_tls_cleanup(struct mlx5e_priv *priv); - -int mlx5e_tls_get_count(struct mlx5e_priv *priv); -int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data); -int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data); - -static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) -{ - return !is_kdump_kernel() && mlx5_accel_is_ktls_device(mdev); -} - -#else - -static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) -{ - if (!is_kdump_kernel() && - mlx5_accel_is_ktls_device(priv->mdev)) - mlx5e_ktls_build_netdev(priv); -} - -static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; } -static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { } -static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } -static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; } -static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; } -static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } - -#endif - -#endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c deleted file mode 100644 index 39412fafa860..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include "en_accel/tls.h" -#include "en_accel/tls_rxtx.h" -#include "accel/accel.h" - -#include -#include - -bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, - struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state) -{ - struct tls_context *tls_ctx; - int datalen; - - datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); - if (!datalen) - return true; - - mlx5e_tx_mpwqe_ensure_complete(sq); - - tls_ctx = tls_get_ctx(skb->sk); - if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) - goto err_out; - - return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); - -err_out: - dev_kfree_skb_any(skb); - return false; -} - -u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) -{ - if (!mlx5e_accel_is_tls_device(mdev)) - return 0; - - return mlx5e_ktls_get_stop_room(mdev, params); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h deleted file mode 100644 index 168acceb0d28..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#ifndef __MLX5E_TLS_RXTX_H__ -#define __MLX5E_TLS_RXTX_H__ - -#include "accel/accel.h" -#include "en_accel/ktls_txrx.h" - -#ifdef CONFIG_MLX5_EN_TLS - -#include -#include "en.h" -#include "en/txrx.h" - -u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); - -bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, - struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); - -static inline bool mlx5e_tls_skb_offloaded(struct sk_buff *skb) -{ - return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); -} - -static inline void -mlx5e_tls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, - struct mlx5e_accel_tx_tls_state *state) -{ - cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); -} - -static inline void -mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) -{ - if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */ - mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); -} - -#else - -static inline bool -mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; } -static inline void -mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} -static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) -{ - return 0; -} - -#endif /* CONFIG_MLX5_EN_TLS */ - -#endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c deleted file mode 100644 index c25e8742bbbf..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include -#include - -#include "en.h" -#include "fpga/sdk.h" -#include "en_accel/tls.h" - -static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) }, - { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) }, -}; - -#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ - atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) - -static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv) -{ - if (!priv->tls) - return NULL; - - return mlx5e_ktls_sw_stats_desc; -} - -int mlx5e_tls_get_count(struct mlx5e_priv *priv) -{ - if (!priv->tls) - return 0; - - return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); -} - -int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) -{ - const struct counter_desc *stats_desc; - unsigned int i, n, idx = 0; - - stats_desc = get_tls_atomic_stats(priv); - n = mlx5e_tls_get_count(priv); - - for (i = 0; i < n; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - stats_desc[i].format); - - return n; -} - -int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) -{ - const struct counter_desc *stats_desc; - unsigned int i, n, idx = 0; - - stats_desc = get_tls_atomic_stats(priv); - n = mlx5e_tls_get_count(priv); - - for (i = 0; i < n; i++) - data[idx++] = - MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, - stats_desc, i); - - return n; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7ef1ee7e7572..89a85030b0eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -47,7 +47,7 @@ #include "en_rep.h" #include "en_accel/ipsec.h" #include "en_accel/en_accel.h" -#include "en_accel/tls.h" +#include "en_accel/ktls.h" #include "accel/ipsec.h" #include "lib/vxlan.h" #include "lib/clock.h" @@ -4930,7 +4930,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); mlx5e_ipsec_build_netdev(priv); - mlx5e_tls_build_netdev(priv); + mlx5e_ktls_build_netdev(priv); } void mlx5e_create_q_counters(struct mlx5e_priv *priv) @@ -4992,7 +4992,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); - err = mlx5e_tls_init(priv); + err = mlx5e_ktls_init(priv); if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); @@ -5003,7 +5003,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_health_destroy_reporters(priv); - mlx5e_tls_cleanup(priv); + mlx5e_ktls_cleanup(priv); mlx5e_ipsec_cleanup(priv); mlx5e_fs_cleanup(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 56bb58704bf9..84cebf4c5ada 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -51,7 +51,7 @@ #include "accel/ipsec.h" #include "fpga/ipsec.h" #include "en_accel/ipsec_rxtx.h" -#include "en_accel/tls_rxtx.h" +#include "en_accel/ktls_txrx.h" #include "en/xdp.h" #include "en/xsk/rx.h" #include "en/health.h" @@ -1416,7 +1416,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mac_len = ETH_HLEN; - mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); + if (unlikely(get_cqe_tls_offload(cqe))) + 
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index bdc870f9c2f3..5123a220d7a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -32,7 +32,7 @@ #include "lib/mlx5.h" #include "en.h" -#include "en_accel/tls.h" +#include "en_accel/ktls.h" #include "en_accel/en_accel.h" #include "en/ptp.h" #include "en/port.h" @@ -1900,17 +1900,17 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls) { - return mlx5e_tls_get_count(priv); + return mlx5e_ktls_get_count(priv); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls) { - return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN); + return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls) { - return idx + mlx5e_tls_get_stats(priv, data + idx); + return idx + mlx5e_ktls_get_stats(priv, data + idx); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; } -- cgit v1.2.3 From 7a9104ea9011c0e15176c2951a97bda43c4beabf Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 4 Apr 2022 15:08:19 +0300 Subject: net/mlx5: Cleanup kTLS function names and their exposure The _accel_ part of the function is not relevant anymore, so rename kTLS functions to be without it, together with header cleanup to do not have declarations that are not used. Link: https://lore.kernel.org/r/72319e6020fb2553d02b3bbc7476bda363f6d60c.1649073691.git.leonro@nvidia.com Reviewed-by: Tariq Toukan Reviewed-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- .../net/ethernet/mellanox/mlx5/core/en/params.c | 4 +-- .../ethernet/mellanox/mlx5/core/en_accel/ktls.c | 12 ++++---- .../ethernet/mellanox/mlx5/core/en_accel/ktls.h | 35 +++++++--------------- .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- 4 files changed, 19 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 458a75607ca8..7f76c4f9389b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -804,7 +804,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev, static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev) { - if (mlx5e_accel_is_ktls_rx(mdev)) + if (mlx5e_is_ktls_rx(mdev)) return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; @@ -833,7 +833,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev, mlx5e_build_sq_param_common(mdev, param); param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */ - param->is_tls = mlx5e_accel_is_ktls_rx(mdev); + param->is_tls = mlx5e_is_ktls_rx(mdev); if (param->is_tls) param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 7f95b833f2ea..814f2a56f633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -97,15 +97,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) 
struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev)) + if (!mlx5e_is_ktls_tx(mdev) && !mlx5e_is_ktls_rx(mdev)) return; - if (mlx5e_accel_is_ktls_tx(mdev)) { + if (mlx5e_is_ktls_tx(mdev)) { netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->features |= NETIF_F_HW_TLS_TX; } - if (mlx5e_accel_is_ktls_rx(mdev)) + if (mlx5e_is_ktls_rx(mdev)) netdev->hw_features |= NETIF_F_HW_TLS_RX; netdev->tlsdev_ops = &mlx5e_ktls_ops; @@ -130,7 +130,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv) { int err; - if (!mlx5e_accel_is_ktls_rx(priv->mdev)) + if (!mlx5e_is_ktls_rx(priv->mdev)) return 0; priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx"); @@ -150,7 +150,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv) void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) { - if (!mlx5e_accel_is_ktls_rx(priv->mdev)) + if (!mlx5e_is_ktls_rx(priv->mdev)) return; if (priv->netdev->features & NETIF_F_HW_TLS_RX) @@ -163,7 +163,7 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv) { struct mlx5e_tls *tls; - if (!mlx5e_accel_is_ktls_device(priv->mdev)) + if (!mlx5e_is_ktls_device(priv->mdev)) return 0; tls = kzalloc(sizeof(*tls), GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 50e720966358..d016624fbc9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -14,8 +14,11 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, u32 *p_key_id); void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); -static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) +static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev) { + if (is_kdump_kernel()) + return false; + if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx)) return false; @@ -46,22 +49,16 @@ struct mlx5e_ktls_resync_resp * mlx5e_ktls_rx_resync_create_resp_list(void); void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list); -static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) +static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev) { return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx); } -static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) +static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) { return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx); } -static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) -{ - return !is_kdump_kernel() && - mlx5_accel_is_ktls_device(mdev); -} - struct mlx5e_tls_sw_stats { atomic64_t tx_tls_ctx; atomic64_t tx_tls_del; @@ -82,19 +79,6 @@ int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data); int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data); #else -static inline int -mlx5_ktls_create_key(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info, - u32 *p_key_id) { return -EOPNOTSUPP; } -static inline void -mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {} - -static inline bool -mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } -static inline bool -mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, - struct tls_crypto_info *crypto_info) { return false; } - static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) { } @@ -123,9 +107,10 @@ mlx5e_ktls_rx_resync_create_resp_list(void) static inline void 
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {} -static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; } -static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; } -static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; } +static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) +{ + return false; +} static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index cadf322f9d9b..4b6f0d1ea59a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -27,7 +27,7 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa { u16 num_dumps, stop_room = 0; - if (!mlx5e_accel_is_ktls_tx(mdev)) + if (!mlx5e_is_ktls_tx(mdev)) return 0; num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); -- cgit v1.2.3 From 34e63cd5ba29fb832f3fe31e37cea28027979c1d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 16 Feb 2022 13:50:15 -0600 Subject: iwlwifi: fw: Replace zero-length arrays with flexible-array members MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a regular need in the kernel to provide a way to declare having a dynamically sized set of trailing elements in a structure. Kernel code should always use “flexible array members”[1] for these cases. The older style of one-element or zero-length arrays should no longer be used[2]. [1] https://en.wikipedia.org/wiki/Flexible_array_member [2] https://www.kernel.org/doc/html/v5.16/process/deprecated.html#zero-length-and-one-element-arrays Link: https://github.com/KSPP/linux/issues/78 Signed-off-by: Gustavo A. R. 
Silva Reviewed-by: Kees Cook Acked-by: Luca Coelho Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220216195015.GA904148@embeddedor --- drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h | 4 ++-- drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 4 ++-- drivers/net/wireless/intel/iwlwifi/fw/api/filter.h | 2 +- drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 4 ++-- drivers/net/wireless/intel/iwlwifi/fw/api/sta.h | 2 +- drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h | 2 +- drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 2 +- drivers/net/wireless/intel/iwlwifi/fw/file.h | 10 +++++----- 8 files changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 52bf96585fc6..ba538d70985f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -26,7 +26,7 @@ struct iwl_fw_ini_hcmd { u8 id; u8 group; __le16 reserved; - u8 data[0]; + u8 data[]; } __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */ /** @@ -275,7 +275,7 @@ struct iwl_fw_ini_conf_set_tlv { __le32 time_point; __le32 set_type; __le32 addr_offset; - struct iwl_fw_ini_addr_val addr_val[0]; + struct iwl_fw_ini_addr_val addr_val[]; } __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 6255257ddebe..0c555089e05f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -240,7 +240,7 @@ struct iwl_mfu_assert_dump_notif { __le16 index_num; __le16 parts_num; __le32 data_size; - __le32 data[0]; + __le32 data[]; } __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */ /** @@ -276,7 +276,7 @@ struct iwl_mvm_marker { u8 marker_id; __le16 reserved; __le64 timestamp; - __le32 metadata[0]; + __le32 metadata[]; } __packed; /* MARKER_API_S_VER_1 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h index e44c70b7c790..88fe61d144d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h @@ -33,7 +33,7 @@ struct iwl_mcast_filter_cmd { u8 pass_all; u8 bssid[6]; u8 reserved[2]; - u8 addr_list[0]; + u8 addr_list[]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_filter_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 5413087ae909..5543d9cb74c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -1165,7 +1165,7 @@ struct iwl_scan_offload_profiles_query_v1 { u8 resume_while_scanning; u8 self_recovery; __le16 reserved; - struct iwl_scan_offload_profile_match_v1 matches[0]; + struct iwl_scan_offload_profile_match_v1 matches[]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ /** @@ -1209,7 +1209,7 @@ struct iwl_scan_offload_profiles_query { u8 resume_while_scanning; u8 self_recovery; __le16 reserved; - struct iwl_scan_offload_profile_match matches[0]; + struct iwl_scan_offload_profile_match matches[]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index 5edbe27c0922..d62fed543276 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h 
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -477,7 +477,7 @@ struct iwl_mvm_wep_key_cmd { u8 decryption_type; u8 flags; u8 reserved; - struct iwl_mvm_wep_key wep_key[0]; + struct iwl_mvm_wep_key wep_key[]; } __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h index 14d35000abed..893438aadab0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -132,7 +132,7 @@ struct iwl_tdls_config_cmd { __le32 pti_req_data_offset; struct iwl_tx_cmd pti_req_tx_cmd; - u8 pti_req_template[0]; + u8 pti_req_template[]; } __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 079fa0023bd8..c62576e442bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -84,7 +84,7 @@ struct iwl_fw_error_dump_data { struct iwl_fw_error_dump_file { __le32 barker; __le32 file_len; - u8 data[0]; + u8 data[]; } __packed; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 5679a78758be..a7817d952022 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -145,7 +145,7 @@ struct iwl_tlv_ucode_header { * Note that each TLV is padded to a length * that is a multiple of 4 for alignment. */ - u8 data[0]; + u8 data[]; }; /* @@ -603,7 +603,7 @@ struct iwl_fw_dbg_dest_tlv_v1 { __le32 wrap_count; u8 base_shift; u8 end_shift; - struct iwl_fw_dbg_reg_op reg_ops[0]; + struct iwl_fw_dbg_reg_op reg_ops[]; } __packed; /* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */ @@ -623,14 +623,14 @@ struct iwl_fw_dbg_dest_tlv { __le32 wrap_count; u8 base_shift; u8 size_shift; - struct iwl_fw_dbg_reg_op reg_ops[0]; + struct iwl_fw_dbg_reg_op reg_ops[]; } __packed; struct iwl_fw_dbg_conf_hcmd { u8 id; u8 reserved; __le16 len; - u8 data[0]; + u8 data[]; } __packed; /** @@ -705,7 +705,7 @@ struct iwl_fw_dbg_trigger_tlv { u8 flags; u8 reserved[5]; - u8 data[0]; + u8 data[]; } __packed; #define FW_DBG_START_FROM_ALIVE 0 -- cgit v1.2.3 From c5f675748cf0e1db5ba5514e1f34360dfb95a6ba Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 16 Feb 2022 13:50:30 -0600 Subject: iwlwifi: mei: Replace zero-length array with flexible-array member MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a regular need in the kernel to provide a way to declare having a dynamically sized set of trailing elements in a structure. Kernel code should always use “flexible array members”[1] for these cases. The older style of one-element or zero-length arrays should no longer be used[2]. [1] https://en.wikipedia.org/wiki/Flexible_array_member [2] https://www.kernel.org/doc/html/v5.16/process/deprecated.html#zero-length-and-one-element-arrays Link: https://github.com/KSPP/linux/issues/78 Signed-off-by: Gustavo A. R. 
Silva Reviewed-by: Kees Cook Acked-by: Luca Coelho Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220216195030.GA904170@embeddedor --- drivers/net/wireless/intel/iwlwifi/mei/sap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h index 11e3009121cc..be1456dea484 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h +++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h @@ -298,7 +298,7 @@ struct iwl_sap_hdr { __le16 type; __le16 len; __le32 seq_num; - u8 payload[0]; + u8 payload[]; }; /** -- cgit v1.2.3 From 29ed2d7606bb674c9ee38f59ca5b155aaea72df3 Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Fri, 18 Mar 2022 11:43:16 +0800 Subject: rtw88: change idle mode condition during hw_scan Previously we only consider single interface's status, idle mode behavior could be unexpected when multiple interfaces is active. Change to enter/leave idle mode by mac80211's configuration state. Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318034316.40720-1-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/main.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 8b9899e41b0b..b57f9262f590 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -280,7 +280,8 @@ static void rtw_ips_work(struct work_struct *work) struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work); mutex_lock(&rtwdev->mutex); - rtw_enter_ips(rtwdev); + if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE) + rtw_enter_ips(rtwdev); mutex_unlock(&rtwdev->mutex); } @@ -1353,7 +1354,7 @@ void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, rtw_leave_lps(rtwdev); - if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) { + if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) { ret = rtw_leave_ips(rtwdev); if (ret) { rtw_err(rtwdev, "failed to leave idle state\n"); @@ -1389,7 +1390,7 @@ void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); - if (rtwvif->net_type == RTW_NET_NO_LINK && hw_scan) + if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work); } -- cgit v1.2.3 From b169f877f001a474fb89939842c390518160bcc5 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:43 +0800 Subject: rtw89: ser: fix CAM leaks occurring in L2 reset The CAM, meaning address CAM and bssid CAM here, will get leaks during SER (system error recover) L2 reset process and ieee80211_restart_hw() which is called by L2 reset process eventually. The normal flow would be like -> add interface (acquire 1) -> enter ips (release 1) -> leave ips (acquire 1) -> connection (occupy 1) <(A) 1 leak after L2 reset if non-sec connection> The ieee80211_restart_hw() flow (under connection) -> ieee80211 reconfig -> add interface (acquire 1) -> leave ips (acquire 1) -> connection (occupy (A) + 2) <(B) 1 more leak> Originally, CAM is released before HW restart only if connection is under security. Now, release CAM whatever connection it is to fix leak in (A). OTOH, check if CAM is already valid to avoid acquiring multiple times to fix (B). 
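A minimal sketch of the idempotent-init guard behind fix (B), using hypothetical names (example_cam and example_get_avail_slot are not rtw89 symbols); the real hunks below apply the same early return to addr_cam->valid and bssid_cam->valid:

struct example_cam {
	bool valid;
	u8 idx;
};

static int example_cam_init(struct example_cam *cam)
{
	if (cam->valid)		/* already acquired: reuse it, do not leak a second slot */
		return 0;

	cam->idx = example_get_avail_slot();	/* hypothetical allocator */
	cam->valid = true;
	return 0;
}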
Besides, if AP mode, release address CAM of all stations before HW restart. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/cam.c | 14 ++++++++++++-- drivers/net/wireless/realtek/rtw89/ser.c | 21 +++++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 305dbbebff6b..26bef9fdd205 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -421,10 +421,8 @@ static void rtw89_cam_reset_key_iter(struct ieee80211_hw *hw, void *data) { struct rtw89_dev *rtwdev = (struct rtw89_dev *)data; - struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; rtw89_cam_sec_key_del(rtwdev, vif, sta, key, false); - rtw89_cam_deinit(rtwdev, rtwvif); } void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev, @@ -480,6 +478,12 @@ int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev, int i; int ret; + if (unlikely(addr_cam->valid)) { + rtw89_debug(rtwdev, RTW89_DBG_FW, + "addr cam is already valid; skip init\n"); + return 0; + } + ret = rtw89_cam_get_avail_addr_cam(rtwdev, &addr_cam_idx); if (ret) { rtw89_err(rtwdev, "failed to get available addr cam\n"); @@ -531,6 +535,12 @@ static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev, u8 bssid_cam_idx; int ret; + if (unlikely(bssid_cam->valid)) { + rtw89_debug(rtwdev, RTW89_DBG_FW, + "bssid cam is already valid; skip init\n"); + return 0; + } + ret = rtw89_cam_get_avail_bssid_cam(rtwdev, &bssid_cam_idx); if (ret) { rtw89_err(rtwdev, "failed to get available bssid cam\n"); diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 837cdc366a61..e86f3d89ef1b 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -220,11 +220,32 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtwvif->trigger = false; } +static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta) +{ + struct rtw89_dev *rtwdev = (struct rtw89_dev *)data; + struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + + rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam); +} + +static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) +{ + if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) + ieee80211_iterate_stations_atomic(rtwdev->hw, + ser_sta_deinit_addr_cam_iter, + rtwdev); + + rtw89_cam_deinit(rtwdev, rtwvif); +} + static void ser_reset_mac_binding(struct rtw89_dev *rtwdev) { struct rtw89_vif *rtwvif; rtw89_cam_reset_keys(rtwdev); + rtw89_for_each_rtwvif(rtwdev, rtwvif) + ser_deinit_cam(rtwdev, rtwvif); + rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM); rtw89_for_each_rtwvif(rtwdev, rtwvif) ser_reset_vif(rtwdev, rtwvif); -- cgit v1.2.3 From e1400b115cace4528dbe0f2b72103241d0d11892 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:44 +0800 Subject: rtw89: mac: move table of mem base addr to common Previously, mac_mem_base_addr_table was declared in debug.c locally because it's only used via debugfs to dump mac memory. Now, we plan to refine SER (system error recover) flow which will also need to dump mac memory to somewhere as information for error which is catched. 
So, we move mac_mem_base_addr_table to mac.c rtw89_mac_mem_base_addrs earlier as common code. (no logic is changed) Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/debug.c | 22 +--------------------- drivers/net/wireless/realtek/rtw89/mac.c | 20 ++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/mac.h | 2 ++ 3 files changed, 23 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index b73cc03cecfd..09c545497ec5 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -724,26 +724,6 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp, return count; } -static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = { - [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, - [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, - [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, - [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR, - [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR, - [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR, - [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR, - [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR, - [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR, - [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR, - [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR, - [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR, - [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR, - [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR, - [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR, - [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR, - [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR, -}; - static void rtw89_debug_dump_mac_mem(struct seq_file *m, struct rtw89_dev *rtwdev, u8 sel, u32 start_addr, u32 len) @@ -757,7 +737,7 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m, pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1; start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE; residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE; - base_addr = mac_mem_base_addr_table[sel]; + base_addr = rtw89_mac_mem_base_addrs[sel]; base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; for (p = 0; p < pages; p++) { diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 5e554bd9f036..580757bb52cf 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -10,6 +10,26 @@ #include "reg.h" #include "util.h" +const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_MAX] = { + [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, + [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, + [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, + [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR, + [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR, + [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR, + [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR, + [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR, + [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR, + [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR, + [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR, + [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR, + [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR, + [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR, + [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR, + [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR, + [RTW89_MAC_MEM_TXDATA_FIFO_1] = 
TXDATA_FIFO_1_BASE_ADDR, +}; + int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, enum rtw89_mac_hwmod_sel sel) { diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index b797667c78c6..fdc5ded23fde 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -273,6 +273,8 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_INVALID = RTW89_MAC_MEM_LAST, }; +extern const u32 rtw89_mac_mem_base_addrs[]; + enum rtw89_rpwm_req_pwr_state { RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE = 0, RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFON = 1, -- cgit v1.2.3 From 198b6cf70146ca6b575efe35c123e4951f9724f1 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:45 +0800 Subject: rtw89: mac: correct decision on error status by scenario The raw error code might combine error scenario and error status. But, the error scenario isn't parsed previously. It makes us mishandle cpu exception and assertion. Now, we correct the error status for them. Besides, a few uses of error status are refined. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 12 ++++++++++-- drivers/net/wireless/realtek/rtw89/mac.h | 8 ++++++++ drivers/net/wireless/realtek/rtw89/ser.c | 6 ++++-- 3 files changed, 22 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 580757bb52cf..87adaa08fdb9 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -257,7 +257,9 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev, u32 dmac_err, cmac_err; if (err != MAC_AX_ERR_L1_ERR_DMAC && - err != MAC_AX_ERR_L0_PROMOTE_TO_L1) + err != MAC_AX_ERR_L0_PROMOTE_TO_L1 && + err != MAC_AX_ERR_L0_ERR_CMAC0 && + err != MAC_AX_ERR_L0_ERR_CMAC1) return; rtw89_info(rtwdev, "--->\nerr=0x%x\n", err); @@ -458,7 +460,7 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev, u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev) { - u32 err; + u32 err, err_scnr; int ret; ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000, @@ -471,6 +473,12 @@ u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev) err = rtw89_read32(rtwdev, R_AX_HALT_C2H); rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); + err_scnr = RTW89_ERROR_SCENARIO(err); + if (err_scnr == RTW89_WCPU_CPU_EXCEPTION) + err = MAC_AX_ERR_CPU_EXCEPTION; + else if (err_scnr == RTW89_WCPU_ASSERTION) + err = MAC_AX_ERR_ASSERTION; + rtw89_fw_st_dbg_dump(rtwdev); rtw89_mac_dump_err_status(rtwdev, err); diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index fdc5ded23fde..a05c504505d8 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -521,6 +521,13 @@ struct rtw89_mac_dle_dfi_qempty { u32 qempty; }; +enum rtw89_mac_error_scenario { + RTW89_WCPU_CPU_EXCEPTION = 2, + RTW89_WCPU_ASSERTION = 3, +}; + +#define RTW89_ERROR_SCENARIO(__err) ((__err) >> 28) + /* Define DBG and recovery enum */ enum mac_ax_err_info { /* Get error info */ @@ -659,6 +666,7 @@ enum mac_ax_err_info { MAC_AX_ERR_L2_ERR_APB_BBRF_TO_OTHERS = 0x2370, MAC_AX_ERR_L2_RESET_DONE = 0x2400, MAC_AX_ERR_CPU_EXCEPTION = 0x3000, + MAC_AX_ERR_ASSERTION = 0x4000, MAC_AX_GET_ERR_MAX, MAC_AX_DUMP_SHAREBUFF_INDICATOR = 0x80000000, diff --git 
a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index e86f3d89ef1b..5327b97b9c72 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -477,7 +477,7 @@ int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err) { u8 event = SER_EV_NONE; - rtw89_info(rtwdev, "ser event = 0x%04x\n", err); + rtw89_info(rtwdev, "SER catches error: 0x%x\n", err); switch (err) { case MAC_AX_ERR_L1_ERR_DMAC: @@ -503,8 +503,10 @@ int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err) break; } - if (event == SER_EV_NONE) + if (event == SER_EV_NONE) { + rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err); return -EINVAL; + } ser_send_msg(&rtwdev->ser, event); return 0; -- cgit v1.2.3 From 14f9f4790048f684c2b151c899895feae0b5731a Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:46 +0800 Subject: rtw89: ser: control hci interrupts on/off by state While SER (system error recover) is processing, it's supposed to mean something is under recovery. So, disable interrupts (excluding the one of halt which could be used during SER) to avoid unexpected behavior. And then, enable interrupts after SER is done. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 19 +++++++++++++++++++ drivers/net/wireless/realtek/rtw89/pci.c | 29 +++++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/pci.h | 1 + drivers/net/wireless/realtek/rtw89/ser.c | 4 ++++ 4 files changed, 53 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 771722132c53..5efa5ca372c6 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2025,6 +2025,13 @@ struct rtw89_hci_ops { int (*mac_lv1_rcvy)(struct rtw89_dev *rtwdev, enum rtw89_lv1_rcvy_step step); void (*dump_err_status)(struct rtw89_dev *rtwdev); int (*napi_poll)(struct napi_struct *napi, int budget); + + /* Deal with locks inside recovery_start and recovery_complete callbacks + * by hci instance, and handle things which need to consider under SER. + * e.g. turn on/off interrupts except for the one for halt notification. 
+ */ + void (*recovery_start)(struct rtw89_dev *rtwdev); + void (*recovery_complete)(struct rtw89_dev *rtwdev); }; struct rtw89_hci_info { @@ -3033,6 +3040,18 @@ static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues, return rtwdev->hci.ops->flush_queues(rtwdev, queues, drop); } +static inline void rtw89_hci_recovery_start(struct rtw89_dev *rtwdev) +{ + if (rtwdev->hci.ops->recovery_start) + rtwdev->hci.ops->recovery_start(rtwdev); +} + +static inline void rtw89_hci_recovery_complete(struct rtw89_dev *rtwdev) +{ + if (rtwdev->hci.ops->recovery_complete) + rtwdev->hci.ops->recovery_complete(rtwdev); +} + static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr) { return rtwdev->hci.ops->read8(rtwdev, addr); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index e79bfc335b44..32e8283e22f3 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -647,6 +647,29 @@ static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); } +static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + unsigned long flags; + + spin_lock_irqsave(&rtwpci->irq_lock, flags); + rtwpci->under_recovery = true; + rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); + rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); +} + +static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + unsigned long flags; + + spin_lock_irqsave(&rtwpci->irq_lock, flags); + rtwpci->under_recovery = false; + rtw89_pci_enable_intr(rtwdev, rtwpci); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); +} + static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) { struct rtw89_dev *rtwdev = dev; @@ -664,6 +687,9 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN)) rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); + if (unlikely(rtwpci->under_recovery)) + return IRQ_HANDLED; + if (likely(rtwpci->running)) { local_bh_disable(); napi_schedule(&rtwdev->napi); @@ -2931,6 +2957,9 @@ static const struct rtw89_hci_ops rtw89_pci_ops = { .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, .dump_err_status = rtw89_pci_ops_dump_err_status, .napi_poll = rtw89_pci_napi_poll, + + .recovery_start = rtw89_pci_ops_recovery_start, + .recovery_complete = rtw89_pci_ops_recovery_complete, }; int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index b84acd0d0582..2c8030af3e72 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -594,6 +594,7 @@ struct rtw89_pci { /* protect TRX resources (exclude RXQ) */ spinlock_t trx_lock; bool running; + bool under_recovery; struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM]; struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM]; struct sk_buff_head h2c_queue; diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 5327b97b9c72..a20389cde7e2 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -302,8 +302,11 @@ static void hal_send_m4_event(struct rtw89_ser *ser) /* state handler */ static void ser_idle_st_hdl(struct rtw89_ser 
*ser, u8 evt) { + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + switch (evt) { case SER_EV_STATE_IN: + rtw89_hci_recovery_complete(rtwdev); break; case SER_EV_L1_RESET: ser_state_goto(ser, SER_RESET_TRX_ST); @@ -312,6 +315,7 @@ static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt) ser_state_goto(ser, SER_L2_RESET_ST); break; case SER_EV_STATE_OUT: + rtw89_hci_recovery_start(rtwdev); default: break; } -- cgit v1.2.3 From 9f8004bfed038ae22661f483c358ffc4c076739f Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:47 +0800 Subject: rtw89: ser: dump memory for fw payload engine while L2 reset When FW encounters exception or assertion, SER L2 reset process will start. It will dump some error information and re-download FW eventually. Since such errors are usually critical, we would like to keep more information about error to increase possibility of analysis and debugging FW. We first add FW payload engine (fw reserved playoad engine, fw_rsvd_ple) memory dump. FW will record things like CPU registers, backtrace entry, etc. in it for debugging. Moreover, device core dump framework is used and wrapped to collect kinds of dumps during SER L2 reset process. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 1 + drivers/net/wireless/realtek/rtw89/fw.h | 2 + drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/ser.c | 118 +++++++++++++++++++++++++- 4 files changed, 120 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 5efa5ca372c6..17ba5b85e500 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2275,6 +2275,7 @@ struct rtw89_chip_info { u32 fifo_size; u16 max_amsdu_limit; bool dis_2g_40m_ul_ofdma; + u32 rsvd_ple_ofst; const struct rtw89_hfc_param_ini *hfc_param_ini; const struct rtw89_dle_mem *dle_mem; u32 rf_base_addr[2]; diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index ed8609b204e0..d0b93a0b406d 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -2194,6 +2194,8 @@ struct rtw89_fw_h2c_rf_reg_info { #define H2C_CL_OUTSRC_RF_REG_A 0x8 #define H2C_CL_OUTSRC_RF_REG_B 0x9 +#define RTW89_FW_RSVD_PLE_SIZE 0x800 + int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev); int rtw89_fw_recognize(struct rtw89_dev *rtwdev); int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 41fc8db311ec..348ad08090b8 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2041,6 +2041,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .fifo_size = 458752, .max_amsdu_limit = 3500, .dis_2g_40m_ul_ofdma = true, + .rsvd_ple_ofst = 0x6f800, .hfc_param_ini = rtw8852a_hfc_param_ini_pcie, .dle_mem = rtw8852a_dle_mem_pcie, .rf_base_addr = {0xc000, 0xd000}, diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index a20389cde7e2..bf2f97a146ec 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -2,10 +2,14 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ 
+#include + #include "cam.h" #include "debug.h" +#include "fw.h" #include "mac.h" #include "ps.h" +#include "reg.h" #include "ser.h" #include "util.h" @@ -67,6 +71,58 @@ static char *ser_st_name(struct rtw89_ser *ser) return "err_st_name"; } +#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \ +struct ser_cd_ ## _name { \ + u32 type; \ + u32 type_size; \ + u64 padding; \ + u8 data[_size]; \ +} __packed; \ +static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \ +{ \ + p->type = _type; \ + p->type_size = sizeof(p->data); \ + p->padding = 0x0123456789abcdef; \ +} + +enum rtw89_ser_cd_type { + RTW89_SER_CD_FW_RSVD_PLE = 0, +}; + +RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple, + RTW89_SER_CD_FW_RSVD_PLE, + RTW89_FW_RSVD_PLE_SIZE); + +struct rtw89_ser_cd_buffer { + struct ser_cd_fw_rsvd_ple fwple; +} __packed; + +static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev) +{ + struct rtw89_ser_cd_buffer *buf; + + buf = vzalloc(sizeof(*buf)); + if (!buf) + return NULL; + + ser_cd_fw_rsvd_ple_init(&buf->fwple); + + return buf; +} + +static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev, + struct rtw89_ser_cd_buffer *buf) +{ + rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n"); + + /* After calling dev_coredump, buf's lifetime is supposed to be + * handled by the device coredump framework. Note that a new dump + * will be discarded if a previous one hasn't been released by + * framework yet. + */ + dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL); +} + static void ser_state_run(struct rtw89_ser *ser, u8 evt) { struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); @@ -390,6 +446,65 @@ static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt) } } +static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf, + u8 sel, u32 start_addr, u32 len) +{ + u32 *ptr = (u32 *)buf; + u32 base_addr, start_page, residue; + u32 cnt = 0; + u32 i; + + start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE; + residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE; + base_addr = rtw89_mac_mem_base_addrs[sel]; + base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; + + while (cnt < len) { + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr); + + for (i = R_AX_INDIR_ACCESS_ENTRY + residue; + i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE; + i += 4, ptr++) { + *ptr = rtw89_read32(rtwdev, i); + cnt += 4; + if (cnt >= len) + break; + } + + residue = 0; + base_addr += MAC_MEM_DUMP_PAGE_SIZE; + } +} + +static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf) +{ + u32 start_addr = rtwdev->chip->rsvd_ple_ofst; + + rtw89_debug(rtwdev, RTW89_DBG_SER, + "dump mem for fw rsvd payload engine (start addr: 0x%x)\n", + start_addr); + ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr, + RTW89_FW_RSVD_PLE_SIZE); +} + +static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser) +{ + struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + struct rtw89_ser_cd_buffer *buf; + + buf = rtw89_ser_cd_prep(rtwdev); + if (!buf) + goto bottom; + + rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data); + rtw89_ser_cd_send(rtwdev, buf); + +bottom: + ser_reset_mac_binding(rtwdev); + rtw89_core_stop(rtwdev); + INIT_LIST_HEAD(&rtwdev->rtwvifs_list); +} + static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) { struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); @@ -397,8 +512,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) switch (evt) { case SER_EV_STATE_IN: mutex_lock(&rtwdev->mutex); - 
ser_reset_mac_binding(rtwdev); - rtw89_core_stop(rtwdev); + ser_l2_reset_st_pre_hdl(ser); mutex_unlock(&rtwdev->mutex); ieee80211_restart_hw(rtwdev->hw); -- cgit v1.2.3 From f5e246846412175f38f830d9082fae0f72470843 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:48 +0800 Subject: rtw89: ser: dump fw backtrace while L2 reset Read FW backtrace entry through FW reserved payload engine, and then add FW backtrace dump during SER (system error recover) L2 reset process. It contains a list of RA (return address) and SP (stack pointer) which gives us a chance to trace back the call stack of FW. Moreover, if core dump might have wrong content due to error during dumping, we won't invoke device core dump framework. For this case, rtw89_ser_cd_free() is added to free buffer by ourselves. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/fw.h | 9 +++ drivers/net/wireless/realtek/rtw89/ser.c | 97 +++++++++++++++++++++++++++++++- 2 files changed, 105 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index d0b93a0b406d..1aaec2672237 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -2196,6 +2196,15 @@ struct rtw89_fw_h2c_rf_reg_info { #define RTW89_FW_RSVD_PLE_SIZE 0x800 +#define RTW89_WCPU_BASE_ADDR 0xA0000000 + +#define RTW89_FW_BACKTRACE_INFO_SIZE 8 +#define RTW89_VALID_FW_BACKTRACE_SIZE(_size) \ + ((_size) % RTW89_FW_BACKTRACE_INFO_SIZE == 0) + +#define RTW89_FW_BACKTRACE_MAX_SIZE 512 /* 8 * 64 (entries) */ +#define RTW89_FW_BACKTRACE_KEY 0xBACEBACE + int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev); int rtw89_fw_recognize(struct rtw89_dev *rtwdev); int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type); diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index bf2f97a146ec..f28cd0645ad9 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -87,14 +87,20 @@ static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \ enum rtw89_ser_cd_type { RTW89_SER_CD_FW_RSVD_PLE = 0, + RTW89_SER_CD_FW_BACKTRACE = 1, }; RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple, RTW89_SER_CD_FW_RSVD_PLE, RTW89_FW_RSVD_PLE_SIZE); +RTW89_DEF_SER_CD_TYPE(fw_backtrace, + RTW89_SER_CD_FW_BACKTRACE, + RTW89_FW_BACKTRACE_MAX_SIZE); + struct rtw89_ser_cd_buffer { struct ser_cd_fw_rsvd_ple fwple; + struct ser_cd_fw_backtrace fwbt; } __packed; static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev) @@ -106,6 +112,7 @@ static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev) return NULL; ser_cd_fw_rsvd_ple_init(&buf->fwple); + ser_cd_fw_backtrace_init(&buf->fwbt); return buf; } @@ -123,6 +130,21 @@ static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev, dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL); } +static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev, + struct rtw89_ser_cd_buffer *buf, bool free_self) +{ + if (!free_self) + return; + + rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n"); + + /* When some problems happen during filling data of core dump, + * we won't send it to device coredump framework. Instead, we + * free buf by ourselves. 
+ */ + vfree(buf); +} + static void ser_state_run(struct rtw89_ser *ser, u8 evt) { struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); @@ -487,19 +509,92 @@ static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf) RTW89_FW_RSVD_PLE_SIZE); } +struct __fw_backtrace_entry { + u32 wcpu_addr; + u32 size; + u32 key; +} __packed; + +struct __fw_backtrace_info { + u32 ra; + u32 sp; +} __packed; + +static_assert(RTW89_FW_BACKTRACE_INFO_SIZE == + sizeof(struct __fw_backtrace_info)); + +static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf, + const struct __fw_backtrace_entry *ent) +{ + struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf; + u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR; + u32 fwbt_size = ent->size; + u32 fwbt_key = ent->key; + u32 i; + + if (fwbt_addr == 0) { + rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n", + fwbt_addr); + return -EINVAL; + } + + if (fwbt_key != RTW89_FW_BACKTRACE_KEY) { + rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n", + fwbt_key); + return -EINVAL; + } + + if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) || + fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) { + rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n", + fwbt_size); + return -EINVAL; + } + + rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n"); + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr); + + for (i = R_AX_INDIR_ACCESS_ENTRY; + i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size; + i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) { + *ptr = (struct __fw_backtrace_info){ + .ra = rtw89_read32(rtwdev, i), + .sp = rtw89_read32(rtwdev, i + 4), + }; + rtw89_debug(rtwdev, RTW89_DBG_SER, + "next sp: 0x%x, next ra: 0x%x\n", + ptr->sp, ptr->ra); + } + + rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n"); + return 0; +} + static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser) { struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); struct rtw89_ser_cd_buffer *buf; + struct __fw_backtrace_entry fwbt_ent; + int ret = 0; buf = rtw89_ser_cd_prep(rtwdev); - if (!buf) + if (!buf) { + ret = -ENOMEM; goto bottom; + } rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data); + + fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data; + ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent); + if (ret) + goto bottom; + rtw89_ser_cd_send(rtwdev, buf); bottom: + rtw89_ser_cd_free(rtwdev, buf, !!ret); + ser_reset_mac_binding(rtwdev); rtw89_core_stop(rtwdev); INIT_LIST_HEAD(&rtwdev->rtwvifs_list); -- cgit v1.2.3 From 11fe4ccda8672db5375f8bfcb209756a31c89a82 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:49 +0800 Subject: rtw89: reconstruct fw feature As the fw features gradually increase, it would be better that we have a set of methods to maintain fw features instead of using scattered bool variables. We reconstruct the way fw recognize features, and introduce RTW89_CHK_FW_FEATURE() / RTW89_SET_FW_FEATURE() to check / set fw features for uses. 
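A hedged usage sketch (the EXAMPLE feature, the 0.13.40.0 threshold and example_use_feature() are made up; only the macro names and table shape come from the patch below): a firmware capability turns into one enum bit, one table row, and one check at the call site.

	/* 1) one new bit in enum rtw89_fw_feature (hypothetical) */
	RTW89_FW_FEATURE_EXAMPLE,

	/* 2) one row in fw_feat_tbl: enabled on 8852A firmware >= 0.13.40.0
	 *    (made-up version threshold)
	 */
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 40, 0, EXAMPLE),

	/* 3) one test where the behaviour is gated */
	if (RTW89_CHK_FW_FEATURE(EXAMPLE, &rtwdev->fw))
		example_use_feature(rtwdev);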
Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 5 ++- drivers/net/wireless/realtek/rtw89/core.h | 16 ++++++-- drivers/net/wireless/realtek/rtw89/fw.c | 53 ++++++++++++++++++++++----- drivers/net/wireless/realtek/rtw89/mac80211.c | 4 +- drivers/net/wireless/realtek/rtw89/phy.c | 2 +- 5 files changed, 62 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index bcefc968576e..c61061358980 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -759,7 +759,7 @@ static void rtw89_core_tx_wake(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { - if (!rtwdev->fw.tx_wake) + if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw)) return; if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) @@ -1454,7 +1454,8 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; - if (rtwdev->scanning && rtwdev->fw.scan_offload) { + if (rtwdev->scanning && + RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { rx_status->freq = ieee80211_channel_to_frequency(hal->current_channel, hal->current_band_type); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 17ba5b85e500..395d3bacdd3f 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2396,6 +2396,12 @@ enum rtw89_fw_type { RTW89_FW_WOWLAN = 3, }; +enum rtw89_fw_feature { + RTW89_FW_FEATURE_OLD_HT_RA_FORMAT, + RTW89_FW_FEATURE_SCAN_OFFLOAD, + RTW89_FW_FEATURE_TX_WAKE, +}; + struct rtw89_fw_suit { const u8 *data; u32 size; @@ -2425,11 +2431,15 @@ struct rtw89_fw_info { struct rtw89_fw_suit normal; struct rtw89_fw_suit wowlan; bool fw_log_enable; - bool old_ht_ra_format; - bool scan_offload; - bool tx_wake; + u32 feature_map; }; +#define RTW89_CHK_FW_FEATURE(_feat, _fw) \ + (!!((_fw)->feature_map & BIT(RTW89_FW_FEATURE_ ## _feat))) + +#define RTW89_SET_FW_FEATURE(_fw_feature, _fw) \ + ((_fw)->feature_map |= BIT(_fw_feature)) + struct rtw89_cam_info { DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM); DECLARE_BITMAP(bssid_cam_map, RTW89_MAX_BSSID_CAM_NUM); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 6deaf8eec6b4..76a290090507 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -193,22 +193,55 @@ int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) return 0; } +#define __DEF_FW_FEAT_COND(__cond, __op) \ +static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ +{ \ + return suit_ver_code __op comp_ver_code; \ +} + +__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ +__DEF_FW_FEAT_COND(le, <=); /* less or equal */ + +struct __fw_feat_cfg { + enum rtw89_core_chip_id chip_id; + enum rtw89_fw_feature feature; + u32 ver_code; + bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); +}; + +#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ + { \ + .chip_id = _chip, \ + .feature = RTW89_FW_FEATURE_ ## _feat, \ + .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ + .cond = __fw_feat_cond_ ## _cond, \ + } + +static const struct __fw_feat_cfg fw_feat_tbl[] = { + 
__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), + __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), + __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), +}; + static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; - struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); + const struct __fw_feat_cfg *ent; + const struct rtw89_fw_suit *fw_suit; + u32 suit_ver_code; + int i; - if (chip->chip_id == RTL8852A && - RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0)) - rtwdev->fw.old_ht_ra_format = true; + fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); + suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); - if (chip->chip_id == RTL8852A && - RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0)) - rtwdev->fw.scan_offload = true; + for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { + ent = &fw_feat_tbl[i]; + if (chip->chip_id != ent->chip_id) + continue; - if (chip->chip_id == RTL8852A && - RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 35, 0)) - rtwdev->fw.tx_wake = true; + if (ent->cond(suit_ver_code, ent->ver_code)) + RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw); + } } int rtw89_fw_recognize(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index fca9f82bb462..8da3e117ad38 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -725,7 +725,7 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct rtw89_dev *rtwdev = hw->priv; int ret = 0; - if (!rtwdev->fw.scan_offload) + if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) return 1; if (rtwdev->scanning) @@ -748,7 +748,7 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw, { struct rtw89_dev *rtwdev = hw->priv; - if (!rtwdev->fw.scan_offload) + if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) return; if (!rtwdev->scanning) diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index ac211d897311..c9a04ca0b1f2 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -1686,7 +1686,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) break; case RTW89_RA_RPT_MODE_HT: ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS; - if (rtwdev->fw.old_ht_ra_format) + if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw)) rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate), FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate)); else -- cgit v1.2.3 From edb896297abeef8c95c150247607b09517469a9a Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 14 Mar 2022 15:12:50 +0800 Subject: rtw89: support FW crash simulation Originally, there is already a mechanism, SER (system error recover), to deal with HW/FW recovery. After FW v0.13.36.0, FW supports a H2C (host to chip) command to make a CPU exception. Then, SER is supposed to catch this FW crash and do L2 reset. This feature is a simulation to verify if flow of recovering from FW crash works. Usage of fw_crash debugfs is as the following. 
$ echo 1 > fw_crash // trigger FW crash and wait SER handling $ cat fw_crash // return 0 if restart has been done Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220314071250.40292-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 2 ++ drivers/net/wireless/realtek/rtw89/debug.c | 48 ++++++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/fw.c | 36 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/fw.h | 12 ++++++++ drivers/net/wireless/realtek/rtw89/ser.c | 1 + 5 files changed, 99 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 395d3bacdd3f..7d8140cc25a7 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2400,6 +2400,7 @@ enum rtw89_fw_feature { RTW89_FW_FEATURE_OLD_HT_RA_FORMAT, RTW89_FW_FEATURE_SCAN_OFFLOAD, RTW89_FW_FEATURE_TX_WAKE, + RTW89_FW_FEATURE_CRASH_TRIGGER, }; struct rtw89_fw_suit { @@ -2502,6 +2503,7 @@ enum rtw89_flags { RTW89_FLAG_LEISURE_PS, RTW89_FLAG_LOW_POWER_MODE, RTW89_FLAG_INACTIVE_PS, + RTW89_FLAG_RESTART_TRIGGER, NUM_OF_RTW89_FLAGS, }; diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index 09c545497ec5..f93f3fee1505 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -2184,6 +2184,48 @@ out: return count; } +static int +rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v) +{ + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + + seq_printf(m, "%d\n", + test_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags)); + return 0; +} + +static ssize_t +rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf, + size_t count, loff_t *loff) +{ + struct seq_file *m = (struct seq_file *)filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + bool fw_crash; + int ret; + + if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw)) + return -EOPNOTSUPP; + + ret = kstrtobool_from_user(user_buf, count, &fw_crash); + if (ret) + return -EINVAL; + + if (!fw_crash) + return -EINVAL; + + mutex_lock(&rtwdev->mutex); + set_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags); + ret = rtw89_fw_h2c_trigger_cpu_exception(rtwdev); + mutex_unlock(&rtwdev->mutex); + + if (ret) + return ret; + + return count; +} + static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; @@ -2468,6 +2510,11 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = { .cb_write = rtw89_debug_priv_early_h2c_set, }; +static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = { + .cb_read = rtw89_debug_priv_fw_crash_get, + .cb_write = rtw89_debug_priv_fw_crash_set, +}; + static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = { .cb_read = rtw89_debug_priv_btc_info_get, }; @@ -2522,6 +2569,7 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev) rtw89_debugfs_add_rw(mac_dbg_port_dump); rtw89_debugfs_add_w(send_h2c); rtw89_debugfs_add_rw(early_h2c); + rtw89_debugfs_add_rw(fw_crash); rtw89_debugfs_add_r(btc_info); rtw89_debugfs_add_w(btc_manual); rtw89_debugfs_add_w(fw_log_manual); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 76a290090507..2c9470616a1b 100644 --- 
a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -221,6 +221,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = { __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), + __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), }; static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) @@ -2287,3 +2288,38 @@ void rtw89_store_op_chan(struct rtw89_dev *rtwdev) scan_info->op_bw = hal->current_band_width; scan_info->op_band = hal->current_band_type; } + +#define H2C_FW_CPU_EXCEPTION_LEN 4 +#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 +int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_FW_CPU_EXCEPTION_LEN); + if (!skb) { + rtw89_err(rtwdev, + "failed to alloc skb for fw cpu exception\n"); + return -ENOMEM; + } + + skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); + RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, + H2C_FW_CPU_EXCEPTION_TYPE_DEF); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_TEST, + H2C_CL_FW_STATUS_TEST, + H2C_FUNC_CPU_EXCEPTION, 0, 0, + H2C_FW_CPU_EXCEPTION_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; + +fail: + dev_kfree_skb_any(skb); + return -EBUSY; +} diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 1aaec2672237..24ab249a8ece 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -1461,6 +1461,11 @@ static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val) le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8)); } +static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val) +{ + le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0)); +} + enum rtw89_btc_btf_h2c_class { BTFC_SET = 0x10, BTFC_GET = 0x11, @@ -2140,6 +2145,12 @@ struct rtw89_fw_h2c_rf_reg_info { #define FWCMD_TYPE_H2C 0 +#define H2C_CAT_TEST 0x0 + +/* CLASS 5 - FW STATUS TEST */ +#define H2C_CL_FW_STATUS_TEST 0x5 +#define H2C_FUNC_CPU_EXCEPTION 0x1 + #define H2C_CAT_MAC 0x1 /* CLASS 0 - FW INFO */ @@ -2284,5 +2295,6 @@ void rtw89_hw_scan_status_report(struct rtw89_dev *rtwdev, struct sk_buff *skb); void rtw89_hw_scan_chan_switch(struct rtw89_dev *rtwdev, struct sk_buff *skb); void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); void rtw89_store_op_chan(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index f28cd0645ad9..25d1df10f226 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -619,6 +619,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) fallthrough; case SER_EV_L2_RECFG_DONE: ser_state_goto(ser, SER_IDLE_ST); + clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags); break; case SER_EV_STATE_OUT: -- cgit v1.2.3 From 306451188062a788c59d6b708acd1f2ba2274bd9 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 15 Mar 2022 09:55:22 +0800 Subject: rtw89: reduce export symbol number of mac size and quota An export symbol costs about 40 bytes (in x86 with gcc), so use a structure containing these small arrays to reduce code size. 
text data bss dec hex filename 34932 1410 0 36342 8df6 mac.o (before) 34276 1258 0 35534 8ace mac.o (after) Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220315015522.11366-1-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 158 +++++++------------------- drivers/net/wireless/realtek/rtw89/mac.h | 44 +++---- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 18 +-- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 14 ++- 4 files changed, 83 insertions(+), 151 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 87adaa08fdb9..d75105441372 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -510,11 +510,6 @@ int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err) } EXPORT_SYMBOL(rtw89_mac_set_err_status); -const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie = { - 2, 40, 0, 0, 1, 0, 0, 0 -}; -EXPORT_SYMBOL(rtw89_hfc_preccfg_pcie); - static int hfc_reset_param(struct rtw89_dev *rtwdev) { struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; @@ -1216,119 +1211,48 @@ static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev) return ret; } -/* PCIE 64 */ -const struct rtw89_dle_size rtw89_wde_size0 = { - RTW89_WDE_PG_64, 4095, 1, -}; -EXPORT_SYMBOL(rtw89_wde_size0); - -/* DLFW */ -const struct rtw89_dle_size rtw89_wde_size4 = { - RTW89_WDE_PG_64, 0, 4096, -}; -EXPORT_SYMBOL(rtw89_wde_size4); - -/* 8852C DLFW */ -const struct rtw89_dle_size rtw89_wde_size18 = { - RTW89_WDE_PG_64, 0, 2048, -}; -EXPORT_SYMBOL(rtw89_wde_size18); - -/* 8852C PCIE SCC */ -const struct rtw89_dle_size rtw89_wde_size19 = { - RTW89_WDE_PG_64, 3328, 0, -}; -EXPORT_SYMBOL(rtw89_wde_size19); - -/* PCIE */ -const struct rtw89_dle_size rtw89_ple_size0 = { - RTW89_PLE_PG_128, 1520, 16, -}; -EXPORT_SYMBOL(rtw89_ple_size0); - -/* DLFW */ -const struct rtw89_dle_size rtw89_ple_size4 = { - RTW89_PLE_PG_128, 64, 1472, -}; -EXPORT_SYMBOL(rtw89_ple_size4); - -/* 8852C DLFW */ -const struct rtw89_dle_size rtw89_ple_size18 = { - RTW89_PLE_PG_128, 2544, 16, -}; -EXPORT_SYMBOL(rtw89_ple_size18); - -/* 8852C PCIE SCC */ -const struct rtw89_dle_size rtw89_ple_size19 = { - RTW89_PLE_PG_128, 1904, 16, -}; -EXPORT_SYMBOL(rtw89_ple_size19); - -/* PCIE 64 */ -const struct rtw89_wde_quota rtw89_wde_qt0 = { - 3792, 196, 0, 107, -}; -EXPORT_SYMBOL(rtw89_wde_qt0); - -/* DLFW */ -const struct rtw89_wde_quota rtw89_wde_qt4 = { - 0, 0, 0, 0, -}; -EXPORT_SYMBOL(rtw89_wde_qt4); - -/* 8852C DLFW */ -const struct rtw89_wde_quota rtw89_wde_qt17 = { - 0, 0, 0, 0, -}; -EXPORT_SYMBOL(rtw89_wde_qt17); - -/* 8852C PCIE SCC */ -const struct rtw89_wde_quota rtw89_wde_qt18 = { - 3228, 60, 0, 40, -}; -EXPORT_SYMBOL(rtw89_wde_qt18); - -/* PCIE SCC */ -const struct rtw89_ple_quota rtw89_ple_qt4 = { - 264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8, -}; -EXPORT_SYMBOL(rtw89_ple_qt4); - -/* PCIE SCC */ -const struct rtw89_ple_quota rtw89_ple_qt5 = { - 264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120, -}; -EXPORT_SYMBOL(rtw89_ple_qt5); - -/* DLFW */ -const struct rtw89_ple_quota rtw89_ple_qt13 = { - 0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0 -}; -EXPORT_SYMBOL(rtw89_ple_qt13); - -/* DLFW 52C */ -const struct rtw89_ple_quota rtw89_ple_qt44 = { - 0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0, -}; -EXPORT_SYMBOL(rtw89_ple_qt44); - -/* DLFW 52C */ -const struct rtw89_ple_quota rtw89_ple_qt45 = { - 0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0, -}; -EXPORT_SYMBOL(rtw89_ple_qt45); - -/* 8852C PCIE SCC */ -const 
struct rtw89_ple_quota rtw89_ple_qt46 = { - 525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16, -}; -EXPORT_SYMBOL(rtw89_ple_qt46); - -/* 8852C PCIE SCC */ -const struct rtw89_ple_quota rtw89_ple_qt47 = { - 525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037, +const struct rtw89_mac_size_set rtw89_mac_size = { + .hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0}, + /* PCIE 64 */ + .wde_size0 = {RTW89_WDE_PG_64, 4095, 1,}, + /* DLFW */ + .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,}, + /* 8852C DLFW */ + .wde_size18 = {RTW89_WDE_PG_64, 0, 2048,}, + /* 8852C PCIE SCC */ + .wde_size19 = {RTW89_WDE_PG_64, 3328, 0,}, + /* PCIE */ + .ple_size0 = {RTW89_PLE_PG_128, 1520, 16,}, + /* DLFW */ + .ple_size4 = {RTW89_PLE_PG_128, 64, 1472,}, + /* 8852C DLFW */ + .ple_size18 = {RTW89_PLE_PG_128, 2544, 16,}, + /* 8852C PCIE SCC */ + .ple_size19 = {RTW89_PLE_PG_128, 1904, 16,}, + /* PCIE 64 */ + .wde_qt0 = {3792, 196, 0, 107,}, + /* DLFW */ + .wde_qt4 = {0, 0, 0, 0,}, + /* 8852C DLFW */ + .wde_qt17 = {0, 0, 0, 0,}, + /* 8852C PCIE SCC */ + .wde_qt18 = {3228, 60, 0, 40,}, + /* PCIE SCC */ + .ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,}, + /* PCIE SCC */ + .ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,}, + /* DLFW */ + .ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,}, + /* DLFW 52C */ + .ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,}, + /* DLFW 52C */ + .ple_qt45 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,}, + /* 8852C PCIE SCC */ + .ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,}, + /* 8852C PCIE SCC */ + .ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,}, }; -EXPORT_SYMBOL(rtw89_ple_qt47); +EXPORT_SYMBOL(rtw89_mac_size); static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index a05c504505d8..6edeb1aafaf7 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -682,26 +682,30 @@ enum mac_ax_err_info { MAC_AX_SET_ERR_MAX, }; -extern const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie; -extern const struct rtw89_dle_size rtw89_wde_size0; -extern const struct rtw89_dle_size rtw89_wde_size4; -extern const struct rtw89_dle_size rtw89_wde_size18; -extern const struct rtw89_dle_size rtw89_wde_size19; -extern const struct rtw89_dle_size rtw89_ple_size0; -extern const struct rtw89_dle_size rtw89_ple_size4; -extern const struct rtw89_dle_size rtw89_ple_size18; -extern const struct rtw89_dle_size rtw89_ple_size19; -extern const struct rtw89_wde_quota rtw89_wde_qt0; -extern const struct rtw89_wde_quota rtw89_wde_qt4; -extern const struct rtw89_wde_quota rtw89_wde_qt17; -extern const struct rtw89_wde_quota rtw89_wde_qt18; -extern const struct rtw89_ple_quota rtw89_ple_qt4; -extern const struct rtw89_ple_quota rtw89_ple_qt5; -extern const struct rtw89_ple_quota rtw89_ple_qt13; -extern const struct rtw89_ple_quota rtw89_ple_qt44; -extern const struct rtw89_ple_quota rtw89_ple_qt45; -extern const struct rtw89_ple_quota rtw89_ple_qt46; -extern const struct rtw89_ple_quota rtw89_ple_qt47; +struct rtw89_mac_size_set { + const struct rtw89_hfc_prec_cfg hfc_preccfg_pcie; + const struct rtw89_dle_size wde_size0; + const struct rtw89_dle_size wde_size4; + const struct rtw89_dle_size wde_size18; + const struct rtw89_dle_size wde_size19; + const struct rtw89_dle_size ple_size0; + const struct rtw89_dle_size ple_size4; + const struct rtw89_dle_size ple_size18; + const struct rtw89_dle_size 
ple_size19; + const struct rtw89_wde_quota wde_qt0; + const struct rtw89_wde_quota wde_qt4; + const struct rtw89_wde_quota wde_qt17; + const struct rtw89_wde_quota wde_qt18; + const struct rtw89_ple_quota ple_qt4; + const struct rtw89_ple_quota ple_qt5; + const struct rtw89_ple_quota ple_qt13; + const struct rtw89_ple_quota ple_qt44; + const struct rtw89_ple_quota ple_qt45; + const struct rtw89_ple_quota ple_qt46; + const struct rtw89_ple_quota ple_qt47; +}; + +extern const struct rtw89_mac_size_set rtw89_mac_size; static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band) { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 348ad08090b8..67aaa05cb751 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -37,19 +37,21 @@ static const struct rtw89_hfc_pub_cfg rtw8852a_hfc_pubcfg_pcie = { static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = { [RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie, - &rtw89_hfc_preccfg_pcie, RTW89_HCIFC_POH}, - [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_hfc_preccfg_pcie, + &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH}, + [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH}, [RTW89_QTA_INVALID] = {NULL}, }; static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = { - [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size0, &rtw89_ple_size0, - &rtw89_wde_qt0, &rtw89_wde_qt0, &rtw89_ple_qt4, - &rtw89_ple_qt5}, - [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size4, &rtw89_ple_size4, - &rtw89_wde_qt4, &rtw89_wde_qt4, &rtw89_ple_qt13, - &rtw89_ple_qt13}, + [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size0, + &rtw89_mac_size.ple_size0, &rtw89_mac_size.wde_qt0, + &rtw89_mac_size.wde_qt0, &rtw89_mac_size.ple_qt4, + &rtw89_mac_size.ple_qt5}, + [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4, + &rtw89_mac_size.ple_size4, &rtw89_mac_size.wde_qt4, + &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13, + &rtw89_mac_size.ple_qt13}, [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL, NULL}, }; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 58920e91765e..123cc3c4318d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -10,12 +10,14 @@ #include "rtw8852c.h" static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = { - [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size19, &rtw89_ple_size19, - &rtw89_wde_qt18, &rtw89_wde_qt18, &rtw89_ple_qt46, - &rtw89_ple_qt47}, - [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size18, - &rtw89_ple_size18, &rtw89_wde_qt17, &rtw89_wde_qt17, - &rtw89_ple_qt44, &rtw89_ple_qt45}, + [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size19, + &rtw89_mac_size.ple_size19, &rtw89_mac_size.wde_qt18, + &rtw89_mac_size.wde_qt18, &rtw89_mac_size.ple_qt46, + &rtw89_mac_size.ple_qt47}, + [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size18, + &rtw89_mac_size.ple_size18, &rtw89_mac_size.wde_qt17, + &rtw89_mac_size.wde_qt17, &rtw89_mac_size.ple_qt44, + &rtw89_mac_size.ple_qt45}, [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL, NULL}, }; -- cgit v1.2.3 From 5a0e776bec96e373940731875f9e58f9a747a6e4 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Fri, 18 Mar 2022 10:32:04 +0800 Subject: rtw89: add UK to regulation type Add RTW89_UK to enum rtw89_regulation_type. 
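As a hedged illustration of why the new enum entry needs matching table data (bw, ntx, rs, bf and ch are placeholder indices, not taken from this patch), the regulatory domain is one dimension of the per-domain TX power limit tables:

	s8 lmt = rtw89_8852a_txpwr_lmt_2g[bw][ntx][rs][bf][RTW89_UK][ch];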
The follow-up commit will configure the corresponding values for it to TX power tables. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 7d8140cc25a7..5900cbc0efd9 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -446,6 +446,7 @@ enum rtw89_regulation_type { RTW89_UKRAINE = 11, RTW89_CN = 12, RTW89_QATAR = 13, + RTW89_UK = 14, RTW89_REGD_NUM, }; -- cgit v1.2.3 From c504bf23290c980588a19f228ffe6b37097dc460 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Fri, 18 Mar 2022 10:32:05 +0800 Subject: rtw89: 8852a: update txpwr tables to HALRF_027_00_038 Update notes: TX power by rate table is not changed. TX power limit table configures values for UK. TX power limit RU table configures values for UK. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-3-pkshih@realtek.com --- .../net/wireless/realtek/rtw89/rtw8852a_table.c | 581 +++++++++++++++++++++ 1 file changed, 581 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c index 253b5f8fc4f9..8c4749f10641 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c @@ -43563,6 +43563,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][0] = 76, [0][0][0][0][RTW89_CN][0] = 56, [0][0][0][0][RTW89_QATAR][0] = 56, + [0][0][0][0][RTW89_UK][0] = 56, [0][0][0][0][RTW89_FCC][1] = 76, [0][0][0][0][RTW89_ETSI][1] = 56, [0][0][0][0][RTW89_MKK][1] = 68, @@ -43574,6 +43575,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][1] = 76, [0][0][0][0][RTW89_CN][1] = 56, [0][0][0][0][RTW89_QATAR][1] = 56, + [0][0][0][0][RTW89_UK][1] = 56, [0][0][0][0][RTW89_FCC][2] = 76, [0][0][0][0][RTW89_ETSI][2] = 56, [0][0][0][0][RTW89_MKK][2] = 68, @@ -43585,6 +43587,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][2] = 76, [0][0][0][0][RTW89_CN][2] = 56, [0][0][0][0][RTW89_QATAR][2] = 56, + [0][0][0][0][RTW89_UK][2] = 56, [0][0][0][0][RTW89_FCC][3] = 76, [0][0][0][0][RTW89_ETSI][3] = 56, [0][0][0][0][RTW89_MKK][3] = 68, @@ -43596,6 +43599,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][3] = 76, [0][0][0][0][RTW89_CN][3] = 56, [0][0][0][0][RTW89_QATAR][3] = 56, + [0][0][0][0][RTW89_UK][3] = 56, [0][0][0][0][RTW89_FCC][4] = 76, [0][0][0][0][RTW89_ETSI][4] = 56, [0][0][0][0][RTW89_MKK][4] = 68, @@ -43607,6 +43611,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][4] = 76, [0][0][0][0][RTW89_CN][4] = 56, [0][0][0][0][RTW89_QATAR][4] = 56, + [0][0][0][0][RTW89_UK][4] = 56, [0][0][0][0][RTW89_FCC][5] = 76, [0][0][0][0][RTW89_ETSI][5] = 56, [0][0][0][0][RTW89_MKK][5] = 68, @@ -43618,6 +43623,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][5] = 76, [0][0][0][0][RTW89_CN][5] = 56, [0][0][0][0][RTW89_QATAR][5] = 56, + [0][0][0][0][RTW89_UK][5] = 56, [0][0][0][0][RTW89_FCC][6] = 76, 
[0][0][0][0][RTW89_ETSI][6] = 56, [0][0][0][0][RTW89_MKK][6] = 68, @@ -43629,6 +43635,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][6] = 76, [0][0][0][0][RTW89_CN][6] = 56, [0][0][0][0][RTW89_QATAR][6] = 56, + [0][0][0][0][RTW89_UK][6] = 56, [0][0][0][0][RTW89_FCC][7] = 76, [0][0][0][0][RTW89_ETSI][7] = 56, [0][0][0][0][RTW89_MKK][7] = 68, @@ -43640,6 +43647,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][7] = 76, [0][0][0][0][RTW89_CN][7] = 56, [0][0][0][0][RTW89_QATAR][7] = 56, + [0][0][0][0][RTW89_UK][7] = 56, [0][0][0][0][RTW89_FCC][8] = 76, [0][0][0][0][RTW89_ETSI][8] = 56, [0][0][0][0][RTW89_MKK][8] = 68, @@ -43651,6 +43659,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][8] = 76, [0][0][0][0][RTW89_CN][8] = 56, [0][0][0][0][RTW89_QATAR][8] = 56, + [0][0][0][0][RTW89_UK][8] = 56, [0][0][0][0][RTW89_FCC][9] = 76, [0][0][0][0][RTW89_ETSI][9] = 56, [0][0][0][0][RTW89_MKK][9] = 68, @@ -43662,6 +43671,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][9] = 76, [0][0][0][0][RTW89_CN][9] = 56, [0][0][0][0][RTW89_QATAR][9] = 56, + [0][0][0][0][RTW89_UK][9] = 56, [0][0][0][0][RTW89_FCC][10] = 76, [0][0][0][0][RTW89_ETSI][10] = 56, [0][0][0][0][RTW89_MKK][10] = 68, @@ -43673,6 +43683,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][10] = 76, [0][0][0][0][RTW89_CN][10] = 56, [0][0][0][0][RTW89_QATAR][10] = 56, + [0][0][0][0][RTW89_UK][10] = 56, [0][0][0][0][RTW89_FCC][11] = 68, [0][0][0][0][RTW89_ETSI][11] = 56, [0][0][0][0][RTW89_MKK][11] = 68, @@ -43684,6 +43695,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][11] = 68, [0][0][0][0][RTW89_CN][11] = 56, [0][0][0][0][RTW89_QATAR][11] = 56, + [0][0][0][0][RTW89_UK][11] = 56, [0][0][0][0][RTW89_FCC][12] = 48, [0][0][0][0][RTW89_ETSI][12] = 56, [0][0][0][0][RTW89_MKK][12] = 68, @@ -43695,6 +43707,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][12] = 48, [0][0][0][0][RTW89_CN][12] = 56, [0][0][0][0][RTW89_QATAR][12] = 56, + [0][0][0][0][RTW89_UK][12] = 56, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, [0][0][0][0][RTW89_MKK][13] = 76, @@ -43706,6 +43719,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_MEXICO][13] = 127, [0][0][0][0][RTW89_CN][13] = 127, [0][0][0][0][RTW89_QATAR][13] = 127, + [0][0][0][0][RTW89_UK][13] = 127, [0][1][0][0][RTW89_FCC][0] = 74, [0][1][0][0][RTW89_ETSI][0] = 44, [0][1][0][0][RTW89_MKK][0] = 56, @@ -43717,6 +43731,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][0] = 74, [0][1][0][0][RTW89_CN][0] = 44, [0][1][0][0][RTW89_QATAR][0] = 44, + [0][1][0][0][RTW89_UK][0] = 44, [0][1][0][0][RTW89_FCC][1] = 76, [0][1][0][0][RTW89_ETSI][1] = 44, [0][1][0][0][RTW89_MKK][1] = 56, @@ -43728,6 +43743,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][1] = 76, [0][1][0][0][RTW89_CN][1] = 44, [0][1][0][0][RTW89_QATAR][1] = 44, + [0][1][0][0][RTW89_UK][1] = 44, [0][1][0][0][RTW89_FCC][2] = 76, [0][1][0][0][RTW89_ETSI][2] = 44, [0][1][0][0][RTW89_MKK][2] = 56, @@ -43739,6 +43755,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][2] = 76, [0][1][0][0][RTW89_CN][2] = 44, 
[0][1][0][0][RTW89_QATAR][2] = 44, + [0][1][0][0][RTW89_UK][2] = 44, [0][1][0][0][RTW89_FCC][3] = 76, [0][1][0][0][RTW89_ETSI][3] = 44, [0][1][0][0][RTW89_MKK][3] = 56, @@ -43750,6 +43767,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][3] = 76, [0][1][0][0][RTW89_CN][3] = 44, [0][1][0][0][RTW89_QATAR][3] = 44, + [0][1][0][0][RTW89_UK][3] = 44, [0][1][0][0][RTW89_FCC][4] = 76, [0][1][0][0][RTW89_ETSI][4] = 44, [0][1][0][0][RTW89_MKK][4] = 56, @@ -43761,6 +43779,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][4] = 76, [0][1][0][0][RTW89_CN][4] = 44, [0][1][0][0][RTW89_QATAR][4] = 44, + [0][1][0][0][RTW89_UK][4] = 44, [0][1][0][0][RTW89_FCC][5] = 76, [0][1][0][0][RTW89_ETSI][5] = 44, [0][1][0][0][RTW89_MKK][5] = 56, @@ -43772,6 +43791,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][5] = 76, [0][1][0][0][RTW89_CN][5] = 44, [0][1][0][0][RTW89_QATAR][5] = 44, + [0][1][0][0][RTW89_UK][5] = 44, [0][1][0][0][RTW89_FCC][6] = 76, [0][1][0][0][RTW89_ETSI][6] = 44, [0][1][0][0][RTW89_MKK][6] = 56, @@ -43783,6 +43803,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][6] = 76, [0][1][0][0][RTW89_CN][6] = 44, [0][1][0][0][RTW89_QATAR][6] = 44, + [0][1][0][0][RTW89_UK][6] = 44, [0][1][0][0][RTW89_FCC][7] = 76, [0][1][0][0][RTW89_ETSI][7] = 44, [0][1][0][0][RTW89_MKK][7] = 56, @@ -43794,6 +43815,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][7] = 76, [0][1][0][0][RTW89_CN][7] = 44, [0][1][0][0][RTW89_QATAR][7] = 44, + [0][1][0][0][RTW89_UK][7] = 44, [0][1][0][0][RTW89_FCC][8] = 76, [0][1][0][0][RTW89_ETSI][8] = 44, [0][1][0][0][RTW89_MKK][8] = 56, @@ -43805,6 +43827,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][8] = 76, [0][1][0][0][RTW89_CN][8] = 44, [0][1][0][0][RTW89_QATAR][8] = 44, + [0][1][0][0][RTW89_UK][8] = 44, [0][1][0][0][RTW89_FCC][9] = 76, [0][1][0][0][RTW89_ETSI][9] = 44, [0][1][0][0][RTW89_MKK][9] = 56, @@ -43816,6 +43839,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][9] = 76, [0][1][0][0][RTW89_CN][9] = 44, [0][1][0][0][RTW89_QATAR][9] = 44, + [0][1][0][0][RTW89_UK][9] = 44, [0][1][0][0][RTW89_FCC][10] = 62, [0][1][0][0][RTW89_ETSI][10] = 44, [0][1][0][0][RTW89_MKK][10] = 56, @@ -43827,6 +43851,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][10] = 62, [0][1][0][0][RTW89_CN][10] = 44, [0][1][0][0][RTW89_QATAR][10] = 44, + [0][1][0][0][RTW89_UK][10] = 44, [0][1][0][0][RTW89_FCC][11] = 52, [0][1][0][0][RTW89_ETSI][11] = 44, [0][1][0][0][RTW89_MKK][11] = 56, @@ -43838,6 +43863,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][11] = 52, [0][1][0][0][RTW89_CN][11] = 44, [0][1][0][0][RTW89_QATAR][11] = 44, + [0][1][0][0][RTW89_UK][11] = 44, [0][1][0][0][RTW89_FCC][12] = 38, [0][1][0][0][RTW89_ETSI][12] = 44, [0][1][0][0][RTW89_MKK][12] = 56, @@ -43849,6 +43875,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_MEXICO][12] = 38, [0][1][0][0][RTW89_CN][12] = 44, [0][1][0][0][RTW89_QATAR][12] = 44, + [0][1][0][0][RTW89_UK][12] = 44, [0][1][0][0][RTW89_FCC][13] = 127, [0][1][0][0][RTW89_ETSI][13] = 127, [0][1][0][0][RTW89_MKK][13] = 64, @@ -43860,6 +43887,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] 
[0][1][0][0][RTW89_MEXICO][13] = 127, [0][1][0][0][RTW89_CN][13] = 127, [0][1][0][0][RTW89_QATAR][13] = 127, + [0][1][0][0][RTW89_UK][13] = 127, [1][0][0][0][RTW89_FCC][0] = 127, [1][0][0][0][RTW89_ETSI][0] = 127, [1][0][0][0][RTW89_MKK][0] = 127, @@ -43871,6 +43899,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][0] = 127, [1][0][0][0][RTW89_CN][0] = 127, [1][0][0][0][RTW89_QATAR][0] = 127, + [1][0][0][0][RTW89_UK][0] = 127, [1][0][0][0][RTW89_FCC][1] = 127, [1][0][0][0][RTW89_ETSI][1] = 127, [1][0][0][0][RTW89_MKK][1] = 127, @@ -43882,6 +43911,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][1] = 127, [1][0][0][0][RTW89_CN][1] = 127, [1][0][0][0][RTW89_QATAR][1] = 127, + [1][0][0][0][RTW89_UK][1] = 127, [1][0][0][0][RTW89_FCC][2] = 60, [1][0][0][0][RTW89_ETSI][2] = 58, [1][0][0][0][RTW89_MKK][2] = 68, @@ -43893,6 +43923,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][2] = 60, [1][0][0][0][RTW89_CN][2] = 58, [1][0][0][0][RTW89_QATAR][2] = 58, + [1][0][0][0][RTW89_UK][2] = 58, [1][0][0][0][RTW89_FCC][3] = 60, [1][0][0][0][RTW89_ETSI][3] = 58, [1][0][0][0][RTW89_MKK][3] = 68, @@ -43904,6 +43935,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][3] = 60, [1][0][0][0][RTW89_CN][3] = 58, [1][0][0][0][RTW89_QATAR][3] = 58, + [1][0][0][0][RTW89_UK][3] = 58, [1][0][0][0][RTW89_FCC][4] = 60, [1][0][0][0][RTW89_ETSI][4] = 58, [1][0][0][0][RTW89_MKK][4] = 68, @@ -43915,6 +43947,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][4] = 60, [1][0][0][0][RTW89_CN][4] = 58, [1][0][0][0][RTW89_QATAR][4] = 58, + [1][0][0][0][RTW89_UK][4] = 58, [1][0][0][0][RTW89_FCC][5] = 60, [1][0][0][0][RTW89_ETSI][5] = 58, [1][0][0][0][RTW89_MKK][5] = 68, @@ -43926,6 +43959,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][5] = 60, [1][0][0][0][RTW89_CN][5] = 58, [1][0][0][0][RTW89_QATAR][5] = 58, + [1][0][0][0][RTW89_UK][5] = 58, [1][0][0][0][RTW89_FCC][6] = 46, [1][0][0][0][RTW89_ETSI][6] = 58, [1][0][0][0][RTW89_MKK][6] = 68, @@ -43937,6 +43971,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][6] = 46, [1][0][0][0][RTW89_CN][6] = 58, [1][0][0][0][RTW89_QATAR][6] = 58, + [1][0][0][0][RTW89_UK][6] = 58, [1][0][0][0][RTW89_FCC][7] = 46, [1][0][0][0][RTW89_ETSI][7] = 58, [1][0][0][0][RTW89_MKK][7] = 68, @@ -43948,6 +43983,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][7] = 46, [1][0][0][0][RTW89_CN][7] = 58, [1][0][0][0][RTW89_QATAR][7] = 58, + [1][0][0][0][RTW89_UK][7] = 58, [1][0][0][0][RTW89_FCC][8] = 46, [1][0][0][0][RTW89_ETSI][8] = 58, [1][0][0][0][RTW89_MKK][8] = 68, @@ -43959,6 +43995,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][8] = 46, [1][0][0][0][RTW89_CN][8] = 58, [1][0][0][0][RTW89_QATAR][8] = 58, + [1][0][0][0][RTW89_UK][8] = 58, [1][0][0][0][RTW89_FCC][9] = 32, [1][0][0][0][RTW89_ETSI][9] = 58, [1][0][0][0][RTW89_MKK][9] = 68, @@ -43970,6 +44007,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][9] = 32, [1][0][0][0][RTW89_CN][9] = 58, [1][0][0][0][RTW89_QATAR][9] = 58, + [1][0][0][0][RTW89_UK][9] = 58, [1][0][0][0][RTW89_FCC][10] = 32, [1][0][0][0][RTW89_ETSI][10] = 58, [1][0][0][0][RTW89_MKK][10] = 68, @@ -43981,6 +44019,7 
@@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][10] = 32, [1][0][0][0][RTW89_CN][10] = 58, [1][0][0][0][RTW89_QATAR][10] = 58, + [1][0][0][0][RTW89_UK][10] = 58, [1][0][0][0][RTW89_FCC][11] = 127, [1][0][0][0][RTW89_ETSI][11] = 127, [1][0][0][0][RTW89_MKK][11] = 127, @@ -43992,6 +44031,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][11] = 127, [1][0][0][0][RTW89_CN][11] = 127, [1][0][0][0][RTW89_QATAR][11] = 127, + [1][0][0][0][RTW89_UK][11] = 127, [1][0][0][0][RTW89_FCC][12] = 127, [1][0][0][0][RTW89_ETSI][12] = 127, [1][0][0][0][RTW89_MKK][12] = 127, @@ -44003,6 +44043,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][12] = 127, [1][0][0][0][RTW89_CN][12] = 127, [1][0][0][0][RTW89_QATAR][12] = 127, + [1][0][0][0][RTW89_UK][12] = 127, [1][0][0][0][RTW89_FCC][13] = 127, [1][0][0][0][RTW89_ETSI][13] = 127, [1][0][0][0][RTW89_MKK][13] = 127, @@ -44014,6 +44055,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MEXICO][13] = 127, [1][0][0][0][RTW89_CN][13] = 127, [1][0][0][0][RTW89_QATAR][13] = 127, + [1][0][0][0][RTW89_UK][13] = 127, [1][1][0][0][RTW89_FCC][0] = 127, [1][1][0][0][RTW89_ETSI][0] = 127, [1][1][0][0][RTW89_MKK][0] = 127, @@ -44025,6 +44067,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][0] = 127, [1][1][0][0][RTW89_CN][0] = 127, [1][1][0][0][RTW89_QATAR][0] = 127, + [1][1][0][0][RTW89_UK][0] = 127, [1][1][0][0][RTW89_FCC][1] = 127, [1][1][0][0][RTW89_ETSI][1] = 127, [1][1][0][0][RTW89_MKK][1] = 127, @@ -44036,6 +44079,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][1] = 127, [1][1][0][0][RTW89_CN][1] = 127, [1][1][0][0][RTW89_QATAR][1] = 127, + [1][1][0][0][RTW89_UK][1] = 127, [1][1][0][0][RTW89_FCC][2] = 48, [1][1][0][0][RTW89_ETSI][2] = 46, [1][1][0][0][RTW89_MKK][2] = 56, @@ -44047,6 +44091,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][2] = 48, [1][1][0][0][RTW89_CN][2] = 46, [1][1][0][0][RTW89_QATAR][2] = 46, + [1][1][0][0][RTW89_UK][2] = 46, [1][1][0][0][RTW89_FCC][3] = 48, [1][1][0][0][RTW89_ETSI][3] = 46, [1][1][0][0][RTW89_MKK][3] = 56, @@ -44058,6 +44103,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][3] = 48, [1][1][0][0][RTW89_CN][3] = 46, [1][1][0][0][RTW89_QATAR][3] = 46, + [1][1][0][0][RTW89_UK][3] = 46, [1][1][0][0][RTW89_FCC][4] = 48, [1][1][0][0][RTW89_ETSI][4] = 46, [1][1][0][0][RTW89_MKK][4] = 56, @@ -44069,6 +44115,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][4] = 48, [1][1][0][0][RTW89_CN][4] = 46, [1][1][0][0][RTW89_QATAR][4] = 46, + [1][1][0][0][RTW89_UK][4] = 46, [1][1][0][0][RTW89_FCC][5] = 58, [1][1][0][0][RTW89_ETSI][5] = 46, [1][1][0][0][RTW89_MKK][5] = 56, @@ -44080,6 +44127,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][5] = 58, [1][1][0][0][RTW89_CN][5] = 46, [1][1][0][0][RTW89_QATAR][5] = 46, + [1][1][0][0][RTW89_UK][5] = 46, [1][1][0][0][RTW89_FCC][6] = 46, [1][1][0][0][RTW89_ETSI][6] = 46, [1][1][0][0][RTW89_MKK][6] = 56, @@ -44091,6 +44139,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][6] = 46, [1][1][0][0][RTW89_CN][6] = 46, [1][1][0][0][RTW89_QATAR][6] = 46, + [1][1][0][0][RTW89_UK][6] = 46, 
[1][1][0][0][RTW89_FCC][7] = 46, [1][1][0][0][RTW89_ETSI][7] = 46, [1][1][0][0][RTW89_MKK][7] = 56, @@ -44102,6 +44151,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][7] = 46, [1][1][0][0][RTW89_CN][7] = 46, [1][1][0][0][RTW89_QATAR][7] = 46, + [1][1][0][0][RTW89_UK][7] = 46, [1][1][0][0][RTW89_FCC][8] = 46, [1][1][0][0][RTW89_ETSI][8] = 46, [1][1][0][0][RTW89_MKK][8] = 56, @@ -44113,6 +44163,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][8] = 46, [1][1][0][0][RTW89_CN][8] = 46, [1][1][0][0][RTW89_QATAR][8] = 46, + [1][1][0][0][RTW89_UK][8] = 46, [1][1][0][0][RTW89_FCC][9] = 24, [1][1][0][0][RTW89_ETSI][9] = 46, [1][1][0][0][RTW89_MKK][9] = 56, @@ -44124,6 +44175,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][9] = 24, [1][1][0][0][RTW89_CN][9] = 46, [1][1][0][0][RTW89_QATAR][9] = 46, + [1][1][0][0][RTW89_UK][9] = 46, [1][1][0][0][RTW89_FCC][10] = 24, [1][1][0][0][RTW89_ETSI][10] = 46, [1][1][0][0][RTW89_MKK][10] = 56, @@ -44135,6 +44187,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][10] = 24, [1][1][0][0][RTW89_CN][10] = 46, [1][1][0][0][RTW89_QATAR][10] = 46, + [1][1][0][0][RTW89_UK][10] = 46, [1][1][0][0][RTW89_FCC][11] = 127, [1][1][0][0][RTW89_ETSI][11] = 127, [1][1][0][0][RTW89_MKK][11] = 127, @@ -44146,6 +44199,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][11] = 127, [1][1][0][0][RTW89_CN][11] = 127, [1][1][0][0][RTW89_QATAR][11] = 127, + [1][1][0][0][RTW89_UK][11] = 127, [1][1][0][0][RTW89_FCC][12] = 127, [1][1][0][0][RTW89_ETSI][12] = 127, [1][1][0][0][RTW89_MKK][12] = 127, @@ -44157,6 +44211,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][12] = 127, [1][1][0][0][RTW89_CN][12] = 127, [1][1][0][0][RTW89_QATAR][12] = 127, + [1][1][0][0][RTW89_UK][12] = 127, [1][1][0][0][RTW89_FCC][13] = 127, [1][1][0][0][RTW89_ETSI][13] = 127, [1][1][0][0][RTW89_MKK][13] = 127, @@ -44168,6 +44223,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MEXICO][13] = 127, [1][1][0][0][RTW89_CN][13] = 127, [1][1][0][0][RTW89_QATAR][13] = 127, + [1][1][0][0][RTW89_UK][13] = 127, [0][0][1][0][RTW89_FCC][0] = 66, [0][0][1][0][RTW89_ETSI][0] = 58, [0][0][1][0][RTW89_MKK][0] = 76, @@ -44179,6 +44235,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][0] = 66, [0][0][1][0][RTW89_CN][0] = 58, [0][0][1][0][RTW89_QATAR][0] = 58, + [0][0][1][0][RTW89_UK][0] = 58, [0][0][1][0][RTW89_FCC][1] = 66, [0][0][1][0][RTW89_ETSI][1] = 58, [0][0][1][0][RTW89_MKK][1] = 76, @@ -44190,6 +44247,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][1] = 66, [0][0][1][0][RTW89_CN][1] = 58, [0][0][1][0][RTW89_QATAR][1] = 58, + [0][0][1][0][RTW89_UK][1] = 58, [0][0][1][0][RTW89_FCC][2] = 70, [0][0][1][0][RTW89_ETSI][2] = 58, [0][0][1][0][RTW89_MKK][2] = 76, @@ -44201,6 +44259,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][2] = 70, [0][0][1][0][RTW89_CN][2] = 58, [0][0][1][0][RTW89_QATAR][2] = 58, + [0][0][1][0][RTW89_UK][2] = 58, [0][0][1][0][RTW89_FCC][3] = 74, [0][0][1][0][RTW89_ETSI][3] = 58, [0][0][1][0][RTW89_MKK][3] = 76, @@ -44212,6 +44271,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][3] = 74, 
[0][0][1][0][RTW89_CN][3] = 58, [0][0][1][0][RTW89_QATAR][3] = 58, + [0][0][1][0][RTW89_UK][3] = 58, [0][0][1][0][RTW89_FCC][4] = 78, [0][0][1][0][RTW89_ETSI][4] = 58, [0][0][1][0][RTW89_MKK][4] = 76, @@ -44223,6 +44283,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][4] = 78, [0][0][1][0][RTW89_CN][4] = 58, [0][0][1][0][RTW89_QATAR][4] = 58, + [0][0][1][0][RTW89_UK][4] = 58, [0][0][1][0][RTW89_FCC][5] = 78, [0][0][1][0][RTW89_ETSI][5] = 58, [0][0][1][0][RTW89_MKK][5] = 76, @@ -44234,6 +44295,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][5] = 78, [0][0][1][0][RTW89_CN][5] = 58, [0][0][1][0][RTW89_QATAR][5] = 58, + [0][0][1][0][RTW89_UK][5] = 58, [0][0][1][0][RTW89_FCC][6] = 78, [0][0][1][0][RTW89_ETSI][6] = 58, [0][0][1][0][RTW89_MKK][6] = 76, @@ -44245,6 +44307,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][6] = 78, [0][0][1][0][RTW89_CN][6] = 58, [0][0][1][0][RTW89_QATAR][6] = 58, + [0][0][1][0][RTW89_UK][6] = 58, [0][0][1][0][RTW89_FCC][7] = 74, [0][0][1][0][RTW89_ETSI][7] = 58, [0][0][1][0][RTW89_MKK][7] = 76, @@ -44256,6 +44319,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][7] = 74, [0][0][1][0][RTW89_CN][7] = 58, [0][0][1][0][RTW89_QATAR][7] = 58, + [0][0][1][0][RTW89_UK][7] = 58, [0][0][1][0][RTW89_FCC][8] = 70, [0][0][1][0][RTW89_ETSI][8] = 58, [0][0][1][0][RTW89_MKK][8] = 76, @@ -44267,6 +44331,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][8] = 70, [0][0][1][0][RTW89_CN][8] = 58, [0][0][1][0][RTW89_QATAR][8] = 58, + [0][0][1][0][RTW89_UK][8] = 58, [0][0][1][0][RTW89_FCC][9] = 66, [0][0][1][0][RTW89_ETSI][9] = 58, [0][0][1][0][RTW89_MKK][9] = 76, @@ -44278,6 +44343,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][9] = 66, [0][0][1][0][RTW89_CN][9] = 58, [0][0][1][0][RTW89_QATAR][9] = 58, + [0][0][1][0][RTW89_UK][9] = 58, [0][0][1][0][RTW89_FCC][10] = 66, [0][0][1][0][RTW89_ETSI][10] = 58, [0][0][1][0][RTW89_MKK][10] = 76, @@ -44289,6 +44355,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][10] = 66, [0][0][1][0][RTW89_CN][10] = 58, [0][0][1][0][RTW89_QATAR][10] = 58, + [0][0][1][0][RTW89_UK][10] = 58, [0][0][1][0][RTW89_FCC][11] = 56, [0][0][1][0][RTW89_ETSI][11] = 58, [0][0][1][0][RTW89_MKK][11] = 76, @@ -44300,6 +44367,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][11] = 56, [0][0][1][0][RTW89_CN][11] = 58, [0][0][1][0][RTW89_QATAR][11] = 58, + [0][0][1][0][RTW89_UK][11] = 58, [0][0][1][0][RTW89_FCC][12] = 52, [0][0][1][0][RTW89_ETSI][12] = 58, [0][0][1][0][RTW89_MKK][12] = 76, @@ -44311,6 +44379,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][12] = 52, [0][0][1][0][RTW89_CN][12] = 58, [0][0][1][0][RTW89_QATAR][12] = 58, + [0][0][1][0][RTW89_UK][12] = 58, [0][0][1][0][RTW89_FCC][13] = 127, [0][0][1][0][RTW89_ETSI][13] = 127, [0][0][1][0][RTW89_MKK][13] = 127, @@ -44322,6 +44391,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][13] = 127, [0][0][1][0][RTW89_CN][13] = 127, [0][0][1][0][RTW89_QATAR][13] = 127, + [0][0][1][0][RTW89_UK][13] = 127, [0][1][1][0][RTW89_FCC][0] = 62, [0][1][1][0][RTW89_ETSI][0] = 46, [0][1][1][0][RTW89_MKK][0] = 64, @@ -44333,6 +44403,7 @@ const s8 
rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][0] = 62, [0][1][1][0][RTW89_CN][0] = 46, [0][1][1][0][RTW89_QATAR][0] = 46, + [0][1][1][0][RTW89_UK][0] = 46, [0][1][1][0][RTW89_FCC][1] = 62, [0][1][1][0][RTW89_ETSI][1] = 46, [0][1][1][0][RTW89_MKK][1] = 64, @@ -44344,6 +44415,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][1] = 62, [0][1][1][0][RTW89_CN][1] = 46, [0][1][1][0][RTW89_QATAR][1] = 46, + [0][1][1][0][RTW89_UK][1] = 46, [0][1][1][0][RTW89_FCC][2] = 66, [0][1][1][0][RTW89_ETSI][2] = 46, [0][1][1][0][RTW89_MKK][2] = 64, @@ -44355,6 +44427,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][2] = 66, [0][1][1][0][RTW89_CN][2] = 46, [0][1][1][0][RTW89_QATAR][2] = 46, + [0][1][1][0][RTW89_UK][2] = 46, [0][1][1][0][RTW89_FCC][3] = 70, [0][1][1][0][RTW89_ETSI][3] = 46, [0][1][1][0][RTW89_MKK][3] = 64, @@ -44366,6 +44439,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][3] = 70, [0][1][1][0][RTW89_CN][3] = 46, [0][1][1][0][RTW89_QATAR][3] = 46, + [0][1][1][0][RTW89_UK][3] = 46, [0][1][1][0][RTW89_FCC][4] = 78, [0][1][1][0][RTW89_ETSI][4] = 46, [0][1][1][0][RTW89_MKK][4] = 64, @@ -44377,6 +44451,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][4] = 78, [0][1][1][0][RTW89_CN][4] = 46, [0][1][1][0][RTW89_QATAR][4] = 46, + [0][1][1][0][RTW89_UK][4] = 46, [0][1][1][0][RTW89_FCC][5] = 78, [0][1][1][0][RTW89_ETSI][5] = 46, [0][1][1][0][RTW89_MKK][5] = 64, @@ -44388,6 +44463,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][5] = 78, [0][1][1][0][RTW89_CN][5] = 46, [0][1][1][0][RTW89_QATAR][5] = 46, + [0][1][1][0][RTW89_UK][5] = 46, [0][1][1][0][RTW89_FCC][6] = 78, [0][1][1][0][RTW89_ETSI][6] = 46, [0][1][1][0][RTW89_MKK][6] = 64, @@ -44399,6 +44475,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][6] = 78, [0][1][1][0][RTW89_CN][6] = 46, [0][1][1][0][RTW89_QATAR][6] = 46, + [0][1][1][0][RTW89_UK][6] = 46, [0][1][1][0][RTW89_FCC][7] = 70, [0][1][1][0][RTW89_ETSI][7] = 46, [0][1][1][0][RTW89_MKK][7] = 64, @@ -44410,6 +44487,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][7] = 70, [0][1][1][0][RTW89_CN][7] = 46, [0][1][1][0][RTW89_QATAR][7] = 46, + [0][1][1][0][RTW89_UK][7] = 46, [0][1][1][0][RTW89_FCC][8] = 66, [0][1][1][0][RTW89_ETSI][8] = 46, [0][1][1][0][RTW89_MKK][8] = 64, @@ -44421,6 +44499,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][8] = 66, [0][1][1][0][RTW89_CN][8] = 46, [0][1][1][0][RTW89_QATAR][8] = 46, + [0][1][1][0][RTW89_UK][8] = 46, [0][1][1][0][RTW89_FCC][9] = 62, [0][1][1][0][RTW89_ETSI][9] = 46, [0][1][1][0][RTW89_MKK][9] = 64, @@ -44432,6 +44511,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][9] = 62, [0][1][1][0][RTW89_CN][9] = 46, [0][1][1][0][RTW89_QATAR][9] = 46, + [0][1][1][0][RTW89_UK][9] = 46, [0][1][1][0][RTW89_FCC][10] = 62, [0][1][1][0][RTW89_ETSI][10] = 46, [0][1][1][0][RTW89_MKK][10] = 64, @@ -44443,6 +44523,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][10] = 62, [0][1][1][0][RTW89_CN][10] = 46, [0][1][1][0][RTW89_QATAR][10] = 46, + [0][1][1][0][RTW89_UK][10] = 46, [0][1][1][0][RTW89_FCC][11] = 42, [0][1][1][0][RTW89_ETSI][11] = 46, 
[0][1][1][0][RTW89_MKK][11] = 64, @@ -44454,6 +44535,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][11] = 42, [0][1][1][0][RTW89_CN][11] = 46, [0][1][1][0][RTW89_QATAR][11] = 46, + [0][1][1][0][RTW89_UK][11] = 46, [0][1][1][0][RTW89_FCC][12] = 40, [0][1][1][0][RTW89_ETSI][12] = 46, [0][1][1][0][RTW89_MKK][12] = 64, @@ -44465,6 +44547,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][12] = 40, [0][1][1][0][RTW89_CN][12] = 46, [0][1][1][0][RTW89_QATAR][12] = 46, + [0][1][1][0][RTW89_UK][12] = 46, [0][1][1][0][RTW89_FCC][13] = 127, [0][1][1][0][RTW89_ETSI][13] = 127, [0][1][1][0][RTW89_MKK][13] = 127, @@ -44476,6 +44559,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][13] = 127, [0][1][1][0][RTW89_CN][13] = 127, [0][1][1][0][RTW89_QATAR][13] = 127, + [0][1][1][0][RTW89_UK][13] = 127, [0][0][2][0][RTW89_FCC][0] = 66, [0][0][2][0][RTW89_ETSI][0] = 58, [0][0][2][0][RTW89_MKK][0] = 76, @@ -44487,6 +44571,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][0] = 66, [0][0][2][0][RTW89_CN][0] = 58, [0][0][2][0][RTW89_QATAR][0] = 58, + [0][0][2][0][RTW89_UK][0] = 58, [0][0][2][0][RTW89_FCC][1] = 66, [0][0][2][0][RTW89_ETSI][1] = 58, [0][0][2][0][RTW89_MKK][1] = 76, @@ -44498,6 +44583,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][1] = 66, [0][0][2][0][RTW89_CN][1] = 58, [0][0][2][0][RTW89_QATAR][1] = 58, + [0][0][2][0][RTW89_UK][1] = 58, [0][0][2][0][RTW89_FCC][2] = 70, [0][0][2][0][RTW89_ETSI][2] = 58, [0][0][2][0][RTW89_MKK][2] = 76, @@ -44509,6 +44595,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][2] = 70, [0][0][2][0][RTW89_CN][2] = 58, [0][0][2][0][RTW89_QATAR][2] = 58, + [0][0][2][0][RTW89_UK][2] = 58, [0][0][2][0][RTW89_FCC][3] = 74, [0][0][2][0][RTW89_ETSI][3] = 58, [0][0][2][0][RTW89_MKK][3] = 76, @@ -44520,6 +44607,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][3] = 74, [0][0][2][0][RTW89_CN][3] = 58, [0][0][2][0][RTW89_QATAR][3] = 58, + [0][0][2][0][RTW89_UK][3] = 58, [0][0][2][0][RTW89_FCC][4] = 76, [0][0][2][0][RTW89_ETSI][4] = 58, [0][0][2][0][RTW89_MKK][4] = 76, @@ -44531,6 +44619,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][4] = 76, [0][0][2][0][RTW89_CN][4] = 58, [0][0][2][0][RTW89_QATAR][4] = 58, + [0][0][2][0][RTW89_UK][4] = 58, [0][0][2][0][RTW89_FCC][5] = 76, [0][0][2][0][RTW89_ETSI][5] = 58, [0][0][2][0][RTW89_MKK][5] = 76, @@ -44542,6 +44631,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][5] = 76, [0][0][2][0][RTW89_CN][5] = 58, [0][0][2][0][RTW89_QATAR][5] = 58, + [0][0][2][0][RTW89_UK][5] = 58, [0][0][2][0][RTW89_FCC][6] = 76, [0][0][2][0][RTW89_ETSI][6] = 58, [0][0][2][0][RTW89_MKK][6] = 76, @@ -44553,6 +44643,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][6] = 76, [0][0][2][0][RTW89_CN][6] = 58, [0][0][2][0][RTW89_QATAR][6] = 58, + [0][0][2][0][RTW89_UK][6] = 58, [0][0][2][0][RTW89_FCC][7] = 74, [0][0][2][0][RTW89_ETSI][7] = 58, [0][0][2][0][RTW89_MKK][7] = 76, @@ -44564,6 +44655,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][7] = 74, [0][0][2][0][RTW89_CN][7] = 58, [0][0][2][0][RTW89_QATAR][7] = 58, + [0][0][2][0][RTW89_UK][7] = 
58, [0][0][2][0][RTW89_FCC][8] = 70, [0][0][2][0][RTW89_ETSI][8] = 58, [0][0][2][0][RTW89_MKK][8] = 76, @@ -44575,6 +44667,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][8] = 70, [0][0][2][0][RTW89_CN][8] = 58, [0][0][2][0][RTW89_QATAR][8] = 58, + [0][0][2][0][RTW89_UK][8] = 58, [0][0][2][0][RTW89_FCC][9] = 66, [0][0][2][0][RTW89_ETSI][9] = 58, [0][0][2][0][RTW89_MKK][9] = 76, @@ -44586,6 +44679,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][9] = 66, [0][0][2][0][RTW89_CN][9] = 58, [0][0][2][0][RTW89_QATAR][9] = 58, + [0][0][2][0][RTW89_UK][9] = 58, [0][0][2][0][RTW89_FCC][10] = 66, [0][0][2][0][RTW89_ETSI][10] = 58, [0][0][2][0][RTW89_MKK][10] = 76, @@ -44597,6 +44691,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][10] = 66, [0][0][2][0][RTW89_CN][10] = 58, [0][0][2][0][RTW89_QATAR][10] = 58, + [0][0][2][0][RTW89_UK][10] = 58, [0][0][2][0][RTW89_FCC][11] = 54, [0][0][2][0][RTW89_ETSI][11] = 58, [0][0][2][0][RTW89_MKK][11] = 76, @@ -44608,6 +44703,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][11] = 54, [0][0][2][0][RTW89_CN][11] = 58, [0][0][2][0][RTW89_QATAR][11] = 58, + [0][0][2][0][RTW89_UK][11] = 58, [0][0][2][0][RTW89_FCC][12] = 50, [0][0][2][0][RTW89_ETSI][12] = 58, [0][0][2][0][RTW89_MKK][12] = 76, @@ -44619,6 +44715,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][12] = 50, [0][0][2][0][RTW89_CN][12] = 58, [0][0][2][0][RTW89_QATAR][12] = 58, + [0][0][2][0][RTW89_UK][12] = 58, [0][0][2][0][RTW89_FCC][13] = 127, [0][0][2][0][RTW89_ETSI][13] = 127, [0][0][2][0][RTW89_MKK][13] = 127, @@ -44630,6 +44727,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][13] = 127, [0][0][2][0][RTW89_CN][13] = 127, [0][0][2][0][RTW89_QATAR][13] = 127, + [0][0][2][0][RTW89_UK][13] = 127, [0][1][2][0][RTW89_FCC][0] = 62, [0][1][2][0][RTW89_ETSI][0] = 46, [0][1][2][0][RTW89_MKK][0] = 64, @@ -44641,6 +44739,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][0] = 62, [0][1][2][0][RTW89_CN][0] = 46, [0][1][2][0][RTW89_QATAR][0] = 46, + [0][1][2][0][RTW89_UK][0] = 46, [0][1][2][0][RTW89_FCC][1] = 62, [0][1][2][0][RTW89_ETSI][1] = 46, [0][1][2][0][RTW89_MKK][1] = 64, @@ -44652,6 +44751,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][1] = 62, [0][1][2][0][RTW89_CN][1] = 46, [0][1][2][0][RTW89_QATAR][1] = 46, + [0][1][2][0][RTW89_UK][1] = 46, [0][1][2][0][RTW89_FCC][2] = 66, [0][1][2][0][RTW89_ETSI][2] = 46, [0][1][2][0][RTW89_MKK][2] = 64, @@ -44663,6 +44763,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][2] = 66, [0][1][2][0][RTW89_CN][2] = 46, [0][1][2][0][RTW89_QATAR][2] = 46, + [0][1][2][0][RTW89_UK][2] = 46, [0][1][2][0][RTW89_FCC][3] = 70, [0][1][2][0][RTW89_ETSI][3] = 46, [0][1][2][0][RTW89_MKK][3] = 64, @@ -44674,6 +44775,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][3] = 70, [0][1][2][0][RTW89_CN][3] = 46, [0][1][2][0][RTW89_QATAR][3] = 46, + [0][1][2][0][RTW89_UK][3] = 46, [0][1][2][0][RTW89_FCC][4] = 76, [0][1][2][0][RTW89_ETSI][4] = 46, [0][1][2][0][RTW89_MKK][4] = 64, @@ -44685,6 +44787,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][4] = 76, 
[0][1][2][0][RTW89_CN][4] = 46, [0][1][2][0][RTW89_QATAR][4] = 46, + [0][1][2][0][RTW89_UK][4] = 46, [0][1][2][0][RTW89_FCC][5] = 76, [0][1][2][0][RTW89_ETSI][5] = 46, [0][1][2][0][RTW89_MKK][5] = 64, @@ -44696,6 +44799,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][5] = 76, [0][1][2][0][RTW89_CN][5] = 46, [0][1][2][0][RTW89_QATAR][5] = 46, + [0][1][2][0][RTW89_UK][5] = 46, [0][1][2][0][RTW89_FCC][6] = 76, [0][1][2][0][RTW89_ETSI][6] = 46, [0][1][2][0][RTW89_MKK][6] = 64, @@ -44707,6 +44811,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][6] = 76, [0][1][2][0][RTW89_CN][6] = 46, [0][1][2][0][RTW89_QATAR][6] = 46, + [0][1][2][0][RTW89_UK][6] = 46, [0][1][2][0][RTW89_FCC][7] = 68, [0][1][2][0][RTW89_ETSI][7] = 46, [0][1][2][0][RTW89_MKK][7] = 64, @@ -44718,6 +44823,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][7] = 68, [0][1][2][0][RTW89_CN][7] = 46, [0][1][2][0][RTW89_QATAR][7] = 46, + [0][1][2][0][RTW89_UK][7] = 46, [0][1][2][0][RTW89_FCC][8] = 64, [0][1][2][0][RTW89_ETSI][8] = 46, [0][1][2][0][RTW89_MKK][8] = 64, @@ -44729,6 +44835,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][8] = 64, [0][1][2][0][RTW89_CN][8] = 46, [0][1][2][0][RTW89_QATAR][8] = 46, + [0][1][2][0][RTW89_UK][8] = 46, [0][1][2][0][RTW89_FCC][9] = 60, [0][1][2][0][RTW89_ETSI][9] = 46, [0][1][2][0][RTW89_MKK][9] = 64, @@ -44740,6 +44847,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][9] = 60, [0][1][2][0][RTW89_CN][9] = 46, [0][1][2][0][RTW89_QATAR][9] = 46, + [0][1][2][0][RTW89_UK][9] = 46, [0][1][2][0][RTW89_FCC][10] = 60, [0][1][2][0][RTW89_ETSI][10] = 46, [0][1][2][0][RTW89_MKK][10] = 64, @@ -44751,6 +44859,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][10] = 60, [0][1][2][0][RTW89_CN][10] = 46, [0][1][2][0][RTW89_QATAR][10] = 46, + [0][1][2][0][RTW89_UK][10] = 46, [0][1][2][0][RTW89_FCC][11] = 42, [0][1][2][0][RTW89_ETSI][11] = 46, [0][1][2][0][RTW89_MKK][11] = 64, @@ -44762,6 +44871,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][11] = 42, [0][1][2][0][RTW89_CN][11] = 46, [0][1][2][0][RTW89_QATAR][11] = 46, + [0][1][2][0][RTW89_UK][11] = 46, [0][1][2][0][RTW89_FCC][12] = 40, [0][1][2][0][RTW89_ETSI][12] = 46, [0][1][2][0][RTW89_MKK][12] = 64, @@ -44773,6 +44883,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][12] = 40, [0][1][2][0][RTW89_CN][12] = 46, [0][1][2][0][RTW89_QATAR][12] = 46, + [0][1][2][0][RTW89_UK][12] = 46, [0][1][2][0][RTW89_FCC][13] = 127, [0][1][2][0][RTW89_ETSI][13] = 127, [0][1][2][0][RTW89_MKK][13] = 127, @@ -44784,6 +44895,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][13] = 127, [0][1][2][0][RTW89_CN][13] = 127, [0][1][2][0][RTW89_QATAR][13] = 127, + [0][1][2][0][RTW89_UK][13] = 127, [0][1][2][1][RTW89_FCC][0] = 62, [0][1][2][1][RTW89_ETSI][0] = 34, [0][1][2][1][RTW89_MKK][0] = 64, @@ -44795,6 +44907,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][0] = 62, [0][1][2][1][RTW89_CN][0] = 34, [0][1][2][1][RTW89_QATAR][0] = 34, + [0][1][2][1][RTW89_UK][0] = 34, [0][1][2][1][RTW89_FCC][1] = 62, [0][1][2][1][RTW89_ETSI][1] = 34, [0][1][2][1][RTW89_MKK][1] = 64, @@ -44806,6 +44919,7 @@ const s8 
rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][1] = 62, [0][1][2][1][RTW89_CN][1] = 34, [0][1][2][1][RTW89_QATAR][1] = 34, + [0][1][2][1][RTW89_UK][1] = 34, [0][1][2][1][RTW89_FCC][2] = 66, [0][1][2][1][RTW89_ETSI][2] = 34, [0][1][2][1][RTW89_MKK][2] = 64, @@ -44817,6 +44931,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][2] = 66, [0][1][2][1][RTW89_CN][2] = 34, [0][1][2][1][RTW89_QATAR][2] = 34, + [0][1][2][1][RTW89_UK][2] = 34, [0][1][2][1][RTW89_FCC][3] = 70, [0][1][2][1][RTW89_ETSI][3] = 34, [0][1][2][1][RTW89_MKK][3] = 64, @@ -44828,6 +44943,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][3] = 70, [0][1][2][1][RTW89_CN][3] = 34, [0][1][2][1][RTW89_QATAR][3] = 34, + [0][1][2][1][RTW89_UK][3] = 34, [0][1][2][1][RTW89_FCC][4] = 76, [0][1][2][1][RTW89_ETSI][4] = 34, [0][1][2][1][RTW89_MKK][4] = 64, @@ -44839,6 +44955,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][4] = 76, [0][1][2][1][RTW89_CN][4] = 34, [0][1][2][1][RTW89_QATAR][4] = 34, + [0][1][2][1][RTW89_UK][4] = 34, [0][1][2][1][RTW89_FCC][5] = 76, [0][1][2][1][RTW89_ETSI][5] = 34, [0][1][2][1][RTW89_MKK][5] = 64, @@ -44850,6 +44967,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][5] = 76, [0][1][2][1][RTW89_CN][5] = 34, [0][1][2][1][RTW89_QATAR][5] = 34, + [0][1][2][1][RTW89_UK][5] = 34, [0][1][2][1][RTW89_FCC][6] = 76, [0][1][2][1][RTW89_ETSI][6] = 34, [0][1][2][1][RTW89_MKK][6] = 64, @@ -44861,6 +44979,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][6] = 76, [0][1][2][1][RTW89_CN][6] = 34, [0][1][2][1][RTW89_QATAR][6] = 34, + [0][1][2][1][RTW89_UK][6] = 34, [0][1][2][1][RTW89_FCC][7] = 68, [0][1][2][1][RTW89_ETSI][7] = 34, [0][1][2][1][RTW89_MKK][7] = 64, @@ -44872,6 +44991,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][7] = 68, [0][1][2][1][RTW89_CN][7] = 34, [0][1][2][1][RTW89_QATAR][7] = 34, + [0][1][2][1][RTW89_UK][7] = 34, [0][1][2][1][RTW89_FCC][8] = 64, [0][1][2][1][RTW89_ETSI][8] = 34, [0][1][2][1][RTW89_MKK][8] = 64, @@ -44883,6 +45003,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][8] = 64, [0][1][2][1][RTW89_CN][8] = 34, [0][1][2][1][RTW89_QATAR][8] = 34, + [0][1][2][1][RTW89_UK][8] = 34, [0][1][2][1][RTW89_FCC][9] = 60, [0][1][2][1][RTW89_ETSI][9] = 34, [0][1][2][1][RTW89_MKK][9] = 64, @@ -44894,6 +45015,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][9] = 60, [0][1][2][1][RTW89_CN][9] = 34, [0][1][2][1][RTW89_QATAR][9] = 34, + [0][1][2][1][RTW89_UK][9] = 34, [0][1][2][1][RTW89_FCC][10] = 60, [0][1][2][1][RTW89_ETSI][10] = 34, [0][1][2][1][RTW89_MKK][10] = 64, @@ -44905,6 +45027,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][10] = 60, [0][1][2][1][RTW89_CN][10] = 34, [0][1][2][1][RTW89_QATAR][10] = 34, + [0][1][2][1][RTW89_UK][10] = 34, [0][1][2][1][RTW89_FCC][11] = 42, [0][1][2][1][RTW89_ETSI][11] = 34, [0][1][2][1][RTW89_MKK][11] = 64, @@ -44916,6 +45039,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][11] = 42, [0][1][2][1][RTW89_CN][11] = 34, [0][1][2][1][RTW89_QATAR][11] = 34, + [0][1][2][1][RTW89_UK][11] = 34, [0][1][2][1][RTW89_FCC][12] = 40, [0][1][2][1][RTW89_ETSI][12] = 34, 
[0][1][2][1][RTW89_MKK][12] = 64, @@ -44927,6 +45051,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][12] = 40, [0][1][2][1][RTW89_CN][12] = 34, [0][1][2][1][RTW89_QATAR][12] = 34, + [0][1][2][1][RTW89_UK][12] = 34, [0][1][2][1][RTW89_FCC][13] = 127, [0][1][2][1][RTW89_ETSI][13] = 127, [0][1][2][1][RTW89_MKK][13] = 127, @@ -44938,6 +45063,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][13] = 127, [0][1][2][1][RTW89_CN][13] = 127, [0][1][2][1][RTW89_QATAR][13] = 127, + [0][1][2][1][RTW89_UK][13] = 127, [1][0][2][0][RTW89_FCC][0] = 127, [1][0][2][0][RTW89_ETSI][0] = 127, [1][0][2][0][RTW89_MKK][0] = 127, @@ -44949,6 +45075,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][0] = 127, [1][0][2][0][RTW89_CN][0] = 127, [1][0][2][0][RTW89_QATAR][0] = 127, + [1][0][2][0][RTW89_UK][0] = 127, [1][0][2][0][RTW89_FCC][1] = 127, [1][0][2][0][RTW89_ETSI][1] = 127, [1][0][2][0][RTW89_MKK][1] = 127, @@ -44960,6 +45087,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][1] = 127, [1][0][2][0][RTW89_CN][1] = 127, [1][0][2][0][RTW89_QATAR][1] = 127, + [1][0][2][0][RTW89_UK][1] = 127, [1][0][2][0][RTW89_FCC][2] = 56, [1][0][2][0][RTW89_ETSI][2] = 58, [1][0][2][0][RTW89_MKK][2] = 68, @@ -44971,6 +45099,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][2] = 56, [1][0][2][0][RTW89_CN][2] = 58, [1][0][2][0][RTW89_QATAR][2] = 58, + [1][0][2][0][RTW89_UK][2] = 58, [1][0][2][0][RTW89_FCC][3] = 56, [1][0][2][0][RTW89_ETSI][3] = 58, [1][0][2][0][RTW89_MKK][3] = 68, @@ -44982,6 +45111,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][3] = 56, [1][0][2][0][RTW89_CN][3] = 58, [1][0][2][0][RTW89_QATAR][3] = 58, + [1][0][2][0][RTW89_UK][3] = 58, [1][0][2][0][RTW89_FCC][4] = 60, [1][0][2][0][RTW89_ETSI][4] = 58, [1][0][2][0][RTW89_MKK][4] = 68, @@ -44993,6 +45123,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][4] = 60, [1][0][2][0][RTW89_CN][4] = 58, [1][0][2][0][RTW89_QATAR][4] = 58, + [1][0][2][0][RTW89_UK][4] = 58, [1][0][2][0][RTW89_FCC][5] = 64, [1][0][2][0][RTW89_ETSI][5] = 58, [1][0][2][0][RTW89_MKK][5] = 68, @@ -45004,6 +45135,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][5] = 64, [1][0][2][0][RTW89_CN][5] = 58, [1][0][2][0][RTW89_QATAR][5] = 58, + [1][0][2][0][RTW89_UK][5] = 58, [1][0][2][0][RTW89_FCC][6] = 54, [1][0][2][0][RTW89_ETSI][6] = 58, [1][0][2][0][RTW89_MKK][6] = 68, @@ -45015,6 +45147,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][6] = 54, [1][0][2][0][RTW89_CN][6] = 58, [1][0][2][0][RTW89_QATAR][6] = 58, + [1][0][2][0][RTW89_UK][6] = 58, [1][0][2][0][RTW89_FCC][7] = 50, [1][0][2][0][RTW89_ETSI][7] = 58, [1][0][2][0][RTW89_MKK][7] = 68, @@ -45026,6 +45159,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][7] = 50, [1][0][2][0][RTW89_CN][7] = 58, [1][0][2][0][RTW89_QATAR][7] = 58, + [1][0][2][0][RTW89_UK][7] = 58, [1][0][2][0][RTW89_FCC][8] = 50, [1][0][2][0][RTW89_ETSI][8] = 58, [1][0][2][0][RTW89_MKK][8] = 68, @@ -45037,6 +45171,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][8] = 50, [1][0][2][0][RTW89_CN][8] = 58, [1][0][2][0][RTW89_QATAR][8] = 58, + 
[1][0][2][0][RTW89_UK][8] = 58, [1][0][2][0][RTW89_FCC][9] = 42, [1][0][2][0][RTW89_ETSI][9] = 58, [1][0][2][0][RTW89_MKK][9] = 68, @@ -45048,6 +45183,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][9] = 42, [1][0][2][0][RTW89_CN][9] = 58, [1][0][2][0][RTW89_QATAR][9] = 58, + [1][0][2][0][RTW89_UK][9] = 58, [1][0][2][0][RTW89_FCC][10] = 40, [1][0][2][0][RTW89_ETSI][10] = 58, [1][0][2][0][RTW89_MKK][10] = 68, @@ -45059,6 +45195,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][10] = 40, [1][0][2][0][RTW89_CN][10] = 58, [1][0][2][0][RTW89_QATAR][10] = 58, + [1][0][2][0][RTW89_UK][10] = 58, [1][0][2][0][RTW89_FCC][11] = 127, [1][0][2][0][RTW89_ETSI][11] = 127, [1][0][2][0][RTW89_MKK][11] = 127, @@ -45070,6 +45207,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][11] = 127, [1][0][2][0][RTW89_CN][11] = 127, [1][0][2][0][RTW89_QATAR][11] = 127, + [1][0][2][0][RTW89_UK][11] = 127, [1][0][2][0][RTW89_FCC][12] = 127, [1][0][2][0][RTW89_ETSI][12] = 127, [1][0][2][0][RTW89_MKK][12] = 127, @@ -45081,6 +45219,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][12] = 127, [1][0][2][0][RTW89_CN][12] = 127, [1][0][2][0][RTW89_QATAR][12] = 127, + [1][0][2][0][RTW89_UK][12] = 127, [1][0][2][0][RTW89_FCC][13] = 127, [1][0][2][0][RTW89_ETSI][13] = 127, [1][0][2][0][RTW89_MKK][13] = 127, @@ -45092,6 +45231,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][13] = 127, [1][0][2][0][RTW89_CN][13] = 127, [1][0][2][0][RTW89_QATAR][13] = 127, + [1][0][2][0][RTW89_UK][13] = 127, [1][1][2][0][RTW89_FCC][0] = 127, [1][1][2][0][RTW89_ETSI][0] = 127, [1][1][2][0][RTW89_MKK][0] = 127, @@ -45103,6 +45243,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][0] = 127, [1][1][2][0][RTW89_CN][0] = 127, [1][1][2][0][RTW89_QATAR][0] = 127, + [1][1][2][0][RTW89_UK][0] = 127, [1][1][2][0][RTW89_FCC][1] = 127, [1][1][2][0][RTW89_ETSI][1] = 127, [1][1][2][0][RTW89_MKK][1] = 127, @@ -45114,6 +45255,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][1] = 127, [1][1][2][0][RTW89_CN][1] = 127, [1][1][2][0][RTW89_QATAR][1] = 127, + [1][1][2][0][RTW89_UK][1] = 127, [1][1][2][0][RTW89_FCC][2] = 52, [1][1][2][0][RTW89_ETSI][2] = 46, [1][1][2][0][RTW89_MKK][2] = 64, @@ -45125,6 +45267,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][2] = 52, [1][1][2][0][RTW89_CN][2] = 46, [1][1][2][0][RTW89_QATAR][2] = 46, + [1][1][2][0][RTW89_UK][2] = 46, [1][1][2][0][RTW89_FCC][3] = 52, [1][1][2][0][RTW89_ETSI][3] = 46, [1][1][2][0][RTW89_MKK][3] = 64, @@ -45136,6 +45279,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][3] = 52, [1][1][2][0][RTW89_CN][3] = 46, [1][1][2][0][RTW89_QATAR][3] = 46, + [1][1][2][0][RTW89_UK][3] = 46, [1][1][2][0][RTW89_FCC][4] = 56, [1][1][2][0][RTW89_ETSI][4] = 46, [1][1][2][0][RTW89_MKK][4] = 64, @@ -45147,6 +45291,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][4] = 56, [1][1][2][0][RTW89_CN][4] = 46, [1][1][2][0][RTW89_QATAR][4] = 46, + [1][1][2][0][RTW89_UK][4] = 46, [1][1][2][0][RTW89_FCC][5] = 60, [1][1][2][0][RTW89_ETSI][5] = 46, [1][1][2][0][RTW89_MKK][5] = 64, @@ -45158,6 +45303,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] 
[1][1][2][0][RTW89_MEXICO][5] = 60, [1][1][2][0][RTW89_CN][5] = 46, [1][1][2][0][RTW89_QATAR][5] = 46, + [1][1][2][0][RTW89_UK][5] = 46, [1][1][2][0][RTW89_FCC][6] = 54, [1][1][2][0][RTW89_ETSI][6] = 46, [1][1][2][0][RTW89_MKK][6] = 64, @@ -45169,6 +45315,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][6] = 54, [1][1][2][0][RTW89_CN][6] = 46, [1][1][2][0][RTW89_QATAR][6] = 46, + [1][1][2][0][RTW89_UK][6] = 46, [1][1][2][0][RTW89_FCC][7] = 50, [1][1][2][0][RTW89_ETSI][7] = 46, [1][1][2][0][RTW89_MKK][7] = 64, @@ -45180,6 +45327,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][7] = 50, [1][1][2][0][RTW89_CN][7] = 46, [1][1][2][0][RTW89_QATAR][7] = 46, + [1][1][2][0][RTW89_UK][7] = 46, [1][1][2][0][RTW89_FCC][8] = 50, [1][1][2][0][RTW89_ETSI][8] = 46, [1][1][2][0][RTW89_MKK][8] = 64, @@ -45191,6 +45339,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][8] = 50, [1][1][2][0][RTW89_CN][8] = 46, [1][1][2][0][RTW89_QATAR][8] = 46, + [1][1][2][0][RTW89_UK][8] = 46, [1][1][2][0][RTW89_FCC][9] = 38, [1][1][2][0][RTW89_ETSI][9] = 46, [1][1][2][0][RTW89_MKK][9] = 64, @@ -45202,6 +45351,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][9] = 38, [1][1][2][0][RTW89_CN][9] = 46, [1][1][2][0][RTW89_QATAR][9] = 46, + [1][1][2][0][RTW89_UK][9] = 46, [1][1][2][0][RTW89_FCC][10] = 36, [1][1][2][0][RTW89_ETSI][10] = 46, [1][1][2][0][RTW89_MKK][10] = 64, @@ -45213,6 +45363,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][10] = 36, [1][1][2][0][RTW89_CN][10] = 46, [1][1][2][0][RTW89_QATAR][10] = 46, + [1][1][2][0][RTW89_UK][10] = 46, [1][1][2][0][RTW89_FCC][11] = 127, [1][1][2][0][RTW89_ETSI][11] = 127, [1][1][2][0][RTW89_MKK][11] = 127, @@ -45224,6 +45375,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][11] = 127, [1][1][2][0][RTW89_CN][11] = 127, [1][1][2][0][RTW89_QATAR][11] = 127, + [1][1][2][0][RTW89_UK][11] = 127, [1][1][2][0][RTW89_FCC][12] = 127, [1][1][2][0][RTW89_ETSI][12] = 127, [1][1][2][0][RTW89_MKK][12] = 127, @@ -45235,6 +45387,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][12] = 127, [1][1][2][0][RTW89_CN][12] = 127, [1][1][2][0][RTW89_QATAR][12] = 127, + [1][1][2][0][RTW89_UK][12] = 127, [1][1][2][0][RTW89_FCC][13] = 127, [1][1][2][0][RTW89_ETSI][13] = 127, [1][1][2][0][RTW89_MKK][13] = 127, @@ -45246,6 +45399,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][13] = 127, [1][1][2][0][RTW89_CN][13] = 127, [1][1][2][0][RTW89_QATAR][13] = 127, + [1][1][2][0][RTW89_UK][13] = 127, [1][1][2][1][RTW89_FCC][0] = 127, [1][1][2][1][RTW89_ETSI][0] = 127, [1][1][2][1][RTW89_MKK][0] = 127, @@ -45257,6 +45411,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][0] = 127, [1][1][2][1][RTW89_CN][0] = 127, [1][1][2][1][RTW89_QATAR][0] = 127, + [1][1][2][1][RTW89_UK][0] = 127, [1][1][2][1][RTW89_FCC][1] = 127, [1][1][2][1][RTW89_ETSI][1] = 127, [1][1][2][1][RTW89_MKK][1] = 127, @@ -45268,6 +45423,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][1] = 127, [1][1][2][1][RTW89_CN][1] = 127, [1][1][2][1][RTW89_QATAR][1] = 127, + [1][1][2][1][RTW89_UK][1] = 127, [1][1][2][1][RTW89_FCC][2] = 52, [1][1][2][1][RTW89_ETSI][2] = 34, 
[1][1][2][1][RTW89_MKK][2] = 64, @@ -45279,6 +45435,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][2] = 52, [1][1][2][1][RTW89_CN][2] = 34, [1][1][2][1][RTW89_QATAR][2] = 34, + [1][1][2][1][RTW89_UK][2] = 34, [1][1][2][1][RTW89_FCC][3] = 52, [1][1][2][1][RTW89_ETSI][3] = 34, [1][1][2][1][RTW89_MKK][3] = 64, @@ -45290,6 +45447,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][3] = 52, [1][1][2][1][RTW89_CN][3] = 34, [1][1][2][1][RTW89_QATAR][3] = 34, + [1][1][2][1][RTW89_UK][3] = 34, [1][1][2][1][RTW89_FCC][4] = 56, [1][1][2][1][RTW89_ETSI][4] = 34, [1][1][2][1][RTW89_MKK][4] = 64, @@ -45301,6 +45459,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][4] = 56, [1][1][2][1][RTW89_CN][4] = 34, [1][1][2][1][RTW89_QATAR][4] = 34, + [1][1][2][1][RTW89_UK][4] = 34, [1][1][2][1][RTW89_FCC][5] = 60, [1][1][2][1][RTW89_ETSI][5] = 34, [1][1][2][1][RTW89_MKK][5] = 64, @@ -45312,6 +45471,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][5] = 60, [1][1][2][1][RTW89_CN][5] = 34, [1][1][2][1][RTW89_QATAR][5] = 34, + [1][1][2][1][RTW89_UK][5] = 34, [1][1][2][1][RTW89_FCC][6] = 54, [1][1][2][1][RTW89_ETSI][6] = 34, [1][1][2][1][RTW89_MKK][6] = 64, @@ -45323,6 +45483,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][6] = 54, [1][1][2][1][RTW89_CN][6] = 34, [1][1][2][1][RTW89_QATAR][6] = 34, + [1][1][2][1][RTW89_UK][6] = 34, [1][1][2][1][RTW89_FCC][7] = 50, [1][1][2][1][RTW89_ETSI][7] = 34, [1][1][2][1][RTW89_MKK][7] = 64, @@ -45334,6 +45495,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][7] = 50, [1][1][2][1][RTW89_CN][7] = 34, [1][1][2][1][RTW89_QATAR][7] = 34, + [1][1][2][1][RTW89_UK][7] = 34, [1][1][2][1][RTW89_FCC][8] = 50, [1][1][2][1][RTW89_ETSI][8] = 34, [1][1][2][1][RTW89_MKK][8] = 64, @@ -45345,6 +45507,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][8] = 50, [1][1][2][1][RTW89_CN][8] = 34, [1][1][2][1][RTW89_QATAR][8] = 34, + [1][1][2][1][RTW89_UK][8] = 34, [1][1][2][1][RTW89_FCC][9] = 38, [1][1][2][1][RTW89_ETSI][9] = 34, [1][1][2][1][RTW89_MKK][9] = 64, @@ -45356,6 +45519,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][9] = 38, [1][1][2][1][RTW89_CN][9] = 34, [1][1][2][1][RTW89_QATAR][9] = 34, + [1][1][2][1][RTW89_UK][9] = 34, [1][1][2][1][RTW89_FCC][10] = 36, [1][1][2][1][RTW89_ETSI][10] = 34, [1][1][2][1][RTW89_MKK][10] = 64, @@ -45367,6 +45531,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][10] = 36, [1][1][2][1][RTW89_CN][10] = 34, [1][1][2][1][RTW89_QATAR][10] = 34, + [1][1][2][1][RTW89_UK][10] = 34, [1][1][2][1][RTW89_FCC][11] = 127, [1][1][2][1][RTW89_ETSI][11] = 127, [1][1][2][1][RTW89_MKK][11] = 127, @@ -45378,6 +45543,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][11] = 127, [1][1][2][1][RTW89_CN][11] = 127, [1][1][2][1][RTW89_QATAR][11] = 127, + [1][1][2][1][RTW89_UK][11] = 127, [1][1][2][1][RTW89_FCC][12] = 127, [1][1][2][1][RTW89_ETSI][12] = 127, [1][1][2][1][RTW89_MKK][12] = 127, @@ -45389,6 +45555,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][12] = 127, [1][1][2][1][RTW89_CN][12] = 127, [1][1][2][1][RTW89_QATAR][12] = 127, + 
[1][1][2][1][RTW89_UK][12] = 127, [1][1][2][1][RTW89_FCC][13] = 127, [1][1][2][1][RTW89_ETSI][13] = 127, [1][1][2][1][RTW89_MKK][13] = 127, @@ -45400,6 +45567,7 @@ const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][13] = 127, [1][1][2][1][RTW89_CN][13] = 127, [1][1][2][1][RTW89_QATAR][13] = 127, + [1][1][2][1][RTW89_UK][13] = 127, }; const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] @@ -45595,6 +45763,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][0] = 62, [0][0][1][0][RTW89_CN][0] = 58, [0][0][1][0][RTW89_QATAR][0] = 58, + [0][0][1][0][RTW89_UK][0] = 58, [0][0][1][0][RTW89_FCC][2] = 76, [0][0][1][0][RTW89_ETSI][2] = 58, [0][0][1][0][RTW89_MKK][2] = 62, @@ -45606,6 +45775,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][2] = 62, [0][0][1][0][RTW89_CN][2] = 58, [0][0][1][0][RTW89_QATAR][2] = 58, + [0][0][1][0][RTW89_UK][2] = 58, [0][0][1][0][RTW89_FCC][4] = 76, [0][0][1][0][RTW89_ETSI][4] = 58, [0][0][1][0][RTW89_MKK][4] = 62, @@ -45617,6 +45787,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][4] = 62, [0][0][1][0][RTW89_CN][4] = 58, [0][0][1][0][RTW89_QATAR][4] = 58, + [0][0][1][0][RTW89_UK][4] = 58, [0][0][1][0][RTW89_FCC][6] = 76, [0][0][1][0][RTW89_ETSI][6] = 58, [0][0][1][0][RTW89_MKK][6] = 62, @@ -45628,6 +45799,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][6] = 62, [0][0][1][0][RTW89_CN][6] = 58, [0][0][1][0][RTW89_QATAR][6] = 58, + [0][0][1][0][RTW89_UK][6] = 58, [0][0][1][0][RTW89_FCC][8] = 76, [0][0][1][0][RTW89_ETSI][8] = 58, [0][0][1][0][RTW89_MKK][8] = 62, @@ -45639,6 +45811,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][8] = 76, [0][0][1][0][RTW89_CN][8] = 58, [0][0][1][0][RTW89_QATAR][8] = 58, + [0][0][1][0][RTW89_UK][8] = 58, [0][0][1][0][RTW89_FCC][10] = 76, [0][0][1][0][RTW89_ETSI][10] = 58, [0][0][1][0][RTW89_MKK][10] = 62, @@ -45650,6 +45823,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][10] = 76, [0][0][1][0][RTW89_CN][10] = 58, [0][0][1][0][RTW89_QATAR][10] = 58, + [0][0][1][0][RTW89_UK][10] = 58, [0][0][1][0][RTW89_FCC][12] = 76, [0][0][1][0][RTW89_ETSI][12] = 58, [0][0][1][0][RTW89_MKK][12] = 62, @@ -45661,6 +45835,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][12] = 76, [0][0][1][0][RTW89_CN][12] = 58, [0][0][1][0][RTW89_QATAR][12] = 58, + [0][0][1][0][RTW89_UK][12] = 58, [0][0][1][0][RTW89_FCC][14] = 76, [0][0][1][0][RTW89_ETSI][14] = 58, [0][0][1][0][RTW89_MKK][14] = 62, @@ -45672,6 +45847,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][14] = 76, [0][0][1][0][RTW89_CN][14] = 58, [0][0][1][0][RTW89_QATAR][14] = 58, + [0][0][1][0][RTW89_UK][14] = 58, [0][0][1][0][RTW89_FCC][15] = 76, [0][0][1][0][RTW89_ETSI][15] = 58, [0][0][1][0][RTW89_MKK][15] = 76, @@ -45683,6 +45859,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][15] = 76, [0][0][1][0][RTW89_CN][15] = 127, [0][0][1][0][RTW89_QATAR][15] = 52, + [0][0][1][0][RTW89_UK][15] = 58, [0][0][1][0][RTW89_FCC][17] = 76, [0][0][1][0][RTW89_ETSI][17] = 58, [0][0][1][0][RTW89_MKK][17] = 76, @@ -45694,6 +45871,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][17] = 76, 
[0][0][1][0][RTW89_CN][17] = 127, [0][0][1][0][RTW89_QATAR][17] = 52, + [0][0][1][0][RTW89_UK][17] = 58, [0][0][1][0][RTW89_FCC][19] = 76, [0][0][1][0][RTW89_ETSI][19] = 58, [0][0][1][0][RTW89_MKK][19] = 76, @@ -45705,6 +45883,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][19] = 76, [0][0][1][0][RTW89_CN][19] = 127, [0][0][1][0][RTW89_QATAR][19] = 52, + [0][0][1][0][RTW89_UK][19] = 58, [0][0][1][0][RTW89_FCC][21] = 76, [0][0][1][0][RTW89_ETSI][21] = 58, [0][0][1][0][RTW89_MKK][21] = 76, @@ -45716,6 +45895,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][21] = 76, [0][0][1][0][RTW89_CN][21] = 127, [0][0][1][0][RTW89_QATAR][21] = 52, + [0][0][1][0][RTW89_UK][21] = 58, [0][0][1][0][RTW89_FCC][23] = 76, [0][0][1][0][RTW89_ETSI][23] = 58, [0][0][1][0][RTW89_MKK][23] = 76, @@ -45727,6 +45907,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][23] = 76, [0][0][1][0][RTW89_CN][23] = 127, [0][0][1][0][RTW89_QATAR][23] = 52, + [0][0][1][0][RTW89_UK][23] = 58, [0][0][1][0][RTW89_FCC][25] = 76, [0][0][1][0][RTW89_ETSI][25] = 58, [0][0][1][0][RTW89_MKK][25] = 76, @@ -45738,6 +45919,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][25] = 76, [0][0][1][0][RTW89_CN][25] = 127, [0][0][1][0][RTW89_QATAR][25] = 52, + [0][0][1][0][RTW89_UK][25] = 58, [0][0][1][0][RTW89_FCC][27] = 76, [0][0][1][0][RTW89_ETSI][27] = 58, [0][0][1][0][RTW89_MKK][27] = 76, @@ -45749,6 +45931,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][27] = 76, [0][0][1][0][RTW89_CN][27] = 127, [0][0][1][0][RTW89_QATAR][27] = 52, + [0][0][1][0][RTW89_UK][27] = 58, [0][0][1][0][RTW89_FCC][29] = 76, [0][0][1][0][RTW89_ETSI][29] = 58, [0][0][1][0][RTW89_MKK][29] = 76, @@ -45760,6 +45943,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][29] = 76, [0][0][1][0][RTW89_CN][29] = 127, [0][0][1][0][RTW89_QATAR][29] = 52, + [0][0][1][0][RTW89_UK][29] = 58, [0][0][1][0][RTW89_FCC][31] = 76, [0][0][1][0][RTW89_ETSI][31] = 58, [0][0][1][0][RTW89_MKK][31] = 76, @@ -45771,6 +45955,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][31] = 76, [0][0][1][0][RTW89_CN][31] = 127, [0][0][1][0][RTW89_QATAR][31] = 52, + [0][0][1][0][RTW89_UK][31] = 58, [0][0][1][0][RTW89_FCC][33] = 76, [0][0][1][0][RTW89_ETSI][33] = 58, [0][0][1][0][RTW89_MKK][33] = 76, @@ -45782,6 +45967,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][33] = 76, [0][0][1][0][RTW89_CN][33] = 127, [0][0][1][0][RTW89_QATAR][33] = 52, + [0][0][1][0][RTW89_UK][33] = 58, [0][0][1][0][RTW89_FCC][35] = 74, [0][0][1][0][RTW89_ETSI][35] = 58, [0][0][1][0][RTW89_MKK][35] = 76, @@ -45793,6 +45979,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][35] = 74, [0][0][1][0][RTW89_CN][35] = 127, [0][0][1][0][RTW89_QATAR][35] = 52, + [0][0][1][0][RTW89_UK][35] = 58, [0][0][1][0][RTW89_FCC][37] = 76, [0][0][1][0][RTW89_ETSI][37] = 127, [0][0][1][0][RTW89_MKK][37] = 76, @@ -45804,6 +45991,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][37] = 76, [0][0][1][0][RTW89_CN][37] = 127, [0][0][1][0][RTW89_QATAR][37] = 127, + [0][0][1][0][RTW89_UK][37] = 76, [0][0][1][0][RTW89_FCC][38] = 76, [0][0][1][0][RTW89_ETSI][38] = 28, [0][0][1][0][RTW89_MKK][38] 
= 127, @@ -45815,6 +46003,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][38] = 76, [0][0][1][0][RTW89_CN][38] = 72, [0][0][1][0][RTW89_QATAR][38] = 28, + [0][0][1][0][RTW89_UK][38] = 56, [0][0][1][0][RTW89_FCC][40] = 76, [0][0][1][0][RTW89_ETSI][40] = 28, [0][0][1][0][RTW89_MKK][40] = 127, @@ -45826,6 +46015,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][40] = 76, [0][0][1][0][RTW89_CN][40] = 76, [0][0][1][0][RTW89_QATAR][40] = 28, + [0][0][1][0][RTW89_UK][40] = 56, [0][0][1][0][RTW89_FCC][42] = 76, [0][0][1][0][RTW89_ETSI][42] = 28, [0][0][1][0][RTW89_MKK][42] = 127, @@ -45837,6 +46027,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][42] = 76, [0][0][1][0][RTW89_CN][42] = 76, [0][0][1][0][RTW89_QATAR][42] = 28, + [0][0][1][0][RTW89_UK][42] = 56, [0][0][1][0][RTW89_FCC][44] = 76, [0][0][1][0][RTW89_ETSI][44] = 28, [0][0][1][0][RTW89_MKK][44] = 127, @@ -45848,6 +46039,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][44] = 76, [0][0][1][0][RTW89_CN][44] = 76, [0][0][1][0][RTW89_QATAR][44] = 28, + [0][0][1][0][RTW89_UK][44] = 56, [0][0][1][0][RTW89_FCC][46] = 76, [0][0][1][0][RTW89_ETSI][46] = 28, [0][0][1][0][RTW89_MKK][46] = 127, @@ -45859,6 +46051,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MEXICO][46] = 76, [0][0][1][0][RTW89_CN][46] = 76, [0][0][1][0][RTW89_QATAR][46] = 28, + [0][0][1][0][RTW89_UK][46] = 56, [0][1][1][0][RTW89_FCC][0] = 68, [0][1][1][0][RTW89_ETSI][0] = 46, [0][1][1][0][RTW89_MKK][0] = 50, @@ -45870,6 +46063,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][0] = 50, [0][1][1][0][RTW89_CN][0] = 46, [0][1][1][0][RTW89_QATAR][0] = 46, + [0][1][1][0][RTW89_UK][0] = 46, [0][1][1][0][RTW89_FCC][2] = 68, [0][1][1][0][RTW89_ETSI][2] = 46, [0][1][1][0][RTW89_MKK][2] = 50, @@ -45881,6 +46075,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][2] = 50, [0][1][1][0][RTW89_CN][2] = 46, [0][1][1][0][RTW89_QATAR][2] = 46, + [0][1][1][0][RTW89_UK][2] = 46, [0][1][1][0][RTW89_FCC][4] = 68, [0][1][1][0][RTW89_ETSI][4] = 46, [0][1][1][0][RTW89_MKK][4] = 50, @@ -45892,6 +46087,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][4] = 50, [0][1][1][0][RTW89_CN][4] = 46, [0][1][1][0][RTW89_QATAR][4] = 46, + [0][1][1][0][RTW89_UK][4] = 46, [0][1][1][0][RTW89_FCC][6] = 68, [0][1][1][0][RTW89_ETSI][6] = 46, [0][1][1][0][RTW89_MKK][6] = 50, @@ -45903,6 +46099,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][6] = 50, [0][1][1][0][RTW89_CN][6] = 46, [0][1][1][0][RTW89_QATAR][6] = 46, + [0][1][1][0][RTW89_UK][6] = 46, [0][1][1][0][RTW89_FCC][8] = 68, [0][1][1][0][RTW89_ETSI][8] = 46, [0][1][1][0][RTW89_MKK][8] = 50, @@ -45914,6 +46111,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][8] = 68, [0][1][1][0][RTW89_CN][8] = 46, [0][1][1][0][RTW89_QATAR][8] = 46, + [0][1][1][0][RTW89_UK][8] = 46, [0][1][1][0][RTW89_FCC][10] = 68, [0][1][1][0][RTW89_ETSI][10] = 46, [0][1][1][0][RTW89_MKK][10] = 50, @@ -45925,6 +46123,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][10] = 68, [0][1][1][0][RTW89_CN][10] = 46, [0][1][1][0][RTW89_QATAR][10] = 46, + [0][1][1][0][RTW89_UK][10] = 46, 
[0][1][1][0][RTW89_FCC][12] = 68, [0][1][1][0][RTW89_ETSI][12] = 46, [0][1][1][0][RTW89_MKK][12] = 50, @@ -45936,6 +46135,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][12] = 68, [0][1][1][0][RTW89_CN][12] = 46, [0][1][1][0][RTW89_QATAR][12] = 46, + [0][1][1][0][RTW89_UK][12] = 46, [0][1][1][0][RTW89_FCC][14] = 68, [0][1][1][0][RTW89_ETSI][14] = 46, [0][1][1][0][RTW89_MKK][14] = 50, @@ -45947,6 +46147,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][14] = 68, [0][1][1][0][RTW89_CN][14] = 46, [0][1][1][0][RTW89_QATAR][14] = 46, + [0][1][1][0][RTW89_UK][14] = 46, [0][1][1][0][RTW89_FCC][15] = 68, [0][1][1][0][RTW89_ETSI][15] = 46, [0][1][1][0][RTW89_MKK][15] = 70, @@ -45958,6 +46159,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][15] = 68, [0][1][1][0][RTW89_CN][15] = 127, [0][1][1][0][RTW89_QATAR][15] = 40, + [0][1][1][0][RTW89_UK][15] = 46, [0][1][1][0][RTW89_FCC][17] = 68, [0][1][1][0][RTW89_ETSI][17] = 46, [0][1][1][0][RTW89_MKK][17] = 70, @@ -45969,6 +46171,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][17] = 68, [0][1][1][0][RTW89_CN][17] = 127, [0][1][1][0][RTW89_QATAR][17] = 40, + [0][1][1][0][RTW89_UK][17] = 46, [0][1][1][0][RTW89_FCC][19] = 68, [0][1][1][0][RTW89_ETSI][19] = 46, [0][1][1][0][RTW89_MKK][19] = 70, @@ -45980,6 +46183,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][19] = 68, [0][1][1][0][RTW89_CN][19] = 127, [0][1][1][0][RTW89_QATAR][19] = 40, + [0][1][1][0][RTW89_UK][19] = 46, [0][1][1][0][RTW89_FCC][21] = 68, [0][1][1][0][RTW89_ETSI][21] = 46, [0][1][1][0][RTW89_MKK][21] = 70, @@ -45991,6 +46195,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][21] = 68, [0][1][1][0][RTW89_CN][21] = 127, [0][1][1][0][RTW89_QATAR][21] = 40, + [0][1][1][0][RTW89_UK][21] = 46, [0][1][1][0][RTW89_FCC][23] = 68, [0][1][1][0][RTW89_ETSI][23] = 46, [0][1][1][0][RTW89_MKK][23] = 70, @@ -46002,6 +46207,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][23] = 68, [0][1][1][0][RTW89_CN][23] = 127, [0][1][1][0][RTW89_QATAR][23] = 40, + [0][1][1][0][RTW89_UK][23] = 46, [0][1][1][0][RTW89_FCC][25] = 68, [0][1][1][0][RTW89_ETSI][25] = 46, [0][1][1][0][RTW89_MKK][25] = 70, @@ -46013,6 +46219,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][25] = 68, [0][1][1][0][RTW89_CN][25] = 127, [0][1][1][0][RTW89_QATAR][25] = 40, + [0][1][1][0][RTW89_UK][25] = 46, [0][1][1][0][RTW89_FCC][27] = 68, [0][1][1][0][RTW89_ETSI][27] = 46, [0][1][1][0][RTW89_MKK][27] = 70, @@ -46024,6 +46231,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][27] = 68, [0][1][1][0][RTW89_CN][27] = 127, [0][1][1][0][RTW89_QATAR][27] = 40, + [0][1][1][0][RTW89_UK][27] = 46, [0][1][1][0][RTW89_FCC][29] = 68, [0][1][1][0][RTW89_ETSI][29] = 46, [0][1][1][0][RTW89_MKK][29] = 70, @@ -46035,6 +46243,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][29] = 68, [0][1][1][0][RTW89_CN][29] = 127, [0][1][1][0][RTW89_QATAR][29] = 40, + [0][1][1][0][RTW89_UK][29] = 46, [0][1][1][0][RTW89_FCC][31] = 68, [0][1][1][0][RTW89_ETSI][31] = 46, [0][1][1][0][RTW89_MKK][31] = 70, @@ -46046,6 +46255,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] 
[0][1][1][0][RTW89_MEXICO][31] = 68, [0][1][1][0][RTW89_CN][31] = 127, [0][1][1][0][RTW89_QATAR][31] = 40, + [0][1][1][0][RTW89_UK][31] = 46, [0][1][1][0][RTW89_FCC][33] = 68, [0][1][1][0][RTW89_ETSI][33] = 46, [0][1][1][0][RTW89_MKK][33] = 70, @@ -46057,6 +46267,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][33] = 68, [0][1][1][0][RTW89_CN][33] = 127, [0][1][1][0][RTW89_QATAR][33] = 40, + [0][1][1][0][RTW89_UK][33] = 46, [0][1][1][0][RTW89_FCC][35] = 66, [0][1][1][0][RTW89_ETSI][35] = 46, [0][1][1][0][RTW89_MKK][35] = 70, @@ -46068,6 +46279,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][35] = 66, [0][1][1][0][RTW89_CN][35] = 127, [0][1][1][0][RTW89_QATAR][35] = 40, + [0][1][1][0][RTW89_UK][35] = 46, [0][1][1][0][RTW89_FCC][37] = 68, [0][1][1][0][RTW89_ETSI][37] = 127, [0][1][1][0][RTW89_MKK][37] = 70, @@ -46079,6 +46291,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][37] = 68, [0][1][1][0][RTW89_CN][37] = 127, [0][1][1][0][RTW89_QATAR][37] = 127, + [0][1][1][0][RTW89_UK][37] = 74, [0][1][1][0][RTW89_FCC][38] = 76, [0][1][1][0][RTW89_ETSI][38] = 16, [0][1][1][0][RTW89_MKK][38] = 127, @@ -46090,6 +46303,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][38] = 76, [0][1][1][0][RTW89_CN][38] = 72, [0][1][1][0][RTW89_QATAR][38] = 16, + [0][1][1][0][RTW89_UK][38] = 44, [0][1][1][0][RTW89_FCC][40] = 76, [0][1][1][0][RTW89_ETSI][40] = 16, [0][1][1][0][RTW89_MKK][40] = 127, @@ -46101,6 +46315,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][40] = 76, [0][1][1][0][RTW89_CN][40] = 76, [0][1][1][0][RTW89_QATAR][40] = 16, + [0][1][1][0][RTW89_UK][40] = 44, [0][1][1][0][RTW89_FCC][42] = 76, [0][1][1][0][RTW89_ETSI][42] = 16, [0][1][1][0][RTW89_MKK][42] = 127, @@ -46112,6 +46327,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][42] = 76, [0][1][1][0][RTW89_CN][42] = 76, [0][1][1][0][RTW89_QATAR][42] = 16, + [0][1][1][0][RTW89_UK][42] = 44, [0][1][1][0][RTW89_FCC][44] = 76, [0][1][1][0][RTW89_ETSI][44] = 16, [0][1][1][0][RTW89_MKK][44] = 127, @@ -46123,6 +46339,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][44] = 76, [0][1][1][0][RTW89_CN][44] = 76, [0][1][1][0][RTW89_QATAR][44] = 16, + [0][1][1][0][RTW89_UK][44] = 44, [0][1][1][0][RTW89_FCC][46] = 76, [0][1][1][0][RTW89_ETSI][46] = 16, [0][1][1][0][RTW89_MKK][46] = 127, @@ -46134,6 +46351,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MEXICO][46] = 76, [0][1][1][0][RTW89_CN][46] = 76, [0][1][1][0][RTW89_QATAR][46] = 16, + [0][1][1][0][RTW89_UK][46] = 44, [0][0][2][0][RTW89_FCC][0] = 76, [0][0][2][0][RTW89_ETSI][0] = 58, [0][0][2][0][RTW89_MKK][0] = 62, @@ -46145,6 +46363,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][0] = 62, [0][0][2][0][RTW89_CN][0] = 58, [0][0][2][0][RTW89_QATAR][0] = 58, + [0][0][2][0][RTW89_UK][0] = 58, [0][0][2][0][RTW89_FCC][2] = 76, [0][0][2][0][RTW89_ETSI][2] = 58, [0][0][2][0][RTW89_MKK][2] = 62, @@ -46156,6 +46375,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][2] = 62, [0][0][2][0][RTW89_CN][2] = 58, [0][0][2][0][RTW89_QATAR][2] = 58, + [0][0][2][0][RTW89_UK][2] = 58, [0][0][2][0][RTW89_FCC][4] = 76, [0][0][2][0][RTW89_ETSI][4] = 58, 
[0][0][2][0][RTW89_MKK][4] = 62, @@ -46167,6 +46387,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][4] = 62, [0][0][2][0][RTW89_CN][4] = 58, [0][0][2][0][RTW89_QATAR][4] = 58, + [0][0][2][0][RTW89_UK][4] = 58, [0][0][2][0][RTW89_FCC][6] = 76, [0][0][2][0][RTW89_ETSI][6] = 58, [0][0][2][0][RTW89_MKK][6] = 62, @@ -46178,6 +46399,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][6] = 62, [0][0][2][0][RTW89_CN][6] = 58, [0][0][2][0][RTW89_QATAR][6] = 58, + [0][0][2][0][RTW89_UK][6] = 58, [0][0][2][0][RTW89_FCC][8] = 76, [0][0][2][0][RTW89_ETSI][8] = 58, [0][0][2][0][RTW89_MKK][8] = 62, @@ -46189,6 +46411,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][8] = 76, [0][0][2][0][RTW89_CN][8] = 58, [0][0][2][0][RTW89_QATAR][8] = 58, + [0][0][2][0][RTW89_UK][8] = 58, [0][0][2][0][RTW89_FCC][10] = 76, [0][0][2][0][RTW89_ETSI][10] = 58, [0][0][2][0][RTW89_MKK][10] = 62, @@ -46200,6 +46423,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][10] = 76, [0][0][2][0][RTW89_CN][10] = 58, [0][0][2][0][RTW89_QATAR][10] = 58, + [0][0][2][0][RTW89_UK][10] = 58, [0][0][2][0][RTW89_FCC][12] = 76, [0][0][2][0][RTW89_ETSI][12] = 58, [0][0][2][0][RTW89_MKK][12] = 62, @@ -46211,6 +46435,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][12] = 76, [0][0][2][0][RTW89_CN][12] = 58, [0][0][2][0][RTW89_QATAR][12] = 58, + [0][0][2][0][RTW89_UK][12] = 58, [0][0][2][0][RTW89_FCC][14] = 76, [0][0][2][0][RTW89_ETSI][14] = 58, [0][0][2][0][RTW89_MKK][14] = 62, @@ -46222,6 +46447,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][14] = 76, [0][0][2][0][RTW89_CN][14] = 58, [0][0][2][0][RTW89_QATAR][14] = 58, + [0][0][2][0][RTW89_UK][14] = 58, [0][0][2][0][RTW89_FCC][15] = 74, [0][0][2][0][RTW89_ETSI][15] = 58, [0][0][2][0][RTW89_MKK][15] = 76, @@ -46233,6 +46459,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][15] = 74, [0][0][2][0][RTW89_CN][15] = 127, [0][0][2][0][RTW89_QATAR][15] = 52, + [0][0][2][0][RTW89_UK][15] = 58, [0][0][2][0][RTW89_FCC][17] = 76, [0][0][2][0][RTW89_ETSI][17] = 58, [0][0][2][0][RTW89_MKK][17] = 76, @@ -46244,6 +46471,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][17] = 76, [0][0][2][0][RTW89_CN][17] = 127, [0][0][2][0][RTW89_QATAR][17] = 52, + [0][0][2][0][RTW89_UK][17] = 58, [0][0][2][0][RTW89_FCC][19] = 76, [0][0][2][0][RTW89_ETSI][19] = 58, [0][0][2][0][RTW89_MKK][19] = 76, @@ -46255,6 +46483,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][19] = 76, [0][0][2][0][RTW89_CN][19] = 127, [0][0][2][0][RTW89_QATAR][19] = 52, + [0][0][2][0][RTW89_UK][19] = 58, [0][0][2][0][RTW89_FCC][21] = 76, [0][0][2][0][RTW89_ETSI][21] = 58, [0][0][2][0][RTW89_MKK][21] = 76, @@ -46266,6 +46495,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][21] = 76, [0][0][2][0][RTW89_CN][21] = 127, [0][0][2][0][RTW89_QATAR][21] = 52, + [0][0][2][0][RTW89_UK][21] = 58, [0][0][2][0][RTW89_FCC][23] = 76, [0][0][2][0][RTW89_ETSI][23] = 58, [0][0][2][0][RTW89_MKK][23] = 76, @@ -46277,6 +46507,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][23] = 76, [0][0][2][0][RTW89_CN][23] = 127, [0][0][2][0][RTW89_QATAR][23] = 
52, + [0][0][2][0][RTW89_UK][23] = 58, [0][0][2][0][RTW89_FCC][25] = 76, [0][0][2][0][RTW89_ETSI][25] = 58, [0][0][2][0][RTW89_MKK][25] = 76, @@ -46288,6 +46519,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][25] = 76, [0][0][2][0][RTW89_CN][25] = 127, [0][0][2][0][RTW89_QATAR][25] = 52, + [0][0][2][0][RTW89_UK][25] = 58, [0][0][2][0][RTW89_FCC][27] = 76, [0][0][2][0][RTW89_ETSI][27] = 58, [0][0][2][0][RTW89_MKK][27] = 76, @@ -46299,6 +46531,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][27] = 76, [0][0][2][0][RTW89_CN][27] = 127, [0][0][2][0][RTW89_QATAR][27] = 52, + [0][0][2][0][RTW89_UK][27] = 58, [0][0][2][0][RTW89_FCC][29] = 76, [0][0][2][0][RTW89_ETSI][29] = 58, [0][0][2][0][RTW89_MKK][29] = 76, @@ -46310,6 +46543,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][29] = 76, [0][0][2][0][RTW89_CN][29] = 127, [0][0][2][0][RTW89_QATAR][29] = 52, + [0][0][2][0][RTW89_UK][29] = 58, [0][0][2][0][RTW89_FCC][31] = 76, [0][0][2][0][RTW89_ETSI][31] = 58, [0][0][2][0][RTW89_MKK][31] = 76, @@ -46321,6 +46555,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][31] = 76, [0][0][2][0][RTW89_CN][31] = 127, [0][0][2][0][RTW89_QATAR][31] = 52, + [0][0][2][0][RTW89_UK][31] = 58, [0][0][2][0][RTW89_FCC][33] = 76, [0][0][2][0][RTW89_ETSI][33] = 58, [0][0][2][0][RTW89_MKK][33] = 76, @@ -46332,6 +46567,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][33] = 76, [0][0][2][0][RTW89_CN][33] = 127, [0][0][2][0][RTW89_QATAR][33] = 52, + [0][0][2][0][RTW89_UK][33] = 58, [0][0][2][0][RTW89_FCC][35] = 70, [0][0][2][0][RTW89_ETSI][35] = 58, [0][0][2][0][RTW89_MKK][35] = 76, @@ -46343,6 +46579,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][35] = 70, [0][0][2][0][RTW89_CN][35] = 127, [0][0][2][0][RTW89_QATAR][35] = 52, + [0][0][2][0][RTW89_UK][35] = 58, [0][0][2][0][RTW89_FCC][37] = 76, [0][0][2][0][RTW89_ETSI][37] = 127, [0][0][2][0][RTW89_MKK][37] = 76, @@ -46354,6 +46591,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][37] = 76, [0][0][2][0][RTW89_CN][37] = 127, [0][0][2][0][RTW89_QATAR][37] = 127, + [0][0][2][0][RTW89_UK][37] = 76, [0][0][2][0][RTW89_FCC][38] = 76, [0][0][2][0][RTW89_ETSI][38] = 28, [0][0][2][0][RTW89_MKK][38] = 127, @@ -46365,6 +46603,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][38] = 76, [0][0][2][0][RTW89_CN][38] = 68, [0][0][2][0][RTW89_QATAR][38] = 28, + [0][0][2][0][RTW89_UK][38] = 58, [0][0][2][0][RTW89_FCC][40] = 76, [0][0][2][0][RTW89_ETSI][40] = 28, [0][0][2][0][RTW89_MKK][40] = 127, @@ -46376,6 +46615,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][40] = 76, [0][0][2][0][RTW89_CN][40] = 76, [0][0][2][0][RTW89_QATAR][40] = 28, + [0][0][2][0][RTW89_UK][40] = 58, [0][0][2][0][RTW89_FCC][42] = 76, [0][0][2][0][RTW89_ETSI][42] = 28, [0][0][2][0][RTW89_MKK][42] = 127, @@ -46387,6 +46627,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][42] = 76, [0][0][2][0][RTW89_CN][42] = 76, [0][0][2][0][RTW89_QATAR][42] = 28, + [0][0][2][0][RTW89_UK][42] = 58, [0][0][2][0][RTW89_FCC][44] = 76, [0][0][2][0][RTW89_ETSI][44] = 28, [0][0][2][0][RTW89_MKK][44] = 127, @@ -46398,6 +46639,7 @@ const s8 
rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][44] = 76, [0][0][2][0][RTW89_CN][44] = 76, [0][0][2][0][RTW89_QATAR][44] = 28, + [0][0][2][0][RTW89_UK][44] = 58, [0][0][2][0][RTW89_FCC][46] = 76, [0][0][2][0][RTW89_ETSI][46] = 28, [0][0][2][0][RTW89_MKK][46] = 127, @@ -46409,6 +46651,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MEXICO][46] = 76, [0][0][2][0][RTW89_CN][46] = 76, [0][0][2][0][RTW89_QATAR][46] = 28, + [0][0][2][0][RTW89_UK][46] = 58, [0][1][2][0][RTW89_FCC][0] = 68, [0][1][2][0][RTW89_ETSI][0] = 46, [0][1][2][0][RTW89_MKK][0] = 50, @@ -46420,6 +46663,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][0] = 50, [0][1][2][0][RTW89_CN][0] = 46, [0][1][2][0][RTW89_QATAR][0] = 46, + [0][1][2][0][RTW89_UK][0] = 46, [0][1][2][0][RTW89_FCC][2] = 68, [0][1][2][0][RTW89_ETSI][2] = 46, [0][1][2][0][RTW89_MKK][2] = 50, @@ -46431,6 +46675,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][2] = 50, [0][1][2][0][RTW89_CN][2] = 46, [0][1][2][0][RTW89_QATAR][2] = 46, + [0][1][2][0][RTW89_UK][2] = 46, [0][1][2][0][RTW89_FCC][4] = 68, [0][1][2][0][RTW89_ETSI][4] = 46, [0][1][2][0][RTW89_MKK][4] = 50, @@ -46442,6 +46687,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][4] = 50, [0][1][2][0][RTW89_CN][4] = 46, [0][1][2][0][RTW89_QATAR][4] = 46, + [0][1][2][0][RTW89_UK][4] = 46, [0][1][2][0][RTW89_FCC][6] = 68, [0][1][2][0][RTW89_ETSI][6] = 46, [0][1][2][0][RTW89_MKK][6] = 50, @@ -46453,6 +46699,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][6] = 50, [0][1][2][0][RTW89_CN][6] = 46, [0][1][2][0][RTW89_QATAR][6] = 46, + [0][1][2][0][RTW89_UK][6] = 46, [0][1][2][0][RTW89_FCC][8] = 68, [0][1][2][0][RTW89_ETSI][8] = 46, [0][1][2][0][RTW89_MKK][8] = 50, @@ -46464,6 +46711,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][8] = 68, [0][1][2][0][RTW89_CN][8] = 46, [0][1][2][0][RTW89_QATAR][8] = 46, + [0][1][2][0][RTW89_UK][8] = 46, [0][1][2][0][RTW89_FCC][10] = 68, [0][1][2][0][RTW89_ETSI][10] = 46, [0][1][2][0][RTW89_MKK][10] = 50, @@ -46475,6 +46723,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][10] = 68, [0][1][2][0][RTW89_CN][10] = 46, [0][1][2][0][RTW89_QATAR][10] = 46, + [0][1][2][0][RTW89_UK][10] = 46, [0][1][2][0][RTW89_FCC][12] = 68, [0][1][2][0][RTW89_ETSI][12] = 46, [0][1][2][0][RTW89_MKK][12] = 50, @@ -46486,6 +46735,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][12] = 68, [0][1][2][0][RTW89_CN][12] = 46, [0][1][2][0][RTW89_QATAR][12] = 46, + [0][1][2][0][RTW89_UK][12] = 46, [0][1][2][0][RTW89_FCC][14] = 68, [0][1][2][0][RTW89_ETSI][14] = 46, [0][1][2][0][RTW89_MKK][14] = 50, @@ -46497,6 +46747,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][14] = 68, [0][1][2][0][RTW89_CN][14] = 46, [0][1][2][0][RTW89_QATAR][14] = 46, + [0][1][2][0][RTW89_UK][14] = 46, [0][1][2][0][RTW89_FCC][15] = 68, [0][1][2][0][RTW89_ETSI][15] = 46, [0][1][2][0][RTW89_MKK][15] = 70, @@ -46508,6 +46759,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][15] = 68, [0][1][2][0][RTW89_CN][15] = 127, [0][1][2][0][RTW89_QATAR][15] = 40, + [0][1][2][0][RTW89_UK][15] = 46, [0][1][2][0][RTW89_FCC][17] = 68, 
[0][1][2][0][RTW89_ETSI][17] = 46, [0][1][2][0][RTW89_MKK][17] = 70, @@ -46519,6 +46771,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][17] = 68, [0][1][2][0][RTW89_CN][17] = 127, [0][1][2][0][RTW89_QATAR][17] = 40, + [0][1][2][0][RTW89_UK][17] = 46, [0][1][2][0][RTW89_FCC][19] = 68, [0][1][2][0][RTW89_ETSI][19] = 46, [0][1][2][0][RTW89_MKK][19] = 70, @@ -46530,6 +46783,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][19] = 68, [0][1][2][0][RTW89_CN][19] = 127, [0][1][2][0][RTW89_QATAR][19] = 40, + [0][1][2][0][RTW89_UK][19] = 46, [0][1][2][0][RTW89_FCC][21] = 68, [0][1][2][0][RTW89_ETSI][21] = 46, [0][1][2][0][RTW89_MKK][21] = 70, @@ -46541,6 +46795,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][21] = 68, [0][1][2][0][RTW89_CN][21] = 127, [0][1][2][0][RTW89_QATAR][21] = 40, + [0][1][2][0][RTW89_UK][21] = 46, [0][1][2][0][RTW89_FCC][23] = 68, [0][1][2][0][RTW89_ETSI][23] = 46, [0][1][2][0][RTW89_MKK][23] = 70, @@ -46552,6 +46807,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][23] = 68, [0][1][2][0][RTW89_CN][23] = 127, [0][1][2][0][RTW89_QATAR][23] = 40, + [0][1][2][0][RTW89_UK][23] = 46, [0][1][2][0][RTW89_FCC][25] = 68, [0][1][2][0][RTW89_ETSI][25] = 46, [0][1][2][0][RTW89_MKK][25] = 70, @@ -46563,6 +46819,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][25] = 68, [0][1][2][0][RTW89_CN][25] = 127, [0][1][2][0][RTW89_QATAR][25] = 40, + [0][1][2][0][RTW89_UK][25] = 46, [0][1][2][0][RTW89_FCC][27] = 68, [0][1][2][0][RTW89_ETSI][27] = 46, [0][1][2][0][RTW89_MKK][27] = 70, @@ -46574,6 +46831,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][27] = 68, [0][1][2][0][RTW89_CN][27] = 127, [0][1][2][0][RTW89_QATAR][27] = 40, + [0][1][2][0][RTW89_UK][27] = 46, [0][1][2][0][RTW89_FCC][29] = 68, [0][1][2][0][RTW89_ETSI][29] = 46, [0][1][2][0][RTW89_MKK][29] = 70, @@ -46585,6 +46843,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][29] = 68, [0][1][2][0][RTW89_CN][29] = 127, [0][1][2][0][RTW89_QATAR][29] = 40, + [0][1][2][0][RTW89_UK][29] = 46, [0][1][2][0][RTW89_FCC][31] = 68, [0][1][2][0][RTW89_ETSI][31] = 46, [0][1][2][0][RTW89_MKK][31] = 70, @@ -46596,6 +46855,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][31] = 68, [0][1][2][0][RTW89_CN][31] = 127, [0][1][2][0][RTW89_QATAR][31] = 40, + [0][1][2][0][RTW89_UK][31] = 46, [0][1][2][0][RTW89_FCC][33] = 68, [0][1][2][0][RTW89_ETSI][33] = 46, [0][1][2][0][RTW89_MKK][33] = 70, @@ -46607,6 +46867,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][33] = 68, [0][1][2][0][RTW89_CN][33] = 127, [0][1][2][0][RTW89_QATAR][33] = 40, + [0][1][2][0][RTW89_UK][33] = 46, [0][1][2][0][RTW89_FCC][35] = 64, [0][1][2][0][RTW89_ETSI][35] = 46, [0][1][2][0][RTW89_MKK][35] = 70, @@ -46618,6 +46879,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][35] = 64, [0][1][2][0][RTW89_CN][35] = 127, [0][1][2][0][RTW89_QATAR][35] = 40, + [0][1][2][0][RTW89_UK][35] = 46, [0][1][2][0][RTW89_FCC][37] = 68, [0][1][2][0][RTW89_ETSI][37] = 127, [0][1][2][0][RTW89_MKK][37] = 70, @@ -46629,6 +46891,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][37] = 68, 
[0][1][2][0][RTW89_CN][37] = 127, [0][1][2][0][RTW89_QATAR][37] = 127, + [0][1][2][0][RTW89_UK][37] = 74, [0][1][2][0][RTW89_FCC][38] = 76, [0][1][2][0][RTW89_ETSI][38] = 16, [0][1][2][0][RTW89_MKK][38] = 127, @@ -46640,6 +46903,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][38] = 76, [0][1][2][0][RTW89_CN][38] = 68, [0][1][2][0][RTW89_QATAR][38] = 16, + [0][1][2][0][RTW89_UK][38] = 46, [0][1][2][0][RTW89_FCC][40] = 76, [0][1][2][0][RTW89_ETSI][40] = 16, [0][1][2][0][RTW89_MKK][40] = 127, @@ -46651,6 +46915,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][40] = 76, [0][1][2][0][RTW89_CN][40] = 76, [0][1][2][0][RTW89_QATAR][40] = 16, + [0][1][2][0][RTW89_UK][40] = 46, [0][1][2][0][RTW89_FCC][42] = 76, [0][1][2][0][RTW89_ETSI][42] = 16, [0][1][2][0][RTW89_MKK][42] = 127, @@ -46662,6 +46927,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][42] = 76, [0][1][2][0][RTW89_CN][42] = 76, [0][1][2][0][RTW89_QATAR][42] = 16, + [0][1][2][0][RTW89_UK][42] = 46, [0][1][2][0][RTW89_FCC][44] = 76, [0][1][2][0][RTW89_ETSI][44] = 16, [0][1][2][0][RTW89_MKK][44] = 127, @@ -46673,6 +46939,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][44] = 76, [0][1][2][0][RTW89_CN][44] = 76, [0][1][2][0][RTW89_QATAR][44] = 16, + [0][1][2][0][RTW89_UK][44] = 46, [0][1][2][0][RTW89_FCC][46] = 76, [0][1][2][0][RTW89_ETSI][46] = 16, [0][1][2][0][RTW89_MKK][46] = 127, @@ -46684,6 +46951,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MEXICO][46] = 76, [0][1][2][0][RTW89_CN][46] = 76, [0][1][2][0][RTW89_QATAR][46] = 16, + [0][1][2][0][RTW89_UK][46] = 46, [0][1][2][1][RTW89_FCC][0] = 68, [0][1][2][1][RTW89_ETSI][0] = 34, [0][1][2][1][RTW89_MKK][0] = 50, @@ -46695,6 +46963,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][0] = 50, [0][1][2][1][RTW89_CN][0] = 34, [0][1][2][1][RTW89_QATAR][0] = 34, + [0][1][2][1][RTW89_UK][0] = 34, [0][1][2][1][RTW89_FCC][2] = 68, [0][1][2][1][RTW89_ETSI][2] = 34, [0][1][2][1][RTW89_MKK][2] = 50, @@ -46706,6 +46975,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][2] = 50, [0][1][2][1][RTW89_CN][2] = 34, [0][1][2][1][RTW89_QATAR][2] = 34, + [0][1][2][1][RTW89_UK][2] = 34, [0][1][2][1][RTW89_FCC][4] = 68, [0][1][2][1][RTW89_ETSI][4] = 34, [0][1][2][1][RTW89_MKK][4] = 50, @@ -46717,6 +46987,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][4] = 50, [0][1][2][1][RTW89_CN][4] = 34, [0][1][2][1][RTW89_QATAR][4] = 34, + [0][1][2][1][RTW89_UK][4] = 34, [0][1][2][1][RTW89_FCC][6] = 68, [0][1][2][1][RTW89_ETSI][6] = 34, [0][1][2][1][RTW89_MKK][6] = 50, @@ -46728,6 +46999,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][6] = 50, [0][1][2][1][RTW89_CN][6] = 34, [0][1][2][1][RTW89_QATAR][6] = 34, + [0][1][2][1][RTW89_UK][6] = 34, [0][1][2][1][RTW89_FCC][8] = 68, [0][1][2][1][RTW89_ETSI][8] = 34, [0][1][2][1][RTW89_MKK][8] = 50, @@ -46739,6 +47011,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][8] = 68, [0][1][2][1][RTW89_CN][8] = 34, [0][1][2][1][RTW89_QATAR][8] = 34, + [0][1][2][1][RTW89_UK][8] = 34, [0][1][2][1][RTW89_FCC][10] = 68, [0][1][2][1][RTW89_ETSI][10] = 34, [0][1][2][1][RTW89_MKK][10] = 50, @@ -46750,6 +47023,7 @@ const s8 
rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][10] = 68, [0][1][2][1][RTW89_CN][10] = 34, [0][1][2][1][RTW89_QATAR][10] = 34, + [0][1][2][1][RTW89_UK][10] = 34, [0][1][2][1][RTW89_FCC][12] = 68, [0][1][2][1][RTW89_ETSI][12] = 34, [0][1][2][1][RTW89_MKK][12] = 50, @@ -46761,6 +47035,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][12] = 68, [0][1][2][1][RTW89_CN][12] = 34, [0][1][2][1][RTW89_QATAR][12] = 34, + [0][1][2][1][RTW89_UK][12] = 34, [0][1][2][1][RTW89_FCC][14] = 68, [0][1][2][1][RTW89_ETSI][14] = 34, [0][1][2][1][RTW89_MKK][14] = 50, @@ -46772,6 +47047,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][14] = 68, [0][1][2][1][RTW89_CN][14] = 34, [0][1][2][1][RTW89_QATAR][14] = 34, + [0][1][2][1][RTW89_UK][14] = 34, [0][1][2][1][RTW89_FCC][15] = 68, [0][1][2][1][RTW89_ETSI][15] = 34, [0][1][2][1][RTW89_MKK][15] = 70, @@ -46783,6 +47059,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][15] = 68, [0][1][2][1][RTW89_CN][15] = 127, [0][1][2][1][RTW89_QATAR][15] = 28, + [0][1][2][1][RTW89_UK][15] = 34, [0][1][2][1][RTW89_FCC][17] = 68, [0][1][2][1][RTW89_ETSI][17] = 34, [0][1][2][1][RTW89_MKK][17] = 70, @@ -46794,6 +47071,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][17] = 68, [0][1][2][1][RTW89_CN][17] = 127, [0][1][2][1][RTW89_QATAR][17] = 28, + [0][1][2][1][RTW89_UK][17] = 34, [0][1][2][1][RTW89_FCC][19] = 68, [0][1][2][1][RTW89_ETSI][19] = 34, [0][1][2][1][RTW89_MKK][19] = 70, @@ -46805,6 +47083,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][19] = 68, [0][1][2][1][RTW89_CN][19] = 127, [0][1][2][1][RTW89_QATAR][19] = 28, + [0][1][2][1][RTW89_UK][19] = 34, [0][1][2][1][RTW89_FCC][21] = 68, [0][1][2][1][RTW89_ETSI][21] = 34, [0][1][2][1][RTW89_MKK][21] = 70, @@ -46816,6 +47095,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][21] = 68, [0][1][2][1][RTW89_CN][21] = 127, [0][1][2][1][RTW89_QATAR][21] = 28, + [0][1][2][1][RTW89_UK][21] = 34, [0][1][2][1][RTW89_FCC][23] = 68, [0][1][2][1][RTW89_ETSI][23] = 34, [0][1][2][1][RTW89_MKK][23] = 70, @@ -46827,6 +47107,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][23] = 68, [0][1][2][1][RTW89_CN][23] = 127, [0][1][2][1][RTW89_QATAR][23] = 28, + [0][1][2][1][RTW89_UK][23] = 34, [0][1][2][1][RTW89_FCC][25] = 68, [0][1][2][1][RTW89_ETSI][25] = 34, [0][1][2][1][RTW89_MKK][25] = 70, @@ -46838,6 +47119,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][25] = 68, [0][1][2][1][RTW89_CN][25] = 127, [0][1][2][1][RTW89_QATAR][25] = 28, + [0][1][2][1][RTW89_UK][25] = 34, [0][1][2][1][RTW89_FCC][27] = 68, [0][1][2][1][RTW89_ETSI][27] = 34, [0][1][2][1][RTW89_MKK][27] = 70, @@ -46849,6 +47131,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][27] = 68, [0][1][2][1][RTW89_CN][27] = 127, [0][1][2][1][RTW89_QATAR][27] = 28, + [0][1][2][1][RTW89_UK][27] = 34, [0][1][2][1][RTW89_FCC][29] = 68, [0][1][2][1][RTW89_ETSI][29] = 34, [0][1][2][1][RTW89_MKK][29] = 70, @@ -46860,6 +47143,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][29] = 68, [0][1][2][1][RTW89_CN][29] = 127, [0][1][2][1][RTW89_QATAR][29] = 28, + [0][1][2][1][RTW89_UK][29] = 34, 
[0][1][2][1][RTW89_FCC][31] = 68, [0][1][2][1][RTW89_ETSI][31] = 34, [0][1][2][1][RTW89_MKK][31] = 70, @@ -46871,6 +47155,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][31] = 68, [0][1][2][1][RTW89_CN][31] = 127, [0][1][2][1][RTW89_QATAR][31] = 28, + [0][1][2][1][RTW89_UK][31] = 34, [0][1][2][1][RTW89_FCC][33] = 68, [0][1][2][1][RTW89_ETSI][33] = 34, [0][1][2][1][RTW89_MKK][33] = 70, @@ -46882,6 +47167,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][33] = 68, [0][1][2][1][RTW89_CN][33] = 127, [0][1][2][1][RTW89_QATAR][33] = 28, + [0][1][2][1][RTW89_UK][33] = 34, [0][1][2][1][RTW89_FCC][35] = 64, [0][1][2][1][RTW89_ETSI][35] = 34, [0][1][2][1][RTW89_MKK][35] = 70, @@ -46893,6 +47179,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][35] = 64, [0][1][2][1][RTW89_CN][35] = 127, [0][1][2][1][RTW89_QATAR][35] = 28, + [0][1][2][1][RTW89_UK][35] = 34, [0][1][2][1][RTW89_FCC][37] = 68, [0][1][2][1][RTW89_ETSI][37] = 127, [0][1][2][1][RTW89_MKK][37] = 70, @@ -46904,6 +47191,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][37] = 68, [0][1][2][1][RTW89_CN][37] = 127, [0][1][2][1][RTW89_QATAR][37] = 127, + [0][1][2][1][RTW89_UK][37] = 62, [0][1][2][1][RTW89_FCC][38] = 76, [0][1][2][1][RTW89_ETSI][38] = 4, [0][1][2][1][RTW89_MKK][38] = 127, @@ -46915,6 +47203,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][38] = 76, [0][1][2][1][RTW89_CN][38] = 68, [0][1][2][1][RTW89_QATAR][38] = 4, + [0][1][2][1][RTW89_UK][38] = 34, [0][1][2][1][RTW89_FCC][40] = 76, [0][1][2][1][RTW89_ETSI][40] = 4, [0][1][2][1][RTW89_MKK][40] = 127, @@ -46926,6 +47215,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][40] = 76, [0][1][2][1][RTW89_CN][40] = 70, [0][1][2][1][RTW89_QATAR][40] = 4, + [0][1][2][1][RTW89_UK][40] = 34, [0][1][2][1][RTW89_FCC][42] = 76, [0][1][2][1][RTW89_ETSI][42] = 4, [0][1][2][1][RTW89_MKK][42] = 127, @@ -46937,6 +47227,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][42] = 76, [0][1][2][1][RTW89_CN][42] = 70, [0][1][2][1][RTW89_QATAR][42] = 4, + [0][1][2][1][RTW89_UK][42] = 34, [0][1][2][1][RTW89_FCC][44] = 76, [0][1][2][1][RTW89_ETSI][44] = 4, [0][1][2][1][RTW89_MKK][44] = 127, @@ -46948,6 +47239,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][44] = 76, [0][1][2][1][RTW89_CN][44] = 70, [0][1][2][1][RTW89_QATAR][44] = 4, + [0][1][2][1][RTW89_UK][44] = 34, [0][1][2][1][RTW89_FCC][46] = 76, [0][1][2][1][RTW89_ETSI][46] = 4, [0][1][2][1][RTW89_MKK][46] = 127, @@ -46959,6 +47251,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_MEXICO][46] = 76, [0][1][2][1][RTW89_CN][46] = 70, [0][1][2][1][RTW89_QATAR][46] = 4, + [0][1][2][1][RTW89_UK][46] = 34, [1][0][2][0][RTW89_FCC][1] = 68, [1][0][2][0][RTW89_ETSI][1] = 64, [1][0][2][0][RTW89_MKK][1] = 62, @@ -46970,6 +47263,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][1] = 62, [1][0][2][0][RTW89_CN][1] = 64, [1][0][2][0][RTW89_QATAR][1] = 64, + [1][0][2][0][RTW89_UK][1] = 64, [1][0][2][0][RTW89_FCC][5] = 72, [1][0][2][0][RTW89_ETSI][5] = 64, [1][0][2][0][RTW89_MKK][5] = 62, @@ -46981,6 +47275,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][5] 
= 62, [1][0][2][0][RTW89_CN][5] = 64, [1][0][2][0][RTW89_QATAR][5] = 64, + [1][0][2][0][RTW89_UK][5] = 64, [1][0][2][0][RTW89_FCC][9] = 72, [1][0][2][0][RTW89_ETSI][9] = 64, [1][0][2][0][RTW89_MKK][9] = 62, @@ -46992,6 +47287,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][9] = 72, [1][0][2][0][RTW89_CN][9] = 64, [1][0][2][0][RTW89_QATAR][9] = 64, + [1][0][2][0][RTW89_UK][9] = 64, [1][0][2][0][RTW89_FCC][13] = 66, [1][0][2][0][RTW89_ETSI][13] = 64, [1][0][2][0][RTW89_MKK][13] = 62, @@ -47003,6 +47299,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][13] = 66, [1][0][2][0][RTW89_CN][13] = 64, [1][0][2][0][RTW89_QATAR][13] = 64, + [1][0][2][0][RTW89_UK][13] = 64, [1][0][2][0][RTW89_FCC][16] = 62, [1][0][2][0][RTW89_ETSI][16] = 64, [1][0][2][0][RTW89_MKK][16] = 72, @@ -47014,6 +47311,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][16] = 62, [1][0][2][0][RTW89_CN][16] = 127, [1][0][2][0][RTW89_QATAR][16] = 52, + [1][0][2][0][RTW89_UK][16] = 64, [1][0][2][0][RTW89_FCC][20] = 72, [1][0][2][0][RTW89_ETSI][20] = 64, [1][0][2][0][RTW89_MKK][20] = 72, @@ -47025,6 +47323,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][20] = 72, [1][0][2][0][RTW89_CN][20] = 127, [1][0][2][0][RTW89_QATAR][20] = 52, + [1][0][2][0][RTW89_UK][20] = 64, [1][0][2][0][RTW89_FCC][24] = 72, [1][0][2][0][RTW89_ETSI][24] = 64, [1][0][2][0][RTW89_MKK][24] = 72, @@ -47036,6 +47335,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][24] = 72, [1][0][2][0][RTW89_CN][24] = 127, [1][0][2][0][RTW89_QATAR][24] = 52, + [1][0][2][0][RTW89_UK][24] = 64, [1][0][2][0][RTW89_FCC][28] = 72, [1][0][2][0][RTW89_ETSI][28] = 64, [1][0][2][0][RTW89_MKK][28] = 72, @@ -47047,6 +47347,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][28] = 72, [1][0][2][0][RTW89_CN][28] = 127, [1][0][2][0][RTW89_QATAR][28] = 52, + [1][0][2][0][RTW89_UK][28] = 64, [1][0][2][0][RTW89_FCC][32] = 72, [1][0][2][0][RTW89_ETSI][32] = 64, [1][0][2][0][RTW89_MKK][32] = 72, @@ -47058,6 +47359,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][32] = 72, [1][0][2][0][RTW89_CN][32] = 127, [1][0][2][0][RTW89_QATAR][32] = 52, + [1][0][2][0][RTW89_UK][32] = 64, [1][0][2][0][RTW89_FCC][36] = 72, [1][0][2][0][RTW89_ETSI][36] = 127, [1][0][2][0][RTW89_MKK][36] = 72, @@ -47069,6 +47371,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][36] = 72, [1][0][2][0][RTW89_CN][36] = 127, [1][0][2][0][RTW89_QATAR][36] = 127, + [1][0][2][0][RTW89_UK][36] = 72, [1][0][2][0][RTW89_FCC][39] = 72, [1][0][2][0][RTW89_ETSI][39] = 28, [1][0][2][0][RTW89_MKK][39] = 127, @@ -47080,6 +47383,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][39] = 72, [1][0][2][0][RTW89_CN][39] = 68, [1][0][2][0][RTW89_QATAR][39] = 28, + [1][0][2][0][RTW89_UK][39] = 64, [1][0][2][0][RTW89_FCC][43] = 72, [1][0][2][0][RTW89_ETSI][43] = 28, [1][0][2][0][RTW89_MKK][43] = 127, @@ -47091,6 +47395,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MEXICO][43] = 72, [1][0][2][0][RTW89_CN][43] = 72, [1][0][2][0][RTW89_QATAR][43] = 28, + [1][0][2][0][RTW89_UK][43] = 64, [1][1][2][0][RTW89_FCC][1] = 58, [1][1][2][0][RTW89_ETSI][1] = 52, [1][1][2][0][RTW89_MKK][1] = 50, @@ 
-47102,6 +47407,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][1] = 50, [1][1][2][0][RTW89_CN][1] = 52, [1][1][2][0][RTW89_QATAR][1] = 52, + [1][1][2][0][RTW89_UK][1] = 52, [1][1][2][0][RTW89_FCC][5] = 72, [1][1][2][0][RTW89_ETSI][5] = 52, [1][1][2][0][RTW89_MKK][5] = 50, @@ -47113,6 +47419,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][5] = 50, [1][1][2][0][RTW89_CN][5] = 52, [1][1][2][0][RTW89_QATAR][5] = 52, + [1][1][2][0][RTW89_UK][5] = 52, [1][1][2][0][RTW89_FCC][9] = 72, [1][1][2][0][RTW89_ETSI][9] = 52, [1][1][2][0][RTW89_MKK][9] = 50, @@ -47124,6 +47431,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][9] = 72, [1][1][2][0][RTW89_CN][9] = 52, [1][1][2][0][RTW89_QATAR][9] = 52, + [1][1][2][0][RTW89_UK][9] = 52, [1][1][2][0][RTW89_FCC][13] = 58, [1][1][2][0][RTW89_ETSI][13] = 52, [1][1][2][0][RTW89_MKK][13] = 50, @@ -47135,6 +47443,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][13] = 58, [1][1][2][0][RTW89_CN][13] = 52, [1][1][2][0][RTW89_QATAR][13] = 52, + [1][1][2][0][RTW89_UK][13] = 52, [1][1][2][0][RTW89_FCC][16] = 56, [1][1][2][0][RTW89_ETSI][16] = 52, [1][1][2][0][RTW89_MKK][16] = 72, @@ -47146,6 +47455,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][16] = 56, [1][1][2][0][RTW89_CN][16] = 127, [1][1][2][0][RTW89_QATAR][16] = 40, + [1][1][2][0][RTW89_UK][16] = 52, [1][1][2][0][RTW89_FCC][20] = 72, [1][1][2][0][RTW89_ETSI][20] = 52, [1][1][2][0][RTW89_MKK][20] = 72, @@ -47157,6 +47467,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][20] = 72, [1][1][2][0][RTW89_CN][20] = 127, [1][1][2][0][RTW89_QATAR][20] = 40, + [1][1][2][0][RTW89_UK][20] = 52, [1][1][2][0][RTW89_FCC][24] = 72, [1][1][2][0][RTW89_ETSI][24] = 52, [1][1][2][0][RTW89_MKK][24] = 72, @@ -47168,6 +47479,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][24] = 72, [1][1][2][0][RTW89_CN][24] = 127, [1][1][2][0][RTW89_QATAR][24] = 40, + [1][1][2][0][RTW89_UK][24] = 52, [1][1][2][0][RTW89_FCC][28] = 72, [1][1][2][0][RTW89_ETSI][28] = 52, [1][1][2][0][RTW89_MKK][28] = 72, @@ -47179,6 +47491,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][28] = 72, [1][1][2][0][RTW89_CN][28] = 127, [1][1][2][0][RTW89_QATAR][28] = 40, + [1][1][2][0][RTW89_UK][28] = 52, [1][1][2][0][RTW89_FCC][32] = 68, [1][1][2][0][RTW89_ETSI][32] = 52, [1][1][2][0][RTW89_MKK][32] = 72, @@ -47190,6 +47503,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][32] = 68, [1][1][2][0][RTW89_CN][32] = 127, [1][1][2][0][RTW89_QATAR][32] = 40, + [1][1][2][0][RTW89_UK][32] = 52, [1][1][2][0][RTW89_FCC][36] = 72, [1][1][2][0][RTW89_ETSI][36] = 127, [1][1][2][0][RTW89_MKK][36] = 72, @@ -47201,6 +47515,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][36] = 72, [1][1][2][0][RTW89_CN][36] = 127, [1][1][2][0][RTW89_QATAR][36] = 127, + [1][1][2][0][RTW89_UK][36] = 72, [1][1][2][0][RTW89_FCC][39] = 72, [1][1][2][0][RTW89_ETSI][39] = 16, [1][1][2][0][RTW89_MKK][39] = 127, @@ -47212,6 +47527,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][39] = 72, [1][1][2][0][RTW89_CN][39] = 68, [1][1][2][0][RTW89_QATAR][39] = 16, + [1][1][2][0][RTW89_UK][39] = 
52, [1][1][2][0][RTW89_FCC][43] = 72, [1][1][2][0][RTW89_ETSI][43] = 16, [1][1][2][0][RTW89_MKK][43] = 127, @@ -47223,6 +47539,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MEXICO][43] = 72, [1][1][2][0][RTW89_CN][43] = 72, [1][1][2][0][RTW89_QATAR][43] = 16, + [1][1][2][0][RTW89_UK][43] = 52, [1][1][2][1][RTW89_FCC][1] = 58, [1][1][2][1][RTW89_ETSI][1] = 40, [1][1][2][1][RTW89_MKK][1] = 50, @@ -47234,6 +47551,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][1] = 50, [1][1][2][1][RTW89_CN][1] = 40, [1][1][2][1][RTW89_QATAR][1] = 40, + [1][1][2][1][RTW89_UK][1] = 40, [1][1][2][1][RTW89_FCC][5] = 68, [1][1][2][1][RTW89_ETSI][5] = 40, [1][1][2][1][RTW89_MKK][5] = 50, @@ -47245,6 +47563,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][5] = 50, [1][1][2][1][RTW89_CN][5] = 40, [1][1][2][1][RTW89_QATAR][5] = 40, + [1][1][2][1][RTW89_UK][5] = 40, [1][1][2][1][RTW89_FCC][9] = 68, [1][1][2][1][RTW89_ETSI][9] = 40, [1][1][2][1][RTW89_MKK][9] = 50, @@ -47256,6 +47575,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][9] = 68, [1][1][2][1][RTW89_CN][9] = 40, [1][1][2][1][RTW89_QATAR][9] = 40, + [1][1][2][1][RTW89_UK][9] = 40, [1][1][2][1][RTW89_FCC][13] = 58, [1][1][2][1][RTW89_ETSI][13] = 40, [1][1][2][1][RTW89_MKK][13] = 50, @@ -47267,6 +47587,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][13] = 58, [1][1][2][1][RTW89_CN][13] = 40, [1][1][2][1][RTW89_QATAR][13] = 40, + [1][1][2][1][RTW89_UK][13] = 40, [1][1][2][1][RTW89_FCC][16] = 56, [1][1][2][1][RTW89_ETSI][16] = 40, [1][1][2][1][RTW89_MKK][16] = 72, @@ -47278,6 +47599,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][16] = 56, [1][1][2][1][RTW89_CN][16] = 127, [1][1][2][1][RTW89_QATAR][16] = 28, + [1][1][2][1][RTW89_UK][16] = 40, [1][1][2][1][RTW89_FCC][20] = 68, [1][1][2][1][RTW89_ETSI][20] = 40, [1][1][2][1][RTW89_MKK][20] = 72, @@ -47289,6 +47611,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][20] = 68, [1][1][2][1][RTW89_CN][20] = 127, [1][1][2][1][RTW89_QATAR][20] = 28, + [1][1][2][1][RTW89_UK][20] = 40, [1][1][2][1][RTW89_FCC][24] = 68, [1][1][2][1][RTW89_ETSI][24] = 40, [1][1][2][1][RTW89_MKK][24] = 72, @@ -47300,6 +47623,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][24] = 68, [1][1][2][1][RTW89_CN][24] = 127, [1][1][2][1][RTW89_QATAR][24] = 28, + [1][1][2][1][RTW89_UK][24] = 40, [1][1][2][1][RTW89_FCC][28] = 68, [1][1][2][1][RTW89_ETSI][28] = 40, [1][1][2][1][RTW89_MKK][28] = 72, @@ -47311,6 +47635,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][28] = 68, [1][1][2][1][RTW89_CN][28] = 127, [1][1][2][1][RTW89_QATAR][28] = 28, + [1][1][2][1][RTW89_UK][28] = 40, [1][1][2][1][RTW89_FCC][32] = 68, [1][1][2][1][RTW89_ETSI][32] = 40, [1][1][2][1][RTW89_MKK][32] = 72, @@ -47322,6 +47647,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][32] = 68, [1][1][2][1][RTW89_CN][32] = 127, [1][1][2][1][RTW89_QATAR][32] = 28, + [1][1][2][1][RTW89_UK][32] = 40, [1][1][2][1][RTW89_FCC][36] = 68, [1][1][2][1][RTW89_ETSI][36] = 127, [1][1][2][1][RTW89_MKK][36] = 72, @@ -47333,6 +47659,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][36] 
= 68, [1][1][2][1][RTW89_CN][36] = 127, [1][1][2][1][RTW89_QATAR][36] = 127, + [1][1][2][1][RTW89_UK][36] = 66, [1][1][2][1][RTW89_FCC][39] = 72, [1][1][2][1][RTW89_ETSI][39] = 4, [1][1][2][1][RTW89_MKK][39] = 127, @@ -47344,6 +47671,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][39] = 72, [1][1][2][1][RTW89_CN][39] = 62, [1][1][2][1][RTW89_QATAR][39] = 4, + [1][1][2][1][RTW89_UK][39] = 40, [1][1][2][1][RTW89_FCC][43] = 72, [1][1][2][1][RTW89_ETSI][43] = 4, [1][1][2][1][RTW89_MKK][43] = 127, @@ -47355,6 +47683,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MEXICO][43] = 72, [1][1][2][1][RTW89_CN][43] = 72, [1][1][2][1][RTW89_QATAR][43] = 4, + [1][1][2][1][RTW89_UK][43] = 40, [2][0][2][0][RTW89_FCC][3] = 64, [2][0][2][0][RTW89_ETSI][3] = 64, [2][0][2][0][RTW89_MKK][3] = 64, @@ -47366,6 +47695,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MEXICO][3] = 62, [2][0][2][0][RTW89_CN][3] = 64, [2][0][2][0][RTW89_QATAR][3] = 64, + [2][0][2][0][RTW89_UK][3] = 64, [2][0][2][0][RTW89_FCC][11] = 64, [2][0][2][0][RTW89_ETSI][11] = 64, [2][0][2][0][RTW89_MKK][11] = 64, @@ -47377,6 +47707,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MEXICO][11] = 64, [2][0][2][0][RTW89_CN][11] = 64, [2][0][2][0][RTW89_QATAR][11] = 64, + [2][0][2][0][RTW89_UK][11] = 64, [2][0][2][0][RTW89_FCC][18] = 62, [2][0][2][0][RTW89_ETSI][18] = 64, [2][0][2][0][RTW89_MKK][18] = 72, @@ -47388,6 +47719,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MEXICO][18] = 62, [2][0][2][0][RTW89_CN][18] = 127, [2][0][2][0][RTW89_QATAR][18] = 52, + [2][0][2][0][RTW89_UK][18] = 64, [2][0][2][0][RTW89_FCC][26] = 72, [2][0][2][0][RTW89_ETSI][26] = 64, [2][0][2][0][RTW89_MKK][26] = 72, @@ -47399,6 +47731,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MEXICO][26] = 72, [2][0][2][0][RTW89_CN][26] = 127, [2][0][2][0][RTW89_QATAR][26] = 52, + [2][0][2][0][RTW89_UK][26] = 64, [2][0][2][0][RTW89_FCC][34] = 72, [2][0][2][0][RTW89_ETSI][34] = 127, [2][0][2][0][RTW89_MKK][34] = 72, @@ -47410,6 +47743,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MEXICO][34] = 72, [2][0][2][0][RTW89_CN][34] = 127, [2][0][2][0][RTW89_QATAR][34] = 127, + [2][0][2][0][RTW89_UK][34] = 72, [2][0][2][0][RTW89_FCC][41] = 72, [2][0][2][0][RTW89_ETSI][41] = 28, [2][0][2][0][RTW89_MKK][41] = 127, @@ -47421,6 +47755,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_MEXICO][41] = 72, [2][0][2][0][RTW89_CN][41] = 68, [2][0][2][0][RTW89_QATAR][41] = 28, + [2][0][2][0][RTW89_UK][41] = 64, [2][1][2][0][RTW89_FCC][3] = 56, [2][1][2][0][RTW89_ETSI][3] = 52, [2][1][2][0][RTW89_MKK][3] = 52, @@ -47432,6 +47767,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MEXICO][3] = 50, [2][1][2][0][RTW89_CN][3] = 52, [2][1][2][0][RTW89_QATAR][3] = 52, + [2][1][2][0][RTW89_UK][3] = 52, [2][1][2][0][RTW89_FCC][11] = 56, [2][1][2][0][RTW89_ETSI][11] = 52, [2][1][2][0][RTW89_MKK][11] = 52, @@ -47443,6 +47779,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MEXICO][11] = 56, [2][1][2][0][RTW89_CN][11] = 52, [2][1][2][0][RTW89_QATAR][11] = 52, + [2][1][2][0][RTW89_UK][11] = 52, [2][1][2][0][RTW89_FCC][18] = 56, [2][1][2][0][RTW89_ETSI][18] = 52, [2][1][2][0][RTW89_MKK][18] = 72, @@ 
-47454,6 +47791,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MEXICO][18] = 56, [2][1][2][0][RTW89_CN][18] = 127, [2][1][2][0][RTW89_QATAR][18] = 40, + [2][1][2][0][RTW89_UK][18] = 52, [2][1][2][0][RTW89_FCC][26] = 72, [2][1][2][0][RTW89_ETSI][26] = 52, [2][1][2][0][RTW89_MKK][26] = 72, @@ -47465,6 +47803,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MEXICO][26] = 72, [2][1][2][0][RTW89_CN][26] = 127, [2][1][2][0][RTW89_QATAR][26] = 40, + [2][1][2][0][RTW89_UK][26] = 52, [2][1][2][0][RTW89_FCC][34] = 72, [2][1][2][0][RTW89_ETSI][34] = 127, [2][1][2][0][RTW89_MKK][34] = 72, @@ -47476,6 +47815,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MEXICO][34] = 72, [2][1][2][0][RTW89_CN][34] = 127, [2][1][2][0][RTW89_QATAR][34] = 127, + [2][1][2][0][RTW89_UK][34] = 72, [2][1][2][0][RTW89_FCC][41] = 72, [2][1][2][0][RTW89_ETSI][41] = 16, [2][1][2][0][RTW89_MKK][41] = 127, @@ -47487,6 +47827,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_MEXICO][41] = 72, [2][1][2][0][RTW89_CN][41] = 68, [2][1][2][0][RTW89_QATAR][41] = 16, + [2][1][2][0][RTW89_UK][41] = 52, [2][1][2][1][RTW89_FCC][3] = 56, [2][1][2][1][RTW89_ETSI][3] = 40, [2][1][2][1][RTW89_MKK][3] = 52, @@ -47498,6 +47839,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MEXICO][3] = 50, [2][1][2][1][RTW89_CN][3] = 40, [2][1][2][1][RTW89_QATAR][3] = 40, + [2][1][2][1][RTW89_UK][3] = 40, [2][1][2][1][RTW89_FCC][11] = 56, [2][1][2][1][RTW89_ETSI][11] = 40, [2][1][2][1][RTW89_MKK][11] = 52, @@ -47509,6 +47851,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MEXICO][11] = 56, [2][1][2][1][RTW89_CN][11] = 40, [2][1][2][1][RTW89_QATAR][11] = 40, + [2][1][2][1][RTW89_UK][11] = 40, [2][1][2][1][RTW89_FCC][18] = 56, [2][1][2][1][RTW89_ETSI][18] = 40, [2][1][2][1][RTW89_MKK][18] = 72, @@ -47520,6 +47863,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MEXICO][18] = 56, [2][1][2][1][RTW89_CN][18] = 127, [2][1][2][1][RTW89_QATAR][18] = 28, + [2][1][2][1][RTW89_UK][18] = 40, [2][1][2][1][RTW89_FCC][26] = 68, [2][1][2][1][RTW89_ETSI][26] = 40, [2][1][2][1][RTW89_MKK][26] = 72, @@ -47531,6 +47875,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MEXICO][26] = 68, [2][1][2][1][RTW89_CN][26] = 127, [2][1][2][1][RTW89_QATAR][26] = 28, + [2][1][2][1][RTW89_UK][26] = 40, [2][1][2][1][RTW89_FCC][34] = 68, [2][1][2][1][RTW89_ETSI][34] = 127, [2][1][2][1][RTW89_MKK][34] = 72, @@ -47542,6 +47887,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MEXICO][34] = 68, [2][1][2][1][RTW89_CN][34] = 127, [2][1][2][1][RTW89_QATAR][34] = 127, + [2][1][2][1][RTW89_UK][34] = 66, [2][1][2][1][RTW89_FCC][41] = 72, [2][1][2][1][RTW89_ETSI][41] = 4, [2][1][2][1][RTW89_MKK][41] = 127, @@ -47553,6 +47899,7 @@ const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_MEXICO][41] = 72, [2][1][2][1][RTW89_CN][41] = 64, [2][1][2][1][RTW89_QATAR][41] = 4, + [2][1][2][1][RTW89_UK][41] = 40, }; const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] @@ -47652,6 +47999,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][0] = 70, [0][0][RTW89_CN][0] = 32, [0][0][RTW89_QATAR][0] = 32, + [0][0][RTW89_UK][0] = 32, [0][0][RTW89_FCC][1] = 70, 
[0][0][RTW89_ETSI][1] = 32, [0][0][RTW89_MKK][1] = 40, @@ -47663,6 +48011,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][1] = 70, [0][0][RTW89_CN][1] = 32, [0][0][RTW89_QATAR][1] = 32, + [0][0][RTW89_UK][1] = 32, [0][0][RTW89_FCC][2] = 74, [0][0][RTW89_ETSI][2] = 32, [0][0][RTW89_MKK][2] = 40, @@ -47674,6 +48023,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][2] = 74, [0][0][RTW89_CN][2] = 32, [0][0][RTW89_QATAR][2] = 32, + [0][0][RTW89_UK][2] = 32, [0][0][RTW89_FCC][3] = 78, [0][0][RTW89_ETSI][3] = 32, [0][0][RTW89_MKK][3] = 40, @@ -47685,6 +48035,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][3] = 78, [0][0][RTW89_CN][3] = 32, [0][0][RTW89_QATAR][3] = 32, + [0][0][RTW89_UK][3] = 32, [0][0][RTW89_FCC][4] = 78, [0][0][RTW89_ETSI][4] = 32, [0][0][RTW89_MKK][4] = 40, @@ -47696,6 +48047,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][4] = 78, [0][0][RTW89_CN][4] = 32, [0][0][RTW89_QATAR][4] = 32, + [0][0][RTW89_UK][4] = 32, [0][0][RTW89_FCC][5] = 78, [0][0][RTW89_ETSI][5] = 32, [0][0][RTW89_MKK][5] = 40, @@ -47707,6 +48059,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][5] = 78, [0][0][RTW89_CN][5] = 32, [0][0][RTW89_QATAR][5] = 32, + [0][0][RTW89_UK][5] = 32, [0][0][RTW89_FCC][6] = 78, [0][0][RTW89_ETSI][6] = 32, [0][0][RTW89_MKK][6] = 40, @@ -47718,6 +48071,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][6] = 78, [0][0][RTW89_CN][6] = 32, [0][0][RTW89_QATAR][6] = 32, + [0][0][RTW89_UK][6] = 32, [0][0][RTW89_FCC][7] = 78, [0][0][RTW89_ETSI][7] = 32, [0][0][RTW89_MKK][7] = 40, @@ -47729,6 +48083,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][7] = 78, [0][0][RTW89_CN][7] = 32, [0][0][RTW89_QATAR][7] = 32, + [0][0][RTW89_UK][7] = 32, [0][0][RTW89_FCC][8] = 74, [0][0][RTW89_ETSI][8] = 32, [0][0][RTW89_MKK][8] = 40, @@ -47740,6 +48095,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][8] = 74, [0][0][RTW89_CN][8] = 32, [0][0][RTW89_QATAR][8] = 32, + [0][0][RTW89_UK][8] = 32, [0][0][RTW89_FCC][9] = 70, [0][0][RTW89_ETSI][9] = 32, [0][0][RTW89_MKK][9] = 40, @@ -47751,6 +48107,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][9] = 70, [0][0][RTW89_CN][9] = 32, [0][0][RTW89_QATAR][9] = 32, + [0][0][RTW89_UK][9] = 32, [0][0][RTW89_FCC][10] = 70, [0][0][RTW89_ETSI][10] = 32, [0][0][RTW89_MKK][10] = 40, @@ -47762,6 +48119,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][10] = 70, [0][0][RTW89_CN][10] = 32, [0][0][RTW89_QATAR][10] = 32, + [0][0][RTW89_UK][10] = 32, [0][0][RTW89_FCC][11] = 58, [0][0][RTW89_ETSI][11] = 32, [0][0][RTW89_MKK][11] = 40, @@ -47773,6 +48131,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][11] = 58, [0][0][RTW89_CN][11] = 32, [0][0][RTW89_QATAR][11] = 32, + [0][0][RTW89_UK][11] = 32, [0][0][RTW89_FCC][12] = 34, [0][0][RTW89_ETSI][12] = 32, [0][0][RTW89_MKK][12] = 40, @@ -47784,6 +48143,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][12] = 34, [0][0][RTW89_CN][12] = 32, [0][0][RTW89_QATAR][12] = 32, + [0][0][RTW89_UK][12] = 32, [0][0][RTW89_FCC][13] = 127, [0][0][RTW89_ETSI][13] = 127, [0][0][RTW89_MKK][13] = 127, @@ -47795,6 +48155,7 @@ const s8 
rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][13] = 127, [0][0][RTW89_CN][13] = 127, [0][0][RTW89_QATAR][13] = 127, + [0][0][RTW89_UK][13] = 127, [0][1][RTW89_FCC][0] = 64, [0][1][RTW89_ETSI][0] = 20, [0][1][RTW89_MKK][0] = 28, @@ -47806,6 +48167,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][0] = 64, [0][1][RTW89_CN][0] = 20, [0][1][RTW89_QATAR][0] = 20, + [0][1][RTW89_UK][0] = 20, [0][1][RTW89_FCC][1] = 64, [0][1][RTW89_ETSI][1] = 20, [0][1][RTW89_MKK][1] = 28, @@ -47817,6 +48179,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][1] = 64, [0][1][RTW89_CN][1] = 20, [0][1][RTW89_QATAR][1] = 20, + [0][1][RTW89_UK][1] = 20, [0][1][RTW89_FCC][2] = 68, [0][1][RTW89_ETSI][2] = 20, [0][1][RTW89_MKK][2] = 28, @@ -47828,6 +48191,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][2] = 68, [0][1][RTW89_CN][2] = 20, [0][1][RTW89_QATAR][2] = 20, + [0][1][RTW89_UK][2] = 20, [0][1][RTW89_FCC][3] = 72, [0][1][RTW89_ETSI][3] = 20, [0][1][RTW89_MKK][3] = 28, @@ -47839,6 +48203,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][3] = 72, [0][1][RTW89_CN][3] = 20, [0][1][RTW89_QATAR][3] = 20, + [0][1][RTW89_UK][3] = 20, [0][1][RTW89_FCC][4] = 76, [0][1][RTW89_ETSI][4] = 20, [0][1][RTW89_MKK][4] = 28, @@ -47850,6 +48215,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][4] = 76, [0][1][RTW89_CN][4] = 20, [0][1][RTW89_QATAR][4] = 20, + [0][1][RTW89_UK][4] = 20, [0][1][RTW89_FCC][5] = 78, [0][1][RTW89_ETSI][5] = 20, [0][1][RTW89_MKK][5] = 28, @@ -47861,6 +48227,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][5] = 78, [0][1][RTW89_CN][5] = 20, [0][1][RTW89_QATAR][5] = 20, + [0][1][RTW89_UK][5] = 20, [0][1][RTW89_FCC][6] = 76, [0][1][RTW89_ETSI][6] = 20, [0][1][RTW89_MKK][6] = 28, @@ -47872,6 +48239,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][6] = 76, [0][1][RTW89_CN][6] = 20, [0][1][RTW89_QATAR][6] = 20, + [0][1][RTW89_UK][6] = 20, [0][1][RTW89_FCC][7] = 72, [0][1][RTW89_ETSI][7] = 20, [0][1][RTW89_MKK][7] = 28, @@ -47883,6 +48251,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][7] = 72, [0][1][RTW89_CN][7] = 20, [0][1][RTW89_QATAR][7] = 20, + [0][1][RTW89_UK][7] = 20, [0][1][RTW89_FCC][8] = 68, [0][1][RTW89_ETSI][8] = 20, [0][1][RTW89_MKK][8] = 28, @@ -47894,6 +48263,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][8] = 68, [0][1][RTW89_CN][8] = 20, [0][1][RTW89_QATAR][8] = 20, + [0][1][RTW89_UK][8] = 20, [0][1][RTW89_FCC][9] = 64, [0][1][RTW89_ETSI][9] = 20, [0][1][RTW89_MKK][9] = 28, @@ -47905,6 +48275,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][9] = 64, [0][1][RTW89_CN][9] = 20, [0][1][RTW89_QATAR][9] = 20, + [0][1][RTW89_UK][9] = 20, [0][1][RTW89_FCC][10] = 64, [0][1][RTW89_ETSI][10] = 20, [0][1][RTW89_MKK][10] = 28, @@ -47916,6 +48287,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][10] = 64, [0][1][RTW89_CN][10] = 20, [0][1][RTW89_QATAR][10] = 20, + [0][1][RTW89_UK][10] = 20, [0][1][RTW89_FCC][11] = 54, [0][1][RTW89_ETSI][11] = 20, [0][1][RTW89_MKK][11] = 28, @@ -47927,6 +48299,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][11] = 54, [0][1][RTW89_CN][11] = 20, 
[0][1][RTW89_QATAR][11] = 20, + [0][1][RTW89_UK][11] = 20, [0][1][RTW89_FCC][12] = 32, [0][1][RTW89_ETSI][12] = 20, [0][1][RTW89_MKK][12] = 28, @@ -47938,6 +48311,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][12] = 32, [0][1][RTW89_CN][12] = 20, [0][1][RTW89_QATAR][12] = 20, + [0][1][RTW89_UK][12] = 20, [0][1][RTW89_FCC][13] = 127, [0][1][RTW89_ETSI][13] = 127, [0][1][RTW89_MKK][13] = 127, @@ -47949,6 +48323,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][13] = 127, [0][1][RTW89_CN][13] = 127, [0][1][RTW89_QATAR][13] = 127, + [0][1][RTW89_UK][13] = 127, [1][0][RTW89_FCC][0] = 72, [1][0][RTW89_ETSI][0] = 42, [1][0][RTW89_MKK][0] = 50, @@ -47960,6 +48335,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][0] = 72, [1][0][RTW89_CN][0] = 42, [1][0][RTW89_QATAR][0] = 42, + [1][0][RTW89_UK][0] = 42, [1][0][RTW89_FCC][1] = 72, [1][0][RTW89_ETSI][1] = 42, [1][0][RTW89_MKK][1] = 50, @@ -47971,6 +48347,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][1] = 72, [1][0][RTW89_CN][1] = 42, [1][0][RTW89_QATAR][1] = 42, + [1][0][RTW89_UK][1] = 42, [1][0][RTW89_FCC][2] = 76, [1][0][RTW89_ETSI][2] = 42, [1][0][RTW89_MKK][2] = 50, @@ -47982,6 +48359,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][2] = 76, [1][0][RTW89_CN][2] = 42, [1][0][RTW89_QATAR][2] = 42, + [1][0][RTW89_UK][2] = 42, [1][0][RTW89_FCC][3] = 78, [1][0][RTW89_ETSI][3] = 42, [1][0][RTW89_MKK][3] = 50, @@ -47993,6 +48371,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][3] = 78, [1][0][RTW89_CN][3] = 42, [1][0][RTW89_QATAR][3] = 42, + [1][0][RTW89_UK][3] = 42, [1][0][RTW89_FCC][4] = 78, [1][0][RTW89_ETSI][4] = 42, [1][0][RTW89_MKK][4] = 50, @@ -48004,6 +48383,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][4] = 78, [1][0][RTW89_CN][4] = 42, [1][0][RTW89_QATAR][4] = 42, + [1][0][RTW89_UK][4] = 42, [1][0][RTW89_FCC][5] = 78, [1][0][RTW89_ETSI][5] = 42, [1][0][RTW89_MKK][5] = 50, @@ -48015,6 +48395,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][5] = 78, [1][0][RTW89_CN][5] = 42, [1][0][RTW89_QATAR][5] = 42, + [1][0][RTW89_UK][5] = 42, [1][0][RTW89_FCC][6] = 78, [1][0][RTW89_ETSI][6] = 42, [1][0][RTW89_MKK][6] = 50, @@ -48026,6 +48407,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][6] = 78, [1][0][RTW89_CN][6] = 42, [1][0][RTW89_QATAR][6] = 42, + [1][0][RTW89_UK][6] = 42, [1][0][RTW89_FCC][7] = 78, [1][0][RTW89_ETSI][7] = 42, [1][0][RTW89_MKK][7] = 50, @@ -48037,6 +48419,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][7] = 78, [1][0][RTW89_CN][7] = 42, [1][0][RTW89_QATAR][7] = 42, + [1][0][RTW89_UK][7] = 42, [1][0][RTW89_FCC][8] = 78, [1][0][RTW89_ETSI][8] = 42, [1][0][RTW89_MKK][8] = 50, @@ -48048,6 +48431,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][8] = 78, [1][0][RTW89_CN][8] = 42, [1][0][RTW89_QATAR][8] = 42, + [1][0][RTW89_UK][8] = 42, [1][0][RTW89_FCC][9] = 74, [1][0][RTW89_ETSI][9] = 42, [1][0][RTW89_MKK][9] = 50, @@ -48059,6 +48443,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][9] = 74, [1][0][RTW89_CN][9] = 42, [1][0][RTW89_QATAR][9] = 42, + [1][0][RTW89_UK][9] = 42, [1][0][RTW89_FCC][10] = 74, [1][0][RTW89_ETSI][10] = 
42, [1][0][RTW89_MKK][10] = 50, @@ -48070,6 +48455,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][10] = 74, [1][0][RTW89_CN][10] = 42, [1][0][RTW89_QATAR][10] = 42, + [1][0][RTW89_UK][10] = 42, [1][0][RTW89_FCC][11] = 64, [1][0][RTW89_ETSI][11] = 42, [1][0][RTW89_MKK][11] = 50, @@ -48081,6 +48467,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][11] = 64, [1][0][RTW89_CN][11] = 42, [1][0][RTW89_QATAR][11] = 42, + [1][0][RTW89_UK][11] = 42, [1][0][RTW89_FCC][12] = 36, [1][0][RTW89_ETSI][12] = 42, [1][0][RTW89_MKK][12] = 50, @@ -48092,6 +48479,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][12] = 36, [1][0][RTW89_CN][12] = 42, [1][0][RTW89_QATAR][12] = 42, + [1][0][RTW89_UK][12] = 42, [1][0][RTW89_FCC][13] = 127, [1][0][RTW89_ETSI][13] = 127, [1][0][RTW89_MKK][13] = 127, @@ -48103,6 +48491,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][13] = 127, [1][0][RTW89_CN][13] = 127, [1][0][RTW89_QATAR][13] = 127, + [1][0][RTW89_UK][13] = 127, [1][1][RTW89_FCC][0] = 66, [1][1][RTW89_ETSI][0] = 30, [1][1][RTW89_MKK][0] = 38, @@ -48114,6 +48503,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][0] = 66, [1][1][RTW89_CN][0] = 30, [1][1][RTW89_QATAR][0] = 30, + [1][1][RTW89_UK][0] = 30, [1][1][RTW89_FCC][1] = 66, [1][1][RTW89_ETSI][1] = 30, [1][1][RTW89_MKK][1] = 38, @@ -48125,6 +48515,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][1] = 66, [1][1][RTW89_CN][1] = 30, [1][1][RTW89_QATAR][1] = 30, + [1][1][RTW89_UK][1] = 30, [1][1][RTW89_FCC][2] = 70, [1][1][RTW89_ETSI][2] = 30, [1][1][RTW89_MKK][2] = 38, @@ -48136,6 +48527,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][2] = 70, [1][1][RTW89_CN][2] = 30, [1][1][RTW89_QATAR][2] = 30, + [1][1][RTW89_UK][2] = 30, [1][1][RTW89_FCC][3] = 74, [1][1][RTW89_ETSI][3] = 30, [1][1][RTW89_MKK][3] = 38, @@ -48147,6 +48539,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][3] = 74, [1][1][RTW89_CN][3] = 30, [1][1][RTW89_QATAR][3] = 30, + [1][1][RTW89_UK][3] = 30, [1][1][RTW89_FCC][4] = 78, [1][1][RTW89_ETSI][4] = 30, [1][1][RTW89_MKK][4] = 38, @@ -48158,6 +48551,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][4] = 78, [1][1][RTW89_CN][4] = 30, [1][1][RTW89_QATAR][4] = 30, + [1][1][RTW89_UK][4] = 30, [1][1][RTW89_FCC][5] = 78, [1][1][RTW89_ETSI][5] = 30, [1][1][RTW89_MKK][5] = 38, @@ -48169,6 +48563,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][5] = 78, [1][1][RTW89_CN][5] = 30, [1][1][RTW89_QATAR][5] = 30, + [1][1][RTW89_UK][5] = 30, [1][1][RTW89_FCC][6] = 78, [1][1][RTW89_ETSI][6] = 30, [1][1][RTW89_MKK][6] = 38, @@ -48180,6 +48575,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][6] = 78, [1][1][RTW89_CN][6] = 30, [1][1][RTW89_QATAR][6] = 30, + [1][1][RTW89_UK][6] = 30, [1][1][RTW89_FCC][7] = 74, [1][1][RTW89_ETSI][7] = 30, [1][1][RTW89_MKK][7] = 38, @@ -48191,6 +48587,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][7] = 74, [1][1][RTW89_CN][7] = 30, [1][1][RTW89_QATAR][7] = 30, + [1][1][RTW89_UK][7] = 30, [1][1][RTW89_FCC][8] = 70, [1][1][RTW89_ETSI][8] = 30, [1][1][RTW89_MKK][8] = 38, @@ -48202,6 +48599,7 @@ const s8 
rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][8] = 70, [1][1][RTW89_CN][8] = 30, [1][1][RTW89_QATAR][8] = 30, + [1][1][RTW89_UK][8] = 30, [1][1][RTW89_FCC][9] = 66, [1][1][RTW89_ETSI][9] = 30, [1][1][RTW89_MKK][9] = 38, @@ -48213,6 +48611,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][9] = 66, [1][1][RTW89_CN][9] = 30, [1][1][RTW89_QATAR][9] = 30, + [1][1][RTW89_UK][9] = 30, [1][1][RTW89_FCC][10] = 66, [1][1][RTW89_ETSI][10] = 30, [1][1][RTW89_MKK][10] = 38, @@ -48224,6 +48623,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][10] = 66, [1][1][RTW89_CN][10] = 30, [1][1][RTW89_QATAR][10] = 30, + [1][1][RTW89_UK][10] = 30, [1][1][RTW89_FCC][11] = 60, [1][1][RTW89_ETSI][11] = 30, [1][1][RTW89_MKK][11] = 38, @@ -48235,6 +48635,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][11] = 60, [1][1][RTW89_CN][11] = 30, [1][1][RTW89_QATAR][11] = 30, + [1][1][RTW89_UK][11] = 30, [1][1][RTW89_FCC][12] = 32, [1][1][RTW89_ETSI][12] = 30, [1][1][RTW89_MKK][12] = 38, @@ -48246,6 +48647,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][12] = 32, [1][1][RTW89_CN][12] = 30, [1][1][RTW89_QATAR][12] = 30, + [1][1][RTW89_UK][12] = 30, [1][1][RTW89_FCC][13] = 127, [1][1][RTW89_ETSI][13] = 127, [1][1][RTW89_MKK][13] = 127, @@ -48257,6 +48659,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][13] = 127, [1][1][RTW89_CN][13] = 127, [1][1][RTW89_QATAR][13] = 127, + [1][1][RTW89_UK][13] = 127, [2][0][RTW89_FCC][0] = 76, [2][0][RTW89_ETSI][0] = 52, [2][0][RTW89_MKK][0] = 64, @@ -48268,6 +48671,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][0] = 76, [2][0][RTW89_CN][0] = 52, [2][0][RTW89_QATAR][0] = 52, + [2][0][RTW89_UK][0] = 52, [2][0][RTW89_FCC][1] = 76, [2][0][RTW89_ETSI][1] = 52, [2][0][RTW89_MKK][1] = 64, @@ -48279,6 +48683,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][1] = 76, [2][0][RTW89_CN][1] = 52, [2][0][RTW89_QATAR][1] = 52, + [2][0][RTW89_UK][1] = 52, [2][0][RTW89_FCC][2] = 78, [2][0][RTW89_ETSI][2] = 52, [2][0][RTW89_MKK][2] = 64, @@ -48290,6 +48695,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][2] = 78, [2][0][RTW89_CN][2] = 52, [2][0][RTW89_QATAR][2] = 52, + [2][0][RTW89_UK][2] = 52, [2][0][RTW89_FCC][3] = 78, [2][0][RTW89_ETSI][3] = 52, [2][0][RTW89_MKK][3] = 64, @@ -48301,6 +48707,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][3] = 78, [2][0][RTW89_CN][3] = 52, [2][0][RTW89_QATAR][3] = 52, + [2][0][RTW89_UK][3] = 52, [2][0][RTW89_FCC][4] = 78, [2][0][RTW89_ETSI][4] = 52, [2][0][RTW89_MKK][4] = 64, @@ -48312,6 +48719,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][4] = 78, [2][0][RTW89_CN][4] = 52, [2][0][RTW89_QATAR][4] = 52, + [2][0][RTW89_UK][4] = 52, [2][0][RTW89_FCC][5] = 78, [2][0][RTW89_ETSI][5] = 52, [2][0][RTW89_MKK][5] = 64, @@ -48323,6 +48731,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][5] = 78, [2][0][RTW89_CN][5] = 52, [2][0][RTW89_QATAR][5] = 52, + [2][0][RTW89_UK][5] = 52, [2][0][RTW89_FCC][6] = 78, [2][0][RTW89_ETSI][6] = 52, [2][0][RTW89_MKK][6] = 64, @@ -48334,6 +48743,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][6] = 78, 
[2][0][RTW89_CN][6] = 52, [2][0][RTW89_QATAR][6] = 52, + [2][0][RTW89_UK][6] = 52, [2][0][RTW89_FCC][7] = 78, [2][0][RTW89_ETSI][7] = 52, [2][0][RTW89_MKK][7] = 64, @@ -48345,6 +48755,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][7] = 78, [2][0][RTW89_CN][7] = 52, [2][0][RTW89_QATAR][7] = 52, + [2][0][RTW89_UK][7] = 52, [2][0][RTW89_FCC][8] = 78, [2][0][RTW89_ETSI][8] = 52, [2][0][RTW89_MKK][8] = 64, @@ -48356,6 +48767,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][8] = 78, [2][0][RTW89_CN][8] = 52, [2][0][RTW89_QATAR][8] = 52, + [2][0][RTW89_UK][8] = 52, [2][0][RTW89_FCC][9] = 76, [2][0][RTW89_ETSI][9] = 52, [2][0][RTW89_MKK][9] = 64, @@ -48367,6 +48779,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][9] = 76, [2][0][RTW89_CN][9] = 52, [2][0][RTW89_QATAR][9] = 52, + [2][0][RTW89_UK][9] = 52, [2][0][RTW89_FCC][10] = 76, [2][0][RTW89_ETSI][10] = 52, [2][0][RTW89_MKK][10] = 64, @@ -48378,6 +48791,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][10] = 76, [2][0][RTW89_CN][10] = 52, [2][0][RTW89_QATAR][10] = 52, + [2][0][RTW89_UK][10] = 52, [2][0][RTW89_FCC][11] = 68, [2][0][RTW89_ETSI][11] = 52, [2][0][RTW89_MKK][11] = 64, @@ -48389,6 +48803,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][11] = 68, [2][0][RTW89_CN][11] = 52, [2][0][RTW89_QATAR][11] = 52, + [2][0][RTW89_UK][11] = 52, [2][0][RTW89_FCC][12] = 40, [2][0][RTW89_ETSI][12] = 52, [2][0][RTW89_MKK][12] = 64, @@ -48400,6 +48815,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][12] = 40, [2][0][RTW89_CN][12] = 52, [2][0][RTW89_QATAR][12] = 52, + [2][0][RTW89_UK][12] = 52, [2][0][RTW89_FCC][13] = 127, [2][0][RTW89_ETSI][13] = 127, [2][0][RTW89_MKK][13] = 127, @@ -48411,6 +48827,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][13] = 127, [2][0][RTW89_CN][13] = 127, [2][0][RTW89_QATAR][13] = 127, + [2][0][RTW89_UK][13] = 127, [2][1][RTW89_FCC][0] = 68, [2][1][RTW89_ETSI][0] = 40, [2][1][RTW89_MKK][0] = 52, @@ -48422,6 +48839,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][0] = 68, [2][1][RTW89_CN][0] = 40, [2][1][RTW89_QATAR][0] = 40, + [2][1][RTW89_UK][0] = 40, [2][1][RTW89_FCC][1] = 68, [2][1][RTW89_ETSI][1] = 40, [2][1][RTW89_MKK][1] = 52, @@ -48433,6 +48851,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][1] = 68, [2][1][RTW89_CN][1] = 40, [2][1][RTW89_QATAR][1] = 40, + [2][1][RTW89_UK][1] = 40, [2][1][RTW89_FCC][2] = 72, [2][1][RTW89_ETSI][2] = 40, [2][1][RTW89_MKK][2] = 52, @@ -48444,6 +48863,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][2] = 72, [2][1][RTW89_CN][2] = 40, [2][1][RTW89_QATAR][2] = 40, + [2][1][RTW89_UK][2] = 40, [2][1][RTW89_FCC][3] = 76, [2][1][RTW89_ETSI][3] = 40, [2][1][RTW89_MKK][3] = 52, @@ -48455,6 +48875,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][3] = 76, [2][1][RTW89_CN][3] = 40, [2][1][RTW89_QATAR][3] = 40, + [2][1][RTW89_UK][3] = 40, [2][1][RTW89_FCC][4] = 78, [2][1][RTW89_ETSI][4] = 40, [2][1][RTW89_MKK][4] = 52, @@ -48466,6 +48887,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][4] = 78, [2][1][RTW89_CN][4] = 40, [2][1][RTW89_QATAR][4] = 40, + [2][1][RTW89_UK][4] = 40, 
[2][1][RTW89_FCC][5] = 78, [2][1][RTW89_ETSI][5] = 40, [2][1][RTW89_MKK][5] = 52, @@ -48477,6 +48899,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][5] = 78, [2][1][RTW89_CN][5] = 40, [2][1][RTW89_QATAR][5] = 40, + [2][1][RTW89_UK][5] = 40, [2][1][RTW89_FCC][6] = 78, [2][1][RTW89_ETSI][6] = 40, [2][1][RTW89_MKK][6] = 52, @@ -48488,6 +48911,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][6] = 78, [2][1][RTW89_CN][6] = 40, [2][1][RTW89_QATAR][6] = 40, + [2][1][RTW89_UK][6] = 40, [2][1][RTW89_FCC][7] = 78, [2][1][RTW89_ETSI][7] = 40, [2][1][RTW89_MKK][7] = 52, @@ -48499,6 +48923,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][7] = 78, [2][1][RTW89_CN][7] = 40, [2][1][RTW89_QATAR][7] = 40, + [2][1][RTW89_UK][7] = 40, [2][1][RTW89_FCC][8] = 74, [2][1][RTW89_ETSI][8] = 40, [2][1][RTW89_MKK][8] = 52, @@ -48510,6 +48935,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][8] = 74, [2][1][RTW89_CN][8] = 40, [2][1][RTW89_QATAR][8] = 40, + [2][1][RTW89_UK][8] = 40, [2][1][RTW89_FCC][9] = 70, [2][1][RTW89_ETSI][9] = 40, [2][1][RTW89_MKK][9] = 52, @@ -48521,6 +48947,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][9] = 70, [2][1][RTW89_CN][9] = 40, [2][1][RTW89_QATAR][9] = 40, + [2][1][RTW89_UK][9] = 40, [2][1][RTW89_FCC][10] = 70, [2][1][RTW89_ETSI][10] = 40, [2][1][RTW89_MKK][10] = 52, @@ -48532,6 +48959,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][10] = 70, [2][1][RTW89_CN][10] = 40, [2][1][RTW89_QATAR][10] = 40, + [2][1][RTW89_UK][10] = 40, [2][1][RTW89_FCC][11] = 48, [2][1][RTW89_ETSI][11] = 40, [2][1][RTW89_MKK][11] = 52, @@ -48543,6 +48971,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][11] = 48, [2][1][RTW89_CN][11] = 40, [2][1][RTW89_QATAR][11] = 40, + [2][1][RTW89_UK][11] = 40, [2][1][RTW89_FCC][12] = 26, [2][1][RTW89_ETSI][12] = 40, [2][1][RTW89_MKK][12] = 52, @@ -48554,6 +48983,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][12] = 26, [2][1][RTW89_CN][12] = 40, [2][1][RTW89_QATAR][12] = 40, + [2][1][RTW89_UK][12] = 40, [2][1][RTW89_FCC][13] = 127, [2][1][RTW89_ETSI][13] = 127, [2][1][RTW89_MKK][13] = 127, @@ -48565,6 +48995,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][13] = 127, [2][1][RTW89_CN][13] = 127, [2][1][RTW89_QATAR][13] = 127, + [2][1][RTW89_UK][13] = 127, }; const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] @@ -48730,6 +49161,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][0] = 48, [0][0][RTW89_CN][0] = 24, [0][0][RTW89_QATAR][0] = 24, + [0][0][RTW89_UK][0] = 24, [0][0][RTW89_FCC][2] = 48, [0][0][RTW89_ETSI][2] = 24, [0][0][RTW89_MKK][2] = 26, @@ -48741,6 +49173,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][2] = 48, [0][0][RTW89_CN][2] = 24, [0][0][RTW89_QATAR][2] = 24, + [0][0][RTW89_UK][2] = 24, [0][0][RTW89_FCC][4] = 48, [0][0][RTW89_ETSI][4] = 24, [0][0][RTW89_MKK][4] = 26, @@ -48752,6 +49185,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][4] = 48, [0][0][RTW89_CN][4] = 24, [0][0][RTW89_QATAR][4] = 24, + [0][0][RTW89_UK][4] = 24, [0][0][RTW89_FCC][6] = 48, [0][0][RTW89_ETSI][6] = 24, [0][0][RTW89_MKK][6] = 26, @@ -48763,6 +49197,7 @@ const 
s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][6] = 48, [0][0][RTW89_CN][6] = 24, [0][0][RTW89_QATAR][6] = 24, + [0][0][RTW89_UK][6] = 24, [0][0][RTW89_FCC][8] = 48, [0][0][RTW89_ETSI][8] = 24, [0][0][RTW89_MKK][8] = 26, @@ -48774,6 +49209,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][8] = 48, [0][0][RTW89_CN][8] = 24, [0][0][RTW89_QATAR][8] = 24, + [0][0][RTW89_UK][8] = 24, [0][0][RTW89_FCC][10] = 48, [0][0][RTW89_ETSI][10] = 24, [0][0][RTW89_MKK][10] = 26, @@ -48785,6 +49221,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][10] = 48, [0][0][RTW89_CN][10] = 24, [0][0][RTW89_QATAR][10] = 24, + [0][0][RTW89_UK][10] = 24, [0][0][RTW89_FCC][12] = 48, [0][0][RTW89_ETSI][12] = 24, [0][0][RTW89_MKK][12] = 26, @@ -48796,6 +49233,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][12] = 48, [0][0][RTW89_CN][12] = 24, [0][0][RTW89_QATAR][12] = 24, + [0][0][RTW89_UK][12] = 24, [0][0][RTW89_FCC][14] = 48, [0][0][RTW89_ETSI][14] = 24, [0][0][RTW89_MKK][14] = 26, @@ -48807,6 +49245,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][14] = 48, [0][0][RTW89_CN][14] = 24, [0][0][RTW89_QATAR][14] = 24, + [0][0][RTW89_UK][14] = 24, [0][0][RTW89_FCC][15] = 48, [0][0][RTW89_ETSI][15] = 24, [0][0][RTW89_MKK][15] = 44, @@ -48818,6 +49257,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][15] = 48, [0][0][RTW89_CN][15] = 127, [0][0][RTW89_QATAR][15] = 24, + [0][0][RTW89_UK][15] = 24, [0][0][RTW89_FCC][17] = 48, [0][0][RTW89_ETSI][17] = 24, [0][0][RTW89_MKK][17] = 44, @@ -48829,6 +49269,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][17] = 48, [0][0][RTW89_CN][17] = 127, [0][0][RTW89_QATAR][17] = 24, + [0][0][RTW89_UK][17] = 24, [0][0][RTW89_FCC][19] = 48, [0][0][RTW89_ETSI][19] = 24, [0][0][RTW89_MKK][19] = 44, @@ -48840,6 +49281,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][19] = 48, [0][0][RTW89_CN][19] = 127, [0][0][RTW89_QATAR][19] = 24, + [0][0][RTW89_UK][19] = 24, [0][0][RTW89_FCC][21] = 48, [0][0][RTW89_ETSI][21] = 24, [0][0][RTW89_MKK][21] = 44, @@ -48851,6 +49293,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][21] = 48, [0][0][RTW89_CN][21] = 127, [0][0][RTW89_QATAR][21] = 24, + [0][0][RTW89_UK][21] = 24, [0][0][RTW89_FCC][23] = 48, [0][0][RTW89_ETSI][23] = 24, [0][0][RTW89_MKK][23] = 44, @@ -48862,6 +49305,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][23] = 48, [0][0][RTW89_CN][23] = 127, [0][0][RTW89_QATAR][23] = 24, + [0][0][RTW89_UK][23] = 24, [0][0][RTW89_FCC][25] = 48, [0][0][RTW89_ETSI][25] = 24, [0][0][RTW89_MKK][25] = 44, @@ -48873,6 +49317,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][25] = 48, [0][0][RTW89_CN][25] = 127, [0][0][RTW89_QATAR][25] = 24, + [0][0][RTW89_UK][25] = 24, [0][0][RTW89_FCC][27] = 48, [0][0][RTW89_ETSI][27] = 24, [0][0][RTW89_MKK][27] = 44, @@ -48884,6 +49329,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][27] = 48, [0][0][RTW89_CN][27] = 127, [0][0][RTW89_QATAR][27] = 24, + [0][0][RTW89_UK][27] = 24, [0][0][RTW89_FCC][29] = 48, [0][0][RTW89_ETSI][29] = 24, [0][0][RTW89_MKK][29] = 44, @@ -48895,6 +49341,7 @@ const s8 
rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][29] = 48, [0][0][RTW89_CN][29] = 127, [0][0][RTW89_QATAR][29] = 24, + [0][0][RTW89_UK][29] = 24, [0][0][RTW89_FCC][31] = 48, [0][0][RTW89_ETSI][31] = 24, [0][0][RTW89_MKK][31] = 44, @@ -48906,6 +49353,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][31] = 48, [0][0][RTW89_CN][31] = 127, [0][0][RTW89_QATAR][31] = 24, + [0][0][RTW89_UK][31] = 24, [0][0][RTW89_FCC][33] = 48, [0][0][RTW89_ETSI][33] = 24, [0][0][RTW89_MKK][33] = 44, @@ -48917,6 +49365,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][33] = 48, [0][0][RTW89_CN][33] = 127, [0][0][RTW89_QATAR][33] = 24, + [0][0][RTW89_UK][33] = 24, [0][0][RTW89_FCC][35] = 48, [0][0][RTW89_ETSI][35] = 24, [0][0][RTW89_MKK][35] = 44, @@ -48928,6 +49377,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][35] = 48, [0][0][RTW89_CN][35] = 127, [0][0][RTW89_QATAR][35] = 24, + [0][0][RTW89_UK][35] = 24, [0][0][RTW89_FCC][37] = 48, [0][0][RTW89_ETSI][37] = 127, [0][0][RTW89_MKK][37] = 44, @@ -48939,6 +49389,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][37] = 48, [0][0][RTW89_CN][37] = 127, [0][0][RTW89_QATAR][37] = 127, + [0][0][RTW89_UK][37] = 58, [0][0][RTW89_FCC][38] = 76, [0][0][RTW89_ETSI][38] = 28, [0][0][RTW89_MKK][38] = 127, @@ -48950,6 +49401,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][38] = 76, [0][0][RTW89_CN][38] = 62, [0][0][RTW89_QATAR][38] = 28, + [0][0][RTW89_UK][38] = 28, [0][0][RTW89_FCC][40] = 76, [0][0][RTW89_ETSI][40] = 28, [0][0][RTW89_MKK][40] = 127, @@ -48961,6 +49413,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][40] = 76, [0][0][RTW89_CN][40] = 62, [0][0][RTW89_QATAR][40] = 28, + [0][0][RTW89_UK][40] = 28, [0][0][RTW89_FCC][42] = 76, [0][0][RTW89_ETSI][42] = 28, [0][0][RTW89_MKK][42] = 127, @@ -48972,6 +49425,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][42] = 76, [0][0][RTW89_CN][42] = 62, [0][0][RTW89_QATAR][42] = 28, + [0][0][RTW89_UK][42] = 28, [0][0][RTW89_FCC][44] = 76, [0][0][RTW89_ETSI][44] = 28, [0][0][RTW89_MKK][44] = 127, @@ -48983,6 +49437,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][44] = 76, [0][0][RTW89_CN][44] = 62, [0][0][RTW89_QATAR][44] = 28, + [0][0][RTW89_UK][44] = 28, [0][0][RTW89_FCC][46] = 76, [0][0][RTW89_ETSI][46] = 28, [0][0][RTW89_MKK][46] = 127, @@ -48994,6 +49449,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MEXICO][46] = 76, [0][0][RTW89_CN][46] = 62, [0][0][RTW89_QATAR][46] = 28, + [0][0][RTW89_UK][46] = 28, [0][1][RTW89_FCC][0] = 36, [0][1][RTW89_ETSI][0] = 12, [0][1][RTW89_MKK][0] = 14, @@ -49005,6 +49461,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][0] = 36, [0][1][RTW89_CN][0] = 12, [0][1][RTW89_QATAR][0] = 12, + [0][1][RTW89_UK][0] = 12, [0][1][RTW89_FCC][2] = 36, [0][1][RTW89_ETSI][2] = 12, [0][1][RTW89_MKK][2] = 14, @@ -49016,6 +49473,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][2] = 36, [0][1][RTW89_CN][2] = 12, [0][1][RTW89_QATAR][2] = 12, + [0][1][RTW89_UK][2] = 12, [0][1][RTW89_FCC][4] = 36, [0][1][RTW89_ETSI][4] = 12, [0][1][RTW89_MKK][4] = 14, @@ -49027,6 +49485,7 @@ const s8 
rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][4] = 36, [0][1][RTW89_CN][4] = 12, [0][1][RTW89_QATAR][4] = 12, + [0][1][RTW89_UK][4] = 12, [0][1][RTW89_FCC][6] = 36, [0][1][RTW89_ETSI][6] = 12, [0][1][RTW89_MKK][6] = 14, @@ -49038,6 +49497,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][6] = 36, [0][1][RTW89_CN][6] = 12, [0][1][RTW89_QATAR][6] = 12, + [0][1][RTW89_UK][6] = 12, [0][1][RTW89_FCC][8] = 36, [0][1][RTW89_ETSI][8] = 12, [0][1][RTW89_MKK][8] = 14, @@ -49049,6 +49509,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][8] = 36, [0][1][RTW89_CN][8] = 12, [0][1][RTW89_QATAR][8] = 12, + [0][1][RTW89_UK][8] = 12, [0][1][RTW89_FCC][10] = 36, [0][1][RTW89_ETSI][10] = 12, [0][1][RTW89_MKK][10] = 14, @@ -49060,6 +49521,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][10] = 36, [0][1][RTW89_CN][10] = 12, [0][1][RTW89_QATAR][10] = 12, + [0][1][RTW89_UK][10] = 12, [0][1][RTW89_FCC][12] = 36, [0][1][RTW89_ETSI][12] = 12, [0][1][RTW89_MKK][12] = 14, @@ -49071,6 +49533,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][12] = 36, [0][1][RTW89_CN][12] = 12, [0][1][RTW89_QATAR][12] = 12, + [0][1][RTW89_UK][12] = 12, [0][1][RTW89_FCC][14] = 36, [0][1][RTW89_ETSI][14] = 12, [0][1][RTW89_MKK][14] = 14, @@ -49082,6 +49545,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][14] = 36, [0][1][RTW89_CN][14] = 12, [0][1][RTW89_QATAR][14] = 12, + [0][1][RTW89_UK][14] = 12, [0][1][RTW89_FCC][15] = 36, [0][1][RTW89_ETSI][15] = 12, [0][1][RTW89_MKK][15] = 32, @@ -49093,6 +49557,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][15] = 36, [0][1][RTW89_CN][15] = 127, [0][1][RTW89_QATAR][15] = 12, + [0][1][RTW89_UK][15] = 12, [0][1][RTW89_FCC][17] = 36, [0][1][RTW89_ETSI][17] = 12, [0][1][RTW89_MKK][17] = 32, @@ -49104,6 +49569,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][17] = 36, [0][1][RTW89_CN][17] = 127, [0][1][RTW89_QATAR][17] = 12, + [0][1][RTW89_UK][17] = 12, [0][1][RTW89_FCC][19] = 36, [0][1][RTW89_ETSI][19] = 12, [0][1][RTW89_MKK][19] = 32, @@ -49115,6 +49581,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][19] = 36, [0][1][RTW89_CN][19] = 127, [0][1][RTW89_QATAR][19] = 12, + [0][1][RTW89_UK][19] = 12, [0][1][RTW89_FCC][21] = 36, [0][1][RTW89_ETSI][21] = 12, [0][1][RTW89_MKK][21] = 32, @@ -49126,6 +49593,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][21] = 36, [0][1][RTW89_CN][21] = 127, [0][1][RTW89_QATAR][21] = 12, + [0][1][RTW89_UK][21] = 12, [0][1][RTW89_FCC][23] = 36, [0][1][RTW89_ETSI][23] = 12, [0][1][RTW89_MKK][23] = 32, @@ -49137,6 +49605,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][23] = 36, [0][1][RTW89_CN][23] = 127, [0][1][RTW89_QATAR][23] = 12, + [0][1][RTW89_UK][23] = 12, [0][1][RTW89_FCC][25] = 36, [0][1][RTW89_ETSI][25] = 12, [0][1][RTW89_MKK][25] = 32, @@ -49148,6 +49617,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][25] = 36, [0][1][RTW89_CN][25] = 127, [0][1][RTW89_QATAR][25] = 12, + [0][1][RTW89_UK][25] = 12, [0][1][RTW89_FCC][27] = 36, [0][1][RTW89_ETSI][27] = 12, [0][1][RTW89_MKK][27] = 32, @@ -49159,6 +49629,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[0][1][RTW89_MEXICO][27] = 36, [0][1][RTW89_CN][27] = 127, [0][1][RTW89_QATAR][27] = 12, + [0][1][RTW89_UK][27] = 12, [0][1][RTW89_FCC][29] = 36, [0][1][RTW89_ETSI][29] = 12, [0][1][RTW89_MKK][29] = 32, @@ -49170,6 +49641,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][29] = 36, [0][1][RTW89_CN][29] = 127, [0][1][RTW89_QATAR][29] = 12, + [0][1][RTW89_UK][29] = 12, [0][1][RTW89_FCC][31] = 36, [0][1][RTW89_ETSI][31] = 12, [0][1][RTW89_MKK][31] = 32, @@ -49181,6 +49653,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][31] = 36, [0][1][RTW89_CN][31] = 127, [0][1][RTW89_QATAR][31] = 12, + [0][1][RTW89_UK][31] = 12, [0][1][RTW89_FCC][33] = 36, [0][1][RTW89_ETSI][33] = 12, [0][1][RTW89_MKK][33] = 32, @@ -49192,6 +49665,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][33] = 36, [0][1][RTW89_CN][33] = 127, [0][1][RTW89_QATAR][33] = 12, + [0][1][RTW89_UK][33] = 12, [0][1][RTW89_FCC][35] = 36, [0][1][RTW89_ETSI][35] = 12, [0][1][RTW89_MKK][35] = 32, @@ -49203,6 +49677,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][35] = 36, [0][1][RTW89_CN][35] = 127, [0][1][RTW89_QATAR][35] = 12, + [0][1][RTW89_UK][35] = 12, [0][1][RTW89_FCC][37] = 36, [0][1][RTW89_ETSI][37] = 127, [0][1][RTW89_MKK][37] = 32, @@ -49214,6 +49689,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][37] = 36, [0][1][RTW89_CN][37] = 127, [0][1][RTW89_QATAR][37] = 127, + [0][1][RTW89_UK][37] = 46, [0][1][RTW89_FCC][38] = 72, [0][1][RTW89_ETSI][38] = 16, [0][1][RTW89_MKK][38] = 127, @@ -49225,6 +49701,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][38] = 72, [0][1][RTW89_CN][38] = 50, [0][1][RTW89_QATAR][38] = 16, + [0][1][RTW89_UK][38] = 16, [0][1][RTW89_FCC][40] = 76, [0][1][RTW89_ETSI][40] = 16, [0][1][RTW89_MKK][40] = 127, @@ -49236,6 +49713,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][40] = 76, [0][1][RTW89_CN][40] = 50, [0][1][RTW89_QATAR][40] = 16, + [0][1][RTW89_UK][40] = 16, [0][1][RTW89_FCC][42] = 76, [0][1][RTW89_ETSI][42] = 16, [0][1][RTW89_MKK][42] = 127, @@ -49247,6 +49725,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][42] = 76, [0][1][RTW89_CN][42] = 50, [0][1][RTW89_QATAR][42] = 16, + [0][1][RTW89_UK][42] = 16, [0][1][RTW89_FCC][44] = 76, [0][1][RTW89_ETSI][44] = 16, [0][1][RTW89_MKK][44] = 127, @@ -49258,6 +49737,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][44] = 76, [0][1][RTW89_CN][44] = 50, [0][1][RTW89_QATAR][44] = 16, + [0][1][RTW89_UK][44] = 16, [0][1][RTW89_FCC][46] = 76, [0][1][RTW89_ETSI][46] = 16, [0][1][RTW89_MKK][46] = 127, @@ -49269,6 +49749,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MEXICO][46] = 76, [0][1][RTW89_CN][46] = 50, [0][1][RTW89_QATAR][46] = 16, + [0][1][RTW89_UK][46] = 16, [1][0][RTW89_FCC][0] = 62, [1][0][RTW89_ETSI][0] = 36, [1][0][RTW89_MKK][0] = 36, @@ -49280,6 +49761,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][0] = 62, [1][0][RTW89_CN][0] = 36, [1][0][RTW89_QATAR][0] = 36, + [1][0][RTW89_UK][0] = 36, [1][0][RTW89_FCC][2] = 62, [1][0][RTW89_ETSI][2] = 36, [1][0][RTW89_MKK][2] = 36, @@ -49291,6 +49773,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][2] = 62, 
[1][0][RTW89_CN][2] = 36, [1][0][RTW89_QATAR][2] = 36, + [1][0][RTW89_UK][2] = 36, [1][0][RTW89_FCC][4] = 62, [1][0][RTW89_ETSI][4] = 36, [1][0][RTW89_MKK][4] = 36, @@ -49302,6 +49785,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][4] = 62, [1][0][RTW89_CN][4] = 36, [1][0][RTW89_QATAR][4] = 36, + [1][0][RTW89_UK][4] = 36, [1][0][RTW89_FCC][6] = 62, [1][0][RTW89_ETSI][6] = 36, [1][0][RTW89_MKK][6] = 36, @@ -49313,6 +49797,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][6] = 62, [1][0][RTW89_CN][6] = 36, [1][0][RTW89_QATAR][6] = 36, + [1][0][RTW89_UK][6] = 36, [1][0][RTW89_FCC][8] = 62, [1][0][RTW89_ETSI][8] = 36, [1][0][RTW89_MKK][8] = 36, @@ -49324,6 +49809,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][8] = 62, [1][0][RTW89_CN][8] = 36, [1][0][RTW89_QATAR][8] = 36, + [1][0][RTW89_UK][8] = 36, [1][0][RTW89_FCC][10] = 62, [1][0][RTW89_ETSI][10] = 36, [1][0][RTW89_MKK][10] = 36, @@ -49335,6 +49821,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][10] = 62, [1][0][RTW89_CN][10] = 36, [1][0][RTW89_QATAR][10] = 36, + [1][0][RTW89_UK][10] = 36, [1][0][RTW89_FCC][12] = 62, [1][0][RTW89_ETSI][12] = 36, [1][0][RTW89_MKK][12] = 36, @@ -49346,6 +49833,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][12] = 62, [1][0][RTW89_CN][12] = 36, [1][0][RTW89_QATAR][12] = 36, + [1][0][RTW89_UK][12] = 36, [1][0][RTW89_FCC][14] = 62, [1][0][RTW89_ETSI][14] = 36, [1][0][RTW89_MKK][14] = 36, @@ -49357,6 +49845,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][14] = 62, [1][0][RTW89_CN][14] = 36, [1][0][RTW89_QATAR][14] = 36, + [1][0][RTW89_UK][14] = 36, [1][0][RTW89_FCC][15] = 62, [1][0][RTW89_ETSI][15] = 36, [1][0][RTW89_MKK][15] = 58, @@ -49368,6 +49857,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][15] = 62, [1][0][RTW89_CN][15] = 127, [1][0][RTW89_QATAR][15] = 36, + [1][0][RTW89_UK][15] = 36, [1][0][RTW89_FCC][17] = 62, [1][0][RTW89_ETSI][17] = 36, [1][0][RTW89_MKK][17] = 58, @@ -49379,6 +49869,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][17] = 62, [1][0][RTW89_CN][17] = 127, [1][0][RTW89_QATAR][17] = 36, + [1][0][RTW89_UK][17] = 36, [1][0][RTW89_FCC][19] = 62, [1][0][RTW89_ETSI][19] = 36, [1][0][RTW89_MKK][19] = 58, @@ -49390,6 +49881,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][19] = 62, [1][0][RTW89_CN][19] = 127, [1][0][RTW89_QATAR][19] = 36, + [1][0][RTW89_UK][19] = 36, [1][0][RTW89_FCC][21] = 62, [1][0][RTW89_ETSI][21] = 36, [1][0][RTW89_MKK][21] = 58, @@ -49401,6 +49893,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][21] = 62, [1][0][RTW89_CN][21] = 127, [1][0][RTW89_QATAR][21] = 36, + [1][0][RTW89_UK][21] = 36, [1][0][RTW89_FCC][23] = 62, [1][0][RTW89_ETSI][23] = 36, [1][0][RTW89_MKK][23] = 58, @@ -49412,6 +49905,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][23] = 62, [1][0][RTW89_CN][23] = 127, [1][0][RTW89_QATAR][23] = 36, + [1][0][RTW89_UK][23] = 36, [1][0][RTW89_FCC][25] = 62, [1][0][RTW89_ETSI][25] = 36, [1][0][RTW89_MKK][25] = 58, @@ -49423,6 +49917,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][25] = 62, [1][0][RTW89_CN][25] = 127, [1][0][RTW89_QATAR][25] = 36, + 
[1][0][RTW89_UK][25] = 36, [1][0][RTW89_FCC][27] = 62, [1][0][RTW89_ETSI][27] = 36, [1][0][RTW89_MKK][27] = 58, @@ -49434,6 +49929,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][27] = 62, [1][0][RTW89_CN][27] = 127, [1][0][RTW89_QATAR][27] = 36, + [1][0][RTW89_UK][27] = 36, [1][0][RTW89_FCC][29] = 62, [1][0][RTW89_ETSI][29] = 36, [1][0][RTW89_MKK][29] = 58, @@ -49445,6 +49941,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][29] = 62, [1][0][RTW89_CN][29] = 127, [1][0][RTW89_QATAR][29] = 36, + [1][0][RTW89_UK][29] = 36, [1][0][RTW89_FCC][31] = 62, [1][0][RTW89_ETSI][31] = 36, [1][0][RTW89_MKK][31] = 58, @@ -49456,6 +49953,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][31] = 62, [1][0][RTW89_CN][31] = 127, [1][0][RTW89_QATAR][31] = 36, + [1][0][RTW89_UK][31] = 36, [1][0][RTW89_FCC][33] = 62, [1][0][RTW89_ETSI][33] = 36, [1][0][RTW89_MKK][33] = 58, @@ -49467,6 +49965,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][33] = 62, [1][0][RTW89_CN][33] = 127, [1][0][RTW89_QATAR][33] = 36, + [1][0][RTW89_UK][33] = 36, [1][0][RTW89_FCC][35] = 62, [1][0][RTW89_ETSI][35] = 36, [1][0][RTW89_MKK][35] = 58, @@ -49478,6 +49977,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][35] = 62, [1][0][RTW89_CN][35] = 127, [1][0][RTW89_QATAR][35] = 36, + [1][0][RTW89_UK][35] = 36, [1][0][RTW89_FCC][37] = 62, [1][0][RTW89_ETSI][37] = 127, [1][0][RTW89_MKK][37] = 58, @@ -49489,6 +49989,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][37] = 62, [1][0][RTW89_CN][37] = 127, [1][0][RTW89_QATAR][37] = 127, + [1][0][RTW89_UK][37] = 64, [1][0][RTW89_FCC][38] = 76, [1][0][RTW89_ETSI][38] = 28, [1][0][RTW89_MKK][38] = 127, @@ -49500,6 +50001,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][38] = 76, [1][0][RTW89_CN][38] = 74, [1][0][RTW89_QATAR][38] = 28, + [1][0][RTW89_UK][38] = 34, [1][0][RTW89_FCC][40] = 76, [1][0][RTW89_ETSI][40] = 28, [1][0][RTW89_MKK][40] = 127, @@ -49511,6 +50013,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][40] = 76, [1][0][RTW89_CN][40] = 74, [1][0][RTW89_QATAR][40] = 28, + [1][0][RTW89_UK][40] = 34, [1][0][RTW89_FCC][42] = 76, [1][0][RTW89_ETSI][42] = 28, [1][0][RTW89_MKK][42] = 127, @@ -49522,6 +50025,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][42] = 76, [1][0][RTW89_CN][42] = 74, [1][0][RTW89_QATAR][42] = 28, + [1][0][RTW89_UK][42] = 34, [1][0][RTW89_FCC][44] = 76, [1][0][RTW89_ETSI][44] = 28, [1][0][RTW89_MKK][44] = 127, @@ -49533,6 +50037,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][44] = 76, [1][0][RTW89_CN][44] = 74, [1][0][RTW89_QATAR][44] = 28, + [1][0][RTW89_UK][44] = 34, [1][0][RTW89_FCC][46] = 76, [1][0][RTW89_ETSI][46] = 28, [1][0][RTW89_MKK][46] = 127, @@ -49544,6 +50049,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MEXICO][46] = 76, [1][0][RTW89_CN][46] = 74, [1][0][RTW89_QATAR][46] = 28, + [1][0][RTW89_UK][46] = 34, [1][1][RTW89_FCC][0] = 46, [1][1][RTW89_ETSI][0] = 22, [1][1][RTW89_MKK][0] = 24, @@ -49555,6 +50061,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][0] = 46, [1][1][RTW89_CN][0] = 22, [1][1][RTW89_QATAR][0] = 22, + [1][1][RTW89_UK][0] = 22, 
[1][1][RTW89_FCC][2] = 46, [1][1][RTW89_ETSI][2] = 22, [1][1][RTW89_MKK][2] = 24, @@ -49566,6 +50073,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][2] = 46, [1][1][RTW89_CN][2] = 22, [1][1][RTW89_QATAR][2] = 22, + [1][1][RTW89_UK][2] = 22, [1][1][RTW89_FCC][4] = 46, [1][1][RTW89_ETSI][4] = 22, [1][1][RTW89_MKK][4] = 24, @@ -49577,6 +50085,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][4] = 46, [1][1][RTW89_CN][4] = 22, [1][1][RTW89_QATAR][4] = 22, + [1][1][RTW89_UK][4] = 22, [1][1][RTW89_FCC][6] = 46, [1][1][RTW89_ETSI][6] = 22, [1][1][RTW89_MKK][6] = 24, @@ -49588,6 +50097,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][6] = 46, [1][1][RTW89_CN][6] = 22, [1][1][RTW89_QATAR][6] = 22, + [1][1][RTW89_UK][6] = 22, [1][1][RTW89_FCC][8] = 46, [1][1][RTW89_ETSI][8] = 22, [1][1][RTW89_MKK][8] = 24, @@ -49599,6 +50109,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][8] = 46, [1][1][RTW89_CN][8] = 22, [1][1][RTW89_QATAR][8] = 22, + [1][1][RTW89_UK][8] = 22, [1][1][RTW89_FCC][10] = 46, [1][1][RTW89_ETSI][10] = 22, [1][1][RTW89_MKK][10] = 24, @@ -49610,6 +50121,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][10] = 46, [1][1][RTW89_CN][10] = 22, [1][1][RTW89_QATAR][10] = 22, + [1][1][RTW89_UK][10] = 22, [1][1][RTW89_FCC][12] = 46, [1][1][RTW89_ETSI][12] = 22, [1][1][RTW89_MKK][12] = 24, @@ -49621,6 +50133,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][12] = 46, [1][1][RTW89_CN][12] = 22, [1][1][RTW89_QATAR][12] = 22, + [1][1][RTW89_UK][12] = 22, [1][1][RTW89_FCC][14] = 46, [1][1][RTW89_ETSI][14] = 22, [1][1][RTW89_MKK][14] = 24, @@ -49632,6 +50145,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][14] = 46, [1][1][RTW89_CN][14] = 22, [1][1][RTW89_QATAR][14] = 22, + [1][1][RTW89_UK][14] = 22, [1][1][RTW89_FCC][15] = 46, [1][1][RTW89_ETSI][15] = 22, [1][1][RTW89_MKK][15] = 46, @@ -49643,6 +50157,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][15] = 46, [1][1][RTW89_CN][15] = 127, [1][1][RTW89_QATAR][15] = 22, + [1][1][RTW89_UK][15] = 22, [1][1][RTW89_FCC][17] = 46, [1][1][RTW89_ETSI][17] = 22, [1][1][RTW89_MKK][17] = 46, @@ -49654,6 +50169,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][17] = 46, [1][1][RTW89_CN][17] = 127, [1][1][RTW89_QATAR][17] = 22, + [1][1][RTW89_UK][17] = 22, [1][1][RTW89_FCC][19] = 46, [1][1][RTW89_ETSI][19] = 22, [1][1][RTW89_MKK][19] = 46, @@ -49665,6 +50181,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][19] = 46, [1][1][RTW89_CN][19] = 127, [1][1][RTW89_QATAR][19] = 22, + [1][1][RTW89_UK][19] = 22, [1][1][RTW89_FCC][21] = 46, [1][1][RTW89_ETSI][21] = 22, [1][1][RTW89_MKK][21] = 46, @@ -49676,6 +50193,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][21] = 46, [1][1][RTW89_CN][21] = 127, [1][1][RTW89_QATAR][21] = 22, + [1][1][RTW89_UK][21] = 22, [1][1][RTW89_FCC][23] = 46, [1][1][RTW89_ETSI][23] = 22, [1][1][RTW89_MKK][23] = 46, @@ -49687,6 +50205,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][23] = 46, [1][1][RTW89_CN][23] = 127, [1][1][RTW89_QATAR][23] = 22, + [1][1][RTW89_UK][23] = 22, [1][1][RTW89_FCC][25] = 46, [1][1][RTW89_ETSI][25] = 22, 
[1][1][RTW89_MKK][25] = 46, @@ -49698,6 +50217,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][25] = 46, [1][1][RTW89_CN][25] = 127, [1][1][RTW89_QATAR][25] = 22, + [1][1][RTW89_UK][25] = 22, [1][1][RTW89_FCC][27] = 46, [1][1][RTW89_ETSI][27] = 22, [1][1][RTW89_MKK][27] = 46, @@ -49709,6 +50229,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][27] = 46, [1][1][RTW89_CN][27] = 127, [1][1][RTW89_QATAR][27] = 22, + [1][1][RTW89_UK][27] = 22, [1][1][RTW89_FCC][29] = 46, [1][1][RTW89_ETSI][29] = 22, [1][1][RTW89_MKK][29] = 46, @@ -49720,6 +50241,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][29] = 46, [1][1][RTW89_CN][29] = 127, [1][1][RTW89_QATAR][29] = 22, + [1][1][RTW89_UK][29] = 22, [1][1][RTW89_FCC][31] = 46, [1][1][RTW89_ETSI][31] = 22, [1][1][RTW89_MKK][31] = 46, @@ -49731,6 +50253,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][31] = 46, [1][1][RTW89_CN][31] = 127, [1][1][RTW89_QATAR][31] = 22, + [1][1][RTW89_UK][31] = 22, [1][1][RTW89_FCC][33] = 46, [1][1][RTW89_ETSI][33] = 22, [1][1][RTW89_MKK][33] = 46, @@ -49742,6 +50265,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][33] = 46, [1][1][RTW89_CN][33] = 127, [1][1][RTW89_QATAR][33] = 22, + [1][1][RTW89_UK][33] = 22, [1][1][RTW89_FCC][35] = 46, [1][1][RTW89_ETSI][35] = 22, [1][1][RTW89_MKK][35] = 46, @@ -49753,6 +50277,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][35] = 46, [1][1][RTW89_CN][35] = 127, [1][1][RTW89_QATAR][35] = 22, + [1][1][RTW89_UK][35] = 22, [1][1][RTW89_FCC][37] = 46, [1][1][RTW89_ETSI][37] = 127, [1][1][RTW89_MKK][37] = 46, @@ -49764,6 +50289,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][37] = 46, [1][1][RTW89_CN][37] = 127, [1][1][RTW89_QATAR][37] = 127, + [1][1][RTW89_UK][37] = 52, [1][1][RTW89_FCC][38] = 74, [1][1][RTW89_ETSI][38] = 16, [1][1][RTW89_MKK][38] = 127, @@ -49775,6 +50301,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][38] = 74, [1][1][RTW89_CN][38] = 62, [1][1][RTW89_QATAR][38] = 16, + [1][1][RTW89_UK][38] = 22, [1][1][RTW89_FCC][40] = 76, [1][1][RTW89_ETSI][40] = 16, [1][1][RTW89_MKK][40] = 127, @@ -49786,6 +50313,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][40] = 76, [1][1][RTW89_CN][40] = 62, [1][1][RTW89_QATAR][40] = 16, + [1][1][RTW89_UK][40] = 22, [1][1][RTW89_FCC][42] = 76, [1][1][RTW89_ETSI][42] = 16, [1][1][RTW89_MKK][42] = 127, @@ -49797,6 +50325,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][42] = 76, [1][1][RTW89_CN][42] = 62, [1][1][RTW89_QATAR][42] = 16, + [1][1][RTW89_UK][42] = 22, [1][1][RTW89_FCC][44] = 76, [1][1][RTW89_ETSI][44] = 16, [1][1][RTW89_MKK][44] = 127, @@ -49808,6 +50337,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][44] = 76, [1][1][RTW89_CN][44] = 62, [1][1][RTW89_QATAR][44] = 16, + [1][1][RTW89_UK][44] = 22, [1][1][RTW89_FCC][46] = 76, [1][1][RTW89_ETSI][46] = 16, [1][1][RTW89_MKK][46] = 127, @@ -49819,6 +50349,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MEXICO][46] = 76, [1][1][RTW89_CN][46] = 62, [1][1][RTW89_QATAR][46] = 16, + [1][1][RTW89_UK][46] = 22, [2][0][RTW89_FCC][0] = 74, [2][0][RTW89_ETSI][0] = 46, [2][0][RTW89_MKK][0] = 50, @@ -49830,6 
+50361,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][0] = 62, [2][0][RTW89_CN][0] = 46, [2][0][RTW89_QATAR][0] = 46, + [2][0][RTW89_UK][0] = 46, [2][0][RTW89_FCC][2] = 74, [2][0][RTW89_ETSI][2] = 46, [2][0][RTW89_MKK][2] = 50, @@ -49841,6 +50373,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][2] = 62, [2][0][RTW89_CN][2] = 46, [2][0][RTW89_QATAR][2] = 46, + [2][0][RTW89_UK][2] = 46, [2][0][RTW89_FCC][4] = 74, [2][0][RTW89_ETSI][4] = 46, [2][0][RTW89_MKK][4] = 50, @@ -49852,6 +50385,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][4] = 62, [2][0][RTW89_CN][4] = 46, [2][0][RTW89_QATAR][4] = 46, + [2][0][RTW89_UK][4] = 46, [2][0][RTW89_FCC][6] = 74, [2][0][RTW89_ETSI][6] = 46, [2][0][RTW89_MKK][6] = 50, @@ -49863,6 +50397,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][6] = 62, [2][0][RTW89_CN][6] = 46, [2][0][RTW89_QATAR][6] = 46, + [2][0][RTW89_UK][6] = 46, [2][0][RTW89_FCC][8] = 74, [2][0][RTW89_ETSI][8] = 46, [2][0][RTW89_MKK][8] = 50, @@ -49874,6 +50409,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][8] = 74, [2][0][RTW89_CN][8] = 46, [2][0][RTW89_QATAR][8] = 46, + [2][0][RTW89_UK][8] = 46, [2][0][RTW89_FCC][10] = 74, [2][0][RTW89_ETSI][10] = 46, [2][0][RTW89_MKK][10] = 50, @@ -49885,6 +50421,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][10] = 74, [2][0][RTW89_CN][10] = 46, [2][0][RTW89_QATAR][10] = 46, + [2][0][RTW89_UK][10] = 46, [2][0][RTW89_FCC][12] = 74, [2][0][RTW89_ETSI][12] = 46, [2][0][RTW89_MKK][12] = 50, @@ -49896,6 +50433,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][12] = 74, [2][0][RTW89_CN][12] = 46, [2][0][RTW89_QATAR][12] = 46, + [2][0][RTW89_UK][12] = 46, [2][0][RTW89_FCC][14] = 74, [2][0][RTW89_ETSI][14] = 46, [2][0][RTW89_MKK][14] = 50, @@ -49907,6 +50445,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][14] = 74, [2][0][RTW89_CN][14] = 46, [2][0][RTW89_QATAR][14] = 46, + [2][0][RTW89_UK][14] = 46, [2][0][RTW89_FCC][15] = 74, [2][0][RTW89_ETSI][15] = 46, [2][0][RTW89_MKK][15] = 70, @@ -49918,6 +50457,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][15] = 74, [2][0][RTW89_CN][15] = 127, [2][0][RTW89_QATAR][15] = 46, + [2][0][RTW89_UK][15] = 46, [2][0][RTW89_FCC][17] = 74, [2][0][RTW89_ETSI][17] = 46, [2][0][RTW89_MKK][17] = 70, @@ -49929,6 +50469,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][17] = 74, [2][0][RTW89_CN][17] = 127, [2][0][RTW89_QATAR][17] = 46, + [2][0][RTW89_UK][17] = 46, [2][0][RTW89_FCC][19] = 74, [2][0][RTW89_ETSI][19] = 46, [2][0][RTW89_MKK][19] = 70, @@ -49940,6 +50481,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][19] = 74, [2][0][RTW89_CN][19] = 127, [2][0][RTW89_QATAR][19] = 46, + [2][0][RTW89_UK][19] = 46, [2][0][RTW89_FCC][21] = 74, [2][0][RTW89_ETSI][21] = 46, [2][0][RTW89_MKK][21] = 70, @@ -49951,6 +50493,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][21] = 74, [2][0][RTW89_CN][21] = 127, [2][0][RTW89_QATAR][21] = 46, + [2][0][RTW89_UK][21] = 46, [2][0][RTW89_FCC][23] = 74, [2][0][RTW89_ETSI][23] = 46, [2][0][RTW89_MKK][23] = 70, @@ -49962,6 +50505,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[2][0][RTW89_MEXICO][23] = 74, [2][0][RTW89_CN][23] = 127, [2][0][RTW89_QATAR][23] = 46, + [2][0][RTW89_UK][23] = 46, [2][0][RTW89_FCC][25] = 74, [2][0][RTW89_ETSI][25] = 46, [2][0][RTW89_MKK][25] = 70, @@ -49973,6 +50517,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][25] = 74, [2][0][RTW89_CN][25] = 127, [2][0][RTW89_QATAR][25] = 46, + [2][0][RTW89_UK][25] = 46, [2][0][RTW89_FCC][27] = 74, [2][0][RTW89_ETSI][27] = 46, [2][0][RTW89_MKK][27] = 70, @@ -49984,6 +50529,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][27] = 74, [2][0][RTW89_CN][27] = 127, [2][0][RTW89_QATAR][27] = 46, + [2][0][RTW89_UK][27] = 46, [2][0][RTW89_FCC][29] = 74, [2][0][RTW89_ETSI][29] = 46, [2][0][RTW89_MKK][29] = 70, @@ -49995,6 +50541,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][29] = 74, [2][0][RTW89_CN][29] = 127, [2][0][RTW89_QATAR][29] = 46, + [2][0][RTW89_UK][29] = 46, [2][0][RTW89_FCC][31] = 74, [2][0][RTW89_ETSI][31] = 46, [2][0][RTW89_MKK][31] = 70, @@ -50006,6 +50553,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][31] = 74, [2][0][RTW89_CN][31] = 127, [2][0][RTW89_QATAR][31] = 46, + [2][0][RTW89_UK][31] = 46, [2][0][RTW89_FCC][33] = 74, [2][0][RTW89_ETSI][33] = 46, [2][0][RTW89_MKK][33] = 70, @@ -50017,6 +50565,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][33] = 74, [2][0][RTW89_CN][33] = 127, [2][0][RTW89_QATAR][33] = 46, + [2][0][RTW89_UK][33] = 46, [2][0][RTW89_FCC][35] = 74, [2][0][RTW89_ETSI][35] = 46, [2][0][RTW89_MKK][35] = 70, @@ -50028,6 +50577,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][35] = 74, [2][0][RTW89_CN][35] = 127, [2][0][RTW89_QATAR][35] = 46, + [2][0][RTW89_UK][35] = 46, [2][0][RTW89_FCC][37] = 74, [2][0][RTW89_ETSI][37] = 127, [2][0][RTW89_MKK][37] = 70, @@ -50039,6 +50589,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][37] = 74, [2][0][RTW89_CN][37] = 127, [2][0][RTW89_QATAR][37] = 127, + [2][0][RTW89_UK][37] = 74, [2][0][RTW89_FCC][38] = 76, [2][0][RTW89_ETSI][38] = 28, [2][0][RTW89_MKK][38] = 127, @@ -50050,6 +50601,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][38] = 76, [2][0][RTW89_CN][38] = 76, [2][0][RTW89_QATAR][38] = 28, + [2][0][RTW89_UK][38] = 44, [2][0][RTW89_FCC][40] = 76, [2][0][RTW89_ETSI][40] = 28, [2][0][RTW89_MKK][40] = 127, @@ -50061,6 +50613,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][40] = 76, [2][0][RTW89_CN][40] = 76, [2][0][RTW89_QATAR][40] = 28, + [2][0][RTW89_UK][40] = 44, [2][0][RTW89_FCC][42] = 76, [2][0][RTW89_ETSI][42] = 28, [2][0][RTW89_MKK][42] = 127, @@ -50072,6 +50625,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][42] = 76, [2][0][RTW89_CN][42] = 76, [2][0][RTW89_QATAR][42] = 28, + [2][0][RTW89_UK][42] = 44, [2][0][RTW89_FCC][44] = 76, [2][0][RTW89_ETSI][44] = 28, [2][0][RTW89_MKK][44] = 127, @@ -50083,6 +50637,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][44] = 76, [2][0][RTW89_CN][44] = 76, [2][0][RTW89_QATAR][44] = 28, + [2][0][RTW89_UK][44] = 44, [2][0][RTW89_FCC][46] = 76, [2][0][RTW89_ETSI][46] = 28, [2][0][RTW89_MKK][46] = 127, @@ -50094,6 +50649,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MEXICO][46] = 76, 
[2][0][RTW89_CN][46] = 76, [2][0][RTW89_QATAR][46] = 28, + [2][0][RTW89_UK][46] = 44, [2][1][RTW89_FCC][0] = 58, [2][1][RTW89_ETSI][0] = 32, [2][1][RTW89_MKK][0] = 38, @@ -50105,6 +50661,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][0] = 50, [2][1][RTW89_CN][0] = 32, [2][1][RTW89_QATAR][0] = 32, + [2][1][RTW89_UK][0] = 32, [2][1][RTW89_FCC][2] = 58, [2][1][RTW89_ETSI][2] = 32, [2][1][RTW89_MKK][2] = 38, @@ -50116,6 +50673,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][2] = 50, [2][1][RTW89_CN][2] = 32, [2][1][RTW89_QATAR][2] = 32, + [2][1][RTW89_UK][2] = 32, [2][1][RTW89_FCC][4] = 58, [2][1][RTW89_ETSI][4] = 32, [2][1][RTW89_MKK][4] = 38, @@ -50127,6 +50685,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][4] = 50, [2][1][RTW89_CN][4] = 32, [2][1][RTW89_QATAR][4] = 32, + [2][1][RTW89_UK][4] = 32, [2][1][RTW89_FCC][6] = 58, [2][1][RTW89_ETSI][6] = 32, [2][1][RTW89_MKK][6] = 38, @@ -50138,6 +50697,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][6] = 50, [2][1][RTW89_CN][6] = 32, [2][1][RTW89_QATAR][6] = 32, + [2][1][RTW89_UK][6] = 32, [2][1][RTW89_FCC][8] = 58, [2][1][RTW89_ETSI][8] = 32, [2][1][RTW89_MKK][8] = 38, @@ -50149,6 +50709,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][8] = 58, [2][1][RTW89_CN][8] = 32, [2][1][RTW89_QATAR][8] = 32, + [2][1][RTW89_UK][8] = 32, [2][1][RTW89_FCC][10] = 58, [2][1][RTW89_ETSI][10] = 32, [2][1][RTW89_MKK][10] = 38, @@ -50160,6 +50721,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][10] = 58, [2][1][RTW89_CN][10] = 32, [2][1][RTW89_QATAR][10] = 32, + [2][1][RTW89_UK][10] = 32, [2][1][RTW89_FCC][12] = 58, [2][1][RTW89_ETSI][12] = 32, [2][1][RTW89_MKK][12] = 38, @@ -50171,6 +50733,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][12] = 58, [2][1][RTW89_CN][12] = 32, [2][1][RTW89_QATAR][12] = 32, + [2][1][RTW89_UK][12] = 32, [2][1][RTW89_FCC][14] = 58, [2][1][RTW89_ETSI][14] = 32, [2][1][RTW89_MKK][14] = 38, @@ -50182,6 +50745,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][14] = 58, [2][1][RTW89_CN][14] = 32, [2][1][RTW89_QATAR][14] = 32, + [2][1][RTW89_UK][14] = 32, [2][1][RTW89_FCC][15] = 58, [2][1][RTW89_ETSI][15] = 32, [2][1][RTW89_MKK][15] = 58, @@ -50193,6 +50757,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][15] = 58, [2][1][RTW89_CN][15] = 127, [2][1][RTW89_QATAR][15] = 32, + [2][1][RTW89_UK][15] = 32, [2][1][RTW89_FCC][17] = 58, [2][1][RTW89_ETSI][17] = 32, [2][1][RTW89_MKK][17] = 58, @@ -50204,6 +50769,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][17] = 58, [2][1][RTW89_CN][17] = 127, [2][1][RTW89_QATAR][17] = 32, + [2][1][RTW89_UK][17] = 32, [2][1][RTW89_FCC][19] = 58, [2][1][RTW89_ETSI][19] = 32, [2][1][RTW89_MKK][19] = 58, @@ -50215,6 +50781,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][19] = 58, [2][1][RTW89_CN][19] = 127, [2][1][RTW89_QATAR][19] = 32, + [2][1][RTW89_UK][19] = 32, [2][1][RTW89_FCC][21] = 58, [2][1][RTW89_ETSI][21] = 32, [2][1][RTW89_MKK][21] = 58, @@ -50226,6 +50793,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][21] = 58, [2][1][RTW89_CN][21] = 127, [2][1][RTW89_QATAR][21] = 32, + [2][1][RTW89_UK][21] = 
32, [2][1][RTW89_FCC][23] = 58, [2][1][RTW89_ETSI][23] = 32, [2][1][RTW89_MKK][23] = 58, @@ -50237,6 +50805,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][23] = 58, [2][1][RTW89_CN][23] = 127, [2][1][RTW89_QATAR][23] = 32, + [2][1][RTW89_UK][23] = 32, [2][1][RTW89_FCC][25] = 58, [2][1][RTW89_ETSI][25] = 32, [2][1][RTW89_MKK][25] = 58, @@ -50248,6 +50817,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][25] = 58, [2][1][RTW89_CN][25] = 127, [2][1][RTW89_QATAR][25] = 32, + [2][1][RTW89_UK][25] = 32, [2][1][RTW89_FCC][27] = 58, [2][1][RTW89_ETSI][27] = 32, [2][1][RTW89_MKK][27] = 58, @@ -50259,6 +50829,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][27] = 58, [2][1][RTW89_CN][27] = 127, [2][1][RTW89_QATAR][27] = 32, + [2][1][RTW89_UK][27] = 32, [2][1][RTW89_FCC][29] = 58, [2][1][RTW89_ETSI][29] = 32, [2][1][RTW89_MKK][29] = 58, @@ -50270,6 +50841,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][29] = 58, [2][1][RTW89_CN][29] = 127, [2][1][RTW89_QATAR][29] = 32, + [2][1][RTW89_UK][29] = 32, [2][1][RTW89_FCC][31] = 58, [2][1][RTW89_ETSI][31] = 32, [2][1][RTW89_MKK][31] = 58, @@ -50281,6 +50853,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][31] = 58, [2][1][RTW89_CN][31] = 127, [2][1][RTW89_QATAR][31] = 32, + [2][1][RTW89_UK][31] = 32, [2][1][RTW89_FCC][33] = 58, [2][1][RTW89_ETSI][33] = 32, [2][1][RTW89_MKK][33] = 58, @@ -50292,6 +50865,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][33] = 58, [2][1][RTW89_CN][33] = 127, [2][1][RTW89_QATAR][33] = 32, + [2][1][RTW89_UK][33] = 32, [2][1][RTW89_FCC][35] = 58, [2][1][RTW89_ETSI][35] = 32, [2][1][RTW89_MKK][35] = 58, @@ -50303,6 +50877,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][35] = 58, [2][1][RTW89_CN][35] = 127, [2][1][RTW89_QATAR][35] = 32, + [2][1][RTW89_UK][35] = 32, [2][1][RTW89_FCC][37] = 58, [2][1][RTW89_ETSI][37] = 127, [2][1][RTW89_MKK][37] = 58, @@ -50314,6 +50889,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][37] = 58, [2][1][RTW89_CN][37] = 127, [2][1][RTW89_QATAR][37] = 127, + [2][1][RTW89_UK][37] = 62, [2][1][RTW89_FCC][38] = 76, [2][1][RTW89_ETSI][38] = 16, [2][1][RTW89_MKK][38] = 127, @@ -50325,6 +50901,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][38] = 76, [2][1][RTW89_CN][38] = 64, [2][1][RTW89_QATAR][38] = 16, + [2][1][RTW89_UK][38] = 32, [2][1][RTW89_FCC][40] = 76, [2][1][RTW89_ETSI][40] = 16, [2][1][RTW89_MKK][40] = 127, @@ -50336,6 +50913,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][40] = 76, [2][1][RTW89_CN][40] = 64, [2][1][RTW89_QATAR][40] = 16, + [2][1][RTW89_UK][40] = 32, [2][1][RTW89_FCC][42] = 76, [2][1][RTW89_ETSI][42] = 16, [2][1][RTW89_MKK][42] = 127, @@ -50347,6 +50925,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][42] = 76, [2][1][RTW89_CN][42] = 64, [2][1][RTW89_QATAR][42] = 16, + [2][1][RTW89_UK][42] = 32, [2][1][RTW89_FCC][44] = 76, [2][1][RTW89_ETSI][44] = 16, [2][1][RTW89_MKK][44] = 127, @@ -50358,6 +50937,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][44] = 76, [2][1][RTW89_CN][44] = 64, [2][1][RTW89_QATAR][44] = 16, + [2][1][RTW89_UK][44] = 32, [2][1][RTW89_FCC][46] = 76, 
[2][1][RTW89_ETSI][46] = 16, [2][1][RTW89_MKK][46] = 127, @@ -50369,6 +50949,7 @@ const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_MEXICO][46] = 76, [2][1][RTW89_CN][46] = 64, [2][1][RTW89_QATAR][46] = 16, + [2][1][RTW89_UK][46] = 32, }; #define DECLARE_DIG_TABLE(name) \ -- cgit v1.2.3 From 034307088cb2bdf9b2f79cdc3ddc2be22c89bbfd Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Fri, 18 Mar 2022 10:32:06 +0800 Subject: rtw89: regd: consider 6G band Wrap regd debug dump into a macro and add dump for 6G band field. Extend the macro used to configure regd table to account for multiple bands beyond 2G and 5G. And the follow-up commit will configure the corresponding values for 6G band into regd table. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/regd.c | 37 +++++++++++++++---------------- 1 file changed, 18 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 4c37e590e43c..4d4bc201485b 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -5,10 +5,9 @@ #include "debug.h" #include "ps.h" -#define COUNTRY_REGD(_alpha2, _txpwr_regd_2g, _txpwr_regd_5g) \ +#define COUNTRY_REGD(_alpha2, _txpwr_regd...) \ {.alpha2 = (_alpha2), \ - .txpwr_regd[RTW89_BAND_2G] = (_txpwr_regd_2g), \ - .txpwr_regd[RTW89_BAND_5G] = (_txpwr_regd_5g) \ + .txpwr_regd = {_txpwr_regd}, \ } static const struct rtw89_regulatory rtw89_ww_regd = @@ -272,6 +271,17 @@ static bool rtw89_regd_is_ww(const struct rtw89_regulatory *regd) return regd == &rtw89_ww_regd; } +#define rtw89_debug_regd(_dev, _regd, _desc, _argv...) 
\ +do { \ + typeof(_regd) __r = _regd; \ + rtw89_debug(_dev, RTW89_DBG_REGD, _desc \ + ": %c%c: mapping txregd to {2g: %d, 5g: %d, 6g: %d}\n", \ + ##_argv, __r->alpha2[0], __r->alpha2[1], \ + __r->txpwr_regd[RTW89_BAND_2G], \ + __r->txpwr_regd[RTW89_BAND_5G], \ + __r->txpwr_regd[RTW89_BAND_6G]); \ +} while (0) + int rtw89_regd_init(struct rtw89_dev *rtwdev, void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)) @@ -294,20 +304,12 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev, if (ret) rtw89_warn(rtwdev, "failed to hint regulatory:%d\n", ret); - rtw89_debug(rtwdev, RTW89_DBG_REGD, - "efuse country code %c%c, mapping to 2g txregd %d, 5g txregd %d\n", - rtwdev->efuse.country_code[0], rtwdev->efuse.country_code[1], - rtwdev->regd->txpwr_regd[RTW89_BAND_2G], - rtwdev->regd->txpwr_regd[RTW89_BAND_5G]); - + rtw89_debug_regd(rtwdev, chip_regd, "efuse country code"); return 0; } - rtw89_debug(rtwdev, RTW89_DBG_REGD, - "worldwide roaming chip, follow the setting of stack(%c%c), mapping to 2g txregd %d, 5g txregd %d\n", - rtwdev->regd->alpha2[0], rtwdev->regd->alpha2[1], - rtwdev->regd->txpwr_regd[RTW89_BAND_2G], - rtwdev->regd->txpwr_regd[RTW89_BAND_5G]); + rtw89_debug_regd(rtwdev, rtwdev->regd, + "worldwide roaming chip, follow the setting of stack"); return 0; } @@ -341,11 +343,8 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request goto exit; } rtw89_regd_notifier_apply(rtwdev, wiphy, request); - rtw89_debug(rtwdev, RTW89_DBG_REGD, - "get alpha2 %c%c from initiator %d, mapping to 2g txregd %d, 5g txregd %d\n", - request->alpha2[0], request->alpha2[1], request->initiator, - rtwdev->regd->txpwr_regd[RTW89_BAND_2G], - rtwdev->regd->txpwr_regd[RTW89_BAND_5G]); + rtw89_debug_regd(rtwdev, rtwdev->regd, "get from initiator %d, alpha2", + request->initiator); rtw89_chip_set_txpwr(rtwdev); -- cgit v1.2.3 From 1ae30c37ecf11781efea733e84927f4480e12ebd Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Fri, 18 Mar 2022 10:32:07 +0800 Subject: rtw89: regd: update mapping table to R59-R32 Update notes: Configure rtw89_regulatory for 6G band according to country. Adjust country GB to use RTW89_UK entry on all bands. 
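As background for the per-band table below, here is a minimal, standalone sketch of the trailing-variadic COUNTRY_REGD() pattern introduced in the previous patch. The struct, enum, and entries are simplified stand-ins rather than the driver's real definitions, and the named-vararg macro form is a GNU extension as used in the kernel; bands left unspecified simply stay zero-initialized.

#include <stdio.h>

enum band { BAND_2G, BAND_5G, BAND_6G, BAND_NUM };
enum regd { REGD_WW, REGD_ETSI, REGD_UK, REGD_NA };

struct regulatory {
	char alpha2[3];
	enum regd txpwr_regd[BAND_NUM];
};

/* Trailing named-vararg initializer (GNU extension): bands not listed
 * stay zero-initialized (REGD_WW in this sketch). */
#define COUNTRY_REGD(_alpha2, _txpwr_regd...) \
	{ .alpha2 = _alpha2, .txpwr_regd = { _txpwr_regd } }

static const struct regulatory regd_map[] = {
	COUNTRY_REGD("GB", REGD_UK, REGD_UK, REGD_UK), /* 2G, 5G, 6G */
	COUNTRY_REGD("DE", REGD_ETSI, REGD_ETSI),      /* 6G slot left 0 */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(regd_map) / sizeof(regd_map[0]); i++)
		printf("%s: 2g=%d 5g=%d 6g=%d\n", regd_map[i].alpha2,
		       regd_map[i].txpwr_regd[BAND_2G],
		       regd_map[i].txpwr_regd[BAND_5G],
		       regd_map[i].txpwr_regd[BAND_6G]);
	return 0;
}
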
Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/regd.c | 476 +++++++++++++++--------------- 1 file changed, 238 insertions(+), 238 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 4d4bc201485b..20c7afd3e70f 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -14,244 +14,244 @@ static const struct rtw89_regulatory rtw89_ww_regd = COUNTRY_REGD("00", RTW89_WW, RTW89_WW); static const struct rtw89_regulatory rtw89_regd_map[] = { - COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO), - COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE), - COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO), - COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GB", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI), - 
COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR), - COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE), - COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CN", RTW89_CN, RTW89_CN), - COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC), - COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CA", RTW89_IC, RTW89_IC), - COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK), - COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TD", RTW89_ETSI, 
RTW89_ETSI), - COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC), - COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GS", RTW89_ETSI, 
RTW89_ETSI), - COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA), + COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE), + COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_NA), + COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC), + COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + 
COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK), + COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR), + COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE), + COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), + COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN), + COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC), + COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_NA), + COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_NA), + COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC), + COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_NA), + COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("TN", 
RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA), + COUNTRY_REGD("CC", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GY", RTW89_NCC, RTW89_NCC, RTW89_NA), + COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA), + COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("IM", 
RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA), + COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA), + COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA), + COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA), + COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_NA), + COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("PS", 
RTW89_ETSI, RTW89_ETSI, RTW89_NA), }; static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2) -- cgit v1.2.3 From bed4045ffb9c7453d2b6627f5d29b27973f1f653 Mon Sep 17 00:00:00 2001 From: Johnson Lin Date: Fri, 18 Mar 2022 10:32:08 +0800 Subject: rtw89: packed IGI configuration flow into function for DIG feature Refinement of DIG flow, a mechanism to adjust Rx gain for better Rx performance, by packing IGI(initial gain index) configuration flow into one function. Signed-off-by: Johnson Lin Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/phy.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index c9a04ca0b1f2..c27148acfa04 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -3180,6 +3180,21 @@ static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable); } +static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev) +{ + struct rtw89_dig_info *dig = &rtwdev->dig; + + if (dig->force_gaincode_idx_en) { + rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); + rtw89_debug(rtwdev, RTW89_DBG_DIG, + "Force gaincode index enabled.\n"); + } else { + rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi, + &dig->cur_gaincode); + rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode); + } +} + static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, bool enable) { @@ -3294,15 +3309,7 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev) dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min, dig->igi_fa_rssi); - if (dig->force_gaincode_idx_en) { - rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); - rtw89_debug(rtwdev, RTW89_DBG_DIG, - "Force gaincode index enabled.\n"); - } else { - rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi, - &dig->cur_gaincode); - rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode); - } + rtw89_phy_dig_config_igi(rtwdev); rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en); -- cgit v1.2.3 From 1e6f0d2a677a6bd0f719158f9a1453ce7b11bb0c Mon Sep 17 00:00:00 2001 From: Johnson Lin Date: Fri, 18 Mar 2022 10:32:09 +0800 Subject: rtw89: disabled IGI configuration for unsupported hardware Bypass IGI, known as Rx gain, adjustment flow for incompatible hardware architectures. 
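For orientation, a standalone sketch of the pattern these two DIG patches follow: compute a per-chip capability once during setup, then gate the feature path on it. The names and version check below only loosely mirror the diff and are assumptions, not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

enum chip_id { CHIP_8852A, CHIP_8852C };

struct hal {
	enum chip_id chip_id;
	unsigned int cv;	/* chip cut/version, simplified to a number */
	bool support_igi;
};

static void setup_phycap(struct hal *hal)
{
	/* Assumption: only early 8852A cuts keep driver-side IGI tuning. */
	hal->support_igi = hal->chip_id == CHIP_8852A && hal->cv <= 1;
}

static void phy_dig(const struct hal *hal)
{
	if (hal->support_igi)
		printf("configure IGI (initial gain index)\n");
	else
		printf("bypass driver-side IGI configuration\n");
}

int main(void)
{
	struct hal hal = { .chip_id = CHIP_8852A, .cv = 0 };

	setup_phycap(&hal);
	phy_dig(&hal);
	return 0;
}
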
Signed-off-by: Johnson Lin Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 2 ++ drivers/net/wireless/realtek/rtw89/core.h | 1 + drivers/net/wireless/realtek/rtw89/phy.c | 3 ++- 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index c61061358980..bce4834f18ec 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -2752,6 +2752,8 @@ static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev) rtwdev->hal.support_cckpd = !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) && !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV); + rtwdev->hal.support_igi = + rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV; } static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 5900cbc0efd9..725484be2b62 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2489,6 +2489,7 @@ struct rtw89_hal { u8 tx_nss; u8 rx_nss; bool support_cckpd; + bool support_igi; }; #define RTW89_MAX_MAC_ID_NUM 128 diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index c27148acfa04..193afb1f53f5 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -3309,7 +3309,8 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev) dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min, dig->igi_fa_rssi); - rtw89_phy_dig_config_igi(rtwdev); + if (rtwdev->hal.support_igi) + rtw89_phy_dig_config_igi(rtwdev); rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en); -- cgit v1.2.3 From a95bd62ec01df487b18f57eacc4c38d6d73a059a Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 Mar 2022 10:32:10 +0800 Subject: rtw89: add chip_info::h2c_desc_size/fill_txdesc_fwcmd to support new chips 8852A and 8852C use different H2C header and size, so add h2c_desc_size to allocate different header size and fill content by fill_txdesc_fwcmd. 
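A standalone sketch of the indirection described above, with simplified stand-in types: the chip descriptor advertises how many header bytes to reserve (h2c_desc_size) and supplies a fill hook, so the common H2C submit path stays layout-agnostic. The field layouts, sizes, and bit positions here are illustrative assumptions only, not the real descriptor formats.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct desc_info { uint16_t pkt_size; int fw_dl; };

struct chip_info {
	size_t h2c_desc_size;	/* header bytes reserved in front of the cmd */
	void (*fill_txdesc_fwcmd)(const struct desc_info *info, void *txdesc);
};

static void fill_fwcmd_v0(const struct desc_info *info, void *txdesc)
{
	/* stand-in for an 8852A-style txwd_body header */
	memcpy(txdesc, &info->pkt_size, sizeof(info->pkt_size));
}

static void fill_fwcmd_v1(const struct desc_info *info, void *txdesc)
{
	/* stand-in for an 8852C-style short descriptor: one packed dword */
	uint32_t dword0 = (uint32_t)info->pkt_size | (info->fw_dl ? 1u << 24 : 0);

	memcpy(txdesc, &dword0, sizeof(dword0));
}

static void submit_fwcmd(const struct chip_info *chip,
			 const struct desc_info *info, uint8_t *buf)
{
	memset(buf, 0, chip->h2c_desc_size);	/* reserve chip-sized header */
	chip->fill_txdesc_fwcmd(info, buf);	/* chip-specific encoding */
}

int main(void)
{
	static const struct chip_info chip_v0 = { 8, fill_fwcmd_v0 };
	static const struct chip_info chip_v1 = { 4, fill_fwcmd_v1 };
	struct desc_info info = { .pkt_size = 64, .fw_dl = 0 };
	uint8_t buf[16] = { 0 };
	uint32_t dword0;

	submit_fwcmd(&chip_v0, &info, buf);
	submit_fwcmd(&chip_v1, &info, buf);
	memcpy(&dword0, buf, sizeof(dword0));
	printf("first dword after v1 fill: 0x%08" PRIx32 "\n", dword0);
	return 0;
}
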
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/cam.c | 2 +- drivers/net/wireless/realtek/rtw89/core.c | 20 +++++++ drivers/net/wireless/realtek/rtw89/core.h | 19 ++++++ drivers/net/wireless/realtek/rtw89/fw.c | 74 ++++++++++++----------- drivers/net/wireless/realtek/rtw89/fw.h | 4 +- drivers/net/wireless/realtek/rtw89/pci.c | 10 ++-- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 2 + drivers/net/wireless/realtek/rtw89/txrx.h | 86 +++++++++++++++++++++++++++ 9 files changed, 176 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 26bef9fdd205..34df3c07c55c 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -18,7 +18,7 @@ rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev, u8 *cmd; int i, j; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(cmd_len); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len); if (!skb) return NULL; diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index bce4834f18ec..a5c13fd7d8a0 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -977,6 +977,26 @@ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_core_fill_txdesc); +static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) | + FIELD_PREP(AX_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ? + RTW89_CORE_RX_TYPE_FWDL : + RTW89_CORE_RX_TYPE_H2C); + + return cpu_to_le32(dword); +} + +void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc) +{ + struct rtw89_rxdesc_short *txwd_v1 = (struct rtw89_rxdesc_short *)txdesc; + + txwd_v1->dword0 = rtw89_build_txwd_fwcmd0_v1(desc_info); +} +EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1); + static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev, struct sk_buff *skb, struct rtw89_rx_phy_ppdu *phy_ppdu) diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 725484be2b62..c17756ff5476 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -117,6 +117,8 @@ enum rtw89_core_rx_type { RTW89_CORE_RX_TYPE_C2H = 10, RTW89_CORE_RX_TYPE_CSI = 11, RTW89_CORE_RX_TYPE_CQI = 12, + RTW89_CORE_RX_TYPE_H2C = 13, + RTW89_CORE_RX_TYPE_FWDL = 14, }; enum rtw89_txq_flags { @@ -2076,6 +2078,9 @@ struct rtw89_chip_ops { s8 pw_ofst, enum rtw89_mac_idx mac_idx); int (*pwr_on_func)(struct rtw89_dev *rtwdev); int (*pwr_off_func)(struct rtw89_dev *rtwdev); + void (*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl); int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex_gnt *gnt_cfg); @@ -2344,6 +2349,7 @@ struct rtw89_chip_info { u8 ps_mode_supported; u32 hci_func_en_addr; + u32 h2c_desc_size; u32 h2c_ctrl_reg; const u32 *h2c_regs; u32 c2h_ctrl_reg; @@ -3507,6 +3513,16 @@ static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) chip->ops->ctrl_btg(rtwdev, btg); } +static inline +void rtw89_chip_fill_txdesc_fwcmd(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info 
*desc_info, + void *txdesc) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + chip->ops->fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); +} + static inline void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex_gnt *gnt_cfg) @@ -3580,6 +3596,9 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel); void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); +void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); void rtw89_core_rx(struct rtw89_dev *rtwdev, struct rtw89_rx_desc_info *desc_info, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 2c9470616a1b..5985b40950bc 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -10,31 +10,33 @@ #include "phy.h" #include "reg.h" -static struct sk_buff *rtw89_fw_h2c_alloc_skb(u32 len, bool header) +static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, + bool header) { struct sk_buff *skb; u32 header_len = 0; + u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; if (header) header_len = H2C_HEADER_LEN; - skb = dev_alloc_skb(len + header_len + 24); + skb = dev_alloc_skb(len + header_len + h2c_desc_size); if (!skb) return NULL; - skb_reserve(skb, header_len + 24); + skb_reserve(skb, header_len + h2c_desc_size); memset(skb->data, 0, len); return skb; } -struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len) +struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) { - return rtw89_fw_h2c_alloc_skb(len, true); + return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); } -struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len) +struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) { - return rtw89_fw_h2c_alloc_skb(len, false); + return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); } static u8 _fw_get_rdy(struct rtw89_dev *rtwdev) @@ -309,7 +311,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l struct sk_buff *skb; u32 ret = 0; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); return -ENOMEM; @@ -375,7 +377,7 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, else pkt_len = residue_len; - skb = rtw89_fw_h2c_alloc_skb_no_hdr(pkt_len); + skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); return -ENOMEM; @@ -570,7 +572,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CAM_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); return -ENOMEM; @@ -619,7 +621,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, return 0; } - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); return -ENOMEM; @@ -665,7 +667,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) u32 comp = enable ? 
BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LOG_CFG_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); return -ENOMEM; @@ -701,7 +703,7 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid) { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_GENERAL_PKT_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); return -ENOMEM; @@ -738,7 +740,7 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LPS_PARM_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); return -ENOMEM; @@ -784,7 +786,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0; u8 macid = rtwvif->mac_id; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); return -ENOMEM; @@ -894,7 +896,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, if (sta) __get_sta_he_pkt_padding(rtwdev, sta, pads); - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); return -ENOMEM; @@ -945,7 +947,7 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); return -ENOMEM; @@ -997,7 +999,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, } bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(bcn_total_len); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); dev_kfree_skb_any(skb_beacon); @@ -1051,7 +1053,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, self_role = rtwvif->self_role; } - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_ROLE_MAINTAIN_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); return -ENOMEM; @@ -1093,7 +1095,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; } - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); return -ENOMEM; @@ -1137,7 +1139,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, u8 len = sizeof(struct rtw89_fw_macid_pause_grp); struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); return -ENOMEM; @@ -1170,7 +1172,7 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_EDCA_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); return -ENOMEM; @@ -1205,7 +1207,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_OFLD_CFG_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); return -ENOMEM; @@ -1235,7 +1237,7 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi struct sk_buff *skb; u8 *cmd; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_RA_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); return -ENOMEM; @@ -1306,7 +1308,7 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) struct sk_buff *skb; u8 *cmd; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_INIT); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); return -ENOMEM; @@ -1365,7 +1367,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) u8 *cmd; int i; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_ROLE); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); return -ENOMEM; @@ -1433,7 +1435,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) struct sk_buff *skb; u8 *cmd; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_CTRL); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); return -ENOMEM; @@ -1475,7 +1477,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) struct sk_buff *skb; u8 *cmd; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_RFK); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); return -ENOMEM; @@ -1515,7 +1517,7 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) struct sk_buff *skb; u8 *cmd; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); return -ENOMEM; @@ -1557,7 +1559,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, *id = alloc_id; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_PKT_OFLD + skb_ofld->len); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_LEN_PKT_OFLD + skb_ofld->len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); return -ENOMEM; @@ -1596,7 +1598,7 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; u8 *cmd; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(skb_len); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); return -ENOMEM; @@ -1660,7 +1662,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, struct sk_buff *skb; u8 *cmd; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_SCAN_OFFLOAD); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); return -ENOMEM; @@ -1709,7 +1711,7 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, u8 class = info->rf_path == RF_PATH_A ? H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); return -ENOMEM; @@ -1738,7 +1740,7 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(len); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); return -ENOMEM; @@ -1765,7 +1767,7 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_no_hdr(len); + skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); return -ENOMEM; @@ -2295,7 +2297,7 @@ int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) { struct sk_buff *skb; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_FW_CPU_EXCEPTION_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for fw cpu exception\n"); diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 24ab249a8ece..2a010154a8e8 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -2277,8 +2277,8 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, struct rtw89_lps_parm *lps_param); -struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len); -struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len); +struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len); +struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len); int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, struct rtw89_mac_h2c_info *h2c_info, struct rtw89_mac_c2h_info *c2h_info); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 32e8283e22f3..9335fba28fc1 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1043,16 +1043,18 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; - struct rtw89_txwd_body *txwd_body; + void *txdesc; + int txdesc_size = chip->h2c_desc_size; struct pci_dev 
*pdev = rtwpci->pdev; struct sk_buff *skb = tx_req->skb; struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); dma_addr_t dma; - txwd_body = (struct rtw89_txwd_body *)skb_push(skb, sizeof(*txwd_body)); - memset(txwd_body, 0, sizeof(*txwd_body)); - rtw89_core_fill_txdesc(rtwdev, desc_info, txwd_body); + txdesc = skb_push(skb, txdesc_size); + memset(txdesc, 0, txdesc_size); + rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, dma)) { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 67aaa05cb751..6aa3d19a7464 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2021,6 +2021,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, .pwr_on_func = NULL, .pwr_off_func = NULL, + .fill_txdesc_fwcmd = rtw89_core_fill_txdesc, .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path, .mac_cfg_gnt = rtw89_mac_cfg_gnt, .stop_sch_tx = rtw89_mac_stop_sch_tx, @@ -2097,6 +2098,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { BIT(RTW89_PS_MODE_CLK_GATED) | BIT(RTW89_PS_MODE_PWR_GATED), .hci_func_en_addr = R_AX_HCI_FUNC_EN, + .h2c_desc_size = sizeof(struct rtw89_txwd_body), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_regs = rtw8852a_h2c_regs, .c2h_ctrl_reg = R_AX_C2HREG_CTRL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 123cc3c4318d..08a9c01a359e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -492,6 +492,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852c_pwr_on_func, .pwr_off_func = rtw8852c_pwr_off_func, + .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1, .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1, .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1, .stop_sch_tx = rtw89_mac_stop_sch_tx_v1, @@ -515,6 +516,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .phycap_addr = 0x590, .phycap_size = 0x60, .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1, + .h2c_desc_size = sizeof(struct rtw89_rxdesc_short), .h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1, .h2c_regs = rtw8852c_h2c_regs, .c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1, diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index 86e3d8b400d6..4e81d6df9368 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -79,6 +79,92 @@ /* TX WD INFO DWORD 5 */ +/* RX WD dword0 */ +#define AX_RXD_RPKT_LEN_MASK GENMASK(13, 0) +#define AX_RXD_SHIFT_MASK GENMASK(15, 14) +#define AX_RXD_WL_HD_IV_LEN_MASK GENMASK(21, 16) +#define AX_RXD_BB_SEL BIT(22) +#define AX_RXD_MAC_INFO_VLD BIT(23) +#define AX_RXD_RPKT_TYPE_MASK GENMASK(27, 24) +#define AX_RXD_DRV_INFO_SIZE_MASK GENMASK(30, 28) +#define AX_RXD_LONG_RXD BIT(31) + +/* RX WD dword1 */ +#define AX_RXD_PPDU_TYPE_MASK GENMASK(3, 0) +#define AX_RXD_PPDU_CNT_MASK GENMASK(6, 4) +#define AX_RXD_SR_EN BIT(7) +#define AX_RXD_USER_ID_MASK GENMASK(15, 8) +#define AX_RXD_USER_ID_v1_MASK GENMASK(13, 8) +#define AX_RXD_RX_DATARATE_MASK GENMASK(24, 16) +#define AX_RXD_RX_GI_LTF_MASK GENMASK(27, 25) +#define AX_RXD_NON_SRG_PPDU BIT(28) +#define AX_RXD_INTER_PPDU BIT(29) +#define AX_RXD_NON_SRG_PPDU_v1 BIT(14) +#define AX_RXD_INTER_PPDU_v1 BIT(15) +#define 
AX_RXD_BW_MASK GENMASK(31, 30) +#define AX_RXD_BW_v1_MASK GENMASK(31, 29) + +/* RX WD dword2 */ +#define AX_RXD_FREERUN_CNT_MASK GENMASK(31, 0) + +/* RX WD dword3 */ +#define AX_RXD_A1_MATCH BIT(0) +#define AX_RXD_SW_DEC BIT(1) +#define AX_RXD_HW_DEC BIT(2) +#define AX_RXD_AMPDU BIT(3) +#define AX_RXD_AMPDU_END_PKT BIT(4) +#define AX_RXD_AMSDU BIT(5) +#define AX_RXD_AMSDU_CUT BIT(6) +#define AX_RXD_LAST_MSDU BIT(7) +#define AX_RXD_BYPASS BIT(8) +#define AX_RXD_CRC32_ERR BIT(9) +#define AX_RXD_ICV_ERR BIT(10) +#define AX_RXD_MAGIC_WAKE BIT(11) +#define AX_RXD_UNICAST_WAKE BIT(12) +#define AX_RXD_PATTERN_WAKE BIT(13) +#define AX_RXD_GET_CH_INFO_MASK GENMASK(15, 14) +#define AX_RXD_PATTERN_IDX_MASK GENMASK(20, 16) +#define AX_RXD_TARGET_IDC_MASK GENMASK(23, 21) +#define AX_RXD_CHKSUM_OFFLOAD_EN BIT(24) +#define AX_RXD_WITH_LLC BIT(25) +#define AX_RXD_RX_STATISTICS BIT(26) + +/* RX WD dword4 */ +#define AX_RXD_TYPE_MASK GENMASK(1, 0) +#define AX_RXD_MC BIT(2) +#define AX_RXD_BC BIT(3) +#define AX_RXD_MD BIT(4) +#define AX_RXD_MF BIT(5) +#define AX_RXD_PWR BIT(6) +#define AX_RXD_QOS BIT(7) +#define AX_RXD_TID_MASK GENMASK(11, 8) +#define AX_RXD_EOSP BIT(12) +#define AX_RXD_HTC BIT(13) +#define AX_RXD_QNULL BIT(14) +#define AX_RXD_SEQ_MASK GENMASK(27, 16) +#define AX_RXD_FRAG_MASK GENMASK(31, 28) + +/* RX WD dword5 */ +#define AX_RXD_SEC_CAM_IDX_MASK GENMASK(7, 0) +#define AX_RXD_ADDR_CAM_MASK GENMASK(15, 8) +#define AX_RXD_MAC_ID_MASK GENMASK(23, 16) +#define AX_RXD_RX_PL_ID_MASK GENMASK(27, 24) +#define AX_RXD_ADDR_CAM_VLD BIT(28) +#define AX_RXD_ADDR_FWD_EN BIT(29) +#define AX_RXD_RX_PL_MATCH BIT(30) + +/* RX WD dword6 */ +#define AX_RXD_MAC_ADDR_MASK GENMASK(31, 0) + +/* RX WD dword7 */ +#define AX_RXD_MAC_ADDR_H_MASK GENMASK(15, 0) +#define AX_RXD_SMART_ANT BIT(16) +#define AX_RXD_SEC_TYPE_MASK GENMASK(20, 17) +#define AX_RXD_HDR_CNV BIT(21) +#define AX_RXD_HDR_OFFSET_MASK GENMASK(26, 22) +#define AX_RXD_BIP_KEYID BIT(27) +#define AX_RXD_BIP_ENC BIT(28) + /* RX DESC helpers */ /* Short Descriptor */ #define RTW89_GET_RXWD_LONG_RXD(rxdesc) \ -- cgit v1.2.3 From 6d5b5d6290ece7fd3652546ebc1ba98236327b5f Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 Mar 2022 10:32:11 +0800 Subject: rtw89: pci: support variant of fill_txaddr_info The txaddr_info is used to fill the DMA address of skb->data. The v1 version can support up to 10 entries, but the maximum size of each entry is 2047, so it fills more than one entry for a large packet, like 3000 bytes.
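For illustration only (this sketch is not part of the patch; ENTRY_MAX and ENTRY_NR_MAX are stand-ins mirroring TXADDR_INFO_LENTHG_V1_MAX = ALIGN_DOWN(BIT(11) - 1, 4) = 2044 and RTW89_TXADDR_INFO_NR_V1 = 10 introduced below), the split rule for a 3000-byte packet works out to one 2044-byte entry followed by one 956-byte entry, with the final entry carrying the "last segment" indication:

#include <stdio.h>

#define ENTRY_MAX	2044	/* stand-in for TXADDR_INFO_LENTHG_V1_MAX */
#define ENTRY_NR_MAX	10	/* stand-in for RTW89_TXADDR_INFO_NR_V1 */

int main(void)
{
	unsigned int remain = 3000;	/* example large packet */
	unsigned int len, n;

	for (n = 0; n < ENTRY_NR_MAX && remain; n++) {
		len = remain >= ENTRY_MAX ? ENTRY_MAX : remain;
		remain -= len;
		/* the entry that drains remain to 0 is the last segment */
		printf("entry %u: len=%u last=%d\n", n, len, remain == 0);
	}
	return 0;
}

Running this prints "entry 0: len=2044 last=0" and "entry 1: len=956 last=1", i.e. two address-info entries for the 3000-byte case.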
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 1 + drivers/net/wireless/realtek/rtw89/pci.c | 64 +++++++++++++++++++++++--- drivers/net/wireless/realtek/rtw89/pci.h | 33 +++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 2 + 5 files changed, 95 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index c17756ff5476..501381bb74ad 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -721,6 +721,7 @@ struct rtw89_tx_desc_info { u8 ampdu_density; u8 ampdu_num; bool sec_en; + u8 addr_info_nr; u8 sec_type; u8 sec_cam_idx; u16 data_rate; diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 9335fba28fc1..48a5feaf2722 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -977,6 +977,58 @@ static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); } +u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, + void *txaddr_info_addr, u32 total_len, + dma_addr_t dma, u8 *add_info_nr) +{ + struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; + + txaddr_info->length = cpu_to_le16(total_len); + txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | + RTW89_PCI_ADDR_NUM(1)); + txaddr_info->dma = cpu_to_le32(dma); + + *add_info_nr = 1; + + return sizeof(*txaddr_info); +} +EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); + +u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, + void *txaddr_info_addr, u32 total_len, + dma_addr_t dma, u8 *add_info_nr) +{ + struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; + u32 remain = total_len; + u32 len; + u16 length_option; + int n; + + for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { + len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 
+ TXADDR_INFO_LENTHG_V1_MAX : remain; + remain -= len; + + length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | + FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | + FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); + txaddr_info->length_opt = cpu_to_le16(length_option); + txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); + txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); + + dma += len; + txaddr_info++; + } + + WARN_ONCE(remain, "length overflow remain=%u total_len=%u", + remain, total_len); + + *add_info_nr = n; + + return n * sizeof(*txaddr_info); +} +EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); + static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, struct rtw89_pci_tx_wd *txwd, @@ -987,7 +1039,7 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, struct rtw89_txwd_body *txwd_body; struct rtw89_txwd_info *txwd_info; struct rtw89_pci_tx_wp_info *txwp_info; - struct rtw89_pci_tx_addr_info_32 *txaddr_info; + void *txaddr_info_addr; struct pci_dev *pdev = rtwpci->pdev; struct sk_buff *skb = tx_req->skb; struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); @@ -1009,7 +1061,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, tx_data->dma = dma; - txaddr_info_len = sizeof(*txaddr_info); txwp_len = sizeof(*txwp_info); txwd_len = sizeof(*txwd_body); txwd_len += en_wd_info ? sizeof(*txwd_info) : 0; @@ -1021,11 +1072,10 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, txwp_info->seq3 = 0; tx_ring->tx_cnt++; - txaddr_info = txwd->vaddr + txwd_len + txwp_len; - txaddr_info->length = cpu_to_le16(skb->len); - txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | - RTW89_PCI_ADDR_NUM(1)); - txaddr_info->dma = cpu_to_le32(dma); + txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; + txaddr_info_len = + rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, + dma, &desc_info->addr_info_nr); txwd->len = txwd_len + txwp_len + txaddr_info_len; diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 2c8030af3e72..a67595b21185 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -448,6 +448,10 @@ struct rtw89_pci_ch_dma_addr_set { struct rtw89_pci_info { const struct rtw89_pci_ch_dma_addr_set *dma_addr_set; + + u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev, + void *txaddr_info_addr, u32 total_len, + dma_addr_t dma, u8 *add_info_nr); }; struct rtw89_pci_bd_ram { @@ -493,6 +497,18 @@ struct rtw89_pci_tx_addr_info_32 { __le32 dma; } __packed; +#define RTW89_TXADDR_INFO_NR_V1 10 + +struct rtw89_pci_tx_addr_info_32_v1 { + __le16 length_opt; +#define B_PCIADDR_LEN_V1_MASK GENMASK(10, 0) +#define B_PCIADDR_HIGH_SEL_V1_MASK GENMASK(14, 11) +#define B_PCIADDR_LS_V1_MASK BIT(15) +#define TXADDR_INFO_LENTHG_V1_MAX ALIGN_DOWN(BIT(11) - 1, 4) + __le16 dma_low_lsb; + __le16 dma_low_msb; +} __packed; + #define RTW89_PCI_RPP_POLLUTED BIT(31) #define RTW89_PCI_RPP_SEQ GENMASK(30, 16) #define RTW89_PCI_RPP_TX_STATUS GENMASK(15, 13) @@ -698,5 +714,22 @@ struct pci_device_id; int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); void rtw89_pci_remove(struct pci_dev *pdev); +u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, + void *txaddr_info_addr, u32 total_len, + dma_addr_t dma, u8 *add_info_nr); +u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, + void *txaddr_info_addr, u32 total_len, + dma_addr_t dma, u8 *add_info_nr); + +static inline +u32 
rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev, + void *txaddr_info_addr, u32 total_len, + dma_addr_t dma, u8 *add_info_nr) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + + return info->fill_txaddr_info(rtwdev, txaddr_info_addr, total_len, + dma, add_info_nr); +} #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 48459aba441d..8ffc0dd90d41 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -10,6 +10,8 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .dma_addr_set = &rtw89_pci_ch_dma_addr_set, + + .fill_txaddr_info = rtw89_pci_fill_txaddr_info, }; static const struct rtw89_driver_info rtw89_8852ae_info = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index e71370585b4d..09794836d5c0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -11,6 +11,8 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, + + .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1, }; static const struct rtw89_driver_info rtw89_8852ce_info = { -- cgit v1.2.3 From f59acdde5197f3ea96ebf3d6bd95741d210dab9b Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 Mar 2022 10:32:12 +0800 Subject: rtw89: support variant of fill_txdesc The txdesc is descriptor related to skb->data. The v1 version contains 8 dwords txwd_body and 6 dwords txwd_info, and the format is also different from original one. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-10-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 68 +++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/core.h | 28 +++++++++++ drivers/net/wireless/realtek/rtw89/pci.c | 8 ++-- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 2 + drivers/net/wireless/realtek/rtw89/txrx.h | 12 +++++ 6 files changed, 116 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index a5c13fd7d8a0..3c2cbe479197 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -897,6 +897,26 @@ static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info) return cpu_to_le32(dword); } +static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) | + FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) | + FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) | + FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) | + FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) | + FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) | + FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type); + + return cpu_to_le32(dword); +} + static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) | @@ -916,6 +936,14 @@ static __le32 rtw89_build_txwd_body3(struct 
rtw89_tx_desc_info *desc_info) return cpu_to_le32(dword); } +static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) | + FIELD_PREP(RTW89_TXWD_BODY7_DATA_RATE, desc_info->data_rate); + + return cpu_to_le32(dword); +} + static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) | @@ -926,6 +954,13 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info) return cpu_to_le32(dword); } +static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb); + + return cpu_to_le32(dword); +} + static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) | @@ -946,6 +981,15 @@ static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info) return cpu_to_le32(dword); } +static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) | + FIELD_PREP(RTW89_TXWD_INFO2_FORCE_KEY_EN, desc_info->sec_en) | + FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx); + + return cpu_to_le32(dword); +} + static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) | @@ -977,6 +1021,30 @@ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_core_fill_txdesc); +void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc) +{ + struct rtw89_txwd_body_v1 *txwd_body = (struct rtw89_txwd_body_v1 *)txdesc; + struct rtw89_txwd_info *txwd_info; + + txwd_body->dword0 = rtw89_build_txwd_body0_v1(desc_info); + txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info); + txwd_body->dword2 = rtw89_build_txwd_body2(desc_info); + txwd_body->dword3 = rtw89_build_txwd_body3(desc_info); + txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info); + + if (!desc_info->en_wd_info) + return; + + txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1); + txwd_info->dword0 = rtw89_build_txwd_info0_v1(desc_info); + txwd_info->dword1 = rtw89_build_txwd_info1(desc_info); + txwd_info->dword2 = rtw89_build_txwd_info2_v1(desc_info); + txwd_info->dword4 = rtw89_build_txwd_info4(desc_info); +} +EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1); + static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) | diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 501381bb74ad..b5587d799bd8 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -642,6 +642,17 @@ struct rtw89_txwd_body { __le32 dword5; } __packed; +struct rtw89_txwd_body_v1 { + __le32 dword0; + __le32 dword1; + __le32 dword2; + __le32 dword3; + __le32 dword4; + __le32 dword5; + __le32 dword6; + __le32 dword7; +} __packed; + struct rtw89_txwd_info { __le32 dword0; __le32 dword1; @@ -2079,6 +2090,9 @@ struct rtw89_chip_ops { s8 pw_ofst, enum rtw89_mac_idx mac_idx); int (*pwr_on_func)(struct rtw89_dev *rtwdev); int (*pwr_off_func)(struct rtw89_dev *rtwdev); + void (*fill_txdesc)(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); void 
(*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); @@ -2351,6 +2365,7 @@ struct rtw89_chip_info { u32 hci_func_en_addr; u32 h2c_desc_size; + u32 txwd_body_size; u32 h2c_ctrl_reg; const u32 *h2c_regs; u32 c2h_ctrl_reg; @@ -3514,6 +3529,16 @@ static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) chip->ops->ctrl_btg(rtwdev, btg); } +static inline +void rtw89_chip_fill_txdesc(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + chip->ops->fill_txdesc(rtwdev, desc_info, txdesc); +} + static inline void rtw89_chip_fill_txdesc_fwcmd(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, @@ -3597,6 +3622,9 @@ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel); void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); +void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + void *txdesc); void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info, void *txdesc); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 48a5feaf2722..3a27d6f8c630 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1035,8 +1035,8 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; - struct rtw89_txwd_body *txwd_body; struct rtw89_txwd_info *txwd_info; struct rtw89_pci_tx_wp_info *txwp_info; void *txaddr_info_addr; @@ -1050,8 +1050,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, dma_addr_t dma; int ret; - rtw89_core_fill_txdesc(rtwdev, desc_info, txwd->vaddr); - dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, dma)) { rtw89_err(rtwdev, "failed to map skb dma data\n"); @@ -1062,7 +1060,7 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, tx_data->dma = dma; txwp_len = sizeof(*txwp_info); - txwd_len = sizeof(*txwd_body); + txwd_len = chip->txwd_body_size; txwd_len += en_wd_info ? 
sizeof(*txwd_info) : 0; txwp_info = txwd->vaddr + txwd_len; @@ -1079,6 +1077,8 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, txwd->len = txwd_len + txwp_len + txaddr_info_len; + rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); + skb_queue_tail(&txwd->queue, skb); return 0; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 6aa3d19a7464..a0dd4db384c6 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2021,6 +2021,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, .pwr_on_func = NULL, .pwr_off_func = NULL, + .fill_txdesc = rtw89_core_fill_txdesc, .fill_txdesc_fwcmd = rtw89_core_fill_txdesc, .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path, .mac_cfg_gnt = rtw89_mac_cfg_gnt, @@ -2099,6 +2100,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { BIT(RTW89_PS_MODE_PWR_GATED), .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), + .txwd_body_size = sizeof(struct rtw89_txwd_body), .h2c_ctrl_reg = R_AX_H2CREG_CTRL, .h2c_regs = rtw8852a_h2c_regs, .c2h_ctrl_reg = R_AX_C2HREG_CTRL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 08a9c01a359e..df0c67c7322f 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -492,6 +492,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852c_pwr_on_func, .pwr_off_func = rtw8852c_pwr_off_func, + .fill_txdesc = rtw89_core_fill_txdesc_v1, .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1, .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1, .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1, @@ -517,6 +518,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .phycap_size = 0x60, .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1, .h2c_desc_size = sizeof(struct rtw89_rxdesc_short), + .txwd_body_size = sizeof(struct rtw89_txwd_body_v1), .h2c_ctrl_reg = R_AX_H2CREG_CTRL_V1, .h2c_regs = rtw8852c_h2c_regs, .c2h_ctrl_reg = R_AX_C2HREG_CTRL_V1, diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index 4e81d6df9368..c943e4e95721 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -24,6 +24,7 @@ /* TX WD BODY DWORD 0 */ #define RTW89_TXWD_BODY0_WP_OFFSET GENMASK(31, 24) +#define RTW89_TXWD_BODY0_WP_OFFSET_V1 GENMASK(28, 24) #define RTW89_TXWD_BODY0_MORE_DATA BIT(23) #define RTW89_TXWD_BODY0_WD_INFO_EN BIT(22) #define RTW89_TXWD_BODY0_FW_DL BIT(20) @@ -35,7 +36,9 @@ #define RTW89_TXWD_BODY0_HW_SSN_MODE GENMASK(1, 0) /* TX WD BODY DWORD 1 */ +#define RTW89_TXWD_BODY1_ADDR_INFO_NUM GENMASK(31, 26) #define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16) +#define RTW89_TXWD_BODY1_SEC_TYPE GENMASK(3, 0) /* TX WD BODY DWORD 2 */ #define RTW89_TXWD_BODY2_MACID GENMASK(30, 24) @@ -52,6 +55,14 @@ /* TX WD BODY DWORD 5 */ +/* TX WD BODY DWORD 6 (V1) */ + +/* TX WD BODY DWORD 7 (V1) */ +#define RTW89_TXWD_BODY7_USE_RATE_V1 BIT(31) +#define RTW89_TXWD_BODY7_DATA_BW GENMASK(29, 28) +#define RTW89_TXWD_BODY7_GI_LTF GENMASK(27, 25) +#define RTW89_TXWD_BODY7_DATA_RATE GENMASK(24, 16) + /* TX WD INFO DWORD 0 */ #define RTW89_TXWD_INFO0_USE_RATE BIT(30) #define RTW89_TXWD_INFO0_DATA_BW GENMASK(29, 28) @@ -69,6 +80,7 @@ #define RTW89_TXWD_INFO2_AMPDU_DENSITY GENMASK(20, 18) 
#define RTW89_TXWD_INFO2_SEC_TYPE GENMASK(12, 9) #define RTW89_TXWD_INFO2_SEC_HW_ENC BIT(8) +#define RTW89_TXWD_INFO2_FORCE_KEY_EN BIT(8) #define RTW89_TXWD_INFO2_SEC_CAM_IDX GENMASK(7, 0) /* TX WD INFO DWORD 3 */ -- cgit v1.2.3 From 79a6c9a4f3c4473dd508a7384ae328bf683b7382 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 Mar 2022 10:32:13 +0800 Subject: rtw89: support hardware generate security header The newer chip will generate security header itself, so don't set IEEE80211_KEY_FLAG_GENERATE_IV in this kind of chip. But, it needs to fill key_index, PN and 802.11 header length to TX descriptor, and then hardware uses these to generate security header. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-11-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/cam.c | 4 ++- drivers/net/wireless/realtek/rtw89/core.c | 50 +++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/core.h | 3 ++ drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + drivers/net/wireless/realtek/rtw89/txrx.h | 7 ++++ 6 files changed, 65 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 34df3c07c55c..34827f174ba1 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -320,6 +320,7 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { + const struct rtw89_chip_info *chip = rtwdev->chip; u8 hw_key_type; bool ext_key = false; int ret; @@ -353,7 +354,8 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, return -EOPNOTSUPP; } - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + if (!chip->hw_sec_hdr) + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; ret = rtw89_cam_sec_key_install(rtwdev, vif, sta, key, hw_key_type, ext_key); diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 3c2cbe479197..245d8514961e 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -435,6 +435,7 @@ static void rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { + const struct rtw89_chip_info *chip = rtwdev->chip; struct ieee80211_vif *vif = tx_req->vif; struct ieee80211_sta *sta = tx_req->sta; struct ieee80211_tx_info *info; @@ -446,6 +447,7 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; struct sk_buff *skb = tx_req->skb; u8 sec_type = RTW89_SEC_KEY_TYPE_NONE; + u64 pn64; if (!vif) { rtw89_warn(rtwdev, "cannot set sec key without vif\n"); @@ -491,8 +493,21 @@ rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, } desc_info->sec_en = true; + desc_info->sec_keyid = key->keyidx; desc_info->sec_type = sec_type; desc_info->sec_cam_idx = sec_cam->sec_cam_idx; + + if (!chip->hw_sec_hdr) + return; + + pn64 = atomic64_inc_return(&key->tx_pn); + desc_info->sec_seq[0] = pn64; + desc_info->sec_seq[1] = pn64 >> 8; + desc_info->sec_seq[2] = pn64 >> 16; + desc_info->sec_seq[3] = pn64 >> 24; + desc_info->sec_seq[4] = pn64 >> 32; + desc_info->sec_seq[5] = pn64 >> 40; + desc_info->wp_offset = 1; /* in unit of 8 bytes for security header */ } static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev, @@ -755,6 +770,17 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, return PACKET_MAX; } +static void 
rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev, + struct rtw89_tx_desc_info *desc_info, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + + desc_info->hdr_llc_len = ieee80211_hdrlen(fc); + desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */ +} + static void rtw89_core_tx_wake(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) @@ -806,6 +832,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, rtw89_core_tx_update_data_info(rtwdev, tx_req); pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req); rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type); + rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb); break; case RTW89_CORE_TX_TYPE_FWCMD: rtw89_core_tx_update_h2c_info(rtwdev, tx_req); @@ -912,6 +939,7 @@ static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info) static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) | + FIELD_PREP(RTW89_TXWD_BODY1_SEC_KEYID, desc_info->sec_keyid) | FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type); return cpu_to_le32(dword); @@ -936,6 +964,24 @@ static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info) return cpu_to_le32(dword); } +static __le32 rtw89_build_txwd_body4(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) | + FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]); + + return cpu_to_le32(dword); +} + +static __le32 rtw89_build_txwd_body5(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) | + FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) | + FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) | + FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]); + + return cpu_to_le32(dword); +} + static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) | @@ -1032,6 +1078,10 @@ void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev, txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info); txwd_body->dword2 = rtw89_build_txwd_body2(desc_info); txwd_body->dword3 = rtw89_build_txwd_body3(desc_info); + if (desc_info->sec_en) { + txwd_body->dword4 = rtw89_build_txwd_body4(desc_info); + txwd_body->dword5 = rtw89_build_txwd_body5(desc_info); + } txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info); if (!desc_info->en_wd_info) diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index b5587d799bd8..9f53d581a48f 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -733,8 +733,10 @@ struct rtw89_tx_desc_info { u8 ampdu_num; bool sec_en; u8 addr_info_nr; + u8 sec_keyid; u8 sec_type; u8 sec_cam_idx; + u8 sec_seq[6]; u16 data_rate; u16 data_retry_lowest_rate; bool fw_dl; @@ -2302,6 +2304,7 @@ struct rtw89_chip_info { u32 rf_base_addr[2]; u8 support_bands; bool support_bw160; + bool hw_sec_hdr; u8 rf_path_num; u8 tx_nss; u8 rx_nss; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index a0dd4db384c6..0ab1c57c845b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2066,6 +2066,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { 
.support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), .support_bw160 = false, + .hw_sec_hdr = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index df0c67c7322f..4773f6c739dc 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -508,6 +508,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .rf_base_addr = {0xe000, 0xf000}, .pwr_on_seq = NULL, .pwr_off_seq = NULL, + .hw_sec_hdr = true, .sec_ctrl_efuse_size = 4, .physical_efuse_size = 1216, .logical_efuse_size = 2048, diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index c943e4e95721..1250e26ade40 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -38,6 +38,7 @@ /* TX WD BODY DWORD 1 */ #define RTW89_TXWD_BODY1_ADDR_INFO_NUM GENMASK(31, 26) #define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16) +#define RTW89_TXWD_BODY1_SEC_KEYID GENMASK(5, 4) #define RTW89_TXWD_BODY1_SEC_TYPE GENMASK(3, 0) /* TX WD BODY DWORD 2 */ @@ -52,8 +53,14 @@ #define RTW89_TXWD_BODY3_SW_SEQ GENMASK(11, 0) /* TX WD BODY DWORD 4 */ +#define RTW89_TXWD_BODY4_SEC_IV_L1 GENMASK(31, 24) +#define RTW89_TXWD_BODY4_SEC_IV_L0 GENMASK(23, 16) /* TX WD BODY DWORD 5 */ +#define RTW89_TXWD_BODY5_SEC_IV_H5 GENMASK(31, 24) +#define RTW89_TXWD_BODY5_SEC_IV_H4 GENMASK(23, 16) +#define RTW89_TXWD_BODY5_SEC_IV_H3 GENMASK(15, 8) +#define RTW89_TXWD_BODY5_SEC_IV_H2 GENMASK(7, 0) /* TX WD BODY DWORD 6 (V1) */ -- cgit v1.2.3 From 84fc6999f0d01920687d8555e3f1bb87625a66ed Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 Mar 2022 10:32:14 +0800 Subject: rtw89: read RX bandwidth from v1 type RX descriptor 8852C uses different fields to represent RX bandwidth. Since other fields are the same, I check chip_id to get bandwidth instead of creating another v1 function. 
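A minimal stand-alone sketch of the two layouts (field positions copied from the masks below; get_bw()/get_bw_v1() are illustrative names, not driver symbols): the original descriptor keeps BW in dword1 bits [31:30], while the v1 layout widens the field to bits [31:29], so the same word decodes differently.

#include <stdio.h>
#include <stdint.h>

static unsigned int get_bw(uint32_t dword1)	/* GENMASK(31, 30) */
{
	return (dword1 >> 30) & 0x3;
}

static unsigned int get_bw_v1(uint32_t dword1)	/* GENMASK(31, 29) */
{
	return (dword1 >> 29) & 0x7;
}

int main(void)
{
	uint32_t dword1 = 0x40000000;	/* example descriptor word */

	printf("bw=%u bw_v1=%u\n", get_bw(dword1), get_bw_v1(dword1));
	return 0;
}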
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318023214.32411-12-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 6 +++++- drivers/net/wireless/realtek/rtw89/txrx.h | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 245d8514961e..d923e4a0f963 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -1492,6 +1492,7 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, struct rtw89_rx_desc_info *desc_info, u8 *data, u32 data_offset) { + const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_rxdesc_short *rxd_s; struct rtw89_rxdesc_long *rxd_l; u8 shift_len, drv_info_len; @@ -1502,7 +1503,10 @@ void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev, desc_info->long_rxdesc = RTW89_GET_RXWD_LONG_RXD(rxd_s); desc_info->pkt_type = RTW89_GET_RXWD_RPKT_TYPE(rxd_s); desc_info->mac_info_valid = RTW89_GET_RXWD_MAC_INFO_VALID(rxd_s); - desc_info->bw = RTW89_GET_RXWD_BW(rxd_s); + if (chip->chip_id == RTL8852C) + desc_info->bw = RTW89_GET_RXWD_BW_V1(rxd_s); + else + desc_info->bw = RTW89_GET_RXWD_BW(rxd_s); desc_info->data_rate = RTW89_GET_RXWD_DATA_RATE(rxd_s); desc_info->gi_ltf = RTW89_GET_RXWD_GI_LTF(rxd_s); desc_info->user_id = RTW89_GET_RXWD_USER_ID(rxd_s); diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index 1250e26ade40..b889e7bf34c0 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -204,6 +204,8 @@ le32_get_bits((rxdesc)->dword0, GENMASK(13, 0)) #define RTW89_GET_RXWD_BW(rxdesc) \ le32_get_bits((rxdesc)->dword1, GENMASK(31, 30)) +#define RTW89_GET_RXWD_BW_V1(rxdesc) \ + le32_get_bits((rxdesc)->dword1, GENMASK(31, 29)) #define RTW89_GET_RXWD_GI_LTF(rxdesc) \ le32_get_bits((rxdesc)->dword1, GENMASK(27, 25)) #define RTW89_GET_RXWD_DATA_RATE(rxdesc) \ -- cgit v1.2.3 From 26bb93407c748d731f25581ccae196e1016072bf Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 Mar 2022 11:52:02 +0800 Subject: rtw89: handle potential uninitialized variable The smatch reports: rtw8852a.c:1857 rtw8852a_btc_set_wl_txpwr_ctrl() error: uninitialized symbol '_cur'. rtw8852a.c:1858 rtw8852a_btc_set_wl_txpwr_ctrl() error: uninitialized symbol '_cur'. This is because rtw89_mac_txpwr_read32() can possibly return before setting argument _cur, and the caller will use the uninitialized value. To fix this problem, check the return value of rtw89_mac_txpwr_read32(). 
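The failure mode can be reproduced with a small stand-alone sketch (read_reg() is a hypothetical stand-in for rtw89_mac_txpwr_read32(), which likewise only writes its output argument on success): a caller that ignores the return code would read whatever garbage happens to be in the local variable, which is exactly what checking the return value before using _cur avoids.

#include <stdio.h>

/* hypothetical register reader: writes *val only when it succeeds */
static int read_reg(unsigned int addr, unsigned int *val)
{
	if (addr > 0xffff)
		return -22;	/* error path leaves *val untouched */
	*val = 0x1234;
	return 0;
}

int main(void)
{
	unsigned int cur;	/* uninitialized until read_reg() succeeds */

	if (read_reg(0x10000, &cur)) {
		fprintf(stderr, "read failed, bail out before using cur\n");
		return 1;
	}
	printf("cur=0x%x\n", cur);
	return 0;
}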
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220318035202.42437-1-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 0ab1c57c845b..a8b972d4bee8 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -1843,7 +1843,8 @@ rtw8852a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val) u32 _cur, _wrt; \ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \ "btc ctrl %s: 0x%x\n", #_case, _val); \ - rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur);\ + if (rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur))\ + break; \ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \ "btc ctrl ori 0x%x: 0x%x\n", _reg, _cur); \ _wrt = __do_clr(_val) ? \ -- cgit v1.2.3 From 121210ec935c47b076709974d95360f5e9c9b869 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Fri, 1 Apr 2022 20:30:40 +0300 Subject: ath11k: mhi: remove state machine State machines are difficult to understand and in this case it's just useless, which is shown by the diffstat. So remove it entirely to make the code simpler. No functional changes. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03003-QCAHSPSWPL_V1_V2_SILICONZ_LITE-2 Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401173042.17467-2-kvalo@kernel.org --- drivers/net/wireless/ath/ath11k/mhi.c | 194 ++-------------------------------- drivers/net/wireless/ath/ath11k/mhi.h | 13 --- drivers/net/wireless/ath/ath11k/pci.h | 2 +- 3 files changed, 11 insertions(+), 198 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 1aa1e0f01b85..6ad91c600d0e 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -463,195 +463,17 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci) mhi_free_controller(mhi_ctrl); } -static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state) -{ - switch (mhi_state) { - case ATH11K_MHI_INIT: - return "INIT"; - case ATH11K_MHI_DEINIT: - return "DEINIT"; - case ATH11K_MHI_POWER_ON: - return "POWER_ON"; - case ATH11K_MHI_POWER_OFF: - return "POWER_OFF"; - case ATH11K_MHI_FORCE_POWER_OFF: - return "FORCE_POWER_OFF"; - case ATH11K_MHI_SUSPEND: - return "SUSPEND"; - case ATH11K_MHI_RESUME: - return "RESUME"; - case ATH11K_MHI_TRIGGER_RDDM: - return "TRIGGER_RDDM"; - case ATH11K_MHI_RDDM_DONE: - return "RDDM_DONE"; - default: - return "UNKNOWN"; - } -}; - -static void ath11k_mhi_set_state_bit(struct ath11k_pci *ab_pci, - enum ath11k_mhi_state mhi_state) -{ - struct ath11k_base *ab = ab_pci->ab; - - switch (mhi_state) { - case ATH11K_MHI_INIT: - set_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state); - break; - case ATH11K_MHI_DEINIT: - clear_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state); - break; - case ATH11K_MHI_POWER_ON: - set_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state); - break; - case ATH11K_MHI_POWER_OFF: - case ATH11K_MHI_FORCE_POWER_OFF: - clear_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state); - clear_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state); - clear_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state); - break; - case ATH11K_MHI_SUSPEND: - set_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state); - break; - case ATH11K_MHI_RESUME: - clear_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state); - break; - case ATH11K_MHI_TRIGGER_RDDM: - 
set_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state); - break; - case ATH11K_MHI_RDDM_DONE: - set_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state); - break; - default: - ath11k_err(ab, "unhandled mhi state (%d)\n", mhi_state); - } -} - -static int ath11k_mhi_check_state_bit(struct ath11k_pci *ab_pci, - enum ath11k_mhi_state mhi_state) -{ - struct ath11k_base *ab = ab_pci->ab; - - switch (mhi_state) { - case ATH11K_MHI_INIT: - if (!test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state)) - return 0; - break; - case ATH11K_MHI_DEINIT: - case ATH11K_MHI_POWER_ON: - if (test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state) && - !test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state)) - return 0; - break; - case ATH11K_MHI_FORCE_POWER_OFF: - if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state)) - return 0; - break; - case ATH11K_MHI_POWER_OFF: - case ATH11K_MHI_SUSPEND: - if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) && - !test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state)) - return 0; - break; - case ATH11K_MHI_RESUME: - if (test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state)) - return 0; - break; - case ATH11K_MHI_TRIGGER_RDDM: - if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) && - !test_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state)) - return 0; - break; - case ATH11K_MHI_RDDM_DONE: - return 0; - default: - ath11k_err(ab, "unhandled mhi state: %s(%d)\n", - ath11k_mhi_state_to_str(mhi_state), mhi_state); - } - - ath11k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n", - ath11k_mhi_state_to_str(mhi_state), mhi_state, - ab_pci->mhi_state); - - return -EINVAL; -} - -static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, - enum ath11k_mhi_state mhi_state) -{ - struct ath11k_base *ab = ab_pci->ab; - int ret; - - ret = ath11k_mhi_check_state_bit(ab_pci, mhi_state); - if (ret) - goto out; - - ath11k_dbg(ab, ATH11K_DBG_PCI, "setting mhi state: %s(%d)\n", - ath11k_mhi_state_to_str(mhi_state), mhi_state); - - switch (mhi_state) { - case ATH11K_MHI_INIT: - ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl); - break; - case ATH11K_MHI_DEINIT: - mhi_unprepare_after_power_down(ab_pci->mhi_ctrl); - ret = 0; - break; - case ATH11K_MHI_POWER_ON: - ret = mhi_sync_power_up(ab_pci->mhi_ctrl); - break; - case ATH11K_MHI_POWER_OFF: - mhi_power_down(ab_pci->mhi_ctrl, true); - ret = 0; - break; - case ATH11K_MHI_FORCE_POWER_OFF: - mhi_power_down(ab_pci->mhi_ctrl, false); - ret = 0; - break; - case ATH11K_MHI_SUSPEND: - ret = mhi_pm_suspend(ab_pci->mhi_ctrl); - break; - case ATH11K_MHI_RESUME: - /* Do force MHI resume as some devices like QCA6390, WCN6855 - * are not in M3 state but they are functional. So just ignore - * the MHI state while resuming. 
- */ - ret = mhi_pm_resume_force(ab_pci->mhi_ctrl); - break; - case ATH11K_MHI_TRIGGER_RDDM: - ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl); - break; - case ATH11K_MHI_RDDM_DONE: - break; - default: - ath11k_err(ab, "unhandled MHI state (%d)\n", mhi_state); - ret = -EINVAL; - } - - if (ret) - goto out; - - ath11k_mhi_set_state_bit(ab_pci, mhi_state); - - return 0; - -out: - ath11k_err(ab, "failed to set mhi state: %s(%d)\n", - ath11k_mhi_state_to_str(mhi_state), mhi_state); - return ret; -} - int ath11k_mhi_start(struct ath11k_pci *ab_pci) { int ret; ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS; - ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_INIT); + ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl); if (ret) goto out; - ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_ON); + ret = mhi_sync_power_up(ab_pci->mhi_ctrl); if (ret) goto out; @@ -663,16 +485,20 @@ out: void ath11k_mhi_stop(struct ath11k_pci *ab_pci) { - ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_OFF); - ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT); + mhi_power_down(ab_pci->mhi_ctrl, true); + mhi_unprepare_after_power_down(ab_pci->mhi_ctrl); } void ath11k_mhi_suspend(struct ath11k_pci *ab_pci) { - ath11k_mhi_set_state(ab_pci, ATH11K_MHI_SUSPEND); + mhi_pm_suspend(ab_pci->mhi_ctrl); } void ath11k_mhi_resume(struct ath11k_pci *ab_pci) { - ath11k_mhi_set_state(ab_pci, ATH11K_MHI_RESUME); + /* Do force MHI resume as some devices like QCA6390, WCN6855 + * are not in M3 state but they are functional. So just ignore + * the MHI state while resuming. + */ + mhi_pm_resume_force(ab_pci->mhi_ctrl); } diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h index 488dada5d31c..5dd024f879c4 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.h +++ b/drivers/net/wireless/ath/ath11k/mhi.h @@ -16,19 +16,6 @@ #define MHICTRL 0x38 #define MHICTRL_RESET_MASK 0x2 -enum ath11k_mhi_state { - ATH11K_MHI_INIT, - ATH11K_MHI_DEINIT, - ATH11K_MHI_POWER_ON, - ATH11K_MHI_POWER_OFF, - ATH11K_MHI_FORCE_POWER_OFF, - ATH11K_MHI_SUSPEND, - ATH11K_MHI_RESUME, - ATH11K_MHI_TRIGGER_RDDM, - ATH11K_MHI_RDDM, - ATH11K_MHI_RDDM_DONE, -}; - int ath11k_mhi_start(struct ath11k_pci *ar_pci); void ath11k_mhi_stop(struct ath11k_pci *ar_pci); int ath11k_mhi_register(struct ath11k_pci *ar_pci); diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index 16a000b9cc5e..e9a01f344ec6 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -63,7 +63,7 @@ struct ath11k_pci { u16 dev_id; char amss_path[100]; struct mhi_controller *mhi_ctrl; - unsigned long mhi_state; + const struct ath11k_msi_config *msi_config; u32 register_window; /* protects register_window above */ -- cgit v1.2.3 From 3e80fcbca37221cd1e5a33eea4b0f215f66a7a00 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Tue, 5 Apr 2022 11:26:39 +0300 Subject: ath11k: mhi: add error handling for suspend and resume While reviewing the mhi.c I noticed we were just ignoring the errors coming from MHI subsystem during suspend and resume. Add proper checks and warning messages. Also pass the error value to callers. 
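The shape of the change is the usual check-warn-propagate pattern; a stand-alone sketch with made-up names (bus_suspend()/dev_suspend() are not the real ath11k or MHI symbols) looks like this:

#include <stdio.h>

static int bus_suspend(void)
{
	return -5;	/* pretend the lower layer (MHI) failed with -EIO */
}

static int dev_suspend(void)
{
	int ret = bus_suspend();

	if (ret) {
		fprintf(stderr, "failed to suspend mhi: %d\n", ret);
		return ret;	/* hand the error up instead of dropping it */
	}

	return 0;
}

int main(void)
{
	return dev_suspend() ? 1 : 0;
}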
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03003-QCAHSPSWPL_V1_V2_SILICONZ_LITE-2 Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401173042.17467-3-kvalo@kernel.org --- drivers/net/wireless/ath/ath11k/mhi.c | 26 ++++++++++++++++++++++---- drivers/net/wireless/ath/ath11k/mhi.h | 4 ++-- drivers/net/wireless/ath/ath11k/pci.c | 8 ++------ 3 files changed, 26 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 6ad91c600d0e..defdf641df0b 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -489,16 +489,34 @@ void ath11k_mhi_stop(struct ath11k_pci *ab_pci) mhi_unprepare_after_power_down(ab_pci->mhi_ctrl); } -void ath11k_mhi_suspend(struct ath11k_pci *ab_pci) +int ath11k_mhi_suspend(struct ath11k_pci *ab_pci) { - mhi_pm_suspend(ab_pci->mhi_ctrl); + struct ath11k_base *ab = ab_pci->ab; + int ret; + + ret = mhi_pm_suspend(ab_pci->mhi_ctrl); + if (ret) { + ath11k_warn(ab, "failed to suspend mhi: %d", ret); + return ret; + } + + return 0; } -void ath11k_mhi_resume(struct ath11k_pci *ab_pci) +int ath11k_mhi_resume(struct ath11k_pci *ab_pci) { + struct ath11k_base *ab = ab_pci->ab; + int ret; + /* Do force MHI resume as some devices like QCA6390, WCN6855 * are not in M3 state but they are functional. So just ignore * the MHI state while resuming. */ - mhi_pm_resume_force(ab_pci->mhi_ctrl); + ret = mhi_pm_resume_force(ab_pci->mhi_ctrl); + if (ret) { + ath11k_warn(ab, "failed to resume mhi: %d", ret); + return ret; + } + + return 0; } diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h index 5dd024f879c4..8d9f852da695 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.h +++ b/drivers/net/wireless/ath/ath11k/mhi.h @@ -23,7 +23,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ar_pci); void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab); void ath11k_mhi_clear_vector(struct ath11k_base *ab); -void ath11k_mhi_suspend(struct ath11k_pci *ar_pci); -void ath11k_mhi_resume(struct ath11k_pci *ar_pci); +int ath11k_mhi_suspend(struct ath11k_pci *ar_pci); +int ath11k_mhi_resume(struct ath11k_pci *ar_pci); #endif diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 3fd5b416a564..024661a17008 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -619,18 +619,14 @@ static int ath11k_pci_hif_suspend(struct ath11k_base *ab) { struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); - ath11k_mhi_suspend(ar_pci); - - return 0; + return ath11k_mhi_suspend(ar_pci); } static int ath11k_pci_hif_resume(struct ath11k_base *ab) { struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); - ath11k_mhi_resume(ar_pci); - - return 0; + return ath11k_mhi_resume(ar_pci); } static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab) -- cgit v1.2.3 From b9e34ba6b314780a47ac40f450ec04f18be85b5e Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Tue, 5 Apr 2022 11:26:44 +0300 Subject: ath11k: mhi: remove unnecessary goto from ath11k_mhi_start() No need to have goto for a return statement, so simplify the code. While at it, print warning messages if power up calls fail. No functional changes. 
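In general terms the cleanup replaces a goto whose label does nothing but return with a direct return; a generic sketch (step1()/step2() are placeholders, not driver functions):

#include <stdio.h>

static int step1(void) { return 0; }
static int step2(void) { return 0; }

/* before: the label only returns ret */
static int start_with_goto(void)
{
	int ret;

	ret = step1();
	if (ret)
		goto out;

	ret = step2();
	if (ret)
		goto out;

	return 0;
out:
	return ret;
}

/* after: return directly at each failure point */
static int start_plain(void)
{
	int ret;

	ret = step1();
	if (ret)
		return ret;

	return step2();
}

int main(void)
{
	printf("%d %d\n", start_with_goto(), start_plain());
	return 0;
}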
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03003-QCAHSPSWPL_V1_V2_SILICONZ_LITE-2 Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401173042.17467-4-kvalo@kernel.org --- drivers/net/wireless/ath/ath11k/mhi.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index defdf641df0b..c44df17719f6 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -465,22 +465,24 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci) int ath11k_mhi_start(struct ath11k_pci *ab_pci) { + struct ath11k_base *ab = ab_pci->ab; int ret; ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS; ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl); - if (ret) - goto out; + if (ret) { + ath11k_warn(ab, "failed to prepare mhi: %d", ret); + return ret; + } ret = mhi_sync_power_up(ab_pci->mhi_ctrl); - if (ret) - goto out; + if (ret) { + ath11k_warn(ab, "failed to power up mhi: %d", ret); + return ret; + } return 0; - -out: - return ret; } void ath11k_mhi_stop(struct ath11k_pci *ab_pci) -- cgit v1.2.3 From 740c431c22fedc2425c9cd263654745b806b6fc7 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:40 +0800 Subject: rtw89: pci: add register definition to rtw89_pci_info to generalize pci code The PCI code of 8852AE and 8852CE are different, but the flow and register names are similar. To reuse the code, add a struct to define register or value accordingly. We also use chip id to control the slightly different flow. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 138 +++++++++++++++++++------ drivers/net/wireless/realtek/rtw89/pci.h | 68 ++++++++++++ drivers/net/wireless/realtek/rtw89/reg.h | 40 +++++++ drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 11 ++ drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 11 ++ 5 files changed, 238 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 3a27d6f8c630..2395a29c176f 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1426,16 +1426,23 @@ static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + const struct rtw89_pci_info *info = rtwdev->pci_info; + u32 txhci_en = info->txhci_en_bit; + u32 rxhci_en = info->rxhci_en_bit; + if (enable) { + if (chip_id != RTL8852C) + rtw89_write32_clr(rtwdev, info->dma_stop1_reg, + B_AX_STOP_PCIEIO); rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, - B_AX_TXHCI_EN | B_AX_RXHCI_EN); - rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, - B_AX_STOP_PCIEIO); + txhci_en | rxhci_en); } else { - rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, - B_AX_STOP_PCIEIO); + if (chip_id != RTL8852C) + rtw89_write32_set(rtwdev, info->dma_stop1_reg, + B_AX_STOP_PCIEIO); rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, - B_AX_TXHCI_EN | B_AX_RXHCI_EN); + txhci_en | rxhci_en); } } @@ -1500,6 +1507,28 @@ rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) return 0; } +static int +rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) +{ + u32 shift; + int ret; + u16 val; + + ret = 
rtw89_read16_mdio(rtwdev, addr, speed, &val); + if (!ret) + return ret; + + shift = __ffs(mask); + val &= ~mask; + val |= ((data << shift) & mask); + + ret = rtw89_write16_mdio(rtwdev, addr, val, speed); + if (!ret) + return ret; + + return 0; +} + static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) { int ret; @@ -1628,8 +1657,7 @@ static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) bool l1_flag = false; int ret = 0; - if ((rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) || - rtwdev->chip->chip_id == RTL8852C) + if (rtwdev->chip->chip_id != RTL8852B) return 0; ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); @@ -1793,12 +1821,15 @@ static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) { + if (rtwdev->chip->chip_id != RTL8852A) + return; + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); } static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) { - if (rtwdev->chip->chip_id == RTL8852C) + if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) return; rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); @@ -1808,7 +1839,7 @@ static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) { int ret; - if (rtwdev->chip->chip_id == RTL8852C) + if (rtwdev->chip->chip_id != RTL8852A) return 0; ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, @@ -1843,6 +1874,40 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) B_AX_PCIE_DIS_WLSUS_AFT_PDN); } +static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id != RTL8852B) + return 0; + + return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, + PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); +} + +static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) +{ + if (pwr_up) + rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); + else + rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); +} + +static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id != RTL8852C) + return; + + rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); + rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); +} + +static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id != RTL8852C && rtwdev->hal.cv == CHIP_CAV) + return; + + rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); +} + static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) { if (rtwdev->chip->chip_id == RTL8852C) @@ -1867,19 +1932,23 @@ static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) { + const struct rtw89_pci_info *info = rtwdev->pci_info; + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | B_AX_CLR_CH12_IDX; + u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; + u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; - if (rtwdev->chip->chip_id == RTL8852A) + if (chip_id == RTL8852A || chip_id == RTL8852C) val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; /* clear DMA indexes */ rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); - if (rtwdev->chip->chip_id == RTL8852A) - rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR2, + if (chip_id == RTL8852A || chip_id == RTL8852C) + 
rtw89_write32_set(rtwdev, txbd_rwptr_clr2, B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); - rtw89_write32_set(rtwdev, R_AX_RXBD_RWPTR_CLR, + rtw89_write32_set(rtwdev, rxbd_rwptr_clr, B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); } @@ -1897,6 +1966,7 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) { + const struct rtw89_pci_info *info = rtwdev->pci_info; u32 dma_busy; u32 check; u32 lbc; @@ -1913,6 +1983,7 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) rtw89_pci_aphy_pwrcut(rtwdev); rtw89_pci_hci_ldo(rtwdev); + rtw89_pci_dphy_delay(rtwdev); ret = rtw89_pci_auto_refclk_cal(rtwdev, false); if (ret) { @@ -1920,21 +1991,26 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) return ret; } + rtw89_pci_power_wake(rtwdev, true); + rtw89_pci_autoload_hang(rtwdev); + rtw89_pci_l12_vmain(rtwdev); rtw89_pci_set_sic(rtwdev); rtw89_pci_set_dbg(rtwdev); - if (rtwdev->chip->chip_id == RTL8852A) + if (rtwdev->chip->chip_id == RTL8852A) { rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_AUXCLK_GATE); - lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); - lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER); - lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; - rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); + lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); + lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER); + lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; + rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); - rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, - B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); - rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_WPDMA); + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); + } + + rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA); /* stop DMA activities */ rtw89_pci_ctrl_dma_all(rtwdev, false); @@ -1975,9 +2051,9 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) } /* enable FW CMD queue to download firmware */ - rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL); - rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_CH12); - rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL); + rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); + rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12); + rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); /* start DMA activities */ rtw89_pci_ctrl_dma_all(rtwdev, true); @@ -2018,6 +2094,7 @@ static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev) static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) { + const struct rtw89_pci_info *info = rtwdev->pci_info; int ret; ret = rtw89_pci_ltr_set(rtwdev); @@ -2035,11 +2112,11 @@ static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); /* enable DMA for all queues */ - rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL); - rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL); + rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); + rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); /* Release PCI IO */ - rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, + rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); return 0; @@ -2767,17 +2844,18 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) static void rtw89_pci_ctrl_dma_all_pcie(struct 
rtw89_dev *rtwdev, u8 en) { + const struct rtw89_pci_info *info = rtwdev->pci_info; u32 val32; if (en == MAC_AX_FUNC_EN) { val32 = B_AX_STOP_PCIEIO; - rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, val32); + rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32); val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); } else { val32 = B_AX_STOP_PCIEIO; - rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, val32); + rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32); val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index a67595b21185..8eabeaeec045 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -12,6 +12,9 @@ #define MDIO_PG0_G2 2 #define MDIO_PG1_G2 3 #define RAC_ANA10 0x10 +#define RAC_REG_REV2 0x1B +#define BAC_CMU_EN_DLY_MASK GENMASK(15, 12) +#define PCIE_DPHY_DLY_25US 0x1 #define RAC_ANA19 0x19 #define RAC_ANA1F 0x1F #define RAC_ANA24 0x24 @@ -35,6 +38,48 @@ #define R_AX_MDIO_WDATA 0x10A4 #define R_AX_MDIO_RDATA 0x10A6 +#define R_AX_PCIE_BG_CLR 0x303C +#define B_AX_BG_CLR_ASYNC_M3 BIT(4) + +#define R_AX_PCIE_IO_RCY_M1 0x3100 +#define B_AX_PCIE_IO_RCY_P_M1 BIT(5) +#define B_AX_PCIE_IO_RCY_WDT_P_M1 BIT(4) +#define B_AX_PCIE_IO_RCY_WDT_MODE_M1 BIT(3) +#define B_AX_PCIE_IO_RCY_TRIG_M1 BIT(0) + +#define R_AX_PCIE_WDT_TIMER_M1 0x3104 +#define B_AX_PCIE_WDT_TIMER_M1_MASK GENMASK(31, 0) + +#define R_AX_PCIE_IO_RCY_M2 0x310C +#define B_AX_PCIE_IO_RCY_P_M2 BIT(5) +#define B_AX_PCIE_IO_RCY_WDT_P_M2 BIT(4) +#define B_AX_PCIE_IO_RCY_WDT_MODE_M2 BIT(3) +#define B_AX_PCIE_IO_RCY_TRIG_M2 BIT(0) + +#define R_AX_PCIE_WDT_TIMER_M2 0x3110 +#define B_AX_PCIE_WDT_TIMER_M2_MASK GENMASK(31, 0) + +#define R_AX_PCIE_IO_RCY_E0 0x3118 +#define B_AX_PCIE_IO_RCY_P_E0 BIT(5) +#define B_AX_PCIE_IO_RCY_WDT_P_E0 BIT(4) +#define B_AX_PCIE_IO_RCY_WDT_MODE_E0 BIT(3) +#define B_AX_PCIE_IO_RCY_TRIG_E0 BIT(0) + +#define R_AX_PCIE_WDT_TIMER_E0 0x311C +#define B_AX_PCIE_WDT_TIMER_E0_MASK GENMASK(31, 0) + +#define R_AX_PCIE_IO_RCY_S1 0x3124 +#define B_AX_PCIE_IO_RCY_RP_S1 BIT(7) +#define B_AX_PCIE_IO_RCY_WP_S1 BIT(6) +#define B_AX_PCIE_IO_RCY_WDT_RP_S1 BIT(5) +#define B_AX_PCIE_IO_RCY_WDT_WP_S1 BIT(4) +#define B_AX_PCIE_IO_RCY_WDT_MODE_S1 BIT(3) +#define B_AX_PCIE_IO_RCY_RTRIG_S1 BIT(1) +#define B_AX_PCIE_IO_RCY_WTRIG_S1 BIT(0) + +#define R_AX_PCIE_WDT_TIMER_S1 0x3128 +#define B_AX_PCIE_WDT_TIMER_S1_MASK GENMASK(31, 0) + #define RTW89_PCI_WR_RETRY_CNT 20 /* Interrupts */ @@ -330,6 +375,7 @@ #define R_AX_PCIE_INIT_CFG2 0x1004 #define B_AX_WD_ITVL_IDLE GENMASK(27, 24) #define B_AX_WD_ITVL_ACT GENMASK(19, 16) +#define B_AX_PCIE_RX_APPLEN_MASK GENMASK(13, 0) #define R_AX_PCIE_PS_CTRL 0x1008 #define B_AX_L1OFF_PWR_OFF_EN BIT(5) @@ -356,11 +402,22 @@ #define B_AX_PCIE_TXBD_LEN0 BIT(1) #define B_AX_PCIE_TXBD_4KBOUD_LENERR BIT(0) +#define R_AX_TXBD_RWPTR_CLR2_V1 0x11C4 +#define B_AX_CLR_CH11_IDX BIT(1) +#define B_AX_CLR_CH10_IDX BIT(0) + #define R_AX_LBC_WATCHDOG 0x11D8 #define B_AX_LBC_TIMER GENMASK(7, 4) #define B_AX_LBC_FLAG BIT(1) #define B_AX_LBC_EN BIT(0) +#define R_AX_RXBD_RWPTR_CLR_V1 0x1200 +#define B_AX_CLR_RPQ_IDX BIT(1) +#define B_AX_CLR_RXQ_IDX BIT(0) + +#define R_AX_HAXI_EXP_CTRL 0x1204 +#define B_AX_MAX_TAG_NUM_V1_MASK GENMASK(2, 0) + #define R_AX_PCIE_EXP_CTRL 0x13F0 #define B_AX_EN_CHKDSC_NO_RX_STUCK BIT(20) #define B_AX_MAX_TAG_NUM GENMASK(18, 16) @@ -447,6 +504,17 @@ struct 
rtw89_pci_ch_dma_addr_set { }; struct rtw89_pci_info { + u32 init_cfg_reg; + u32 txhci_en_bit; + u32 rxhci_en_bit; + u32 rxbd_mode_bit; + u32 exp_ctrl_reg; + u32 max_tag_num_mask; + u32 rxbd_rwptr_clr_reg; + u32 txbd_rwptr_clr2_reg; + u32 dma_stop1_reg; + u32 dma_stop2_reg; + const struct rtw89_pci_ch_dma_addr_set *dma_addr_set; u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 25b106788118..f67584efeea9 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -103,10 +103,14 @@ #define R_AX_SYS_SDIO_CTRL 0x0070 #define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15) #define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14) +#define B_AX_PCIE_FORCE_PWR_NGAT BIT(13) #define B_AX_PCIE_CALIB_EN_V1 BIT(12) #define B_AX_PCIE_AUXCLK_GATE BIT(11) #define B_AX_LTE_MUX_CTRL_PATH BIT(26) +#define R_AX_HCI_OPT_CTRL 0x0074 +#define BIT_WAKE_CTRL BIT(5) + #define R_AX_PLATFORM_ENABLE 0x0088 #define B_AX_WCPU_EN BIT(1) #define B_AX_PLATFORM_EN BIT(0) @@ -220,6 +224,38 @@ #define R_AX_FILTER_MODEL_ADDR 0x0C04 +#define R_AX_HAXI_INIT_CFG1 0x1000 +#define B_AX_WD_ITVL_IDLE_V1_MASK GENMASK(31, 28) +#define B_AX_WD_ITVL_ACT_V1_MASK GENMASK(27, 24) +#define B_AX_DMA_MODE_MASK GENMASK(19, 18) +#define DMA_MOD_PCIE_1B 0x0 +#define DMA_MOD_PCIE_4B 0x1 +#define DMA_MOD_USB 0x2 +#define DMA_MOD_SDIO 0x3 +#define B_AX_STOP_AXI_MST BIT(17) +#define B_AX_HAXI_RST_KEEP_REG BIT(16) +#define B_AX_RXHCI_EN_V1 BIT(15) +#define B_AX_RXBD_MODE_V1 BIT(14) +#define B_AX_HAXI_MAX_RXDMA_MASK GENMASK(9, 8) +#define B_AX_TXHCI_EN_V1 BIT(7) +#define B_AX_FLUSH_AXI_MST BIT(4) +#define B_AX_RST_BDRAM BIT(3) +#define B_AX_HAXI_MAX_TXDMA_MASK GENMASK(1, 0) + +#define R_AX_HAXI_DMA_STOP1 0x1010 +#define B_AX_STOP_WPDMA BIT(19) +#define B_AX_STOP_CH12 BIT(18) +#define B_AX_STOP_CH9 BIT(17) +#define B_AX_STOP_CH8 BIT(16) +#define B_AX_STOP_ACH7 BIT(15) +#define B_AX_STOP_ACH6 BIT(14) +#define B_AX_STOP_ACH5 BIT(13) +#define B_AX_STOP_ACH4 BIT(12) +#define B_AX_STOP_ACH3 BIT(11) +#define B_AX_STOP_ACH2 BIT(10) +#define B_AX_STOP_ACH1 BIT(9) +#define B_AX_STOP_ACH0 BIT(8) + #define R_AX_PCIE_DBG_CTRL 0x11C0 #define B_AX_DBG_DUMMY_MASK GENMASK(23, 16) #define B_AX_DBG_SEL_MASK GENMASK(15, 13) @@ -228,6 +264,10 @@ #define B_AX_ASFF_FULL_NO_STK BIT(1) #define B_AX_EN_STUCK_DBG BIT(0) +#define R_AX_HAXI_DMA_STOP2 0x11C0 +#define B_AX_STOP_CH11 BIT(1) +#define B_AX_STOP_CH10 BIT(0) + #define R_AX_HCI_FC_CTRL_V1 0x1700 #define R_AX_CH_PAGE_CTRL_V1 0x1704 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 8ffc0dd90d41..b9047ac6b86d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -9,6 +9,17 @@ #include "rtw8852a.h" static const struct rtw89_pci_info rtw8852a_pci_info = { + .init_cfg_reg = R_AX_PCIE_INIT_CFG1, + .txhci_en_bit = B_AX_TXHCI_EN, + .rxhci_en_bit = B_AX_RXHCI_EN, + .rxbd_mode_bit = B_AX_RXBD_MODE, + .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL, + .max_tag_num_mask = B_AX_MAX_TAG_NUM, + .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR, + .txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2, + .dma_stop1_reg = R_AX_PCIE_DMA_STOP1, + .dma_stop2_reg = R_AX_PCIE_DMA_STOP2, + .dma_addr_set = &rtw89_pci_ch_dma_addr_set, .fill_txaddr_info = rtw89_pci_fill_txaddr_info, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 09794836d5c0..33e69e34e385 100644 
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -10,6 +10,17 @@ #include "rtw8852c.h" static const struct rtw89_pci_info rtw8852c_pci_info = { + .init_cfg_reg = R_AX_HAXI_INIT_CFG1, + .txhci_en_bit = B_AX_TXHCI_EN_V1, + .rxhci_en_bit = B_AX_RXHCI_EN_V1, + .rxbd_mode_bit = B_AX_RXBD_MODE_V1, + .exp_ctrl_reg = R_AX_HAXI_EXP_CTRL, + .max_tag_num_mask = B_AX_MAX_TAG_NUM_V1_MASK, + .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR_V1, + .txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2_V1, + .dma_stop1_reg = R_AX_HAXI_DMA_STOP1, + .dma_stop2_reg = R_AX_HAXI_DMA_STOP2, + .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1, -- cgit v1.2.3 From b9467e94b1f2c0ade913ad3767cffa310787de5b Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:41 +0800 Subject: rtw89: pci: add pci attributes to configure operating mode Refine operating mode function to support variant chips. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 133 ++++++++++++++++++++++--- drivers/net/wireless/realtek/rtw89/pci.h | 114 +++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 15 +++ drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 15 +++ 4 files changed, 262 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 2395a29c176f..e064d355250c 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1917,6 +1917,33 @@ static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) B_AX_SIC_EN_FORCE_CLKREQ); } +static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + u32 val32; + + if (rtwdev->chip->chip_id != RTL8852C) + return; + + if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { + val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, + info->io_rcy_tmr); + rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); + rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); + rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); + + rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); + rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); + rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); + } else { + rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); + rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); + rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); + } + + rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); +} + static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) { if (rtwdev->chip->chip_id == RTL8852C) @@ -1952,6 +1979,95 @@ static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); } +static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; + enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; + enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; + enum mac_ax_tag_mode tag_mode = info->tag_mode; + enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; + enum mac_ax_wd_dma_intvl wd_dma_act_intvl = 
info->wd_dma_act_intvl; + enum mac_ax_tx_burst tx_burst = info->tx_burst; + enum mac_ax_rx_burst rx_burst = info->rx_burst; + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + u8 cv = rtwdev->hal.cv; + u32 val32; + + if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { + if (chip_id == RTL8852A && cv == CHIP_CBV) + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); + } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { + if (chip_id == RTL8852A || chip_id == RTL8852B) + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); + } + + if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { + if (chip_id == RTL8852A && cv == CHIP_CBV) + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); + } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { + if (chip_id == RTL8852A || chip_id == RTL8852B) + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); + } + + if (rxbd_mode == MAC_AX_RXBD_PKT) { + rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); + } else if (rxbd_mode == MAC_AX_RXBD_SEP) { + rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); + + if (chip_id == RTL8852A || chip_id == RTL8852B) + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, + B_AX_PCIE_RX_APPLEN_MASK, 0); + } + + if (chip_id == RTL8852A || chip_id == RTL8852B) { + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); + } else if (chip_id == RTL8852C) { + rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); + rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); + } + + if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (tag_mode == MAC_AX_TAG_SGL) { + val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & + ~B_AX_LATENCY_CONTROL; + rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); + } else if (tag_mode == MAC_AX_TAG_MULTI) { + val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | + B_AX_LATENCY_CONTROL; + rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); + } + } + + rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, + info->multi_tag_num); + + if (chip_id == RTL8852A || chip_id == RTL8852B) { + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, + wd_dma_idle_intvl); + rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, + wd_dma_act_intvl); + } else if (chip_id == RTL8852C) { + rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, + wd_dma_idle_intvl); + rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, + wd_dma_act_intvl); + } + + if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { + rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, + B_AX_HOST_ADDR_INFO_8B_SEL); + rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); + } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { + rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, + B_AX_HOST_ADDR_INFO_8B_SEL); + rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); + } + + return 0; +} + static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) { if (rtwdev->chip->chip_id == RTL8852A) { @@ -1995,6 +2111,7 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) rtw89_pci_autoload_hang(rtwdev); rtw89_pci_l12_vmain(rtwdev); rtw89_pci_set_sic(rtwdev); + rtw89_pci_set_io_rcy(rtwdev); rtw89_pci_set_dbg(rtwdev); if (rtwdev->chip->chip_id == RTL8852A) { @@ -2025,21 +2142,7 @@ static int 
rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) } rtw89_pci_clr_idx_all(rtwdev); - - /* configure TX/RX op modes */ - rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE | - B_AX_RX_TRUNC_MODE); - rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RXBD_MODE); - rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, 7); - rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, 3); - /* multi-tag mode */ - rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_LATENCY_CONTROL); - rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, B_AX_MAX_TAG_NUM, - RTW89_MAC_TAG_NUM_8); - rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, - RTW89_MAC_WD_DMA_INTVL_256NS); - rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, - RTW89_MAC_WD_DMA_INTVL_256NS); + rtw89_pci_mode_op(rtwdev); /* fill TRX BD indexes */ rtw89_pci_ops_reset(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 8eabeaeec045..8d49033fa270 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -490,6 +490,105 @@ enum rtw89_pcie_clkdly_hw { PCIE_CLKDLY_HW_200US = 0x5, }; +enum mac_ax_bd_trunc_mode { + MAC_AX_BD_NORM, + MAC_AX_BD_TRUNC, + MAC_AX_BD_DEF = 0xFE +}; + +enum mac_ax_rxbd_mode { + MAC_AX_RXBD_PKT, + MAC_AX_RXBD_SEP, + MAC_AX_RXBD_DEF = 0xFE +}; + +enum mac_ax_tag_mode { + MAC_AX_TAG_SGL, + MAC_AX_TAG_MULTI, + MAC_AX_TAG_DEF = 0xFE +}; + +enum mac_ax_tx_burst { + MAC_AX_TX_BURST_16B = 0, + MAC_AX_TX_BURST_32B = 1, + MAC_AX_TX_BURST_64B = 2, + MAC_AX_TX_BURST_V1_64B = 0, + MAC_AX_TX_BURST_128B = 3, + MAC_AX_TX_BURST_V1_128B = 1, + MAC_AX_TX_BURST_256B = 4, + MAC_AX_TX_BURST_V1_256B = 2, + MAC_AX_TX_BURST_512B = 5, + MAC_AX_TX_BURST_1024B = 6, + MAC_AX_TX_BURST_2048B = 7, + MAC_AX_TX_BURST_DEF = 0xFE +}; + +enum mac_ax_rx_burst { + MAC_AX_RX_BURST_16B = 0, + MAC_AX_RX_BURST_32B = 1, + MAC_AX_RX_BURST_64B = 2, + MAC_AX_RX_BURST_V1_64B = 0, + MAC_AX_RX_BURST_128B = 3, + MAC_AX_RX_BURST_V1_128B = 1, + MAC_AX_RX_BURST_V1_256B = 0, + MAC_AX_RX_BURST_DEF = 0xFE +}; + +enum mac_ax_wd_dma_intvl { + MAC_AX_WD_DMA_INTVL_0S, + MAC_AX_WD_DMA_INTVL_256NS, + MAC_AX_WD_DMA_INTVL_512NS, + MAC_AX_WD_DMA_INTVL_768NS, + MAC_AX_WD_DMA_INTVL_1US, + MAC_AX_WD_DMA_INTVL_1_5US, + MAC_AX_WD_DMA_INTVL_2US, + MAC_AX_WD_DMA_INTVL_4US, + MAC_AX_WD_DMA_INTVL_8US, + MAC_AX_WD_DMA_INTVL_16US, + MAC_AX_WD_DMA_INTVL_DEF = 0xFE +}; + +enum mac_ax_multi_tag_num { + MAC_AX_TAG_NUM_1, + MAC_AX_TAG_NUM_2, + MAC_AX_TAG_NUM_3, + MAC_AX_TAG_NUM_4, + MAC_AX_TAG_NUM_5, + MAC_AX_TAG_NUM_6, + MAC_AX_TAG_NUM_7, + MAC_AX_TAG_NUM_8, + MAC_AX_TAG_NUM_DEF = 0xFE +}; + +enum mac_ax_lbc_tmr { + MAC_AX_LBC_TMR_8US = 0, + MAC_AX_LBC_TMR_16US, + MAC_AX_LBC_TMR_32US, + MAC_AX_LBC_TMR_64US, + MAC_AX_LBC_TMR_128US, + MAC_AX_LBC_TMR_256US, + MAC_AX_LBC_TMR_512US, + MAC_AX_LBC_TMR_1MS, + MAC_AX_LBC_TMR_2MS, + MAC_AX_LBC_TMR_4MS, + MAC_AX_LBC_TMR_8MS, + MAC_AX_LBC_TMR_DEF = 0xFE +}; + +enum mac_ax_pcie_func_ctrl { + MAC_AX_PCIE_DISABLE = 0, + MAC_AX_PCIE_ENABLE = 1, + MAC_AX_PCIE_DEFAULT = 0xFE, + MAC_AX_PCIE_IGNORE = 0xFF +}; + +enum mac_ax_io_rcy_tmr { + MAC_AX_IO_RCY_ANA_TMR_2MS = 24000, + MAC_AX_IO_RCY_ANA_TMR_4MS = 48000, + MAC_AX_IO_RCY_ANA_TMR_6MS = 72000, + MAC_AX_IO_RCY_ANA_TMR_DEF = 0xFE +}; + struct rtw89_pci_ch_dma_addr { u32 num; u32 idx; @@ -504,6 +603,21 @@ struct rtw89_pci_ch_dma_addr_set { }; struct rtw89_pci_info { + enum mac_ax_bd_trunc_mode txbd_trunc_mode; + enum mac_ax_bd_trunc_mode 
rxbd_trunc_mode; + enum mac_ax_rxbd_mode rxbd_mode; + enum mac_ax_tag_mode tag_mode; + enum mac_ax_tx_burst tx_burst; + enum mac_ax_rx_burst rx_burst; + enum mac_ax_wd_dma_intvl wd_dma_idle_intvl; + enum mac_ax_wd_dma_intvl wd_dma_act_intvl; + enum mac_ax_multi_tag_num multi_tag_num; + enum mac_ax_pcie_func_ctrl lbc_en; + enum mac_ax_lbc_tmr lbc_tmr; + enum mac_ax_pcie_func_ctrl autok_en; + enum mac_ax_pcie_func_ctrl io_rcy_en; + enum mac_ax_io_rcy_tmr io_rcy_tmr; + u32 init_cfg_reg; u32 txhci_en_bit; u32 rxhci_en_bit; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index b9047ac6b86d..42dfafb3d7f5 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -9,6 +9,21 @@ #include "rtw8852a.h" static const struct rtw89_pci_info rtw8852a_pci_info = { + .txbd_trunc_mode = MAC_AX_BD_TRUNC, + .rxbd_trunc_mode = MAC_AX_BD_TRUNC, + .rxbd_mode = MAC_AX_RXBD_PKT, + .tag_mode = MAC_AX_TAG_MULTI, + .tx_burst = MAC_AX_TX_BURST_2048B, + .rx_burst = MAC_AX_RX_BURST_128B, + .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS, + .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS, + .multi_tag_num = MAC_AX_TAG_NUM_8, + .lbc_en = MAC_AX_PCIE_ENABLE, + .lbc_tmr = MAC_AX_LBC_TMR_2MS, + .autok_en = MAC_AX_PCIE_DISABLE, + .io_rcy_en = MAC_AX_PCIE_DISABLE, + .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS, + .init_cfg_reg = R_AX_PCIE_INIT_CFG1, .txhci_en_bit = B_AX_TXHCI_EN, .rxhci_en_bit = B_AX_RXHCI_EN, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 33e69e34e385..621918465c47 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -10,6 +10,21 @@ #include "rtw8852c.h" static const struct rtw89_pci_info rtw8852c_pci_info = { + .txbd_trunc_mode = MAC_AX_BD_TRUNC, + .rxbd_trunc_mode = MAC_AX_BD_TRUNC, + .rxbd_mode = MAC_AX_RXBD_PKT, + .tag_mode = MAC_AX_TAG_MULTI, + .tx_burst = MAC_AX_TX_BURST_V1_256B, + .rx_burst = MAC_AX_RX_BURST_V1_128B, + .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS, + .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS, + .multi_tag_num = MAC_AX_TAG_NUM_8, + .lbc_en = MAC_AX_PCIE_ENABLE, + .lbc_tmr = MAC_AX_LBC_TMR_2MS, + .autok_en = MAC_AX_PCIE_DISABLE, + .io_rcy_en = MAC_AX_PCIE_ENABLE, + .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS, + .init_cfg_reg = R_AX_HAXI_INIT_CFG1, .txhci_en_bit = B_AX_TXHCI_EN_V1, .rxhci_en_bit = B_AX_RXHCI_EN_V1, -- cgit v1.2.3 From 1e3f205548155af7df781bb23bed15af17aa8ace Mon Sep 17 00:00:00 2001 From: Chia-Yuan Li Date: Fri, 25 Mar 2022 14:00:42 +0800 Subject: rtw89: pci: refine pci pre_init function The pre_init is used to initialize partial PCI function during PCI probe. It doesn't need to initialize all functions, so probe can be faster. 
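A minimal userspace sketch of the idle-poll idea this rework leans on (not the driver code itself): the register address below mirrors R_AX_HAXI_DMA_BUSY1 (0x101C) and the ACH0/ACH1 busy bits added in this patch, the loop is a plain stand-in for the kernel's read_poll_timeout(), and the register read is mocked.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Mock register read; the real driver reads the chip-specific dma_busy register. */
static uint32_t fake_dma_busy;
static uint32_t reg_read32(uint32_t addr) { (void)addr; return fake_dma_busy; }

/*
 * Return 0 once every bit in busy_mask reads back as zero, or -1 after
 * 'tries' attempts. The driver uses read_poll_timeout() for the same job.
 */
static int poll_dma_idle(uint32_t busy_reg, uint32_t busy_mask, int tries)
{
	while (tries--) {
		if (!(reg_read32(busy_reg) & busy_mask))
			return 0;
		usleep(10);
	}
	return -1;
}

int main(void)
{
	/* ACH0/ACH1 busy bits (BIT(8), BIT(9)) from the busy-register hunks below. */
	uint32_t mask = (1u << 8) | (1u << 9);

	fake_dma_busy = 0;	/* pretend the hardware reports idle straight away */
	printf("idle poll: %d\n", poll_dma_idle(0x101C, mask, 100));
	return 0;
}

The point of the split is that the busy registers themselves come from struct rtw89_pci_info, so the same poll works for 8852A (R_AX_PCIE_DMA_BUSY*) and 8852C (R_AX_HAXI_DMA_BUSY*).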
Signed-off-by: Chia-Yuan Li Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 158 ++++++++++++++++++++----- drivers/net/wireless/realtek/rtw89/pci.h | 16 +++ drivers/net/wireless/realtek/rtw89/reg.h | 31 +++++ drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 3 + drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 3 + 5 files changed, 180 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index e064d355250c..43bb4490380d 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1437,12 +1437,19 @@ static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) B_AX_STOP_PCIEIO); rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, txhci_en | rxhci_en); + if (chip_id == RTL8852C) + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_STOP_AXI_MST); } else { if (chip_id != RTL8852C) rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_PCIEIO); - rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, - txhci_en | rxhci_en); + else + rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_STOP_AXI_MST); + if (chip_id == RTL8852C) + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_STOP_AXI_MST); } } @@ -1865,13 +1872,16 @@ static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) { - if (rtwdev->chip->chip_id != RTL8852A) - return; - - rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, - B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); - rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, - B_AX_PCIE_DIS_WLSUS_AFT_PDN); + if (rtwdev->chip->chip_id == RTL8852A || + rtwdev->chip->chip_id == RTL8852B) { + rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); + rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_DIS_WLSUS_AFT_PDN); + } else if (rtwdev->chip->chip_id == RTL8852C) { + rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, + B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); + } } static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) @@ -1902,12 +1912,24 @@ static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) { - if (rtwdev->chip->chip_id != RTL8852C && rtwdev->hal.cv == CHIP_CAV) + if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) return; rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); } +static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) +{ + if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) + return; + + rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, + B_AX_SYSON_DIS_PMCR_AX_WRMSK); + rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); + rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, + B_AX_SYSON_DIS_PMCR_AX_WRMSK); +} + static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) { if (rtwdev->chip->chip_id == RTL8852C) @@ -1917,6 +1939,25 @@ static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) B_AX_SIC_EN_FORCE_CLKREQ); } +static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + u32 lbc; + + if (rtwdev->chip->chip_id == RTL8852C) + return; + + lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); + if (info->lbc_en == MAC_AX_PCIE_ENABLE) { + lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); + lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; + rtw89_write32(rtwdev, 
R_AX_LBC_WATCHDOG, lbc); + } else { + lbc &= ~B_AX_LBC_EN; + } + rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); +} + static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; @@ -1957,6 +1998,15 @@ static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) B_AX_EN_CHKDSC_NO_RX_STUCK); } +static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id == RTL8852C) + return; + + rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, + B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); +} + static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; @@ -1979,6 +2029,68 @@ static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); } +static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + u32 ret, check, dma_busy; + u32 dma_busy1 = info->dma_busy1_reg; + u32 dma_busy2 = info->dma_busy2_reg; + + check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | + B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | + B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | + B_AX_CH9_BUSY | B_AX_CH12_BUSY; + + ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, + 10, 100, false, rtwdev, dma_busy1); + if (ret) + return ret; + + check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; + + ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, + 10, 100, false, rtwdev, dma_busy2); + if (ret) + return ret; + + return 0; +} + +static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + u32 ret, check, dma_busy; + u32 dma_busy3 = info->dma_busy3_reg; + + check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; + + ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, + 10, 100, false, rtwdev, dma_busy3); + if (ret) + return ret; + + return 0; +} + +static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) +{ + u32 ret; + + ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); + if (ret) { + rtw89_err(rtwdev, "txdma ch busy\n"); + return ret; + } + + ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); + if (ret) { + rtw89_err(rtwdev, "rxdma ch busy\n"); + return ret; + } + + return 0; +} + static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; @@ -2083,9 +2195,6 @@ static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; - u32 dma_busy; - u32 check; - u32 lbc; int ret; rtw89_pci_rxdma_prefth(rtwdev); @@ -2110,34 +2219,21 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) rtw89_pci_power_wake(rtwdev, true); rtw89_pci_autoload_hang(rtwdev); rtw89_pci_l12_vmain(rtwdev); + rtw89_pci_gen2_force_ib(rtwdev); rtw89_pci_set_sic(rtwdev); + rtw89_pci_set_lbc(rtwdev); rtw89_pci_set_io_rcy(rtwdev); rtw89_pci_set_dbg(rtwdev); - - if (rtwdev->chip->chip_id == RTL8852A) { - rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, - B_AX_PCIE_AUXCLK_GATE); - - lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); - lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER); - lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; - rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); - - rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, - B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); - } + rtw89_pci_set_keep_reg(rtwdev); rtw89_write32_set(rtwdev, 
info->dma_stop1_reg, B_AX_STOP_WPDMA); /* stop DMA activities */ rtw89_pci_ctrl_dma_all(rtwdev, false); - /* check PCI at idle state */ - check = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; - ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, - 100, 3000, false, rtwdev, R_AX_PCIE_DMA_BUSY1); + ret = rtw89_pci_poll_dma_all_idle(rtwdev); if (ret) { - rtw89_err(rtwdev, "failed to poll io busy\n"); + rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); return ret; } diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 8d49033fa270..2e8695208fcc 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -366,6 +366,19 @@ #define B_AX_PCIEIO_TX_BUSY BIT(21) #define B_AX_PCIEIO_BUSY BIT(20) #define B_AX_WPDMA_BUSY BIT(19) +#define B_AX_CH12_BUSY BIT(18) +#define B_AX_CH9_BUSY BIT(17) +#define B_AX_CH8_BUSY BIT(16) +#define B_AX_ACH7_BUSY BIT(15) +#define B_AX_ACH6_BUSY BIT(14) +#define B_AX_ACH5_BUSY BIT(13) +#define B_AX_ACH4_BUSY BIT(12) +#define B_AX_ACH3_BUSY BIT(11) +#define B_AX_ACH2_BUSY BIT(10) +#define B_AX_ACH1_BUSY BIT(9) +#define B_AX_ACH0_BUSY BIT(8) +#define B_AX_RPQ_BUSY BIT(1) +#define B_AX_RXQ_BUSY BIT(0) #define R_AX_PCIE_DMA_BUSY2 0x131C #define B_AX_CH11_BUSY BIT(1) @@ -628,6 +641,9 @@ struct rtw89_pci_info { u32 txbd_rwptr_clr2_reg; u32 dma_stop1_reg; u32 dma_stop2_reg; + u32 dma_busy1_reg; + u32 dma_busy2_reg; + u32 dma_busy3_reg; const struct rtw89_pci_ch_dma_addr_set *dma_addr_set; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index f67584efeea9..6cda6dcb5d86 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -111,6 +111,14 @@ #define R_AX_HCI_OPT_CTRL 0x0074 #define BIT_WAKE_CTRL BIT(5) +#define R_AX_HCI_BG_CTRL 0x0078 +#define B_AX_IBX_EN_VALUE BIT(15) +#define B_AX_IB_EN_VALUE BIT(14) +#define B_AX_FORCED_IB_EN BIT(4) +#define B_AX_EN_REGBG BIT(3) +#define B_AX_R_AX_BG_LPF BIT(2) +#define B_AX_R_AX_BG GENMASK(1, 0) + #define R_AX_PLATFORM_ENABLE 0x0088 #define B_AX_WCPU_EN BIT(1) #define B_AX_PLATFORM_EN BIT(0) @@ -256,6 +264,21 @@ #define B_AX_STOP_ACH1 BIT(9) #define B_AX_STOP_ACH0 BIT(8) +#define R_AX_HAXI_DMA_BUSY1 0x101C +#define B_AX_HAXIIO_BUSY BIT(20) +#define B_AX_WPDMA_BUSY BIT(19) +#define B_AX_CH12_BUSY BIT(18) +#define B_AX_CH9_BUSY BIT(17) +#define B_AX_CH8_BUSY BIT(16) +#define B_AX_ACH7_BUSY BIT(15) +#define B_AX_ACH6_BUSY BIT(14) +#define B_AX_ACH5_BUSY BIT(13) +#define B_AX_ACH4_BUSY BIT(12) +#define B_AX_ACH3_BUSY BIT(11) +#define B_AX_ACH2_BUSY BIT(10) +#define B_AX_ACH1_BUSY BIT(9) +#define B_AX_ACH0_BUSY BIT(8) + #define R_AX_PCIE_DBG_CTRL 0x11C0 #define B_AX_DBG_DUMMY_MASK GENMASK(23, 16) #define B_AX_DBG_SEL_MASK GENMASK(15, 13) @@ -268,6 +291,14 @@ #define B_AX_STOP_CH11 BIT(1) #define B_AX_STOP_CH10 BIT(0) +#define R_AX_HAXI_DMA_BUSY2 0x11C8 +#define B_AX_CH11_BUSY BIT(1) +#define B_AX_CH10_BUSY BIT(0) + +#define R_AX_HAXI_DMA_BUSY3 0x1208 +#define B_AX_RPQ_BUSY BIT(1) +#define B_AX_RXQ_BUSY BIT(0) + #define R_AX_HCI_FC_CTRL_V1 0x1700 #define R_AX_CH_PAGE_CTRL_V1 0x1704 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 42dfafb3d7f5..6055e8b9887f 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -34,6 +34,9 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { 
.txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2, .dma_stop1_reg = R_AX_PCIE_DMA_STOP1, .dma_stop2_reg = R_AX_PCIE_DMA_STOP2, + .dma_busy1_reg = R_AX_PCIE_DMA_BUSY1, + .dma_busy2_reg = R_AX_PCIE_DMA_BUSY2, + .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1, .dma_addr_set = &rtw89_pci_ch_dma_addr_set, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 621918465c47..dca023e79101 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -35,6 +35,9 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2_V1, .dma_stop1_reg = R_AX_HAXI_DMA_STOP1, .dma_stop2_reg = R_AX_HAXI_DMA_STOP2, + .dma_busy1_reg = R_AX_HAXI_DMA_BUSY1, + .dma_busy2_reg = R_AX_HAXI_DMA_BUSY2, + .dma_busy3_reg = R_AX_HAXI_DMA_BUSY3, .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, -- cgit v1.2.3 From 0db862fb025c933675e3efb388eb96da781d6365 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:43 +0800 Subject: rtw89: pci: add LTR setting for v1 chip Add LTR handle to PCI deinit as well. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 58 +++++++++++++++++++++++++- drivers/net/wireless/realtek/rtw89/pci.h | 3 ++ drivers/net/wireless/realtek/rtw89/reg.h | 22 ++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 1 + 5 files changed, 83 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 43bb4490380d..2fd746d6987c 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -2182,10 +2182,13 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) { + const struct rtw89_pci_info *info = rtwdev->pci_info; + if (rtwdev->chip->chip_id == RTL8852A) { /* ltr sw trigger */ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); } + info->ltr_set(rtwdev, false); rtw89_pci_ctrl_dma_all(rtwdev, false); rtw89_pci_clr_idx_all(rtwdev); @@ -2260,10 +2263,13 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) return 0; } -static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev) +int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) { u32 val; + if (!en) + return 0; + val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); if (rtw89_pci_ltr_is_err_reg_val(val)) return -EINVAL; @@ -2290,13 +2296,61 @@ static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev) return 0; } +EXPORT_SYMBOL(rtw89_pci_ltr_set); + +int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) +{ + u32 dec_ctrl; + u32 val32; + + val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); + if (rtw89_pci_ltr_is_err_reg_val(val32)) + return -EINVAL; + val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); + if (rtw89_pci_ltr_is_err_reg_val(val32)) + return -EINVAL; + dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); + if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) + return -EINVAL; + val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); + if (rtw89_pci_ltr_is_err_reg_val(val32)) + return -EINVAL; + val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); + if (rtw89_pci_ltr_is_err_reg_val(val32)) + return -EINVAL; + + if (!en) { + dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); + dec_ctrl |= 
FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | + B_AX_LTR_REQ_DRV; + } else { + dec_ctrl |= B_AX_LTR_HW_DEC_EN; + } + + dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; + dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); + + if (en) + rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, + B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); + rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, + PCI_LTR_IDLE_TIMER_3_2MS); + rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); + rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); + rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); + rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); + rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); + + return 0; +} +EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; int ret; - ret = rtw89_pci_ltr_set(rtwdev); + ret = info->ltr_set(rtwdev, true); if (ret) { rtw89_err(rtwdev, "pci ltr set fail\n"); return ret; diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 2e8695208fcc..99f0cd2f47da 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -647,6 +647,7 @@ struct rtw89_pci_info { const struct rtw89_pci_ch_dma_addr_set *dma_addr_set; + int (*ltr_set)(struct rtw89_dev *rtwdev, bool en); u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev, void *txaddr_info_addr, u32 total_len, dma_addr_t dma, u8 *add_info_nr); @@ -912,6 +913,8 @@ struct pci_device_id; int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); void rtw89_pci_remove(struct pci_dev *pdev); +int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en); +int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en); u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, void *txaddr_info_addr, u32 total_len, dma_addr_t dma, u8 *add_info_nr); diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 6cda6dcb5d86..21a451264e50 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -299,6 +299,27 @@ #define B_AX_RPQ_BUSY BIT(1) #define B_AX_RXQ_BUSY BIT(0) +#define R_AX_LTR_DEC_CTRL 0x1600 +#define B_AX_LTR_IDX_DRV_VLD BIT(16) +#define B_AX_LTR_CURR_IDX_DRV_MASK GENMASK(15, 14) +#define B_AX_LTR_IDX_FW_VLD BIT(13) +#define B_AX_LTR_CURR_IDX_FW_MASK GENMASK(12, 11) +#define B_AX_LTR_IDX_HW_VLD BIT(10) +#define B_AX_LTR_CURR_IDX_HW_MASK GENMASK(9, 8) +#define B_AX_LTR_REQ_DRV BIT(7) +#define B_AX_LTR_IDX_DRV_MASK GENMASK(6, 5) +#define PCIE_LTR_IDX_IDLE 3 +#define B_AX_LTR_DRV_DEC_EN BIT(4) +#define B_AX_LTR_FW_DEC_EN BIT(3) +#define B_AX_LTR_HW_DEC_EN BIT(2) +#define B_AX_LTR_SPACE_IDX_V1_MASK GENMASK(1, 0) +#define LTR_EN_BITS (B_AX_LTR_HW_DEC_EN | B_AX_LTR_FW_DEC_EN | B_AX_LTR_DRV_DEC_EN) + +#define R_AX_LTR_LATENCY_IDX0 0x1604 +#define R_AX_LTR_LATENCY_IDX1 0x1608 +#define R_AX_LTR_LATENCY_IDX2 0x160C +#define R_AX_LTR_LATENCY_IDX3 0x1610 + #define R_AX_HCI_FC_CTRL_V1 0x1700 #define R_AX_CH_PAGE_CTRL_V1 0x1704 @@ -440,6 +461,7 @@ #define B_AX_APP_LTR_ACT BIT(5) #define B_AX_APP_LTR_IDLE BIT(4) #define B_AX_LTR_EN BIT(1) +#define B_AX_LTR_WD_NOEMP_CHK_V1 BIT(1) #define B_AX_LTR_HW_EN BIT(0) #define R_AX_LTR_CTRL_1 0x8414 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 6055e8b9887f..61a1693535d8 
100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -40,6 +40,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .dma_addr_set = &rtw89_pci_ch_dma_addr_set, + .ltr_set = rtw89_pci_ltr_set, .fill_txaddr_info = rtw89_pci_fill_txaddr_info, }; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index dca023e79101..aeafac553f40 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -41,6 +41,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, + .ltr_set = rtw89_pci_ltr_set_v1, .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1, }; -- cgit v1.2.3 From bab9e239178679db33d0cca8e14ce448419a3078 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:44 +0800 Subject: rtw89: pci: set address info registers depends on chips Address info registers are used to configure size of DMA address info to point skb->data. With different size, it can support different number of scatters. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 2fd746d6987c..25d385be6e86 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -2348,6 +2348,7 @@ EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; int ret; ret = info->ltr_set(rtwdev, true); @@ -2355,14 +2356,16 @@ static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) rtw89_err(rtwdev, "pci ltr set fail\n"); return ret; } - if (rtwdev->chip->chip_id == RTL8852A) { + if (chip_id == RTL8852A) { /* ltr sw trigger */ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); } - /* ADDR info 8-byte mode */ - rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, - B_AX_HOST_ADDR_INFO_8B_SEL); - rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); + if (chip_id == RTL8852A || chip_id == RTL8852B) { + /* ADDR info 8-byte mode */ + rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, + B_AX_HOST_ADDR_INFO_8B_SEL); + rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); + } /* enable DMA for all queues */ rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); -- cgit v1.2.3 From 22a66e7c3abe48237853239b2184751cf97aca07 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:45 +0800 Subject: rtw89: pci: add deglitch setting Add setting to support 8852ce. 
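For reference, the 8852C branch added below reaches the same analog register through a direct-mapped window instead of MDIO. Assuming the 2-byte stride implied by the "* 2" in the hunk, the effective addresses work out as in this small arithmetic check (values copied from pci.h in this series):

#include <stdint.h>
#include <stdio.h>

#define R_RAC_DIRECT_OFFSET_G1	0x3800
#define R_RAC_DIRECT_OFFSET_G2	0x3880
#define RAC_ANA24		0x24

int main(void)
{
	/* Each 16-bit analog register occupies two bytes in the direct window,
	 * hence the '* 2' stride used by the 8852C branch of the patch. */
	uint32_t g1 = R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2;	/* 0x3848 */
	uint32_t g2 = R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2;	/* 0x38C8 */

	printf("RAC_ANA24 via direct window: gen1 0x%04X, gen2 0x%04X\n", g1, g2);
	return 0;
}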
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 27 ++++++++++++++++----------- drivers/net/wireless/realtek/rtw89/pci.h | 3 +++ 2 files changed, 19 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 25d385be6e86..5112b2d443c3 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1809,19 +1809,24 @@ end: static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; int ret; - if (rtwdev->chip->chip_id != RTL8852A) - return 0; - - ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, - PCIE_PHY_GEN1); - if (ret) - return ret; - ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, - PCIE_PHY_GEN2); - if (ret) - return ret; + if (chip_id == RTL8852A) { + ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, + PCIE_PHY_GEN1); + if (ret) + return ret; + ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, + PCIE_PHY_GEN2); + if (ret) + return ret; + } else if (chip_id == RTL8852C) { + rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, + B_AX_DEGLITCH); + rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, + B_AX_DEGLITCH); + } return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 99f0cd2f47da..805fc3e8c1a4 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -80,6 +80,9 @@ #define R_AX_PCIE_WDT_TIMER_S1 0x3128 #define B_AX_PCIE_WDT_TIMER_S1_MASK GENMASK(31, 0) +#define R_RAC_DIRECT_OFFSET_G1 0x3800 +#define R_RAC_DIRECT_OFFSET_G2 0x3880 + #define RTW89_PCI_WR_RETRY_CNT 20 /* Interrupts */ -- cgit v1.2.3 From e1e7a574b20ff3ce474c67ef6dfbc715d0870e9a Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:46 +0800 Subject: rtw89: pci: add L1 settings Configure L1 settings of enter and exit. 
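The enter/exit configuration amounts to two read-modify-write accesses on R_AX_PCIE_PS_CTRL_V1 (0x3008). A simplified stand-in for the rtw89_write32_set()/_clr() pattern used here, with a mock register and the bit positions from the pci.h hunk of this patch:

#include <stdint.h>
#include <stdio.h>

#define B_AX_DMAC0_EXIT_L1_EN	(1u << 6)
#define B_AX_SEL_REQ_ENTR_L1	(1u << 2)

static uint32_t ps_ctrl;	/* stand-in for R_AX_PCIE_PS_CTRL_V1 */

/* Read-modify-write helpers mirroring rtw89_write32_set()/_clr(). */
static void reg_set(uint32_t *reg, uint32_t bits) { *reg |= bits; }
static void reg_clr(uint32_t *reg, uint32_t bits) { *reg &= ~bits; }

int main(void)
{
	ps_ctrl = 0x0000004c;				/* arbitrary starting value */

	reg_clr(&ps_ctrl, B_AX_SEL_REQ_ENTR_L1);	/* cleared for RTL8852C in this patch */
	reg_set(&ps_ctrl, B_AX_DMAC0_EXIT_L1_EN);	/* set for RTL8852C in this patch */

	printf("ps ctrl = 0x%08X\n", ps_ctrl);		/* 0x00000048 */
	return 0;
}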
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 18 ++++++++++++++++++ drivers/net/wireless/realtek/rtw89/pci.h | 7 +++++++ 2 files changed, 25 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 5112b2d443c3..dcf907b81cff 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1935,6 +1935,22 @@ static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) B_AX_SYSON_DIS_PMCR_AX_WRMSK); } +static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id != RTL8852C) + return; + + rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); +} + +static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) +{ + if (rtwdev->chip->chip_id != RTL8852C) + return; + + rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); +} + static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) { if (rtwdev->chip->chip_id == RTL8852C) @@ -2228,6 +2244,8 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) rtw89_pci_autoload_hang(rtwdev); rtw89_pci_l12_vmain(rtwdev); rtw89_pci_gen2_force_ib(rtwdev); + rtw89_pci_l1_ent_lat(rtwdev); + rtw89_pci_wd_exit_l1(rtwdev); rtw89_pci_set_sic(rtwdev); rtw89_pci_set_lbc(rtwdev); rtw89_pci_set_io_rcy(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 805fc3e8c1a4..a085d1f27a12 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -38,6 +38,13 @@ #define R_AX_MDIO_WDATA 0x10A4 #define R_AX_MDIO_RDATA 0x10A6 +#define R_AX_PCIE_PS_CTRL_V1 0x3008 +#define B_AX_CMAC_EXIT_L1_EN BIT(7) +#define B_AX_DMAC0_EXIT_L1_EN BIT(6) +#define B_AX_SEL_XFER_PENDING BIT(3) +#define B_AX_SEL_REQ_ENTR_L1 BIT(2) +#define B_AX_SEL_REQ_EXIT_L1 BIT(0) + #define R_AX_PCIE_BG_CLR 0x303C #define B_AX_BG_CLR_ASYNC_M3 BIT(4) -- cgit v1.2.3 From a7d82a7aae656089c478b5ccf83ede38d49ac223 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:47 +0800 Subject: rtw89: extend dmac_pre_init to support 8852C DMAC is short for data MAC. 8852C has more settings than 8852A, so add them. 
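The 8852C path in the hunk that follows composes the DMA mode with FIELD_PREP() on the GENMASK(19, 18) field of R_AX_HAXI_INIT_CFG1. Below is a self-contained approximation of those two macros using the field layout defined in this series; the real kernel macros are more general, this only shows the bit arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Simplified 32-bit-only equivalents of GENMASK() and FIELD_PREP(). */
#define GENMASK32(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define B_AX_DMA_MODE_MASK	GENMASK32(19, 18)
#define DMA_MOD_PCIE_1B		0x0
#define DMA_MOD_PCIE_4B		0x1

int main(void)
{
	printf("mask    = 0x%08X\n", B_AX_DMA_MODE_MASK);		/* 0x000C0000 */
	printf("PCIE 1B = 0x%08X\n",
	       FIELD_PREP32(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_1B));	/* 0x00000000 */
	printf("PCIE 4B = 0x%08X\n",
	       FIELD_PREP32(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_4B));	/* 0x00040000 */
	return 0;
}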
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 37 ++++++++++++++++++++++++++------ drivers/net/wireless/realtek/rtw89/reg.h | 1 + 2 files changed, 31 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index d75105441372..66122d696151 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2756,18 +2756,41 @@ static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, return 0; } -static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev) +static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 val; int ret; - val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | - B_AX_PKT_BUF_EN; + if (chip_id == RTL8852C) + val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | + B_AX_PKT_BUF_EN | B_AX_H_AXIDMA_EN; + else + val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | + B_AX_PKT_BUF_EN; rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val); val = B_AX_DISPATCHER_CLK_EN; rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val); + if (chip_id != RTL8852C) + goto dle; + + val = rtw89_read32(rtwdev, R_AX_HAXI_INIT_CFG1); + val &= ~(B_AX_DMA_MODE_MASK | B_AX_STOP_AXI_MST); + val |= FIELD_PREP(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_1B) | + B_AX_TXHCI_EN_V1 | B_AX_RXHCI_EN_V1; + rtw89_write32(rtwdev, R_AX_HAXI_INIT_CFG1, val); + + rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP1, + B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | B_AX_STOP_ACH3 | + B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | B_AX_STOP_ACH6 | + B_AX_STOP_ACH7 | B_AX_STOP_CH8 | B_AX_STOP_CH9 | + B_AX_STOP_CH12 | B_AX_STOP_ACH2); + rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP2, B_AX_STOP_CH10 | B_AX_STOP_CH11); + rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_AXIDMA_EN); + +dle: ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode); if (ret) { rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret); @@ -2825,16 +2848,16 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) rtw89_mac_hci_func_en(rtwdev); + ret = rtw89_mac_dmac_pre_init(rtwdev); + if (ret) + return ret; + if (rtwdev->hci.ops->mac_pre_init) { ret = rtwdev->hci.ops->mac_pre_init(rtwdev); if (ret) return ret; } - ret = rtw89_mac_fw_dl_pre_init(rtwdev); - if (ret) - return ret; - rtw89_mac_disable_cpu(rtwdev); ret = rtw89_mac_enable_cpu(rtwdev, 0, true); if (ret) diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 21a451264e50..9d9554d28ab6 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -120,6 +120,7 @@ #define B_AX_R_AX_BG GENMASK(1, 0) #define R_AX_PLATFORM_ENABLE 0x0088 +#define B_AX_AXIDMA_EN BIT(3) #define B_AX_WCPU_EN BIT(1) #define B_AX_PLATFORM_EN BIT(0) -- cgit v1.2.3 From cf7b8b8088111d32a16c31f01be2438e8d411c7a Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:48 +0800 Subject: rtw89: update STA scheduler parameters for v1 chip The v1 chip has additional setting of STA scheduler, so add it. 
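The new _patch_ss2f_path() helper in the hunk that follows is a single masked field write. As a simplified stand-in for rtw89_write32_mask(), using the SS2FINFO values added by this patch:

#include <stdint.h>
#include <stdio.h>

#define B_AX_SS_DEST_QUEUE_MASK	0x00003F00u	/* GENMASK(13, 8) */
#define SS2F_PATH_WLCPU		0x0A

/* Replace one bit-field inside a register while leaving the rest untouched. */
static uint32_t write_mask(uint32_t old, uint32_t mask, uint32_t val)
{
	unsigned int shift = __builtin_ctz(mask);

	return (old & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0x80001234;	/* arbitrary previous register content */

	reg = write_mask(reg, B_AX_SS_DEST_QUEUE_MASK, SS2F_PATH_WLCPU);
	printf("SS2FINFO path -> 0x%08X\n", reg);	/* 0x80000A34 */
	return 0;
}

The helper returns early for 8852A/8852B, so only the newer chips get the WLCPU destination queue.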
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-10-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 16 +++++++++++++++- drivers/net/wireless/realtek/rtw89/reg.h | 9 +++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 66122d696151..b43c947afa61 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1560,6 +1560,17 @@ static bool dle_is_txq_empty(struct rtw89_dev *rtwdev) return false; } +static void _patch_ss2f_path(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B) + return; + + rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK, + SS2F_PATH_WLCPU); +} + static int sta_sch_init(struct rtw89_dev *rtwdev) { u32 p_val; @@ -1581,7 +1592,10 @@ static int sta_sch_init(struct rtw89_dev *rtwdev) return ret; } - rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG); + rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG | + B_AX_SS_NONEMPTY_SS2FINFO_EN); + + _patch_ss2f_path(rtwdev); return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 9d9554d28ab6..fd9874137af7 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -801,8 +801,17 @@ #define R_AX_SS_CTRL 0x9E10 #define B_AX_SS_INIT_DONE_1 BIT(31) #define B_AX_SS_WARM_INIT_FLG BIT(29) +#define B_AX_SS_NONEMPTY_SS2FINFO_EN BIT(28) #define B_AX_SS_EN BIT(0) +#define R_AX_SS2FINFO_PATH 0x9E50 +#define B_AX_SS_UL_REL BIT(31) +#define B_AX_SS_REL_QUEUE_MASK GENMASK(29, 24) +#define B_AX_SS_REL_PORT_MASK GENMASK(18, 16) +#define B_AX_SS_DEST_QUEUE_MASK GENMASK(13, 8) +#define SS2F_PATH_WLCPU 0x0A +#define B_AX_SS_DEST_PORT_MASK GENMASK(2, 0) + #define R_AX_SS_MACID_PAUSE_0 0x9EB0 #define B_AX_SS_MACID31_0_PAUSE_SH 0 #define B_AX_SS_MACID31_0_PAUSE_MASK GENMASK(31, 0) -- cgit v1.2.3 From 61ebeecb3d670f1e37e20095ff2504960404538f Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:49 +0800 Subject: rtw89: add chip_ops::{enable,disable}_bb_rf to support v1 chip The v1 chip use specific functions to enable and disable BB/RF. 
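A cut-down sketch of the indirection being introduced: far fewer callbacks than the real struct rtw89_chip_ops, plain printf in place of register work, and hypothetical function names, but the same ops-table shape the patch adds so callers never test the chip id themselves.

#include <stdio.h>

struct chip_ctx;

struct chip_ops {
	int  (*enable_bb_rf)(struct chip_ctx *ctx);
	void (*disable_bb_rf)(struct chip_ctx *ctx);
};

struct chip_ctx {
	const struct chip_ops *ops;
	const char *name;
};

static int common_enable(struct chip_ctx *c)   { printf("%s: common enable\n", c->name); return 0; }
static void common_disable(struct chip_ctx *c) { printf("%s: common disable\n", c->name); }
static int v1_enable(struct chip_ctx *c)       { printf("%s: v1 enable (extra AFE/XTAL setup)\n", c->name); return 0; }
static void v1_disable(struct chip_ctx *c)     { printf("%s: v1 disable\n", c->name); }

static const struct chip_ops common_ops = { common_enable, common_disable };
static const struct chip_ops v1_ops     = { v1_enable, v1_disable };

/* Callers go through the ops pointer, like rtw89_chip_enable_bb_rf(). */
static int chip_enable_bb_rf(struct chip_ctx *c) { return c->ops->enable_bb_rf(c); }

int main(void)
{
	struct chip_ctx a = { &common_ops, "8852a-like" };
	struct chip_ctx c = { &v1_ops, "v1-like" };

	chip_enable_bb_rf(&a);
	chip_enable_bb_rf(&c);
	a.ops->disable_bb_rf(&a);
	c.ops->disable_bb_rf(&c);
	return 0;
}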
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-11-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 7 +++-- drivers/net/wireless/realtek/rtw89/core.h | 2 ++ drivers/net/wireless/realtek/rtw89/mac.c | 10 ++++-- drivers/net/wireless/realtek/rtw89/mac.h | 19 ++++++++++- drivers/net/wireless/realtek/rtw89/reg.h | 6 ++++ drivers/net/wireless/realtek/rtw89/rtw8852a.c | 2 ++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 45 +++++++++++++++++++++++++++ 7 files changed, 86 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index d923e4a0f963..f540ee34fc2c 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -2706,8 +2706,11 @@ int rtw89_core_start(struct rtw89_dev *rtwdev) /* efuse process */ /* pre-config BB/RF, BB reset/RFC reset */ - rtw89_mac_disable_bb_rf(rtwdev); - rtw89_mac_enable_bb_rf(rtwdev); + rtw89_chip_disable_bb_rf(rtwdev); + ret = rtw89_chip_enable_bb_rf(rtwdev); + if (ret) + return ret; + rtw89_phy_init_bb_reg(rtwdev); rtw89_phy_init_rf_reg(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 9f53d581a48f..ee2edd9e9173 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2059,6 +2059,8 @@ struct rtw89_hci_info { }; struct rtw89_chip_ops { + int (*enable_bb_rf)(struct rtw89_dev *rtwdev); + void (*disable_bb_rf)(struct rtw89_dev *rtwdev); void (*bb_reset)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void (*bb_sethw)(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index b43c947afa61..280db2f3f89b 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2828,7 +2828,7 @@ static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev) B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN); } -void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) +int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) { rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); @@ -2836,7 +2836,10 @@ void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); + + return 0; } +EXPORT_SYMBOL(rtw89_mac_enable_bb_rf); void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) { @@ -2847,6 +2850,7 @@ void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); } +EXPORT_SYMBOL(rtw89_mac_disable_bb_rf); int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) { @@ -2892,7 +2896,9 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev) if (ret) goto fail; - rtw89_mac_enable_bb_rf(rtwdev); + ret = rtw89_chip_enable_bb_rf(rtwdev); + if (ret) + goto fail; ret = rtw89_mac_sys_init(rtwdev); if (ret) diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 6edeb1aafaf7..de65d9becb05 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -797,8 +797,23 @@ int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val); int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); int rtw89_mac_port_update(struct 
rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif); -void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev); +int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev); void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev); + +static inline int rtw89_chip_enable_bb_rf(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + return chip->ops->enable_bb_rf(rtwdev); +} + +static inline void rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + chip->ops->disable_bb_rf(rtwdev); +} + u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev); int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err); void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, @@ -903,6 +918,8 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, u8 *tx_retry); enum rtw89_mac_xtal_si_offset { + XTAL0 = 0x0, + XTAL3 = 0x3, XTAL_SI_XTAL_SC_XI = 0x04, #define XTAL_SC_XI_MASK GENMASK(7, 0) XTAL_SI_XTAL_SC_XO = 0x05, diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index fd9874137af7..1f4cf30f3822 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -218,6 +218,7 @@ #define B_AX_EECS_PULL_LOW_EN BIT(16) #define R_AX_WLRF_CTRL 0x02F0 +#define B_AX_AFC_AFEDIG BIT(17) #define B_AX_WLRF1_CTRL_7 BIT(15) #define B_AX_WLRF1_CTRL_1 BIT(9) #define B_AX_WLRF_CTRL_7 BIT(7) @@ -231,6 +232,11 @@ #define B_AX_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2) #define B_AX_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0) +#define R_AX_AFE_OFF_CTRL1 0x0444 +#define B_AX_S1_LDO_VSEL_F_MASK GENMASK(25, 24) +#define B_AX_S1_LDO2PWRCUT_F BIT(23) +#define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21) + #define R_AX_FILTER_MODEL_ADDR 0x0C04 #define R_AX_HAXI_INIT_CFG1 0x1000 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index a8b972d4bee8..501631b1f041 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -1997,6 +1997,8 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev, } static const struct rtw89_chip_ops rtw8852a_chip_ops = { + .enable_bb_rf = rtw89_mac_enable_bb_rf, + .disable_bb_rf = rtw89_mac_disable_bb_rf, .bb_reset = rtw8852a_bb_reset, .bb_sethw = rtw8852a_bb_sethw, .read_rf = rtw89_phy_read_rf, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 4773f6c739dc..5dbb711defc4 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -483,7 +483,52 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, } } +static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev) +{ + int ret; + + rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); + + rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + + rtw89_write32_mask(rtwdev, R_AX_AFE_OFF_CTRL1, B_AX_S0_LDO_VSEL_F_MASK, 0x1); + rtw89_write32_mask(rtwdev, R_AX_AFE_OFF_CTRL1, B_AX_S1_LDO_VSEL_F_MASK, 0x1); + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL0, 0x7, FULL_BIT_MASK); + if (ret) + return ret; + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0x6c, FULL_BIT_MASK); + if (ret) + return ret; + + 
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xc7, FULL_BIT_MASK); + if (ret) + return ret; + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xc7, FULL_BIT_MASK); + if (ret) + return ret; + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL3, 0xd, FULL_BIT_MASK); + if (ret) + return ret; + + return 0; +} + +static void rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev) +{ + rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); +} + static const struct rtw89_chip_ops rtw8852c_chip_ops = { + .enable_bb_rf = rtw8852c_mac_enable_bb_rf, + .disable_bb_rf = rtw8852c_mac_disable_bb_rf, .read_efuse = rtw8852c_read_efuse, .read_phycap = rtw8852c_read_phycap, .power_trim = rtw8852c_power_trim, -- cgit v1.2.3 From 5cb5562d2a21637d1aa23791bec1e45368494ba4 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:50 +0800 Subject: rtw89: Turn on CR protection of CMAC CMAC is Control MAC, and this patch is to turn on CR (control registers) protection. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-12-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 280db2f3f89b..6498df52ace9 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1114,7 +1114,8 @@ static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN | B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | - B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN; + B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN | + B_AX_CMAC_CRPRT; ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN | B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN | B_AX_RMAC_CKEN; -- cgit v1.2.3 From b61adeed5409380479b3a9b43113348c26881342 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:51 +0800 Subject: rtw89: 8852c: update security engine setting The security setting of 8852A and 8852C are different, so change the settings accordingly. 
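The only arithmetic in the 8852C branch is placing AX_TX_TO_VAL into the two top bits of R_AX_SEC_DEBUG1 (which, per the reg.h hunk that follows, reuses offset 0x9D1C). A tiny check of that field value:

#include <stdint.h>
#include <stdio.h>

#define B_AX_TX_TIMEOUT_SEL_MASK	0xC0000000u	/* GENMASK(31, 30) */
#define AX_TX_TO_VAL			0x2

int main(void)
{
	/* Value rtw89_write32_mask() places into the register for RTL8852C:
	 * 0x2 shifted into bits 31:30. */
	uint32_t field = ((uint32_t)AX_TX_TO_VAL << 30) & B_AX_TX_TIMEOUT_SEL_MASK;

	printf("TX timeout select field = 0x%08X\n", field);	/* 0x80000000 */
	return 0;
}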
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-13-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 8 +++++++- drivers/net/wireless/realtek/rtw89/reg.h | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 6498df52ace9..8e6d45979fde 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1620,6 +1620,7 @@ static int mpdu_proc_init(struct rtw89_dev *rtwdev) static int sec_eng_init(struct rtw89_dev *rtwdev) { + const struct rtw89_chip_info *chip = rtwdev->chip; u32 val = 0; int ret; @@ -1633,7 +1634,8 @@ static int sec_eng_init(struct rtw89_dev *rtwdev) /* init TX encryption */ val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); val |= (B_AX_MC_DEC | B_AX_BC_DEC); - val &= ~B_AX_TX_PARTIAL_MODE; + if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B) + val &= ~B_AX_TX_PARTIAL_MODE; rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val); /* init MIC ICV append */ @@ -1643,6 +1645,10 @@ static int sec_eng_init(struct rtw89_dev *rtwdev) /* option init */ rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val); + if (chip->chip_id == RTL8852C) + rtw89_write32_mask(rtwdev, R_AX_SEC_DEBUG1, + B_AX_TX_TIMEOUT_SEL_MASK, AX_TX_TO_VAL); + return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 1f4cf30f3822..3505c9dd8a79 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -799,6 +799,9 @@ #define R_AX_SEC_CAM_RDATA 0x9D14 #define R_AX_SEC_CAM_WDATA 0x9D18 #define R_AX_SEC_DEBUG 0x9D1C +#define R_AX_SEC_DEBUG1 0x9D1C +#define B_AX_TX_TIMEOUT_SEL_MASK GENMASK(31, 30) +#define AX_TX_TO_VAL 0x2 #define R_AX_SEC_TX_DEBUG 0x9D20 #define R_AX_SEC_RX_DEBUG 0x9D24 #define R_AX_SEC_TRX_PKT_CNT 0x9D28 -- cgit v1.2.3 From c49154ff8bcb995467b23c50afcee74c11265547 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:52 +0800 Subject: rtw89: update scheduler setting Update IC specific settings accordingly. 
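One pattern worth noting in the diff below: every CMAC register in reg.h comes as a pair, for example R_AX_PREBKF_CFG_1 at 0xC33C and R_AX_PREBKF_CFG_1_C1 at 0xE33C, and rtw89_mac_reg_by_idx() selects the instance for the requested mac_idx. A standalone sketch of that selection, assuming (as the 0x2000 spacing of the listed addresses suggests) that the second CMAC block sits at a fixed offset above the first; mac_reg_by_idx() here is an illustrative stand-in, not the driver helper itself:

#include <stdio.h>

#define R_AX_PREBKF_CFG_1  0xC33C  /* CMAC0 instance, from reg.h          */
#define R_AX_SCH_EXT_CTRL  0xC3FC
#define CMAC1_OFFSET       0x2000  /* assumed spacing of the _C1 copies   */

/* Derive the per-MAC register address from the CMAC0 base definition. */
static unsigned int mac_reg_by_idx(unsigned int reg_base, unsigned int mac_idx)
{
	return reg_base + mac_idx * CMAC1_OFFSET;
}

int main(void)
{
	/* mac_idx 0 -> 0xC33C, mac_idx 1 -> 0xE33C (the _C1 variant). */
	printf("PREBKF_CFG_1: mac0=0x%04x mac1=0x%04x\n",
	       mac_reg_by_idx(R_AX_PREBKF_CFG_1, 0),
	       mac_reg_by_idx(R_AX_PREBKF_CFG_1, 1));
	printf("SCH_EXT_CTRL: mac0=0x%04x mac1=0x%04x\n",
	       mac_reg_by_idx(R_AX_SCH_EXT_CTRL, 0),
	       mac_reg_by_idx(R_AX_SCH_EXT_CTRL, 1));
	return 0;
}
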
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-14-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 11 +++++++++++ drivers/net/wireless/realtek/rtw89/reg.h | 12 ++++++++++++ 2 files changed, 23 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 8e6d45979fde..16a97bcb8d48 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1731,6 +1731,17 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; + reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1); + + if (rtwdev->chip->chip_id == RTL8852B) { + reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV); + } + + reg = rtw89_mac_reg_by_idx(R_AX_CCA_CFG_0, mac_idx); + rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN); + reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US); diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 3505c9dd8a79..dea7d2c8547b 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -977,6 +977,14 @@ #define R_AX_PREBKF_CFG_0_C1 0xE338 #define B_AX_PREBKF_TIME_MASK GENMASK(4, 0) +#define R_AX_PREBKF_CFG_1 0xC33C +#define R_AX_PREBKF_CFG_1_C1 0xE33C +#define B_AX_SIFS_TIMEOUT_TB_AGGR_MASK GENMASK(30, 24) +#define B_AX_SIFS_PREBKF_MASK GENMASK(23, 16) +#define B_AX_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8) +#define B_AX_SIFS_MACTXEN_T1_MASK GENMASK(6, 0) +#define SIFS_MACTXEN_T1 0x47 + #define R_AX_CCA_CFG_0 0xC340 #define R_AX_CCA_CFG_0_C1 0xE340 #define B_AX_BTCCA_BRK_TXOP_EN BIT(9) @@ -1076,6 +1084,10 @@ #define R_AX_SCH_DBG_C1 0xE3F8 #define B_AX_SCHEDULER_DBG_MASK GENMASK(31, 0) +#define R_AX_SCH_EXT_CTRL 0xC3FC +#define R_AX_SCH_EXT_CTRL_C1 0xE3FC +#define B_AX_PORT_RST_TSF_ADV BIT(1) + #define R_AX_PORT_CFG_P0 0xC400 #define R_AX_PORT_CFG_P1 0xC440 #define R_AX_PORT_CFG_P2 0xC480 -- cgit v1.2.3 From 19cb94273f40b08dbb5677619d05a4c594226ac9 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:53 +0800 Subject: rtw89: initialize NAV control Configure NAV function and its parameters. 
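For scale, the upper bound written below is NAV_12MS = 0xBC placed in the eight-bit field GENMASK(15, 8). The macro name and the value agree if the field counts 64 us units, since 0xBC is 188 and 188 * 64 us is roughly 12 ms; that unit is an inference from the macro name, not something stated in the patch. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int nav_upper = 0xBC;   /* NAV_12MS from reg.h           */
	unsigned int unit_us   = 64;     /* assumed field granularity     */

	/* 188 * 64 us = 12032 us, i.e. about the 12 ms the name claims. */
	printf("NAV upper bound = %u us (~%u ms)\n",
	       nav_upper * unit_us, nav_upper * unit_us / 1000);
	return 0;
}
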
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-15-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 17 +++++++++++++++++ drivers/net/wireless/realtek/rtw89/reg.h | 10 ++++++++++ 2 files changed, 27 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 16a97bcb8d48..c0e35143a904 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1883,6 +1883,16 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) return 0; } +static int nav_ctrl_init(struct rtw89_dev *rtwdev) +{ + rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN | + B_AX_WMAC_TF_UP_NAV_EN | + B_AX_WMAC_NAV_UPPER_EN); + rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_12MS); + + return 0; +} + static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx) { u32 reg; @@ -2098,6 +2108,13 @@ static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) return ret; } + ret = nav_ctrl_init(rtwdev); + if (ret) { + rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx, + ret); + return ret; + } + ret = spatial_reuse_init(rtwdev, mac_idx); if (ret) { rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n", diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index dea7d2c8547b..aca9fc3ac09e 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -1435,6 +1435,16 @@ #define R_AX_MAC_LOOPBACK_C1 0xEC20 #define B_AX_MACLBK_EN BIT(0) +#define R_AX_WMAC_NAV_CTL 0xCC80 +#define R_AX_WMAC_NAV_CTL_C1 0xEC80 +#define B_AX_WMAC_NAV_UPPER_EN BIT(26) +#define B_AX_WMAC_0P125US_TIMER_MASK GENMASK(25, 18) +#define B_AX_WMAC_PLCP_UP_NAV_EN BIT(17) +#define B_AX_WMAC_TF_UP_NAV_EN BIT(16) +#define B_AX_WMAC_NAV_UPPER_MASK GENMASK(15, 8) +#define NAV_12MS 0xBC +#define B_AX_WMAC_RTS_RST_DUR_MASK GENMASK(7, 0) + #define R_AX_RXTRIG_TEST_USER_2 0xCCB0 #define R_AX_RXTRIG_TEST_USER_2_C1 0xECB0 #define B_AX_RXTRIG_MACID_MASK GENMASK(31, 24) -- cgit v1.2.3 From 75fd91aa92f91a441b115368f07b381b47130fb8 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:54 +0800 Subject: rtw89: update TMAC parameters TMAC is short for TX MAC, and this patch is to configure FIFO thresholds. 
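The reg.h additions below are all GENMASK()-style field definitions, for instance B_AX_TCR_UDF_THSD_MASK is GENMASK(22, 16). For readers less familiar with the macro, a small standalone equivalent that prints the masks these definitions expand to; the kernel's real macro lives in include/linux/bits.h and GENMASK32() here is only an illustrative 32-bit stand-in:

#include <stdio.h>

/* 32-bit-only equivalent of the kernel's GENMASK(high, low). */
#define GENMASK32(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	/* Field masks introduced for R_AX_TCR0 and R_AX_TXD_FIFO_CTRL. */
	printf("TCR_UDF_THSD_MASK     = 0x%08x\n", GENMASK32(22, 16)); /* 0x007f0000 */
	printf("TXDFIFO_HIGH_MCS_THRE = 0x%08x\n", GENMASK32(15, 12)); /* 0x0000f000 */
	printf("TXDFIFO_LOW_MCS_THRE  = 0x%08x\n", GENMASK32(11, 8));  /* 0x00000f00 */
	return 0;
}
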
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-16-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 7 +++++++ drivers/net/wireless/realtek/rtw89/reg.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index c0e35143a904..0292ec560562 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1919,6 +1919,13 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx); rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); + reg = rtw89_mac_reg_by_idx(R_AX_TCR0, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD); + + reg = rtw89_mac_reg_by_idx(R_AX_TXD_FIFO_CTRL, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE); + rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE); + return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index aca9fc3ac09e..3822cf0daef0 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -1351,6 +1351,24 @@ #define R_AX_RXDMA_PKT_INFO_1 0xC818 #define R_AX_RXDMA_PKT_INFO_2 0xC81C +#define R_AX_TCR0 0xCA00 +#define R_AX_TCR0_C1 0xEA00 +#define B_AX_TCR_ZLD_NUM_MASK GENMASK(31, 24) +#define B_AX_TCR_UDF_EN BIT(23) +#define B_AX_TCR_UDF_THSD_MASK GENMASK(22, 16) +#define TCR_UDF_THSD 0x6 +#define B_AX_TCR_ERRSTEN_MASK GENMASK(15, 10) +#define B_AX_TCR_VHTSIGA1_TXPS BIT(9) +#define B_AX_TCR_PLCP_ERRHDL_EN BIT(8) +#define B_AX_TCR_PADSEL BIT(7) +#define B_AX_TCR_MASK_SIGBCRC BIT(6) +#define B_AX_TCR_SR_VAL15_ALLOW BIT(5) +#define B_AX_TCR_EN_EOF BIT(4) +#define B_AX_TCR_EN_SCRAM_INC BIT(3) +#define B_AX_TCR_EN_20MST BIT(2) +#define B_AX_TCR_CRC BIT(1) +#define B_AX_TCR_DISGCLK BIT(0) + #define R_AX_TCR1 0xCA04 #define R_AX_TCR1_C1 0xEA04 #define B_AX_TXDFIFO_THRESHOLD GENMASK(31, 28) @@ -1374,6 +1392,17 @@ #define R_AX_PPWRBIT_SETTING 0xCA0C #define R_AX_PPWRBIT_SETTING_C1 0xEA0C +#define R_AX_TXD_FIFO_CTRL 0xCA1C +#define R_AX_TXD_FIFO_CTRL_C1 0xEA1C +#define B_AX_NON_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(28, 24) +#define B_AX_LEGACY_PPDU_ZLD_USTIMER_MASK GENMASK(20, 16) +#define B_AX_TXDFIFO_HIGH_MCS_THRE_MASK GENMASK(15, 12) +#define TXDFIFO_HIGH_MCS_THRE 0x7 +#define B_AX_TXDFIFO_LOW_MCS_THRE_MASK GENMASK(11, 8) +#define TXDFIFO_LOW_MCS_THRE 0x7 +#define B_AX_HIGH_MCS_PHY_RATE_MASK GENMASK(7, 4) +#define B_AX_BW_PHY_RATE_MASK GENMASK(1, 0) + #define R_AX_MACTX_DBG_SEL_CNT 0xCA20 #define R_AX_MACTX_DBG_SEL_CNT_C1 0xEA20 #define B_AX_MACTX_MPDU_CNT GENMASK(31, 24) -- cgit v1.2.3 From 9fb4862e913ceb5f6e668033c214e07eca7ee18a Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 25 Mar 2022 14:00:55 +0800 Subject: rtw89: update ptcl_init ptcl_init, standing for protocol initialization, is updated to the latest version. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325060055.58482-17-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 20 +++++++++++++++----- drivers/net/wireless/realtek/rtw89/reg.h | 24 ++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 0292ec560562..6a2ba975d8a0 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2065,6 +2065,8 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) val = rtw89_read32(rtwdev, reg); val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); + val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, + B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); val |= B_AX_HW_CTS2SELF_EN; rtw89_write32(rtwdev, reg, val); @@ -2075,11 +2077,19 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) rtw89_write32(rtwdev, reg, val); } - reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); - val = rtw89_read32(rtwdev, reg); - val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); - val |= B_AX_HW_CTS2SELF_EN; - rtw89_write32(rtwdev, reg, val); + if (mac_idx == RTW89_MAC_0) { + rtw89_write8_set(rtwdev, R_AX_PTCL_COMMON_SETTING_0, + B_AX_CMAC_TX_MODE_0 | B_AX_CMAC_TX_MODE_1); + rtw89_write8_clr(rtwdev, R_AX_PTCL_COMMON_SETTING_0, + B_AX_PTCL_TRIGGER_SS_EN_0 | + B_AX_PTCL_TRIGGER_SS_EN_1 | + B_AX_PTCL_TRIGGER_SS_EN_UL); + rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL, + B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); + } else if (mac_idx == RTW89_MAC_1) { + rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL_C1, + B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); + } return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 3822cf0daef0..a0c60528b578 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -1248,6 +1248,18 @@ #define R_AX_PORT_HGQ_WINDOW_CFG 0xC5A0 #define R_AX_PORT_HGQ_WINDOW_CFG_C1 0xE5A0 +#define R_AX_PTCL_COMMON_SETTING_0 0xC600 +#define R_AX_PTCL_COMMON_SETTING_0_C1 0xE600 +#define B_AX_PCIE_MODE_MASK GENMASK(15, 14) +#define B_AX_CPUMGQ_LIFETIME_EN BIT(8) +#define B_AX_MGQ_LIFETIME_EN BIT(7) +#define B_AX_LIFETIME_EN BIT(6) +#define B_AX_PTCL_TRIGGER_SS_EN_UL BIT(4) +#define B_AX_PTCL_TRIGGER_SS_EN_1 BIT(3) +#define B_AX_PTCL_TRIGGER_SS_EN_0 BIT(2) +#define B_AX_CMAC_TX_MODE_1 BIT(1) +#define B_AX_CMAC_TX_MODE_0 BIT(0) + #define R_AX_AMPDU_AGG_LIMIT 0xC610 #define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24) #define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16) @@ -1292,6 +1304,18 @@ #define B_AX_PORT_DROP_4_0_MASK GENMASK(20, 16) #define B_AX_MBSSID_DROP_15_0_MASK GENMASK(15, 0) +#define R_AX_PTCLRPT_FULL_HDL 0xC660 +#define R_AX_PTCLRPT_FULL_HDL_C1 0xE660 +#define B_AX_RPT_LATCH_PHY_TIME_MASK GENMASK(15, 12) +#define B_AX_F2PCMD_FWWD_RLS_MODE BIT(9) +#define B_AX_F2PCMD_RPT_EN BIT(8) +#define B_AX_BCN_RPT_PATH_MASK GENMASK(7, 6) +#define B_AX_SPE_RPT_PATH_MASK GENMASK(5, 4) +#define FWD_TO_WLCPU 1 +#define B_AX_TX_RPT_PATH_MASK GENMASK(3, 2) +#define B_AX_F2PCMDRPT_FULL_DROP BIT(1) +#define B_AX_NON_F2PCMDRPT_FULL_DROP BIT(0) + #define R_AX_BT_PLT 0xC67C #define R_AX_BT_PLT_C1 0xE67C #define B_AX_BT_PLT_PKT_CNT_MASK GENMASK(31, 16) -- cgit v1.2.3 From ee20d538c4989daddd383b6a89129a2b514580e6 Mon Sep 17 00:00:00 2001 From: Po Hao Huang Date: Fri, 1 Apr 2022 
13:50:40 +0800 Subject: rtw89: change idle mode condition during hw_scan Previously we only consider single interface's status, idle mode behavior could be unexpected when multiple interfaces is active. Change to enter/leave idle mode by mac80211's configuration state. Signed-off-by: Po Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401055043.12512-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index f540ee34fc2c..89506daa54cc 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -1940,9 +1940,9 @@ static void rtw89_ips_work(struct work_struct *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, ips_work); - mutex_lock(&rtwdev->mutex); - rtw89_enter_ips(rtwdev); + if (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE) + rtw89_enter_ips(rtwdev); mutex_unlock(&rtwdev->mutex); } @@ -2848,7 +2848,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, rtwdev->scanning = true; rtw89_leave_lps(rtwdev); - if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK) + if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) rtw89_leave_ips(rtwdev); ether_addr_copy(rtwvif->mac_addr, mac_addr); @@ -2872,7 +2872,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, rtwdev->scanning = false; rtwdev->dig.bypass_dig = true; - if (hw_scan && rtwvif->net_type == RTW89_NET_TYPE_NO_LINK) + if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work); } -- cgit v1.2.3 From 2b8219e9b746ee3090baa1daa192b16e5bc72f9d Mon Sep 17 00:00:00 2001 From: Po Hao Huang Date: Fri, 1 Apr 2022 13:50:41 +0800 Subject: rtw89: packet offload handler to avoid warning Add a dummy function for packet offload to eliminate warning message "c2h class 1 func 2 not support". This c2h is for debug purpose and its presence itself can do the work, so further parsing won't be required for now. 
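The quoted warning comes from a dispatch table of C2H handlers in which a NULL slot means the event is unsupported, so registering even an empty handler is enough to mark the event as expected. A minimal standalone model of that pattern (the function index and the warning text are taken from the message above; everything else, including the names, is illustrative):

#include <stdio.h>

#define C2H_FUNC_PKT_OFLD_RSP  2   /* "func 2" in the quoted warning */
#define C2H_FUNC_MAX           4

typedef void (*c2h_handler)(const char *payload);

/* Debug-only response: its mere presence silences the warning. */
static void pkt_ofld_rsp(const char *payload) { (void)payload; }

/* NULL entries are events the driver does not (yet) handle. */
static const c2h_handler handlers[C2H_FUNC_MAX] = {
	[C2H_FUNC_PKT_OFLD_RSP] = pkt_ofld_rsp,
};

static void dispatch(unsigned int func, const char *payload)
{
	if (func >= C2H_FUNC_MAX || !handlers[func]) {
		printf("c2h class 1 func %u not support\n", func);
		return;
	}
	handlers[func](payload);
}

int main(void)
{
	dispatch(C2H_FUNC_PKT_OFLD_RSP, "");  /* handled quietly     */
	dispatch(3, "");                      /* prints the warning  */
	return 0;
}
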
Signed-off-by: Po Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401055043.12512-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 6a2ba975d8a0..34e14a53a585 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -3498,12 +3498,18 @@ rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { } +static void +rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h, + u32 len) +{ +} + static void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) = { [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, - [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL, + [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp, [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp, -- cgit v1.2.3 From 841f2633840ea2eb1ecea4a7efab9d19d9abaf62 Mon Sep 17 00:00:00 2001 From: Ching-Te Ku Date: Fri, 1 Apr 2022 13:50:42 +0800 Subject: rtw89: coex: Add case for scan offload Turn off coexistence driver control, let firmware can control the traffic during scan. This prevents potential unexpected behavior of the BT driver. Signed-off-by: Ching-Te Ku Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401055043.12512-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/coex.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index 684583955511..3d5664cc3a20 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -3068,7 +3068,17 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; - if (rtwdev->dbcc_en) { + if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G); + if (btc->mdinfo.ant.type == BTC_ANT_SHARED) + _set_policy(rtwdev, BTC_CXP_OFFE_DEF, + BTC_RSN_NTFY_SCAN_START); + else + _set_policy(rtwdev, BTC_CXP_OFF_EQ0, + BTC_RSN_NTFY_SCAN_START); + + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n"); + } else if (rtwdev->dbcc_en) { if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G && wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G) _action_wl_5g(rtwdev); -- cgit v1.2.3 From 65ee4971a262a024e239e5d2b7f4dee1b3dff40e Mon Sep 17 00:00:00 2001 From: Po Hao Huang Date: Fri, 1 Apr 2022 13:50:43 +0800 Subject: rtw89: fix misconfiguration on hw_scan channel time Without this patch, hw scan won't stay long enough on DFS/passive channels. Found previous logic error and fix it. 
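The one-line fix below replaces a min_t() with a max_t(): with min_t() the configured period could only ever be clamped down to RTW89_DFS_CHAN_TIME, while the intent is to use that value as a lower bound so passive/DFS channels are dwelt on long enough. A tiny illustration with made-up numbers (DFS_CHAN_TIME and the period below are not the driver's actual values):

#include <stdio.h>

#define DFS_CHAN_TIME 105   /* illustrative only */

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int period = 30;   /* a short default dwell, for example */

	/* Old logic: the DFS value can only shorten the dwell, never extend it. */
	printf("min_t(): %u\n", min_u(period, DFS_CHAN_TIME));   /* 30  */

	/* Fixed logic: the DFS value acts as a floor on the dwell time. */
	printf("max_t(): %u\n", max_u(period, DFS_CHAN_TIME));   /* 105 */
	return 0;
}
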
Signed-off-by: Po Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401055043.12512-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 5985b40950bc..fc77d9bfd626 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -2101,7 +2101,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, ch_info->num_pkt = 0; break; case RTW89_CHAN_DFS: - ch_info->period = min_t(u8, ch_info->period, + ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); ch_info->dwell_time = RTW89_DWELL_TIME; break; -- cgit v1.2.3 From 3e12968f6d12a34b540c39cbd696a760cc4616f0 Mon Sep 17 00:00:00 2001 From: Niels Dossche Date: Mon, 21 Mar 2022 23:55:16 +0100 Subject: mwifiex: add mutex lock for call in mwifiex_dfs_chan_sw_work_queue cfg80211_ch_switch_notify uses ASSERT_WDEV_LOCK to assert that net_device->ieee80211_ptr->mtx (which is the same as priv->wdev.mtx) is held during the function's execution. mwifiex_dfs_chan_sw_work_queue is one of its callers, which does not hold that lock, therefore violating the assertion. Add a lock around the call. Disclaimer: I am currently working on a static analyser to detect missing locks. This was a reported case. I manually verified the report by looking at the code, so that I do not send wrong information or patches. After concluding that this seems to be a true positive, I created this patch. However, as I do not in fact have this particular hardware, I was unable to test it. Reviewed-by: Brian Norris Signed-off-by: Niels Dossche Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220321225515.32113-1-dossche.niels@gmail.com --- drivers/net/wireless/marvell/mwifiex/11h.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index d2ee6469e67b..3fa25cd64cda 100644 --- a/drivers/net/wireless/marvell/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c @@ -303,5 +303,7 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) mwifiex_dbg(priv->adapter, MSG, "indicating channel switch completion to kernel\n"); + mutex_lock(&priv->wdev.mtx); cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef); + mutex_unlock(&priv->wdev.mtx); } -- cgit v1.2.3 From 92cadedd9d5f4355d8ca2765f2259708f0e5592c Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Wed, 23 Mar 2022 09:39:50 +0100 Subject: brcmfmac: Avoid keeping power to SDIO card unless WOWL is used Keeping the power to the SDIO card during system wide suspend, consumes energy. Especially on battery driven embedded systems, this can be a problem. Therefore, let's change the behaviour into allowing the SDIO card to be powered off, unless WOWL is supported and enabled. Note that, the downside from this change, is that during system resume the SDIO card needs to be re-initialized and the FW must be re-programmed. Even if this may take some time to complete, it should we worth it, rather than draining the battery. 
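The behaviour described above boils down to a small decision: WOWL can only be honoured when the host keeps power to the card (MMC_PM_KEEP_POWER) and some wake mechanism exists, either an out-of-band IRQ or MMC_PM_WAKE_SDIO_IRQ; otherwise the card is powered off across suspend and re-probed on resume. A compact standalone model of that decision, mirroring the logic in the diff below with simplified names:

#include <stdbool.h>
#include <stdio.h>

struct sdio_caps {
	bool keep_power;     /* host advertises MMC_PM_KEEP_POWER     */
	bool wake_sdio_irq;  /* host advertises MMC_PM_WAKE_SDIO_IRQ  */
	bool oob_irq;        /* platform provides an out-of-band IRQ  */
};

/* Decide whether WOWL can actually be armed for this suspend cycle. */
static bool wowl_usable(const struct sdio_caps *c)
{
	if (!c->keep_power)
		return false;              /* card would lose power anyway */
	return c->oob_irq || c->wake_sdio_irq;
}

int main(void)
{
	struct sdio_caps host = { .keep_power = true, .wake_sdio_irq = true };

	if (wowl_usable(&host))
		printf("keep card powered, arm wake source\n");
	else
		printf("power card off, reprobe on resume\n");
	return 0;
}
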
Signed-off-by: Ulf Hansson Tested-by: Christophe Roullier Reviewed-by: Yann Gautier Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220323083950.414783-1-ulf.hansson@linaro.org --- .../wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 39 ++++++++++++---------- 1 file changed, 22 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index ac02244a6fdf..9c598ea97499 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1119,9 +1119,21 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(sdiodev->func1); - brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled); - sdiodev->wowl_enabled = enabled; + /* Power must be preserved to be able to support WOWL. */ + if (!(pm_caps & MMC_PM_KEEP_POWER)) + goto notsup; + + if (sdiodev->settings->bus.sdio.oob_irq_supported || + pm_caps & MMC_PM_WAKE_SDIO_IRQ) { + sdiodev->wowl_enabled = enabled; + brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled); + return; + } + +notsup: + brcmf_dbg(SDIO, "WOWL not supported\n"); } #ifdef CONFIG_PM_SLEEP @@ -1130,7 +1142,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev) struct sdio_func *func; struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; - mmc_pm_flag_t pm_caps, sdio_flags; + mmc_pm_flag_t sdio_flags; int ret = 0; func = container_of(dev, struct sdio_func, dev); @@ -1142,20 +1154,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev) bus_if = dev_get_drvdata(dev); sdiodev = bus_if->bus_priv.sdio; - pm_caps = sdio_get_host_pm_caps(func); - - if (pm_caps & MMC_PM_KEEP_POWER) { - /* preserve card power during suspend */ + if (sdiodev->wowl_enabled) { brcmf_sdiod_freezer_on(sdiodev); brcmf_sdio_wd_timer(sdiodev->bus, 0); sdio_flags = MMC_PM_KEEP_POWER; - if (sdiodev->wowl_enabled) { - if (sdiodev->settings->bus.sdio.oob_irq_supported) - enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); - else - sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; - } + if (sdiodev->settings->bus.sdio.oob_irq_supported) + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); + else + sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) brcmf_err("Failed to set pm_flags %x\n", sdio_flags); @@ -1176,21 +1183,19 @@ static int brcmf_ops_sdio_resume(struct device *dev) struct brcmf_bus *bus_if = dev_get_drvdata(dev); struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct sdio_func *func = container_of(dev, struct sdio_func, dev); - mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(func); int ret = 0; brcmf_dbg(SDIO, "Enter: F%d\n", func->num); if (func->num != 2) return 0; - if (!(pm_caps & MMC_PM_KEEP_POWER)) { + if (!sdiodev->wowl_enabled) { /* bus was powered off and device removed, probe again */ ret = brcmf_sdiod_probe(sdiodev); if (ret) brcmf_err("Failed to probe device on resume\n"); } else { - if (sdiodev->wowl_enabled && - sdiodev->settings->bus.sdio.oob_irq_supported) + if (sdiodev->settings->bus.sdio.oob_irq_supported) disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); brcmf_sdiod_freezer_off(sdiodev); -- cgit v1.2.3 From a0ff2a87194a968b9547fd4d824a09092171d1ea Mon Sep 17 00:00:00 2001 From: Jakob Koschel Date: Thu, 24 Mar 2022 08:21:24 +0100 Subject: rtlwifi: replace 
usage of found with dedicated list iterator variable To move the list iterator variable into the list_for_each_entry_*() macro in the future it should be avoided to use the list iterator variable after the loop body. To *never* use the list iterator variable after the loop it was concluded to use a separate iterator variable instead of a found boolean [1]. This removes the need to use a found variable and simply checking if the variable was set, can determine if the break/goto was hit. Link: https://lore.kernel.org/all/CAHk-=wgRr_D8CB-D9Kg-c=EHreAsk5SqXPwr9Y7k9sA6cWXJ6w@mail.gmail.com/ Signed-off-by: Jakob Koschel Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220324072124.62458-1-jakobkoschel@gmail.com --- drivers/net/wireless/realtek/rtlwifi/base.c | 13 ++++++------- drivers/net/wireless/realtek/rtlwifi/pci.c | 15 +++++++-------- 2 files changed, 13 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index ffd150ec181f..a7ef84f55939 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1994,8 +1994,7 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); unsigned long flags; - struct rtl_bssid_entry *entry; - bool entry_found = false; + struct rtl_bssid_entry *entry = NULL, *iter; /* check if it is scanning */ if (!mac->act_scanning) @@ -2008,10 +2007,10 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb) spin_lock_irqsave(&rtlpriv->locks.scan_list_lock, flags); - list_for_each_entry(entry, &rtlpriv->scan_list.list, list) { - if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) { - list_del_init(&entry->list); - entry_found = true; + list_for_each_entry(iter, &rtlpriv->scan_list.list, list) { + if (memcmp(iter->bssid, hdr->addr3, ETH_ALEN) == 0) { + list_del_init(&iter->list); + entry = iter; rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "Update BSSID=%pM to scan list (total=%d)\n", hdr->addr3, rtlpriv->scan_list.num); @@ -2019,7 +2018,7 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb) } } - if (!entry_found) { + if (!entry) { entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index ad327bae754b..8e4c15654746 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -323,14 +323,13 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - bool find_buddy_priv = false; - struct rtl_priv *tpriv; + struct rtl_priv *tpriv = NULL, *iter; struct rtl_pci_priv *tpcipriv = NULL; if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) { - list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list, + list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list, list) { - tpcipriv = (struct rtl_pci_priv *)tpriv->priv; + tpcipriv = (struct rtl_pci_priv *)iter->priv; rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "pcipriv->ndis_adapter.funcnumber %x\n", pcipriv->ndis_adapter.funcnumber); @@ -344,19 +343,19 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, tpcipriv->ndis_adapter.devnumber && pcipriv->ndis_adapter.funcnumber != tpcipriv->ndis_adapter.funcnumber) { - find_buddy_priv = true; + tpriv = iter; break; } } } rtl_dbg(rtlpriv, COMP_INIT, 
DBG_LOUD, - "find_buddy_priv %d\n", find_buddy_priv); + "find_buddy_priv %d\n", tpriv != NULL); - if (find_buddy_priv) + if (tpriv) *buddy_priv = tpriv; - return find_buddy_priv; + return tpriv != NULL; } static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw) -- cgit v1.2.3 From 21338c5bdeb969bfb2d78bd06a71a40b9a82a51e Mon Sep 17 00:00:00 2001 From: Chris Chiu Date: Fri, 25 Mar 2022 11:57:34 +0800 Subject: rtl8xxxu: feed antenna information for cfg80211 Fill up the available TX/RX antenna so the iw commands can show correct antenna information for different chips. Signed-off-by: Chris Chiu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325035735.4745-2-chris.chiu@canonical.com --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 06d59ffb7444..d225a1257530 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1607,6 +1607,7 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv) static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; + struct ieee80211_hw *hw = priv->hw; u32 val32, bonding; u16 val16; @@ -1684,6 +1685,9 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->has_wifi = 1; } + hw->wiphy->available_antennas_tx = BIT(priv->tx_paths) - 1; + hw->wiphy->available_antennas_rx = BIT(priv->rx_paths) - 1; + switch (priv->rtl_chip) { case RTL8188E: case RTL8192E: @@ -4282,6 +4286,17 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv, rtl8xxxu_debug = tmp_debug; } +static +int rtl8xxxu_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +{ + struct rtl8xxxu_priv *priv = hw->priv; + + *tx_ant = BIT(priv->tx_paths) - 1; + *rx_ant = BIT(priv->rx_paths) - 1; + + return 0; +} + static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac) { @@ -6472,6 +6487,7 @@ static const struct ieee80211_ops rtl8xxxu_ops = { .set_key = rtl8xxxu_set_key, .ampdu_action = rtl8xxxu_ampdu_action, .sta_statistics = rtl8xxxu_sta_statistics, + .get_antenna = rtl8xxxu_get_antenna, }; static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv, -- cgit v1.2.3 From bd917b3d28c9815e2c55b9ff16680fdc2ba28d68 Mon Sep 17 00:00:00 2001 From: Chris Chiu Date: Fri, 25 Mar 2022 11:57:35 +0800 Subject: rtl8xxxu: fill up txrate info for gen1 chips RTL8188CUS/RTL8192CU(gen1) don't support rate adaptive report hence no real txrate info can be retrieved. The vendor driver reports the highest rate in HT capabilities from the IEs to avoid empty txrate. This commit initiates the txrate information with the highest supported rate negotiated with AP. The gen2 chip keeps update the txrate from the rate adaptive reports, and gen1 chips at least have non-NULL txrate after associated. 
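The new code below derives the reported rate from the highest bit set in the negotiated rate mask: fls(ramask) - 1 gives the descriptor rate index, and indexes at or above DESC_RATE_MCS0 map to an HT MCS/NSS pair (MCS0-7 means one spatial stream, MCS8-15 means two). A standalone sketch of that mapping; the DESC_RATE_* numbering used here is assumed for illustration, only the relative ordering matters:

#include <stdio.h>

/* Assumed descriptor-rate numbering: 12 legacy rates, then HT MCS0-15. */
#define DESC_RATE_54M   11
#define DESC_RATE_MCS0  12
#define DESC_RATE_MCS15 27

/* fls(): index of the highest set bit, 1-based, 0 for an empty mask. */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static void desc_to_mcs_nss(unsigned int rate, unsigned int *mcs, unsigned int *nss)
{
	if (rate < DESC_RATE_MCS0 || rate > DESC_RATE_MCS15)
		return;                          /* legacy rate: nothing to do */
	*nss = (rate < DESC_RATE_MCS0 + 8) ? 1 : 2;
	*mcs = rate - DESC_RATE_MCS0;
}

int main(void)
{
	/* Example rate mask with everything up to HT MCS7 negotiated. */
	unsigned int ramask  = (1u << (DESC_RATE_MCS0 + 8)) - 1;
	unsigned int highest = fls32(ramask) - 1;    /* -> MCS7 index */
	unsigned int mcs = 0, nss = 0;

	desc_to_mcs_nss(highest, &mcs, &nss);
	printf("highest=%u -> MCS%u, %u spatial stream(s)\n", highest, mcs, nss);
	return 0;
}
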
Signed-off-by: Chris Chiu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220325035735.4745-3-chris.chiu@canonical.com --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 88 +++++++++++++++------- 1 file changed, 59 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index d225a1257530..6d9b5cf01b11 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4473,6 +4473,35 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) priv->rx_buf_aggregation = 1; } +static const struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = { + {.bitrate = 10, .hw_value = 0x00,}, + {.bitrate = 20, .hw_value = 0x01,}, + {.bitrate = 55, .hw_value = 0x02,}, + {.bitrate = 110, .hw_value = 0x03,}, + {.bitrate = 60, .hw_value = 0x04,}, + {.bitrate = 90, .hw_value = 0x05,}, + {.bitrate = 120, .hw_value = 0x06,}, + {.bitrate = 180, .hw_value = 0x07,}, + {.bitrate = 240, .hw_value = 0x08,}, + {.bitrate = 360, .hw_value = 0x09,}, + {.bitrate = 480, .hw_value = 0x0a,}, + {.bitrate = 540, .hw_value = 0x0b,}, +}; + +static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) +{ + if (rate <= DESC_RATE_54M) + return; + + if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) { + if (rate < DESC_RATE_MCS8) + *nss = 1; + else + *nss = 2; + *mcs = rate - DESC_RATE_MCS0; + } +} + static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) { struct ieee80211_hw *hw = priv->hw; @@ -4534,9 +4563,12 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; struct ieee80211_sta *sta; + struct rtl8xxxu_ra_report *rarpt; u32 val32; u8 val8; + rarpt = &priv->ra_report; + if (changed & BSS_CHANGED_ASSOC) { dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc); @@ -4545,6 +4577,10 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (bss_conf->assoc) { u32 ramask; int sgi = 0; + u8 highest_rate; + u8 mcs = 0, nss = 0; + u32 bit_rate; + rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); @@ -4569,6 +4605,29 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, sgi = 1; rcu_read_unlock(); + highest_rate = fls(ramask) - 1; + if (highest_rate < DESC_RATE_MCS0) { + rarpt->txrate.legacy = + rtl8xxxu_legacy_ratetable[highest_rate].bitrate; + } else { + rtl8xxxu_desc_to_mcsrate(highest_rate, + &mcs, &nss); + rarpt->txrate.flags |= RATE_INFO_FLAGS_MCS; + + rarpt->txrate.mcs = mcs; + rarpt->txrate.nss = nss; + + if (sgi) { + rarpt->txrate.flags |= + RATE_INFO_FLAGS_SHORT_GI; + } + + rarpt->txrate.bw |= RATE_INFO_BW_20; + } + bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate); + rarpt->bit_rate = bit_rate; + rarpt->desc_rate = highest_rate; + priv->vif = vif; priv->rssi_level = RTL8XXXU_RATR_STA_INIT; @@ -5419,35 +5478,6 @@ void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv) } } -static struct ieee80211_rate rtl8xxxu_legacy_ratetable[] = { - {.bitrate = 10, .hw_value = 0x00,}, - {.bitrate = 20, .hw_value = 0x01,}, - {.bitrate = 55, .hw_value = 0x02,}, - {.bitrate = 110, .hw_value = 0x03,}, - {.bitrate = 60, .hw_value = 0x04,}, - {.bitrate = 90, .hw_value = 0x05,}, - {.bitrate = 120, .hw_value = 0x06,}, - {.bitrate = 180, .hw_value = 0x07,}, - {.bitrate = 240, .hw_value = 0x08,}, - {.bitrate = 360, .hw_value = 0x09,}, - 
{.bitrate = 480, .hw_value = 0x0a,}, - {.bitrate = 540, .hw_value = 0x0b,}, -}; - -static void rtl8xxxu_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) -{ - if (rate <= DESC_RATE_54M) - return; - - if (rate >= DESC_RATE_MCS0 && rate <= DESC_RATE_MCS15) { - if (rate < DESC_RATE_MCS8) - *nss = 1; - else - *nss = 2; - *mcs = rate - DESC_RATE_MCS0; - } -} - static void rtl8xxxu_c2hcmd_callback(struct work_struct *work) { struct rtl8xxxu_priv *priv; -- cgit v1.2.3 From 3f6b867559b3d43a7ce1b4799b755e812fc0d503 Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Fri, 25 Mar 2022 18:17:13 +0800 Subject: b43legacy: Fix assigning negative value to unsigned variable fix warning reported by smatch: drivers/net/wireless/broadcom/b43legacy/phy.c:1181 b43legacy_phy_lo_b_measure() warn: assigning (-772) to unsigned variable 'fval' Signed-off-by: Haowen Bai Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1648203433-8736-1-git-send-email-baihaowen@meizu.com --- drivers/net/wireless/broadcom/b43legacy/phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c index 05404fbd1e70..c1395e622759 100644 --- a/drivers/net/wireless/broadcom/b43legacy/phy.c +++ b/drivers/net/wireless/broadcom/b43legacy/phy.c @@ -1123,7 +1123,7 @@ void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev) struct b43legacy_phy *phy = &dev->phy; u16 regstack[12] = { 0 }; u16 mls; - u16 fval; + s16 fval; int i; int j; -- cgit v1.2.3 From 11800d893b38e0e12d636c170c1abc19c43c730c Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Fri, 25 Mar 2022 18:15:15 +0800 Subject: b43: Fix assigning negative value to unsigned variable fix warning reported by smatch: drivers/net/wireless/broadcom/b43/phy_n.c:585 b43_nphy_adjust_lna_gain_table() warn: assigning (-2) to unsigned variable '*(lna_gain[0])' Signed-off-by: Haowen Bai Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1648203315-28093-1-git-send-email-baihaowen@meizu.com --- drivers/net/wireless/broadcom/b43/phy_n.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index cf3ccf4ddfe7..aa5c99465674 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -582,7 +582,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) u16 data[4]; s16 gain[2]; u16 minmax[2]; - static const u16 lna_gain[4] = { -2, 10, 19, 25 }; + static const s16 lna_gain[4] = { -2, 10, 19, 25 }; if (nphy->hang_avoid) b43_nphy_stay_in_carrier_search(dev, 1); -- cgit v1.2.3 From e8366bbabe1d207cf7c5b11ae50e223ae6fc278b Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Fri, 1 Apr 2022 15:10:54 +0800 Subject: ipw2x00: Fix potential NULL dereference in libipw_xmit() crypt and crypt->ops could be null, so we need to checking null before dereference Signed-off-by: Haowen Bai Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1648797055-25730-1-git-send-email-baihaowen@meizu.com --- drivers/net/wireless/intel/ipw2x00/libipw_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c index 36d1e6b2568d..4aec1fce1ae2 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c @@ -383,7 +383,7 @@ netdev_tx_t libipw_xmit(struct 
sk_buff *skb, struct net_device *dev) /* Each fragment may need to have room for encryption * pre/postfix */ - if (host_encrypt) + if (host_encrypt && crypt && crypt->ops) bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_mpdu_postfix_len; -- cgit v1.2.3 From 3223e922ccf8b5c3dd0b05faeaba407655ee0774 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 2 Apr 2022 12:10:37 +0200 Subject: orinoco: Prepare cleanup of powerpc's asm/prom.h powerpc's asm/prom.h brings some headers that it doesn't need itself. In order to clean it up, first add missing headers in users of asm/prom.h Signed-off-by: Christophe Leroy Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/4e3bfd4ffe2ed6b713ddd99b69dcc3d96adffe34.1648833427.git.christophe.leroy@csgroup.eu --- drivers/net/wireless/intersil/orinoco/airport.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intersil/orinoco/airport.c b/drivers/net/wireless/intersil/orinoco/airport.c index 77e6c53040a3..a890bfa0d5cc 100644 --- a/drivers/net/wireless/intersil/orinoco/airport.c +++ b/drivers/net/wireless/intersil/orinoco/airport.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "orinoco.h" -- cgit v1.2.3 From 92bbf95df7683d3ab1ab1aa3aaa029c5c5870591 Mon Sep 17 00:00:00 2001 From: Meng Tang Date: Wed, 6 Apr 2022 09:54:44 +0800 Subject: ipw2x00: use DEVICE_ATTR_*() macro Use DEVICE_ATTR_*() helper instead of plain DEVICE_ATTR, which makes the code a bit shorter and easier to read. Signed-off-by: Meng Tang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220406015444.14408-1-tangmeng@uniontech.com --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 64 +++++++------- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 119 +++++++++++++-------------- 2 files changed, 90 insertions(+), 93 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 2ace2b27ecad..5234511dac78 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -3501,7 +3501,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv) priv->msg_buffers = NULL; } -static ssize_t show_pci(struct device *d, struct device_attribute *attr, +static ssize_t pci_show(struct device *d, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(d); @@ -3521,34 +3521,34 @@ static ssize_t show_pci(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(pci, 0444, show_pci, NULL); +static DEVICE_ATTR_RO(pci); -static ssize_t show_cfg(struct device *d, struct device_attribute *attr, +static ssize_t cfg_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *p = dev_get_drvdata(d); return sprintf(buf, "0x%08x\n", (int)p->config); } -static DEVICE_ATTR(cfg, 0444, show_cfg, NULL); +static DEVICE_ATTR_RO(cfg); -static ssize_t show_status(struct device *d, struct device_attribute *attr, +static ssize_t status_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *p = dev_get_drvdata(d); return sprintf(buf, "0x%08x\n", (int)p->status); } -static DEVICE_ATTR(status, 0444, show_status, NULL); +static DEVICE_ATTR_RO(status); -static ssize_t show_capability(struct device *d, struct device_attribute *attr, +static ssize_t capability_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *p = dev_get_drvdata(d); return 
sprintf(buf, "0x%08x\n", (int)p->capability); } -static DEVICE_ATTR(capability, 0444, show_capability, NULL); +static DEVICE_ATTR_RO(capability); #define IPW2100_REG(x) { IPW_ ##x, #x } static const struct { @@ -3785,7 +3785,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"), IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"), IPW2100_ORD(UCODE_VERSION, "Ucode Version"),}; -static ssize_t show_registers(struct device *d, struct device_attribute *attr, +static ssize_t registers_show(struct device *d, struct device_attribute *attr, char *buf) { int i; @@ -3805,9 +3805,9 @@ static ssize_t show_registers(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(registers, 0444, show_registers, NULL); +static DEVICE_ATTR_RO(registers); -static ssize_t show_hardware(struct device *d, struct device_attribute *attr, +static ssize_t hardware_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -3846,9 +3846,9 @@ static ssize_t show_hardware(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(hardware, 0444, show_hardware, NULL); +static DEVICE_ATTR_RO(hardware); -static ssize_t show_memory(struct device *d, struct device_attribute *attr, +static ssize_t memory_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -3905,7 +3905,7 @@ static ssize_t show_memory(struct device *d, struct device_attribute *attr, return len; } -static ssize_t store_memory(struct device *d, struct device_attribute *attr, +static ssize_t memory_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -3940,9 +3940,9 @@ static ssize_t store_memory(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(memory, 0644, show_memory, store_memory); +static DEVICE_ATTR_RW(memory); -static ssize_t show_ordinals(struct device *d, struct device_attribute *attr, +static ssize_t ordinals_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -3976,9 +3976,9 @@ static ssize_t show_ordinals(struct device *d, struct device_attribute *attr, return len; } -static DEVICE_ATTR(ordinals, 0444, show_ordinals, NULL); +static DEVICE_ATTR_RO(ordinals); -static ssize_t show_stats(struct device *d, struct device_attribute *attr, +static ssize_t stats_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -3997,7 +3997,7 @@ static ssize_t show_stats(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(stats, 0444, show_stats, NULL); +static DEVICE_ATTR_RO(stats); static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) { @@ -4043,7 +4043,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) return 0; } -static ssize_t show_internals(struct device *d, struct device_attribute *attr, +static ssize_t internals_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -4095,9 +4095,9 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, return len; } -static DEVICE_ATTR(internals, 0444, show_internals, NULL); +static DEVICE_ATTR_RO(internals); -static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, +static ssize_t bssinfo_show(struct 
device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -4140,7 +4140,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, return out - buf; } -static DEVICE_ATTR(bssinfo, 0444, show_bssinfo, NULL); +static DEVICE_ATTR_RO(bssinfo); #ifdef CONFIG_IPW2100_DEBUG static ssize_t debug_level_show(struct device_driver *d, char *buf) @@ -4165,7 +4165,7 @@ static ssize_t debug_level_store(struct device_driver *d, static DRIVER_ATTR_RW(debug_level); #endif /* CONFIG_IPW2100_DEBUG */ -static ssize_t show_fatal_error(struct device *d, +static ssize_t fatal_error_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -4190,7 +4190,7 @@ static ssize_t show_fatal_error(struct device *d, return out - buf; } -static ssize_t store_fatal_error(struct device *d, +static ssize_t fatal_error_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -4199,16 +4199,16 @@ static ssize_t store_fatal_error(struct device *d, return count; } -static DEVICE_ATTR(fatal_error, 0644, show_fatal_error, store_fatal_error); +static DEVICE_ATTR_RW(fatal_error); -static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, +static ssize_t scan_age_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%d\n", priv->ieee->scan_age); } -static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, +static ssize_t scan_age_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -4232,9 +4232,9 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, return strnlen(buf, count); } -static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age); +static DEVICE_ATTR_RW(scan_age); -static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr, +static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr, char *buf) { /* 0 - RF kill not enabled @@ -4278,7 +4278,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) return 1; } -static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, +static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw2100_priv *priv = dev_get_drvdata(d); @@ -4286,7 +4286,7 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill); +static DEVICE_ATTR_RW(rf_kill); static struct attribute *ipw2100_sysfs_entries[] = { &dev_attr_hardware.attr, diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 5727c7c00a28..ed343d4fb9d5 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -1259,7 +1259,7 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv) return error; } -static ssize_t show_event_log(struct device *d, +static ssize_t event_log_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1289,9 +1289,9 @@ static ssize_t show_event_log(struct device *d, return len; } -static DEVICE_ATTR(event_log, 0444, show_event_log, NULL); +static DEVICE_ATTR_RO(event_log); -static ssize_t show_error(struct 
device *d, +static ssize_t error_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1326,7 +1326,7 @@ static ssize_t show_error(struct device *d, return len; } -static ssize_t clear_error(struct device *d, +static ssize_t error_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1337,9 +1337,9 @@ static ssize_t clear_error(struct device *d, return count; } -static DEVICE_ATTR(error, 0644, show_error, clear_error); +static DEVICE_ATTR_RW(error); -static ssize_t show_cmd_log(struct device *d, +static ssize_t cmd_log_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1364,12 +1364,12 @@ static ssize_t show_cmd_log(struct device *d, return len; } -static DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL); +static DEVICE_ATTR_RO(cmd_log); #ifdef CONFIG_IPW2200_PROMISCUOUS static void ipw_prom_free(struct ipw_priv *priv); static int ipw_prom_alloc(struct ipw_priv *priv); -static ssize_t store_rtap_iface(struct device *d, +static ssize_t rtap_iface_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1414,7 +1414,7 @@ static ssize_t store_rtap_iface(struct device *d, return count; } -static ssize_t show_rtap_iface(struct device *d, +static ssize_t rtap_iface_show(struct device *d, struct device_attribute *attr, char *buf) { @@ -1429,9 +1429,9 @@ static ssize_t show_rtap_iface(struct device *d, } } -static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface); +static DEVICE_ATTR_ADMIN_RW(rtap_iface); -static ssize_t store_rtap_filter(struct device *d, +static ssize_t rtap_filter_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1451,7 +1451,7 @@ static ssize_t store_rtap_filter(struct device *d, return count; } -static ssize_t show_rtap_filter(struct device *d, +static ssize_t rtap_filter_show(struct device *d, struct device_attribute *attr, char *buf) { @@ -1460,17 +1460,17 @@ static ssize_t show_rtap_filter(struct device *d, priv->prom_priv ? priv->prom_priv->filter : 0); } -static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter); +static DEVICE_ATTR_ADMIN_RW(rtap_filter); #endif -static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, +static ssize_t scan_age_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%d\n", priv->ieee->scan_age); } -static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, +static ssize_t scan_age_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1504,16 +1504,16 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, return len; } -static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age); +static DEVICE_ATTR_RW(scan_age); -static ssize_t show_led(struct device *d, struct device_attribute *attr, +static ssize_t led_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 
0 : 1); } -static ssize_t store_led(struct device *d, struct device_attribute *attr, +static ssize_t led_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1537,36 +1537,36 @@ static ssize_t store_led(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(led, 0644, show_led, store_led); +static DEVICE_ATTR_RW(led); -static ssize_t show_status(struct device *d, +static ssize_t status_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *p = dev_get_drvdata(d); return sprintf(buf, "0x%08x\n", (int)p->status); } -static DEVICE_ATTR(status, 0444, show_status, NULL); +static DEVICE_ATTR_RO(status); -static ssize_t show_cfg(struct device *d, struct device_attribute *attr, +static ssize_t cfg_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *p = dev_get_drvdata(d); return sprintf(buf, "0x%08x\n", (int)p->config); } -static DEVICE_ATTR(cfg, 0444, show_cfg, NULL); +static DEVICE_ATTR_RO(cfg); -static ssize_t show_nic_type(struct device *d, +static ssize_t nic_type_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); return sprintf(buf, "TYPE: %d\n", priv->nic_type); } -static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL); +static DEVICE_ATTR_RO(nic_type); -static ssize_t show_ucode_version(struct device *d, +static ssize_t ucode_version_show(struct device *d, struct device_attribute *attr, char *buf) { u32 len = sizeof(u32), tmp = 0; @@ -1578,9 +1578,9 @@ static ssize_t show_ucode_version(struct device *d, return sprintf(buf, "0x%08x\n", tmp); } -static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL); +static DEVICE_ATTR_RO(ucode_version); -static ssize_t show_rtc(struct device *d, struct device_attribute *attr, +static ssize_t rtc_show(struct device *d, struct device_attribute *attr, char *buf) { u32 len = sizeof(u32), tmp = 0; @@ -1592,20 +1592,20 @@ static ssize_t show_rtc(struct device *d, struct device_attribute *attr, return sprintf(buf, "0x%08x\n", tmp); } -static DEVICE_ATTR(rtc, 0644, show_rtc, NULL); +static DEVICE_ATTR_RO(rtc); /* * Add a device attribute to view/control the delay between eeprom * operations. 
*/ -static ssize_t show_eeprom_delay(struct device *d, +static ssize_t eeprom_delay_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *p = dev_get_drvdata(d); int n = p->eeprom_delay; return sprintf(buf, "%i\n", n); } -static ssize_t store_eeprom_delay(struct device *d, +static ssize_t eeprom_delay_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1614,9 +1614,9 @@ static ssize_t store_eeprom_delay(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay); +static DEVICE_ATTR_RW(eeprom_delay); -static ssize_t show_command_event_reg(struct device *d, +static ssize_t command_event_reg_show(struct device *d, struct device_attribute *attr, char *buf) { u32 reg = 0; @@ -1625,7 +1625,7 @@ static ssize_t show_command_event_reg(struct device *d, reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT); return sprintf(buf, "0x%08x\n", reg); } -static ssize_t store_command_event_reg(struct device *d, +static ssize_t command_event_reg_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1637,10 +1637,9 @@ static ssize_t store_command_event_reg(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(command_event_reg, 0644, - show_command_event_reg, store_command_event_reg); +static DEVICE_ATTR_RW(command_event_reg); -static ssize_t show_mem_gpio_reg(struct device *d, +static ssize_t mem_gpio_reg_show(struct device *d, struct device_attribute *attr, char *buf) { u32 reg = 0; @@ -1649,7 +1648,7 @@ static ssize_t show_mem_gpio_reg(struct device *d, reg = ipw_read_reg32(p, 0x301100); return sprintf(buf, "0x%08x\n", reg); } -static ssize_t store_mem_gpio_reg(struct device *d, +static ssize_t mem_gpio_reg_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1661,9 +1660,9 @@ static ssize_t store_mem_gpio_reg(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, store_mem_gpio_reg); +static DEVICE_ATTR_RW(mem_gpio_reg); -static ssize_t show_indirect_dword(struct device *d, +static ssize_t indirect_dword_show(struct device *d, struct device_attribute *attr, char *buf) { u32 reg = 0; @@ -1676,7 +1675,7 @@ static ssize_t show_indirect_dword(struct device *d, return sprintf(buf, "0x%08x\n", reg); } -static ssize_t store_indirect_dword(struct device *d, +static ssize_t indirect_dword_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1687,10 +1686,9 @@ static ssize_t store_indirect_dword(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(indirect_dword, 0644, - show_indirect_dword, store_indirect_dword); +static DEVICE_ATTR_RW(indirect_dword); -static ssize_t show_indirect_byte(struct device *d, +static ssize_t indirect_byte_show(struct device *d, struct device_attribute *attr, char *buf) { u8 reg = 0; @@ -1703,7 +1701,7 @@ static ssize_t show_indirect_byte(struct device *d, return sprintf(buf, "0x%02x\n", reg); } -static ssize_t store_indirect_byte(struct device *d, +static ssize_t indirect_byte_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1714,10 +1712,9 @@ static ssize_t store_indirect_byte(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(indirect_byte, 0644, - show_indirect_byte, store_indirect_byte); +static DEVICE_ATTR_RW(indirect_byte); -static ssize_t show_direct_dword(struct device *d, +static ssize_t 
direct_dword_show(struct device *d, struct device_attribute *attr, char *buf) { u32 reg = 0; @@ -1730,7 +1727,7 @@ static ssize_t show_direct_dword(struct device *d, return sprintf(buf, "0x%08x\n", reg); } -static ssize_t store_direct_dword(struct device *d, +static ssize_t direct_dword_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -1741,7 +1738,7 @@ static ssize_t store_direct_dword(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword); +static DEVICE_ATTR_RW(direct_dword); static int rf_kill_active(struct ipw_priv *priv) { @@ -1756,7 +1753,7 @@ static int rf_kill_active(struct ipw_priv *priv) return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0; } -static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr, +static ssize_t rf_kill_show(struct device *d, struct device_attribute *attr, char *buf) { /* 0 - RF kill not enabled @@ -1802,7 +1799,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) return 1; } -static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, +static ssize_t rf_kill_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1812,9 +1809,9 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill); +static DEVICE_ATTR_RW(rf_kill); -static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, +static ssize_t speed_scan_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1829,7 +1826,7 @@ static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr, return sprintf(buf, "0\n"); } -static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, +static ssize_t speed_scan_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1865,16 +1862,16 @@ static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan); +static DEVICE_ATTR_RW(speed_scan); -static ssize_t show_net_stats(struct device *d, struct device_attribute *attr, +static ssize_t net_stats_show(struct device *d, struct device_attribute *attr, char *buf) { struct ipw_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? 
'1' : '0'); } -static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, +static ssize_t net_stats_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw_priv *priv = dev_get_drvdata(d); @@ -1886,9 +1883,9 @@ static ssize_t store_net_stats(struct device *d, struct device_attribute *attr, return count; } -static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats); +static DEVICE_ATTR_RW(net_stats); -static ssize_t show_channels(struct device *d, +static ssize_t channels_show(struct device *d, struct device_attribute *attr, char *buf) { @@ -1932,7 +1929,7 @@ static ssize_t show_channels(struct device *d, return len; } -static DEVICE_ATTR(channels, 0400, show_channels, NULL); +static DEVICE_ATTR_ADMIN_RO(channels); static void notify_wx_assoc_event(struct ipw_priv *priv) { -- cgit v1.2.3 From f4b41f062c424209e3939a81e6da022e049a45f2 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Mon, 4 Apr 2022 18:30:22 +0200 Subject: net: remove noblock parameter from skb_recv_datagram() skb_recv_datagram() has two parameters 'flags' and 'noblock' that are merged inside skb_recv_datagram() by 'flags | (noblock ? MSG_DONTWAIT : 0)' As 'flags' may contain MSG_DONTWAIT as value most callers split the 'flags' into 'flags' and 'noblock' with finally obsolete bit operations like this: skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); And this is not even done consistently with the 'flags' parameter. This patch removes the obsolete and costly splitting into two parameters and only performs bit operations when really needed on the caller side. One missing conversion thankfully reported by kernel test robot. I missed to enable kunit tests to build the mctp code. Reported-by: kernel test robot Signed-off-by: Oliver Hartkopp Signed-off-by: David S. 
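A minimal before/after sketch of the call-site conversion this patch performs (illustration only, not part of the patch; the real hunks follow below):

	/* old signature: 'noblock' passed separately and OR-ed into 'flags' internally */
	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err);

	/* new signature: callers forward 'flags' unchanged; non-blocking behaviour
	 * is requested simply by having MSG_DONTWAIT set in 'flags'
	 */
	skb = skb_recv_datagram(sk, flags, &err);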
Miller --- drivers/isdn/mISDN/socket.c | 2 +- drivers/net/ppp/pppoe.c | 3 +-- include/linux/skbuff.h | 3 +-- net/appletalk/ddp.c | 3 +-- net/atm/common.c | 2 +- net/ax25/af_ax25.c | 3 +-- net/bluetooth/af_bluetooth.c | 3 +-- net/bluetooth/hci_sock.c | 3 +-- net/caif/caif_socket.c | 2 +- net/can/bcm.c | 5 +---- net/can/isotp.c | 4 +--- net/can/j1939/socket.c | 2 +- net/can/raw.c | 6 +----- net/core/datagram.c | 5 ++--- net/ieee802154/socket.c | 6 ++++-- net/ipv4/ping.c | 3 ++- net/ipv4/raw.c | 3 ++- net/ipv6/raw.c | 3 ++- net/iucv/af_iucv.c | 3 +-- net/key/af_key.c | 2 +- net/l2tp/l2tp_ip.c | 3 ++- net/l2tp/l2tp_ip6.c | 3 ++- net/l2tp/l2tp_ppp.c | 3 +-- net/mctp/af_mctp.c | 2 +- net/mctp/test/route-test.c | 8 ++++---- net/netlink/af_netlink.c | 3 +-- net/netrom/af_netrom.c | 3 ++- net/nfc/llcp_sock.c | 3 +-- net/nfc/rawsock.c | 3 +-- net/packet/af_packet.c | 2 +- net/phonet/datagram.c | 3 ++- net/phonet/pep.c | 6 ++++-- net/qrtr/af_qrtr.c | 3 +-- net/rose/af_rose.c | 3 ++- net/unix/af_unix.c | 5 +++-- net/vmw_vsock/vmci_transport.c | 5 +---- net/x25/af_x25.c | 3 +-- 37 files changed, 57 insertions(+), 70 deletions(-) (limited to 'drivers') diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index a6606736d8c5..2776ca5fc33f 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -121,7 +121,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (sk->sk_state == MISDN_CLOSED) return 0; - skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 3619520340b7..1b41cd9732d7 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1011,8 +1011,7 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, goto end; } - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &error); + skb = skb_recv_datagram(sk, flags, &error); if (error < 0) goto end; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3a30cae8b0a5..2394441fa3dd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3836,8 +3836,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, struct sk_buff *__skb_recv_datagram(struct sock *sk, struct sk_buff_head *sk_queue, unsigned int flags, int *off, int *err); -struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, - int *err); +struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err); __poll_t datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int skb_copy_datagram_iter(const struct sk_buff *from, int offset, diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index bf5736c1d458..a06f4d4a6f47 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1753,8 +1753,7 @@ static int atalk_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int err = 0; struct sk_buff *skb; - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &err); + skb = skb_recv_datagram(sk, flags, &err); lock_sock(sk); if (!skb) diff --git a/net/atm/common.c b/net/atm/common.c index 1cfa9bf1d187..d0c8ab7ff8f6 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -540,7 +540,7 @@ int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, !test_bit(ATM_VF_READY, &vcc->flags)) return 0; - skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error); + skb = skb_recv_datagram(sk, flags, &error); if 
(!skb) return error; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 363d47f94532..116481e4da82 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1669,8 +1669,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, } /* Now we can treat all alike */ - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &err); + skb = skb_recv_datagram(sk, flags, &err); if (skb == NULL) goto out; diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index a0cb2e3da8d4..62705734343b 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -251,7 +251,6 @@ EXPORT_SYMBOL(bt_accept_dequeue); int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; @@ -263,7 +262,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (flags & MSG_OOB) return -EOPNOTSUPP; - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 33b3c0ffc339..189e3115c8c6 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -1453,7 +1453,6 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; @@ -1470,7 +1469,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, if (sk->sk_state == BT_CLOSED) return 0; - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 2b8892d502f7..251e666ba9a2 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -282,7 +282,7 @@ static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m, if (flags & MSG_OOB) goto read_error; - skb = skb_recv_datagram(sk, flags, 0 , &ret); + skb = skb_recv_datagram(sk, flags, &ret); if (!skb) goto read_error; copylen = skb->len; diff --git a/net/can/bcm.c b/net/can/bcm.c index 95d209b52e6a..64c07e650bb4 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1632,12 +1632,9 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; - int noblock; int err; - noblock = flags & MSG_DONTWAIT; - flags &= ~MSG_DONTWAIT; - skb = skb_recv_datagram(sk, flags, noblock, &error); + skb = skb_recv_datagram(sk, flags, &error); if (!skb) return error; diff --git a/net/can/isotp.c b/net/can/isotp.c index bafb0fb5f0e0..02d81effaa54 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1047,7 +1047,6 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, struct sock *sk = sock->sk; struct sk_buff *skb; struct isotp_sock *so = isotp_sk(sk); - int noblock = flags & MSG_DONTWAIT; int ret = 0; if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK)) @@ -1056,8 +1055,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, if (!so->bound) return -EADDRNOTAVAIL; - flags &= ~MSG_DONTWAIT; - skb = skb_recv_datagram(sk, flags, noblock, &ret); + skb = skb_recv_datagram(sk, flags, &ret); if (!skb) return ret; diff --git a/net/can/j1939/socket.c 
b/net/can/j1939/socket.c index 6dff4510687a..0bb4fd3f6264 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -802,7 +802,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg, return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939, SCM_J1939_ERRQUEUE); - skb = skb_recv_datagram(sk, flags, 0, &ret); + skb = skb_recv_datagram(sk, flags, &ret); if (!skb) return ret; diff --git a/net/can/raw.c b/net/can/raw.c index 7105fa4824e4..0cf728dcff36 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -846,16 +846,12 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, struct sock *sk = sock->sk; struct sk_buff *skb; int err = 0; - int noblock; - - noblock = flags & MSG_DONTWAIT; - flags &= ~MSG_DONTWAIT; if (flags & MSG_ERRQUEUE) return sock_recv_errqueue(sk, msg, size, SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE); - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; diff --git a/net/core/datagram.c b/net/core/datagram.c index ee290776c661..70126d15ca6e 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -310,12 +310,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, EXPORT_SYMBOL(__skb_recv_datagram); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, - int noblock, int *err) + int *err) { int off = 0; - return __skb_recv_datagram(sk, &sk->sk_receive_queue, - flags | (noblock ? MSG_DONTWAIT : 0), + return __skb_recv_datagram(sk, &sk->sk_receive_queue, flags, &off, err); } EXPORT_SYMBOL(skb_recv_datagram); diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 3b2366a88c3c..a725dd9bbda8 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -314,7 +314,8 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int err = -EOPNOTSUPP; struct sk_buff *skb; - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; @@ -703,7 +704,8 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, struct dgram_sock *ro = dgram_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name); - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 3ee947557b88..550dc5c795c0 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -861,7 +861,8 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, if (flags & MSG_ERRQUEUE) return inet_recv_error(sk, msg, len, addr_len); - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 9f97b9cbf7b3..c9dd9603f2e7 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -769,7 +769,8 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, goto out; } - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? 
MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c51d5ce3711c..8bb41f3b246a 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -477,7 +477,8 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len, addr_len); - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a1760add5bf1..a0385ddbffcf 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1223,7 +1223,6 @@ static void iucv_process_message_q(struct sock *sk) static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned int copied, rlen; @@ -1242,7 +1241,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, /* receive/dequeue next skb: * the function understands MSG_PEEK and, thus, does not dequeue skb */ - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; diff --git a/net/key/af_key.c b/net/key/af_key.c index fd51db3be91c..d09ec26b1081 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3696,7 +3696,7 @@ static int pfkey_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) goto out; - skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); + skb = skb_recv_datagram(sk, flags, &err); if (skb == NULL) goto out; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index b3edafa5fba4..c6a5cc2d88e7 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -526,7 +526,8 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto out; - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 96f975777438..97fde8a9209b 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -671,7 +671,8 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len, addr_len); - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? 
MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index bf35710127dd..8be1fdc68a0b 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -191,8 +191,7 @@ static int pppol2tp_recvmsg(struct socket *sock, struct msghdr *msg, goto end; err = 0; - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto end; diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index f0702d920d8d..5f204eb8abd2 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -196,7 +196,7 @@ static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK)) return -EOPNOTSUPP; - skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &rc); + skb = skb_recv_datagram(sk, flags, &rc); if (!skb) return rc; diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index 61205cf40074..24df29e135ed 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -352,7 +352,7 @@ static void mctp_test_route_input_sk(struct kunit *test) if (params->deliver) { KUNIT_EXPECT_EQ(test, rc, 0); - skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc); + skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2); KUNIT_EXPECT_EQ(test, skb->len, 1); @@ -360,7 +360,7 @@ static void mctp_test_route_input_sk(struct kunit *test) } else { KUNIT_EXPECT_NE(test, rc, 0); - skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc); + skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); KUNIT_EXPECT_PTR_EQ(test, skb2, NULL); } @@ -423,7 +423,7 @@ static void mctp_test_route_input_sk_reasm(struct kunit *test) rc = mctp_route_input(&rt->rt, skb); } - skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc); + skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); if (params->rx_len) { KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2); @@ -582,7 +582,7 @@ static void mctp_test_route_input_sk_keys(struct kunit *test) rc = mctp_route_input(&rt->rt, skb); /* (potentially) receive message */ - skb2 = skb_recv_datagram(sock->sk, 0, 1, &rc); + skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); if (params->deliver) KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 47a876ccd288..9fa85bb36c0e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1931,7 +1931,6 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, struct scm_cookie scm; struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); - int noblock = flags & MSG_DONTWAIT; size_t copied; struct sk_buff *skb, *data_skb; int err, ret; @@ -1941,7 +1940,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, copied = 0; - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (skb == NULL) goto out; diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index fa9dc2ba3941..6f7f4392cffb 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1159,7 +1159,8 @@ static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, } /* Now we can treat all alike */ - if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) { + skb = skb_recv_datagram(sk, flags, &er); + if (!skb) { release_sock(sk); return er; } diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 
4ca35791c93b..77642d18a3b4 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -821,7 +821,6 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg, static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; unsigned int copied, rlen; struct sk_buff *skb, *cskb; @@ -842,7 +841,7 @@ static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg, if (flags & (MSG_OOB)) return -EOPNOTSUPP; - skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) { pr_err("Recv datagram failed state %d %d %d", sk->sk_state, err, sock_error(sk)); diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 0ca214ab5aef..8dd569765f96 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -238,7 +238,6 @@ static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { - int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct sk_buff *skb; int copied; @@ -246,7 +245,7 @@ static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); - skb = skb_recv_datagram(sk, flags, noblock, &rc); + skb = skb_recv_datagram(sk, flags, &rc); if (!skb) return rc; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c39c09899fd0..d3caaf4d4b3e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3421,7 +3421,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, * but then it will block. */ - skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); + skb = skb_recv_datagram(sk, flags, &err); /* * An error occurred so return it. Because skb_recv_datagram() diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index 393e6aa7a592..3f2e62b63dd4 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c @@ -123,7 +123,8 @@ static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, MSG_CMSG_COMPAT)) goto out_nofree; - skb = skb_recv_datagram(sk, flags, noblock, &rval); + flags |= (noblock ? MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &rval); if (skb == NULL) goto out_nofree; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 65d463ad8770..441a26706592 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -772,7 +772,8 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp, u8 pipe_handle, enabled, n_sb; u8 aligned = 0; - skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp); + skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0, + errp); if (!skb) return NULL; @@ -1267,7 +1268,8 @@ static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, return -EINVAL; } - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags |= (noblock ? 
MSG_DONTWAIT : 0); + skb = skb_recv_datagram(sk, flags, &err); lock_sock(sk); if (skb == NULL) { if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT) diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c index ec2322529727..5c2fb992803b 100644 --- a/net/qrtr/af_qrtr.c +++ b/net/qrtr/af_qrtr.c @@ -1035,8 +1035,7 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, return -EADDRNOTAVAIL; } - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &rc); + skb = skb_recv_datagram(sk, flags, &rc); if (!skb) { release_sock(sk); return rc; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 30a1cf4c16c6..bf2d986a6bc3 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1230,7 +1230,8 @@ static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, return -ENOTCONN; /* Now we can treat all alike */ - if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) + skb = skb_recv_datagram(sk, flags, &er); + if (!skb) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e71a312faa1e..fecbd95da918 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1643,7 +1643,8 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags, * so that no locks are necessary. */ - skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err); + skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0, + &err); if (!skb) { /* This means receive shutdown. */ if (err == 0) @@ -2500,7 +2501,7 @@ static int unix_read_sock(struct sock *sk, read_descriptor_t *desc, int used, err; mutex_lock(&u->iolock); - skb = skb_recv_datagram(sk, 0, 1, &err); + skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); mutex_unlock(&u->iolock); if (!skb) return err; diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index b17dc9745188..b14f0ed7427b 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1732,19 +1732,16 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk, int flags) { int err; - int noblock; struct vmci_datagram *dg; size_t payload_len; struct sk_buff *skb; - noblock = flags & MSG_DONTWAIT; - if (flags & MSG_OOB || flags & MSG_ERRQUEUE) return -EOPNOTSUPP; /* Retrieve the head sk_buff from the socket's receive queue. */ err = 0; - skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); + skb = skb_recv_datagram(&vsk->sk, flags, &err); if (!skb) return err; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 3a171828638b..6bc2ac8d8146 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1315,8 +1315,7 @@ static int x25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, } else { /* Now we can treat all alike */ release_sock(sk); - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, - flags & MSG_DONTWAIT, &rc); + skb = skb_recv_datagram(sk, flags, &rc); lock_sock(sk); if (!skb) goto out; -- cgit v1.2.3 From bb2a1934ca01d32ab7d8d109e574682199725afd Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Tue, 5 Apr 2022 14:09:51 +0200 Subject: net: phy: mscc-miim: add support to set MDIO bus frequency Until now, the MDIO bus will have the hardware default bus frequency. Read the desired frequency of the bus from the device tree and configure it. Signed-off-by: Michael Walle Reviewed-by: Andrew Lunn Signed-off-by: David S. 
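The divider programmed into MSCC_MIIM_REG_CFG follows directly from the requested bus frequency; a small sketch of the arithmetic used by mscc_miim_clk_set() below (illustration only, the example values are made up):

	/* div = DIV_ROUND_UP(rate, 2 * bus_freq) - 1; the resulting MDC rate is
	 * roughly rate / (2 * (div + 1)), so rounding up keeps the bus at or
	 * below the "clock-frequency" requested in the device tree.
	 * Example: rate = 125 MHz, bus_freq = 2.5 MHz -> div = 24 -> MDC = 2.5 MHz.
	 * div must be non-zero and fit in MSCC_MIIM_CFG_PRESCALE_MASK (bits 7:0).
	 */
	u32 div = DIV_ROUND_UP(clk_get_rate(miim->clk), 2 * miim->bus_freq) - 1;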
Miller --- drivers/net/mdio/mdio-mscc-miim.c | 58 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index c483ba67c21f..c3d1a7eaec41 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -30,6 +31,8 @@ #define MSCC_MIIM_CMD_VLD BIT(31) #define MSCC_MIIM_REG_DATA 0xC #define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17)) +#define MSCC_MIIM_REG_CFG 0x10 +#define MSCC_MIIM_CFG_PRESCALE_MASK GENMASK(7, 0) #define MSCC_PHY_REG_PHY_CFG 0x0 #define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3)) @@ -50,6 +53,8 @@ struct mscc_miim_dev { int mii_status_offset; struct regmap *phy_regs; const struct mscc_miim_info *info; + struct clk *clk; + u32 bus_freq; }; /* When high resolution timers aren't built-in: we can't use usleep_range() as @@ -235,9 +240,32 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name, } EXPORT_SYMBOL(mscc_miim_setup); +static int mscc_miim_clk_set(struct mii_bus *bus) +{ + struct mscc_miim_dev *miim = bus->priv; + unsigned long rate; + u32 div; + + /* Keep the current settings */ + if (!miim->bus_freq) + return 0; + + rate = clk_get_rate(miim->clk); + + div = DIV_ROUND_UP(rate, 2 * miim->bus_freq) - 1; + if (div == 0 || div & ~MSCC_MIIM_CFG_PRESCALE_MASK) { + dev_err(&bus->dev, "Incorrect MDIO clock frequency\n"); + return -EINVAL; + } + + return regmap_update_bits(miim->regs, MSCC_MIIM_REG_CFG, + MSCC_MIIM_CFG_PRESCALE_MASK, div); +} + static int mscc_miim_probe(struct platform_device *pdev) { struct regmap *mii_regmap, *phy_regmap = NULL; + struct device_node *np = pdev->dev.of_node; void __iomem *regs, *phy_regs; struct mscc_miim_dev *miim; struct resource *res; @@ -288,21 +316,47 @@ static int mscc_miim_probe(struct platform_device *pdev) if (!miim->info) return -EINVAL; - ret = of_mdiobus_register(bus, pdev->dev.of_node); + miim->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(miim->clk)) + return PTR_ERR(miim->clk); + + of_property_read_u32(np, "clock-frequency", &miim->bus_freq); + + if (miim->bus_freq && !miim->clk) { + dev_err(&pdev->dev, + "cannot use clock-frequency without a clock\n"); + return -EINVAL; + } + + ret = clk_prepare_enable(miim->clk); + if (ret) + return ret; + + ret = mscc_miim_clk_set(bus); + if (ret) + goto out_disable_clk; + + ret = of_mdiobus_register(bus, np); if (ret < 0) { dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); - return ret; + goto out_disable_clk; } platform_set_drvdata(pdev, bus); return 0; + +out_disable_clk: + clk_disable_unprepare(miim->clk); + return ret; } static int mscc_miim_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); + struct mscc_miim_dev *miim = bus->priv; + clk_disable_unprepare(miim->clk); mdiobus_unregister(bus); return 0; -- cgit v1.2.3 From d776a57e4a284b33bc839687afe9c381f3577eee Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:43 +0200 Subject: net: ethernet: mtk_eth_soc: add support for coherent DMA It improves performance by eliminating the need for a cache flush on rx and tx In preparation for supporting WED (Wireless Ethernet Dispatch), also add a function for disabling coherent DMA at runtime. Signed-off-by: Felix Fietkau Signed-off-by: David S. 
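The conversion itself is mechanical: every dma_map_single()/dma_alloc_coherent()/dma_unmap_*() call in the driver now goes through eth->dma_dev instead of eth->dev, and dma_dev is initialised to &pdev->dev in mtk_probe(), so behaviour is unchanged until something re-points it. A rough sketch of the intended use of the new helper (the wed_dev variable here is hypothetical):

	/* default, set up in mtk_probe(): mappings target the ethernet device */
	eth->dma_dev = eth->dev;
	dma_addr = dma_map_single(eth->dma_dev, data, len, DMA_FROM_DEVICE);

	/* later, e.g. when WED takes over the rings: close the netdevs, swap
	 * the DMA device and reopen them so the rings are rebuilt against it
	 */
	mtk_eth_set_dma_device(eth, wed_dev);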
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 95 +++++++++++++++++++++-------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 9 +++ 2 files changed, 80 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f02d07ec5ccb..70db217ed831 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -786,7 +787,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) dma_addr_t dma_addr; int i; - eth->scratch_ring = dma_alloc_coherent(eth->dev, + eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, cnt * sizeof(struct mtk_tx_dma), ð->phy_scratch_ring, GFP_ATOMIC); @@ -798,10 +799,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) if (unlikely(!eth->scratch_head)) return -ENOMEM; - dma_addr = dma_map_single(eth->dev, + dma_addr = dma_map_single(eth->dma_dev, eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; phy_ring_tail = eth->phy_scratch_ring + @@ -855,26 +856,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, { if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - dma_unmap_single(eth->dev, + dma_unmap_single(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(eth->dev, + dma_unmap_page(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } } else { if (dma_unmap_len(tx_buf, dma_len0)) { - dma_unmap_page(eth->dev, + dma_unmap_page(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } if (dma_unmap_len(tx_buf, dma_len1)) { - dma_unmap_page(eth->dev, + dma_unmap_page(eth->dma_dev, dma_unmap_addr(tx_buf, dma_addr1), dma_unmap_len(tx_buf, dma_len1), DMA_TO_DEVICE); @@ -952,9 +953,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (skb_vlan_tag_present(skb)) txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); - mapped_addr = dma_map_single(eth->dev, skb->data, + mapped_addr = dma_map_single(eth->dma_dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) return -ENOMEM; WRITE_ONCE(itxd->txd1, mapped_addr); @@ -993,10 +994,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, + mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset, frag_map_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) goto err_dma; if (i == nr_frags - 1 && @@ -1274,18 +1275,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, netdev->stats.rx_dropped++; goto release_desc; } - dma_addr = dma_map_single(eth->dev, + dma_addr = dma_map_single(eth->dma_dev, new_data + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) { skb_free_frag(new_data); netdev->stats.rx_dropped++; goto release_desc; } - 
dma_unmap_single(eth->dev, trxd.rxd1, + dma_unmap_single(eth->dma_dev, trxd.rxd1, ring->buf_size, DMA_FROM_DEVICE); /* receive data */ @@ -1558,7 +1559,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (!ring->buf) goto no_tx_mem; - ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, &ring->phys, GFP_ATOMIC); if (!ring->dma) goto no_tx_mem; @@ -1576,7 +1577,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) * descriptors in ring->dma_pdma. */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, + ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, &ring->phys_pdma, GFP_ATOMIC); if (!ring->dma_pdma) @@ -1635,7 +1636,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) } if (ring->dma) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, MTK_DMA_SIZE * sizeof(*ring->dma), ring->dma, ring->phys); @@ -1643,7 +1644,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) } if (ring->dma_pdma) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, MTK_DMA_SIZE * sizeof(*ring->dma_pdma), ring->dma_pdma, ring->phys_pdma); @@ -1688,18 +1689,18 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; } - ring->dma = dma_alloc_coherent(eth->dev, + ring->dma = dma_alloc_coherent(eth->dma_dev, rx_dma_size * sizeof(*ring->dma), &ring->phys, GFP_ATOMIC); if (!ring->dma) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - dma_addr_t dma_addr = dma_map_single(eth->dev, + dma_addr_t dma_addr = dma_map_single(eth->dma_dev, ring->data[i] + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; ring->dma[i].rxd1 = (unsigned int)dma_addr; @@ -1735,7 +1736,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) continue; if (!ring->dma[i].rxd1) continue; - dma_unmap_single(eth->dev, + dma_unmap_single(eth->dma_dev, ring->dma[i].rxd1, ring->buf_size, DMA_FROM_DEVICE); @@ -1746,7 +1747,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) } if (ring->dma) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, ring->dma_size * sizeof(*ring->dma), ring->dma, ring->phys); @@ -2099,7 +2100,7 @@ static void mtk_dma_free(struct mtk_eth *eth) if (eth->netdev[i]) netdev_reset_queue(eth->netdev[i]); if (eth->scratch_ring) { - dma_free_coherent(eth->dev, + dma_free_coherent(eth->dma_dev, MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), eth->scratch_ring, eth->phy_scratch_ring); @@ -2448,6 +2449,8 @@ static void mtk_dim_tx(struct work_struct *work) static int mtk_hw_init(struct mtk_eth *eth) { + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | + ETHSYS_DMA_AG_MAP_PPE; int i, val, ret; if (test_and_set_bit(MTK_HW_INIT, ð->state)) @@ -2460,6 +2463,10 @@ static int mtk_hw_init(struct mtk_eth *eth) if (ret) goto err_disable_pm; + if (eth->ethsys) + regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, + of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { ret = device_reset(eth->dev); if (ret) { @@ -3040,6 +3047,35 @@ free_netdev: return err; } +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) +{ + struct net_device *dev, *tmp; + LIST_HEAD(dev_list); + int i; + + rtnl_lock(); + + for (i = 0; i < MTK_MAC_COUNT; i++) { + dev = eth->netdev[i]; + + 
if (!dev || !(dev->flags & IFF_UP)) + continue; + + list_add_tail(&dev->close_list, &dev_list); + } + + dev_close_many(&dev_list, false); + + eth->dma_dev = dma_dev; + + list_for_each_entry_safe(dev, tmp, &dev_list, close_list) { + list_del_init(&dev->close_list); + dev_open(dev, NULL); + } + + rtnl_unlock(); +} + static int mtk_probe(struct platform_device *pdev) { struct device_node *mac_np; @@ -3053,6 +3089,7 @@ static int mtk_probe(struct platform_device *pdev) eth->soc = of_device_get_match_data(&pdev->dev); eth->dev = &pdev->dev; + eth->dma_dev = &pdev->dev; eth->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(eth->base)) return PTR_ERR(eth->base); @@ -3101,6 +3138,16 @@ static int mtk_probe(struct platform_device *pdev) } } + if (of_dma_is_coherent(pdev->dev.of_node)) { + struct regmap *cci; + + cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,cci-control"); + /* enable CPU/bus coherency */ + if (!IS_ERR(cci)) + regmap_write(cci, 0, 3); + } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), GFP_KERNEL); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index c9d42be314b5..e701544c4287 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -465,6 +465,12 @@ #define RSTCTRL_FE BIT(6) #define RSTCTRL_PPE BIT(31) +/* ethernet dma channel agent map */ +#define ETHSYS_DMA_AG_MAP 0x408 +#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) +#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) +#define ETHSYS_DMA_AG_MAP_PPE BIT(2) + /* SGMII subsystem config registers */ /* Register to auto-negotiation restart */ #define SGMSYS_PCS_CONTROL_1 0x0 @@ -882,6 +888,7 @@ struct mtk_sgmii { /* struct mtk_eth - This is the main datasructure for holding the state * of the driver * @dev: The device pointer + * @dev: The device pointer used for dma mapping/alloc * @base: The mapped register i/o base * @page_lock: Make sure that register operations are atomic * @tx_irq__lock: Make sure that IRQ register operations are atomic @@ -925,6 +932,7 @@ struct mtk_sgmii { struct mtk_eth { struct device *dev; + struct device *dma_dev; void __iomem *base; spinlock_t page_lock; spinlock_t tx_irq_lock; @@ -1023,6 +1031,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_eth_offload_init(struct mtk_eth *eth); int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data); +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); #endif /* MTK_ETH_H */ -- cgit v1.2.3 From 804775dfc2885e93a0a4b35db1914c2cc25172b5 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:47 +0200 Subject: net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED) The Wireless Ethernet Dispatch subsystem on the MT7622 SoC can be configured to intercept and handle access to the DMA queues and PCIe interrupts for a MT7615/MT7915 wireless card. It can manage the internal WDMA (Wireless DMA) controller, which allows ethernet packets to be passed from the packet switch engine (PSE) to the wireless card, bypassing the CPU entirely. This can be used to implement hardware flow offloading from ethernet to WLAN. Signed-off-by: Felix Fietkau Signed-off-by: David S. 
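A rough sketch of the WLAN-driver side of the handshake, pieced together from the struct mtk_wed_device fields that mtk_wed.c consumes (the driver-facing header include/linux/soc/mediatek/mtk_wed.h added by this commit is not shown in this excerpt, so the mtk_wed_device_attach() wrapper name, the callback names and the literal values below are assumptions):

	struct mtk_wed_device wed = {};

	wed.wlan.pci_dev = pdev;               /* the MT7615/MT7915 PCIe device */
	wed.wlan.wpdma_phys = wpdma_phys;      /* physical base of the WLAN PDMA regs */
	wed.wlan.token_start = 0;              /* first tx token handed to WED */
	wed.wlan.nbuf = 1024;                  /* tx buffers WED pre-allocates */
	wed.wlan.init_buf = wlan_init_txd;     /* fills the per-buffer tx descriptor */
	wed.wlan.offload_enable = wlan_offload_enable;
	wed.wlan.offload_disable = wlan_offload_disable;

	if (mtk_wed_device_attach(&wed) < 0)
		;	/* no WED available: keep the normal, CPU-driven tx path */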
Miller --- drivers/net/ethernet/mediatek/Kconfig | 4 + drivers/net/ethernet/mediatek/Makefile | 5 + drivers/net/ethernet/mediatek/mtk_eth_soc.c | 17 + drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3 + drivers/net/ethernet/mediatek/mtk_wed.c | 875 ++++++++++++++++++++++++ drivers/net/ethernet/mediatek/mtk_wed.h | 128 ++++ drivers/net/ethernet/mediatek/mtk_wed_debugfs.c | 175 +++++ drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8 + drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251 +++++++ include/linux/soc/mediatek/mtk_wed.h | 131 ++++ 10 files changed, 1597 insertions(+) create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h create mode 100644 include/linux/soc/mediatek/mtk_wed.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 86d356b4388d..da4ec235d146 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK if NET_VENDOR_MEDIATEK +config NET_MEDIATEK_SOC_WED + depends on ARCH_MEDIATEK || COMPILE_TEST + def_bool NET_MEDIATEK_SOC != n + config NET_MEDIATEK_SOC tristate "MediaTek SoC Gigabit Ethernet support" depends on NET_DSA || !NET_DSA diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 79d4cdbbcbf5..45ba0970504a 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -5,4 +5,9 @@ obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o +ifdef CONFIG_DEBUG_FS +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o +endif +obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 70db217ed831..4d7c542d89fb 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -24,6 +24,7 @@ #include #include "mtk_eth_soc.h" +#include "mtk_wed.h" static int mtk_msg_level = -1; module_param_named(msg_level, mtk_msg_level, int, 0); @@ -3170,6 +3171,22 @@ static int mtk_probe(struct platform_device *pdev) } } + for (i = 0;; i++) { + struct device_node *np = of_parse_phandle(pdev->dev.of_node, + "mediatek,wed", i); + static const u32 wdma_regs[] = { + MTK_WDMA0_BASE, + MTK_WDMA1_BASE + }; + void __iomem *wdma; + + if (!np || i >= ARRAY_SIZE(wdma_regs)) + break; + + wdma = eth->base + wdma_regs[i]; + mtk_wed_add_hw(np, eth, wdma, i); + } + for (i = 0; i < 3; i++) { if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) eth->irq[i] = eth->irq[0]; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index e701544c4287..74661682fd92 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -295,6 +295,9 @@ #define MTK_GDM1_TX_GPCNT 0x2438 #define MTK_STAT_OFFSET 0x40 +#define MTK_WDMA0_BASE 0x2800 +#define MTK_WDMA1_BASE 0x2c00 + /* QDMA descriptor txd4 */ #define TX_DMA_CHKSUM (0x7 << 29) #define TX_DMA_TSO BIT(28) diff --git 
a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c new file mode 100644 index 000000000000..f0eacf819cd9 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -0,0 +1,875 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mtk_eth_soc.h" +#include "mtk_wed_regs.h" +#include "mtk_wed.h" +#include "mtk_ppe.h" + +#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) + +#define MTK_WED_PKT_SIZE 1900 +#define MTK_WED_BUF_SIZE 2048 +#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) + +#define MTK_WED_TX_RING_SIZE 2048 +#define MTK_WED_WDMA_RING_SIZE 1024 + +static struct mtk_wed_hw *hw_list[2]; +static DEFINE_MUTEX(hw_lock); + +static void +wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) +{ + regmap_update_bits(dev->hw->regs, reg, mask | val, val); +} + +static void +wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return wed_m32(dev, reg, 0, mask); +} + +static void +wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return wed_m32(dev, reg, mask, 0); +} + +static void +wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) +{ + wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); +} + +static void +wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + wdma_m32(dev, reg, 0, mask); +} + +static u32 +mtk_wed_read_reset(struct mtk_wed_device *dev) +{ + return wed_r32(dev, MTK_WED_RESET); +} + +static void +mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) +{ + u32 status; + + wed_w32(dev, MTK_WED_RESET, mask); + if (readx_poll_timeout(mtk_wed_read_reset, dev, status, + !(status & mask), 0, 1000)) + WARN_ON_ONCE(1); +} + +static struct mtk_wed_hw * +mtk_wed_assign(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw; + + hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; + if (!hw || hw->wed_dev) + return NULL; + + hw->wed_dev = dev; + return hw; +} + +static int +mtk_wed_buffer_alloc(struct mtk_wed_device *dev) +{ + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + void **page_list; + int token = dev->wlan.token_start; + int ring_size; + int n_pages; + int i, page_idx; + + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + n_pages = ring_size / MTK_WED_BUF_PER_PAGE; + + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + dev->buf_ring.size = ring_size; + dev->buf_ring.pages = page_list; + + desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev->buf_ring.desc = desc; + dev->buf_ring.desc_phys = desc_phys; + + for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { + dma_addr_t page_phys, buf_phys; + struct page *page; + void *buf; + int s; + + page = __dev_alloc_pages(GFP_KERNEL, 0); + if (!page) + return -ENOMEM; + + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev->hw->dev, page_phys)) { + __free_page(page); + return -ENOMEM; + } + + page_list[page_idx++] = page; + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + buf = page_to_virt(page); + buf_phys = page_phys; + + for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { + u32 txd_size; + + txd_size = dev->wlan.init_buf(buf, buf_phys, token++); + + desc->buf0 = buf_phys; + desc->buf1 = buf_phys + txd_size; + desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, 
+ txd_size) | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, + MTK_WED_BUF_SIZE - txd_size) | + MTK_WDMA_DESC_CTRL_LAST_SEG1; + desc->info = 0; + desc++; + + buf += MTK_WED_BUF_SIZE; + buf_phys += MTK_WED_BUF_SIZE; + } + + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static void +mtk_wed_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wdma_desc *desc = dev->buf_ring.desc; + void **page_list = dev->buf_ring.pages; + int page_idx; + int i; + + if (!page_list) + return; + + if (!desc) + goto free_pagelist; + + for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { + void *page = page_list[page_idx++]; + + if (!page) + break; + + dma_unmap_page(dev->hw->dev, desc[i].buf0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(page); + } + + dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), + desc, dev->buf_ring.desc_phys); + +free_pagelist: + kfree(page_list); +} + +static void +mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) +{ + if (!ring->desc) + return; + + dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc), + ring->desc, ring->desc_phys); +} + +static void +mtk_wed_free_tx_rings(struct mtk_wed_device *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) + mtk_wed_free_ring(dev, &dev->tx_ring[i]); + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + mtk_wed_free_ring(dev, &dev->tx_wdma[i]); +} + +static void +mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) +{ + u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; + + if (!dev->hw->num_flows) + mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + + wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0); + wed_r32(dev, MTK_WED_EXT_INT_MASK); +} + +static void +mtk_wed_stop(struct mtk_wed_device *dev) +{ + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); + mtk_wed_set_ext_int(dev, false); + + wed_clr(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_EN | + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); + wdma_w32(dev, MTK_WDMA_INT_MASK, 0); + wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); + + wed_clr(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_TX_DMA_EN | + MTK_WED_GLO_CFG_RX_DMA_EN); + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); +} + +static void +mtk_wed_detach(struct mtk_wed_device *dev) +{ + struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node; + struct mtk_wed_hw *hw = dev->hw; + + mutex_lock(&hw_lock); + + mtk_wed_stop(dev); + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + mtk_wed_reset(dev, MTK_WED_RESET_WED); + + mtk_wed_free_buffer(dev); + mtk_wed_free_tx_rings(dev); + + if (of_dma_is_coherent(wlan_node)) + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, + BIT(hw->index), BIT(hw->index)); + + if (!hw_list[!hw->index]->wed_dev && + hw->eth->dma_dev != hw->eth->dev) + mtk_eth_set_dma_device(hw->eth, hw->eth->dev); + + memset(dev, 0, sizeof(*dev)); + module_put(THIS_MODULE); + + hw->wed_dev = NULL; + mutex_unlock(&hw_lock); +} + +static void +mtk_wed_hw_init_early(struct mtk_wed_device *dev) +{ + u32 mask, set; + u32 offset; + + mtk_wed_stop(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); + + mask = 
MTK_WED_WDMA_GLO_CFG_BT_SIZE | + MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; + set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | + MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); + + wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES); + + offset = dev->hw->index ? 0x04000400 : 0; + wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); + wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); + + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index)); + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); +} + +static void +mtk_wed_hw_init(struct mtk_wed_device *dev) +{ + if (dev->init_done) + return; + + dev->init_done = true; + mtk_wed_set_ext_int(dev, false); + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); + + wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); + + wed_w32(dev, MTK_WED_TX_BM_TKID, + FIELD_PREP(MTK_WED_TX_BM_TKID_START, + dev->wlan.token_start) | + FIELD_PREP(MTK_WED_TX_BM_TKID_END, + dev->wlan.token_start + dev->wlan.nbuf - 1)); + + wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); + + wed_w32(dev, MTK_WED_TX_BM_DYN_THR, + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | + MTK_WED_TX_BM_DYN_THR_HI); + + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + + wed_set(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + + wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); +} + +static void +mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size) +{ + int i; + + for (i = 0; i < size; i++) { + desc[i].buf0 = 0; + desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc[i].buf1 = 0; + desc[i].info = 0; + } +} + +static u32 +mtk_wed_check_busy(struct mtk_wed_device *dev) +{ + if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) + return true; + + if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_CTRL) & + (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) + return true; + + return false; +} + +static int +mtk_wed_poll_busy(struct mtk_wed_device *dev) +{ + int sleep = 15000; + int timeout = 100 * sleep; + u32 val; + + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, + timeout, false, dev); +} + +static void +mtk_wed_reset_dma(struct mtk_wed_device *dev) +{ + bool busy = false; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { + struct mtk_wdma_desc *desc = dev->tx_ring[i].desc; + + if (!desc) + continue; + + mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE); + } + + if (mtk_wed_poll_busy(dev)) + busy = mtk_wed_check_busy(dev); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); + } else { + wed_w32(dev, MTK_WED_RESET_IDX, + MTK_WED_RESET_IDX_TX | + MTK_WED_RESET_IDX_RX); + wed_w32(dev, MTK_WED_RESET_IDX, 0); + } + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); + 
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); + } else { + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, + MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); + + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); + + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); + } + + for (i = 0; i < 100; i++) { + val = wed_r32(dev, MTK_WED_TX_BM_INTF); + if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) + break; + } + + mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); + } else { + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, + MTK_WED_WPDMA_RESET_IDX_TX | + MTK_WED_WPDMA_RESET_IDX_RX); + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); + } + +} + +static int +mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, + int size) +{ + ring->desc = dma_alloc_coherent(dev->hw->dev, + size * sizeof(*ring->desc), + &ring->desc_phys, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->size = size; + mtk_wed_ring_reset(ring->desc, size); + + return 0; +} + +static int +mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) +{ + struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; + + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE)) + return -ENOMEM; + + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, + size); + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, + size); + + return 0; +} + +static void +mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) +{ + u32 wdma_mask; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + if (!dev->tx_wdma[i].desc) + mtk_wed_wdma_ring_setup(dev, i, 16); + + wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); + + mtk_wed_hw_init(dev); + + wed_set(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_EN | + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); + + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, + MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | + MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); + + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, + MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); + + wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); + wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); + + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); + wed_w32(dev, MTK_WED_INT_MASK, irq_mask); + + wed_set(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_TX_DMA_EN | + MTK_WED_GLO_CFG_RX_DMA_EN); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + + mtk_wed_set_ext_int(dev, true); + val = dev->wlan.wpdma_phys | + MTK_PCIE_MIRROR_MAP_EN | + FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); + + if (dev->hw->index) + val |= BIT(1); + val |= BIT(0); + regmap_write(dev->hw->mirror, dev->hw->index * 4, val); + + dev->running = true; +} + 
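/* Illustration only, not part of this patch: mtk_wed_attach() below is
 * reached through an RCU-protected ops pointer. mtk_wed_add_hw() publishes
 * the ops table with rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops), and the
 * WLAN-side wrapper in include/linux/soc/mediatek/mtk_wed.h (not shown in
 * this excerpt) is assumed to look roughly like the sketch below, which is
 * why the function carries __releases(RCU) and starts with RCU_LOCKDEP_WARN().
 */
static inline int mtk_wed_device_attach_sketch(struct mtk_wed_device *dev)
{
	const struct mtk_wed_ops *ops;
	int ret = -ENODEV;

	rcu_read_lock();
	ops = rcu_dereference(mtk_soc_wed_ops);
	if (ops)
		ret = ops->attach(dev);	/* attach() drops the RCU read lock itself */
	else
		rcu_read_unlock();

	return ret;
}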
+static int +mtk_wed_attach(struct mtk_wed_device *dev) + __releases(RCU) +{ + struct mtk_wed_hw *hw; + int ret = 0; + + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "mtk_wed_attach without holding the RCU read lock"); + + if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 || + !try_module_get(THIS_MODULE)) + ret = -ENODEV; + + rcu_read_unlock(); + + if (ret) + return ret; + + mutex_lock(&hw_lock); + + hw = mtk_wed_assign(dev); + if (!hw) { + module_put(THIS_MODULE); + ret = -ENODEV; + goto out; + } + + dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index); + + dev->hw = hw; + dev->dev = hw->dev; + dev->irq = hw->irq; + dev->wdma_idx = hw->index; + + if (hw->eth->dma_dev == hw->eth->dev && + of_dma_is_coherent(hw->eth->dev->of_node)) + mtk_eth_set_dma_device(hw->eth, hw->dev); + + ret = mtk_wed_buffer_alloc(dev); + if (ret) { + mtk_wed_detach(dev); + goto out; + } + + mtk_wed_hw_init_early(dev); + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); + +out: + mutex_unlock(&hw_lock); + + return ret; +} + +static int +mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->tx_ring[idx]; + + /* + * Tx ring redirection: + * Instead of configuring the WLAN PDMA TX ring directly, the WLAN + * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) + * registers. + * + * WED driver posts its own DMA ring as WLAN PDMA TX and configures it + * into MTK_WED_WPDMA_RING_TX(n) registers. + * It gets filled with packets picked up from WED TX ring and from + * WDMA RX. + */ + + BUG_ON(idx > ARRAY_SIZE(dev->tx_ring)); + + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE)) + return -ENOMEM; + + if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) + return -ENOMEM; + + ring->reg_base = MTK_WED_RING_TX(idx); + ring->wpdma = regs; + + /* WED -> WPDMA */ + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); + + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, + ring->desc_phys); + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, + MTK_WED_TX_RING_SIZE); + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + + return 0; +} + +static int +mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->txfree_ring; + int i; + + /* + * For txfree event handling, the same DMA ring is shared between WED + * and WLAN. 
The WLAN driver accesses the ring index registers through + * WED + */ + ring->reg_base = MTK_WED_RING_RX(1); + ring->wpdma = regs; + + for (i = 0; i < 12; i += 4) { + u32 val = readl(regs + i); + + wed_w32(dev, MTK_WED_RING_RX(1) + i, val); + wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val); + } + + return 0; +} + +static u32 +mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) +{ + u32 val; + + val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); + wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); + val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK; + if (!dev->hw->num_flows) + val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + if (val && net_ratelimit()) + pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); + + val = wed_r32(dev, MTK_WED_INT_STATUS); + val &= mask; + wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ + + return val; +} + +static void +mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) +{ + if (!dev->running) + return; + + mtk_wed_set_ext_int(dev, !!mask); + wed_w32(dev, MTK_WED_INT_MASK, mask); +} + +int mtk_wed_flow_add(int index) +{ + struct mtk_wed_hw *hw = hw_list[index]; + int ret; + + if (!hw || !hw->wed_dev) + return -ENODEV; + + if (hw->num_flows) { + hw->num_flows++; + return 0; + } + + mutex_lock(&hw_lock); + if (!hw->wed_dev) { + ret = -ENODEV; + goto out; + } + + ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); + if (!ret) + hw->num_flows++; + mtk_wed_set_ext_int(hw->wed_dev, true); + +out: + mutex_unlock(&hw_lock); + + return ret; +} + +void mtk_wed_flow_remove(int index) +{ + struct mtk_wed_hw *hw = hw_list[index]; + + if (!hw) + return; + + if (--hw->num_flows) + return; + + mutex_lock(&hw_lock); + if (!hw->wed_dev) + goto out; + + hw->wed_dev->wlan.offload_disable(hw->wed_dev); + mtk_wed_set_ext_int(hw->wed_dev, true); + +out: + mutex_unlock(&hw_lock); +} + +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, int index) +{ + static const struct mtk_wed_ops wed_ops = { + .attach = mtk_wed_attach, + .tx_ring_setup = mtk_wed_tx_ring_setup, + .txfree_ring_setup = mtk_wed_txfree_ring_setup, + .start = mtk_wed_start, + .stop = mtk_wed_stop, + .reset_dma = mtk_wed_reset_dma, + .reg_read = wed_r32, + .reg_write = wed_w32, + .irq_get = mtk_wed_irq_get, + .irq_set_mask = mtk_wed_irq_set_mask, + .detach = mtk_wed_detach, + }; + struct device_node *eth_np = eth->dev->of_node; + struct platform_device *pdev; + struct mtk_wed_hw *hw; + struct regmap *regs; + int irq; + + if (!np) + return; + + pdev = of_find_device_by_node(np); + if (!pdev) + return; + + get_device(&pdev->dev); + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return; + + regs = syscon_regmap_lookup_by_phandle(np, NULL); + if (!regs) + return; + + rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); + + mutex_lock(&hw_lock); + + if (WARN_ON(hw_list[index])) + goto unlock; + + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + hw->node = np; + hw->regs = regs; + hw->eth = eth; + hw->dev = &pdev->dev; + hw->wdma = wdma; + hw->index = index; + hw->irq = irq; + hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, + "mediatek,pcie-mirror"); + hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, + "mediatek,hifsys"); + if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { + kfree(hw); + goto unlock; + } + + if (!index) { + regmap_write(hw->mirror, 0, 0); + regmap_write(hw->mirror, 4, 0); + } + mtk_wed_hw_add_debugfs(hw); + + hw_list[index] = hw; + +unlock: + mutex_unlock(&hw_lock); +} + +void mtk_wed_exit(void) +{ + int i; + + rcu_assign_pointer(mtk_soc_wed_ops, NULL); + + synchronize_rcu(); + + for 
(i = 0; i < ARRAY_SIZE(hw_list); i++) { + struct mtk_wed_hw *hw; + + hw = hw_list[i]; + if (!hw) + continue; + + hw_list[i] = NULL; + debugfs_remove(hw->debugfs_dir); + put_device(hw->dev); + kfree(hw); + } +} diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h new file mode 100644 index 000000000000..404c9a9b130d --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau */ + +#ifndef __MTK_WED_PRIV_H +#define __MTK_WED_PRIV_H + +#include +#include +#include + +struct mtk_eth; + +struct mtk_wed_hw { + struct device_node *node; + struct mtk_eth *eth; + struct regmap *regs; + struct regmap *hifsys; + struct device *dev; + void __iomem *wdma; + struct regmap *mirror; + struct dentry *debugfs_dir; + struct mtk_wed_device *wed_dev; + u32 debugfs_reg; + u32 num_flows; + char dirname[5]; + int irq; + int index; +}; + + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +static inline void +wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + regmap_write(dev->hw->regs, reg, val); +} + +static inline u32 +wed_r32(struct mtk_wed_device *dev, u32 reg) +{ + unsigned int val; + + regmap_read(dev->hw->regs, reg, &val); + + return val; +} + +static inline void +wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + writel(val, dev->hw->wdma + reg); +} + +static inline u32 +wdma_r32(struct mtk_wed_device *dev, u32 reg) +{ + return readl(dev->hw->wdma + reg); +} + +static inline u32 +wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg) +{ + if (!dev->tx_ring[ring].wpdma) + return 0; + + return readl(dev->tx_ring[ring].wpdma + reg); +} + +static inline void +wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) +{ + if (!dev->tx_ring[ring].wpdma) + return; + + writel(val, dev->tx_ring[ring].wpdma + reg); +} + +static inline u32 +wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) +{ + if (!dev->txfree_ring.wpdma) + return 0; + + return readl(dev->txfree_ring.wpdma + reg); +} + +static inline void +wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + if (!dev->txfree_ring.wpdma) + return; + + writel(val, dev->txfree_ring.wpdma + reg); +} + +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, int index); +void mtk_wed_exit(void); +int mtk_wed_flow_add(int index); +void mtk_wed_flow_remove(int index); +#else +static inline void +mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, int index) +{ +} +static inline void +mtk_wed_exit(void) +{ +} +static inline int mtk_wed_flow_add(int index) +{ + return -EINVAL; +} +static inline void mtk_wed_flow_remove(int index) +{ +} +#endif + +#ifdef CONFIG_DEBUG_FS +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw); +#else +static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) +{ +} +#endif + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c new file mode 100644 index 000000000000..a81d3fd1a439 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau */ + +#include +#include "mtk_wed.h" +#include "mtk_wed_regs.h" + +struct reg_dump { + const char *name; + u16 offset; + u8 type; + u8 base; +}; + +enum { + DUMP_TYPE_STRING, + DUMP_TYPE_WED, + DUMP_TYPE_WDMA, + DUMP_TYPE_WPDMA_TX, + DUMP_TYPE_WPDMA_TXFREE, +}; + +#define DUMP_STR(_str) { _str, 0, 
DUMP_TYPE_STRING } +#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } +#define DUMP_RING(_prefix, _base, ...) \ + { _prefix " BASE", _base, __VA_ARGS__ }, \ + { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ + { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \ + { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } + +#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) +#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) + +#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) +#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA) + +#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) +#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) + +static void +print_reg_val(struct seq_file *s, const char *name, u32 val) +{ + seq_printf(s, "%-32s %08x\n", name, val); +} + +static void +dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, + const struct reg_dump *regs, int n_regs) +{ + const struct reg_dump *cur; + u32 val; + + for (cur = regs; cur < ®s[n_regs]; cur++) { + switch (cur->type) { + case DUMP_TYPE_STRING: + seq_printf(s, "%s======== %s:\n", + cur > regs ? "\n" : "", + cur->name); + continue; + case DUMP_TYPE_WED: + val = wed_r32(dev, cur->offset); + break; + case DUMP_TYPE_WDMA: + val = wdma_r32(dev, cur->offset); + break; + case DUMP_TYPE_WPDMA_TX: + val = wpdma_tx_r32(dev, cur->base, cur->offset); + break; + case DUMP_TYPE_WPDMA_TXFREE: + val = wpdma_txfree_r32(dev, cur->offset); + break; + } + print_reg_val(s, cur->name, val); + } +} + + +static int +wed_txinfo_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WED TX"), + DUMP_WED(WED_TX_MIB(0)), + DUMP_WED_RING(WED_RING_TX(0)), + + DUMP_WED(WED_TX_MIB(1)), + DUMP_WED_RING(WED_RING_TX(1)), + + DUMP_STR("WPDMA TX"), + DUMP_WED(WED_WPDMA_TX_MIB(0)), + DUMP_WED_RING(WED_WPDMA_RING_TX(0)), + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)), + + DUMP_WED(WED_WPDMA_TX_MIB(1)), + DUMP_WED_RING(WED_WPDMA_RING_TX(1)), + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)), + + DUMP_STR("WPDMA TX"), + DUMP_WPDMA_TX_RING(0), + DUMP_WPDMA_TX_RING(1), + + DUMP_STR("WED WDMA RX"), + DUMP_WED(WED_WDMA_RX_MIB(0)), + DUMP_WED_RING(WED_WDMA_RING_RX(0)), + DUMP_WED(WED_WDMA_RX_THRES(0)), + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)), + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)), + + DUMP_WED(WED_WDMA_RX_MIB(1)), + DUMP_WED_RING(WED_WDMA_RING_RX(1)), + DUMP_WED(WED_WDMA_RX_THRES(1)), + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)), + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)), + + DUMP_STR("WDMA RX"), + DUMP_WDMA(WDMA_GLO_CFG), + DUMP_WDMA_RING(WDMA_RING_RX(0)), + DUMP_WDMA_RING(WDMA_RING_RX(1)), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (!dev) + return 0; + + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_txinfo); + + +static int +mtk_wed_reg_set(void *data, u64 val) +{ + struct mtk_wed_hw *hw = data; + + regmap_write(hw->regs, hw->debugfs_reg, val); + + return 0; +} + +static int +mtk_wed_reg_get(void *data, u64 *val) +{ + struct mtk_wed_hw *hw = data; + unsigned int regval; + int ret; + + ret = regmap_read(hw->regs, hw->debugfs_reg, ®val); + if (ret) + return ret; + + *val = regval; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set, + "0x%08llx\n"); + +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) +{ + struct dentry *dir; + + snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index); + dir = 
debugfs_create_dir(hw->dirname, NULL); + if (!dir) + return; + + hw->debugfs_dir = dir; + debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); + debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); + debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); +} diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c new file mode 100644 index 000000000000..a5d9d8a5bce2 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau */ + +#include +#include + +const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; +EXPORT_SYMBOL_GPL(mtk_soc_wed_ops); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h new file mode 100644 index 000000000000..0a0465ea58b4 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau */ + +#ifndef __MTK_WED_REGS_H +#define __MTK_WED_REGS_H + +#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) +#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) +#define MTK_WDMA_DESC_CTRL_BURST BIT(16) +#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) +#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) +#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) + +struct mtk_wdma_desc { + __le32 buf0; + __le32 ctrl; + __le32 buf1; + __le32 info; +} __packed __aligned(4); + +#define MTK_WED_RESET 0x008 +#define MTK_WED_RESET_TX_BM BIT(0) +#define MTK_WED_RESET_TX_FREE_AGENT BIT(4) +#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) +#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) +#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) +#define MTK_WED_RESET_WED_TX_DMA BIT(12) +#define MTK_WED_RESET_WDMA_RX_DRV BIT(17) +#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) +#define MTK_WED_RESET_WED BIT(31) + +#define MTK_WED_CTRL 0x00c +#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0) +#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) +#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) +#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) +#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) +#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) +#define MTK_WED_CTRL_RESERVE_EN BIT(12) +#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) +#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) +#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) + +#define MTK_WED_EXT_INT_STATUS 0x020 +#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) +#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1) +#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12) +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20) +#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21) +#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) +#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ + MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ + 
MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \ + MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR) + +#define MTK_WED_EXT_INT_MASK 0x028 + +#define MTK_WED_STATUS 0x060 +#define MTK_WED_STATUS_TX GENMASK(15, 8) + +#define MTK_WED_TX_BM_CTRL 0x080 +#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) +#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) +#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) + +#define MTK_WED_TX_BM_BASE 0x084 + +#define MTK_WED_TX_BM_TKID 0x088 +#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) +#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) + +#define MTK_WED_TX_BM_BUF_LEN 0x08c + +#define MTK_WED_TX_BM_INTF 0x09c +#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0) +#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16) +#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28) +#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29) + +#define MTK_WED_TX_BM_DYN_THR 0x0a0 +#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0) +#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16) + +#define MTK_WED_INT_STATUS 0x200 +#define MTK_WED_INT_MASK 0x204 + +#define MTK_WED_GLO_CFG 0x208 +#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0) +#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2) +#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) +#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7) +#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) +#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9) +#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) +#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) +#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) +#define MTK_WED_GLO_CFG_SW_RESET BIT(24) +#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) +#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27) +#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29) +#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MTK_WED_RESET_IDX 0x20c +#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) + +#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) + +#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) + +#define MTK_WED_WPDMA_INT_TRIGGER 0x504 +#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) +#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) + +#define MTK_WED_WPDMA_GLO_CFG 0x508 +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0) +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3) +#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) +#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) +#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9) +#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) +#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) +#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) +#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24) +#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) +#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27) +#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29) +#define 
MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MTK_WED_WPDMA_RESET_IDX 0x50c +#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WED_WPDMA_INT_CTRL 0x520 +#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) + +#define MTK_WED_WPDMA_INT_MASK 0x524 + +#define MTK_WED_PCIE_CFG_BASE 0x560 + +#define MTK_WED_PCIE_INT_TRIGGER 0x570 +#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) + +#define MTK_WED_WPDMA_CFG_BASE 0x580 + +#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) +#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) + +#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) +#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) +#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) +#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) + +#define MTK_WED_WDMA_GLO_CFG 0xa04 +#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) +#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) +#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13) +#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16) +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17) +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18) +#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19) +#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20) +#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21) +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22) +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23) +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24) +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25) +#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26) +#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30) + +#define MTK_WED_WDMA_RESET_IDX 0xa08 +#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) +#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) + +#define MTK_WED_WDMA_INT_TRIGGER 0xa28 +#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) + +#define MTK_WED_WDMA_INT_CTRL 0xa2c +#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) + +#define MTK_WED_WDMA_OFFSET0 0xaa4 +#define MTK_WED_WDMA_OFFSET1 0xaa8 + +#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4) +#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) +#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) + +#define MTK_WED_RING_OFS_BASE 0x00 +#define MTK_WED_RING_OFS_COUNT 0x04 +#define MTK_WED_RING_OFS_CPU_IDX 0x08 +#define MTK_WED_RING_OFS_DMA_IDX 0x0c + +#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) + +#define MTK_WDMA_GLO_CFG 0x204 +#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26) + +#define MTK_WDMA_RESET_IDX 0x208 +#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WDMA_INT_MASK 0x228 +#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0) +#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16) +#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28) +#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29) +#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) +#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) + +#define MTK_WDMA_INT_GRP1 0x250 +#define MTK_WDMA_INT_GRP2 0x254 + +#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 
0x4 : 0x0) +#define MTK_PCIE_MIRROR_MAP_EN BIT(0) +#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) + +/* DMA channel mapping */ +#define HIFSYS_DMA_AG_MAP 0x008 + +#endif diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h new file mode 100644 index 000000000000..7e00cca06709 --- /dev/null +++ b/include/linux/soc/mediatek/mtk_wed.h @@ -0,0 +1,131 @@ +#ifndef __MTK_WED_H +#define __MTK_WED_H + +#include +#include +#include +#include + +#define MTK_WED_TX_QUEUES 2 + +struct mtk_wed_hw; +struct mtk_wdma_desc; + +struct mtk_wed_ring { + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + int size; + + u32 reg_base; + void __iomem *wpdma; +}; + +struct mtk_wed_device { +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + const struct mtk_wed_ops *ops; + struct device *dev; + struct mtk_wed_hw *hw; + bool init_done, running; + int wdma_idx; + int irq; + + struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES]; + struct mtk_wed_ring txfree_ring; + struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES]; + + struct { + int size; + void **pages; + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + } buf_ring; + + /* filled by driver: */ + struct { + struct pci_dev *pci_dev; + + u32 wpdma_phys; + + u16 token_start; + unsigned int nbuf; + + u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id); + int (*offload_enable)(struct mtk_wed_device *wed); + void (*offload_disable)(struct mtk_wed_device *wed); + } wlan; +#endif +}; + +struct mtk_wed_ops { + int (*attach)(struct mtk_wed_device *dev); + int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring, + void __iomem *regs); + int (*txfree_ring_setup)(struct mtk_wed_device *dev, + void __iomem *regs); + void (*detach)(struct mtk_wed_device *dev); + + void (*stop)(struct mtk_wed_device *dev); + void (*start)(struct mtk_wed_device *dev, u32 irq_mask); + void (*reset_dma)(struct mtk_wed_device *dev); + + u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg); + void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val); + + u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask); + void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask); +}; + +extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; + +static inline int +mtk_wed_device_attach(struct mtk_wed_device *dev) +{ + int ret = -ENODEV; + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + rcu_read_lock(); + dev->ops = rcu_dereference(mtk_soc_wed_ops); + if (dev->ops) + ret = dev->ops->attach(dev); + else + rcu_read_unlock(); + + if (ret) + dev->ops = NULL; +#endif + + return ret; +} + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +#define mtk_wed_device_active(_dev) !!(_dev)->ops +#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev) +#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask) +#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \ + (_dev)->ops->tx_ring_setup(_dev, _ring, _regs) +#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \ + (_dev)->ops->txfree_ring_setup(_dev, _regs) +#define mtk_wed_device_reg_read(_dev, _reg) \ + (_dev)->ops->reg_read(_dev, _reg) +#define mtk_wed_device_reg_write(_dev, _reg, _val) \ + (_dev)->ops->reg_write(_dev, _reg, _val) +#define mtk_wed_device_irq_get(_dev, _mask) \ + (_dev)->ops->irq_get(_dev, _mask) +#define mtk_wed_device_irq_set_mask(_dev, _mask) \ + (_dev)->ops->irq_set_mask(_dev, _mask) +#else +static inline bool mtk_wed_device_active(struct mtk_wed_device *dev) +{ + return false; +} +#define mtk_wed_device_detach(_dev) do {} while (0) +#define mtk_wed_device_start(_dev, _mask) do {} while (0) +#define 
mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_txfree_ring_setup(_dev, _ring, _regs) -ENODEV +#define mtk_wed_device_reg_read(_dev, _reg) 0 +#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0) +#define mtk_wed_device_irq_get(_dev, _mask) 0 +#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0) +#endif + +#endif -- cgit v1.2.3 From a333215e10cb5d3b1e0685ca117f0e9452215485 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:48 +0200 Subject: net: ethernet: mtk_eth_soc: implement flow offloading to WED devices This allows hardware flow offloading from Ethernet to WLAN on MT7622 SoC Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_ppe.c | 18 ++++++++ drivers/net/ethernet/mediatek/mtk_ppe.h | 14 ++++--- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 56 ++++++++++++++++++++++++- drivers/net/ethernet/mediatek/mtk_wed.h | 7 ++++ include/linux/netdevice.h | 7 ++++ net/core/dev.c | 4 ++ 6 files changed, 98 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 3ad10c793308..472bcd3269a7 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -329,6 +329,24 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid) return 0; } +int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, + int bss, int wcid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry); + u32 *ib2 = mtk_foe_entry_ib2(entry); + + *ib2 &= ~MTK_FOE_IB2_PORT_MG; + *ib2 |= MTK_FOE_IB2_WDMA_WINFO; + if (wdma_idx) + *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; + + l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); + + return 0; +} + static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) { return !(entry->ib1 & MTK_FOE_IB1_STATIC) && diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 242fb8f2ae65..df8ccaf48171 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -48,9 +48,9 @@ enum { #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) #define MTK_FOE_IB2_MULTICAST BIT(8) -#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12) -#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16) -#define MTK_FOE_IB2_WHNAT_NAT BIT(17) +#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) +#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) +#define MTK_FOE_IB2_WDMA_WINFO BIT(17) #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) @@ -58,9 +58,9 @@ enum { #define MTK_FOE_IB2_DSCP GENMASK(31, 24) -#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0) -#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6) -#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14) +#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) +#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) +#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) enum { MTK_FOE_STATE_INVALID, @@ -281,6 +281,8 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry, int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port); int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); +int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, + int bss, int wcid); int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct 
mtk_foe_entry *entry, u16 timestamp); int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 7bb1f20002b5..bcf342bb9051 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -10,6 +10,7 @@ #include #include #include "mtk_eth_soc.h" +#include "mtk_wed.h" struct mtk_flow_data { struct ethhdr eth; @@ -39,6 +40,7 @@ struct mtk_flow_entry { struct rhash_head node; unsigned long cookie; u16 hash; + s8 wed_index; }; static const struct rhashtable_params mtk_flow_ht_params = { @@ -80,6 +82,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) memcpy(dest, src, act->mangle.mask ? 2 : 4); } +static int +mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) +{ + struct net_device_path_ctx ctx = { + .dev = dev, + .daddr = addr, + }; + struct net_device_path path = {}; + + if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) + return -1; + + if (!dev->netdev_ops->ndo_fill_forward_path) + return -1; + + if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path)) + return -1; + + if (path.type != DEV_PATH_MTK_WDMA) + return -1; + + info->wdma_idx = path.mtk_wdma.wdma_idx; + info->queue = path.mtk_wdma.queue; + info->bss = path.mtk_wdma.bss; + info->wcid = path.mtk_wdma.wcid; + + return 0; +} + static int mtk_flow_mangle_ports(const struct flow_action_entry *act, @@ -149,10 +180,20 @@ mtk_flow_get_dsa_port(struct net_device **dev) static int mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, - struct net_device *dev) + struct net_device *dev, const u8 *dest_mac, + int *wed_index) { + struct mtk_wdma_info info = {}; int pse_port, dsa_port; + if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { + mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss, + info.wcid); + pse_port = 3; + *wed_index = info.wdma_idx; + goto out; + } + dsa_port = mtk_flow_get_dsa_port(&dev); if (dsa_port >= 0) mtk_foe_entry_set_dsa(foe, dsa_port); @@ -164,6 +205,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, else return -EOPNOTSUPP; +out: mtk_foe_entry_set_pse_port(foe, pse_port); return 0; @@ -179,6 +221,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) struct net_device *odev = NULL; struct mtk_flow_entry *entry; int offload_type = 0; + int wed_index = -1; u16 addr_type = 0; u32 timestamp; u8 l4proto = 0; @@ -326,10 +369,14 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) if (data.pppoe.num == 1) mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid); - err = mtk_flow_set_output_device(eth, &foe, odev); + err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, + &wed_index); if (err) return err; + if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0) + return err; + entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; @@ -343,6 +390,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) } entry->hash = hash; + entry->wed_index = wed_index; err = rhashtable_insert_fast(ð->flow_table, &entry->node, mtk_flow_ht_params); if (err < 0) @@ -353,6 +401,8 @@ clear_flow: mtk_foe_entry_clear(ð->ppe, hash); free: kfree(entry); + if (wed_index >= 0) + mtk_wed_flow_remove(wed_index); return err; } @@ -369,6 +419,8 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) mtk_foe_entry_clear(ð->ppe, entry->hash); 
rhashtable_remove_fast(ð->flow_table, &entry->node, mtk_flow_ht_params); + if (entry->wed_index >= 0) + mtk_wed_flow_remove(entry->wed_index); kfree(entry); return 0; diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h index 404c9a9b130d..981ec613f4b0 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.h +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -7,6 +7,7 @@ #include #include #include +#include struct mtk_eth; @@ -27,6 +28,12 @@ struct mtk_wed_hw { int index; }; +struct mtk_wdma_info { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; +}; #ifdef CONFIG_NET_MEDIATEK_SOC_WED static inline void diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b6a1e7f643da..7b2a0b739684 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -862,6 +862,7 @@ enum net_device_path_type { DEV_PATH_BRIDGE, DEV_PATH_PPPOE, DEV_PATH_DSA, + DEV_PATH_MTK_WDMA, }; struct net_device_path { @@ -887,6 +888,12 @@ struct net_device_path { int port; u16 proto; } dsa; + struct { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; + } mtk_wdma; }; }; diff --git a/net/core/dev.c b/net/core/dev.c index 2ec17358d7b4..d5a362d53b34 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -701,6 +701,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, if (WARN_ON_ONCE(last_dev == ctx.dev)) return -1; } + + if (!ctx.dev) + return ret; + path = dev_fwd_path(stack); if (!path) return -1; -- cgit v1.2.3 From 817b2fdf166766b462a034a8cc87d0b53c0054ec Mon Sep 17 00:00:00 2001 From: David Bentham Date: Tue, 5 Apr 2022 21:57:50 +0200 Subject: net: ethernet: mtk_eth_soc: add ipv6 flow offload support Add the missing IPv6 flow offloading support for routing only. Hardware flow offloading is done by the packet processing engine (PPE) of the Ethernet MAC and as it doesn't support mangling of IPv6 packets, IPv6 NAT cannot be supported. Signed-off-by: David Bentham Signed-off-by: Felix Fietkau Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 28 +++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index bcf342bb9051..0113cddcebf4 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,11 @@ struct mtk_flow_data { __be32 src_addr; __be32 dst_addr; } v4; + + struct { + struct in6_addr src_addr; + struct in6_addr dst_addr; + } v6; }; __be16 src_port; @@ -65,6 +71,14 @@ mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, data->v4.dst_addr, data->dst_port); } +static int +mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data) +{ + return mtk_foe_entry_set_ipv6_tuple(foe, + data->v6.src_addr.s6_addr32, data->src_port, + data->v6.dst_addr.s6_addr32, data->dst_port); +} + static void mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) { @@ -296,6 +310,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) case FLOW_DISSECTOR_KEY_IPV4_ADDRS: offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; + break; default: return -EOPNOTSUPP; } @@ -331,6 +348,17 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) mtk_flow_set_ipv4_addr(&foe, &data, false); } + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs addrs; + + flow_rule_match_ipv6_addrs(rule, &addrs); + + data.v6.src_addr = addrs.key->src; + data.v6.dst_addr = addrs.key->dst; + + mtk_flow_set_ipv6_addr(&foe, &data); + } + flow_action_for_each(i, act, &rule->action) { if (act->id != FLOW_ACTION_MANGLE) continue; -- cgit v1.2.3 From bb14c19122b768c3d659da2e91fb9324a2374d06 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:51 +0200 Subject: net: ethernet: mtk_eth_soc: support TC_SETUP_BLOCK for PPE offload This allows offload entries to be created from user space Signed-off-by: Felix Fietkau Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 0113cddcebf4..da3bc93676f8 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -563,10 +563,13 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - if (type == TC_SETUP_FT) + switch (type) { + case TC_SETUP_BLOCK: + case TC_SETUP_FT: return mtk_eth_setup_tc_block(dev, type_data); - - return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } } int mtk_eth_offload_init(struct mtk_eth *eth) -- cgit v1.2.3 From 1ccc723b5829d63a29627e1d2de2da84efd037c2 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:52 +0200 Subject: net: ethernet: mtk_eth_soc: allocate struct mtk_ppe separately Preparation for adding more data to it, which will increase its size. Signed-off-by: Felix Fietkau Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 ++++++----- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- drivers/net/ethernet/mediatek/mtk_ppe.c | 11 ++++++++--- drivers/net/ethernet/mediatek/mtk_ppe.h | 3 +-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 12 ++++++------ 5 files changed, 22 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4d7c542d89fb..5a2f3cb26e66 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2269,7 +2269,7 @@ static int mtk_open(struct net_device *dev) if (err) return err; - if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) + if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0) gdm_config = MTK_GDMA_TO_PPE; mtk_gdm_config(eth, gdm_config); @@ -2343,7 +2343,7 @@ static int mtk_stop(struct net_device *dev) mtk_dma_free(eth); if (eth->soc->offload_version) - mtk_ppe_stop(ð->ppe); + mtk_ppe_stop(eth->ppe); return 0; } @@ -3262,10 +3262,11 @@ static int mtk_probe(struct platform_device *pdev) } if (eth->soc->offload_version) { - err = mtk_ppe_init(ð->ppe, eth->dev, - eth->base + MTK_ETH_PPE_BASE, 2); - if (err) + eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); + if (!eth->ppe) { + err = -ENOMEM; goto err_free_dev; + } err = mtk_eth_offload_init(eth); if (err) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 74661682fd92..c98c7ee42c6f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -985,7 +985,7 @@ struct mtk_eth { u32 rx_dma_l4_valid; int ip_align; - struct mtk_ppe ppe; + struct mtk_ppe *ppe; struct rhashtable flow_table; }; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 472bcd3269a7..c56518272729 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -384,10 +384,15 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, return hash; } -int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, +struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version) { struct mtk_foe_entry *foe; + struct mtk_ppe *ppe; + + ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); + if (!ppe) + return NULL; /* need to allocate a separate device, since it PPE DMA access is * not coherent. 
@@ -399,13 +404,13 @@ int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe), &ppe->foe_phys, GFP_KERNEL); if (!foe) - return -ENOMEM; + return NULL; ppe->foe_table = foe; mtk_ppe_debugfs_init(ppe); - return 0; + return ppe; } static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index df8ccaf48171..190d2560ac86 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -246,8 +246,7 @@ struct mtk_ppe { void *acct_table; }; -int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base, - int version); +struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); int mtk_ppe_start(struct mtk_ppe *ppe); int mtk_ppe_stop(struct mtk_ppe *ppe); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index da3bc93676f8..76a94ec85232 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -411,7 +411,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) entry->cookie = f->cookie; timestamp = mtk_eth_timestamp(eth); - hash = mtk_foe_entry_commit(ð->ppe, &foe, timestamp); + hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); if (hash < 0) { err = hash; goto free; @@ -426,7 +426,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) return 0; clear_flow: - mtk_foe_entry_clear(ð->ppe, hash); + mtk_foe_entry_clear(eth->ppe, hash); free: kfree(entry); if (wed_index >= 0) @@ -444,7 +444,7 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) if (!entry) return -ENOENT; - mtk_foe_entry_clear(ð->ppe, entry->hash); + mtk_foe_entry_clear(eth->ppe, entry->hash); rhashtable_remove_fast(ð->flow_table, &entry->node, mtk_flow_ht_params); if (entry->wed_index >= 0) @@ -466,7 +466,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) if (!entry) return -ENOENT; - timestamp = mtk_foe_entry_timestamp(ð->ppe, entry->hash); + timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); if (timestamp < 0) return -ETIMEDOUT; @@ -522,7 +522,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) struct flow_block_cb *block_cb; flow_setup_cb_t *cb; - if (!eth->ppe.foe_table) + if (!eth->ppe || !eth->ppe->foe_table) return -EOPNOTSUPP; if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) @@ -574,7 +574,7 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, int mtk_eth_offload_init(struct mtk_eth *eth) { - if (!eth->ppe.foe_table) + if (!eth->ppe || !eth->ppe->foe_table) return 0; return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); -- cgit v1.2.3 From c4f033d9e03e99fb024b2300d61e099aa7646bb7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:53 +0200 Subject: net: ethernet: mtk_eth_soc: rework hardware flow table management The hardware was designed to handle flow detection and creation of flow entries by itself, relying on the software primarily for filling in egress routing information. When there is a hash collision between multiple flows, this allows the hardware to maintain the entry for the most active flow. Additionally, the hardware only keeps offloading active for entries with at least 30 packets per second. 
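For reference, the RX-side hook this commit introduces can be condensed as follows (a sketch distilled from the mtk_ppe.h hunk later in this same patch, not additional code; the HZ / 10 throttle is the 100 ms rate limit described in the next paragraph):

	/* Called from mtk_poll_rx() when the PPE CPU reason field of the RX
	 * descriptor reports MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED,
	 * i.e. an unbound hardware flow has reached the minimum packet rate.
	 */
	static inline void
	mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
	{
		u16 now = (u16)jiffies;

		if (!ppe)
			return;

		/* look up software flows for this hash at most every 100ms */
		if ((u16)(now - ppe->foe_check_time[hash]) < HZ / 10)
			return;

		ppe->foe_check_time[hash] = now;
		__mtk_ppe_check_skb(ppe, skb, hash);	/* bind a matching flow */
	}
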
With this rework, the code no longer creates a hardware entries directly. Instead, the hardware entry is only created when the PPE reports a matching unbound flow with the minimum target rate. In order to reduce CPU overhead, looking for flows belonging to a hash entry is rate limited to once every 100ms. This rework is also used as preparation for emulating bridge offload by managing L4 offload entries on demand. Signed-off-by: Felix Fietkau Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 10 +- drivers/net/ethernet/mediatek/mtk_ppe.c | 134 +++++++++++++++++++++--- drivers/net/ethernet/mediatek/mtk_ppe.h | 38 +++++-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 42 ++------ 4 files changed, 170 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 5a2f3cb26e66..209d00f56f62 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "mtk_eth_soc.h" @@ -1239,7 +1240,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, struct net_device *netdev; unsigned int pktlen; dma_addr_t dma_addr; - u32 hash; + u32 hash, reason; int mac; ring = mtk_get_rx_ring(eth); @@ -1315,6 +1316,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); } + reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); + if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) + mtk_ppe_check_skb(eth->ppe, skb, + trxd.rxd4 & MTK_RXD4_FOE_ENTRY); + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && (trxd.rxd2 & RX_DMA_VTAG)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -3262,7 +3268,7 @@ static int mtk_probe(struct platform_device *pdev) } if (eth->soc->offload_version) { - eth->ppe = mtk_ppe_init(eth->dev, eth->base + MTK_ETH_PPE_BASE, 2); + eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2); if (!eth->ppe) { err = -ENOMEM; goto err_free_dev; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index c56518272729..a7fe33bbde63 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -6,9 +6,12 @@ #include #include #include +#include "mtk_eth_soc.h" #include "mtk_ppe.h" #include "mtk_ppe_regs.h" +static DEFINE_SPINLOCK(ppe_lock); + static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) { writel(val, ppe->base + reg); @@ -41,6 +44,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) return ppe_m32(ppe, reg, val, 0); } +static u32 mtk_eth_timestamp(struct mtk_eth *eth) +{ + return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; +} + static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) { int ret; @@ -353,26 +361,59 @@ static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; } -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, - u16 timestamp) +static bool +mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) +{ + int type, len; + + if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) + return false; + + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); + if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) + len = offsetof(struct mtk_foe_entry, ipv6._rsv); + else + len = offsetof(struct mtk_foe_entry, ipv4.ib2); + + return !memcmp(&entry->data.data, 
&data->data, len - 4); +} + +static void +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) { struct mtk_foe_entry *hwe; - u32 hash; + struct mtk_foe_entry foe; + + spin_lock_bh(&ppe_lock); + if (entry->hash == 0xffff) + goto out; + hwe = &ppe->foe_table[entry->hash]; + memcpy(&foe, hwe, sizeof(foe)); + if (!mtk_flow_entry_match(entry, &foe)) { + entry->hash = 0xffff; + goto out; + } + + entry->data.ib1 = foe.ib1; + +out: + spin_unlock_bh(&ppe_lock); +} + +static void +__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 hash) +{ + struct mtk_foe_entry *hwe; + u16 timestamp; + + timestamp = mtk_eth_timestamp(ppe->eth); timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP; entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp); - hash = mtk_ppe_hash_entry(entry); hwe = &ppe->foe_table[hash]; - if (!mtk_foe_entry_usable(hwe)) { - hwe++; - hash++; - - if (!mtk_foe_entry_usable(hwe)) - return -ENOSPC; - } - memcpy(&hwe->data, &entry->data, sizeof(hwe->data)); wmb(); hwe->ib1 = entry->ib1; @@ -380,13 +421,77 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, dma_wmb(); mtk_ppe_cache_clear(ppe); +} - return hash; +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + spin_lock_bh(&ppe_lock); + hlist_del_init(&entry->list); + if (entry->hash != 0xffff) { + ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; + ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, + MTK_FOE_STATE_BIND); + dma_wmb(); + } + entry->hash = 0xffff; + spin_unlock_bh(&ppe_lock); +} + +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + u32 hash = mtk_ppe_hash_entry(&entry->data); + + entry->hash = 0xffff; + spin_lock_bh(&ppe_lock); + hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); + spin_unlock_bh(&ppe_lock); + + return 0; +} + +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) +{ + struct hlist_head *head = &ppe->foe_flow[hash / 2]; + struct mtk_flow_entry *entry; + struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; + bool found = false; + + if (hlist_empty(head)) + return; + + spin_lock_bh(&ppe_lock); + hlist_for_each_entry(entry, head, list) { + if (found || !mtk_flow_entry_match(entry, hwe)) { + if (entry->hash != 0xffff) + entry->hash = 0xffff; + continue; + } + + entry->hash = hash; + __mtk_foe_entry_commit(ppe, &entry->data, hash); + found = true; + } + spin_unlock_bh(&ppe_lock); +} + +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; + u16 timestamp; + + mtk_flow_entry_update(ppe, entry); + timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; + + if (timestamp > now) + return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; + else + return now - timestamp; } -struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version) { + struct device *dev = eth->dev; struct mtk_foe_entry *foe; struct mtk_ppe *ppe; @@ -398,6 +503,7 @@ struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, * not coherent. 
*/ ppe->base = base; + ppe->eth = eth; ppe->dev = dev; ppe->version = version; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 190d2560ac86..217b44bed8b6 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -235,7 +235,17 @@ enum { MTK_PPE_CPU_REASON_INVALID = 0x1f, }; +struct mtk_flow_entry { + struct rhash_head node; + struct hlist_node list; + unsigned long cookie; + struct mtk_foe_entry data; + u16 hash; + s8 wed_index; +}; + struct mtk_ppe { + struct mtk_eth *eth; struct device *dev; void __iomem *base; int version; @@ -243,18 +253,33 @@ struct mtk_ppe { struct mtk_foe_entry *foe_table; dma_addr_t foe_phys; + u16 foe_check_time[MTK_PPE_ENTRIES]; + struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; + void *acct_table; }; -struct mtk_ppe *mtk_ppe_init(struct device *dev, void __iomem *base, int version); +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version); int mtk_ppe_start(struct mtk_ppe *ppe); int mtk_ppe_stop(struct mtk_ppe *ppe); +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); + static inline void -mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash) +mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) { - ppe->foe_table[hash].ib1 = 0; - dma_wmb(); + u16 now, diff; + + if (!ppe) + return; + + now = (u16)jiffies; + diff = now - ppe->foe_check_time[hash]; + if (diff < HZ / 10) + return; + + ppe->foe_check_time[hash] = now; + __mtk_ppe_check_skb(ppe, skb, hash); } static inline int @@ -282,8 +307,9 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid); int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid); int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq, int bss, int wcid); -int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, - u16 timestamp); +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); int mtk_ppe_debugfs_init(struct mtk_ppe *ppe); #endif diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 76a94ec85232..378685fe92b6 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -42,13 +42,6 @@ struct mtk_flow_data { } pppoe; }; -struct mtk_flow_entry { - struct rhash_head node; - unsigned long cookie; - u16 hash; - s8 wed_index; -}; - static const struct rhashtable_params mtk_flow_ht_params = { .head_offset = offsetof(struct mtk_flow_entry, node), .key_offset = offsetof(struct mtk_flow_entry, cookie), @@ -56,12 +49,6 @@ static const struct rhashtable_params mtk_flow_ht_params = { .automatic_shrinking = true, }; -static u32 -mtk_eth_timestamp(struct mtk_eth *eth) -{ - return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP; -} - static int mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data, bool egress) @@ -237,10 +224,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) int offload_type = 0; int wed_index = -1; u16 addr_type = 0; - u32 timestamp; u8 l4proto = 0; int err = 0; - int hash; int i; if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) @@ -410,23 +395,21 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) return -ENOMEM; 
entry->cookie = f->cookie; - timestamp = mtk_eth_timestamp(eth); - hash = mtk_foe_entry_commit(eth->ppe, &foe, timestamp); - if (hash < 0) { - err = hash; + memcpy(&entry->data, &foe, sizeof(entry->data)); + entry->wed_index = wed_index; + + if (mtk_foe_entry_commit(eth->ppe, entry) < 0) goto free; - } - entry->hash = hash; - entry->wed_index = wed_index; err = rhashtable_insert_fast(ð->flow_table, &entry->node, mtk_flow_ht_params); if (err < 0) - goto clear_flow; + goto clear; return 0; -clear_flow: - mtk_foe_entry_clear(eth->ppe, hash); + +clear: + mtk_foe_entry_clear(eth->ppe, entry); free: kfree(entry); if (wed_index >= 0) @@ -444,7 +427,7 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) if (!entry) return -ENOENT; - mtk_foe_entry_clear(eth->ppe, entry->hash); + mtk_foe_entry_clear(eth->ppe, entry); rhashtable_remove_fast(ð->flow_table, &entry->node, mtk_flow_ht_params); if (entry->wed_index >= 0) @@ -458,7 +441,6 @@ static int mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) { struct mtk_flow_entry *entry; - int timestamp; u32 idle; entry = rhashtable_lookup(ð->flow_table, &f->cookie, @@ -466,11 +448,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) if (!entry) return -ENOENT; - timestamp = mtk_foe_entry_timestamp(eth->ppe, entry->hash); - if (timestamp < 0) - return -ETIMEDOUT; - - idle = mtk_eth_timestamp(eth) - timestamp; + idle = mtk_foe_entry_idle_time(eth->ppe, entry); f->stats.lastused = jiffies - idle * HZ; return 0; -- cgit v1.2.3 From 8ff25d377445dcc587584b7c0228bf0eafb26f9f Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:54 +0200 Subject: net: ethernet: mtk_eth_soc: remove bridge flow offload type entry support According to MediaTek, this feature is not supported in current hardware Signed-off-by: Felix Fietkau Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_ppe.c | 8 -------- drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c | 1 - 2 files changed, 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index a7fe33bbde63..aa0d190db65d 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -84,13 +84,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e) u32 hash; switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) { - case MTK_PPE_PKT_TYPE_BRIDGE: - hv1 = e->bridge.src_mac_lo; - hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16); - hv2 = e->bridge.src_mac_hi >> 16; - hv2 ^= e->bridge.dest_mac_lo; - hv3 = e->bridge.dest_mac_hi; - break; case MTK_PPE_PKT_TYPE_IPV4_ROUTE: case MTK_PPE_PKT_TYPE_IPV4_HNAPT: hv1 = e->ipv4.orig.ports; @@ -572,7 +565,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe) MTK_PPE_FLOW_CFG_IP4_NAT | MTK_PPE_FLOW_CFG_IP4_NAPT | MTK_PPE_FLOW_CFG_IP4_DSLITE | - MTK_PPE_FLOW_CFG_L2_BRIDGE | MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c index d4b482340cb9..eb0b598f14e4 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type) static const char * const type_str[] = { [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", - [MTK_PPE_PKT_TYPE_BRIDGE] = "L2", [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", -- cgit v1.2.3 From 33fc42de33278b2b3ec6f3390512987bc29a62b7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Tue, 5 Apr 2022 21:57:55 +0200 Subject: net: ethernet: mtk_eth_soc: support creating mac address based offload entries This will be used to implement a limited form of bridge offloading. Since the hardware does not support flow table entries with just source and destination MAC address, the driver has to emulate it. The hardware automatically creates entries entries for incoming flows, even when they are bridged instead of routed, and reports when packets for these flows have reached the minimum PPS rate for offloading. After this happens, we look up the L2 flow offload entry based on the MAC header and fill in the output routing information in the flow table. The dynamically created per-flow entries are automatically removed when either the hardware flowtable entry expires, is replaced, or if the offload rule they belong to is removed Signed-off-by: Felix Fietkau Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_ppe.c | 241 +++++++++++++++++++++--- drivers/net/ethernet/mediatek/mtk_ppe.h | 44 +++-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 60 ++++-- 3 files changed, 299 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index aa0d190db65d..282a1f34a88a 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -6,12 +6,22 @@ #include #include #include +#include +#include +#include #include "mtk_eth_soc.h" #include "mtk_ppe.h" #include "mtk_ppe_regs.h" static DEFINE_SPINLOCK(ppe_lock); +static const struct rhashtable_params mtk_flow_l2_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, l2_node), + .key_offset = offsetof(struct mtk_flow_entry, data.bridge), + .key_len = offsetof(struct mtk_foe_bridge, key_end), + .automatic_shrinking = true, +}; + static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) { writel(val, ppe->base + reg); @@ -123,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry) { int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.l2; + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) return &entry->ipv6.l2; @@ -134,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry) { int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.ib2; + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) return &entry->ipv6.ib2; @@ -168,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto, if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) entry->ipv6.ports = ports_pad; - if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + if (type == MTK_PPE_PKT_TYPE_BRIDGE) { + ether_addr_copy(entry->bridge.src_mac, src_mac); + ether_addr_copy(entry->bridge.dest_mac, dest_mac); + entry->bridge.ib2 = val; + l2 = &entry->bridge.l2; + } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { entry->ipv6.ib2 = val; l2 = &entry->ipv6.l2; } else { @@ -371,6 +392,84 @@ mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data) return !memcmp(&entry->data.data, &data->data, len - 4); } +static void +__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + struct hlist_head *head; + struct hlist_node *tmp; + + if (entry->type == MTK_FLOW_TYPE_L2) { + rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, + mtk_flow_l2_ht_params); + + head = &entry->l2_flows; + hlist_for_each_entry_safe(entry, tmp, head, l2_data.list) + __mtk_foe_entry_clear(ppe, entry); + return; + } + + hlist_del_init(&entry->list); + if (entry->hash != 0xffff) { + ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; + ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, + MTK_FOE_STATE_BIND); + dma_wmb(); + } + entry->hash = 0xffff; + + if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) + return; + + hlist_del_init(&entry->l2_data.list); + kfree(entry); +} + +static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) +{ + u16 timestamp; + u16 now; + + now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; + timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; + + if (timestamp > now) + return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; + else + return now - timestamp; +} + +static void +mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + struct mtk_flow_entry *cur; + struct mtk_foe_entry *hwe; + struct hlist_node 
*tmp; + int idle; + + idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); + hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { + int cur_idle; + u32 ib1; + + hwe = &ppe->foe_table[cur->hash]; + ib1 = READ_ONCE(hwe->ib1); + + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { + cur->hash = 0xffff; + __mtk_foe_entry_clear(ppe, cur); + continue; + } + + cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); + if (cur_idle >= idle) + continue; + + idle = cur_idle; + entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; + } +} + static void mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) { @@ -378,6 +477,12 @@ mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) struct mtk_foe_entry foe; spin_lock_bh(&ppe_lock); + + if (entry->type == MTK_FLOW_TYPE_L2) { + mtk_flow_entry_update_l2(ppe, entry); + goto out; + } + if (entry->hash == 0xffff) goto out; @@ -419,21 +524,28 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) { spin_lock_bh(&ppe_lock); - hlist_del_init(&entry->list); - if (entry->hash != 0xffff) { - ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; - ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, - MTK_FOE_STATE_BIND); - dma_wmb(); - } - entry->hash = 0xffff; + __mtk_foe_entry_clear(ppe, entry); spin_unlock_bh(&ppe_lock); } +static int +mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + entry->type = MTK_FLOW_TYPE_L2; + + return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node, + mtk_flow_l2_ht_params); +} + int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) { - u32 hash = mtk_ppe_hash_entry(&entry->data); + int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1); + u32 hash; + + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return mtk_foe_entry_commit_l2(ppe, entry); + hash = mtk_ppe_hash_entry(&entry->data); entry->hash = 0xffff; spin_lock_bh(&ppe_lock); hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]); @@ -442,18 +554,72 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) return 0; } +static void +mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, + u16 hash) +{ + struct mtk_flow_entry *flow_info; + struct mtk_foe_entry foe, *hwe; + struct mtk_foe_mac_info *l2; + u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP; + int type; + + flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end), + GFP_ATOMIC); + if (!flow_info) + return; + + flow_info->l2_data.base_flow = entry; + flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; + flow_info->hash = hash; + hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]); + hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); + + hwe = &ppe->foe_table[hash]; + memcpy(&foe, hwe, sizeof(foe)); + foe.ib1 &= ib1_mask; + foe.ib1 |= entry->data.ib1 & ~ib1_mask; + + l2 = mtk_foe_entry_l2(&foe); + memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); + + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1); + if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) + memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); + else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) + l2->etype = ETH_P_IPV6; + + *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2; + + __mtk_foe_entry_commit(ppe, &foe, hash); +} + void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 
hash) { struct hlist_head *head = &ppe->foe_flow[hash / 2]; - struct mtk_flow_entry *entry; struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; + struct mtk_flow_entry *entry; + struct mtk_foe_bridge key = {}; + struct ethhdr *eh; bool found = false; - - if (hlist_empty(head)) - return; + u8 *tag; spin_lock_bh(&ppe_lock); + + if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) + goto out; + hlist_for_each_entry(entry, head, list) { + if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { + if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == + MTK_FOE_STATE_BIND)) + continue; + + entry->hash = 0xffff; + __mtk_foe_entry_clear(ppe, entry); + continue; + } + if (found || !mtk_flow_entry_match(entry, hwe)) { if (entry->hash != 0xffff) entry->hash = 0xffff; @@ -464,21 +630,50 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) __mtk_foe_entry_commit(ppe, &entry->data, hash); found = true; } + + if (found) + goto out; + + eh = eth_hdr(skb); + ether_addr_copy(key.dest_mac, eh->h_dest); + ether_addr_copy(key.src_mac, eh->h_source); + tag = skb->data - 2; + key.vlan = 0; + switch (skb->protocol) { +#if IS_ENABLED(CONFIG_NET_DSA) + case htons(ETH_P_XDSA): + if (!netdev_uses_dsa(skb->dev) || + skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) + goto out; + + tag += 4; + if (get_unaligned_be16(tag) != ETH_P_8021Q) + break; + + fallthrough; +#endif + case htons(ETH_P_8021Q): + key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK; + break; + default: + break; + } + + entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params); + if (!entry) + goto out; + + mtk_foe_entry_commit_subflow(ppe, entry, hash); + +out: spin_unlock_bh(&ppe_lock); } int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) { - u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP; - u16 timestamp; - mtk_flow_entry_update(ppe, entry); - timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP; - if (timestamp > now) - return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now; - else - return now - timestamp; + return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); } struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, @@ -492,6 +687,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, if (!ppe) return NULL; + rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params); + /* need to allocate a separate device, since it PPE DMA access is * not coherent. 
*/ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 217b44bed8b6..1f5cf1c9a947 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -6,6 +6,7 @@ #include #include +#include #define MTK_ETH_PPE_BASE 0xc00 @@ -84,19 +85,16 @@ struct mtk_foe_mac_info { u16 src_mac_lo; }; +/* software-only entry type */ struct mtk_foe_bridge { - u32 dest_mac_hi; - - u16 src_mac_lo; - u16 dest_mac_lo; + u8 dest_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + u16 vlan; - u32 src_mac_hi; + struct {} key_end; u32 ib2; - u32 _rsv[5]; - - u32 udf_tsid; struct mtk_foe_mac_info l2; }; @@ -235,13 +233,33 @@ enum { MTK_PPE_CPU_REASON_INVALID = 0x1f, }; +enum { + MTK_FLOW_TYPE_L4, + MTK_FLOW_TYPE_L2, + MTK_FLOW_TYPE_L2_SUBFLOW, +}; + struct mtk_flow_entry { + union { + struct hlist_node list; + struct { + struct rhash_head l2_node; + struct hlist_head l2_flows; + }; + }; + u8 type; + s8 wed_index; + u16 hash; + union { + struct mtk_foe_entry data; + struct { + struct mtk_flow_entry *base_flow; + struct hlist_node list; + struct {} end; + } l2_data; + }; struct rhash_head node; - struct hlist_node list; unsigned long cookie; - struct mtk_foe_entry data; - u16 hash; - s8 wed_index; }; struct mtk_ppe { @@ -256,6 +274,8 @@ struct mtk_ppe { u16 foe_check_time[MTK_PPE_ENTRIES]; struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2]; + struct rhashtable l2_flows; + void *acct_table; }; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 378685fe92b6..1fe31058b0f2 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -31,6 +31,8 @@ struct mtk_flow_data { __be16 src_port; __be16 dst_port; + u16 vlan_in; + struct { u16 id; __be16 proto; @@ -257,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) return -EOPNOTSUPP; } + switch (addr_type) { + case 0: + offload_type = MTK_PPE_PKT_TYPE_BRIDGE; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); + memcpy(data.eth.h_source, match.key->src, ETH_ALEN); + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + + if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q)) + return -EOPNOTSUPP; + + data.vlan_in = match.key->vlan_id; + } + break; + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; + break; + default: + return -EOPNOTSUPP; + } + flow_action_for_each(i, act, &rule->action) { switch (act->id) { case FLOW_ACTION_MANGLE: + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) mtk_flow_offload_mangle_eth(act, &data.eth); break; @@ -291,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) } } - switch (addr_type) { - case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; - break; - case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; - break; - default: - return -EOPNOTSUPP; - } - if (!is_valid_ether_addr(data.eth.h_source) || !is_valid_ether_addr(data.eth.h_dest)) return -EINVAL; @@ -315,10 +342,13 
@@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports ports; + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + flow_rule_match_ports(rule, &ports); data.src_port = ports.key->src; data.dst_port = ports.key->dst; - } else { + } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) { return -EOPNOTSUPP; } @@ -348,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) if (act->id != FLOW_ACTION_MANGLE) continue; + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + switch (act->mangle.htype) { case FLOW_ACT_MANGLE_HDR_TYPE_TCP: case FLOW_ACT_MANGLE_HDR_TYPE_UDP: @@ -373,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) return err; } + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + foe.bridge.vlan = data.vlan_in; + if (data.vlan.num == 1) { if (data.vlan.proto != htons(ETH_P_8021Q)) return -EOPNOTSUPP; -- cgit v1.2.3 From be8d9d05271c961dfe366793a3886f4bd6f39dca Mon Sep 17 00:00:00 2001 From: Wang Qing Date: Wed, 6 Apr 2022 02:17:26 -0700 Subject: net: ethernet: xilinx: use of_property_read_bool() instead of of_get_property "little-endian" has no specific content, use more helper function of_property_read_bool() instead of of_get_property() Signed-off-by: Wang Qing Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/ll_temac_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 869e362e09c1..3f6b9dfca095 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1515,7 +1515,7 @@ static int temac_probe(struct platform_device *pdev) of_node_put(dma_np); return PTR_ERR(lp->sdma_regs); } - if (of_get_property(dma_np, "little-endian", NULL)) { + if (of_property_read_bool(dma_np, "little-endian")) { lp->dma_in = temac_dma_in32_le; lp->dma_out = temac_dma_out32_le; } else { -- cgit v1.2.3 From 207d924dcf324fa4334938a38463aa0f1fcdc756 Mon Sep 17 00:00:00 2001 From: Wang Qing Date: Wed, 6 Apr 2022 02:17:03 -0700 Subject: net: usb: remove duplicate assignment netdev_alloc_skb() has assigned ssi->netdev to skb->dev if successed, no need to repeat assignment. Signed-off-by: Wang Qing Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 3353e761016d..17cf0d10fef9 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -190,7 +190,6 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skbn = netdev_alloc_skb(net, pkt_len + LL_MAX_HEADER); if (!skbn) return 0; - skbn->dev = net; switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) { case 0x40: -- cgit v1.2.3 From a5b116a0fa90d6d0e7af4f39199a6ae1f0afc9c7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 5 Apr 2022 21:15:48 -0700 Subject: net: wan: remove the lanmedia (lmc) driver The driver for LAN Media WAN interfaces spews build warnings on microblaze. The virt_to_bus() calls discard the volatile keyword. The right thing to do would be to migrate this driver to a modern DMA API but it seems unlikely anyone is actually using it. There had been no fixes or functional changes here since the git era begun. 
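The virt_to_bus() problem called out above is why a real fix would have meant porting the descriptor setup to the DMA mapping API rather than silencing warnings. As a hedged sketch only (not code from lmc_main.c; the function name and TX-only direction are illustrative), the modern per-buffer pattern inside a driver looks roughly like this:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Old style used by the removed driver (no IOMMU or cache-sync awareness):
 *	desc->buffer = virt_to_bus(skb->data);
 *
 * DMA-API replacement: map the buffer, check for mapping failure, and
 * keep the dma_addr_t so it can be unmapped on TX completion.
 */
static int example_map_tx_buf(struct device *dev, struct sk_buff *skb,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* program *handle into the hardware descriptor here; call
	 * dma_unmap_single(dev, *handle, skb->len, DMA_TO_DEVICE)
	 * once the hardware has finished with the buffer.
	 */
	return 0;
}

The lmc driver never gained a conversion along these lines, which is the gap the commit message refers to.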
Let's remove this driver, there isn't much changing in the APIs, if users come forward we can apologize and revert. Link: https://lore.kernel.org/all/20220321144013.440d7fc0@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com/ Signed-off-by: Jakub Kicinski Acked-by: Arnd Bergmann Acked-by: Thomas Bogendoerfer Signed-off-by: David S. Miller --- arch/mips/configs/gpr_defconfig | 1 - arch/mips/configs/mtx1_defconfig | 1 - drivers/net/wan/Kconfig | 28 - drivers/net/wan/Makefile | 2 - drivers/net/wan/lmc/Makefile | 18 - drivers/net/wan/lmc/lmc.h | 33 - drivers/net/wan/lmc/lmc_debug.c | 65 -- drivers/net/wan/lmc/lmc_debug.h | 52 - drivers/net/wan/lmc/lmc_ioctl.h | 255 ----- drivers/net/wan/lmc/lmc_main.c | 2009 -------------------------------------- drivers/net/wan/lmc/lmc_media.c | 1206 ----------------------- drivers/net/wan/lmc/lmc_proto.c | 106 -- drivers/net/wan/lmc/lmc_proto.h | 18 - drivers/net/wan/lmc/lmc_var.h | 468 --------- 14 files changed, 4262 deletions(-) delete mode 100644 drivers/net/wan/lmc/Makefile delete mode 100644 drivers/net/wan/lmc/lmc.h delete mode 100644 drivers/net/wan/lmc/lmc_debug.c delete mode 100644 drivers/net/wan/lmc/lmc_debug.h delete mode 100644 drivers/net/wan/lmc/lmc_ioctl.h delete mode 100644 drivers/net/wan/lmc/lmc_main.c delete mode 100644 drivers/net/wan/lmc/lmc_media.c delete mode 100644 drivers/net/wan/lmc/lmc_proto.c delete mode 100644 drivers/net/wan/lmc/lmc_proto.h delete mode 100644 drivers/net/wan/lmc/lmc_var.h (limited to 'drivers') diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 5cb91509bb7c..f86838bcae4c 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -214,7 +214,6 @@ CONFIG_ATH_DEBUG=y CONFIG_ATH5K=y CONFIG_ATH5K_DEBUG=y CONFIG_WAN=y -CONFIG_LANMEDIA=m CONFIG_HDLC=m CONFIG_HDLC_RAW=m CONFIG_HDLC_RAW_ETH=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 205d3b34528c..eb0b313a2109 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -363,7 +363,6 @@ CONFIG_USB_AN2720=y CONFIG_USB_EPSON2888=y CONFIG_USB_SIERRA_NET=m CONFIG_WAN=y -CONFIG_LANMEDIA=m CONFIG_HDLC=m CONFIG_HDLC_RAW=m CONFIG_HDLC_RAW_ETH=m diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 140780ac1745..588b2333cdb8 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -57,34 +57,6 @@ config COSA The driver will be compiled as a module: the module will be called cosa. -# -# Lan Media's board. Currently 1000, 1200, 5200, 5245 -# -config LANMEDIA - tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" - depends on PCI && VIRT_TO_BUS && HDLC - help - Driver for the following Lan Media family of serial boards: - - - LMC 1000 board allows you to connect synchronous serial devices - (for example base-band modems, or any other device with the X.21, - V.24, V.35 or V.36 interface) to your Linux box. - - - LMC 1200 with on board DSU board allows you to connect your Linux - box directly to a T1 or E1 circuit. - - - LMC 5200 board provides a HSSI interface capable of running up to - 52 Mbits per second. - - - LMC 5245 board connects directly to a T3 circuit saving the - additional external hardware. - - To change setting such as clock source you will need lmcctl. - It is available at (broken link). - - To compile this driver as a module, choose M here: the - module will be called lmc. - # There is no way to detect a Sealevel board. 
Force it modular config SEALEVEL_4021 tristate "Sealevel Systems 4021 support" diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 480bcd1f6c1c..1cd42147b34f 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -19,8 +19,6 @@ obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o obj-$(CONFIG_COSA) += cosa.o obj-$(CONFIG_FARSYNC) += farsync.o -obj-$(CONFIG_LANMEDIA) += lmc/ - obj-$(CONFIG_LAPBETHER) += lapbether.o obj-$(CONFIG_N2) += n2.o obj-$(CONFIG_C101) += c101.o diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile deleted file mode 100644 index f00fe4491d69..000000000000 --- a/drivers/net/wan/lmc/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the Lan Media 21140 based WAN cards -# Specifically the 1000,1200,5200,5245 -# - -obj-$(CONFIG_LANMEDIA) += lmc.o - -lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o - -# Like above except every packet gets echoed to KERN_DEBUG -# in hex -# -# DBDEF = \ -# -DDEBUG \ -# -DLMC_PACKET_LOG - -ccflags-y := $(DBGDEF) diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h deleted file mode 100644 index d7d59b4595f9..000000000000 --- a/drivers/net/wan/lmc/lmc.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LMC_H_ -#define _LMC_H_ - -#include "lmc_var.h" - -/* - * prototypes for everyone - */ -int lmc_probe(struct net_device * dev); -unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned - devaddr, unsigned regno); -void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, - unsigned regno, unsigned data); -void lmc_led_on(lmc_softc_t * const, u32); -void lmc_led_off(lmc_softc_t * const, u32); -unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); -void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); -void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits); -void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits); - -int lmc_ioctl(struct net_device *dev, struct if_settings *ifs); - -extern lmc_media_t lmc_ds3_media; -extern lmc_media_t lmc_ssi_media; -extern lmc_media_t lmc_t1_media; -extern lmc_media_t lmc_hssi_media; - -#ifdef _DBG_EVENTLOG -static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); -#endif - -#endif diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c deleted file mode 100644 index 2b6051bda3fb..000000000000 --- a/drivers/net/wan/lmc/lmc_debug.c +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include - -#include "lmc_debug.h" - -/* - * Prints out len, max to 80 octets using printk, 20 per line - */ -#ifdef DEBUG -#ifdef LMC_PACKET_LOG -void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) -{ - int iNewLine = 1; - char str[80], *pstr; - - sprintf(str, KERN_DEBUG "lmc: %s: ", type); - pstr = str+strlen(str); - - if(iLen > 240){ - printk(KERN_DEBUG "lmc: Printing 240 chars... 
out of: %d\n", iLen); - iLen = 240; - } - else{ - printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen); - } - - while(iLen > 0) - { - sprintf(pstr, "%02x ", *ucData); - pstr+=3; - ucData++; - if( !(iNewLine % 20)) - { - sprintf(pstr, "\n"); - printk(str); - sprintf(str, KERN_DEBUG "lmc: %s: ", type); - pstr=str+strlen(str); - } - iNewLine++; - iLen--; - } - sprintf(pstr, "\n"); - printk(str); -} -#endif -#endif - -#ifdef DEBUG -u32 lmcEventLogIndex; -u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; - -void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3) -{ - lmcEventLogBuf[lmcEventLogIndex++] = EventNum; - lmcEventLogBuf[lmcEventLogIndex++] = arg2; - lmcEventLogBuf[lmcEventLogIndex++] = arg3; - lmcEventLogBuf[lmcEventLogIndex++] = jiffies; - - lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1; -} -#endif /* DEBUG */ - -/* --------------------------- end if_lmc_linux.c ------------------------ */ diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h deleted file mode 100644 index cfae9eddf003..000000000000 --- a/drivers/net/wan/lmc/lmc_debug.h +++ /dev/null @@ -1,52 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LMC_DEBUG_H_ -#define _LMC_DEBUG_H_ - -#ifdef DEBUG -#ifdef LMC_PACKET_LOG -#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z)) -#else -#define LMC_CONSOLE_LOG(x,y,z) -#endif -#else -#define LMC_CONSOLE_LOG(x,y,z) -#endif - - - -/* Debug --- Event log definitions --- */ -/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */ -#define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */ -#define LMC_EVENTLOGARGS 4 /* number of args for each event */ - -/* event indicators */ -#define LMC_EVENT_XMT 1 -#define LMC_EVENT_XMTEND 2 -#define LMC_EVENT_XMTINT 3 -#define LMC_EVENT_RCVINT 4 -#define LMC_EVENT_RCVEND 5 -#define LMC_EVENT_INT 6 -#define LMC_EVENT_XMTINTTMO 7 -#define LMC_EVENT_XMTPRCTMO 8 -#define LMC_EVENT_INTEND 9 -#define LMC_EVENT_RESET1 10 -#define LMC_EVENT_RESET2 11 -#define LMC_EVENT_FORCEDRESET 12 -#define LMC_EVENT_WATCHDOG 13 -#define LMC_EVENT_BADPKTSURGE 14 -#define LMC_EVENT_TBUSY0 15 -#define LMC_EVENT_TBUSY1 16 - - -#ifdef DEBUG -extern u32 lmcEventLogIndex; -extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; -#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) -#else -#define LMC_EVENT_LOG(x,y,z) -#endif /* end ifdef _DBG_EVENTLOG */ - -void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); -void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); - -#endif diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h deleted file mode 100644 index 8c65e2176e94..000000000000 --- a/drivers/net/wan/lmc/lmc_ioctl.h +++ /dev/null @@ -1,255 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _LMC_IOCTL_H_ -#define _LMC_IOCTL_H_ -/* $Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $ */ - - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). 
- */ - -#define LMCIOCGINFO SIOCDEVPRIVATE+3 /* get current state */ -#define LMCIOCSINFO SIOCDEVPRIVATE+4 /* set state to user values */ -#define LMCIOCGETLMCSTATS SIOCDEVPRIVATE+5 -#define LMCIOCCLEARLMCSTATS SIOCDEVPRIVATE+6 -#define LMCIOCDUMPEVENTLOG SIOCDEVPRIVATE+7 -#define LMCIOCGETXINFO SIOCDEVPRIVATE+8 -#define LMCIOCSETCIRCUIT SIOCDEVPRIVATE+9 -#define LMCIOCUNUSEDATM SIOCDEVPRIVATE+10 -#define LMCIOCRESET SIOCDEVPRIVATE+11 -#define LMCIOCT1CONTROL SIOCDEVPRIVATE+12 -#define LMCIOCIFTYPE SIOCDEVPRIVATE+13 -#define LMCIOCXILINX SIOCDEVPRIVATE+14 - -#define LMC_CARDTYPE_UNKNOWN -1 -#define LMC_CARDTYPE_HSSI 1 /* probed card is a HSSI card */ -#define LMC_CARDTYPE_DS3 2 /* probed card is a DS3 card */ -#define LMC_CARDTYPE_SSI 3 /* probed card is a SSI card */ -#define LMC_CARDTYPE_T1 4 /* probed card is a T1 card */ - -#define LMC_CTL_CARDTYPE_LMC5200 0 /* HSSI */ -#define LMC_CTL_CARDTYPE_LMC5245 1 /* DS3 */ -#define LMC_CTL_CARDTYPE_LMC1000 2 /* SSI, V.35 */ -#define LMC_CTL_CARDTYPE_LMC1200 3 /* DS1 */ - -#define LMC_CTL_OFF 0 /* generic OFF value */ -#define LMC_CTL_ON 1 /* generic ON value */ - -#define LMC_CTL_CLOCK_SOURCE_EXT 0 /* clock off line */ -#define LMC_CTL_CLOCK_SOURCE_INT 1 /* internal clock */ - -#define LMC_CTL_CRC_LENGTH_16 16 -#define LMC_CTL_CRC_LENGTH_32 32 -#define LMC_CTL_CRC_BYTESIZE_2 2 -#define LMC_CTL_CRC_BYTESIZE_4 4 - - -#define LMC_CTL_CABLE_LENGTH_LT_100FT 0 /* DS3 cable < 100 feet */ -#define LMC_CTL_CABLE_LENGTH_GT_100FT 1 /* DS3 cable >= 100 feet */ - -#define LMC_CTL_CIRCUIT_TYPE_E1 0 -#define LMC_CTL_CIRCUIT_TYPE_T1 1 - -/* - * IFTYPE defines - */ -#define LMC_PPP 1 /* use generic HDLC interface */ -#define LMC_NET 2 /* use direct net interface */ -#define LMC_RAW 3 /* use direct net interface */ - -/* - * These are not in the least IOCTL related, but I want them common. 
- */ -/* - * assignments for the GPIO register on the DEC chip (common) - */ -#define LMC_GEP_INIT 0x01 /* 0: */ -#define LMC_GEP_RESET 0x02 /* 1: */ -#define LMC_GEP_MODE 0x10 /* 4: */ -#define LMC_GEP_DP 0x20 /* 5: */ -#define LMC_GEP_DATA 0x40 /* 6: serial out */ -#define LMC_GEP_CLK 0x80 /* 7: serial clock */ - -/* - * HSSI GPIO assignments - */ -#define LMC_GEP_HSSI_ST 0x04 /* 2: receive timing sense (deprecated) */ -#define LMC_GEP_HSSI_CLOCK 0x08 /* 3: clock source */ - -/* - * T1 GPIO assignments - */ -#define LMC_GEP_SSI_GENERATOR 0x04 /* 2: enable prog freq gen serial i/f */ -#define LMC_GEP_SSI_TXCLOCK 0x08 /* 3: provide clock on TXCLOCK output */ - -/* - * Common MII16 bits - */ -#define LMC_MII16_LED0 0x0080 -#define LMC_MII16_LED1 0x0100 -#define LMC_MII16_LED2 0x0200 -#define LMC_MII16_LED3 0x0400 /* Error, and the red one */ -#define LMC_MII16_LED_ALL 0x0780 /* LED bit mask */ -#define LMC_MII16_FIFO_RESET 0x0800 - -/* - * definitions for HSSI - */ -#define LMC_MII16_HSSI_TA 0x0001 -#define LMC_MII16_HSSI_CA 0x0002 -#define LMC_MII16_HSSI_LA 0x0004 -#define LMC_MII16_HSSI_LB 0x0008 -#define LMC_MII16_HSSI_LC 0x0010 -#define LMC_MII16_HSSI_TM 0x0020 -#define LMC_MII16_HSSI_CRC 0x0040 - -/* - * assignments for the MII register 16 (DS3) - */ -#define LMC_MII16_DS3_ZERO 0x0001 -#define LMC_MII16_DS3_TRLBK 0x0002 -#define LMC_MII16_DS3_LNLBK 0x0004 -#define LMC_MII16_DS3_RAIS 0x0008 -#define LMC_MII16_DS3_TAIS 0x0010 -#define LMC_MII16_DS3_BIST 0x0020 -#define LMC_MII16_DS3_DLOS 0x0040 -#define LMC_MII16_DS3_CRC 0x1000 -#define LMC_MII16_DS3_SCRAM 0x2000 -#define LMC_MII16_DS3_SCRAM_LARS 0x4000 - -/* Note: 2 pairs of LEDs where swapped by mistake - * in Xilinx code for DS3 & DS1 adapters */ -#define LMC_DS3_LED0 0x0100 /* bit 08 yellow */ -#define LMC_DS3_LED1 0x0080 /* bit 07 blue */ -#define LMC_DS3_LED2 0x0400 /* bit 10 green */ -#define LMC_DS3_LED3 0x0200 /* bit 09 red */ - -/* - * framer register 0 and 7 (7 is latched and reset on read) - */ -#define LMC_FRAMER_REG0_DLOS 0x80 /* digital loss of service */ -#define LMC_FRAMER_REG0_OOFS 0x40 /* out of frame sync */ -#define LMC_FRAMER_REG0_AIS 0x20 /* alarm indication signal */ -#define LMC_FRAMER_REG0_CIS 0x10 /* channel idle */ -#define LMC_FRAMER_REG0_LOC 0x08 /* loss of clock */ - -/* - * Framer register 9 contains the blue alarm signal - */ -#define LMC_FRAMER_REG9_RBLUE 0x02 /* Blue alarm failure */ - -/* - * Framer register 0x10 contains xbit error - */ -#define LMC_FRAMER_REG10_XBIT 0x01 /* X bit error alarm failure */ - -/* - * And SSI, LMC1000 - */ -#define LMC_MII16_SSI_DTR 0x0001 /* DTR output RW */ -#define LMC_MII16_SSI_DSR 0x0002 /* DSR input RO */ -#define LMC_MII16_SSI_RTS 0x0004 /* RTS output RW */ -#define LMC_MII16_SSI_CTS 0x0008 /* CTS input RO */ -#define LMC_MII16_SSI_DCD 0x0010 /* DCD input RO */ -#define LMC_MII16_SSI_RI 0x0020 /* RI input RO */ -#define LMC_MII16_SSI_CRC 0x1000 /* CRC select - RW */ - -/* - * bits 0x0080 through 0x0800 are generic, and described - * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET - */ -#define LMC_MII16_SSI_LL 0x1000 /* LL output RW */ -#define LMC_MII16_SSI_RL 0x2000 /* RL output RW */ -#define LMC_MII16_SSI_TM 0x4000 /* TM input RO */ -#define LMC_MII16_SSI_LOOP 0x8000 /* loopback enable RW */ - -/* - * Some of the MII16 bits are mirrored in the MII17 register as well, - * but let's keep thing separate for now, and get only the cable from - * the MII17. 
- */ -#define LMC_MII17_SSI_CABLE_MASK 0x0038 /* mask to extract the cable type */ -#define LMC_MII17_SSI_CABLE_SHIFT 3 /* shift to extract the cable type */ - -/* - * And T1, LMC1200 - */ -#define LMC_MII16_T1_UNUSED1 0x0003 -#define LMC_MII16_T1_XOE 0x0004 -#define LMC_MII16_T1_RST 0x0008 /* T1 chip reset - RW */ -#define LMC_MII16_T1_Z 0x0010 /* output impedance T1=1, E1=0 output - RW */ -#define LMC_MII16_T1_INTR 0x0020 /* interrupt from 8370 - RO */ -#define LMC_MII16_T1_ONESEC 0x0040 /* one second square wave - ro */ - -#define LMC_MII16_T1_LED0 0x0100 -#define LMC_MII16_T1_LED1 0x0080 -#define LMC_MII16_T1_LED2 0x0400 -#define LMC_MII16_T1_LED3 0x0200 -#define LMC_MII16_T1_FIFO_RESET 0x0800 - -#define LMC_MII16_T1_CRC 0x1000 /* CRC select - RW */ -#define LMC_MII16_T1_UNUSED2 0xe000 - - -/* 8370 framer registers */ - -#define T1FRAMER_ALARM1_STATUS 0x47 -#define T1FRAMER_ALARM2_STATUS 0x48 -#define T1FRAMER_FERR_LSB 0x50 -#define T1FRAMER_FERR_MSB 0x51 /* framing bit error counter */ -#define T1FRAMER_LCV_LSB 0x54 -#define T1FRAMER_LCV_MSB 0x55 /* line code violation counter */ -#define T1FRAMER_AERR 0x5A - -/* mask for the above AERR register */ -#define T1FRAMER_LOF_MASK (0x0f0) /* receive loss of frame */ -#define T1FRAMER_COFA_MASK (0x0c0) /* change of frame alignment */ -#define T1FRAMER_SEF_MASK (0x03) /* severely errored frame */ - -/* 8370 framer register ALM1 (0x47) values - * used to determine link status - */ - -#define T1F_SIGFRZ 0x01 /* signaling freeze */ -#define T1F_RLOF 0x02 /* receive loss of frame alignment */ -#define T1F_RLOS 0x04 /* receive loss of signal */ -#define T1F_RALOS 0x08 /* receive analog loss of signal or RCKI loss of clock */ -#define T1F_RAIS 0x10 /* receive alarm indication signal */ -#define T1F_UNUSED 0x20 -#define T1F_RYEL 0x40 /* receive yellow alarm */ -#define T1F_RMYEL 0x80 /* receive multiframe yellow alarm */ - -#define LMC_T1F_WRITE 0 -#define LMC_T1F_READ 1 - -typedef struct lmc_st1f_control { - int command; - int address; - int value; - char __user *data; -} lmc_t1f_control; - -enum lmc_xilinx_c { - lmc_xilinx_reset = 1, - lmc_xilinx_load_prom = 2, - lmc_xilinx_load = 3 -}; - -struct lmc_xilinx_control { - enum lmc_xilinx_c command; - int len; - char __user *data; -}; - -/* ------------------ end T1 defs ------------------- */ - -#define LMC_MII_LedMask 0x0780 -#define LMC_MII_LedBitPos 7 - -#endif diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c deleted file mode 100644 index 76c6b4f89890..000000000000 --- a/drivers/net/wan/lmc/lmc_main.c +++ /dev/null @@ -1,2009 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - * - * With Help By: - * David Boggs - * Ron Crane - * Alan Cox - * - * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards. - * - * To control link specific options lmcctl is required. - * It can be obtained from ftp.lanmedia.com. - * - * Linux driver notes: - * Linux uses the device struct lmc_private to pass private information - * around. - * - * The initialization portion of this driver (the lmc_reset() and the - * lmc_dec_reset() functions, as well as the led controls and the - * lmc_initcsrs() functions. 
- * - * The watchdog function runs every second and checks to see if - * we still have link, and that the timing source is what we expected - * it to be. If link is lost, the interface is marked down, and - * we no longer can transmit. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* Processor type for cache alignment. */ -#include -#include -#include -#include -//#include - -#define DRIVER_MAJOR_VERSION 1 -#define DRIVER_MINOR_VERSION 34 -#define DRIVER_SUB_VERSION 0 - -#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION) - -#include "lmc.h" -#include "lmc_var.h" -#include "lmc_ioctl.h" -#include "lmc_debug.h" -#include "lmc_proto.h" - -static int LMC_PKT_BUF_SZ = 1542; - -static const struct pci_device_id lmc_pci_tbl[] = { - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, - PCI_VENDOR_ID_LMC, PCI_ANY_ID }, - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, - PCI_ANY_ID, PCI_VENDOR_ID_LMC }, - { 0 } -}; - -MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); -MODULE_LICENSE("GPL v2"); - - -static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static int lmc_rx (struct net_device *dev); -static int lmc_open(struct net_device *dev); -static int lmc_close(struct net_device *dev); -static struct net_device_stats *lmc_get_stats(struct net_device *dev); -static irqreturn_t lmc_interrupt(int irq, void *dev_instance); -static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size); -static void lmc_softreset(lmc_softc_t * const); -static void lmc_running_reset(struct net_device *dev); -static int lmc_ifdown(struct net_device * const); -static void lmc_watchdog(struct timer_list *t); -static void lmc_reset(lmc_softc_t * const sc); -static void lmc_dec_reset(lmc_softc_t * const sc); -static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue); - -/* - * linux reserves 16 device specific IOCTLs. We call them - * LMCIOC* to control various bits of our world. - */ -static int lmc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd) /*fold00*/ -{ - lmc_softc_t *sc = dev_to_sc(dev); - lmc_ctl_t ctl; - int ret = -EOPNOTSUPP; - u16 regVal; - unsigned long flags; - - /* - * Most functions mess with the structure - * Disable interrupts while we do the polling - */ - - switch (cmd) { - /* - * Return current driver state. Since we keep this up - * To date internally, just copy this out to the user. 
- */ - case LMCIOCGINFO: /*fold01*/ - if (copy_to_user(data, &sc->ictl, sizeof(lmc_ctl_t))) - ret = -EFAULT; - else - ret = 0; - break; - - case LMCIOCSINFO: /*fold01*/ - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - - if(dev->flags & IFF_UP){ - ret = -EBUSY; - break; - } - - if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) { - ret = -EFAULT; - break; - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - sc->lmc_media->set_status (sc, &ctl); - - if(ctl.crc_length != sc->ictl.crc_length) { - sc->lmc_media->set_crc_length(sc, ctl.crc_length); - if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) - sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE; - else - sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; - } - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - ret = 0; - break; - - case LMCIOCIFTYPE: /*fold01*/ - { - u16 old_type = sc->if_type; - u16 new_type; - - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - - if (copy_from_user(&new_type, data, sizeof(u16))) { - ret = -EFAULT; - break; - } - - - if (new_type == old_type) - { - ret = 0 ; - break; /* no change */ - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_proto_close(sc); - - sc->if_type = new_type; - lmc_proto_attach(sc); - ret = lmc_proto_open(sc); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - break; - } - - case LMCIOCGETXINFO: /*fold01*/ - spin_lock_irqsave(&sc->lmc_lock, flags); - sc->lmc_xinfo.Magic0 = 0xBEEFCAFE; - - sc->lmc_xinfo.PciCardType = sc->lmc_cardtype; - sc->lmc_xinfo.PciSlotNumber = 0; - sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION; - sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION; - sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION; - sc->lmc_xinfo.XilinxRevisionNumber = - lmc_mii_readreg (sc, 0, 3) & 0xf; - sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ; - sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc); - sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - sc->lmc_xinfo.Magic1 = 0xDEADBEEF; - - if (copy_to_user(data, &sc->lmc_xinfo, sizeof(struct lmc_xinfo))) - ret = -EFAULT; - else - ret = 0; - - break; - - case LMCIOCGETLMCSTATS: - spin_lock_irqsave(&sc->lmc_lock, flags); - if (sc->lmc_cardtype == LMC_CARDTYPE_T1) { - lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB); - sc->extra_stats.framingBitErrorCount += - lmc_mii_readreg(sc, 0, 18) & 0xff; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB); - sc->extra_stats.framingBitErrorCount += - (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB); - sc->extra_stats.lineCodeViolationCount += - lmc_mii_readreg(sc, 0, 18) & 0xff; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB); - sc->extra_stats.lineCodeViolationCount += - (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; - lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR); - regVal = lmc_mii_readreg(sc, 0, 18) & 0xff; - - sc->extra_stats.lossOfFrameCount += - (regVal & T1FRAMER_LOF_MASK) >> 4; - sc->extra_stats.changeOfFrameAlignmentCount += - (regVal & T1FRAMER_COFA_MASK) >> 2; - sc->extra_stats.severelyErroredFrameCount += - regVal & T1FRAMER_SEF_MASK; - } - spin_unlock_irqrestore(&sc->lmc_lock, flags); - if (copy_to_user(data, &sc->lmc_device->stats, - sizeof(sc->lmc_device->stats)) || - copy_to_user(data + sizeof(sc->lmc_device->stats), - &sc->extra_stats, sizeof(sc->extra_stats))) - ret = -EFAULT; - else - ret = 0; - break; - - case LMCIOCCLEARLMCSTATS: - if (!capable(CAP_NET_ADMIN)) { - ret = -EPERM; - break; - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - 
memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats)); - memset(&sc->extra_stats, 0, sizeof(sc->extra_stats)); - sc->extra_stats.check = STATCHECK; - sc->extra_stats.version_size = (DRIVER_VERSION << 16) + - sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); - sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; - spin_unlock_irqrestore(&sc->lmc_lock, flags); - ret = 0; - break; - - case LMCIOCSETCIRCUIT: /*fold01*/ - if (!capable(CAP_NET_ADMIN)){ - ret = -EPERM; - break; - } - - if(dev->flags & IFF_UP){ - ret = -EBUSY; - break; - } - - if (copy_from_user(&ctl, data, sizeof(lmc_ctl_t))) { - ret = -EFAULT; - break; - } - spin_lock_irqsave(&sc->lmc_lock, flags); - sc->lmc_media->set_circuit_type(sc, ctl.circuit_type); - sc->ictl.circuit_type = ctl.circuit_type; - spin_unlock_irqrestore(&sc->lmc_lock, flags); - ret = 0; - - break; - - case LMCIOCRESET: /*fold01*/ - if (!capable(CAP_NET_ADMIN)){ - ret = -EPERM; - break; - } - - spin_lock_irqsave(&sc->lmc_lock, flags); - /* Reset driver and bring back to current state */ - printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16)); - lmc_running_reset (dev); - printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16)); - - LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16)); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - ret = 0; - break; - -#ifdef DEBUG - case LMCIOCDUMPEVENTLOG: - if (copy_to_user(data, &lmcEventLogIndex, sizeof(u32))) { - ret = -EFAULT; - break; - } - if (copy_to_user(data + sizeof(u32), lmcEventLogBuf, - sizeof(lmcEventLogBuf))) - ret = -EFAULT; - else - ret = 0; - - break; -#endif /* end ifdef _DBG_EVENTLOG */ - case LMCIOCT1CONTROL: /*fold01*/ - if (sc->lmc_cardtype != LMC_CARDTYPE_T1){ - ret = -EOPNOTSUPP; - break; - } - break; - case LMCIOCXILINX: /*fold01*/ - { - struct lmc_xilinx_control xc; /*fold02*/ - - if (!capable(CAP_NET_ADMIN)){ - ret = -EPERM; - break; - } - - /* - * Stop the xwitter whlie we restart the hardware - */ - netif_stop_queue(dev); - - if (copy_from_user(&xc, data, sizeof(struct lmc_xilinx_control))) { - ret = -EFAULT; - break; - } - switch(xc.command){ - case lmc_xilinx_reset: /*fold02*/ - { - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_mii_readreg (sc, 0, 16); - - /* - * Make all of them 0 and make input - */ - lmc_gpio_mkinput(sc, 0xff); - - /* - * make the reset output - */ - lmc_gpio_mkoutput(sc, LMC_GEP_RESET); - - /* - * RESET low to force configuration. This also forces - * the transmitter clock to be internal, but we expect to reset - * that later anyway. 
- */ - - sc->lmc_gpio &= ~LMC_GEP_RESET; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - - /* - * hold for more than 10 microseconds - */ - udelay(50); - - sc->lmc_gpio |= LMC_GEP_RESET; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - - /* - * stop driving Xilinx-related signals - */ - lmc_gpio_mkinput(sc, 0xff); - - /* Reset the frammer hardware */ - sc->lmc_media->set_link_status (sc, 1); - sc->lmc_media->set_status (sc, NULL); -// lmc_softreset(sc); - - { - int i; - for(i = 0; i < 5; i++){ - lmc_led_on(sc, LMC_DS3_LED0); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED0); - lmc_led_on(sc, LMC_DS3_LED1); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED1); - lmc_led_on(sc, LMC_DS3_LED3); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED3); - lmc_led_on(sc, LMC_DS3_LED2); - mdelay(100); - lmc_led_off(sc, LMC_DS3_LED2); - } - } - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - - - ret = 0x0; - - } - - break; - case lmc_xilinx_load_prom: /*fold02*/ - { - int timeout = 500000; - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_mii_readreg (sc, 0, 16); - - /* - * Make all of them 0 and make input - */ - lmc_gpio_mkinput(sc, 0xff); - - /* - * make the reset output - */ - lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET); - - /* - * RESET low to force configuration. This also forces - * the transmitter clock to be internal, but we expect to reset - * that later anyway. - */ - - sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP); - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - - /* - * hold for more than 10 microseconds - */ - udelay(50); - - sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - /* - * busy wait for the chip to reset - */ - while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 && - (timeout-- > 0)) - cpu_relax(); - - - /* - * stop driving Xilinx-related signals - */ - lmc_gpio_mkinput(sc, 0xff); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - ret = 0x0; - - - break; - - } - - case lmc_xilinx_load: /*fold02*/ - { - char *data; - int pos; - int timeout = 500000; - - if (!xc.data) { - ret = -EINVAL; - break; - } - - data = memdup_user(xc.data, xc.len); - if (IS_ERR(data)) { - ret = PTR_ERR(data); - break; - } - - printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data); - - spin_lock_irqsave(&sc->lmc_lock, flags); - lmc_gpio_mkinput(sc, 0xff); - - /* - * Clear the Xilinx and start prgramming from the DEC - */ - - /* - * Set ouput as: - * Reset: 0 (active) - * DP: 0 (active) - * Mode: 1 - * - */ - sc->lmc_gpio = 0x00; - sc->lmc_gpio &= ~LMC_GEP_DP; - sc->lmc_gpio &= ~LMC_GEP_RESET; - sc->lmc_gpio |= LMC_GEP_MODE; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET); - - /* - * Wait at least 10 us 20 to be safe - */ - udelay(50); - - /* - * Clear reset and activate programming lines - * Reset: Input - * DP: Input - * Clock: Output - * Data: Output - * Mode: Output - */ - lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET); - - /* - * Set LOAD, DATA, Clock to 1 - */ - sc->lmc_gpio = 0x00; - sc->lmc_gpio |= LMC_GEP_MODE; - sc->lmc_gpio |= LMC_GEP_DATA; - sc->lmc_gpio |= LMC_GEP_CLK; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE ); - - /* - * busy wait for the chip to reset - */ - while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 && - (timeout-- > 0)) - cpu_relax(); - - printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout); - - for(pos = 0; pos < xc.len; pos++){ 
- switch(data[pos]){ - case 0: - sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */ - break; - case 1: - sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */ - break; - default: - printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]); - sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */ - } - sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */ - sc->lmc_gpio |= LMC_GEP_MODE; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - udelay(1); - - sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clack back to one */ - sc->lmc_gpio |= LMC_GEP_MODE; - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - udelay(1); - } - if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){ - printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name); - } - else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){ - printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name); - } - else { - printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos); - } - - lmc_gpio_mkinput(sc, 0xff); - - sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - - sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - kfree(data); - - ret = 0; - - break; - } - default: /*fold02*/ - ret = -EBADE; - break; - } - - netif_wake_queue(dev); - sc->lmc_txfull = 0; - - } - break; - default: - break; - } - - return ret; -} - - -/* the watchdog process that cruises around */ -static void lmc_watchdog(struct timer_list *t) /*fold00*/ -{ - lmc_softc_t *sc = from_timer(sc, t, timer); - struct net_device *dev = sc->lmc_device; - int link_status; - u32 ticks; - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - if(sc->check != 0xBEAFCAFE){ - printk("LMC: Corrupt net_device struct, breaking out\n"); - spin_unlock_irqrestore(&sc->lmc_lock, flags); - return; - } - - - /* Make sure the tx jabber and rx watchdog are off, - * and the transmit and receive processes are running. - */ - - LMC_CSR_WRITE (sc, csr_15, 0x00000011); - sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN; - LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); - - if (sc->lmc_ok == 0) - goto kick_timer; - - LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16)); - - /* --- begin time out check ----------------------------------- - * check for a transmit interrupt timeout - * Has the packet xmt vs xmt serviced threshold been exceeded */ - if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && - sc->lmc_device->stats.tx_packets > sc->lasttx_packets && - sc->tx_TimeoutInd == 0) - { - - /* wait for the watchdog to come around again */ - sc->tx_TimeoutInd = 1; - } - else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && - sc->lmc_device->stats.tx_packets > sc->lasttx_packets && - sc->tx_TimeoutInd) - { - - LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); - - sc->tx_TimeoutDisplay = 1; - sc->extra_stats.tx_TimeoutCnt++; - - /* DEC chip is stuck, hit it with a RESET!!!! 
*/ - lmc_running_reset (dev); - - - /* look at receive & transmit process state to make sure they are running */ - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); - - /* look at: DSR - 02 for Reg 16 - * CTS - 08 - * DCD - 10 - * RI - 20 - * for Reg 17 - */ - LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17)); - - /* reset the transmit timeout detection flag */ - sc->tx_TimeoutInd = 0; - sc->lastlmc_taint_tx = sc->lmc_taint_tx; - sc->lasttx_packets = sc->lmc_device->stats.tx_packets; - } else { - sc->tx_TimeoutInd = 0; - sc->lastlmc_taint_tx = sc->lmc_taint_tx; - sc->lasttx_packets = sc->lmc_device->stats.tx_packets; - } - - /* --- end time out check ----------------------------------- */ - - - link_status = sc->lmc_media->get_link_status (sc); - - /* - * hardware level link lost, but the interface is marked as up. - * Mark it as down. - */ - if ((link_status == 0) && (sc->last_link_status != 0)) { - printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name); - sc->last_link_status = 0; - /* lmc_reset (sc); Why reset??? The link can go down ok */ - - /* Inform the world that link has been lost */ - netif_carrier_off(dev); - } - - /* - * hardware link is up, but the interface is marked as down. - * Bring it back up again. - */ - if (link_status != 0 && sc->last_link_status == 0) { - printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name); - sc->last_link_status = 1; - /* lmc_reset (sc); Again why reset??? */ - - netif_carrier_on(dev); - } - - /* Call media specific watchdog functions */ - sc->lmc_media->watchdog(sc); - - /* - * Poke the transmitter to make sure it - * never stops, even if we run out of mem - */ - LMC_CSR_WRITE(sc, csr_rxpoll, 0); - - /* - * Check for code that failed - * and try and fix it as appropriate - */ - if(sc->failed_ring == 1){ - /* - * Failed to setup the recv/xmit rin - * Try again - */ - sc->failed_ring = 0; - lmc_softreset(sc); - } - if(sc->failed_recv_alloc == 1){ - /* - * We failed to alloc mem in the - * interrupt handler, go through the rings - * and rebuild them - */ - sc->failed_recv_alloc = 0; - lmc_softreset(sc); - } - - - /* - * remember the timer value - */ -kick_timer: - - ticks = LMC_CSR_READ (sc, csr_gp_timer); - LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL); - sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff); - - /* - * restart this timer. 
- */ - sc->timer.expires = jiffies + (HZ); - add_timer (&sc->timer); - - spin_unlock_irqrestore(&sc->lmc_lock, flags); -} - -static int lmc_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -static const struct net_device_ops lmc_ops = { - .ndo_open = lmc_open, - .ndo_stop = lmc_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, - .ndo_siocdevprivate = lmc_siocdevprivate, - .ndo_tx_timeout = lmc_driver_timeout, - .ndo_get_stats = lmc_get_stats, -}; - -static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - lmc_softc_t *sc; - struct net_device *dev; - u16 subdevice; - u16 AdapModelNum; - int err; - static int cards_found; - - err = pcim_enable_device(pdev); - if (err) { - printk(KERN_ERR "lmc: pci enable failed: %d\n", err); - return err; - } - - err = pci_request_regions(pdev, "lmc"); - if (err) { - printk(KERN_ERR "lmc: pci_request_region failed\n"); - return err; - } - - /* - * Allocate our own device structure - */ - sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL); - if (!sc) - return -ENOMEM; - - dev = alloc_hdlcdev(sc); - if (!dev) { - printk(KERN_ERR "lmc:alloc_netdev for device failed\n"); - return -ENOMEM; - } - - - dev->type = ARPHRD_HDLC; - dev_to_hdlc(dev)->xmit = lmc_start_xmit; - dev_to_hdlc(dev)->attach = lmc_attach; - dev->netdev_ops = &lmc_ops; - dev->watchdog_timeo = HZ; /* 1 second */ - dev->tx_queue_len = 100; - sc->lmc_device = dev; - sc->name = dev->name; - sc->if_type = LMC_PPP; - sc->check = 0xBEAFCAFE; - dev->base_addr = pci_resource_start(pdev, 0); - dev->irq = pdev->irq; - pci_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - /* - * This will get the protocol layer ready and do any 1 time init's - * Must have a valid sc and dev structure - */ - lmc_proto_attach(sc); - - /* Init the spin lock so can call it latter */ - - spin_lock_init(&sc->lmc_lock); - pci_set_master(pdev); - - printk(KERN_INFO "hdlc: detected at %lx, irq %d\n", - dev->base_addr, dev->irq); - - err = register_hdlc_device(dev); - if (err) { - printk(KERN_ERR "%s: register_netdev failed.\n", dev->name); - free_netdev(dev); - return err; - } - - sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; - - /* - * - * Check either the subvendor or the subdevice, some systems reverse - * the setting in the bois, seems to be version and arch dependent? 
- * Fix the error, exchange the two values - */ - if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC) - subdevice = pdev->subsystem_vendor; - - switch (subdevice) { - case PCI_DEVICE_ID_LMC_HSSI: - printk(KERN_INFO "%s: LMC HSSI\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_HSSI; - sc->lmc_media = &lmc_hssi_media; - break; - case PCI_DEVICE_ID_LMC_DS3: - printk(KERN_INFO "%s: LMC DS3\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_DS3; - sc->lmc_media = &lmc_ds3_media; - break; - case PCI_DEVICE_ID_LMC_SSI: - printk(KERN_INFO "%s: LMC SSI\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_SSI; - sc->lmc_media = &lmc_ssi_media; - break; - case PCI_DEVICE_ID_LMC_T1: - printk(KERN_INFO "%s: LMC T1\n", dev->name); - sc->lmc_cardtype = LMC_CARDTYPE_T1; - sc->lmc_media = &lmc_t1_media; - break; - default: - printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); - unregister_hdlc_device(dev); - return -EIO; - break; - } - - lmc_initcsrs (sc, dev->base_addr, 8); - - lmc_gpio_mkinput (sc, 0xff); - sc->lmc_gpio = 0; /* drive no signals yet */ - - sc->lmc_media->defaults (sc); - - sc->lmc_media->set_link_status (sc, LMC_LINK_UP); - - /* verify that the PCI Sub System ID matches the Adapter Model number - * from the MII register - */ - AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; - - if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */ - subdevice != PCI_DEVICE_ID_LMC_T1) && - (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */ - subdevice != PCI_DEVICE_ID_LMC_SSI) && - (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */ - subdevice != PCI_DEVICE_ID_LMC_DS3) && - (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */ - subdevice != PCI_DEVICE_ID_LMC_HSSI)) - printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI" - " Subsystem ID = 0x%04x\n", - dev->name, AdapModelNum, subdevice); - - /* - * reset clock - */ - LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); - - sc->board_idx = cards_found++; - sc->extra_stats.check = STATCHECK; - sc->extra_stats.version_size = (DRIVER_VERSION << 16) + - sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); - sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; - - sc->lmc_ok = 0; - sc->last_link_status = 0; - - return 0; -} - -/* - * Called from pci when removing module. - */ -static void lmc_remove_one(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (dev) { - printk(KERN_DEBUG "%s: removing...\n", dev->name); - unregister_hdlc_device(dev); - free_netdev(dev); - } -} - -/* After this is called, packets can be sent. - * Does not initialize the addresses - */ -static int lmc_open(struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - int err; - - lmc_led_on(sc, LMC_DS3_LED0); - - lmc_dec_reset(sc); - lmc_reset(sc); - - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0); - LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), - lmc_mii_readreg(sc, 0, 17)); - - if (sc->lmc_ok) - return 0; - - lmc_softreset (sc); - - /* Since we have to use PCI bus, this should work on x86,alpha,ppc */ - if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){ - printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq); - return -EAGAIN; - } - sc->got_irq = 1; - - /* Assert Terminal Active */ - sc->lmc_miireg16 |= LMC_MII16_LED_ALL; - sc->lmc_media->set_link_status (sc, LMC_LINK_UP); - - /* - * reset to last state. 
- */ - sc->lmc_media->set_status (sc, NULL); - - /* setup default bits to be used in tulip_desc_t transmit descriptor - * -baz */ - sc->TxDescriptControlInit = ( - LMC_TDES_INTERRUPT_ON_COMPLETION - | LMC_TDES_FIRST_SEGMENT - | LMC_TDES_LAST_SEGMENT - | LMC_TDES_SECOND_ADDR_CHAINED - | LMC_TDES_DISABLE_PADDING - ); - - if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) { - /* disable 32 bit CRC generated by ASIC */ - sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE; - } - sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length); - /* Acknoledge the Terminal Active and light LEDs */ - - /* dev->flags |= IFF_UP; */ - - if ((err = lmc_proto_open(sc)) != 0) - return err; - - netif_start_queue(dev); - sc->extra_stats.tx_tbusy0++; - - /* - * select what interrupts we want to get - */ - sc->lmc_intrmask = 0; - /* Should be using the default interrupt mask defined in the .h file. */ - sc->lmc_intrmask |= (TULIP_STS_NORMALINTR - | TULIP_STS_RXINTR - | TULIP_STS_TXINTR - | TULIP_STS_ABNRMLINTR - | TULIP_STS_SYSERROR - | TULIP_STS_TXSTOPPED - | TULIP_STS_TXUNDERFLOW - | TULIP_STS_RXSTOPPED - | TULIP_STS_RXNOBUF - ); - LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); - - sc->lmc_cmdmode |= TULIP_CMD_TXRUN; - sc->lmc_cmdmode |= TULIP_CMD_RXRUN; - LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); - - sc->lmc_ok = 1; /* Run watchdog */ - - /* - * Set the if up now - pfb - */ - - sc->last_link_status = 1; - - /* - * Setup a timer for the watchdog on probe, and start it running. - * Since lmc_ok == 0, it will be a NOP for now. - */ - timer_setup(&sc->timer, lmc_watchdog, 0); - sc->timer.expires = jiffies + HZ; - add_timer (&sc->timer); - - return 0; -} - -/* Total reset to compensate for the AdTran DSU doing bad things - * under heavy load - */ - -static void lmc_running_reset (struct net_device *dev) /*fold00*/ -{ - lmc_softc_t *sc = dev_to_sc(dev); - - /* stop interrupts */ - /* Clear the interrupt mask */ - LMC_CSR_WRITE (sc, csr_intr, 0x00000000); - - lmc_dec_reset (sc); - lmc_reset (sc); - lmc_softreset (sc); - /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */ - sc->lmc_media->set_link_status (sc, 1); - sc->lmc_media->set_status (sc, NULL); - - netif_wake_queue(dev); - - sc->lmc_txfull = 0; - sc->extra_stats.tx_tbusy0++; - - sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; - LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); - - sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN); - LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); -} - - -/* This is what is called when you ifconfig down a device. - * This disables the timer for the watchdog and keepalives, - * and disables the irq for dev. 
- */ -static int lmc_close(struct net_device *dev) -{ - /* not calling release_region() as we should */ - lmc_softc_t *sc = dev_to_sc(dev); - - sc->lmc_ok = 0; - sc->lmc_media->set_link_status (sc, 0); - del_timer (&sc->timer); - lmc_proto_close(sc); - lmc_ifdown (dev); - - return 0; -} - -/* Ends the transfer of packets */ -/* When the interface goes down, this is called */ -static int lmc_ifdown (struct net_device *dev) /*fold00*/ -{ - lmc_softc_t *sc = dev_to_sc(dev); - u32 csr6; - int i; - - /* Don't let anything else go on right now */ - // dev->start = 0; - netif_stop_queue(dev); - sc->extra_stats.tx_tbusy1++; - - /* stop interrupts */ - /* Clear the interrupt mask */ - LMC_CSR_WRITE (sc, csr_intr, 0x00000000); - - /* Stop Tx and Rx on the chip */ - csr6 = LMC_CSR_READ (sc, csr_command); - csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */ - csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ - LMC_CSR_WRITE (sc, csr_command, csr6); - - sc->lmc_device->stats.rx_missed_errors += - LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; - - /* release the interrupt */ - if(sc->got_irq == 1){ - free_irq (dev->irq, dev); - sc->got_irq = 0; - } - - /* free skbuffs in the Rx queue */ - for (i = 0; i < LMC_RXDESCS; i++) - { - struct sk_buff *skb = sc->lmc_rxq[i]; - sc->lmc_rxq[i] = NULL; - sc->lmc_rxring[i].status = 0; - sc->lmc_rxring[i].length = 0; - sc->lmc_rxring[i].buffer1 = 0xDEADBEEF; - if (skb != NULL) - dev_kfree_skb(skb); - sc->lmc_rxq[i] = NULL; - } - - for (i = 0; i < LMC_TXDESCS; i++) - { - if (sc->lmc_txq[i] != NULL) - dev_kfree_skb(sc->lmc_txq[i]); - sc->lmc_txq[i] = NULL; - } - - lmc_led_off (sc, LMC_MII16_LED_ALL); - - netif_wake_queue(dev); - sc->extra_stats.tx_tbusy0++; - - return 0; -} - -/* Interrupt handling routine. This will take an incoming packet, or clean - * up after a trasmit. - */ -static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ -{ - struct net_device *dev = (struct net_device *) dev_instance; - lmc_softc_t *sc = dev_to_sc(dev); - u32 csr; - int i; - s32 stat; - unsigned int badtx; - int max_work = LMC_RXDESCS; - int handled = 0; - - spin_lock(&sc->lmc_lock); - - /* - * Read the csr to find what interrupts we have (if any) - */ - csr = LMC_CSR_READ (sc, csr_status); - - /* - * Make sure this is our interrupt - */ - if ( ! (csr & sc->lmc_intrmask)) { - goto lmc_int_fail_out; - } - - /* always go through this loop at least once */ - while (csr & sc->lmc_intrmask) { - handled = 1; - - /* - * Clear interrupt bits, we handle all case below - */ - LMC_CSR_WRITE (sc, csr_status, csr); - - /* - * One of - * - Transmit process timed out CSR5<1> - * - Transmit jabber timeout CSR5<3> - * - Transmit underflow CSR5<5> - * - Transmit Receiver buffer unavailable CSR5<7> - * - Receive process stopped CSR5<8> - * - Receive watchdog timeout CSR5<9> - * - Early transmit interrupt CSR5<10> - * - * Is this really right? Should we do a running reset for jabber? 
- * (being a WAN card and all) - */ - if (csr & TULIP_STS_ABNRMLINTR){ - lmc_running_reset (dev); - break; - } - - if (csr & TULIP_STS_RXINTR) - lmc_rx (dev); - - if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) { - - int n_compl = 0 ; - /* reset the transmit timeout detection flag -baz */ - sc->extra_stats.tx_NoCompleteCnt = 0; - - badtx = sc->lmc_taint_tx; - i = badtx % LMC_TXDESCS; - - while ((badtx < sc->lmc_next_tx)) { - stat = sc->lmc_txring[i].status; - - LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat, - sc->lmc_txring[i].length); - /* - * If bit 31 is 1 the tulip owns it break out of the loop - */ - if (stat & 0x80000000) - break; - - n_compl++ ; /* i.e., have an empty slot in ring */ - /* - * If we have no skbuff or have cleared it - * Already continue to the next buffer - */ - if (sc->lmc_txq[i] == NULL) - continue; - - /* - * Check the total error summary to look for any errors - */ - if (stat & 0x8000) { - sc->lmc_device->stats.tx_errors++; - if (stat & 0x4104) - sc->lmc_device->stats.tx_aborted_errors++; - if (stat & 0x0C00) - sc->lmc_device->stats.tx_carrier_errors++; - if (stat & 0x0200) - sc->lmc_device->stats.tx_window_errors++; - if (stat & 0x0002) - sc->lmc_device->stats.tx_fifo_errors++; - } else { - sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; - - sc->lmc_device->stats.tx_packets++; - } - - dev_consume_skb_irq(sc->lmc_txq[i]); - sc->lmc_txq[i] = NULL; - - badtx++; - i = badtx % LMC_TXDESCS; - } - - if (sc->lmc_next_tx - badtx > LMC_TXDESCS) - { - printk ("%s: out of sync pointer\n", dev->name); - badtx += LMC_TXDESCS; - } - LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); - sc->lmc_txfull = 0; - netif_wake_queue(dev); - sc->extra_stats.tx_tbusy0++; - - -#ifdef DEBUG - sc->extra_stats.dirtyTx = badtx; - sc->extra_stats.lmc_next_tx = sc->lmc_next_tx; - sc->extra_stats.lmc_txfull = sc->lmc_txfull; -#endif - sc->lmc_taint_tx = badtx; - - /* - * Why was there a break here??? 
- */ - } /* end handle transmit interrupt */ - - if (csr & TULIP_STS_SYSERROR) { - u32 error; - printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr); - error = csr>>23 & 0x7; - switch(error){ - case 0x000: - printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name); - break; - case 0x001: - printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); - break; - case 0x002: - printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); - break; - default: - printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name); - } - lmc_dec_reset (sc); - lmc_reset (sc); - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); - LMC_EVENT_LOG(LMC_EVENT_RESET2, - lmc_mii_readreg (sc, 0, 16), - lmc_mii_readreg (sc, 0, 17)); - - } - - - if(max_work-- <= 0) - break; - - /* - * Get current csr status to make sure - * we've cleared all interrupts - */ - csr = LMC_CSR_READ (sc, csr_status); - } /* end interrupt loop */ - LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr); - -lmc_int_fail_out: - - spin_unlock(&sc->lmc_lock); - - return IRQ_RETVAL(handled); -} - -static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - u32 flag; - int entry; - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - /* normal path, tbusy known to be zero */ - - entry = sc->lmc_next_tx % LMC_TXDESCS; - - sc->lmc_txq[entry] = skb; - sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data); - - LMC_CONSOLE_LOG("xmit", skb->data, skb->len); - -#ifndef GCOM - /* If the queue is less than half full, don't interrupt */ - if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2) - { - /* Do not interrupt on completion of this packet */ - flag = 0x60000000; - netif_wake_queue(dev); - } - else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2) - { - /* This generates an interrupt on completion of this packet */ - flag = 0xe0000000; - netif_wake_queue(dev); - } - else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1) - { - /* Do not interrupt on completion of this packet */ - flag = 0x60000000; - netif_wake_queue(dev); - } - else - { - /* This generates an interrupt on completion of this packet */ - flag = 0xe0000000; - sc->lmc_txfull = 1; - netif_stop_queue(dev); - } -#else - flag = LMC_TDES_INTERRUPT_ON_COMPLETION; - - if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) - { /* ring full, go busy */ - sc->lmc_txfull = 1; - netif_stop_queue(dev); - sc->extra_stats.tx_tbusy1++; - LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); - } -#endif - - - if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */ - flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */ - - /* don't pad small packets either */ - flag = sc->lmc_txring[entry].length = (skb->len) | flag | - sc->TxDescriptControlInit; - - /* set the transmit timeout flag to be checked in - * the watchdog timer handler. -baz - */ - - sc->extra_stats.tx_NoCompleteCnt++; - sc->lmc_next_tx++; - - /* give ownership to the chip */ - LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry); - sc->lmc_txring[entry].status = 0x80000000; - - /* send now! 
*/ - LMC_CSR_WRITE (sc, csr_txpoll, 0); - - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - return NETDEV_TX_OK; -} - - -static int lmc_rx(struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - int i; - int rx_work_limit = LMC_RXDESCS; - int rxIntLoopCnt; /* debug -baz */ - int localLengthErrCnt = 0; - long stat; - struct sk_buff *skb, *nsb; - u16 len; - - lmc_led_on(sc, LMC_DS3_LED3); - - rxIntLoopCnt = 0; /* debug -baz */ - - i = sc->lmc_next_rx % LMC_RXDESCS; - - while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) - { - rxIntLoopCnt++; /* debug -baz */ - len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); - if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ - if ((stat & 0x0000ffff) != 0x7fff) { - /* Oversized frame */ - sc->lmc_device->stats.rx_length_errors++; - goto skip_packet; - } - } - - if (stat & 0x00000008) { /* Catch a dribbling bit error */ - sc->lmc_device->stats.rx_errors++; - sc->lmc_device->stats.rx_frame_errors++; - goto skip_packet; - } - - - if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */ - sc->lmc_device->stats.rx_errors++; - sc->lmc_device->stats.rx_crc_errors++; - goto skip_packet; - } - - if (len > LMC_PKT_BUF_SZ) { - sc->lmc_device->stats.rx_length_errors++; - localLengthErrCnt++; - goto skip_packet; - } - - if (len < sc->lmc_crcSize + 2) { - sc->lmc_device->stats.rx_length_errors++; - sc->extra_stats.rx_SmallPktCnt++; - localLengthErrCnt++; - goto skip_packet; - } - - if(stat & 0x00004000){ - printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); - } - - len -= sc->lmc_crcSize; - - skb = sc->lmc_rxq[i]; - - /* - * We ran out of memory at some point - * just allocate an skb buff and continue. - */ - - if (!skb) { - nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); - if (nsb) { - sc->lmc_rxq[i] = nsb; - nsb->dev = dev; - sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); - } - sc->failed_recv_alloc = 1; - goto skip_packet; - } - - sc->lmc_device->stats.rx_packets++; - sc->lmc_device->stats.rx_bytes += len; - - LMC_CONSOLE_LOG("recv", skb->data, len); - - /* - * I'm not sure of the sanity of this - * Packets could be arriving at a constant - * 44.210mbits/sec and we're going to copy - * them into a new buffer?? - */ - - if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */ - /* - * If it's a large packet don't copy it just hand it up - */ - give_it_anyways: - - sc->lmc_rxq[i] = NULL; - sc->lmc_rxring[i].buffer1 = 0x0; - - skb_put (skb, len); - skb->protocol = lmc_proto_type(sc, skb); - skb_reset_mac_header(skb); - /* skb_reset_network_header(skb); */ - skb->dev = dev; - lmc_proto_netif(sc, skb); - - /* - * This skb will be destroyed by the upper layers, make a new one - */ - nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); - if (nsb) { - sc->lmc_rxq[i] = nsb; - nsb->dev = dev; - sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); - /* Transferred to 21140 below */ - } - else { - /* - * We've run out of memory, stop trying to allocate - * memory and exit the interrupt handler - * - * The chip may run out of receivers and stop - * in which care we'll try to allocate the buffer - * again. 
(once a second) - */ - sc->extra_stats.rx_BuffAllocErr++; - LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); - sc->failed_recv_alloc = 1; - goto skip_out_of_mem; - } - } - else { - nsb = dev_alloc_skb(len); - if(!nsb) { - goto give_it_anyways; - } - skb_copy_from_linear_data(skb, skb_put(nsb, len), len); - - nsb->protocol = lmc_proto_type(sc, nsb); - skb_reset_mac_header(nsb); - /* skb_reset_network_header(nsb); */ - nsb->dev = dev; - lmc_proto_netif(sc, nsb); - } - - skip_packet: - LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); - sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4; - - sc->lmc_next_rx++; - i = sc->lmc_next_rx % LMC_RXDESCS; - rx_work_limit--; - if (rx_work_limit < 0) - break; - } - - /* detect condition for LMC1000 where DSU cable attaches and fills - * descriptors with bogus packets - * - if (localLengthErrCnt > LMC_RXDESCS - 3) { - sc->extra_stats.rx_BadPktSurgeCnt++; - LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt, - sc->extra_stats.rx_BadPktSurgeCnt); - } */ - - /* save max count of receive descriptors serviced */ - if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt) - sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ - -#ifdef DEBUG - if (rxIntLoopCnt == 0) - { - for (i = 0; i < LMC_RXDESCS; i++) - { - if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT) - != DESC_OWNED_BY_DC21X4) - { - rxIntLoopCnt++; - } - } - LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0); - } -#endif - - - lmc_led_off(sc, LMC_DS3_LED3); - -skip_out_of_mem: - return 0; -} - -static struct net_device_stats *lmc_get_stats(struct net_device *dev) -{ - lmc_softc_t *sc = dev_to_sc(dev); - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; - - spin_unlock_irqrestore(&sc->lmc_lock, flags); - - return &sc->lmc_device->stats; -} - -static struct pci_driver lmc_driver = { - .name = "lmc", - .id_table = lmc_pci_tbl, - .probe = lmc_init_one, - .remove = lmc_remove_one, -}; - -module_pci_driver(lmc_driver); - -unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/ -{ - int i; - int command = (0xf6 << 10) | (devaddr << 5) | regno; - int retval = 0; - - LMC_MII_SYNC (sc); - - for (i = 15; i >= 0; i--) - { - int dataval = (command & (1 << i)) ? 0x20000 : 0; - - LMC_CSR_WRITE (sc, csr_9, dataval); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - } - - for (i = 19; i > 0; i--) - { - LMC_CSR_WRITE (sc, csr_9, 0x40000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 
1 : 0); - LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - } - - return (retval >> 1) & 0xffff; -} - -void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/ -{ - int i = 32; - int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data; - - LMC_MII_SYNC (sc); - - i = 31; - while (i >= 0) - { - int datav; - - if (command & (1 << i)) - datav = 0x20000; - else - datav = 0x00000; - - LMC_CSR_WRITE (sc, csr_9, datav); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000)); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - i--; - } - - i = 2; - while (i > 0) - { - LMC_CSR_WRITE (sc, csr_9, 0x40000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - LMC_CSR_WRITE (sc, csr_9, 0x50000); - lmc_delay (); - /* __SLOW_DOWN_IO; */ - i--; - } -} - -static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ -{ - int i; - - /* Initialize the receive rings and buffers. */ - sc->lmc_txfull = 0; - sc->lmc_next_rx = 0; - sc->lmc_next_tx = 0; - sc->lmc_taint_rx = 0; - sc->lmc_taint_tx = 0; - - /* - * Setup each one of the receiver buffers - * allocate an skbuff for each one, setup the descriptor table - * and point each buffer at the next one - */ - - for (i = 0; i < LMC_RXDESCS; i++) - { - struct sk_buff *skb; - - if (sc->lmc_rxq[i] == NULL) - { - skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2); - if(skb == NULL){ - printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name); - sc->failed_ring = 1; - break; - } - else{ - sc->lmc_rxq[i] = skb; - } - } - else - { - skb = sc->lmc_rxq[i]; - } - - skb->dev = sc->lmc_device; - - /* owned by 21140 */ - sc->lmc_rxring[i].status = 0x80000000; - - /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */ - sc->lmc_rxring[i].length = skb_tailroom(skb); - - /* use to be tail which is dumb since you're thinking why write - * to the end of the packj,et but since there's nothing there tail == data - */ - sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data); - - /* This is fair since the structure is static and we have the next address */ - sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]); - - } - - /* - * Sets end of ring - */ - if (i != 0) { - sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */ - sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */ - } - LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */ - - /* Initialize the transmit rings and buffers */ - for (i = 0; i < LMC_TXDESCS; i++) - { - if (sc->lmc_txq[i] != NULL){ /* have buffer */ - dev_kfree_skb(sc->lmc_txq[i]); /* free it */ - sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */ - } - sc->lmc_txq[i] = NULL; - sc->lmc_txring[i].status = 0x00000000; - sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]); - } - sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]); - LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring)); -} - -void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/ -{ - sc->lmc_gpio_io &= ~bits; - LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); -} - -void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/ -{ - sc->lmc_gpio_io |= bits; - LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io)); -} - -void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/ -{ - if ((~sc->lmc_miireg16) & led) /* Already on! 
*/ - return; - - sc->lmc_miireg16 &= ~led; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); -} - -void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/ -{ - if (sc->lmc_miireg16 & led) /* Already set don't do anything */ - return; - - sc->lmc_miireg16 |= led; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); -} - -static void lmc_reset(lmc_softc_t * const sc) /*fold00*/ -{ - sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - - sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET; - lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16); - - /* - * make some of the GPIO pins be outputs - */ - lmc_gpio_mkoutput(sc, LMC_GEP_RESET); - - /* - * RESET low to force state reset. This also forces - * the transmitter clock to be internal, but we expect to reset - * that later anyway. - */ - sc->lmc_gpio &= ~(LMC_GEP_RESET); - LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio); - - /* - * hold for more than 10 microseconds - */ - udelay(50); - - /* - * stop driving Xilinx-related signals - */ - lmc_gpio_mkinput(sc, LMC_GEP_RESET); - - /* - * Call media specific init routine - */ - sc->lmc_media->init(sc); - - sc->extra_stats.resetCount++; -} - -static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ -{ - u32 val; - - /* - * disable all interrupts - */ - sc->lmc_intrmask = 0; - LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask); - - /* - * Reset the chip with a software reset command. - * Wait 10 microseconds (actually 50 PCI cycles but at - * 33MHz that comes to two microseconds but wait a - * bit longer anyways) - */ - LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); - udelay(25); -#ifdef __sparc__ - sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode); - sc->lmc_busmode = 0x00100000; - sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET; - LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode); -#endif - sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command); - - /* - * We want: - * no ethernet address in frames we write - * disable padding (txdesc, padding disable) - * ignore runt frames (rdes0 bit 15) - * no receiver watchdog or transmitter jabber timer - * (csr15 bit 0,14 == 1) - * if using 16-bit CRC, turn off CRC (trans desc, crc disable) - */ - - sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS - | TULIP_CMD_FULLDUPLEX - | TULIP_CMD_PASSBADPKT - | TULIP_CMD_NOHEARTBEAT - | TULIP_CMD_PORTSELECT - | TULIP_CMD_RECEIVEALL - | TULIP_CMD_MUSTBEONE - ); - sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE - | TULIP_CMD_THRESHOLDCTL - | TULIP_CMD_STOREFWD - | TULIP_CMD_TXTHRSHLDCTL - ); - - LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode); - - /* - * disable receiver watchdog and transmit jabber - */ - val = LMC_CSR_READ(sc, csr_sia_general); - val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE); - LMC_CSR_WRITE(sc, csr_sia_general, val); -} - -static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/ - size_t csr_size) -{ - sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size; - sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size; - sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size; - sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size; - sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size; - sc->lmc_csrs.csr_status = csr_base + 5 * csr_size; - sc->lmc_csrs.csr_command = csr_base + 6 * csr_size; - sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size; - sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size; - sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size; - sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size; - sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size; - sc->lmc_csrs.csr_12 = csr_base + 12 * 
csr_size; - sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size; - sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size; - sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size; -} - -static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue) -{ - lmc_softc_t *sc = dev_to_sc(dev); - u32 csr6; - unsigned long flags; - - spin_lock_irqsave(&sc->lmc_lock, flags); - - printk("%s: Xmitter busy|\n", dev->name); - - sc->extra_stats.tx_tbusy_calls++; - if (time_is_before_jiffies(dev_trans_start(dev) + TX_TIMEOUT)) - goto bug_out; - - /* - * Chip seems to have locked up - * Reset it - * This whips out all our descriptor - * table and starts from scartch - */ - - LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, - LMC_CSR_READ (sc, csr_status), - sc->extra_stats.tx_ProcTimeout); - - lmc_running_reset (dev); - - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); - LMC_EVENT_LOG(LMC_EVENT_RESET2, - lmc_mii_readreg (sc, 0, 16), - lmc_mii_readreg (sc, 0, 17)); - - /* restart the tx processes */ - csr6 = LMC_CSR_READ (sc, csr_command); - LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002); - LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002); - - /* immediate transmit */ - LMC_CSR_WRITE (sc, csr_txpoll, 0); - - sc->lmc_device->stats.tx_errors++; - sc->extra_stats.tx_ProcTimeout++; /* -baz */ - - netif_trans_update(dev); /* prevent tx timeout */ - -bug_out: - - spin_unlock_irqrestore(&sc->lmc_lock, flags); -} diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c deleted file mode 100644 index ec1ac7b1f3fd..000000000000 --- a/drivers/net/wan/lmc/lmc_media.c +++ /dev/null @@ -1,1206 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include /* Processor type for cache alignment. */ -#include -#include - -#include - -#include "lmc.h" -#include "lmc_var.h" -#include "lmc_ioctl.h" -#include "lmc_debug.h" - -#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1 - - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - */ - -/* - * protocol independent method. - */ -static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *); - -/* - * media independent methods to check on media status, link, light LEDs, - * etc. 
- */ -static void lmc_ds3_init (lmc_softc_t * const); -static void lmc_ds3_default (lmc_softc_t * const); -static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *); -static void lmc_ds3_set_100ft (lmc_softc_t * const, int); -static int lmc_ds3_get_link_status (lmc_softc_t * const); -static void lmc_ds3_set_crc_length (lmc_softc_t * const, int); -static void lmc_ds3_set_scram (lmc_softc_t * const, int); -static void lmc_ds3_watchdog (lmc_softc_t * const); - -static void lmc_hssi_init (lmc_softc_t * const); -static void lmc_hssi_default (lmc_softc_t * const); -static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *); -static void lmc_hssi_set_clock (lmc_softc_t * const, int); -static int lmc_hssi_get_link_status (lmc_softc_t * const); -static void lmc_hssi_set_link_status (lmc_softc_t * const, int); -static void lmc_hssi_set_crc_length (lmc_softc_t * const, int); -static void lmc_hssi_watchdog (lmc_softc_t * const); - -static void lmc_ssi_init (lmc_softc_t * const); -static void lmc_ssi_default (lmc_softc_t * const); -static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *); -static void lmc_ssi_set_clock (lmc_softc_t * const, int); -static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *); -static int lmc_ssi_get_link_status (lmc_softc_t * const); -static void lmc_ssi_set_link_status (lmc_softc_t * const, int); -static void lmc_ssi_set_crc_length (lmc_softc_t * const, int); -static void lmc_ssi_watchdog (lmc_softc_t * const); - -static void lmc_t1_init (lmc_softc_t * const); -static void lmc_t1_default (lmc_softc_t * const); -static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *); -static int lmc_t1_get_link_status (lmc_softc_t * const); -static void lmc_t1_set_circuit_type (lmc_softc_t * const, int); -static void lmc_t1_set_crc_length (lmc_softc_t * const, int); -static void lmc_t1_set_clock (lmc_softc_t * const, int); -static void lmc_t1_watchdog (lmc_softc_t * const); - -static void lmc_dummy_set_1 (lmc_softc_t * const, int); -static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); - -static inline void write_av9110_bit (lmc_softc_t *, int); -static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); - -lmc_media_t lmc_ds3_media = { - .init = lmc_ds3_init, /* special media init stuff */ - .defaults = lmc_ds3_default, /* reset to default state */ - .set_status = lmc_ds3_set_status, /* reset status to state provided */ - .set_clock_source = lmc_dummy_set_1, /* set clock source */ - .set_speed = lmc_dummy_set2_1, /* set line speed */ - .set_cable_length = lmc_ds3_set_100ft, /* set cable length */ - .set_scrambler = lmc_ds3_set_scram, /* set scrambler */ - .get_link_status = lmc_ds3_get_link_status, /* get link status */ - .set_link_status = lmc_dummy_set_1, /* set link status */ - .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ - .watchdog = lmc_ds3_watchdog -}; - -lmc_media_t lmc_hssi_media = { - .init = lmc_hssi_init, /* special media init stuff */ - .defaults = lmc_hssi_default, /* reset to default state */ - .set_status = lmc_hssi_set_status, /* reset status to state provided */ - .set_clock_source = lmc_hssi_set_clock, /* set clock source */ - .set_speed = lmc_dummy_set2_1, /* set line speed */ - .set_cable_length = lmc_dummy_set_1, /* set cable length */ - .set_scrambler = lmc_dummy_set_1, /* set scrambler */ - .get_link_status = lmc_hssi_get_link_status, /* get link status */ - .set_link_status = lmc_hssi_set_link_status, /* set link 
status */ - .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ - .watchdog = lmc_hssi_watchdog -}; - -lmc_media_t lmc_ssi_media = { - .init = lmc_ssi_init, /* special media init stuff */ - .defaults = lmc_ssi_default, /* reset to default state */ - .set_status = lmc_ssi_set_status, /* reset status to state provided */ - .set_clock_source = lmc_ssi_set_clock, /* set clock source */ - .set_speed = lmc_ssi_set_speed, /* set line speed */ - .set_cable_length = lmc_dummy_set_1, /* set cable length */ - .set_scrambler = lmc_dummy_set_1, /* set scrambler */ - .get_link_status = lmc_ssi_get_link_status, /* get link status */ - .set_link_status = lmc_ssi_set_link_status, /* set link status */ - .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */ - .watchdog = lmc_ssi_watchdog -}; - -lmc_media_t lmc_t1_media = { - .init = lmc_t1_init, /* special media init stuff */ - .defaults = lmc_t1_default, /* reset to default state */ - .set_status = lmc_t1_set_status, /* reset status to state provided */ - .set_clock_source = lmc_t1_set_clock, /* set clock source */ - .set_speed = lmc_dummy_set2_1, /* set line speed */ - .set_cable_length = lmc_dummy_set_1, /* set cable length */ - .set_scrambler = lmc_dummy_set_1, /* set scrambler */ - .get_link_status = lmc_t1_get_link_status, /* get link status */ - .set_link_status = lmc_dummy_set_1, /* set link status */ - .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */ - .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */ - .watchdog = lmc_t1_watchdog -}; - -static void -lmc_dummy_set_1 (lmc_softc_t * const sc, int a) -{ -} - -static void -lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a) -{ -} - -/* - * HSSI methods - */ - -static void -lmc_hssi_init (lmc_softc_t * const sc) -{ - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200; - - lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK); -} - -static void -lmc_hssi_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); -} - -/* - * Given a user provided state, set ourselves up to match it. This will - * always reset the card if needed. 
- */ -static void -lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source); - lmc_set_protocol (sc, NULL); - - return; - } - - /* - * check for change in clock source - */ - if (ctl->clock_source && !sc->ictl.clock_source) - { - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT); - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT; - } - else if (!ctl->clock_source && sc->ictl.clock_source) - { - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - } - - lmc_set_protocol (sc, ctl); -} - -/* - * 1 == internal, 0 == external - */ -static void -lmc_hssi_set_clock (lmc_softc_t * const sc, int ie) -{ - int old; - old = sc->ictl.clock_source; - if (ie == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); - } -} - -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. - */ -static int -lmc_hssi_get_link_status (lmc_softc_t * const sc) -{ - /* - * We're using the same code as SSI since - * they're practically the same - */ - return lmc_ssi_get_link_status(sc); -} - -static void -lmc_hssi_set_link_status (lmc_softc_t * const sc, int state) -{ - if (state == LMC_LINK_UP) - sc->lmc_miireg16 |= LMC_MII16_HSSI_TA; - else - sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA; - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * 0 == 16bit, 1 == 32bit - */ -static void -lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - } - else - { - /* 16 bit */ - sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -static void -lmc_hssi_watchdog (lmc_softc_t * const sc) -{ - /* HSSI is blank */ -} - -/* - * DS3 methods - */ - -/* - * Set cable length - */ -static void -lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie) -{ - if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT) - { - sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO; - sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT; - } - else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT) - { - sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO; - sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT; - } - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -static void -lmc_ds3_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT); - sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF); - sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); -} - -/* - * Given a user provided state, set ourselves up to match it. This will - * always reset the card if needed. 
- */ -static void -lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length); - sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff); - lmc_set_protocol (sc, NULL); - - return; - } - - /* - * check for change in cable length setting - */ - if (ctl->cable_length && !sc->ictl.cable_length) - lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT); - else if (!ctl->cable_length && sc->ictl.cable_length) - lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT); - - /* - * Check for change in scrambler setting (requires reset) - */ - if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff) - lmc_ds3_set_scram (sc, LMC_CTL_ON); - else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff) - lmc_ds3_set_scram (sc, LMC_CTL_OFF); - - lmc_set_protocol (sc, ctl); -} - -static void -lmc_ds3_init (lmc_softc_t * const sc) -{ - int i; - - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245; - - /* writes zeros everywhere */ - for (i = 0; i < 21; i++) - { - lmc_mii_writereg (sc, 0, 17, i); - lmc_mii_writereg (sc, 0, 18, 0); - } - - /* set some essential bits */ - lmc_mii_writereg (sc, 0, 17, 1); - lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */ - - lmc_mii_writereg (sc, 0, 17, 5); - lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */ - - lmc_mii_writereg (sc, 0, 17, 14); - lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */ - - /* clear counters and latched bits */ - for (i = 0; i < 21; i++) - { - lmc_mii_writereg (sc, 0, 17, i); - lmc_mii_readreg (sc, 0, 18); - } -} - -/* - * 1 == DS3 payload scrambled, 0 == not scrambled - */ -static void -lmc_ds3_set_scram (lmc_softc_t * const sc, int ie) -{ - if (ie == LMC_CTL_ON) - { - sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM; - sc->ictl.scrambler_onoff = LMC_CTL_ON; - } - else - { - sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM; - sc->ictl.scrambler_onoff = LMC_CTL_OFF; - } - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. 
- */ -static int -lmc_ds3_get_link_status (lmc_softc_t * const sc) -{ - u16 link_status, link_status_11; - int ret = 1; - - lmc_mii_writereg (sc, 0, 17, 7); - link_status = lmc_mii_readreg (sc, 0, 18); - - /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions - * led0 yellow = far-end adapter is in Red alarm condition - * led1 blue = received an Alarm Indication signal - * (upstream failure) - * led2 Green = power to adapter, Gate Array loaded & driver - * attached - * led3 red = Loss of Signal (LOS) or out of frame (OOF) - * conditions detected on T3 receive signal - */ - - lmc_led_on(sc, LMC_DS3_LED2); - - if ((link_status & LMC_FRAMER_REG0_DLOS) || - (link_status & LMC_FRAMER_REG0_OOFS)){ - ret = 0; - if(sc->last_led_err[3] != 1){ - u16 r1; - lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ - r1 = lmc_mii_readreg (sc, 0, 18); - r1 &= 0xfe; - lmc_mii_writereg(sc, 0, 18, r1); - printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */ - sc->last_led_err[3] = 1; - } - else { - lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ - if(sc->last_led_err[3] == 1){ - u16 r1; - lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ - r1 = lmc_mii_readreg (sc, 0, 18); - r1 |= 0x01; - lmc_mii_writereg(sc, 0, 18, r1); - } - sc->last_led_err[3] = 0; - } - - lmc_mii_writereg(sc, 0, 17, 0x10); - link_status_11 = lmc_mii_readreg(sc, 0, 18); - if((link_status & LMC_FRAMER_REG0_AIS) || - (link_status_11 & LMC_FRAMER_REG10_XBIT)) { - ret = 0; - if(sc->last_led_err[0] != 1){ - printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name); - printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 1; - } - else { - lmc_led_off(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 0; - } - - lmc_mii_writereg (sc, 0, 17, 9); - link_status = lmc_mii_readreg (sc, 0, 18); - - if(link_status & LMC_FRAMER_REG9_RBLUE){ - ret = 0; - if(sc->last_led_err[1] != 1){ - printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED1); - sc->last_led_err[1] = 1; - } - else { - lmc_led_off(sc, LMC_DS3_LED1); - sc->last_led_err[1] = 0; - } - - return ret; -} - -/* - * 0 == 16bit, 1 == 32bit - */ -static void -lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_DS3_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - } - else - { - /* 16 bit */ - sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -static void -lmc_ds3_watchdog (lmc_softc_t * const sc) -{ - -} - - -/* - * SSI methods - */ - -static void lmc_ssi_init(lmc_softc_t * const sc) -{ - u16 mii17; - int cable; - - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; - - mii17 = lmc_mii_readreg(sc, 0, 17); - - cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; - sc->ictl.cable_type = cable; - - lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK); -} - -static void -lmc_ssi_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - - /* - * make TXCLOCK always be an output - */ - lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); - - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - sc->lmc_media->set_speed (sc, NULL); - sc->lmc_media->set_crc_length (sc, 
LMC_CTL_CRC_LENGTH_16); -} - -/* - * Given a user provided state, set ourselves up to match it. This will - * always reset the card if needed. - */ -static void -lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source); - sc->lmc_media->set_speed (sc, &sc->ictl); - lmc_set_protocol (sc, NULL); - - return; - } - - /* - * check for change in clock source - */ - if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT - && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT); - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT; - } - else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT - && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT) - { - sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT); - sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; - } - - if (ctl->clock_rate != sc->ictl.clock_rate) - sc->lmc_media->set_speed (sc, ctl); - - lmc_set_protocol (sc, ctl); -} - -/* - * 1 == internal, 0 == external - */ -static void -lmc_ssi_set_clock (lmc_softc_t * const sc, int ie) -{ - int old; - old = ie; - if (ie == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; - if(ie != old) - printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; - if(ie != old) - printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); - } -} - -static void -lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - lmc_ctl_t *ictl = &sc->ictl; - lmc_av9110_t *av; - - /* original settings for clock rate of: - * 100 Khz (8,25,0,0,2) were incorrect - * they should have been 80,125,1,3,3 - * There are 17 param combinations to produce this freq. - * For 1.5 Mhz use 120,100,1,1,2 (226 param. combinations) - */ - if (ctl == NULL) - { - av = &ictl->cardspec.ssi; - ictl->clock_rate = 1500000; - av->f = ictl->clock_rate; - av->n = 120; - av->m = 100; - av->v = 1; - av->x = 1; - av->r = 2; - - write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); - return; - } - - av = &ctl->cardspec.ssi; - - if (av->f == 0) - return; - - ictl->clock_rate = av->f; /* really, this is the rate we are */ - ictl->cardspec.ssi = *av; - - write_av9110 (sc, av->n, av->m, av->v, av->x, av->r); -} - -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. - */ -static int -lmc_ssi_get_link_status (lmc_softc_t * const sc) -{ - u16 link_status; - u32 ticks; - int ret = 1; - int hw_hdsk = 1; - - /* - * missing CTS? Hmm. If we require CTS on, we may never get the - * link to come up, so omit it in this test. - * - * Also, it seems that with a loopback cable, DCD isn't asserted, - * so just check for things like this: - * DSR _must_ be asserted. - * One of DCD or CTS must be asserted. - */ - - /* LMC 1000 (SSI) LED definitions - * led0 Green = power to adapter, Gate Array loaded & - * driver attached - * led1 Green = DSR and DTR and RTS and CTS are set - * led2 Green = Cable detected - * led3 red = No timing is available from the - * cable or the on-board frequency - * generator. 
- */ - - link_status = lmc_mii_readreg (sc, 0, 16); - - /* Is the transmit clock still available */ - ticks = LMC_CSR_READ (sc, csr_gp_timer); - ticks = 0x0000ffff - (ticks & 0x0000ffff); - - lmc_led_on (sc, LMC_MII16_LED0); - - /* ====== transmit clock determination ===== */ - if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) { - lmc_led_off(sc, LMC_MII16_LED3); - } - else if (ticks == 0 ) { /* no clock found ? */ - ret = 0; - if (sc->last_led_err[3] != 1) { - sc->extra_stats.tx_lossOfClockCnt++; - printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); - } - sc->last_led_err[3] = 1; - lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ - } - else { - if(sc->last_led_err[3] == 1) - printk(KERN_WARNING "%s: Clock Returned\n", sc->name); - sc->last_led_err[3] = 0; - lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */ - } - - if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */ - ret = 0; - hw_hdsk = 0; - } - -#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE - if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){ - ret = 0; - hw_hdsk = 0; - } -#endif - - if(hw_hdsk == 0){ - if(sc->last_led_err[1] != 1) - printk(KERN_WARNING "%s: DSR not asserted\n", sc->name); - sc->last_led_err[1] = 1; - lmc_led_off(sc, LMC_MII16_LED1); - } - else { - if(sc->last_led_err[1] != 0) - printk(KERN_WARNING "%s: DSR now asserted\n", sc->name); - sc->last_led_err[1] = 0; - lmc_led_on(sc, LMC_MII16_LED1); - } - - if(ret == 1) { - lmc_led_on(sc, LMC_MII16_LED2); /* Over all good status? */ - } - - return ret; -} - -static void -lmc_ssi_set_link_status (lmc_softc_t * const sc, int state) -{ - if (state == LMC_LINK_UP) - { - sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); - printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS); - printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS); - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); - -} - -/* - * 0 == 16bit, 1 == 32bit - */ -static void -lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_SSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; - - } - else - { - /* 16 bit */ - sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * These are bits to program the ssi frequency generator - */ -static inline void -write_av9110_bit (lmc_softc_t * sc, int c) -{ - /* - * set the data bit as we need it. - */ - sc->lmc_gpio &= ~(LMC_GEP_CLK); - if (c & 0x01) - sc->lmc_gpio |= LMC_GEP_DATA; - else - sc->lmc_gpio &= ~(LMC_GEP_DATA); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * set the clock to high - */ - sc->lmc_gpio |= LMC_GEP_CLK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * set the clock to low again. - */ - sc->lmc_gpio &= ~(LMC_GEP_CLK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); -} - -static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) -{ - int i; - -#if 0 - printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n", - LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r); -#endif - - sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR; - sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK - * as outputs. 
- */ - lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK - | LMC_GEP_SSI_GENERATOR)); - - sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - - /* - * a shifting we will go... - */ - for (i = 0; i < 7; i++) - write_av9110_bit (sc, n >> i); - for (i = 0; i < 7; i++) - write_av9110_bit (sc, m >> i); - for (i = 0; i < 1; i++) - write_av9110_bit (sc, v >> i); - for (i = 0; i < 2; i++) - write_av9110_bit (sc, x >> i); - for (i = 0; i < 2; i++) - write_av9110_bit (sc, r >> i); - for (i = 0; i < 5; i++) - write_av9110_bit (sc, 0x17 >> i); - - /* - * stop driving serial-related signals - */ - lmc_gpio_mkinput (sc, - (LMC_GEP_DATA | LMC_GEP_CLK - | LMC_GEP_SSI_GENERATOR)); -} - -static void lmc_ssi_watchdog(lmc_softc_t * const sc) -{ - u16 mii17 = lmc_mii_readreg(sc, 0, 17); - if (((mii17 >> 3) & 7) == 7) - lmc_led_off(sc, LMC_MII16_LED2); - else - lmc_led_on(sc, LMC_MII16_LED2); -} - -/* - * T1 methods - */ - -/* - * The framer regs are multiplexed through MII regs 17 & 18 - * write the register address to MII reg 17 and the * data to MII reg 18. */ -static void -lmc_t1_write (lmc_softc_t * const sc, int a, int d) -{ - lmc_mii_writereg (sc, 0, 17, a); - lmc_mii_writereg (sc, 0, 18, d); -} - -/* Save a warning -static int -lmc_t1_read (lmc_softc_t * const sc, int a) -{ - lmc_mii_writereg (sc, 0, 17, a); - return lmc_mii_readreg (sc, 0, 18); -} -*/ - - -static void -lmc_t1_init (lmc_softc_t * const sc) -{ - u16 mii16; - int i; - - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; - mii16 = lmc_mii_readreg (sc, 0, 16); - - /* reset 8370 */ - mii16 &= ~LMC_MII16_T1_RST; - lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST); - lmc_mii_writereg (sc, 0, 16, mii16); - - /* set T1 or E1 line. Uses sc->lmcmii16 reg in function so update it */ - sc->lmc_miireg16 = mii16; - lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1); - mii16 = sc->lmc_miireg16; - - lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */ - lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */ - lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */ - lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */ - lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */ - lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */ - lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */ - lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */ - lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */ - lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */ - lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */ - lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */ - lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */ - lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */ - lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */ - lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */ - lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */ - lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */ - lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */ - lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */ - lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */ - lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */ - lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */ - lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */ - lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */ - lmc_t1_write (sc, 0xD0, 0x47); /* 
SBI_CR - sys bus iface config */ - lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */ - lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */ - for (i = 0; i < 32; i++) - { - lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus per-channel ctl */ - lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */ - lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */ - } - for (i = 1; i < 25; i++) - { - lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */ - } - - mii16 |= LMC_MII16_T1_XOE; - lmc_mii_writereg (sc, 0, 16, mii16); - sc->lmc_miireg16 = mii16; -} - -static void -lmc_t1_default (lmc_softc_t * const sc) -{ - sc->lmc_miireg16 = LMC_MII16_LED_ALL; - sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN); - sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); - sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16); - /* Right now we can only clock from out internal source */ - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; -} -/* * Given a user provided state, set ourselves up to match it. This will * always reset the card if needed. - */ -static void -lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (ctl == NULL) - { - sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type); - lmc_set_protocol (sc, NULL); - - return; - } - /* - * check for change in circuit type */ - if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1 - && sc->ictl.circuit_type == - LMC_CTL_CIRCUIT_TYPE_E1) sc->lmc_media->set_circuit_type (sc, - LMC_CTL_CIRCUIT_TYPE_E1); - else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1 - && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1) - sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1); - lmc_set_protocol (sc, ctl); -} -/* - * return hardware link status. - * 0 == link is down, 1 == link is up. - */ static int -lmc_t1_get_link_status (lmc_softc_t * const sc) -{ - u16 link_status; - int ret = 1; - - /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions - * led0 yellow = far-end adapter is in Red alarm condition - * led1 blue = received an Alarm Indication signal - * (upstream failure) - * led2 Green = power to adapter, Gate Array loaded & driver - * attached - * led3 red = Loss of Signal (LOS) or out of frame (OOF) - * conditions detected on T3 receive signal - */ - lmc_led_on(sc, LMC_DS3_LED2); - - lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS); - link_status = lmc_mii_readreg (sc, 0, 18); - - - if (link_status & T1F_RAIS) { /* turn on blue LED */ - ret = 0; - if(sc->last_led_err[1] != 1){ - printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED1); - sc->last_led_err[1] = 1; - } - else { - if(sc->last_led_err[1] != 0){ - printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name); - } - lmc_led_off (sc, LMC_DS3_LED1); - sc->last_led_err[1] = 0; - } - - /* - * Yellow Alarm is nasty evil stuff, looks at data patterns - * inside the channel and confuses it with HDLC framing - * ignore all yellow alarms. 
- * - * Do listen to MultiFrame Yellow alarm which while implemented - * different ways isn't in the channel and hence somewhat - * more reliable - */ - - if (link_status & T1F_RMYEL) { - ret = 0; - if(sc->last_led_err[0] != 1){ - printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 1; - } - else { - if(sc->last_led_err[0] != 0){ - printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name); - } - lmc_led_off(sc, LMC_DS3_LED0); - sc->last_led_err[0] = 0; - } - - /* - * Loss of signal and los of frame - * Use the green bit to identify which one lit the led - */ - if(link_status & T1F_RLOF){ - ret = 0; - if(sc->last_led_err[3] != 1){ - printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED3); - sc->last_led_err[3] = 1; - - } - else { - if(sc->last_led_err[3] != 0){ - printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name); - } - if( ! (link_status & T1F_RLOS)) - lmc_led_off(sc, LMC_DS3_LED3); - sc->last_led_err[3] = 0; - } - - if(link_status & T1F_RLOS){ - ret = 0; - if(sc->last_led_err[2] != 1){ - printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name); - } - lmc_led_on(sc, LMC_DS3_LED3); - sc->last_led_err[2] = 1; - - } - else { - if(sc->last_led_err[2] != 0){ - printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name); - } - if( ! (link_status & T1F_RLOF)) - lmc_led_off(sc, LMC_DS3_LED3); - sc->last_led_err[2] = 0; - } - - sc->lmc_xinfo.t1_alarm1_status = link_status; - - lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS); - sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18); - - return ret; -} - -/* - * 1 == T1 Circuit Type , 0 == E1 Circuit Type - */ -static void -lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie) -{ - if (ie == LMC_CTL_CIRCUIT_TYPE_T1) { - sc->lmc_miireg16 |= LMC_MII16_T1_Z; - sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1; - printk(KERN_INFO "%s: In T1 Mode\n", sc->name); - } - else { - sc->lmc_miireg16 &= ~LMC_MII16_T1_Z; - sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1; - printk(KERN_INFO "%s: In E1 Mode\n", sc->name); - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); - -} - -/* - * 0 == 16bit, 1 == 32bit */ -static void -lmc_t1_set_crc_length (lmc_softc_t * const sc, int state) -{ - if (state == LMC_CTL_CRC_LENGTH_32) - { - /* 32 bit */ - sc->lmc_miireg16 |= LMC_MII16_T1_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4; - - } - else - { - /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC; - sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16; - sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2; - - } - - lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16); -} - -/* - * 1 == internal, 0 == external - */ -static void -lmc_t1_set_clock (lmc_softc_t * const sc, int ie) -{ - int old; - old = ie; - if (ie == LMC_CTL_CLOCK_SOURCE_EXT) - { - sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK); - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS); - } - else - { - sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK; - LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); - sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT; - if(old != ie) - printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS); - } -} - -static void -lmc_t1_watchdog (lmc_softc_t * const sc) -{ -} - -static void -lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl) -{ - if (!ctl) - sc->ictl.keepalive_onoff = LMC_CTL_ON; -} 
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c deleted file mode 100644 index e5487616a816..000000000000 --- a/drivers/net/wan/lmc/lmc_proto.c +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - * - * With Help By: - * David Boggs - * Ron Crane - * Allan Cox - * - * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* Processor type for cache alignment. */ -#include -#include -#include - -#include "lmc.h" -#include "lmc_var.h" -#include "lmc_debug.h" -#include "lmc_ioctl.h" -#include "lmc_proto.h" - -// attach -void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ -{ - if (sc->if_type == LMC_NET) { - struct net_device *dev = sc->lmc_device; - /* - * They set a few basics because they don't use HDLC - */ - dev->flags |= IFF_POINTOPOINT; - dev->hard_header_len = 0; - dev->addr_len = 0; - } -} - -int lmc_proto_open(lmc_softc_t *sc) -{ - int ret = 0; - - if (sc->if_type == LMC_PPP) { - ret = hdlc_open(sc->lmc_device); - if (ret < 0) - printk(KERN_WARNING "%s: HDLC open failed: %d\n", - sc->name, ret); - } - return ret; -} - -void lmc_proto_close(lmc_softc_t *sc) -{ - if (sc->if_type == LMC_PPP) - hdlc_close(sc->lmc_device); -} - -__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ -{ - switch(sc->if_type){ - case LMC_PPP: - return hdlc_type_trans(skb, sc->lmc_device); - case LMC_NET: - return htons(ETH_P_802_2); - case LMC_RAW: /* Packet type for skbuff kind of useless */ - return htons(ETH_P_802_2); - default: - printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name); - return htons(ETH_P_802_2); - } -} - -void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ -{ - switch(sc->if_type){ - case LMC_PPP: - case LMC_NET: - default: - netif_rx(skb); - break; - case LMC_RAW: - break; - } -} diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h deleted file mode 100644 index e56e7072de44..000000000000 --- a/drivers/net/wan/lmc/lmc_proto.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LMC_PROTO_H_ -#define _LMC_PROTO_H_ - -#include - -void lmc_proto_attach(lmc_softc_t *sc); -int lmc_proto_open(lmc_softc_t *sc); -void lmc_proto_close(lmc_softc_t *sc); -__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); -void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); - -static inline lmc_softc_t* dev_to_sc(struct net_device *dev) -{ - return (lmc_softc_t *)dev_to_hdlc(dev)->priv; -} - -#endif diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h deleted file mode 100644 index 99f0aa787a35..000000000000 --- a/drivers/net/wan/lmc/lmc_var.h +++ /dev/null @@ -1,468 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef _LMC_VAR_H_ -#define _LMC_VAR_H_ - - /* - * Copyright (c) 1997-2000 LAN Media Corporation (LMC) - * All rights reserved. 
www.lanmedia.com - * - * This code is written by: - * Andrew Stanley-Jones (asj@cban.com) - * Rob Braun (bbraun@vix.com), - * Michael Graff (explorer@vix.com) and - * Matt Thomas (matt@3am-software.com). - */ - -#include - -/* - * basic definitions used in lmc include files - */ - -typedef struct lmc___softc lmc_softc_t; -typedef struct lmc___media lmc_media_t; -typedef struct lmc___ctl lmc_ctl_t; - -#define lmc_csrptr_t unsigned long - -#define LMC_REG_RANGE 0x80 - -#define LMC_PRINTF_FMT "%s" -#define LMC_PRINTF_ARGS (sc->lmc_device->name) - -#define TX_TIMEOUT (2*HZ) - -#define LMC_TXDESCS 32 -#define LMC_RXDESCS 32 - -#define LMC_LINK_UP 1 -#define LMC_LINK_DOWN 0 - -/* These macros for generic read and write to and from the dec chip */ -#define LMC_CSR_READ(sc, csr) \ - inl((sc)->lmc_csrs.csr) -#define LMC_CSR_WRITE(sc, reg, val) \ - outl((val), (sc)->lmc_csrs.reg) - -//#ifdef _LINUX_DELAY_H -// #define SLOW_DOWN_IO udelay(2); -// #undef __SLOW_DOWN_IO -// #define __SLOW_DOWN_IO udelay(2); -//#endif - -#define DELAY(n) SLOW_DOWN_IO - -#define lmc_delay() inl(sc->lmc_csrs.csr_9) - -/* This macro sync's up with the mii so that reads and writes can take place */ -#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \ - LMC_CSR_WRITE((sc), csr_9, 0x20000); \ - lmc_delay(); \ - LMC_CSR_WRITE((sc), csr_9, 0x30000); \ - lmc_delay(); \ - n--; }} while(0) - -struct lmc_regfile_t { - lmc_csrptr_t csr_busmode; /* CSR0 */ - lmc_csrptr_t csr_txpoll; /* CSR1 */ - lmc_csrptr_t csr_rxpoll; /* CSR2 */ - lmc_csrptr_t csr_rxlist; /* CSR3 */ - lmc_csrptr_t csr_txlist; /* CSR4 */ - lmc_csrptr_t csr_status; /* CSR5 */ - lmc_csrptr_t csr_command; /* CSR6 */ - lmc_csrptr_t csr_intr; /* CSR7 */ - lmc_csrptr_t csr_missed_frames; /* CSR8 */ - lmc_csrptr_t csr_9; /* CSR9 */ - lmc_csrptr_t csr_10; /* CSR10 */ - lmc_csrptr_t csr_11; /* CSR11 */ - lmc_csrptr_t csr_12; /* CSR12 */ - lmc_csrptr_t csr_13; /* CSR13 */ - lmc_csrptr_t csr_14; /* CSR14 */ - lmc_csrptr_t csr_15; /* CSR15 */ -}; - -#define csr_enetrom csr_9 /* 21040 */ -#define csr_reserved csr_10 /* 21040 */ -#define csr_full_duplex csr_11 /* 21040 */ -#define csr_bootrom csr_10 /* 21041/21140A/?? 
*/ -#define csr_gp csr_12 /* 21140* */ -#define csr_watchdog csr_15 /* 21140* */ -#define csr_gp_timer csr_11 /* 21041/21140* */ -#define csr_srom_mii csr_9 /* 21041/21140* */ -#define csr_sia_status csr_12 /* 2104x */ -#define csr_sia_connectivity csr_13 /* 2104x */ -#define csr_sia_tx_rx csr_14 /* 2104x */ -#define csr_sia_general csr_15 /* 2104x */ - -/* tulip length/control transmit descriptor definitions - * used to define bits in the second tulip_desc_t field (length) - * for the transmit descriptor -baz */ - -#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF)) -#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800)) -#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000)) -#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000)) -#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000)) -#define LMC_TDES_END_OF_RING ((u32)(0x02000000)) -#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000)) -#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000)) -#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000)) -#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000)) -#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000)) -#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000)) - -#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 -#define TDES_COLLISION_COUNT_BIT_NUMBER 3 - -/* Constants for the RCV descriptor RDES */ - -#define LMC_RDES_OVERFLOW ((u32)(0x00000001)) -#define LMC_RDES_CRC_ERROR ((u32)(0x00000002)) -#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004)) -#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008)) -#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010)) -#define LMC_RDES_FRAME_TYPE ((u32)(0x00000020)) -#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040)) -#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080)) -#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100)) -#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200)) -#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400)) -#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800)) -#define LMC_RDES_DATA_TYPE ((u32)(0x00003000)) -#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000)) -#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000)) -#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000)) -#define LMC_RDES_OWN_BIT ((u32)(0x80000000)) - -#define RDES_FRAME_LENGTH_BIT_NUMBER 16 - -#define LMC_RDES_ERROR_MASK ( (u32)( \ - LMC_RDES_OVERFLOW \ - | LMC_RDES_DRIBBLING_BIT \ - | LMC_RDES_REPORT_ON_MII_ERR \ - | LMC_RDES_COLLISION_SEEN ) ) - - -/* - * Ioctl info - */ - -typedef struct { - u32 n; - u32 m; - u32 v; - u32 x; - u32 r; - u32 f; - u32 exact; -} lmc_av9110_t; - -/* - * Common structure passed to the ioctl code. - */ -struct lmc___ctl { - u32 cardtype; - u32 clock_source; /* HSSI, T1 */ - u32 clock_rate; /* T1 */ - u32 crc_length; - u32 cable_length; /* DS3 */ - u32 scrambler_onoff; /* DS3 */ - u32 cable_type; /* T1 */ - u32 keepalive_onoff; /* protocol */ - u32 ticks; /* ticks/sec */ - union { - lmc_av9110_t ssi; - } cardspec; - u32 circuit_type; /* T1 or E1 */ -}; - - -/* - * Careful, look at the data sheet, there's more to this - * structure than meets the eye. It should probably be: - * - * struct tulip_desc_t { - * u8 own:1; - * u32 status:31; - * u32 control:10; - * u32 buffer1; - * u32 buffer2; - * }; - * You could also expand status control to provide more bit information - */ - -struct tulip_desc_t { - s32 status; - s32 length; - u32 buffer1; - u32 buffer2; -}; - -/* - * media independent methods to check on media status, link, light LEDs, - * etc. 
- */ -struct lmc___media { - void (* init)(lmc_softc_t * const); - void (* defaults)(lmc_softc_t * const); - void (* set_status)(lmc_softc_t * const, lmc_ctl_t *); - void (* set_clock_source)(lmc_softc_t * const, int); - void (* set_speed)(lmc_softc_t * const, lmc_ctl_t *); - void (* set_cable_length)(lmc_softc_t * const, int); - void (* set_scrambler)(lmc_softc_t * const, int); - int (* get_link_status)(lmc_softc_t * const); - void (* set_link_status)(lmc_softc_t * const, int); - void (* set_crc_length)(lmc_softc_t * const, int); - void (* set_circuit_type)(lmc_softc_t * const, int); - void (* watchdog)(lmc_softc_t * const); -}; - - -#define STATCHECK 0xBEEFCAFE - -struct lmc_extra_statistics -{ - u32 version_size; - u32 lmc_cardtype; - - u32 tx_ProcTimeout; - u32 tx_IntTimeout; - u32 tx_NoCompleteCnt; - u32 tx_MaxXmtsB4Int; - u32 tx_TimeoutCnt; - u32 tx_OutOfSyncPtr; - u32 tx_tbusy0; - u32 tx_tbusy1; - u32 tx_tbusy_calls; - u32 resetCount; - u32 lmc_txfull; - u32 tbusy; - u32 dirtyTx; - u32 lmc_next_tx; - u32 otherTypeCnt; - u32 lastType; - u32 lastTypeOK; - u32 txLoopCnt; - u32 usedXmtDescripCnt; - u32 txIndexCnt; - u32 rxIntLoopCnt; - - u32 rx_SmallPktCnt; - u32 rx_BadPktSurgeCnt; - u32 rx_BuffAllocErr; - u32 tx_lossOfClockCnt; - - /* T1 error counters */ - u32 framingBitErrorCount; - u32 lineCodeViolationCount; - - u32 lossOfFrameCount; - u32 changeOfFrameAlignmentCount; - u32 severelyErroredFrameCount; - - u32 check; -}; - -typedef struct lmc_xinfo { - u32 Magic0; /* BEEFCAFE */ - - u32 PciCardType; - u32 PciSlotNumber; /* PCI slot number */ - - u16 DriverMajorVersion; - u16 DriverMinorVersion; - u16 DriverSubVersion; - - u16 XilinxRevisionNumber; - u16 MaxFrameSize; - - u16 t1_alarm1_status; - u16 t1_alarm2_status; - - int link_status; - u32 mii_reg16; - - u32 Magic1; /* DEADBEEF */ -} LMC_XINFO; - - -/* - * forward decl - */ -struct lmc___softc { - char *name; - u8 board_idx; - struct lmc_extra_statistics extra_stats; - struct net_device *lmc_device; - - int hang, rxdesc, bad_packet, some_counter; - u32 txgo; - struct lmc_regfile_t lmc_csrs; - volatile u32 lmc_txtick; - volatile u32 lmc_rxtick; - u32 lmc_flags; - u32 lmc_intrmask; /* our copy of csr_intr */ - u32 lmc_cmdmode; /* our copy of csr_cmdmode */ - u32 lmc_busmode; /* our copy of csr_busmode */ - u32 lmc_gpio_io; /* state of in/out settings */ - u32 lmc_gpio; /* state of outputs */ - struct sk_buff* lmc_txq[LMC_TXDESCS]; - struct sk_buff* lmc_rxq[LMC_RXDESCS]; - volatile - struct tulip_desc_t lmc_rxring[LMC_RXDESCS]; - volatile - struct tulip_desc_t lmc_txring[LMC_TXDESCS]; - unsigned int lmc_next_rx, lmc_next_tx; - volatile - unsigned int lmc_taint_tx, lmc_taint_rx; - int lmc_tx_start, lmc_txfull; - int lmc_txbusy; - u16 lmc_miireg16; - int lmc_ok; - int last_link_status; - int lmc_cardtype; - u32 last_frameerr; - lmc_media_t *lmc_media; - struct timer_list timer; - lmc_ctl_t ictl; - u32 TxDescriptControlInit; - - int tx_TimeoutInd; /* additional driver state */ - int tx_TimeoutDisplay; - unsigned int lastlmc_taint_tx; - int lasttx_packets; - u32 tx_clockState; - u32 lmc_crcSize; - LMC_XINFO lmc_xinfo; - char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ - char lmc_timing; /* for HSSI and SSI */ - int got_irq; - - char last_led_err[4]; - - u32 last_int; - u32 num_int; - - spinlock_t lmc_lock; - u16 if_type; /* HDLC/PPP or NET */ - - /* Failure cases */ - u8 failed_ring; - u8 failed_recv_alloc; - - /* Structure check */ - u32 check; -}; - -#define LMC_PCI_TIME 1 -#define LMC_EXT_TIME 0 - -#define PKT_BUF_SZ 1542 /* was 
1536 */ - -/* CSR5 settings */ -#define TIMER_INT 0x00000800 -#define TP_LINK_FAIL 0x00001000 -#define TP_LINK_PASS 0x00000010 -#define NORMAL_INT 0x00010000 -#define ABNORMAL_INT 0x00008000 -#define RX_JABBER_INT 0x00000200 -#define RX_DIED 0x00000100 -#define RX_NOBUFF 0x00000080 -#define RX_INT 0x00000040 -#define TX_FIFO_UNDER 0x00000020 -#define TX_JABBER 0x00000008 -#define TX_NOBUFF 0x00000004 -#define TX_DIED 0x00000002 -#define TX_INT 0x00000001 - -/* CSR6 settings */ -#define OPERATION_MODE 0x00000200 /* Full Duplex */ -#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */ -#define RECEIVE_ALL 0x40000000 /* Receive All */ -#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */ - -/* Dec control registers CSR6 as well */ -#define LMC_DEC_ST 0x00002000 -#define LMC_DEC_SR 0x00000002 - -/* CSR15 settings */ -#define RECV_WATCHDOG_DISABLE 0x00000010 -#define JABBER_DISABLE 0x00000001 - -/* More settings */ -/* - * aSR6 -- Command (Operation Mode) Register - */ -#define TULIP_CMD_RECEIVEALL 0x40000000L /* (RW) Receivel all frames? */ -#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */ -#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */ -#define TULIP_CMD_STOREFWD 0x00200000L /* (RW) Store and Forward (21140) */ -#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */ -#define TULIP_CMD_PORTSELECT 0x00040000L /* (RW) Post Select (100Mb) (21140) */ -#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */ -#define TULIP_CMD_OPERMODE 0x00000C00L /* (RW) Operating Mode */ -#define TULIP_CMD_PROMISCUOUS 0x00000041L /* (RW) Promiscuous Mode */ -#define TULIP_CMD_PASSBADPKT 0x00000008L /* (RW) Pass Bad Frames */ -#define TULIP_CMD_THRESHOLDCTL 0x0000C000L /* (RW) Threshold Control */ - -#define TULIP_GP_PINSET 0x00000100L -#define TULIP_BUSMODE_SWRESET 0x00000001L -#define TULIP_WATCHDOG_TXDISABLE 0x00000001L -#define TULIP_WATCHDOG_RXDISABLE 0x00000010L - -#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */ -#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */ -#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */ -#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */ -#define TULIP_STS_GTE 0x00000800L /* (RW) General Pupose Timer Exp */ -#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */ -#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */ -#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */ -#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */ -#define TULIP_STS_RXINTR 0x00000040L /* (RW) Receive Interrupt */ -#define TULIP_STS_TXUNDERFLOW 0x00000020L /* (RW) Transmit Underflow */ -#define TULIP_STS_TXJABER 0x00000008L /* (RW) Jabber timeout */ -#define TULIP_STS_TXNOBUF 0x00000004L -#define TULIP_STS_TXSTOPPED 0x00000002L /* (RW) Transmit Process Stopped */ -#define TULIP_STS_TXINTR 0x00000001L /* (RW) Transmit Interrupt */ - -#define TULIP_STS_RXS_STOPPED 0x00000000L /* 000 - Stopped */ - -#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receive Process Stopped */ -#define TULIP_STS_RXNOBUF 0x00000080L - -#define TULIP_CMD_TXRUN 0x00002000L /* (RW) Start/Stop Transmitter */ -#define TULIP_CMD_RXRUN 0x00000002L /* (RW) Start/Stop Receive Filtering */ -#define TULIP_DSTS_TxDEFERRED 0x00000001 /* Initially Deferred */ -#define TULIP_DSTS_OWNER 0x80000000 /* Owner (1 = 21040) */ -#define TULIP_DSTS_RxMIIERR 0x00000008 -#define LMC_DSTS_ERRSUM (TULIP_DSTS_RxMIIERR) - 
-#define TULIP_DEFAULT_INTR_MASK (TULIP_STS_NORMALINTR \ - | TULIP_STS_RXINTR \ - | TULIP_STS_TXINTR \ - | TULIP_STS_ABNRMLINTR \ - | TULIP_STS_SYSERROR \ - | TULIP_STS_TXSTOPPED \ - | TULIP_STS_TXUNDERFLOW\ - | TULIP_STS_RXSTOPPED ) - -#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000)) -#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000)) - -#ifndef TULIP_CMD_RECEIVEALL -#define TULIP_CMD_RECEIVEALL 0x40000000L -#endif - -/* Adapter module number */ -#define LMC_ADAP_HSSI 2 -#define LMC_ADAP_DS3 3 -#define LMC_ADAP_SSI 4 -#define LMC_ADAP_T1 5 - -#define LMC_MTU 1500 - -#define LMC_CRC_LEN_16 2 /* 16-bit CRC */ -#define LMC_CRC_LEN_32 4 - -#endif /* _LMC_VAR_H_ */ -- cgit v1.2.3 From 8dd7cdb0f473407fdcd231c3eccef07371fd5e58 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 5 Apr 2022 17:15:09 +0200 Subject: bnx2x: Fix undefined behavior due to shift overflowing the constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c: In function ‘bnx2x_check_blocks_with_parity3’: drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:4917:4: error: case label does not reduce to an integer constant case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: ^~~~ See https://lore.kernel.org/r/YkwQ6%2BtIH8GQpuct@zn.tnic for the gory details as to why it triggers with older gccs only. Signed-off-by: Borislav Petkov Cc: Ariel Elior Cc: Sudarsana Kalluru Cc: Manish Chopra Cc: Paolo Abeni Link: https://lore.kernel.org/r/20220405151517.29753-4-bp@alien8.de Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5caa75b41b73..881ac33fe914 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -6218,7 +6218,7 @@ #define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) #define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12) #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31) #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) #define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) #define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) -- cgit v1.2.3 From e8bd70250a821edb541c3abe1eacdad9f8dc7adf Mon Sep 17 00:00:00 2001 From: Volodymyr Mytnyk Date: Tue, 5 Apr 2022 16:20:14 +0300 Subject: prestera: acl: add action hw_stats support Currently, when user adds a tc action and the action gets offloaded, the user expects the HW stats to be counted also. This limits the amount of supported offloaded filters, as HW counter resources may be quite limited. Without counter assigned, the HW is capable to carry much more filters. To resolve the issue above, the following types of HW stats are offloaded and supported by the driver: any - current default, user does not care about the type. delayed - polled from HW periodically. disabled - no HW stats needed. immediate - not supported. 
Example: tc filter add dev PORT ingress proto ip flower skip_sw ip_proto 0x11 \ action drop tc filter add dev PORT ingress proto ip flower skip_sw ip_proto 0x12 \ action drop hw_stats disabled tc filter add dev sw1p1 ingress proto ip flower skip_sw ip_proto 0x14 \ action drop hw_stats delayed Signed-off-by: Volodymyr Mytnyk Link: https://lore.kernel.org/r/1649164814-18731-1-git-send-email-volodymyr.mytnyk@plvision.eu Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/prestera/prestera_acl.c | 7 ------- .../net/ethernet/marvell/prestera/prestera_flower.c | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index 47c899c08951..e5627782fac6 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -421,13 +421,6 @@ int prestera_acl_rule_add(struct prestera_switch *sw, rule->re_arg.vtcam_id = ruleset->vtcam_id; rule->re_key.prio = rule->priority; - /* setup counter */ - rule->re_arg.count.valid = true; - err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index, - &rule->re_arg.count.client); - if (err) - goto err_rule_add; - rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key); err = WARN_ON(rule->re) ? -EEXIST : 0; if (err) diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index 921959a980ee..c12b09ac6559 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -70,6 +70,24 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block, if (!flow_action_has_entries(flow_action)) return 0; + if (!flow_action_mixed_hw_stats_check(flow_action, extack)) + return -EOPNOTSUPP; + + act = flow_action_first_entry_get(flow_action); + if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) { + /* Nothing to do */ + } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) { + /* setup counter first */ + rule->re_arg.count.valid = true; + err = prestera_acl_chain_to_client(chain_index, + &rule->re_arg.count.client); + if (err) + return err; + } else { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); + return -EOPNOTSUPP; + } + flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_ACCEPT: -- cgit v1.2.3 From 135a161a5ea9e4f01b37defa53e7f5db43a7ca99 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 21 Mar 2022 11:59:50 +0100 Subject: ice: switch: add and use u16[] aliases to ice_adv_lkup_elem::{h, m}_u ice_adv_lkup_elem fields h_u and m_u are being accessed as raw u16 arrays in several places. To reduce cast and braces burden, add permanent array-of-u16 aliases with the same size as the `union ice_prot_hdr` itself via anonymous unions to the actual struct declaration, and just access them directly. This: - removes the need to cast the union to u16[] and then dereference it each time -> reduces the horizon for potential bugs; - improves -Warray-bounds coverage -- the array size is now known at compilation time; - addresses cppcheck complaints. 
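For illustration only (not part of the driver), a minimal stand-alone C sketch of the same anonymous-union aliasing idea; the union, struct and field names below are made up for the example and are not the ice definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a protocol header union such as ice_prot_hdr. */
union proto_hdr {
	uint8_t raw[16];
	struct { uint16_t src, dst; } ports;
};

struct lookup_elem {
	union {
		union proto_hdr m_u;	/* mask of header values */
		/* alias used to iterate over the mask word by word */
		uint16_t m_raw[sizeof(union proto_hdr) / sizeof(uint16_t)];
	};
};

int main(void)
{
	struct lookup_elem lkup = { .m_u.ports.src = 0xffff };
	size_t i, words = 0;

	/* No "(u16 *)&lkup.m_u" cast needed; the array bound is known. */
	for (i = 0; i < sizeof(lkup.m_raw) / sizeof(lkup.m_raw[0]); i++)
		if (lkup.m_raw[i])
			words++;

	printf("non-zero mask words: %zu\n", words);	/* prints 1 */
	return 0;
}

The compiler now sees a fixed-size u16 array, which is what enables the
-Warray-bounds coverage mentioned above.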
Signed-off-by: Alexander Lobakin Reviewed-by: Michal Swiatkowski Tested-by: Marcin Szycik Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 15 +++++++-------- drivers/net/ethernet/intel/ice/ice_switch.h | 12 ++++++++++-- 2 files changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 25b8f6f726eb..075df2474688 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -5811,12 +5811,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * over any significant packet data. */ for (j = 0; j < len / sizeof(u16); j++) - if (((u16 *)&lkups[i].m_u)[j]) + if (lkups[i].m_raw[j]) ((u16 *)(pkt + offset))[j] = (((u16 *)(pkt + offset))[j] & - ~((u16 *)&lkups[i].m_u)[j]) | - (((u16 *)&lkups[i].h_u)[j] & - ((u16 *)&lkups[i].m_u)[j]); + ~lkups[i].m_raw[j]) | + (lkups[i].h_raw[j] & + lkups[i].m_raw[j]); } s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); @@ -6065,11 +6065,10 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, /* get # of words we need to match */ word_cnt = 0; for (i = 0; i < lkups_cnt; i++) { - u16 j, *ptr; + u16 j; - ptr = (u16 *)&lkups[i].m_u; - for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++) - if (ptr[j] != 0) + for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++) + if (lkups[i].m_raw[j]) word_cnt++; } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index ed3d1d03befa..ecac75e71395 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -138,8 +138,16 @@ struct ice_update_recipe_lkup_idx_params { struct ice_adv_lkup_elem { enum ice_protocol_type type; - union ice_prot_hdr h_u; /* Header values */ - union ice_prot_hdr m_u; /* Mask of header values to match */ + union { + union ice_prot_hdr h_u; /* Header values */ + /* Used to iterate over the headers */ + u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)]; + }; + union { + union ice_prot_hdr m_u; /* Mask of header values to match */ + /* Used to iterate over header mask */ + u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)]; + }; }; struct ice_sw_act_ctrl { -- cgit v1.2.3 From 27ffa273a0405b546b4562b1c65e616d41a1f30e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 21 Mar 2022 11:59:51 +0100 Subject: ice: switch: unobscurify bitops loop in ice_fill_adv_dummy_packet() A loop performing header modification according to the provided mask in ice_fill_adv_dummy_packet() is very cryptic (and error-prone). Replace two identical cast-deferences with a variable. Replace three struct-member-array-accesses with a variable. Invert the condition, reduce the indentation by one -> eliminate line wraps. 
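As a stand-alone illustration of the masked-merge idiom the rewritten loop
uses (function and variable names below are placeholders, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Write only the bits selected by 'mask' from 'val' into each 16-bit
 * word of a packet buffer, leaving all other bits untouched.
 */
static void masked_merge(uint16_t *pkt_words, const uint16_t *val,
			 const uint16_t *mask, size_t nwords)
{
	size_t j;

	for (j = 0; j < nwords; j++) {
		uint16_t m = mask[j];

		if (!m)
			continue;	/* nothing to patch in this word */

		pkt_words[j] = (pkt_words[j] & ~m) | (val[j] & m);
	}
}

int main(void)
{
	uint16_t pkt[2]  = { 0xaaaa, 0xbbbb };
	uint16_t val[2]  = { 0x1234, 0x5678 };
	uint16_t mask[2] = { 0x00ff, 0x0000 };

	masked_merge(pkt, val, mask, 2);
	printf("%04x %04x\n", pkt[0], pkt[1]);	/* aa34 bbbb */
	return 0;
}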
Signed-off-by: Alexander Lobakin Reviewed-by: Michal Swiatkowski Tested-by: Marcin Szycik Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 075df2474688..0936d39de70c 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -5810,13 +5810,15 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * indicated by the mask to make sure we don't improperly write * over any significant packet data. */ - for (j = 0; j < len / sizeof(u16); j++) - if (lkups[i].m_raw[j]) - ((u16 *)(pkt + offset))[j] = - (((u16 *)(pkt + offset))[j] & - ~lkups[i].m_raw[j]) | - (lkups[i].h_raw[j] & - lkups[i].m_raw[j]); + for (j = 0; j < len / sizeof(u16); j++) { + u16 *ptr = (u16 *)(pkt + offset); + u16 mask = lkups[i].m_raw[j]; + + if (!mask) + continue; + + ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask); + } } s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); -- cgit v1.2.3 From 1b699f81dba78c724f6f94b02f01e216b64bf88b Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 21 Mar 2022 11:59:52 +0100 Subject: ice: switch: use a struct to pass packet template params ice_find_dummy_packet() contains a lot of boilerplate code and a nice room for copy-paste mistakes. Instead of passing 3 separate pointers back and forth to get packet template (dummy) params, directly return a structure containing them. Then, use a macro to compose compound literals and avoid code duplication on return path. Now, dummy packet type/name is needed only once to return a full correct triple pkt-pkt_len-offsets, and those are all one-liners. dummy_ipv4_gtpu_ipv4_packet_offsets is just moved around and renamed (as well as dummy_ipv6_gtp_packet_offsets) with no function changes. 
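A simplified, self-contained sketch of the return-a-struct pattern (the
kernel macro additionally wraps the literal in a GNU statement expression;
the names and dummy templates below are invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct pkt_profile {
	const uint8_t *pkt;
	size_t pkt_len;
};

/* Compose a profile for a named template via a compound literal. */
#define PKT_PROFILE(type) ((struct pkt_profile){	\
	.pkt = dummy_##type##_packet,			\
	.pkt_len = sizeof(dummy_##type##_packet),	\
})

static const uint8_t dummy_tcp_packet[] = { 0x00, 0x01, 0x02, 0x03 };
static const uint8_t dummy_udp_packet[] = { 0x10, 0x11 };

/* One-liner per case instead of three out-pointer assignments. */
static struct pkt_profile find_dummy_packet(int want_udp)
{
	if (want_udp)
		return PKT_PROFILE(udp);

	return PKT_PROFILE(tcp);
}

int main(void)
{
	struct pkt_profile prof = find_dummy_packet(1);

	printf("template length: %zu\n", prof.pkt_len);	/* prints 2 */
	return 0;
}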
Signed-off-by: Alexander Lobakin Reviewed-by: Michal Swiatkowski Tested-by: Marcin Szycik Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 268 ++++++++++------------------ 1 file changed, 94 insertions(+), 174 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 0936d39de70c..4697eb8b4c66 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -35,6 +35,20 @@ struct ice_dummy_pkt_offsets { u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ }; +struct ice_dummy_pkt_profile { + const struct ice_dummy_pkt_offsets *offsets; + const u8 *pkt; + u16 pkt_len; +}; + +#define ICE_PKT_PROFILE(type) ({ \ + (struct ice_dummy_pkt_profile){ \ + .pkt = dummy_##type##_packet, \ + .pkt_len = sizeof(dummy_##type##_packet), \ + .offsets = dummy_##type##_packet_offsets, \ + }; \ +}) + static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, @@ -1141,6 +1155,15 @@ static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; +static const +struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = { + { ICE_MAC_OFOS, 0 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_OF, 34 }, + { ICE_GTP_NO_PAY, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, @@ -1172,16 +1195,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { }; static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = { - { ICE_MAC_OFOS, 0 }, - { ICE_IPV4_OFOS, 14 }, - { ICE_UDP_OF, 34 }, - { ICE_GTP_NO_PAY, 42 }, - { ICE_PROTOCOL_LAST, 0 }, -}; - -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = { +struct ice_dummy_pkt_offsets dummy_ipv6_gtp_packet_offsets[] = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -5501,15 +5515,12 @@ err_free_lkup_exts: * structure per protocol header * @lkups_cnt: number of protocols * @tun_type: tunnel type - * @pkt: dummy packet to fill according to filter match criteria - * @pkt_len: packet length of dummy packet - * @offsets: pointer to receive the pointer to the offsets for the packet + * + * Returns the &ice_dummy_pkt_profile corresponding to these lookup params. 
*/ -static void +static struct ice_dummy_pkt_profile ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, - enum ice_sw_tunnel_type tun_type, - const u8 **pkt, u16 *pkt_len, - const struct ice_dummy_pkt_offsets **offsets) + enum ice_sw_tunnel_type tun_type) { bool inner_tcp = false, inner_udp = false, outer_ipv6 = false; bool vlan = false, inner_ipv6 = false, gtp_no_pay = false; @@ -5545,168 +5556,86 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, if (tun_type == ICE_SW_TUN_GTPU) { if (outer_ipv6) { if (gtp_no_pay) { - *pkt = dummy_ipv6_gtp_packet; - *pkt_len = sizeof(dummy_ipv6_gtp_packet); - *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; + return ICE_PKT_PROFILE(ipv6_gtp); } else if (inner_ipv6) { - if (inner_udp) { - *pkt = dummy_ipv6_gtpu_ipv6_udp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_udp_packet); - *offsets = dummy_ipv6_gtpu_ipv6_udp_packet_offsets; - } else { - *pkt = dummy_ipv6_gtpu_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv6_tcp_packet); - *offsets = dummy_ipv6_gtpu_ipv6_tcp_packet_offsets; - } + if (inner_udp) + return ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp); + else + return ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp); } else { - if (inner_udp) { - *pkt = dummy_ipv6_gtpu_ipv4_udp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_udp_packet); - *offsets = dummy_ipv6_gtpu_ipv4_udp_packet_offsets; - } else { - *pkt = dummy_ipv6_gtpu_ipv4_tcp_packet; - *pkt_len = sizeof(dummy_ipv6_gtpu_ipv4_tcp_packet); - *offsets = dummy_ipv6_gtpu_ipv4_tcp_packet_offsets; - } + if (inner_udp) + return ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp); + else + return ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp); } } else { if (gtp_no_pay) { - *pkt = dummy_ipv4_gtpu_ipv4_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); - *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; + return ICE_PKT_PROFILE(ipv4_gtpu_ipv4); } else if (inner_ipv6) { - if (inner_udp) { - *pkt = dummy_ipv4_gtpu_ipv6_udp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_udp_packet); - *offsets = dummy_ipv4_gtpu_ipv6_udp_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv6_tcp_packet); - *offsets = dummy_ipv4_gtpu_ipv6_tcp_packet_offsets; - } + if (inner_udp) + return ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp); + else + return ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp); } else { - if (inner_udp) { - *pkt = dummy_ipv4_gtpu_ipv4_udp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_udp_packet); - *offsets = dummy_ipv4_gtpu_ipv4_udp_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv4_tcp_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_tcp_packet); - *offsets = dummy_ipv4_gtpu_ipv4_tcp_packet_offsets; - } + if (inner_udp) + return ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp); + else + return ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp); } } - return; } if (tun_type == ICE_SW_TUN_GTPC) { - if (outer_ipv6) { - *pkt = dummy_ipv6_gtp_packet; - *pkt_len = sizeof(dummy_ipv6_gtp_packet); - *offsets = dummy_ipv6_gtp_no_pay_packet_offsets; - } else { - *pkt = dummy_ipv4_gtpu_ipv4_packet; - *pkt_len = sizeof(dummy_ipv4_gtpu_ipv4_packet); - *offsets = dummy_ipv4_gtp_no_pay_packet_offsets; - } - return; + if (outer_ipv6) + return ICE_PKT_PROFILE(ipv6_gtp); + else + return ICE_PKT_PROFILE(ipv4_gtpu_ipv4); } if (tun_type == ICE_SW_TUN_NVGRE) { - if (inner_tcp && inner_ipv6) { - *pkt = dummy_gre_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_gre_ipv6_tcp_packet); - *offsets = dummy_gre_ipv6_tcp_packet_offsets; - return; - } - if (inner_tcp) { - *pkt = dummy_gre_tcp_packet; - *pkt_len = 
sizeof(dummy_gre_tcp_packet); - *offsets = dummy_gre_tcp_packet_offsets; - return; - } - if (inner_ipv6) { - *pkt = dummy_gre_ipv6_udp_packet; - *pkt_len = sizeof(dummy_gre_ipv6_udp_packet); - *offsets = dummy_gre_ipv6_udp_packet_offsets; - return; - } - *pkt = dummy_gre_udp_packet; - *pkt_len = sizeof(dummy_gre_udp_packet); - *offsets = dummy_gre_udp_packet_offsets; - return; + if (inner_tcp && inner_ipv6) + return ICE_PKT_PROFILE(gre_ipv6_tcp); + else if (inner_tcp) + return ICE_PKT_PROFILE(gre_tcp); + else if (inner_ipv6) + return ICE_PKT_PROFILE(gre_ipv6_udp); + else + return ICE_PKT_PROFILE(gre_udp); } if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE) { - if (inner_tcp && inner_ipv6) { - *pkt = dummy_udp_tun_ipv6_tcp_packet; - *pkt_len = sizeof(dummy_udp_tun_ipv6_tcp_packet); - *offsets = dummy_udp_tun_ipv6_tcp_packet_offsets; - return; - } - if (inner_tcp) { - *pkt = dummy_udp_tun_tcp_packet; - *pkt_len = sizeof(dummy_udp_tun_tcp_packet); - *offsets = dummy_udp_tun_tcp_packet_offsets; - return; - } - if (inner_ipv6) { - *pkt = dummy_udp_tun_ipv6_udp_packet; - *pkt_len = sizeof(dummy_udp_tun_ipv6_udp_packet); - *offsets = dummy_udp_tun_ipv6_udp_packet_offsets; - return; - } - *pkt = dummy_udp_tun_udp_packet; - *pkt_len = sizeof(dummy_udp_tun_udp_packet); - *offsets = dummy_udp_tun_udp_packet_offsets; - return; + if (inner_tcp && inner_ipv6) + return ICE_PKT_PROFILE(udp_tun_ipv6_tcp); + else if (inner_tcp) + return ICE_PKT_PROFILE(udp_tun_tcp); + else if (inner_ipv6) + return ICE_PKT_PROFILE(udp_tun_ipv6_udp); + else + return ICE_PKT_PROFILE(udp_tun_udp); } if (inner_udp && !outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_udp_packet; - *pkt_len = sizeof(dummy_vlan_udp_packet); - *offsets = dummy_vlan_udp_packet_offsets; - return; - } - *pkt = dummy_udp_packet; - *pkt_len = sizeof(dummy_udp_packet); - *offsets = dummy_udp_packet_offsets; - return; + if (vlan) + return ICE_PKT_PROFILE(vlan_udp); + else + return ICE_PKT_PROFILE(udp); } else if (inner_udp && outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_udp_ipv6_packet; - *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet); - *offsets = dummy_vlan_udp_ipv6_packet_offsets; - return; - } - *pkt = dummy_udp_ipv6_packet; - *pkt_len = sizeof(dummy_udp_ipv6_packet); - *offsets = dummy_udp_ipv6_packet_offsets; - return; + if (vlan) + return ICE_PKT_PROFILE(vlan_udp_ipv6); + else + return ICE_PKT_PROFILE(udp_ipv6); } else if ((inner_tcp && outer_ipv6) || outer_ipv6) { - if (vlan) { - *pkt = dummy_vlan_tcp_ipv6_packet; - *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet); - *offsets = dummy_vlan_tcp_ipv6_packet_offsets; - return; - } - *pkt = dummy_tcp_ipv6_packet; - *pkt_len = sizeof(dummy_tcp_ipv6_packet); - *offsets = dummy_tcp_ipv6_packet_offsets; - return; + if (vlan) + return ICE_PKT_PROFILE(vlan_tcp_ipv6); + else + return ICE_PKT_PROFILE(tcp_ipv6); } - if (vlan) { - *pkt = dummy_vlan_tcp_packet; - *pkt_len = sizeof(dummy_vlan_tcp_packet); - *offsets = dummy_vlan_tcp_packet_offsets; - } else { - *pkt = dummy_tcp_packet; - *pkt_len = sizeof(dummy_tcp_packet); - *offsets = dummy_tcp_packet_offsets; - } + if (vlan) + return ICE_PKT_PROFILE(vlan_tcp); + + return ICE_PKT_PROFILE(tcp); } /** @@ -5716,15 +5645,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, * structure per protocol header * @lkups_cnt: number of protocols * @s_rule: stores rule information from the match criteria - * @dummy_pkt: dummy packet to fill according to filter match criteria - * @pkt_len: packet length of dummy packet - * @offsets: offset 
info for the dummy packet + * @profile: dummy packet profile (the template, its size and header offsets) */ static int ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, struct ice_aqc_sw_rules_elem *s_rule, - const u8 *dummy_pkt, u16 pkt_len, - const struct ice_dummy_pkt_offsets *offsets) + const struct ice_dummy_pkt_profile *profile) { u8 *pkt; u16 i; @@ -5734,9 +5660,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, */ pkt = s_rule->pdata.lkup_tx_rx.hdr; - memcpy(pkt, dummy_pkt, pkt_len); + memcpy(pkt, profile->pkt, profile->pkt_len); for (i = 0; i < lkups_cnt; i++) { + const struct ice_dummy_pkt_offsets *offsets = profile->offsets; enum ice_protocol_type type; u16 offset = 0, len = 0, j; bool found = false; @@ -5821,7 +5748,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, } } - s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len); + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(profile->pkt_len); return 0; } @@ -6044,12 +5971,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_rule_query_data *added_entry) { struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; - u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle; - const struct ice_dummy_pkt_offsets *pkt_offsets; struct ice_aqc_sw_rules_elem *s_rule = NULL; + u16 rid = 0, i, rule_buf_sz, vsi_handle; + struct ice_dummy_pkt_profile profile; struct list_head *rule_head; struct ice_switch_info *sw; - const u8 *pkt = NULL; u16 word_cnt; u32 act = 0; int status; @@ -6077,13 +6003,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) return -EINVAL; - /* make sure that we can locate a dummy packet */ - ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len, - &pkt_offsets); - if (!pkt) { - status = -EINVAL; - goto err_ice_add_adv_rule; - } + /* locate a dummy packet */ + profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type); if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI || rinfo->sw_act.fltr_act == ICE_FWD_TO_Q || @@ -6124,7 +6045,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } return status; } - rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len; + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile.pkt_len; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) return -ENOMEM; @@ -6184,8 +6105,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid); s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); - status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, - pkt_len, pkt_offsets); + status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, &profile); if (status) goto err_ice_add_adv_rule; @@ -6193,7 +6113,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->pdata.lkup_tx_rx.hdr, - pkt_offsets); + profile.offsets); if (status) goto err_ice_add_adv_rule; } -- cgit v1.2.3 From 07a28842bb4f65e66aa297a031a57dec4828d6a0 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 21 Mar 2022 11:59:53 +0100 Subject: ice: switch: use convenience macros to declare dummy pkt templates Declarations of dummy/template packet headers and offsets can be minified to improve readability and simplify adding new templates. 
Move all the repetitive constructions into two macros and let them do the name and type expansions. Linewrap removal is yet another positive side effect. Signed-off-by: Alexander Lobakin Reviewed-by: Michal Swiatkowski Tested-by: Marcin Szycik Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 133 +++++++++++++--------------- 1 file changed, 62 insertions(+), 71 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 4697eb8b4c66..cde9e480ea89 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -41,15 +41,22 @@ struct ice_dummy_pkt_profile { u16 pkt_len; }; +#define ICE_DECLARE_PKT_OFFSETS(type) \ + static const struct ice_dummy_pkt_offsets \ + ice_dummy_##type##_packet_offsets[] + +#define ICE_DECLARE_PKT_TEMPLATE(type) \ + static const u8 ice_dummy_##type##_packet[] + #define ICE_PKT_PROFILE(type) ({ \ (struct ice_dummy_pkt_profile){ \ - .pkt = dummy_##type##_packet, \ - .pkt_len = sizeof(dummy_##type##_packet), \ - .offsets = dummy_##type##_packet_offsets, \ + .pkt = ice_dummy_##type##_packet, \ + .pkt_len = sizeof(ice_dummy_##type##_packet), \ + .offsets = ice_dummy_##type##_packet_offsets, \ }; \ }) -static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -61,7 +68,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -96,7 +103,7 @@ static const u8 dummy_gre_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -108,7 +115,7 @@ static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -140,7 +147,7 @@ static const u8 dummy_gre_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -155,7 +162,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -193,7 +200,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -208,7 +215,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, @@ -243,8 +250,7 @@ static const u8 dummy_udp_tun_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets -dummy_gre_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -256,7 +262,7 @@ dummy_gre_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -296,8 +302,7 @@ static const u8 dummy_gre_ipv6_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets -dummy_gre_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -309,7 +314,7 @@ dummy_gre_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_gre_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -346,8 +351,7 @@ static const u8 dummy_gre_ipv6_udp_packet[] = { 0x00, 0x08, 0x00, 0x00, }; -static const struct ice_dummy_pkt_offsets -dummy_udp_tun_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -362,7 +366,7 @@ dummy_udp_tun_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -405,8 +409,7 @@ static const u8 dummy_udp_tun_ipv6_tcp_packet[] = { 0x00, 0x00, 0x00, 0x00 }; -static const struct ice_dummy_pkt_offsets -dummy_udp_tun_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -421,7 +424,7 @@ dummy_udp_tun_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_udp_tun_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -462,7 +465,7 @@ static const u8 dummy_udp_tun_ipv6_udp_packet[] = { }; /* offset info for MAC + IPv4 + UDP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -471,7 +474,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = { }; /* Dummy packet for MAC + IPv4 + UDP */ -static const u8 dummy_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -491,7 +494,7 @@ static const u8 dummy_udp_packet[] = { }; /* offset info for MAC + VLAN + IPv4 + UDP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -501,7 +504,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = { }; /* C-tag (801.1Q), IPv4:UDP dummy packet */ -static const u8 dummy_vlan_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_udp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -523,7 
+526,7 @@ static const u8 dummy_vlan_udp_packet[] = { }; /* offset info for MAC + IPv4 + TCP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV4_OFOS, 14 }, @@ -532,7 +535,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = { }; /* Dummy packet for MAC + IPv4 + TCP */ -static const u8 dummy_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -555,7 +558,7 @@ static const u8 dummy_tcp_packet[] = { }; /* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */ -static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -565,7 +568,7 @@ static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = { }; /* C-tag (801.1Q), IPv4:TCP dummy packet */ -static const u8 dummy_vlan_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_tcp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -589,7 +592,7 @@ static const u8 dummy_vlan_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV6_OFOS, 14 }, @@ -597,7 +600,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_tcp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -625,8 +628,7 @@ static const u8 dummy_tcp_ipv6_packet[] = { }; /* C-tag (802.1Q): IPv6 + TCP */ -static const struct ice_dummy_pkt_offsets -dummy_vlan_tcp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_tcp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -636,7 +638,7 @@ dummy_vlan_tcp_ipv6_packet_offsets[] = { }; /* C-tag (802.1Q), IPv6 + TCP dummy packet */ -static const u8 dummy_vlan_tcp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_tcp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -666,7 +668,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = { }; /* IPv6 + UDP */ -static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, { ICE_IPV6_OFOS, 14 }, @@ -675,7 +677,7 @@ static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = { }; /* IPv6 + UDP dummy packet */ -static const u8 dummy_udp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -703,8 +705,7 @@ static const u8 dummy_udp_ipv6_packet[] = { }; /* C-tag (802.1Q): IPv6 + UDP */ -static const struct ice_dummy_pkt_offsets -dummy_vlan_udp_ipv6_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(vlan_udp_ipv6) = { { ICE_MAC_OFOS, 0 }, { ICE_VLAN_OFOS, 12 }, { ICE_ETYPE_OL, 16 }, @@ -714,7 +715,7 @@ dummy_vlan_udp_ipv6_packet_offsets[] = { }; /* C-tag (802.1Q), IPv6 + UDP dummy packet */ -static const u8 dummy_vlan_udp_ipv6_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(vlan_udp_ipv6) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -741,8 +742,7 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = { }; /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -752,7 +752,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -790,8 +790,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_tcp_packet[] = { }; /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -801,7 +800,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -836,8 +835,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_udp_packet[] = { }; /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */ -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -847,7 +845,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -889,8 +887,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -900,7 +897,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -939,8 +936,7 @@ static const u8 dummy_ipv4_gtpu_ipv6_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -950,7 +946,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -992,8 +988,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = { 
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1003,7 +998,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv4_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1042,8 +1037,7 @@ static const u8 dummy_ipv6_gtpu_ipv4_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1053,7 +1047,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_tcp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1100,8 +1094,7 @@ static const u8 dummy_ipv6_gtpu_ipv6_tcp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1111,7 +1104,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtpu_ipv6_udp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = { 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1155,8 +1148,7 @@ static const u8 dummy_ipv6_gtpu_ipv6_udp_packet[] = { 0x00, 0x00, /* 2 bytes for 4 byte alignment */ }; -static const -struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV4_OFOS, 14 }, { ICE_UDP_OF, 34 }, @@ -1164,7 +1156,7 @@ struct ice_dummy_pkt_offsets dummy_ipv4_gtpu_ipv4_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1194,8 +1186,7 @@ static const u8 dummy_ipv4_gtpu_ipv4_packet[] = { 0x00, 0x00, }; -static const -struct ice_dummy_pkt_offsets dummy_ipv6_gtp_packet_offsets[] = { +ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = { { ICE_MAC_OFOS, 0 }, { ICE_IPV6_OFOS, 14 }, { ICE_UDP_OF, 54 }, @@ -1203,7 +1194,7 @@ struct ice_dummy_pkt_offsets dummy_ipv6_gtp_packet_offsets[] = { { ICE_PROTOCOL_LAST, 0 }, }; -static const u8 dummy_ipv6_gtp_packet[] = { +ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = { 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -- cgit v1.2.3 From e33163a40d1a1f7fead2ce26f9b75da6b581a49e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 21 Mar 2022 11:59:54 +0100 Subject: ice: switch: convert packet template match code to rodata Trade text size for rodata size and replace tons of nested if-elses to the const mask match based structs. The almost entire ice_find_dummy_packet() now becomes just one plain while-increment loop. The order in ice_dummy_pkt_profiles[] should be same with the if-elses order previously, as masks become less and less strict through the array to follow the original code flow. 
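To make the conversion easier to follow, here is a minimal, self-contained sketch of the ordered mask-match table described above. The names and bits are purely illustrative (they are not the driver's symbols); the real ICE_PKT_* flags, profile table and lookup loop appear in the diff further down. The key invariant is the ordering: stricter masks come first and the final catch-all entry has an empty mask, so the scan always terminates on a valid entry.

#include <stdint.h>
#include <stdio.h>

/* Illustrative match bits -- the driver uses its own ICE_PKT_* flags. */
#define PKT_VLAN	(1u << 0)
#define PKT_IPV6	(1u << 1)
#define PKT_UDP		(1u << 2)

struct pkt_profile {
	uint32_t match;		/* all of these bits must be set to pick this entry */
	const char *name;
};

/* Ordered from most to least specific; the last entry (match == 0)
 * matches anything, so the scan below cannot run off the array.
 */
static const struct pkt_profile profiles[] = {
	{ PKT_VLAN | PKT_IPV6 | PKT_UDP,	"vlan_udp_ipv6" },
	{ PKT_IPV6 | PKT_UDP,			"udp_ipv6" },
	{ PKT_UDP,				"udp" },
	{ 0,					"tcp" },	/* catch-all */
};

static const struct pkt_profile *find_profile(uint32_t match)
{
	const struct pkt_profile *p = profiles;

	/* Same shape as the new ice_find_dummy_packet(): advance until
	 * every bit required by the entry is present in 'match'.
	 */
	while (p->match && (match & p->match) != p->match)
		p++;

	return p;
}

int main(void)
{
	printf("%s\n", find_profile(PKT_IPV6 | PKT_UDP)->name);	/* udp_ipv6 */
	printf("%s\n", find_profile(0)->name);				/* tcp */
	return 0;
}

Because stricter masks sit earlier in the array, a lookup carrying extra bits still lands on the most specific template, which is why the array order has to mirror the old if-else nesting.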
Apart from removing 80 locs of 4-level if-elses, it brings a solid text size optimization: add/remove: 0/1 grow/shrink: 1/1 up/down: 2/-1058 (-1056) Function old new delta ice_fill_adv_dummy_packet 289 291 +2 ice_adv_add_update_vsi_list 201 - -201 ice_add_adv_rule 2950 2093 -857 Total: Before=414512, After=413456, chg -0.25% add/remove: 53/52 grow/shrink: 0/0 up/down: 4660/-3988 (672) RO Data old new delta ice_dummy_pkt_profiles - 672 +672 Total: Before=37895, After=38567, chg +1.77% Signed-off-by: Alexander Lobakin Reviewed-by: Michal Swiatkowski Tested-by: Marcin Szycik Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 215 ++++++++++++++-------------- 1 file changed, 108 insertions(+), 107 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index cde9e480ea89..496250f9f8fc 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -30,6 +30,19 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; +enum { + ICE_PKT_VLAN = BIT(0), + ICE_PKT_OUTER_IPV6 = BIT(1), + ICE_PKT_TUN_GTPC = BIT(2), + ICE_PKT_TUN_GTPU = BIT(3), + ICE_PKT_TUN_NVGRE = BIT(4), + ICE_PKT_TUN_UDP = BIT(5), + ICE_PKT_INNER_IPV6 = BIT(6), + ICE_PKT_INNER_TCP = BIT(7), + ICE_PKT_INNER_UDP = BIT(8), + ICE_PKT_GTP_NOPAY = BIT(9), +}; + struct ice_dummy_pkt_offsets { enum ice_protocol_type type; u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ @@ -38,23 +51,23 @@ struct ice_dummy_pkt_offsets { struct ice_dummy_pkt_profile { const struct ice_dummy_pkt_offsets *offsets; const u8 *pkt; + u32 match; u16 pkt_len; }; -#define ICE_DECLARE_PKT_OFFSETS(type) \ - static const struct ice_dummy_pkt_offsets \ +#define ICE_DECLARE_PKT_OFFSETS(type) \ + static const struct ice_dummy_pkt_offsets \ ice_dummy_##type##_packet_offsets[] -#define ICE_DECLARE_PKT_TEMPLATE(type) \ +#define ICE_DECLARE_PKT_TEMPLATE(type) \ static const u8 ice_dummy_##type##_packet[] -#define ICE_PKT_PROFILE(type) ({ \ - (struct ice_dummy_pkt_profile){ \ - .pkt = ice_dummy_##type##_packet, \ - .pkt_len = sizeof(ice_dummy_##type##_packet), \ - .offsets = ice_dummy_##type##_packet_offsets, \ - }; \ -}) +#define ICE_PKT_PROFILE(type, m) { \ + .match = (m), \ + .pkt = ice_dummy_##type##_packet, \ + .pkt_len = sizeof(ice_dummy_##type##_packet), \ + .offsets = ice_dummy_##type##_packet_offsets, \ +} ICE_DECLARE_PKT_OFFSETS(gre_tcp) = { { ICE_MAC_OFOS, 0 }, @@ -1220,6 +1233,55 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = { 0x00, 0x00, }; +static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = { + ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 | + ICE_PKT_GTP_NOPAY), + ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY), + ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU | + 
ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU), + ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC), + ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE), + ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP | + ICE_PKT_INNER_IPV6 | + ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP), + ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP | + ICE_PKT_INNER_IPV6), + ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP), + ICE_PKT_PROFILE(vlan_udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP | + ICE_PKT_VLAN), + ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(vlan_udp, ICE_PKT_INNER_UDP | ICE_PKT_VLAN), + ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP), + ICE_PKT_PROFILE(vlan_tcp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_VLAN), + ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(vlan_tcp, ICE_PKT_VLAN), + ICE_PKT_PROFILE(tcp, 0), +}; + #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ (DUMMY_ETH_HDR_LEN * \ @@ -5509,124 +5571,63 @@ err_free_lkup_exts: * * Returns the &ice_dummy_pkt_profile corresponding to these lookup params. */ -static struct ice_dummy_pkt_profile +static const struct ice_dummy_pkt_profile * ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, enum ice_sw_tunnel_type tun_type) { - bool inner_tcp = false, inner_udp = false, outer_ipv6 = false; - bool vlan = false, inner_ipv6 = false, gtp_no_pay = false; + const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles; + u32 match = 0; u16 i; + switch (tun_type) { + case ICE_SW_TUN_GTPC: + match |= ICE_PKT_TUN_GTPC; + break; + case ICE_SW_TUN_GTPU: + match |= ICE_PKT_TUN_GTPU; + break; + case ICE_SW_TUN_NVGRE: + match |= ICE_PKT_TUN_NVGRE; + break; + case ICE_SW_TUN_GENEVE: + case ICE_SW_TUN_VXLAN: + match |= ICE_PKT_TUN_UDP; + break; + default: + break; + } + for (i = 0; i < lkups_cnt; i++) { if (lkups[i].type == ICE_UDP_ILOS) - inner_udp = true; + match |= ICE_PKT_INNER_UDP; else if (lkups[i].type == ICE_TCP_IL) - inner_tcp = true; + match |= ICE_PKT_INNER_TCP; else if (lkups[i].type == ICE_IPV6_OFOS) - outer_ipv6 = true; + match |= ICE_PKT_OUTER_IPV6; else if (lkups[i].type == ICE_VLAN_OFOS) - vlan = true; + match |= ICE_PKT_VLAN; else if (lkups[i].type == ICE_ETYPE_OL && lkups[i].h_u.ethertype.ethtype_id == cpu_to_be16(ICE_IPV6_ETHER_ID) && lkups[i].m_u.ethertype.ethtype_id == cpu_to_be16(0xFFFF)) - outer_ipv6 = true; + match |= ICE_PKT_OUTER_IPV6; else if (lkups[i].type == ICE_ETYPE_IL && lkups[i].h_u.ethertype.ethtype_id == cpu_to_be16(ICE_IPV6_ETHER_ID) && lkups[i].m_u.ethertype.ethtype_id == cpu_to_be16(0xFFFF)) - inner_ipv6 = true; + match |= ICE_PKT_INNER_IPV6; else if (lkups[i].type == ICE_IPV6_IL) - inner_ipv6 = true; + match |= ICE_PKT_INNER_IPV6; else if (lkups[i].type == ICE_GTP_NO_PAY) - gtp_no_pay = true; - } - - if (tun_type == ICE_SW_TUN_GTPU) { - if (outer_ipv6) { - if (gtp_no_pay) { - return ICE_PKT_PROFILE(ipv6_gtp); - } else if (inner_ipv6) { - if (inner_udp) - return ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp); - else - return ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp); - } else { - if (inner_udp) - return ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp); - else - return 
ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp); - } - } else { - if (gtp_no_pay) { - return ICE_PKT_PROFILE(ipv4_gtpu_ipv4); - } else if (inner_ipv6) { - if (inner_udp) - return ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp); - else - return ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp); - } else { - if (inner_udp) - return ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp); - else - return ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp); - } - } + match |= ICE_PKT_GTP_NOPAY; } - if (tun_type == ICE_SW_TUN_GTPC) { - if (outer_ipv6) - return ICE_PKT_PROFILE(ipv6_gtp); - else - return ICE_PKT_PROFILE(ipv4_gtpu_ipv4); - } - - if (tun_type == ICE_SW_TUN_NVGRE) { - if (inner_tcp && inner_ipv6) - return ICE_PKT_PROFILE(gre_ipv6_tcp); - else if (inner_tcp) - return ICE_PKT_PROFILE(gre_tcp); - else if (inner_ipv6) - return ICE_PKT_PROFILE(gre_ipv6_udp); - else - return ICE_PKT_PROFILE(gre_udp); - } + while (ret->match && (match & ret->match) != ret->match) + ret++; - if (tun_type == ICE_SW_TUN_VXLAN || - tun_type == ICE_SW_TUN_GENEVE) { - if (inner_tcp && inner_ipv6) - return ICE_PKT_PROFILE(udp_tun_ipv6_tcp); - else if (inner_tcp) - return ICE_PKT_PROFILE(udp_tun_tcp); - else if (inner_ipv6) - return ICE_PKT_PROFILE(udp_tun_ipv6_udp); - else - return ICE_PKT_PROFILE(udp_tun_udp); - } - - if (inner_udp && !outer_ipv6) { - if (vlan) - return ICE_PKT_PROFILE(vlan_udp); - else - return ICE_PKT_PROFILE(udp); - } else if (inner_udp && outer_ipv6) { - if (vlan) - return ICE_PKT_PROFILE(vlan_udp_ipv6); - else - return ICE_PKT_PROFILE(udp_ipv6); - } else if ((inner_tcp && outer_ipv6) || outer_ipv6) { - if (vlan) - return ICE_PKT_PROFILE(vlan_tcp_ipv6); - else - return ICE_PKT_PROFILE(tcp_ipv6); - } - - if (vlan) - return ICE_PKT_PROFILE(vlan_tcp); - - return ICE_PKT_PROFILE(tcp); + return ret; } /** @@ -5963,8 +5964,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, { struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL; struct ice_aqc_sw_rules_elem *s_rule = NULL; + const struct ice_dummy_pkt_profile *profile; u16 rid = 0, i, rule_buf_sz, vsi_handle; - struct ice_dummy_pkt_profile profile; struct list_head *rule_head; struct ice_switch_info *sw; u16 word_cnt; @@ -6036,7 +6037,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } return status; } - rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile.pkt_len; + rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + profile->pkt_len; s_rule = kzalloc(rule_buf_sz, GFP_KERNEL); if (!s_rule) return -ENOMEM; @@ -6096,7 +6097,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid); s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); - status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, &profile); + status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile); if (status) goto err_ice_add_adv_rule; @@ -6104,7 +6105,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) { status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->pdata.lkup_tx_rx.hdr, - profile.offsets); + profile->offsets); if (status) goto err_ice_add_adv_rule; } -- cgit v1.2.3 From e416531f04594de678614f9dc63a84bd12b4ef24 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 6 Apr 2022 14:37:52 -0700 Subject: net: hyperv: remove use of bpf_op_t Following patch will hide that typedef. There seems to be no strong reason for hyperv to use it, so let's not. 
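The change itself is small; as a sketch of the resulting pattern (illustrative helper name, assuming <linux/netdevice.h>; the real function is netvsc_vf_setxdp() in the diff below), the optional ndo_bpf hook is tested and then invoked directly through netdev_ops instead of being cached in a bpf_op_t local:

/* Sketch only: drive the device's optional ndo_bpf hook directly.
 * Error handling around bpf_prog_put() is omitted for brevity.
 */
static int setxdp_sketch(struct net_device *dev, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;

	if (!dev->netdev_ops->ndo_bpf)	/* driver has no XDP support */
		return 0;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = XDP_SETUP_PROG;
	xdp.prog = prog;

	return dev->netdev_ops->ndo_bpf(dev, &xdp);
}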
Acked-by: Wei Liu Signed-off-by: Jakub Kicinski --- drivers/net/hyperv/netvsc_bpf.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 7856905414eb..232c4a0efd7b 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -137,7 +137,6 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog, int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) { struct netdev_bpf xdp; - bpf_op_t ndo_bpf; int ret; ASSERT_RTNL(); @@ -145,8 +144,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) if (!vf_netdev) return 0; - ndo_bpf = vf_netdev->netdev_ops->ndo_bpf; - if (!ndo_bpf) + if (!vf_netdev->netdev_ops->ndo_bpf) return 0; memset(&xdp, 0, sizeof(xdp)); @@ -157,7 +155,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) xdp.command = XDP_SETUP_PROG; xdp.prog = prog; - ret = ndo_bpf(vf_netdev, &xdp); + ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp); if (ret && prog) bpf_prog_put(prog); -- cgit v1.2.3 From 6a62924c0a81316b2aff713566593405b57545d5 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 5 Apr 2022 14:40:55 +0100 Subject: sfc: Stop using iommu_present() Even if an IOMMU might be present for some PCI segment in the system, that doesn't necessarily mean it provides translation for the device we care about. It appears that what we care about here is specifically whether DMA mapping ops involve any IOMMU overhead or not, so check for translation actually being active for our device. Signed-off-by: Robin Murphy Acked-by: Edward Cree Link: https://lore.kernel.org/r/7350f957944ecfce6cce90f422e3992a1f428775.1649166055.git.robin.murphy@arm.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/falcon/rx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 0c6cc2191369..6bbdb5d2eebf 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -718,12 +718,14 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx, struct ef4_rx_queue *rx_queue) { unsigned int bufs_in_recycle_ring, page_ring_size; + struct iommu_domain __maybe_unused *domain; /* Set the RX recycle ring size */ #ifdef CONFIG_PPC64 bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU; #else - if (iommu_present(&pci_bus_type)) + domain = iommu_get_domain_for_dev(&efx->pci_dev->dev); + if (domain && domain->type != IOMMU_DOMAIN_IDENTITY) bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU; else bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU; -- cgit v1.2.3 From 4daf5f1956308641092769aa1924286b19513328 Mon Sep 17 00:00:00 2001 From: Xiaomeng Tong Date: Wed, 6 Apr 2022 09:59:21 +0800 Subject: qed: remove an unneed NULL check on list iterator The define for_each_pci_dev(d) is: while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) Thus, the list iterator 'd' is always non-NULL so it doesn't need to be checked. So just remove the unnecessary NULL check. Also remove the unnecessary initializer because the list iterator is always initialized. 
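To spell out why the check is dead code, here is a hand expansion of the loop (hypothetical body; the macro itself is quoted in the message above): the body only runs when pci_get_device() has returned a device, so the iterator can never be NULL inside it.

/* Sketch: what for_each_pci_dev(pdev) expands to. */
struct pci_dev *pdev = NULL;	/* start the scan at the beginning */

while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
	/* pdev is guaranteed non-NULL here, so "if (pdev && ...)"
	 * is equivalent to "if (...)".
	 */
	if (pdev->driver && !strcmp(pdev->driver->name, "qede"))
		handle_qede_dev(pdev);	/* hypothetical helper */
}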
Signed-off-by: Xiaomeng Tong Link: https://lore.kernel.org/r/20220406015921.29267-1-xiam0nd.tong@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c index 96a2077fd315..7e286cddbedb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c +++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c @@ -161,11 +161,11 @@ EXPORT_SYMBOL(qed_vlan_get_ndev); struct pci_dev *qed_validate_ndev(struct net_device *ndev) { - struct pci_dev *pdev = NULL; struct net_device *upper; + struct pci_dev *pdev; for_each_pci_dev(pdev) { - if (pdev && pdev->driver && + if (pdev->driver && !strcmp(pdev->driver->name, "qede")) { upper = pci_get_drvdata(pdev); if (upper->ifindex == ndev->ifindex) -- cgit v1.2.3 From 26894cd971168d14fd8f4c05435aef59188c9514 Mon Sep 17 00:00:00 2001 From: "Andrea Parri (Microsoft)" Date: Thu, 7 Apr 2022 06:40:34 +0200 Subject: hv_netvsc: Print value of invalid ID in netvsc_send_{completion,tx_complete}() That being useful for debugging purposes. Notice that the packet descriptor is in "private" guest memory, so that Hyper-V can not tamper with it. While at it, remove two unnecessary u64-casts. Signed-off-by: Andrea Parri (Microsoft) Signed-off-by: Jakub Kicinski --- drivers/net/hyperv/netvsc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 9442f751ad3a..4061af5baaea 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -792,9 +792,9 @@ static void netvsc_send_tx_complete(struct net_device *ndev, int queue_sends; u64 cmd_rqst; - cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id); + cmd_rqst = channel->request_addr_callback(channel, desc->trans_id); if (cmd_rqst == VMBUS_RQST_ERROR) { - netdev_err(ndev, "Incorrect transaction id\n"); + netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id); return; } @@ -854,9 +854,9 @@ static void netvsc_send_completion(struct net_device *ndev, /* First check if this is a VMBUS completion without data payload */ if (!msglen) { cmd_rqst = incoming_channel->request_addr_callback(incoming_channel, - (u64)desc->trans_id); + desc->trans_id); if (cmd_rqst == VMBUS_RQST_ERROR) { - netdev_err(ndev, "Invalid transaction id\n"); + netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id); return; } -- cgit v1.2.3 From b231c3f3414cfc7bf8fb1e246ed5a3d523616520 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:58:56 -0400 Subject: bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff Move initialization of xdp_buff outside of bnxt_rx_xdp to prepare for allowing bnxt_rx_xdp to operate on multibuffer xdp_buffs. v2: Fix uninitalized variables warning in bnxt_xdp.c. v3: Add new define BNXT_PAGE_MODE_BUF_SIZE Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 11 ++++--- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 8 +++-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 46 ++++++++++++++++++++------- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 7 +++- 4 files changed, 53 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 874fad0a5cf8..826d94c49d26 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1731,6 +1731,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u8 *data_ptr, agg_bufs, cmp_type; dma_addr_t dma_addr; struct sk_buff *skb; + struct xdp_buff xdp; u32 flags, misc; void *data; int rc = 0; @@ -1839,11 +1840,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, len = flags >> RX_CMP_LEN_SHIFT; dma_addr = rx_buf->mapping; - if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) { - rc = 1; - goto next_rx; + if (bnxt_xdp_attached(bp, rxr)) { + bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp); + if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) { + rc = 1; + goto next_rx; + } } - if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 98453a78cbd0..0f35459d5206 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -591,10 +591,12 @@ struct nqe_cn { #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) #define BNXT_MAX_MTU 9500 -#define BNXT_MAX_PAGE_MODE_MTU \ +#define BNXT_PAGE_MODE_BUF_SIZE \ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ - XDP_PACKET_HEADROOM - \ - SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))) + XDP_PACKET_HEADROOM) +#define BNXT_MAX_PAGE_MODE_MTU \ + BNXT_PAGE_MODE_BUF_SIZE - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)) #define BNXT_MIN_PKT_SIZE 52 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 03b1d6c04504..a3924e6030fe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -106,18 +106,44 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } } +bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); + + return !!xdp_prog; +} + +void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 cons, u8 **data_ptr, unsigned int *len, + struct xdp_buff *xdp) +{ + struct bnxt_sw_rx_bd *rx_buf; + struct pci_dev *pdev; + dma_addr_t mapping; + u32 offset; + + pdev = bp->pdev; + rx_buf = &rxr->rx_buf_ring[cons]; + offset = bp->rx_offset; + + mapping = rx_buf->mapping - bp->rx_dma_offset; + dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); + + xdp_init_buff(xdp, BNXT_PAGE_MODE_BUF_SIZE + offset, &rxr->xdp_rxq); + xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false); +} + /* returns the following: * true - packet consumed by XDP and new buffer is allocated. * false - packet should be passed to the stack. 
*/ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct page *page, u8 **data_ptr, unsigned int *len, u8 *event) + struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event) { struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); struct bnxt_tx_ring_info *txr; struct bnxt_sw_rx_bd *rx_buf; struct pci_dev *pdev; - struct xdp_buff xdp; dma_addr_t mapping; void *orig_data; u32 tx_avail; @@ -128,16 +154,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, return false; pdev = bp->pdev; - rx_buf = &rxr->rx_buf_ring[cons]; offset = bp->rx_offset; - mapping = rx_buf->mapping - bp->rx_dma_offset; - dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); - txr = rxr->bnapi->tx_ring; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */ - xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq); - xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false); orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -150,15 +170,17 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, *event &= ~BNXT_RX_EVENT; *len = xdp.data_end - xdp.data; - if (orig_data != xdp.data) { + if (orig_data != xdp.data) offset = xdp.data - xdp.data_hard_start; - *data_ptr = xdp.data_hard_start + offset; - } + switch (act) { case XDP_PASS: return false; case XDP_TX: + rx_buf = &rxr->rx_buf_ring[cons]; + mapping = rx_buf->mapping - bp->rx_dma_offset; + if (tx_avail < 1) { trace_xdp_exception(bp->dev, xdp_prog, act); bnxt_reuse_rx_data(rxr, cons, page); @@ -177,6 +199,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, * redirect is coming from a frame received by the * bnxt_en driver. */ + rx_buf = &rxr->rx_buf_ring[cons]; + mapping = rx_buf->mapping - bp->rx_dma_offset; dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 067bb5e821f5..97e7905dbb20 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -17,10 +17,15 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, dma_addr_t mapping, u32 len); void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct page *page, u8 **data_ptr, unsigned int *len, + struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event); int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); int bnxt_xdp_xmit(struct net_device *dev, int num_frames, struct xdp_frame **frames, u32 flags); +bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr); + +void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 cons, u8 **data_ptr, unsigned int *len, + struct xdp_buff *xdp); #endif -- cgit v1.2.3 From ee536dcbdce4966009b4ea15f03cba045161249a Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:58:57 -0400 Subject: bnxt: add flag to denote that an xdp program is currently attached This will be used to determine if bnxt_rx_xdp should be called rather than calling it every time. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 826d94c49d26..f6973f57ccd2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1729,6 +1729,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct bnxt_sw_rx_bd *rx_buf; unsigned int len; u8 *data_ptr, agg_bufs, cmp_type; + bool xdp_active = false; dma_addr_t dma_addr; struct sk_buff *skb; struct xdp_buff xdp; @@ -1842,11 +1843,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (bnxt_xdp_attached(bp, rxr)) { bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp); + xdp_active = true; + } + + /* skip running XDP prog if there are aggregation bufs */ + if (!agg_bufs && xdp_active) { if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) { rc = 1; goto next_rx; } } + if (len <= bp->rx_copy_thresh) { skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); -- cgit v1.2.3 From ca1df2dd8e2f2c18a90d21e59ad56d43c2e9322e Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:58:58 -0400 Subject: bnxt: refactor bnxt_rx_pages operate on skb_shared_info Rather than operating on an sk_buff, add frags from the aggregation ring into the frags of an skb_shared_info. This will allow the caller to use either an sk_buff or xdp_buff. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 50 ++++++++++++++++++++----------- 1 file changed, 33 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f6973f57ccd2..4ae94387d07b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1038,22 +1038,23 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return skb; } -static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - struct sk_buff *skb, u16 idx, - u32 agg_bufs, bool tpa) +static u32 __bnxt_rx_pages(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct skb_shared_info *shinfo, + u16 idx, u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; u16 prod = rxr->rx_agg_prod; + u32 i, total_frag_len = 0; bool p5_tpa = false; - u32 i; if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) p5_tpa = true; for (i = 0; i < agg_bufs; i++) { + skb_frag_t *frag = &shinfo->frags[i]; u16 cons, frag_len; struct rx_agg_cmp *agg; struct bnxt_sw_rx_agg_bd *cons_rx_buf; @@ -1069,8 +1070,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; cons_rx_buf = &rxr->rx_agg_ring[cons]; - skb_fill_page_desc(skb, i, cons_rx_buf->page, - cons_rx_buf->offset, frag_len); + skb_frag_off_set(frag, cons_rx_buf->offset); + skb_frag_size_set(frag, frag_len); + __skb_frag_set_page(frag, cons_rx_buf->page); + shinfo->nr_frags = i + 1; __clear_bit(cons, rxr->rx_agg_bmap); /* It is possible for bnxt_alloc_rx_page() to allocate @@ -1082,15 +1085,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, cons_rx_buf->page = NULL; if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { - struct skb_shared_info *shinfo; unsigned int nr_frags; - shinfo = skb_shinfo(skb); 
nr_frags = --shinfo->nr_frags; __skb_frag_set_page(&shinfo->frags[nr_frags], NULL); - - dev_kfree_skb(skb); - cons_rx_buf->page = page; /* Update prod since possibly some pages have been @@ -1098,20 +1096,38 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, */ rxr->rx_agg_prod = prod; bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); - return NULL; + return 0; } dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING); - skb->data_len += frag_len; - skb->len += frag_len; - skb->truesize += PAGE_SIZE; - + total_frag_len += frag_len; prod = NEXT_RX_AGG(prod); } rxr->rx_agg_prod = prod; + return total_frag_len; +} + +static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 total_frag_len = 0; + + total_frag_len = __bnxt_rx_pages(bp, cpr, shinfo, idx, agg_bufs, tpa); + + if (!total_frag_len) { + dev_kfree_skb(skb); + return NULL; + } + + skb->data_len += total_frag_len; + skb->len += total_frag_len; + skb->truesize += PAGE_SIZE * agg_bufs; return skb; } -- cgit v1.2.3 From 23e4c0469ad03f695993cceccb50cbddf9ef8963 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:58:59 -0400 Subject: bnxt: rename bnxt_rx_pages to bnxt_rx_agg_pages_skb Clarify that this is reading buffers from the aggregation ring. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4ae94387d07b..5487d8661f13 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1038,10 +1038,10 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return skb; } -static u32 __bnxt_rx_pages(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - struct skb_shared_info *shinfo, - u16 idx, u32 agg_bufs, bool tpa) +static u32 __bnxt_rx_agg_pages(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct skb_shared_info *shinfo, + u16 idx, u32 agg_bufs, bool tpa) { struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; @@ -1110,15 +1110,15 @@ static u32 __bnxt_rx_pages(struct bnxt *bp, return total_frag_len; } -static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, - struct bnxt_cp_ring_info *cpr, - struct sk_buff *skb, u16 idx, - u32 agg_bufs, bool tpa) +static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) { struct skb_shared_info *shinfo = skb_shinfo(skb); u32 total_frag_len = 0; - total_frag_len = __bnxt_rx_pages(bp, cpr, shinfo, idx, agg_bufs, tpa); + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa); if (!total_frag_len) { dev_kfree_skb(skb); @@ -1660,7 +1660,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); + skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). 
*/ cpr->sw_stats.rx.rx_oom_discards += 1; @@ -1898,7 +1898,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (agg_bufs) { - skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); + skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); if (!skb) { cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; -- cgit v1.2.3 From 4c6c123c9af9c94be4726134ca72ba5a0be0ebd0 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:59:00 -0400 Subject: bnxt: adding bnxt_rx_agg_pages_xdp for aggregated xdp This patch adds a new function that will read pages from the aggregation ring and create an xdp_buff with frags based on the entries in the aggregation ring. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5487d8661f13..65992a33b973 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1131,6 +1131,27 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, return skb; } +static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct xdp_buff *xdp, u16 idx, + u32 agg_bufs, bool tpa) +{ + struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); + u32 total_frag_len = 0; + + if (!xdp_buff_has_frags(xdp)) + shinfo->nr_frags = 0; + + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa); + + if (total_frag_len) { + xdp_buff_set_frags_flag(xdp); + shinfo->nr_frags = agg_bufs; + shinfo->xdp_frags_size = total_frag_len; + } + return total_frag_len; +} + static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u8 agg_bufs, u32 *raw_cons) { @@ -1859,6 +1880,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (bnxt_xdp_attached(bp, rxr)) { bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp); + if (agg_bufs) { + u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, + cp_cons, agg_bufs, + false); + if (!frag_len) { + cpr->sw_stats.rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } + } xdp_active = true; } -- cgit v1.2.3 From 31b9998bf225eca51f0d9f8d694d807495bf80a8 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:59:01 -0400 Subject: bnxt: set xdp_buff pfmemalloc flag if needed Set the pfmemaloc flag in the xdp buff so that this can be copied to the skb if needed for an XDP_PASS action. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 65992a33b973..6e1d43410e8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1041,7 +1041,8 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, static u32 __bnxt_rx_agg_pages(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct skb_shared_info *shinfo, - u16 idx, u32 agg_bufs, bool tpa) + u16 idx, u32 agg_bufs, bool tpa, + struct xdp_buff *xdp) { struct bnxt_napi *bnapi = cpr->bnapi; struct pci_dev *pdev = bp->pdev; @@ -1084,6 +1085,9 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, page = cons_rx_buf->page; cons_rx_buf->page = NULL; + if (xdp && page_is_pfmemalloc(page)) + xdp_buff_set_frag_pfmemalloc(xdp); + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { unsigned int nr_frags; @@ -1118,8 +1122,8 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, struct skb_shared_info *shinfo = skb_shinfo(skb); u32 total_frag_len = 0; - total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa); - + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, + agg_bufs, tpa, NULL); if (!total_frag_len) { dev_kfree_skb(skb); return NULL; @@ -1142,8 +1146,8 @@ static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, if (!xdp_buff_has_frags(xdp)) shinfo->nr_frags = 0; - total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa); - + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, + idx, agg_bufs, tpa, xdp); if (total_frag_len) { xdp_buff_set_frags_flag(xdp); shinfo->nr_frags = agg_bufs; -- cgit v1.2.3 From 32861236190bf1247d18e245cee0814603d2c29f Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:59:02 -0400 Subject: bnxt: change receive ring space parameters Modify ring header data split and jumbo parameters to account for the fact that the design for XDP multibuffer puts close to the first 4k of data in a page and the remaining portions of the packet go in the aggregation ring. v3: Simplified code around initial buffer size calculation Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 42 ++++++++++++++++++++----------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 28 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6e1d43410e8c..2a919905f256 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -56,6 +56,7 @@ #include #include #include +#include #include "bnxt_hsi.h" #include "bnxt.h" @@ -1933,11 +1934,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (agg_bufs) { - skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); - if (!skb) { - cpr->sw_stats.rx.rx_oom_discards += 1; - rc = -ENOMEM; - goto next_rx; + if (!xdp_active) { + skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); + if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } } } @@ -3854,7 +3857,7 @@ void bnxt_set_ring_params(struct bnxt *bp) /* 8 for CRC and VLAN */ rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); - rx_space = rx_size + NET_SKB_PAD + + rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; @@ -3895,9 +3898,15 @@ void bnxt_set_ring_params(struct bnxt *bp) } bp->rx_agg_ring_size = agg_ring_size; bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; - rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); - rx_space = rx_size + NET_SKB_PAD + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + if (BNXT_RX_PAGE_MODE(bp)) { + rx_space = BNXT_PAGE_MODE_BUF_SIZE; + rx_size = BNXT_MAX_PAGE_MODE_MTU; + } else { + rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN); + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } } bp->rx_buf_use_size = rx_size; @@ -5287,12 +5296,15 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) if (rc) return rc; - req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); - req->enables = - cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | - VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); + req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); + + if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) { + req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req->enables |= + cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + } /* thresholds not implemented in firmware yet */ req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 0f35459d5206..319d6851eecc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1819,6 +1819,7 @@ struct bnxt { #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \ (bp)->max_tpa_v2) && !is_kdump_kernel()) +#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO) #define BNXT_CHIP_SR2(bp) \ ((bp)->chip_num == CHIP_NUM_58818) -- cgit v1.2.3 From 
9a6aa3504885331a2fbf843c8cb7fa6be49a3d40 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:59:03 -0400 Subject: bnxt: add page_pool support for aggregation ring when using xdp If we are using aggregation rings with XDP enabled, allocate page buffers for the aggregation rings from the page_pool. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 77 +++++++++++++++++++------------ 1 file changed, 47 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2a919905f256..f89a45042f38 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -739,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, page_pool_recycle_direct(rxr->page_pool, page); return NULL; } - *mapping += bp->rx_dma_offset; return page; } @@ -781,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, if (!page) return -ENOMEM; + mapping += bp->rx_dma_offset; rx_buf->data = page; rx_buf->data_ptr = page_address(page) + bp->rx_offset; } else { @@ -841,33 +841,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, u16 sw_prod = rxr->rx_sw_agg_prod; unsigned int offset = 0; - if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { - page = rxr->rx_page; - if (!page) { + if (BNXT_RX_PAGE_MODE(bp)) { + page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); + + if (!page) + return -ENOMEM; + + } else { + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + page = rxr->rx_page; + if (!page) { + page = alloc_page(gfp); + if (!page) + return -ENOMEM; + rxr->rx_page = page; + rxr->rx_page_offset = 0; + } + offset = rxr->rx_page_offset; + rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; + if (rxr->rx_page_offset == PAGE_SIZE) + rxr->rx_page = NULL; + else + get_page(page); + } else { page = alloc_page(gfp); if (!page) return -ENOMEM; - rxr->rx_page = page; - rxr->rx_page_offset = 0; } - offset = rxr->rx_page_offset; - rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; - if (rxr->rx_page_offset == PAGE_SIZE) - rxr->rx_page = NULL; - else - get_page(page); - } else { - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - } - mapping = dma_map_page_attrs(&pdev->dev, page, offset, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); - if (dma_mapping_error(&pdev->dev, mapping)) { - __free_page(page); - return -EIO; + mapping = dma_map_page_attrs(&pdev->dev, page, offset, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, + DMA_ATTR_WEAK_ORDERING); + if (dma_mapping_error(&pdev->dev, mapping)) { + __free_page(page); + return -EIO; + } } if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) @@ -1105,7 +1113,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, } dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - DMA_FROM_DEVICE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); total_frag_len += frag_len; @@ -2936,14 +2944,23 @@ skip_rx_buf_free: if (!page) continue; - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); + if (BNXT_RX_PAGE_MODE(bp)) { + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, + BNXT_RX_PAGE_SIZE, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); - rx_agg_buf->page = NULL; - __clear_bit(i, rxr->rx_agg_bmap); + page_pool_recycle_direct(rxr->page_pool, page); + } else { + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, + 
BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, + DMA_ATTR_WEAK_ORDERING); + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); - __free_page(page); + __free_page(page); + } } skip_rx_agg_free: -- cgit v1.2.3 From 1dc4c557bfedfcdf7fc0c46795857773b7ad66e7 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:59:04 -0400 Subject: bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff Since we have an xdp_buff with frags there needs to be a way to convert that into a valid sk_buff in the event that XDP_PASS is the resulting operation. This adds a new rx_skb_func when the netdev has an MTU that prevents the packets from sitting in a single page. This also make sure that GRO/LRO stay disabled even when using the aggregation ring for large buffers. v3: Use BNXT_PAGE_MODE_BUF_SIZE for build_skb Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 65 ++++++++++++++++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 23 ++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 4 ++ 3 files changed, 85 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f89a45042f38..f96f41c7927e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -971,6 +971,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, rxr->rx_sw_agg_prod = sw_prod; } +static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 cons, void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + unsigned int len = offset_and_len & 0xffff; + struct page *page = data; + u16 prod = rxr->rx_prod; + struct sk_buff *skb; + int err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + dma_addr -= bp->rx_dma_offset; + dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE + + bp->rx_dma_offset); + if (!skb) { + __free_page(page); + return NULL; + } + skb_mark_for_recycle(skb); + skb_reserve(skb, bp->rx_dma_offset); + __skb_put(skb, len); + + return skb; +} + static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, void *data, u8 *data_ptr, @@ -993,7 +1026,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, dma_addr -= bp->rx_dma_offset; dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); - page_pool_release_page(rxr->page_pool, page); if (unlikely(!payload)) payload = eth_get_headlen(bp->dev, data_ptr, len); @@ -1004,6 +1036,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, return NULL; } + skb_mark_for_recycle(skb); off = (void *)data_ptr - page_address(page); skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, @@ -1949,6 +1982,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = -ENOMEM; goto next_rx; } + } else { + skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); + if (!skb) { + /* we should be able to free the old skb here */ + cpr->sw_stats.rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } } } @@ -3964,14 +4005,21 @@ void bnxt_set_ring_params(struct bnxt *bp) 
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { if (page_mode) { - if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) - return -EOPNOTSUPP; - bp->dev->max_mtu = - min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); bp->flags &= ~BNXT_FLAG_AGG_RINGS; - bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; + bp->flags |= BNXT_FLAG_RX_PAGE_MODE; + + if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + bp->flags |= BNXT_FLAG_JUMBO; + bp->rx_skb_func = bnxt_rx_multi_page_skb; + bp->dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_MTU); + } else { + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->rx_skb_func = bnxt_rx_page_skb; + bp->dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); + } bp->rx_dir = DMA_BIDIRECTIONAL; - bp->rx_skb_func = bnxt_rx_page_skb; /* Disable LRO or GRO_HW */ netdev_update_features(bp->dev); } else { @@ -11121,6 +11169,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + if (!(bp->flags & BNXT_FLAG_TPA)) + features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + if (!(features & NETIF_F_GRO)) features &= ~NETIF_F_GRO_HW; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index a3924e6030fe..5183357ca11c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -361,3 +361,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) } return rc; } + +struct sk_buff * +bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, + struct page_pool *pool, struct xdp_buff *xdp, + struct rx_cmp_ext *rxcmp1) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + + if (!skb) + return NULL; + skb_checksum_none_assert(skb); + if (RX_CMP_L4_CS_OK(rxcmp1)) { + if (bp->dev->features & NETIF_F_RXCSUM) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = RX_CMP_ENCAP(rxcmp1); + } + } + xdp_update_skb_shared_info(skb, num_frags, + sinfo->xdp_frags_size, + PAGE_SIZE * sinfo->nr_frags, + xdp_buff_is_frag_pfmemalloc(xdp)); + return skb; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 97e7905dbb20..27290f649be3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -28,4 +28,8 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr); void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, u8 **data_ptr, unsigned int *len, struct xdp_buff *xdp); +struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, + u8 num_frags, struct page_pool *pool, + struct xdp_buff *xdp, + struct rx_cmp_ext *rxcmp1); #endif -- cgit v1.2.3 From a7559bc8c17c3f9a91dcbeefe8642ba757fd09e8 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:59:05 -0400 Subject: bnxt: support transmit and free of aggregation buffers This patch adds the following features: - Support for XDP_TX and XDP_DROP action when using xdp_buff with frags - Support for freeing all frags attached to an xdp_buff - Cleanup of TX ring buffers after transmits complete - Slight change in definition of bnxt_sw_tx_bd since nr_frags and RX producer may both need to be used - Clear out skb_shared_info at the end of the buffer v2: Fix uninitialized variable warning in bnxt_xdp_buff_frags_free(). Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 18 +++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 117 +++++++++++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 5 +- 5 files changed, 126 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f96f41c7927e..25d74c9030fd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1951,9 +1951,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { - if (agg_bufs) - bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, - agg_bufs, false); + if (agg_bufs) { + if (!xdp_active) + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, + agg_bufs, false); + else + bnxt_xdp_buff_frags_free(rxr, &xdp); + } cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; @@ -1986,6 +1990,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); if (!skb) { /* we should be able to free the old skb here */ + bnxt_xdp_buff_frags_free(rxr, &xdp); cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; @@ -2605,10 +2610,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; - if (bnapi->events & BNXT_AGG_EVENT) - bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); } + if (bnapi->events & BNXT_AGG_EVENT) { + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + } bnapi->events = 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 319d6851eecc..a498ee297946 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -701,13 +701,12 @@ struct bnxt_sw_tx_bd { }; DEFINE_DMA_UNMAP_ADDR(mapping); DEFINE_DMA_UNMAP_LEN(len); + struct page *page; u8 is_gso; u8 is_push; u8 action; - union { - unsigned short nr_frags; - u16 rx_prod; - }; + unsigned short nr_frags; + u16 rx_prod; }; struct bnxt_sw_rx_bd { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 22e965e18fbc..b3a48d6675fe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -3491,7 +3491,7 @@ static int bnxt_run_loopback(struct bnxt *bp) dev_kfree_skb(skb); return -EIO; } - bnxt_xmit_bd(bp, txr, map, pkt_size); + bnxt_xmit_bd(bp, txr, map, pkt_size, NULL); /* Sync BD data before updating doorbell */ wmb(); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 5183357ca11c..c2905f0a8c6c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -24,36 +24,91 @@ DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, - dma_addr_t mapping, u32 len) + dma_addr_t mapping, u32 len, + struct xdp_buff *xdp) { - struct bnxt_sw_tx_bd *tx_buf; + struct skb_shared_info *sinfo; + struct 
bnxt_sw_tx_bd *tx_buf, *first_buf; struct tx_bd *txbd; + int num_frags = 0; u32 flags; u16 prod; + int i; + + if (xdp && xdp_buff_has_frags(xdp)) { + sinfo = xdp_get_shared_info_from_buff(xdp); + num_frags = sinfo->nr_frags; + } + /* fill up the first buffer */ prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; + first_buf = tx_buf; + tx_buf->nr_frags = num_frags; + if (xdp) + tx_buf->page = virt_to_head_page(xdp->data); txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; - flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) | - TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9]; + flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT); txbd->tx_bd_len_flags_type = cpu_to_le32(flags); txbd->tx_bd_opaque = prod; txbd->tx_bd_haddr = cpu_to_le64(mapping); + /* now let us fill up the frags into the next buffers */ + for (i = 0; i < num_frags ; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + struct bnxt_sw_tx_bd *frag_tx_buf; + struct pci_dev *pdev = bp->pdev; + dma_addr_t frag_mapping; + int frag_len; + + prod = NEXT_TX(prod); + txr->tx_prod = prod; + + /* first fill up the first buffer */ + frag_tx_buf = &txr->tx_buf_ring[prod]; + frag_tx_buf->page = skb_frag_page(frag); + + txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; + + frag_len = skb_frag_size(frag); + frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, + frag_len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping))) + return NULL; + + dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping); + + flags = frag_len << TX_BD_LEN_SHIFT; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd->tx_bd_opaque = prod; + txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); + + len = frag_len; + } + + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags | + TX_BD_FLAGS_PACKET_END); + /* Sync TX BD */ + wmb(); prod = NEXT_TX(prod); txr->tx_prod = prod; - return tx_buf; + + return first_buf; } static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, - dma_addr_t mapping, u32 len, u16 rx_prod) + dma_addr_t mapping, u32 len, u16 rx_prod, + struct xdp_buff *xdp) { struct bnxt_sw_tx_bd *tx_buf; - tx_buf = bnxt_xmit_bd(bp, txr, mapping, len); + tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); tx_buf->rx_prod = rx_prod; tx_buf->action = XDP_TX; + } static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, @@ -63,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, { struct bnxt_sw_tx_bd *tx_buf; - tx_buf = bnxt_xmit_bd(bp, txr, mapping, len); + tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL); tx_buf->action = XDP_REDIRECT; tx_buf->xdpf = xdpf; dma_unmap_addr_set(tx_buf, mapping, mapping); @@ -78,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) struct bnxt_sw_tx_bd *tx_buf; u16 tx_cons = txr->tx_cons; u16 last_tx_cons = tx_cons; - int i; + int i, j, frags; for (i = 0; i < nr_pkts; i++) { tx_buf = &txr->tx_buf_ring[tx_cons]; @@ -96,6 +151,13 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } else if (tx_buf->action == XDP_TX) { rx_doorbell_needed = true; last_tx_cons = tx_cons; + + frags = tx_buf->nr_frags; + for (j = 0; j < frags; j++) { + tx_cons = NEXT_TX(tx_cons); + tx_buf = &txr->tx_buf_ring[tx_cons]; + page_pool_recycle_direct(rxr->page_pool, tx_buf->page); + } } tx_cons = NEXT_TX(tx_cons); } @@ -103,6 +165,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) if (rx_doorbell_needed) { tx_buf = 
&txr->tx_buf_ring[last_tx_cons]; bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod); + } } @@ -133,6 +196,23 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false); } +void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, + struct xdp_buff *xdp) +{ + struct skb_shared_info *shinfo; + int i; + + if (!xdp || !xdp_buff_has_frags(xdp)) + return; + shinfo = xdp_get_shared_info_from_buff(xdp); + for (i = 0; i < shinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&shinfo->frags[i]); + + page_pool_recycle_direct(rxr->page_pool, page); + } + shinfo->nr_frags = 0; +} + /* returns the following: * true - packet consumed by XDP and new buffer is allocated. * false - packet should be passed to the stack. @@ -145,6 +225,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct bnxt_sw_rx_bd *rx_buf; struct pci_dev *pdev; dma_addr_t mapping; + u32 tx_needed = 1; void *orig_data; u32 tx_avail; u32 offset; @@ -180,18 +261,28 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, case XDP_TX: rx_buf = &rxr->rx_buf_ring[cons]; mapping = rx_buf->mapping - bp->rx_dma_offset; + *event = 0; + + if (unlikely(xdp_buff_has_frags(&xdp))) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp); - if (tx_avail < 1) { + tx_needed += sinfo->nr_frags; + *event = BNXT_AGG_EVENT; + } + + if (tx_avail < tx_needed) { trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_xdp_buff_frags_free(rxr, &xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; } - *event = BNXT_TX_EVENT; dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, bp->rx_dir); + + *event |= BNXT_TX_EVENT; __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, - NEXT_RX(rxr->rx_prod)); + NEXT_RX(rxr->rx_prod), &xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; case XDP_REDIRECT: @@ -208,6 +299,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, /* if we are unable to allocate a new buffer, abort and reuse */ if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_xdp_buff_frags_free(rxr, &xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; } @@ -227,6 +319,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, trace_xdp_exception(bp->dev, xdp_prog, act); fallthrough; case XDP_DROP: + bnxt_xdp_buff_frags_free(rxr, &xdp); bnxt_reuse_rx_data(rxr, cons, page); break; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 27290f649be3..505911ae095d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -14,7 +14,8 @@ DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, - dma_addr_t mapping, u32 len); + dma_addr_t mapping, u32 len, + struct xdp_buff *xdp); void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct xdp_buff xdp, struct page *page, unsigned int *len, @@ -28,6 +29,8 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr); void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, u8 **data_ptr, unsigned int *len, struct xdp_buff *xdp); +void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, + struct xdp_buff *xdp); struct sk_buff 
*bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, struct page_pool *pool, struct xdp_buff *xdp, -- cgit v1.2.3 From 9f4b28301ce6a594a692a0abc2002d0bb912f2b7 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 8 Apr 2022 03:59:06 -0400 Subject: bnxt: XDP multibuffer enablement Allow aggregation buffers to be in place in the receive path and allow XDP programs to be attached when using an MTU larger than 4k. v3: Add a check to make sure the XDP program supports multipage packets. Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 5 +++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 25d74c9030fd..0489c1c2e7dd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1939,8 +1939,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, xdp_active = true; } - /* skip running XDP prog if there are aggregation bufs */ - if (!agg_bufs && xdp_active) { + if (xdp_active) { if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) { rc = 1; goto next_rx; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index c2905f0a8c6c..f02fe906dedb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -387,8 +387,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) int tx_xdp = 0, rc, tc; struct bpf_prog *old; - if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { - netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n", + if (prog && !prog->aux->xdp_has_frags && + bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n", bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); return -EOPNOTSUPP; } -- cgit v1.2.3 From e05afd0848f8452edb7b4138536fae2aaf01552a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 5 Apr 2022 21:16:27 -0700 Subject: net: atm: remove the ambassador driver The driver for ATM Ambassador devices spews build warnings on microblaze. The virt_to_bus() calls discard the volatile keyword. The right thing to do would be to migrate this driver to a modern DMA API but it seems unlikely anyone is actually using it. There had been no fixes or functional changes here since the git era began. In fact it sounds like the FW loading was broken from 2008 'til 2012 - see commit fcdc90b025e6 ("atm: forever loop loading ambassador firmware"). Let's remove this driver; there isn't much changing in the APIs, and if users come forward we can apologize and revert. Link: https://lore.kernel.org/all/20220321144013.440d7fc0@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com/ Signed-off-by: Jakub Kicinski Signed-off-by: David S.
Miller --- arch/mips/configs/gpr_defconfig | 1 - arch/mips/configs/mtx1_defconfig | 1 - drivers/atm/Kconfig | 25 - drivers/atm/Makefile | 1 - drivers/atm/ambassador.c | 2400 -------------------------------------- drivers/atm/ambassador.h | 648 ---------- 6 files changed, 3076 deletions(-) delete mode 100644 drivers/atm/ambassador.c delete mode 100644 drivers/atm/ambassador.h (limited to 'drivers') diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index f86838bcae4c..b0489437621a 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -182,7 +182,6 @@ CONFIG_ATM_FIRESTREAM=m CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m -CONFIG_ATM_AMBASSADOR=m CONFIG_ATM_HORIZON=m CONFIG_ATM_IA=m CONFIG_ATM_FORE200E=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index eb0b313a2109..c98099f0b354 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -259,7 +259,6 @@ CONFIG_ATM_FIRESTREAM=m CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m -CONFIG_ATM_AMBASSADOR=m CONFIG_ATM_HORIZON=m CONFIG_ATM_IA=m CONFIG_ATM_FORE200E=m diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index b9370bbca828..7be08e24955c 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -244,31 +244,6 @@ config ATM_IDT77252_USE_SUNI depends on ATM_IDT77252 default y -config ATM_AMBASSADOR - tristate "Madge Ambassador (Collage PCI 155 Server)" - depends on PCI && VIRT_TO_BUS - select BITREVERSE - help - This is a driver for ATMizer based ATM card produced by Madge - Networks Ltd. Say Y (or M to compile as a module named ambassador) - here if you have one of these cards. - -config ATM_AMBASSADOR_DEBUG - bool "Enable debugging messages" - depends on ATM_AMBASSADOR - help - Somewhat useful debugging messages are available. The choice of - messages is controlled by a bitmap. This may be specified as a - module argument (kernel command line argument as well?), changed - dynamically using an ioctl (not yet) or changed by sending the - string "Dxxxx" to VCI 1023 (where x is a hex digit). See the file - for the meanings of the bits in the - mask. - - When active, these messages can have a significant impact on the - speed of the driver, and the size of your syslog files! When - inactive, they will have only a modest impact on performance. - config ATM_HORIZON tristate "Madge Horizon [Ultra] (Collage PCI 25 and Collage PCI 155 Client)" depends on PCI && VIRT_TO_BUS diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile index aa191616a72e..99ecbc280643 100644 --- a/drivers/atm/Makefile +++ b/drivers/atm/Makefile @@ -7,7 +7,6 @@ fore_200e-y := fore200e.o obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o obj-$(CONFIG_ATM_NICSTAR) += nicstar.o -obj-$(CONFIG_ATM_AMBASSADOR) += ambassador.o obj-$(CONFIG_ATM_HORIZON) += horizon.o obj-$(CONFIG_ATM_IA) += iphase.o suni.o obj-$(CONFIG_ATM_FORE200E) += fore_200e.o diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c deleted file mode 100644 index c039b8a4fefe..000000000000 --- a/drivers/atm/ambassador.c +++ /dev/null @@ -1,2400 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - Madge Ambassador ATM Adapter driver. - Copyright (C) 1995-1999 Madge Networks Ltd. 
- -*/ - -/* * dedicated to the memory of Graham Gordon 1971-1998 * */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "ambassador.h" - -#define maintainer_string "Giuliano Procida at Madge Networks " -#define description_string "Madge ATM Ambassador driver" -#define version_string "1.2.4" - -static inline void __init show_version (void) { - printk ("%s version %s\n", description_string, version_string); -} - -/* - - Theory of Operation - - I Hardware, detection, initialisation and shutdown. - - 1. Supported Hardware - - This driver is for the PCI ATMizer-based Ambassador card (except - very early versions). It is not suitable for the similar EISA "TR7" - card. Commercially, both cards are known as Collage Server ATM - adapters. - - The loader supports image transfer to the card, image start and few - other miscellaneous commands. - - Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023. - - The cards are big-endian. - - 2. Detection - - Standard PCI stuff, the early cards are detected and rejected. - - 3. Initialisation - - The cards are reset and the self-test results are checked. The - microcode image is then transferred and started. This waits for a - pointer to a descriptor containing details of the host-based queues - and buffers and various parameters etc. Once they are processed - normal operations may begin. The BIA is read using a microcode - command. - - 4. Shutdown - - This may be accomplished either by a card reset or via the microcode - shutdown command. Further investigation required. - - 5. Persistent state - - The card reset does not affect PCI configuration (good) or the - contents of several other "shared run-time registers" (bad) which - include doorbell and interrupt control as well as EEPROM and PCI - control. The driver must be careful when modifying these registers - not to touch bits it does not use and to undo any changes at exit. - - II Driver software - - 0. Generalities - - The adapter is quite intelligent (fast) and has a simple interface - (few features). VPI is always zero, 1024 VCIs are supported. There - is limited cell rate support. UBR channels can be capped and ABR - (explicit rate, but not EFCI) is supported. There is no CBR or VBR - support. - - 1. Driver <-> Adapter Communication - - Apart from the basic loader commands, the driver communicates - through three entities: the command queue (CQ), the transmit queue - pair (TXQ) and the receive queue pairs (RXQ). These three entities - are set up by the host and passed to the microcode just after it has - been started. - - All queues are host-based circular queues. They are contiguous and - (due to hardware limitations) have some restrictions as to their - locations in (bus) memory. They are of the "full means the same as - empty so don't do that" variety since the adapter uses pointers - internally. - - The queue pairs work as follows: one queue is for supply to the - adapter, items in it are pending and are owned by the adapter; the - other is the queue for return from the adapter, items in it have - been dealt with by the adapter. The host adds items to the supply - (TX descriptors and free RX buffer descriptors) and removes items - from the return (TX and RX completions). The adapter deals with out - of order completions. - - Interrupts (card to host) and the doorbell (host to card) are used - for signalling. - - 1. 
CQ - - This is to communicate "open VC", "close VC", "get stats" etc. to - the adapter. At most one command is retired every millisecond by the - card. There is no out of order completion or notification. The - driver needs to check the return code of the command, waiting as - appropriate. - - 2. TXQ - - TX supply items are of variable length (scatter gather support) and - so the queue items are (more or less) pointers to the real thing. - Each TX supply item contains a unique, host-supplied handle (the skb - bus address seems most sensible as this works for Alphas as well, - there is no need to do any endian conversions on the handles). - - TX return items consist of just the handles above. - - 3. RXQ (up to 4 of these with different lengths and buffer sizes) - - RX supply items consist of a unique, host-supplied handle (the skb - bus address again) and a pointer to the buffer data area. - - RX return items consist of the handle above, the VC, length and a - status word. This just screams "oh so easy" doesn't it? - - Note on RX pool sizes: - - Each pool should have enough buffers to handle a back-to-back stream - of minimum sized frames on a single VC. For example: - - frame spacing = 3us (about right) - - delay = IRQ lat + RX handling + RX buffer replenish = 20 (us) (a guess) - - min number of buffers for one VC = 1 + delay/spacing (buffers) - - delay/spacing = latency = (20+2)/3 = 7 (buffers) (rounding up) - - The 20us delay assumes that there is no need to sleep; if we need to - sleep to get buffers we are going to drop frames anyway. - - In fact, each pool should have enough buffers to support the - simultaneous reassembly of a separate frame on each VC and cope with - the case in which frames complete in round robin cell fashion on - each VC. - - Only one frame can complete at each cell arrival, so if "n" VCs are - open, the worst case is to have them all complete frames together - followed by all starting new frames together. - - desired number of buffers = n + delay/spacing - - These are the extreme requirements, however, they are "n+k" for some - "k" so we have only the constant to choose. This is the argument - rx_lats which current defaults to 7. - - Actually, "n ? n+k : 0" is better and this is what is implemented, - subject to the limit given by the pool size. - - 4. Driver locking - - Simple spinlocks are used around the TX and RX queue mechanisms. - Anyone with a faster, working method is welcome to implement it. - - The adapter command queue is protected with a spinlock. We always - wait for commands to complete. - - A more complex form of locking is used around parts of the VC open - and close functions. There are three reasons for a lock: 1. we need - to do atomic rate reservation and release (not used yet), 2. Opening - sometimes involves two adapter commands which must not be separated - by another command on the same VC, 3. the changes to RX pool size - must be atomic. The lock needs to work over context switches, so we - use a semaphore. - - III Hardware Features and Microcode Bugs - - 1. Byte Ordering - - *%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*! - - 2. Memory access - - All structures that are not accessed using DMA must be 4-byte - aligned (not a problem) and must not cross 4MB boundaries. - - There is a DMA memory hole at E0000000-E00000FF (groan). - - TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB - but for a hardware bug). 
- - RX buffers (DMA write) must not cross 16MB boundaries and must - include spare trailing bytes up to the next 4-byte boundary; they - will be written with rubbish. - - The PLX likes to prefetch; if reading up to 4 u32 past the end of - each TX fragment is not a problem, then TX can be made to go a - little faster by passing a flag at init that disables a prefetch - workaround. We do not pass this flag. (new microcode only) - - Now we: - . Note that alloc_skb rounds up size to a 16byte boundary. - . Ensure all areas do not traverse 4MB boundaries. - . Ensure all areas do not start at a E00000xx bus address. - (I cannot be certain, but this may always hold with Linux) - . Make all failures cause a loud message. - . Discard non-conforming SKBs (causes TX failure or RX fill delay). - . Discard non-conforming TX fragment descriptors (the TX fails). - In the future we could: - . Allow RX areas that traverse 4MB (but not 16MB) boundaries. - . Segment TX areas into some/more fragments, when necessary. - . Relax checks for non-DMA items (ignore hole). - . Give scatter-gather (iovec) requirements using ???. (?) - - 3. VC close is broken (only for new microcode) - - The VC close adapter microcode command fails to do anything if any - frames have been received on the VC but none have been transmitted. - Frames continue to be reassembled and passed (with IRQ) to the - driver. - - IV To Do List - - . Fix bugs! - - . Timer code may be broken. - - . Deal with buggy VC close (somehow) in microcode 12. - - . Handle interrupted and/or non-blocking writes - is this a job for - the protocol layer? - - . Add code to break up TX fragments when they span 4MB boundaries. - - . Add SUNI phy layer (need to know where SUNI lives on card). - - . Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b) - leave extra headroom space for Ambassador TX descriptors. - - . Understand these elements of struct atm_vcc: recvq (proto?), - sleep, callback, listenq, backlog_quota, reply and user_back. - - . Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable). - - . Impose a TX-pending limit (2?) on each VC, help avoid TX q overflow. - - . Decide whether RX buffer recycling is or can be made completely safe; - turn it back on. It looks like Werner is going to axe this. - - . Implement QoS changes on open VCs (involves extracting parts of VC open - and close into separate functions and using them to make changes). - - . Hack on command queue so that someone can issue multiple commands and wait - on the last one (OR only "no-op" or "wait" commands are waited for). - - . Eliminate need for while-schedule around do_command. 
- -*/ - -static void do_housekeeping (struct timer_list *t); -/********** globals **********/ - -static unsigned short debug = 0; -static unsigned int cmds = 8; -static unsigned int txs = 32; -static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 }; -static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 }; -static unsigned int rx_lats = 7; -static unsigned char pci_lat = 0; - -static const unsigned long onegigmask = -1 << 30; - -/********** access to adapter **********/ - -static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) { - PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x", addr, data); -#ifdef AMB_MMIO - dev->membase[addr / sizeof(u32)] = data; -#else - outl (data, dev->iobase + addr); -#endif -} - -static inline u32 rd_plain (const amb_dev * dev, size_t addr) { -#ifdef AMB_MMIO - u32 data = dev->membase[addr / sizeof(u32)]; -#else - u32 data = inl (dev->iobase + addr); -#endif - PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x", addr, data); - return data; -} - -static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) { - __be32 be = cpu_to_be32 (data); - PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x b[%08x]", addr, data, be); -#ifdef AMB_MMIO - dev->membase[addr / sizeof(u32)] = be; -#else - outl (be, dev->iobase + addr); -#endif -} - -static inline u32 rd_mem (const amb_dev * dev, size_t addr) { -#ifdef AMB_MMIO - __be32 be = dev->membase[addr / sizeof(u32)]; -#else - __be32 be = inl (dev->iobase + addr); -#endif - u32 data = be32_to_cpu (be); - PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x b[%08x]", addr, data, be); - return data; -} - -/********** dump routines **********/ - -static inline void dump_registers (const amb_dev * dev) { -#ifdef DEBUG_AMBASSADOR - if (debug & DBG_REGS) { - size_t i; - PRINTD (DBG_REGS, "reading PLX control: "); - for (i = 0x00; i < 0x30; i += sizeof(u32)) - rd_mem (dev, i); - PRINTD (DBG_REGS, "reading mailboxes: "); - for (i = 0x40; i < 0x60; i += sizeof(u32)) - rd_mem (dev, i); - PRINTD (DBG_REGS, "reading doorb irqev irqen reset:"); - for (i = 0x60; i < 0x70; i += sizeof(u32)) - rd_mem (dev, i); - } -#else - (void) dev; -#endif - return; -} - -static inline void dump_loader_block (volatile loader_block * lb) { -#ifdef DEBUG_AMBASSADOR - unsigned int i; - PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:", - lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command)); - for (i = 0; i < MAX_COMMAND_DATA; ++i) - PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i])); - PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid)); -#else - (void) lb; -#endif - return; -} - -static inline void dump_command (command * cmd) { -#ifdef DEBUG_AMBASSADOR - unsigned int i; - PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:", - cmd, /*be32_to_cpu*/ (cmd->request)); - for (i = 0; i < 3; ++i) - PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i])); - PRINTDE (DBG_CMD, ""); -#else - (void) cmd; -#endif - return; -} - -static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) { -#ifdef DEBUG_AMBASSADOR - unsigned int i; - unsigned char * data = skb->data; - PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc); - for (i=0; ilen && i < 256;i++) - PRINTDM (DBG_DATA, "%02x ", data[i]); - PRINTDE (DBG_DATA,""); -#else - (void) prefix; - (void) vc; - (void) skb; -#endif - return; -} - -/********** check memory areas for use by Ambassador **********/ - -/* see limitations under Hardware Features */ - -static int check_area (void * start, size_t length) { - // assumes length > 0 - const u32 
fourmegmask = -1 << 22; - const u32 twofivesixmask = -1 << 8; - const u32 starthole = 0xE0000000; - u32 startaddress = virt_to_bus (start); - u32 lastaddress = startaddress+length-1; - if ((startaddress ^ lastaddress) & fourmegmask || - (startaddress & twofivesixmask) == starthole) { - PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!", - startaddress, lastaddress); - return -1; - } else { - return 0; - } -} - -/********** free an skb (as per ATM device driver documentation) **********/ - -static void amb_kfree_skb (struct sk_buff * skb) { - if (ATM_SKB(skb)->vcc->pop) { - ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb); - } else { - dev_kfree_skb_any (skb); - } -} - -/********** TX completion **********/ - -static void tx_complete (amb_dev * dev, tx_out * tx) { - tx_simple * tx_descr = bus_to_virt (tx->handle); - struct sk_buff * skb = tx_descr->skb; - - PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); - - // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); - - // free the descriptor - kfree (tx_descr); - - // free the skb - amb_kfree_skb (skb); - - dev->stats.tx_ok++; - return; -} - -/********** RX completion **********/ - -static void rx_complete (amb_dev * dev, rx_out * rx) { - struct sk_buff * skb = bus_to_virt (rx->handle); - u16 vc = be16_to_cpu (rx->vc); - // unused: u16 lec_id = be16_to_cpu (rx->lec_id); - u16 status = be16_to_cpu (rx->status); - u16 rx_len = be16_to_cpu (rx->length); - - PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len); - - // XXX move this in and add to VC stats ??? - if (!status) { - struct atm_vcc * atm_vcc = dev->rxer[vc]; - dev->stats.rx.ok++; - - if (atm_vcc) { - - if (rx_len <= atm_vcc->qos.rxtp.max_sdu) { - - if (atm_charge (atm_vcc, skb->truesize)) { - - // prepare socket buffer - ATM_SKB(skb)->vcc = atm_vcc; - skb_put (skb, rx_len); - - dump_skb ("<<<", vc, skb); - - // VC layer stats - atomic_inc(&atm_vcc->stats->rx); - __net_timestamp(skb); - // end of our responsibility - atm_vcc->push (atm_vcc, skb); - return; - - } else { - // someone fix this (message), please! - PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize); - // drop stats incremented in atm_charge - } - - } else { - PRINTK (KERN_INFO, "dropped over-size frame"); - // should we count this? - atomic_inc(&atm_vcc->stats->rx_drop); - } - - } else { - PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc); - // this is an adapter bug, only in new version of microcode - } - - } else { - dev->stats.rx.error++; - if (status & CRC_ERR) - dev->stats.rx.badcrc++; - if (status & LEN_ERR) - dev->stats.rx.toolong++; - if (status & ABORT_ERR) - dev->stats.rx.aborted++; - if (status & UNUSED_ERR) - dev->stats.rx.unused++; - } - - dev_kfree_skb_any (skb); - return; -} - -/* - - Note on queue handling. - - Here "give" and "take" refer to queue entries and a queue (pair) - rather than frames to or from the host or adapter. Empty frame - buffers are given to the RX queue pair and returned unused or - containing RX frames. TX frames (well, pointers to TX fragment - lists) are given to the TX queue pair, completions are returned. 
- -*/ - -/********** command queue **********/ - -// I really don't like this, but it's the best I can do at the moment - -// also, the callers are responsible for byte order as the microcode -// sometimes does 16-bit accesses (yuk yuk yuk) - -static int command_do (amb_dev * dev, command * cmd) { - amb_cq * cq = &dev->cq; - volatile amb_cq_ptrs * ptrs = &cq->ptrs; - command * my_slot; - - PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev); - - if (test_bit (dead, &dev->flags)) - return 0; - - spin_lock (&cq->lock); - - // if not full... - if (cq->pending < cq->maximum) { - // remember my slot for later - my_slot = ptrs->in; - PRINTD (DBG_CMD, "command in slot %p", my_slot); - - dump_command (cmd); - - // copy command in - *ptrs->in = *cmd; - cq->pending++; - ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit); - - // mail the command - wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in)); - - if (cq->pending > cq->high) - cq->high = cq->pending; - spin_unlock (&cq->lock); - - // these comments were in a while-loop before, msleep removes the loop - // go to sleep - // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout); - msleep(cq->pending); - - // wait for my slot to be reached (all waiters are here or above, until...) - while (ptrs->out != my_slot) { - PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule(); - } - - // wait on my slot (... one gets to its slot, and... ) - while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) { - PRINTD (DBG_CMD, "wait: command slot completion"); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule(); - } - - PRINTD (DBG_CMD, "command complete"); - // update queue (... moves the queue along to the next slot) - spin_lock (&cq->lock); - cq->pending--; - // copy command out - *cmd = *ptrs->out; - ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit); - spin_unlock (&cq->lock); - - return 0; - } else { - cq->filled++; - spin_unlock (&cq->lock); - return -EAGAIN; - } - -} - -/********** TX queue pair **********/ - -static int tx_give (amb_dev * dev, tx_in * tx) { - amb_txq * txq = &dev->txq; - unsigned long flags; - - PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev); - - if (test_bit (dead, &dev->flags)) - return 0; - - spin_lock_irqsave (&txq->lock, flags); - - if (txq->pending < txq->maximum) { - PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr); - - *txq->in.ptr = *tx; - txq->pending++; - txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit); - // hand over the TX and ring the bell - wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr)); - wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME); - - if (txq->pending > txq->high) - txq->high = txq->pending; - spin_unlock_irqrestore (&txq->lock, flags); - return 0; - } else { - txq->filled++; - spin_unlock_irqrestore (&txq->lock, flags); - return -EAGAIN; - } -} - -static int tx_take (amb_dev * dev) { - amb_txq * txq = &dev->txq; - unsigned long flags; - - PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev); - - spin_lock_irqsave (&txq->lock, flags); - - if (txq->pending && txq->out.ptr->handle) { - // deal with TX completion - tx_complete (dev, txq->out.ptr); - // mark unused again - txq->out.ptr->handle = 0; - // remove item - txq->pending--; - txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit); - - spin_unlock_irqrestore (&txq->lock, flags); - return 0; - } else { - - spin_unlock_irqrestore (&txq->lock, flags); - return -1; - } -} - -/********** RX queue pairs 
**********/ - -static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) { - amb_rxq * rxq = &dev->rxq[pool]; - unsigned long flags; - - PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool); - - spin_lock_irqsave (&rxq->lock, flags); - - if (rxq->pending < rxq->maximum) { - PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr); - - *rxq->in.ptr = *rx; - rxq->pending++; - rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit); - // hand over the RX buffer - wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr)); - - spin_unlock_irqrestore (&rxq->lock, flags); - return 0; - } else { - spin_unlock_irqrestore (&rxq->lock, flags); - return -1; - } -} - -static int rx_take (amb_dev * dev, unsigned char pool) { - amb_rxq * rxq = &dev->rxq[pool]; - unsigned long flags; - - PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool); - - spin_lock_irqsave (&rxq->lock, flags); - - if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) { - // deal with RX completion - rx_complete (dev, rxq->out.ptr); - // mark unused again - rxq->out.ptr->status = 0; - rxq->out.ptr->length = 0; - // remove item - rxq->pending--; - rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit); - - if (rxq->pending < rxq->low) - rxq->low = rxq->pending; - spin_unlock_irqrestore (&rxq->lock, flags); - return 0; - } else { - if (!rxq->pending && rxq->buffers_wanted) - rxq->emptied++; - spin_unlock_irqrestore (&rxq->lock, flags); - return -1; - } -} - -/********** RX Pool handling **********/ - -/* pre: buffers_wanted = 0, post: pending = 0 */ -static void drain_rx_pool (amb_dev * dev, unsigned char pool) { - amb_rxq * rxq = &dev->rxq[pool]; - - PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool); - - if (test_bit (dead, &dev->flags)) - return; - - /* we are not quite like the fill pool routines as we cannot just - remove one buffer, we have to remove all of them, but we might as - well pretend... */ - if (rxq->pending > rxq->buffers_wanted) { - command cmd; - cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q); - cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT); - while (command_do (dev, &cmd)) - schedule(); - /* the pool may also be emptied via the interrupt handler */ - while (rxq->pending > rxq->buffers_wanted) - if (rx_take (dev, pool)) - schedule(); - } - - return; -} - -static void drain_rx_pools (amb_dev * dev) { - unsigned char pool; - - PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev); - - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - drain_rx_pool (dev, pool); -} - -static void fill_rx_pool (amb_dev * dev, unsigned char pool, - gfp_t priority) -{ - rx_in rx; - amb_rxq * rxq; - - PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority); - - if (test_bit (dead, &dev->flags)) - return; - - rxq = &dev->rxq[pool]; - while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) { - - struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority); - if (!skb) { - PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool); - return; - } - if (check_area (skb->data, skb->truesize)) { - dev_kfree_skb_any (skb); - return; - } - // cast needed as there is no %? 
for pointer differences - PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li", - skb, skb->head, (long) skb_end_offset(skb)); - rx.handle = virt_to_bus (skb); - rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); - if (rx_give (dev, &rx, pool)) - dev_kfree_skb_any (skb); - - } - - return; -} - -// top up all RX pools -static void fill_rx_pools (amb_dev * dev) { - unsigned char pool; - - PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev); - - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - fill_rx_pool (dev, pool, GFP_ATOMIC); - - return; -} - -/********** enable host interrupts **********/ - -static void interrupts_on (amb_dev * dev) { - wr_plain (dev, offsetof(amb_mem, interrupt_control), - rd_plain (dev, offsetof(amb_mem, interrupt_control)) - | AMB_INTERRUPT_BITS); -} - -/********** disable host interrupts **********/ - -static void interrupts_off (amb_dev * dev) { - wr_plain (dev, offsetof(amb_mem, interrupt_control), - rd_plain (dev, offsetof(amb_mem, interrupt_control)) - &~ AMB_INTERRUPT_BITS); -} - -/********** interrupt handling **********/ - -static irqreturn_t interrupt_handler(int irq, void *dev_id) { - amb_dev * dev = dev_id; - - PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id); - - { - u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt)); - - // for us or someone else sharing the same interrupt - if (!interrupt) { - PRINTD (DBG_IRQ, "irq not for me: %d", irq); - return IRQ_NONE; - } - - // definitely for us - PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt); - wr_plain (dev, offsetof(amb_mem, interrupt), -1); - } - - { - unsigned int irq_work = 0; - unsigned char pool; - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - while (!rx_take (dev, pool)) - ++irq_work; - while (!tx_take (dev)) - ++irq_work; - - if (irq_work) { - fill_rx_pools (dev); - - PRINTD (DBG_IRQ, "work done: %u", irq_work); - } else { - PRINTD (DBG_IRQ|DBG_WARN, "no work done"); - } - } - - PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id); - return IRQ_HANDLED; -} - -/********** make rate (not quite as much fun as Horizon) **********/ - -static int make_rate (unsigned int rate, rounding r, - u16 * bits, unsigned int * actual) { - unsigned char exp = -1; // hush gcc - unsigned int man = -1; // hush gcc - - PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate); - - // rates in cells per second, ITU format (nasty 16-bit floating-point) - // given 5-bit e and 9-bit m: - // rate = EITHER (1+m/2^9)*2^e OR 0 - // bits = EITHER 1<<14 | e<<9 | m OR 0 - // (bit 15 is "reserved", bit 14 "non-zero") - // smallest rate is 0 (special representation) - // largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1) - // smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0) - // simple algorithm: - // find position of top bit, this gives e - // remove top bit and shift (rounding if feeling clever) by 9-e - - // ucode bug: please don't set bit 14! 
so 0 rate not representable - - if (rate > 0xffc00000U) { - // larger than largest representable rate - - if (r == round_up) { - return -EINVAL; - } else { - exp = 31; - man = 511; - } - - } else if (rate) { - // representable rate - - exp = 31; - man = rate; - - // invariant: rate = man*2^(exp-31) - while (!(man & (1<<31))) { - exp = exp - 1; - man = man<<1; - } - - // man has top bit set - // rate = (2^31+(man-2^31))*2^(exp-31) - // rate = (1+(man-2^31)/2^31)*2^exp - man = man<<1; - man &= 0xffffffffU; // a nop on 32-bit systems - // rate = (1+man/2^32)*2^exp - - // exp is in the range 0 to 31, man is in the range 0 to 2^32-1 - // time to lose significance... we want m in the range 0 to 2^9-1 - // rounding presents a minor problem... we first decide which way - // we are rounding (based on given rounding direction and possibly - // the bits of the mantissa that are to be discarded). - - switch (r) { - case round_down: { - // just truncate - man = man>>(32-9); - break; - } - case round_up: { - // check all bits that we are discarding - if (man & (~0U>>9)) { - man = (man>>(32-9)) + 1; - if (man == (1<<9)) { - // no need to check for round up outside of range - man = 0; - exp += 1; - } - } else { - man = (man>>(32-9)); - } - break; - } - case round_nearest: { - // check msb that we are discarding - if (man & (1<<(32-9-1))) { - man = (man>>(32-9)) + 1; - if (man == (1<<9)) { - // no need to check for round up outside of range - man = 0; - exp += 1; - } - } else { - man = (man>>(32-9)); - } - break; - } - } - - } else { - // zero rate - not representable - - if (r == round_down) { - return -EINVAL; - } else { - exp = 0; - man = 0; - } - - } - - PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp); - - if (bits) - *bits = /* (1<<14) | */ (exp<<9) | man; - - if (actual) - *actual = (exp >= 9) - ? 
(1 << exp) + (man << (exp-9)) - : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp)); - - return 0; -} - -/********** Linux ATM Operations **********/ - -// some are not yet implemented while others do not make sense for -// this device - -/********** Open a VC **********/ - -static int amb_open (struct atm_vcc * atm_vcc) -{ - int error; - - struct atm_qos * qos; - struct atm_trafprm * txtp; - struct atm_trafprm * rxtp; - u16 tx_rate_bits = -1; // hush gcc - u16 tx_vc_bits = -1; // hush gcc - u16 tx_frame_bits = -1; // hush gcc - - amb_dev * dev = AMB_DEV(atm_vcc->dev); - amb_vcc * vcc; - unsigned char pool = -1; // hush gcc - short vpi = atm_vcc->vpi; - int vci = atm_vcc->vci; - - PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci); - -#ifdef ATM_VPI_UNSPEC - // UNSPEC is deprecated, remove this code eventually - if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) { - PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)"); - return -EINVAL; - } -#endif - - if (!(0 <= vpi && vpi < (1<qos; - - if (qos->aal != ATM_AAL5) { - PRINTD (DBG_QOS, "AAL not supported"); - return -EINVAL; - } - - // traffic parameters - - PRINTD (DBG_QOS, "TX:"); - txtp = &qos->txtp; - if (txtp->traffic_class != ATM_NONE) { - switch (txtp->traffic_class) { - case ATM_UBR: { - // we take "the PCR" as a rate-cap - int pcr = atm_pcr_goal (txtp); - if (!pcr) { - // no rate cap - tx_rate_bits = 0; - tx_vc_bits = TX_UBR; - tx_frame_bits = TX_FRAME_NOTCAP; - } else { - rounding r; - if (pcr < 0) { - r = round_down; - pcr = -pcr; - } else { - r = round_up; - } - error = make_rate (pcr, r, &tx_rate_bits, NULL); - if (error) - return error; - tx_vc_bits = TX_UBR_CAPPED; - tx_frame_bits = TX_FRAME_CAPPED; - } - break; - } -#if 0 - case ATM_ABR: { - pcr = atm_pcr_goal (txtp); - PRINTD (DBG_QOS, "pcr goal = %d", pcr); - break; - } -#endif - default: { - // PRINTD (DBG_QOS, "request for non-UBR/ABR denied"); - PRINTD (DBG_QOS, "request for non-UBR denied"); - return -EINVAL; - } - } - PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx", - tx_rate_bits, tx_vc_bits); - } - - PRINTD (DBG_QOS, "RX:"); - rxtp = &qos->rxtp; - if (rxtp->traffic_class == ATM_NONE) { - // do nothing - } else { - // choose an RX pool (arranged in increasing size) - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) { - PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)", - pool, rxtp->max_sdu, dev->rxq[pool].buffer_size); - break; - } - if (pool == NUM_RX_POOLS) { - PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL, - "no pool suitable for VC (RX max_sdu %d is too large)", - rxtp->max_sdu); - return -EINVAL; - } - - switch (rxtp->traffic_class) { - case ATM_UBR: { - break; - } -#if 0 - case ATM_ABR: { - pcr = atm_pcr_goal (rxtp); - PRINTD (DBG_QOS, "pcr goal = %d", pcr); - break; - } -#endif - default: { - // PRINTD (DBG_QOS, "request for non-UBR/ABR denied"); - PRINTD (DBG_QOS, "request for non-UBR denied"); - return -EINVAL; - } - } - } - - // get space for our vcc stuff - vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL); - if (!vcc) { - PRINTK (KERN_ERR, "out of memory!"); - return -ENOMEM; - } - atm_vcc->dev_data = (void *) vcc; - - // no failures beyond this point - - // we are not really "immediately before allocating the connection - // identifier in hardware", but it will just have to do! 
- set_bit(ATM_VF_ADDR,&atm_vcc->flags); - - if (txtp->traffic_class != ATM_NONE) { - command cmd; - - vcc->tx_frame_bits = tx_frame_bits; - - mutex_lock(&dev->vcc_sf); - if (dev->rxer[vci]) { - // RXer on the channel already, just modify rate... - cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE); - cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0 - cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT); - while (command_do (dev, &cmd)) - schedule(); - // ... and TX flags, preserving the RX pool - cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS); - cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0 - cmd.args.modify_flags.flags = cpu_to_be32 - ( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT) - | (tx_vc_bits << SRB_FLAGS_SHIFT) ); - while (command_do (dev, &cmd)) - schedule(); - } else { - // no RXer on the channel, just open (with pool zero) - cmd.request = cpu_to_be32 (SRB_OPEN_VC); - cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0 - cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT); - cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT); - while (command_do (dev, &cmd)) - schedule(); - } - dev->txer[vci].tx_present = 1; - mutex_unlock(&dev->vcc_sf); - } - - if (rxtp->traffic_class != ATM_NONE) { - command cmd; - - vcc->rx_info.pool = pool; - - mutex_lock(&dev->vcc_sf); - /* grow RX buffer pool */ - if (!dev->rxq[pool].buffers_wanted) - dev->rxq[pool].buffers_wanted = rx_lats; - dev->rxq[pool].buffers_wanted += 1; - fill_rx_pool (dev, pool, GFP_KERNEL); - - if (dev->txer[vci].tx_present) { - // TXer on the channel already - // switch (from pool zero) to this pool, preserving the TX bits - cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS); - cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0 - cmd.args.modify_flags.flags = cpu_to_be32 - ( (pool << SRB_POOL_SHIFT) - | (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) ); - } else { - // no TXer on the channel, open the VC (with no rate info) - cmd.request = cpu_to_be32 (SRB_OPEN_VC); - cmd.args.open.vc = cpu_to_be32 (vci); // vpi 0 - cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT); - cmd.args.open.rate = cpu_to_be32 (0); - } - while (command_do (dev, &cmd)) - schedule(); - // this link allows RX frames through - dev->rxer[vci] = atm_vcc; - mutex_unlock(&dev->vcc_sf); - } - - // indicate readiness - set_bit(ATM_VF_READY,&atm_vcc->flags); - - return 0; -} - -/********** Close a VC **********/ - -static void amb_close (struct atm_vcc * atm_vcc) { - amb_dev * dev = AMB_DEV (atm_vcc->dev); - amb_vcc * vcc = AMB_VCC (atm_vcc); - u16 vci = atm_vcc->vci; - - PRINTD (DBG_VCC|DBG_FLOW, "amb_close"); - - // indicate unreadiness - clear_bit(ATM_VF_READY,&atm_vcc->flags); - - // disable TXing - if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) { - command cmd; - - mutex_lock(&dev->vcc_sf); - if (dev->rxer[vci]) { - // RXer still on the channel, just modify rate... XXX not really needed - cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE); - cmd.args.modify_rate.vc = cpu_to_be32 (vci); // vpi 0 - cmd.args.modify_rate.rate = cpu_to_be32 (0); - // ... 
and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool - } else { - // no RXer on the channel, close channel - cmd.request = cpu_to_be32 (SRB_CLOSE_VC); - cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0 - } - dev->txer[vci].tx_present = 0; - while (command_do (dev, &cmd)) - schedule(); - mutex_unlock(&dev->vcc_sf); - } - - // disable RXing - if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { - command cmd; - - // this is (the?) one reason why we need the amb_vcc struct - unsigned char pool = vcc->rx_info.pool; - - mutex_lock(&dev->vcc_sf); - if (dev->txer[vci].tx_present) { - // TXer still on the channel, just go to pool zero XXX not really needed - cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS); - cmd.args.modify_flags.vc = cpu_to_be32 (vci); // vpi 0 - cmd.args.modify_flags.flags = cpu_to_be32 - (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT); - } else { - // no TXer on the channel, close the VC - cmd.request = cpu_to_be32 (SRB_CLOSE_VC); - cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0 - } - // forget the rxer - no more skbs will be pushed - if (atm_vcc != dev->rxer[vci]) - PRINTK (KERN_ERR, "%s vcc=%p rxer[vci]=%p", - "arghhh! we're going to die!", - vcc, dev->rxer[vci]); - dev->rxer[vci] = NULL; - while (command_do (dev, &cmd)) - schedule(); - - /* shrink RX buffer pool */ - dev->rxq[pool].buffers_wanted -= 1; - if (dev->rxq[pool].buffers_wanted == rx_lats) { - dev->rxq[pool].buffers_wanted = 0; - drain_rx_pool (dev, pool); - } - mutex_unlock(&dev->vcc_sf); - } - - // free our structure - kfree (vcc); - - // say the VPI/VCI is free again - clear_bit(ATM_VF_ADDR,&atm_vcc->flags); - - return; -} - -/********** Send **********/ - -static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { - amb_dev * dev = AMB_DEV(atm_vcc->dev); - amb_vcc * vcc = AMB_VCC(atm_vcc); - u16 vc = atm_vcc->vci; - unsigned int tx_len = skb->len; - unsigned char * tx_data = skb->data; - tx_simple * tx_descr; - tx_in tx; - - if (test_bit (dead, &dev->flags)) - return -EIO; - - PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u", - vc, tx_data, tx_len); - - dump_skb (">>>", vc, skb); - - if (!dev->txer[vc].tx_present) { - PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc); - return -EBADFD; - } - - // this is a driver private field so we have to set it ourselves, - // despite the fact that we are _required_ to use it to check for a - // pop function - ATM_SKB(skb)->vcc = atm_vcc; - - if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) { - PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping..."); - return -EIO; - } - - if (check_area (skb->data, skb->len)) { - atomic_inc(&atm_vcc->stats->tx_err); - return -ENOMEM; // ? 
- } - - // allocate memory for fragments - tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL); - if (!tx_descr) { - PRINTK (KERN_ERR, "could not allocate TX descriptor"); - return -ENOMEM; - } - if (check_area (tx_descr, sizeof(tx_simple))) { - kfree (tx_descr); - return -ENOMEM; - } - PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr); - - tx_descr->skb = skb; - - tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len); - tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data)); - - tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr); - tx_descr->tx_frag_end.vc = 0; - tx_descr->tx_frag_end.next_descriptor_length = 0; - tx_descr->tx_frag_end.next_descriptor = 0; -#ifdef AMB_NEW_MICROCODE - tx_descr->tx_frag_end.cpcs_uu = 0; - tx_descr->tx_frag_end.cpi = 0; - tx_descr->tx_frag_end.pad = 0; -#endif - - tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc); - tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end)); - tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag)); - - while (tx_give (dev, &tx)) - schedule(); - return 0; -} - -/********** Change QoS on a VC **********/ - -// int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags); - -/********** Free RX Socket Buffer **********/ - -#if 0 -static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) { - amb_dev * dev = AMB_DEV (atm_vcc->dev); - amb_vcc * vcc = AMB_VCC (atm_vcc); - unsigned char pool = vcc->rx_info.pool; - rx_in rx; - - // This may be unsafe for various reasons that I cannot really guess - // at. However, I note that the ATM layer calls kfree_skb rather - // than dev_kfree_skb at this point so we are least covered as far - // as buffer locking goes. There may be bugs if pcap clones RX skbs. - - PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)", - skb, atm_vcc, vcc); - - rx.handle = virt_to_bus (skb); - rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); - - skb->data = skb->head; - skb_reset_tail_pointer(skb); - skb->len = 0; - - if (!rx_give (dev, &rx, pool)) { - // success - PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool); - return; - } - - // just do what the ATM layer would have done - dev_kfree_skb_any (skb); - - return; -} -#endif - -/********** Proc File Output **********/ - -static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) { - amb_dev * dev = AMB_DEV (atm_dev); - int left = *pos; - unsigned char pool; - - PRINTD (DBG_FLOW, "amb_proc_read"); - - /* more diagnostics here? */ - - if (!left--) { - amb_stats * s = &dev->stats; - return sprintf (page, - "frames: TX OK %lu, RX OK %lu, RX bad %lu " - "(CRC %lu, long %lu, aborted %lu, unused %lu).\n", - s->tx_ok, s->rx.ok, s->rx.error, - s->rx.badcrc, s->rx.toolong, - s->rx.aborted, s->rx.unused); - } - - if (!left--) { - amb_cq * c = &dev->cq; - return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. 
", - c->pending, c->high, c->maximum); - } - - if (!left--) { - amb_txq * t = &dev->txq; - return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n", - t->pending, t->maximum, t->high, t->filled); - } - - if (!left--) { - unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:"); - for (pool = 0; pool < NUM_RX_POOLS; ++pool) { - amb_rxq * r = &dev->rxq[pool]; - count += sprintf (page+count, " %u/%u/%u %u %u", - r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied); - } - count += sprintf (page+count, ".\n"); - return count; - } - - if (!left--) { - unsigned int count = sprintf (page, "RX buffer sizes:"); - for (pool = 0; pool < NUM_RX_POOLS; ++pool) { - amb_rxq * r = &dev->rxq[pool]; - count += sprintf (page+count, " %u", r->buffer_size); - } - count += sprintf (page+count, ".\n"); - return count; - } - -#if 0 - if (!left--) { - // suni block etc? - } -#endif - - return 0; -} - -/********** Operation Structure **********/ - -static const struct atmdev_ops amb_ops = { - .open = amb_open, - .close = amb_close, - .send = amb_send, - .proc_read = amb_proc_read, - .owner = THIS_MODULE, -}; - -/********** housekeeping **********/ -static void do_housekeeping (struct timer_list *t) { - amb_dev * dev = from_timer(dev, t, housekeeping); - - // could collect device-specific (not driver/atm-linux) stats here - - // last resort refill once every ten seconds - fill_rx_pools (dev); - mod_timer(&dev->housekeeping, jiffies + 10*HZ); - - return; -} - -/********** creation of communication queues **********/ - -static int create_queues(amb_dev *dev, unsigned int cmds, unsigned int txs, - unsigned int *rxs, unsigned int *rx_buffer_sizes) -{ - unsigned char pool; - size_t total = 0; - void * memory; - void * limit; - - PRINTD (DBG_FLOW, "create_queues %p", dev); - - total += cmds * sizeof(command); - - total += txs * (sizeof(tx_in) + sizeof(tx_out)); - - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out)); - - memory = kmalloc (total, GFP_KERNEL); - if (!memory) { - PRINTK (KERN_ERR, "could not allocate queues"); - return -ENOMEM; - } - if (check_area (memory, total)) { - PRINTK (KERN_ERR, "queues allocated in nasty area"); - kfree (memory); - return -ENOMEM; - } - - limit = memory + total; - PRINTD (DBG_INIT, "queues from %p to %p", memory, limit); - - PRINTD (DBG_CMD, "command queue at %p", memory); - - { - command * cmd = memory; - amb_cq * cq = &dev->cq; - - cq->pending = 0; - cq->high = 0; - cq->maximum = cmds - 1; - - cq->ptrs.start = cmd; - cq->ptrs.in = cmd; - cq->ptrs.out = cmd; - cq->ptrs.limit = cmd + cmds; - - memory = cq->ptrs.limit; - } - - PRINTD (DBG_TX, "TX queue pair at %p", memory); - - { - tx_in * in = memory; - tx_out * out; - amb_txq * txq = &dev->txq; - - txq->pending = 0; - txq->high = 0; - txq->filled = 0; - txq->maximum = txs - 1; - - txq->in.start = in; - txq->in.ptr = in; - txq->in.limit = in + txs; - - memory = txq->in.limit; - out = memory; - - txq->out.start = out; - txq->out.ptr = out; - txq->out.limit = out + txs; - - memory = txq->out.limit; - } - - PRINTD (DBG_RX, "RX queue pairs at %p", memory); - - for (pool = 0; pool < NUM_RX_POOLS; ++pool) { - rx_in * in = memory; - rx_out * out; - amb_rxq * rxq = &dev->rxq[pool]; - - rxq->buffer_size = rx_buffer_sizes[pool]; - rxq->buffers_wanted = 0; - - rxq->pending = 0; - rxq->low = rxs[pool] - 1; - rxq->emptied = 0; - rxq->maximum = rxs[pool] - 1; - - rxq->in.start = in; - rxq->in.ptr = in; - rxq->in.limit = in + rxs[pool]; - - memory = 
rxq->in.limit; - out = memory; - - rxq->out.start = out; - rxq->out.ptr = out; - rxq->out.limit = out + rxs[pool]; - - memory = rxq->out.limit; - } - - if (memory == limit) { - return 0; - } else { - PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit); - kfree (limit - total); - return -ENOMEM; - } - -} - -/********** destruction of communication queues **********/ - -static void destroy_queues (amb_dev * dev) { - // all queues assumed empty - void * memory = dev->cq.ptrs.start; - // includes txq.in, txq.out, rxq[].in and rxq[].out - - PRINTD (DBG_FLOW, "destroy_queues %p", dev); - - PRINTD (DBG_INIT, "freeing queues at %p", memory); - kfree (memory); - - return; -} - -/********** basic loader commands and error handling **********/ -// centisecond timeouts - guessing away here -static unsigned int command_timeouts [] = { - [host_memory_test] = 15, - [read_adapter_memory] = 2, - [write_adapter_memory] = 2, - [adapter_start] = 50, - [get_version_number] = 10, - [interrupt_host] = 1, - [flash_erase_sector] = 1, - [adap_download_block] = 1, - [adap_erase_flash] = 1, - [adap_run_in_iram] = 1, - [adap_end_download] = 1 -}; - - -static unsigned int command_successes [] = { - [host_memory_test] = COMMAND_PASSED_TEST, - [read_adapter_memory] = COMMAND_READ_DATA_OK, - [write_adapter_memory] = COMMAND_WRITE_DATA_OK, - [adapter_start] = COMMAND_COMPLETE, - [get_version_number] = COMMAND_COMPLETE, - [interrupt_host] = COMMAND_COMPLETE, - [flash_erase_sector] = COMMAND_COMPLETE, - [adap_download_block] = COMMAND_COMPLETE, - [adap_erase_flash] = COMMAND_COMPLETE, - [adap_run_in_iram] = COMMAND_COMPLETE, - [adap_end_download] = COMMAND_COMPLETE -}; - -static int decode_loader_result (loader_command cmd, u32 result) -{ - int res; - const char *msg; - - if (result == command_successes[cmd]) - return 0; - - switch (result) { - case BAD_COMMAND: - res = -EINVAL; - msg = "bad command"; - break; - case COMMAND_IN_PROGRESS: - res = -ETIMEDOUT; - msg = "command in progress"; - break; - case COMMAND_PASSED_TEST: - res = 0; - msg = "command passed test"; - break; - case COMMAND_FAILED_TEST: - res = -EIO; - msg = "command failed test"; - break; - case COMMAND_READ_DATA_OK: - res = 0; - msg = "command read data ok"; - break; - case COMMAND_READ_BAD_ADDRESS: - res = -EINVAL; - msg = "command read bad address"; - break; - case COMMAND_WRITE_DATA_OK: - res = 0; - msg = "command write data ok"; - break; - case COMMAND_WRITE_BAD_ADDRESS: - res = -EINVAL; - msg = "command write bad address"; - break; - case COMMAND_WRITE_FLASH_FAILURE: - res = -EIO; - msg = "command write flash failure"; - break; - case COMMAND_COMPLETE: - res = 0; - msg = "command complete"; - break; - case COMMAND_FLASH_ERASE_FAILURE: - res = -EIO; - msg = "command flash erase failure"; - break; - case COMMAND_WRITE_BAD_DATA: - res = -EINVAL; - msg = "command write bad data"; - break; - default: - res = -EINVAL; - msg = "unknown error"; - PRINTD (DBG_LOAD|DBG_ERR, - "decode_loader_result got %d=%x !", - result, result); - break; - } - - PRINTK (KERN_ERR, "%s", msg); - return res; -} - -static int do_loader_command(volatile loader_block *lb, const amb_dev *dev, - loader_command cmd) -{ - - unsigned long timeout; - - PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command"); - - /* do a command - - Set the return value to zero, set the command type and set the - valid entry to the right magic value. The payload is already - correctly byte-ordered so we leave it alone. Hit the doorbell - with the bus address of this structure. 
- - */ - - lb->result = 0; - lb->command = cpu_to_be32 (cmd); - lb->valid = cpu_to_be32 (DMA_VALID); - // dump_registers (dev); - // dump_loader_block (lb); - wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask); - - timeout = command_timeouts[cmd] * 10; - - while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS)) - if (timeout) { - timeout = msleep_interruptible(timeout); - } else { - PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd); - dump_registers (dev); - dump_loader_block (lb); - return -ETIMEDOUT; - } - - if (cmd == adapter_start) { - // wait for start command to acknowledge... - timeout = 100; - while (rd_plain (dev, offsetof(amb_mem, doorbell))) - if (timeout) { - timeout = msleep_interruptible(timeout); - } else { - PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x", - be32_to_cpu (lb->result)); - dump_registers (dev); - return -ETIMEDOUT; - } - return 0; - } else { - return decode_loader_result (cmd, be32_to_cpu (lb->result)); - } - -} - -/* loader: determine loader version */ - -static int get_loader_version(loader_block *lb, const amb_dev *dev, - u32 *version) -{ - int res; - - PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version"); - - res = do_loader_command (lb, dev, get_version_number); - if (res) - return res; - if (version) - *version = be32_to_cpu (lb->payload.version); - return 0; -} - -/* loader: write memory data blocks */ - -static int loader_write(loader_block *lb, const amb_dev *dev, - const struct ihex_binrec *rec) -{ - transfer_block * tb = &lb->payload.transfer; - - PRINTD (DBG_FLOW|DBG_LOAD, "loader_write"); - - tb->address = rec->addr; - tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4); - memcpy(tb->data, rec->data, be16_to_cpu(rec->len)); - return do_loader_command (lb, dev, write_adapter_memory); -} - -/* loader: verify memory data blocks */ - -static int loader_verify(loader_block *lb, const amb_dev *dev, - const struct ihex_binrec *rec) -{ - transfer_block * tb = &lb->payload.transfer; - int res; - - PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify"); - - tb->address = rec->addr; - tb->count = cpu_to_be32(be16_to_cpu(rec->len) / 4); - res = do_loader_command (lb, dev, read_adapter_memory); - if (!res && memcmp(tb->data, rec->data, be16_to_cpu(rec->len))) - res = -EINVAL; - return res; -} - -/* loader: start microcode */ - -static int loader_start(loader_block *lb, const amb_dev *dev, u32 address) -{ - PRINTD (DBG_FLOW|DBG_LOAD, "loader_start"); - - lb->payload.start = cpu_to_be32 (address); - return do_loader_command (lb, dev, adapter_start); -} - -/********** reset card **********/ - -static inline void sf (const char * msg) -{ - PRINTK (KERN_ERR, "self-test failed: %s", msg); -} - -static int amb_reset (amb_dev * dev, int diags) { - u32 word; - - PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset"); - - word = rd_plain (dev, offsetof(amb_mem, reset_control)); - // put card into reset state - wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS); - // wait a short while - udelay (10); -#if 1 - // put card into known good state - wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS); - // clear all interrupts just in case - wr_plain (dev, offsetof(amb_mem, interrupt), -1); -#endif - // clear self-test done flag - wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0); - // take card out of reset state - wr_plain (dev, offsetof(amb_mem, reset_control), word &~ AMB_RESET_BITS); - - if (diags) { - unsigned long timeout; - // 4.2 second wait - msleep(4200); - // half second time-out - 
timeout = 500; - while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready))) - if (timeout) { - timeout = msleep_interruptible(timeout); - } else { - PRINTD (DBG_LOAD|DBG_ERR, "reset timed out"); - return -ETIMEDOUT; - } - - // get results of self-test - // XXX double check byte-order - word = rd_mem (dev, offsetof(amb_mem, mb.loader.result)); - if (word & SELF_TEST_FAILURE) { - if (word & GPINT_TST_FAILURE) - sf ("interrupt"); - if (word & SUNI_DATA_PATTERN_FAILURE) - sf ("SUNI data pattern"); - if (word & SUNI_DATA_BITS_FAILURE) - sf ("SUNI data bits"); - if (word & SUNI_UTOPIA_FAILURE) - sf ("SUNI UTOPIA interface"); - if (word & SUNI_FIFO_FAILURE) - sf ("SUNI cell buffer FIFO"); - if (word & SRAM_FAILURE) - sf ("bad SRAM"); - // better return value? - return -EIO; - } - - } - return 0; -} - -/********** transfer and start the microcode **********/ - -static int ucode_init(loader_block *lb, amb_dev *dev) -{ - const struct firmware *fw; - unsigned long start_address; - const struct ihex_binrec *rec; - const char *errmsg = NULL; - int res; - - res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev); - if (res) { - PRINTK (KERN_ERR, "Cannot load microcode data"); - return res; - } - - /* First record contains just the start address */ - rec = (const struct ihex_binrec *)fw->data; - if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) { - errmsg = "no start record"; - goto fail; - } - start_address = be32_to_cpup((__be32 *)rec->data); - - rec = ihex_next_binrec(rec); - - PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init"); - - while (rec) { - PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr), - be16_to_cpu(rec->len)); - if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) { - errmsg = "record too long"; - goto fail; - } - if (be16_to_cpu(rec->len) & 3) { - errmsg = "odd number of bytes"; - goto fail; - } - res = loader_write(lb, dev, rec); - if (res) - break; - - res = loader_verify(lb, dev, rec); - if (res) - break; - rec = ihex_next_binrec(rec); - } - release_firmware(fw); - if (!res) - res = loader_start(lb, dev, start_address); - - return res; -fail: - release_firmware(fw); - PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg); - return -EINVAL; -} - -/********** give adapter parameters **********/ - -static inline __be32 bus_addr(void * addr) { - return cpu_to_be32 (virt_to_bus (addr)); -} - -static int amb_talk(amb_dev *dev) -{ - adap_talk_block a; - unsigned char pool; - unsigned long timeout; - - PRINTD (DBG_FLOW, "amb_talk %p", dev); - - a.command_start = bus_addr (dev->cq.ptrs.start); - a.command_end = bus_addr (dev->cq.ptrs.limit); - a.tx_start = bus_addr (dev->txq.in.start); - a.tx_end = bus_addr (dev->txq.in.limit); - a.txcom_start = bus_addr (dev->txq.out.start); - a.txcom_end = bus_addr (dev->txq.out.limit); - - for (pool = 0; pool < NUM_RX_POOLS; ++pool) { - // the other "a" items are set up by the adapter - a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start); - a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit); - a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start); - a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit); - a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size); - } - -#ifdef AMB_NEW_MICROCODE - // disable fast PLX prefetching - a.init_flags = 0; -#endif - - // pass the structure - wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a)); - - // 2.2 second wait (must not touch doorbell during 2 second DMA test) - msleep(2200); - // give the 
adapter another half second? - timeout = 500; - while (rd_plain (dev, offsetof(amb_mem, doorbell))) - if (timeout) { - timeout = msleep_interruptible(timeout); - } else { - PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out"); - return -ETIMEDOUT; - } - - return 0; -} - -// get microcode version -static void amb_ucode_version(amb_dev *dev) -{ - u32 major; - u32 minor; - command cmd; - cmd.request = cpu_to_be32 (SRB_GET_VERSION); - while (command_do (dev, &cmd)) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule(); - } - major = be32_to_cpu (cmd.args.version.major); - minor = be32_to_cpu (cmd.args.version.minor); - PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor); -} - -// get end station address -static void amb_esi(amb_dev *dev, u8 *esi) -{ - u32 lower4; - u16 upper2; - command cmd; - - cmd.request = cpu_to_be32 (SRB_GET_BIA); - while (command_do (dev, &cmd)) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule(); - } - lower4 = be32_to_cpu (cmd.args.bia.lower4); - upper2 = be32_to_cpu (cmd.args.bia.upper2); - PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2 %04x", lower4, upper2); - - if (esi) { - unsigned int i; - - PRINTDB (DBG_INIT, "ESI:"); - for (i = 0; i < ESI_LEN; ++i) { - if (i < 4) - esi[i] = bitrev8(lower4>>(8*i)); - else - esi[i] = bitrev8(upper2>>(8*(i-4))); - PRINTDM (DBG_INIT, " %02x", esi[i]); - } - - PRINTDE (DBG_INIT, ""); - } - - return; -} - -static void fixup_plx_window (amb_dev *dev, loader_block *lb) -{ - // fix up the PLX-mapped window base address to match the block - unsigned long blb; - u32 mapreg; - blb = virt_to_bus(lb); - // the kernel stack had better not ever cross a 1Gb boundary! - mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10])); - mapreg &= ~onegigmask; - mapreg |= blb & onegigmask; - wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg); - return; -} - -static int amb_init(amb_dev *dev) -{ - loader_block lb; - - u32 version; - - if (amb_reset (dev, 1)) { - PRINTK (KERN_ERR, "card reset failed!"); - } else { - fixup_plx_window (dev, &lb); - - if (get_loader_version (&lb, dev, &version)) { - PRINTK (KERN_INFO, "failed to get loader version"); - } else { - PRINTK (KERN_INFO, "loader version is %08x", version); - - if (ucode_init (&lb, dev)) { - PRINTK (KERN_ERR, "microcode failure"); - } else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) { - PRINTK (KERN_ERR, "failed to get memory for queues"); - } else { - - if (amb_talk (dev)) { - PRINTK (KERN_ERR, "adapter did not accept queues"); - } else { - - amb_ucode_version (dev); - return 0; - - } /* amb_talk */ - - destroy_queues (dev); - } /* create_queues, ucode_init */ - - amb_reset (dev, 0); - } /* get_loader_version */ - - } /* amb_reset */ - - return -EINVAL; -} - -static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev) -{ - unsigned char pool; - - // set up known dev items straight away - dev->pci_dev = pci_dev; - pci_set_drvdata(pci_dev, dev); - - dev->iobase = pci_resource_start (pci_dev, 1); - dev->irq = pci_dev->irq; - dev->membase = bus_to_virt(pci_resource_start(pci_dev, 0)); - - // flags (currently only dead) - dev->flags = 0; - - // Allocate cell rates (fibre) - // ATM_OC3_PCR = 1555200000/8/270*260/53 - 29/53 - // to be really pedantic, this should be ATM_OC3c_PCR - dev->tx_avail = ATM_OC3_PCR; - dev->rx_avail = ATM_OC3_PCR; - - // semaphore for txer/rxer modifications - we cannot use a - // spinlock as the critical region needs to switch processes - mutex_init(&dev->vcc_sf); - // queue manipulation spinlocks; we want atomic reads and - // writes to the queue 
descriptors (handles IRQ and SMP) - // consider replacing "int pending" -> "atomic_t available" - // => problem related to who gets to move queue pointers - spin_lock_init (&dev->cq.lock); - spin_lock_init (&dev->txq.lock); - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - spin_lock_init (&dev->rxq[pool].lock); -} - -static void setup_pci_dev(struct pci_dev *pci_dev) -{ - unsigned char lat; - - // enable bus master accesses - pci_set_master(pci_dev); - - // frobnicate latency (upwards, usually) - pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat); - - if (!pci_lat) - pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat; - - if (lat != pci_lat) { - PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu", - lat, pci_lat); - pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat); - } -} - -static int amb_probe(struct pci_dev *pci_dev, - const struct pci_device_id *pci_ent) -{ - amb_dev * dev; - int err; - unsigned int irq; - - err = pci_enable_device(pci_dev); - if (err < 0) { - PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card"); - goto out; - } - - // read resources from PCI configuration space - irq = pci_dev->irq; - - if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) { - PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card"); - err = -EINVAL; - goto out_disable; - } - - PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at" - " IO %llx, IRQ %u, MEM %p", - (unsigned long long)pci_resource_start(pci_dev, 1), - irq, bus_to_virt(pci_resource_start(pci_dev, 0))); - - // check IO region - err = pci_request_region(pci_dev, 1, DEV_LABEL); - if (err < 0) { - PRINTK (KERN_ERR, "IO range already in use!"); - goto out_disable; - } - - dev = kzalloc(sizeof(amb_dev), GFP_KERNEL); - if (!dev) { - PRINTK (KERN_ERR, "out of memory!"); - err = -ENOMEM; - goto out_release; - } - - setup_dev(dev, pci_dev); - - err = amb_init(dev); - if (err < 0) { - PRINTK (KERN_ERR, "adapter initialisation failure"); - goto out_free; - } - - setup_pci_dev(pci_dev); - - // grab (but share) IRQ and install handler - err = request_irq(irq, interrupt_handler, IRQF_SHARED, DEV_LABEL, dev); - if (err < 0) { - PRINTK (KERN_ERR, "request IRQ failed!"); - goto out_reset; - } - - dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1, - NULL); - if (!dev->atm_dev) { - PRINTD (DBG_ERR, "failed to register Madge ATM adapter"); - err = -EINVAL; - goto out_free_irq; - } - - PRINTD (DBG_INFO, "registered Madge ATM adapter (no. 
%d) (%p) at %p", - dev->atm_dev->number, dev, dev->atm_dev); - dev->atm_dev->dev_data = (void *) dev; - - // register our address - amb_esi (dev, dev->atm_dev->esi); - - // 0 bits for vpi, 10 bits for vci - dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS; - dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS; - - timer_setup(&dev->housekeeping, do_housekeeping, 0); - mod_timer(&dev->housekeeping, jiffies); - - // enable host interrupts - interrupts_on (dev); - -out: - return err; - -out_free_irq: - free_irq(irq, dev); -out_reset: - amb_reset(dev, 0); -out_free: - kfree(dev); -out_release: - pci_release_region(pci_dev, 1); -out_disable: - pci_disable_device(pci_dev); - goto out; -} - - -static void amb_remove_one(struct pci_dev *pci_dev) -{ - struct amb_dev *dev; - - dev = pci_get_drvdata(pci_dev); - - PRINTD(DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev); - del_timer_sync(&dev->housekeeping); - // the drain should not be necessary - drain_rx_pools(dev); - interrupts_off(dev); - amb_reset(dev, 0); - free_irq(dev->irq, dev); - pci_disable_device(pci_dev); - destroy_queues(dev); - atm_dev_deregister(dev->atm_dev); - kfree(dev); - pci_release_region(pci_dev, 1); -} - -static void __init amb_check_args (void) { - unsigned char pool; - unsigned int max_rx_size; - -#ifdef DEBUG_AMBASSADOR - PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK); -#else - if (debug) - PRINTK (KERN_NOTICE, "no debugging support"); -#endif - - if (cmds < MIN_QUEUE_SIZE) - PRINTK (KERN_NOTICE, "cmds has been raised to %u", - cmds = MIN_QUEUE_SIZE); - - if (txs < MIN_QUEUE_SIZE) - PRINTK (KERN_NOTICE, "txs has been raised to %u", - txs = MIN_QUEUE_SIZE); - - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - if (rxs[pool] < MIN_QUEUE_SIZE) - PRINTK (KERN_NOTICE, "rxs[%hu] has been raised to %u", - pool, rxs[pool] = MIN_QUEUE_SIZE); - - // buffers sizes should be greater than zero and strictly increasing - max_rx_size = 0; - for (pool = 0; pool < NUM_RX_POOLS; ++pool) - if (rxs_bs[pool] <= max_rx_size) - PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)", - pool, rxs_bs[pool]); - else - max_rx_size = rxs_bs[pool]; - - if (rx_lats < MIN_RX_BUFFERS) - PRINTK (KERN_NOTICE, "rx_lats has been raised to %u", - rx_lats = MIN_RX_BUFFERS); - - return; -} - -/********** module stuff **********/ - -MODULE_AUTHOR(maintainer_string); -MODULE_DESCRIPTION(description_string); -MODULE_LICENSE("GPL"); -MODULE_FIRMWARE("atmsar11.fw"); -module_param(debug, ushort, 0644); -module_param(cmds, uint, 0); -module_param(txs, uint, 0); -module_param_array(rxs, uint, NULL, 0); -module_param_array(rxs_bs, uint, NULL, 0); -module_param(rx_lats, uint, 0); -module_param(pci_lat, byte, 0); -MODULE_PARM_DESC(debug, "debug bitmap, see .h file"); -MODULE_PARM_DESC(cmds, "number of command queue entries"); -MODULE_PARM_DESC(txs, "number of TX queue entries"); -MODULE_PARM_DESC(rxs, "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]"); -MODULE_PARM_DESC(rxs_bs, "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]"); -MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies"); -MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); - -/********** module entry **********/ - -static const struct pci_device_id amb_pci_tbl[] = { - { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 }, - { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 }, - { 0, } -}; - -MODULE_DEVICE_TABLE(pci, amb_pci_tbl); - -static struct pci_driver amb_driver = { - .name = "amb", - .probe = amb_probe, - .remove = 
amb_remove_one, - .id_table = amb_pci_tbl, -}; - -static int __init amb_module_init (void) -{ - PRINTD (DBG_FLOW|DBG_INIT, "init_module"); - - BUILD_BUG_ON(sizeof(amb_mem) != 4*16 + 4*12); - - show_version(); - - amb_check_args(); - - // get the juice - return pci_register_driver(&amb_driver); -} - -/********** module exit **********/ - -static void __exit amb_module_exit (void) -{ - PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module"); - - pci_unregister_driver(&amb_driver); -} - -module_init(amb_module_init); -module_exit(amb_module_exit); diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h deleted file mode 100644 index 086ceb8568dc..000000000000 --- a/drivers/atm/ambassador.h +++ /dev/null @@ -1,648 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - Madge Ambassador ATM Adapter driver. - Copyright (C) 1995-1999 Madge Networks Ltd. - -*/ - -#ifndef AMBASSADOR_H -#define AMBASSADOR_H - - -#ifdef CONFIG_ATM_AMBASSADOR_DEBUG -#define DEBUG_AMBASSADOR -#endif - -#define DEV_LABEL "amb" - -#ifndef PCI_VENDOR_ID_MADGE -#define PCI_VENDOR_ID_MADGE 0x10B6 -#endif -#ifndef PCI_VENDOR_ID_MADGE_AMBASSADOR -#define PCI_DEVICE_ID_MADGE_AMBASSADOR 0x1001 -#endif -#ifndef PCI_VENDOR_ID_MADGE_AMBASSADOR_BAD -#define PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD 0x1002 -#endif - -// diagnostic output - -#define PRINTK(severity,format,args...) \ - printk(severity DEV_LABEL ": " format "\n" , ## args) - -#ifdef DEBUG_AMBASSADOR - -#define DBG_ERR 0x0001 -#define DBG_WARN 0x0002 -#define DBG_INFO 0x0004 -#define DBG_INIT 0x0008 -#define DBG_LOAD 0x0010 -#define DBG_VCC 0x0020 -#define DBG_QOS 0x0040 -#define DBG_CMD 0x0080 -#define DBG_TX 0x0100 -#define DBG_RX 0x0200 -#define DBG_SKB 0x0400 -#define DBG_POOL 0x0800 -#define DBG_IRQ 0x1000 -#define DBG_FLOW 0x2000 -#define DBG_REGS 0x4000 -#define DBG_DATA 0x8000 -#define DBG_MASK 0xffff - -/* the ## prevents the annoying double expansion of the macro arguments */ -/* KERN_INFO is used since KERN_DEBUG often does not make it to the console */ -#define PRINTDB(bits,format,args...) \ - ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format , ## args) : 1 ) -#define PRINTDM(bits,format,args...) \ - ( (debug & (bits)) ? printk (format , ## args) : 1 ) -#define PRINTDE(bits,format,args...) \ - ( (debug & (bits)) ? printk (format "\n" , ## args) : 1 ) -#define PRINTD(bits,format,args...) \ - ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format "\n" , ## args) : 1 ) - -#else - -#define PRINTD(bits,format,args...) -#define PRINTDB(bits,format,args...) -#define PRINTDM(bits,format,args...) -#define PRINTDE(bits,format,args...) - -#endif - -#define PRINTDD(bits,format,args...) -#define PRINTDDB(sec,fmt,args...) -#define PRINTDDM(sec,fmt,args...) -#define PRINTDDE(sec,fmt,args...) - -// tunable values (?) - -/* MUST be powers of two -- why ? */ -#define COM_Q_ENTRIES 8 -#define TX_Q_ENTRIES 32 -#define RX_Q_ENTRIES 64 - -// fixed values - -// guessing -#define AMB_EXTENT 0x80 - -// Minimum allowed size for an Ambassador queue -#define MIN_QUEUE_SIZE 2 - -// Ambassador microcode allows 1 to 4 pools, we use 4 (simpler) -#define NUM_RX_POOLS 4 - -// minimum RX buffers required to cope with replenishing delay -#define MIN_RX_BUFFERS 1 - -// minimum PCI latency we will tolerate (32 IS TOO SMALL) -#define MIN_PCI_LATENCY 64 // 255 - -// VCs supported by card (VPI always 0) -#define NUM_VPI_BITS 0 -#define NUM_VCI_BITS 10 -#define NUM_VCS 1024 - -/* The status field bits defined so far. 
*/ -#define RX_ERR 0x8000 // always present if there is an error (hmm) -#define CRC_ERR 0x4000 // AAL5 CRC error -#define LEN_ERR 0x2000 // overlength frame -#define ABORT_ERR 0x1000 // zero length field in received frame -#define UNUSED_ERR 0x0800 // buffer returned unused - -// Adaptor commands - -#define SRB_OPEN_VC 0 -/* par_0: dwordswap(VC_number) */ -/* par_1: dwordswap(flags<<16) or wordswap(flags)*/ -/* flags: */ - -/* LANE: 0x0004 */ -/* NOT_UBR: 0x0008 */ -/* ABR: 0x0010 */ - -/* RxPool0: 0x0000 */ -/* RxPool1: 0x0020 */ -/* RxPool2: 0x0040 */ -/* RxPool3: 0x0060 */ - -/* par_2: dwordswap(fp_rate<<16) or wordswap(fp_rate) */ - -#define SRB_CLOSE_VC 1 -/* par_0: dwordswap(VC_number) */ - -#define SRB_GET_BIA 2 -/* returns */ -/* par_0: dwordswap(half BIA) */ -/* par_1: dwordswap(half BIA) */ - -#define SRB_GET_SUNI_STATS 3 -/* par_0: dwordswap(physical_host_address) */ - -#define SRB_SET_BITS_8 4 -#define SRB_SET_BITS_16 5 -#define SRB_SET_BITS_32 6 -#define SRB_CLEAR_BITS_8 7 -#define SRB_CLEAR_BITS_16 8 -#define SRB_CLEAR_BITS_32 9 -/* par_0: dwordswap(ATMizer address) */ -/* par_1: dwordswap(mask) */ - -#define SRB_SET_8 10 -#define SRB_SET_16 11 -#define SRB_SET_32 12 -/* par_0: dwordswap(ATMizer address) */ -/* par_1: dwordswap(data) */ - -#define SRB_GET_32 13 -/* par_0: dwordswap(ATMizer address) */ -/* returns */ -/* par_1: dwordswap(ATMizer data) */ - -#define SRB_GET_VERSION 14 -/* returns */ -/* par_0: dwordswap(Major Version) */ -/* par_1: dwordswap(Minor Version) */ - -#define SRB_FLUSH_BUFFER_Q 15 -/* Only flags to define which buffer pool; all others must be zero */ -/* par_0: dwordswap(flags<<16) or wordswap(flags)*/ - -#define SRB_GET_DMA_SPEEDS 16 -/* returns */ -/* par_0: dwordswap(Read speed (bytes/sec)) */ -/* par_1: dwordswap(Write speed (bytes/sec)) */ - -#define SRB_MODIFY_VC_RATE 17 -/* par_0: dwordswap(VC_number) */ -/* par_1: dwordswap(fp_rate<<16) or wordswap(fp_rate) */ - -#define SRB_MODIFY_VC_FLAGS 18 -/* par_0: dwordswap(VC_number) */ -/* par_1: dwordswap(flags<<16) or wordswap(flags)*/ - -/* flags: */ - -/* LANE: 0x0004 */ -/* NOT_UBR: 0x0008 */ -/* ABR: 0x0010 */ - -/* RxPool0: 0x0000 */ -/* RxPool1: 0x0020 */ -/* RxPool2: 0x0040 */ -/* RxPool3: 0x0060 */ - -#define SRB_RATE_SHIFT 16 -#define SRB_POOL_SHIFT (SRB_FLAGS_SHIFT+5) -#define SRB_FLAGS_SHIFT 16 - -#define SRB_STOP_TASKING 19 -#define SRB_START_TASKING 20 -#define SRB_SHUT_DOWN 21 -#define MAX_SRB 21 - -#define SRB_COMPLETE 0xffffffff - -#define TX_FRAME 0x80000000 - -// number of types of SRB MUST be a power of two -- why? 
-#define NUM_OF_SRB 32 - -// number of bits of period info for rate -#define MAX_RATE_BITS 6 - -#define TX_UBR 0x0000 -#define TX_UBR_CAPPED 0x0008 -#define TX_ABR 0x0018 -#define TX_FRAME_NOTCAP 0x0000 -#define TX_FRAME_CAPPED 0x8000 - -#define FP_155_RATE 0x24b1 -#define FP_25_RATE 0x1f9d - -/* #define VERSION_NUMBER 0x01000000 // initial release */ -/* #define VERSION_NUMBER 0x01010000 // fixed startup probs PLX MB0 not cleared */ -/* #define VERSION_NUMBER 0x01020000 // changed SUNI reset timings; allowed r/w onchip */ - -/* #define VERSION_NUMBER 0x01030000 // clear local doorbell int reg on reset */ -/* #define VERSION_NUMBER 0x01040000 // PLX bug work around version PLUS */ -/* remove race conditions on basic interface */ -/* indicate to the host that diagnostics */ -/* have finished; if failed, how and what */ -/* failed */ -/* fix host memory test to fix PLX bug */ -/* allow flash upgrade and BIA upgrade directly */ -/* */ -#define VERSION_NUMBER 0x01050025 /* Jason's first hacked version. */ -/* Change in download algorithm */ - -#define DMA_VALID 0xb728e149 /* completely random */ - -#define FLASH_BASE 0xa0c00000 -#define FLASH_SIZE 0x00020000 /* 128K */ -#define BIA_BASE (FLASH_BASE+0x0001c000) /* Flash Sector 7 */ -#define BIA_ADDRESS ((void *)0xa0c1c000) -#define PLX_BASE 0xe0000000 - -typedef enum { - host_memory_test = 1, - read_adapter_memory, - write_adapter_memory, - adapter_start, - get_version_number, - interrupt_host, - flash_erase_sector, - adap_download_block = 0x20, - adap_erase_flash, - adap_run_in_iram, - adap_end_download -} loader_command; - -#define BAD_COMMAND (-1) -#define COMMAND_IN_PROGRESS 1 -#define COMMAND_PASSED_TEST 2 -#define COMMAND_FAILED_TEST 3 -#define COMMAND_READ_DATA_OK 4 -#define COMMAND_READ_BAD_ADDRESS 5 -#define COMMAND_WRITE_DATA_OK 6 -#define COMMAND_WRITE_BAD_ADDRESS 7 -#define COMMAND_WRITE_FLASH_FAILURE 8 -#define COMMAND_COMPLETE 9 -#define COMMAND_FLASH_ERASE_FAILURE 10 -#define COMMAND_WRITE_BAD_DATA 11 - -/* bit fields for mailbox[0] return values */ - -#define GPINT_TST_FAILURE 0x00000001 -#define SUNI_DATA_PATTERN_FAILURE 0x00000002 -#define SUNI_DATA_BITS_FAILURE 0x00000004 -#define SUNI_UTOPIA_FAILURE 0x00000008 -#define SUNI_FIFO_FAILURE 0x00000010 -#define SRAM_FAILURE 0x00000020 -#define SELF_TEST_FAILURE 0x0000003f - -/* mailbox[1] = 0 in progress, -1 on completion */ -/* mailbox[2] = current test 00 00 test(8 bit) phase(8 bit) */ -/* mailbox[3] = last failure, 00 00 test(8 bit) phase(8 bit) */ -/* mailbox[4],mailbox[5],mailbox[6] random failure values */ - -/* PLX/etc. 
memory map including command structure */ - -/* These registers may also be memory mapped in PCI memory */ - -#define UNUSED_LOADER_MAILBOXES 6 - -typedef struct { - u32 stuff[16]; - union { - struct { - u32 result; - u32 ready; - u32 stuff[UNUSED_LOADER_MAILBOXES]; - } loader; - struct { - u32 cmd_address; - u32 tx_address; - u32 rx_address[NUM_RX_POOLS]; - u32 gen_counter; - u32 spare; - } adapter; - } mb; - u32 doorbell; - u32 interrupt; - u32 interrupt_control; - u32 reset_control; -} amb_mem; - -/* RESET bit, IRQ (card to host) and doorbell (host to card) enable bits */ -#define AMB_RESET_BITS 0x40000000 -#define AMB_INTERRUPT_BITS 0x00000300 -#define AMB_DOORBELL_BITS 0x00030000 - -/* loader commands */ - -#define MAX_COMMAND_DATA 13 -#define MAX_TRANSFER_DATA 11 - -typedef struct { - __be32 address; - __be32 count; - __be32 data[MAX_TRANSFER_DATA]; -} transfer_block; - -typedef struct { - __be32 result; - __be32 command; - union { - transfer_block transfer; - __be32 version; - __be32 start; - __be32 data[MAX_COMMAND_DATA]; - } payload; - __be32 valid; -} loader_block; - -/* command queue */ - -/* Again all data are BIG ENDIAN */ - -typedef struct { - union { - struct { - __be32 vc; - __be32 flags; - __be32 rate; - } open; - struct { - __be32 vc; - __be32 rate; - } modify_rate; - struct { - __be32 vc; - __be32 flags; - } modify_flags; - struct { - __be32 vc; - } close; - struct { - __be32 lower4; - __be32 upper2; - } bia; - struct { - __be32 address; - } suni; - struct { - __be32 major; - __be32 minor; - } version; - struct { - __be32 read; - __be32 write; - } speed; - struct { - __be32 flags; - } flush; - struct { - __be32 address; - __be32 data; - } memory; - __be32 par[3]; - } args; - __be32 request; -} command; - -/* transmit queues and associated structures */ - -/* The hosts transmit structure. All BIG ENDIAN; host address - restricted to first 1GByte, but address passed to the card must - have the top MS bit or'ed in. -- check this */ - -/* TX is described by 1+ tx_frags followed by a tx_frag_end */ - -typedef struct { - __be32 bytes; - __be32 address; -} tx_frag; - -/* apart from handle the fields here are for the adapter to play with - and should be set to zero */ - -typedef struct { - u32 handle; - u16 vc; - u16 next_descriptor_length; - u32 next_descriptor; -#ifdef AMB_NEW_MICROCODE - u8 cpcs_uu; - u8 cpi; - u16 pad; -#endif -} tx_frag_end; - -typedef struct { - tx_frag tx_frag; - tx_frag_end tx_frag_end; - struct sk_buff * skb; -} tx_simple; - -#if 0 -typedef union { - tx_frag fragment; - tx_frag_end end_of_list; -} tx_descr; -#endif - -/* this "points" to the sequence of fragments and trailer */ - -typedef struct { - __be16 vc; - __be16 tx_descr_length; - __be32 tx_descr_addr; -} tx_in; - -/* handle is the handle from tx_in */ - -typedef struct { - u32 handle; -} tx_out; - -/* receive frame structure */ - -/* All BIG ENDIAN; handle is as passed from host; length is zero for - aborted frames, and frames with errors. Header is actually VC - number, lec-id is NOT yet supported. */ - -typedef struct { - u32 handle; - __be16 vc; - __be16 lec_id; // unused - __be16 status; - __be16 length; -} rx_out; - -/* buffer supply structure */ - -typedef struct { - u32 handle; - __be32 host_address; -} rx_in; - -/* This first structure is the area in host memory where the adapter - writes its pointer values. These pointer values are BIG ENDIAN and - reside in the same 4MB 'page' as this structure. 
The host gives the - adapter the address of this block by sending a doorbell interrupt - to the adapter after downloading the code and setting it going. The - addresses have the top 10 bits set to 1010000010b -- really? - - The host must initialise these before handing the block to the - adapter. */ - -typedef struct { - __be32 command_start; /* SRB commands completions */ - __be32 command_end; /* SRB commands completions */ - __be32 tx_start; - __be32 tx_end; - __be32 txcom_start; /* tx completions */ - __be32 txcom_end; /* tx completions */ - struct { - __be32 buffer_start; - __be32 buffer_end; - u32 buffer_q_get; - u32 buffer_q_end; - u32 buffer_aptr; - __be32 rx_start; /* rx completions */ - __be32 rx_end; - u32 rx_ptr; - __be32 buffer_size; /* size of host buffer */ - } rec_struct[NUM_RX_POOLS]; -#ifdef AMB_NEW_MICROCODE - u16 init_flags; - u16 talk_block_spare; -#endif -} adap_talk_block; - -/* This structure must be kept in line with the vcr image in sarmain.h - - This is the structure in the host filled in by the adapter by - GET_SUNI_STATS */ - -typedef struct { - u8 racp_chcs; - u8 racp_uhcs; - u16 spare; - u32 racp_rcell; - u32 tacp_tcell; - u32 flags; - u32 dropped_cells; - u32 dropped_frames; -} suni_stats; - -typedef enum { - dead -} amb_flags; - -#define NEXTQ(current,start,limit) \ - ( (current)+1 < (limit) ? (current)+1 : (start) ) - -typedef struct { - command * start; - command * in; - command * out; - command * limit; -} amb_cq_ptrs; - -typedef struct { - spinlock_t lock; - unsigned int pending; - unsigned int high; - unsigned int filled; - unsigned int maximum; // size - 1 (q implementation) - amb_cq_ptrs ptrs; -} amb_cq; - -typedef struct { - spinlock_t lock; - unsigned int pending; - unsigned int high; - unsigned int filled; - unsigned int maximum; // size - 1 (q implementation) - struct { - tx_in * start; - tx_in * ptr; - tx_in * limit; - } in; - struct { - tx_out * start; - tx_out * ptr; - tx_out * limit; - } out; -} amb_txq; - -typedef struct { - spinlock_t lock; - unsigned int pending; - unsigned int low; - unsigned int emptied; - unsigned int maximum; // size - 1 (q implementation) - struct { - rx_in * start; - rx_in * ptr; - rx_in * limit; - } in; - struct { - rx_out * start; - rx_out * ptr; - rx_out * limit; - } out; - unsigned int buffers_wanted; - unsigned int buffer_size; -} amb_rxq; - -typedef struct { - unsigned long tx_ok; - struct { - unsigned long ok; - unsigned long error; - unsigned long badcrc; - unsigned long toolong; - unsigned long aborted; - unsigned long unused; - } rx; -} amb_stats; - -// a single struct pointed to by atm_vcc->dev_data - -typedef struct { - u8 tx_vc_bits:7; - u8 tx_present:1; -} amb_tx_info; - -typedef struct { - unsigned char pool; -} amb_rx_info; - -typedef struct { - amb_rx_info rx_info; - u16 tx_frame_bits; - unsigned int tx_rate; - unsigned int rx_rate; -} amb_vcc; - -struct amb_dev { - u8 irq; - unsigned long flags; - u32 iobase; - u32 * membase; - - amb_cq cq; - amb_txq txq; - amb_rxq rxq[NUM_RX_POOLS]; - - struct mutex vcc_sf; - amb_tx_info txer[NUM_VCS]; - struct atm_vcc * rxer[NUM_VCS]; - unsigned int tx_avail; - unsigned int rx_avail; - - amb_stats stats; - - struct atm_dev * atm_dev; - struct pci_dev * pci_dev; - struct timer_list housekeeping; -}; - -typedef struct amb_dev amb_dev; - -#define AMB_DEV(atm_dev) ((amb_dev *) (atm_dev)->dev_data) -#define AMB_VCC(atm_vcc) ((amb_vcc *) (atm_vcc)->dev_data) - -/* rate rounding */ - -typedef enum { - round_up, - round_down, - round_nearest -} rounding; - -#endif -- cgit 
v1.2.3 From 737ca352569e744bf753b4522a6f91b120a734f1 Mon Sep 17 00:00:00 2001 From: Potin Lai Date: Thu, 7 Apr 2022 09:17:36 +0800 Subject: net: mdio: aspeed: move reg accessing part into separate functions Add aspeed_mdio_op() and aseed_mdio_get_data() for register accessing. aspeed_mdio_op() handles operations, write command to control register, then check and wait operations is finished (bit 31 is cleared). aseed_mdio_get_data() fetchs the result value of operation from data register. Signed-off-by: Potin Lai Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/mdio/mdio-aspeed.c | 70 +++++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index e2273588c75b..f22be2f069e9 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -39,34 +39,35 @@ struct aspeed_mdio { void __iomem *base; }; -static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad, + u16 data) { struct aspeed_mdio *ctx = bus->priv; u32 ctrl; - u32 data; - int rc; - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, - regnum); - - /* Just clause 22 for the moment */ - if (regnum & MII_ADDR_C45) - return -EOPNOTSUPP; + dev_dbg(&bus->dev, "%s: st: %u op: %u, phyad: %u, regad: %u, data: %u\n", + __func__, st, op, phyad, regad, data); ctrl = ASPEED_MDIO_CTRL_FIRE - | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) - | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ) - | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) - | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum); + | FIELD_PREP(ASPEED_MDIO_CTRL_ST, st) + | FIELD_PREP(ASPEED_MDIO_CTRL_OP, op) + | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, phyad) + | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regad) + | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data); iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); - rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, !(ctrl & ASPEED_MDIO_CTRL_FIRE), ASPEED_MDIO_INTERVAL_US, ASPEED_MDIO_TIMEOUT_US); - if (rc < 0) - return rc; +} + +static int aspeed_mdio_get_data(struct mii_bus *bus) +{ + struct aspeed_mdio *ctx = bus->priv; + int rc; + u32 data; rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, @@ -78,31 +79,36 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data); } -static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) { - struct aspeed_mdio *ctx = bus->priv; - u32 ctrl; + int rc; - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", - __func__, addr, regnum, val); + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, + regnum); /* Just clause 22 for the moment */ if (regnum & MII_ADDR_C45) return -EOPNOTSUPP; - ctrl = ASPEED_MDIO_CTRL_FIRE - | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22) - | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE) - | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr) - | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum) - | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val); + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ, + addr, regnum, 0); + if (rc < 0) + return rc; - iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); + return 
aspeed_mdio_get_data(bus); +} - return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, - !(ctrl & ASPEED_MDIO_CTRL_FIRE), - ASPEED_MDIO_INTERVAL_US, - ASPEED_MDIO_TIMEOUT_US); +static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", + __func__, addr, regnum, val); + + /* Just clause 22 for the moment */ + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE, + addr, regnum, val); } static int aspeed_mdio_probe(struct platform_device *pdev) -- cgit v1.2.3 From eb0571932314e6f4f0cb4e05c590fe78c01acbd7 Mon Sep 17 00:00:00 2001 From: Potin Lai Date: Thu, 7 Apr 2022 09:17:37 +0800 Subject: net: mdio: aspeed: Introduce read write function for c22 and c45 Add following additional functions to move out the implementation from aspeed_mdio_read() and aspeed_mdio_write(). c22: - aspeed_mdio_read_c22() - aspeed_mdio_write_c22() c45: - aspeed_mdio_read_c45() - aspeed_mdio_write_c45() Signed-off-by: Potin Lai Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/mdio/mdio-aspeed.c | 46 +++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index f22be2f069e9..5becddb56117 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -79,17 +79,10 @@ static int aspeed_mdio_get_data(struct mii_bus *bus) return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data); } -static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +static int aspeed_mdio_read_c22(struct mii_bus *bus, int addr, int regnum) { int rc; - dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, - regnum); - - /* Just clause 22 for the moment */ - if (regnum & MII_ADDR_C45) - return -EOPNOTSUPP; - rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ, addr, regnum, 0); if (rc < 0) @@ -98,17 +91,46 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) return aspeed_mdio_get_data(bus); } +static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE, + addr, regnum, val); +} + +static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum) +{ + /* TODO: add c45 support */ + return -EOPNOTSUPP; +} + +static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + /* TODO: add c45 support */ + return -EOPNOTSUPP; +} + +static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr, + regnum); + + if (regnum & MII_ADDR_C45) + return aspeed_mdio_read_c45(bus, addr, regnum); + + return aspeed_mdio_read_c22(bus, addr, regnum); +} + static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) { dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n", __func__, addr, regnum, val); - /* Just clause 22 for the moment */ if (regnum & MII_ADDR_C45) - return -EOPNOTSUPP; + return aspeed_mdio_write_c45(bus, addr, regnum, val); - return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE, - addr, regnum, val); + return aspeed_mdio_write_c22(bus, addr, regnum, val); } static int aspeed_mdio_probe(struct platform_device *pdev) -- cgit v1.2.3 From e6df1b4a2759d3781f17ae9cdf96bc607e20d0b8 Mon Sep 17 00:00:00 2001 From: Potin Lai 
Date: Thu, 7 Apr 2022 09:17:38 +0800 Subject: net: mdio: aspeed: Add c45 support Add Clause 45 support for Aspeed mdio driver. Signed-off-by: Potin Lai Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/mdio/mdio-aspeed.c | 37 ++++++++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index 5becddb56117..7aa49827196f 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -21,6 +21,10 @@ #define ASPEED_MDIO_CTRL_OP GENMASK(27, 26) #define MDIO_C22_OP_WRITE 0b01 #define MDIO_C22_OP_READ 0b10 +#define MDIO_C45_OP_ADDR 0b00 +#define MDIO_C45_OP_WRITE 0b01 +#define MDIO_C45_OP_PREAD 0b10 +#define MDIO_C45_OP_READ 0b11 #define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21) #define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16) #define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0) @@ -66,8 +70,8 @@ static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad, static int aspeed_mdio_get_data(struct mii_bus *bus) { struct aspeed_mdio *ctx = bus->priv; - int rc; u32 data; + int rc; rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data, data & ASPEED_MDIO_DATA_IDLE, @@ -100,15 +104,37 @@ static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum, static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum) { - /* TODO: add c45 support */ - return -EOPNOTSUPP; + u8 c45_dev = (regnum >> 16) & 0x1F; + u16 c45_addr = regnum & 0xFFFF; + int rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, + addr, c45_dev, c45_addr); + if (rc < 0) + return rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ, + addr, c45_dev, 0); + if (rc < 0) + return rc; + + return aspeed_mdio_get_data(bus); } static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum, u16 val) { - /* TODO: add c45 support */ - return -EOPNOTSUPP; + u8 c45_dev = (regnum >> 16) & 0x1F; + u16 c45_addr = regnum & 0xFFFF; + int rc; + + rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR, + addr, c45_dev, c45_addr); + if (rc < 0) + return rc; + + return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE, + addr, c45_dev, val); } static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum) @@ -153,6 +179,7 @@ static int aspeed_mdio_probe(struct platform_device *pdev) bus->parent = &pdev->dev; bus->read = aspeed_mdio_read; bus->write = aspeed_mdio_write; + bus->probe_capabilities = MDIOBUS_C22_C45; rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { -- cgit v1.2.3 From 58389c00d49cf4f9bee50530901834a8503240b6 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 7 Apr 2022 12:55:34 +0200 Subject: net: phy: micrel: ksz9031/ksz9131: add cabletest support Add cable test support for Micrel KSZ9x31 PHYs. 
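For reference, a condensed sketch (not part of the patch itself) of how the new support is wired up: the KSZ9031 and KSZ9131 phy_driver entries further down in this diff gain PHY_POLL_CABLE_TEST plus the two cable-test hooks, so phylib polls ksz9x31_cable_test_get_status() after ksz9x31_cable_test_start() has kicked off LinkMD. The struct below is abridged and its variable name is illustrative; the field values are taken from the hunks that follow.

	/* Abridged illustration of the KSZ9131 phy_driver entry after this patch */
	static struct phy_driver ksz9131_entry_sketch = {
		.phy_id			= PHY_ID_KSZ9131,
		.phy_id_mask		= MICREL_PHY_ID_MASK,
		.name			= "Microchip KSZ9131 Gigabit PHY",
		/* let phylib poll cable_test_get_status() for completion */
		.flags			= PHY_POLL_CABLE_TEST,
		.cable_test_start	= ksz9x31_cable_test_start,
		.cable_test_get_status	= ksz9x31_cable_test_get_status,
	};
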
Tested on i.MX8M Mini with KSZ9131RNX in 100/Full mode with pairs shuffled before magnetics: (note: Cable test started/completed messages are omitted) mx8mm-ksz9131-a-d-connected$ ethtool --cable-test eth0 Pair A code OK Pair B code Short within Pair Pair B, fault length: 0.80m Pair C code Short within Pair Pair C, fault length: 0.80m Pair D code OK mx8mm-ksz9131-a-b-connected$ ethtool --cable-test eth0 Pair A code OK Pair B code OK Pair C code Short within Pair Pair C, fault length: 0.00m Pair D code Short within Pair Pair D, fault length: 0.00m Tested on R8A77951 Salvator-XS with KSZ9031RNX and all four pairs connected: (note: Cable test started/completed messages are omitted) r8a7795-ksz9031-all-connected$ ethtool --cable-test eth0 Pair A code OK Pair B code OK Pair C code OK Pair D code OK The CTRL1000 CTL1000_ENABLE_MASTER and CTL1000_AS_MASTER bits are not restored by calling phy_init_hw(), they must be manually cached in ksz9x31_cable_test_start() and restored at the end of ksz9x31_cable_test_get_status(). Signed-off-by: Marek Vasut Cc: Heiner Kallweit Cc: Oleksij Rempel Cc: Paolo Abeni Reviewed-by: Andrew Lunn Tested-by: Claudiu Beznea Link: https://lore.kernel.org/r/20220407105534.85833-1-marex@denx.de Signed-off-by: Jakub Kicinski --- drivers/net/phy/micrel.c | 221 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fc53b71dc872..96840695debd 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -70,6 +70,27 @@ #define KSZ8081_LMD_SHORT_INDICATOR BIT(12) #define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0) +#define KSZ9x31_LMD 0x12 +#define KSZ9x31_LMD_VCT_EN BIT(15) +#define KSZ9x31_LMD_VCT_DIS_TX BIT(14) +#define KSZ9x31_LMD_VCT_PAIR(n) (((n) & 0x3) << 12) +#define KSZ9x31_LMD_VCT_SEL_RESULT 0 +#define KSZ9x31_LMD_VCT_SEL_THRES_HI BIT(10) +#define KSZ9x31_LMD_VCT_SEL_THRES_LO BIT(11) +#define KSZ9x31_LMD_VCT_SEL_MASK GENMASK(11, 10) +#define KSZ9x31_LMD_VCT_ST_NORMAL 0 +#define KSZ9x31_LMD_VCT_ST_OPEN 1 +#define KSZ9x31_LMD_VCT_ST_SHORT 2 +#define KSZ9x31_LMD_VCT_ST_FAIL 3 +#define KSZ9x31_LMD_VCT_ST_MASK GENMASK(9, 8) +#define KSZ9x31_LMD_VCT_DATA_REFLECTED_INVALID BIT(7) +#define KSZ9x31_LMD_VCT_DATA_SIG_WAIT_TOO_LONG BIT(6) +#define KSZ9x31_LMD_VCT_DATA_MASK100 BIT(5) +#define KSZ9x31_LMD_VCT_DATA_NLP_FLP BIT(4) +#define KSZ9x31_LMD_VCT_DATA_LO_PULSE_MASK GENMASK(3, 2) +#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0) +#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0) + /* Lan8814 general Interrupt control/status reg in GPHY specific block. */ #define LAN8814_INTC 0x18 #define LAN8814_INTS 0x1B @@ -280,6 +301,7 @@ struct kszphy_priv { struct kszphy_ptp_priv ptp_priv; const struct kszphy_type *type; int led_mode; + u16 vct_ctrl1000; bool rmii_ref_clk_sel; bool rmii_ref_clk_sel_val; u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; @@ -1326,6 +1348,199 @@ static int ksz9031_read_status(struct phy_device *phydev) return 0; } +static int ksz9x31_cable_test_start(struct phy_device *phydev) +{ + struct kszphy_priv *priv = phydev->priv; + int ret; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * Prior to running the cable diagnostics, Auto-negotiation should + * be disabled, full duplex set and the link speed set to 1000Mbps + * via the Basic Control Register. 
+ */ + ret = phy_modify(phydev, MII_BMCR, + BMCR_SPEED1000 | BMCR_FULLDPLX | + BMCR_ANENABLE | BMCR_SPEED100, + BMCR_SPEED1000 | BMCR_FULLDPLX); + if (ret) + return ret; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * The Master-Slave configuration should be set to Slave by writing + * a value of 0x1000 to the Auto-Negotiation Master Slave Control + * Register. + */ + ret = phy_read(phydev, MII_CTRL1000); + if (ret < 0) + return ret; + + /* Cache these bits, they need to be restored once LinkMD finishes. */ + priv->vct_ctrl1000 = ret & (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + ret &= ~(CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + ret |= CTL1000_ENABLE_MASTER; + + return phy_write(phydev, MII_CTRL1000, ret); +} + +static int ksz9x31_cable_test_result_trans(u16 status) +{ + switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) { + case KSZ9x31_LMD_VCT_ST_NORMAL: + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + case KSZ9x31_LMD_VCT_ST_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + case KSZ9x31_LMD_VCT_ST_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + case KSZ9x31_LMD_VCT_ST_FAIL: + fallthrough; + default: + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } +} + +static bool ksz9x31_cable_test_failed(u16 status) +{ + int stat = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status); + + return stat == KSZ9x31_LMD_VCT_ST_FAIL; +} + +static bool ksz9x31_cable_test_fault_length_valid(u16 status) +{ + switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) { + case KSZ9x31_LMD_VCT_ST_OPEN: + fallthrough; + case KSZ9x31_LMD_VCT_ST_SHORT: + return true; + } + return false; +} + +static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat) +{ + int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, stat); + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * + * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity + */ + if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131) + dt = clamp(dt - 22, 0, 255); + + return (dt * 400) / 10; +} + +static int ksz9x31_cable_test_wait_for_completion(struct phy_device *phydev) +{ + int val, ret; + + ret = phy_read_poll_timeout(phydev, KSZ9x31_LMD, val, + !(val & KSZ9x31_LMD_VCT_EN), + 30000, 100000, true); + + return ret < 0 ? ret : 0; +} + +static int ksz9x31_cable_test_get_pair(int pair) +{ + static const int ethtool_pair[] = { + ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_PAIR_B, + ETHTOOL_A_CABLE_PAIR_C, + ETHTOOL_A_CABLE_PAIR_D, + }; + + return ethtool_pair[pair]; +} + +static int ksz9x31_cable_test_one_pair(struct phy_device *phydev, int pair) +{ + int ret, val; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * To test each individual cable pair, set the cable pair in the Cable + * Diagnostics Test Pair (VCT_PAIR[1:0]) field of the LinkMD Cable + * Diagnostic Register, along with setting the Cable Diagnostics Test + * Enable (VCT_EN) bit. The Cable Diagnostics Test Enable (VCT_EN) bit + * will self clear when the test is concluded. 
+ */ + ret = phy_write(phydev, KSZ9x31_LMD, + KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(pair)); + if (ret) + return ret; + + ret = ksz9x31_cable_test_wait_for_completion(phydev); + if (ret) + return ret; + + val = phy_read(phydev, KSZ9x31_LMD); + if (val < 0) + return val; + + if (ksz9x31_cable_test_failed(val)) + return -EAGAIN; + + ret = ethnl_cable_test_result(phydev, + ksz9x31_cable_test_get_pair(pair), + ksz9x31_cable_test_result_trans(val)); + if (ret) + return ret; + + if (!ksz9x31_cable_test_fault_length_valid(val)) + return 0; + + return ethnl_cable_test_fault_length(phydev, + ksz9x31_cable_test_get_pair(pair), + ksz9x31_cable_test_fault_length(phydev, val)); +} + +static int ksz9x31_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + struct kszphy_priv *priv = phydev->priv; + unsigned long pair_mask = 0xf; + int retries = 20; + int pair, ret, rv; + + *finished = false; + + /* Try harder if link partner is active */ + while (pair_mask && retries--) { + for_each_set_bit(pair, &pair_mask, 4) { + ret = ksz9x31_cable_test_one_pair(phydev, pair); + if (ret == -EAGAIN) + continue; + if (ret < 0) + return ret; + clear_bit(pair, &pair_mask); + } + /* If link partner is in autonegotiation mode it will send 2ms + * of FLPs with at least 6ms of silence. + * Add 2ms sleep to have better chances to hit this silence. + */ + if (pair_mask) + usleep_range(2000, 3000); + } + + /* Report remaining unfinished pair result as unknown. */ + for_each_set_bit(pair, &pair_mask, 4) { + ret = ethnl_cable_test_result(phydev, + ksz9x31_cable_test_get_pair(pair), + ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC); + } + + *finished = true; + + /* Restore cached bits from before LinkMD got started. */ + rv = phy_modify(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER, + priv->vct_ctrl1000); + if (rv) + return rv; + + return ret; +} + static int ksz8873mll_config_aneg(struct phy_device *phydev) { return 0; @@ -2806,6 +3021,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ9031 Gigabit PHY", + .flags = PHY_POLL_CABLE_TEST, .driver_data = &ksz9021_type, .probe = kszphy_probe, .get_features = ksz9031_get_features, @@ -2819,6 +3035,8 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = kszphy_suspend, .resume = kszphy_resume, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, }, { .phy_id = PHY_ID_LAN8814, .phy_id_mask = MICREL_PHY_ID_MASK, @@ -2853,6 +3071,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9131 Gigabit PHY", /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9131_config_init, @@ -2863,6 +3082,8 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = kszphy_suspend, .resume = kszphy_resume, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, }, { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = MICREL_PHY_ID_MASK, -- cgit v1.2.3 From bd4a2697e5e27a33d345827dfbdebb8f28f4aa87 Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Thu, 7 Apr 2022 16:24:02 +0100 Subject: sfc: use hardware tx timestamps for more than PTP The 8000 series and newer NICs all get hardware timestamps from the MAC and can provide timestamps on a normal TX queue, rather than via a slow path through the MC. 
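In effect (a simplified restatement of the tx.c hunk below, assuming efx_xmit_with_hwtstamp() tests whether the skb requested a hardware timestamp), the transmit-path check becomes:

	/* Take the PTP transmit path when a HW timestamp was requested and
	 * either the MAC can stamp ordinary TX queues (8000 series and newer,
	 * with PTP initialised) or the packet really is a PTP event packet.
	 */
	if (efx_xmit_with_hwtstamp(skb) &&
	    ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
	     efx_ptp_is_ptp_tx(efx, skb))) {
		/* hand the skb to the PTP/timestamping transmit machinery */
	}
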
As such we can use this path for any packet where a hardware timestamp is requested. This also enables support for PTP over transports other than IPv4+UDP. Signed-off-by: Bert Kenward Signed-off-by: Edward Cree Link: https://lore.kernel.org/r/510652dc-54b4-0e11-657e-e37ee3ca26a9@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/tx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 6983799e1c05..138bca611341 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -527,7 +527,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, /* PTP "event" packet */ if (unlikely(efx_xmit_with_hwtstamp(skb)) && - unlikely(efx_ptp_is_ptp_tx(efx, skb))) { + ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || + unlikely(efx_ptp_is_ptp_tx(efx, skb)))) { /* There may be existing transmits on the channel that are * waiting for this packet to trigger the doorbell write. * We need to send the packets at this point. -- cgit v1.2.3 From 626a5aaa50673a1340ef46e049929709c83d184a Mon Sep 17 00:00:00 2001 From: Colin Foster Date: Thu, 7 Apr 2022 16:44:45 -0700 Subject: net: mdio: mscc-miim: add local dev variable to cleanup probe function Create a local device *dev in order to not dereference the platform_device several times throughout the probe function. Signed-off-by: Colin Foster Reviewed-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/mdio/mdio-mscc-miim.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 85b4b097da05..08541007b18a 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -272,6 +272,7 @@ static int mscc_miim_probe(struct platform_device *pdev) { struct regmap *mii_regmap, *phy_regmap = NULL; struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; void __iomem *regs, *phy_regs; struct mscc_miim_dev *miim; struct resource *res; @@ -280,57 +281,55 @@ static int mscc_miim_probe(struct platform_device *pdev) regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(regs)) { - dev_err(&pdev->dev, "Unable to map MIIM registers\n"); + dev_err(dev, "Unable to map MIIM registers\n"); return PTR_ERR(regs); } - mii_regmap = devm_regmap_init_mmio(&pdev->dev, regs, - &mscc_miim_regmap_config); + mii_regmap = devm_regmap_init_mmio(dev, regs, &mscc_miim_regmap_config); if (IS_ERR(mii_regmap)) { - dev_err(&pdev->dev, "Unable to create MIIM regmap\n"); + dev_err(dev, "Unable to create MIIM regmap\n"); return PTR_ERR(mii_regmap); } /* This resource is optional */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { - phy_regs = devm_ioremap_resource(&pdev->dev, res); + phy_regs = devm_ioremap_resource(dev, res); if (IS_ERR(phy_regs)) { - dev_err(&pdev->dev, "Unable to map internal phy registers\n"); + dev_err(dev, "Unable to map internal phy registers\n"); return PTR_ERR(phy_regs); } - phy_regmap = devm_regmap_init_mmio(&pdev->dev, phy_regs, + phy_regmap = devm_regmap_init_mmio(dev, phy_regs, &mscc_miim_phy_regmap_config); if (IS_ERR(phy_regmap)) { - dev_err(&pdev->dev, "Unable to create phy register regmap\n"); + dev_err(dev, "Unable to create phy register regmap\n"); return PTR_ERR(phy_regmap); } } - ret = mscc_miim_setup(&pdev->dev, &bus, "mscc_miim", mii_regmap, 0); + ret = mscc_miim_setup(dev, &bus, 
"mscc_miim", mii_regmap, 0); if (ret < 0) { - dev_err(&pdev->dev, "Unable to setup the MDIO bus\n"); + dev_err(dev, "Unable to setup the MDIO bus\n"); return ret; } miim = bus->priv; miim->phy_regs = phy_regmap; - miim->info = device_get_match_data(&pdev->dev); + miim->info = device_get_match_data(dev); if (!miim->info) return -EINVAL; - miim->clk = devm_clk_get_optional(&pdev->dev, NULL); + miim->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(miim->clk)) return PTR_ERR(miim->clk); of_property_read_u32(np, "clock-frequency", &miim->bus_freq); if (miim->bus_freq && !miim->clk) { - dev_err(&pdev->dev, - "cannot use clock-frequency without a clock\n"); + dev_err(dev, "cannot use clock-frequency without a clock\n"); return -EINVAL; } @@ -344,7 +343,7 @@ static int mscc_miim_probe(struct platform_device *pdev) ret = of_mdiobus_register(bus, np); if (ret < 0) { - dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); + dev_err(dev, "Cannot register MDIO bus (%d)\n", ret); goto out_disable_clk; } -- cgit v1.2.3 From 2fa33b3518a8da0a5345b7ae0064223b5e4e156f Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:36 +0300 Subject: net/mlx5_fpga: Drop INNOVA IPsec support Mellanox INNOVA IPsec cards are EOL in Nov, 2019 [1]. As such, the code is unmaintained, untested and not in-use by any upstream/distro oriented customers. In order to reduce code complexity, drop the kernel code. [1] https://network.nvidia.com/related-docs/eol/LCR-000535.pdf Link: https://lore.kernel.org/r/2afe88ec5020a491079eacf6fe3c89b64d65195c.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 14 +- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1 - .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 6 +- .../net/ethernet/mellanox/mlx5/core/en/params.c | 7 - .../mellanox/mlx5/core/en_accel/ipsec_stats.c | 17 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 - drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 54 +- .../net/ethernet/mellanox/mlx5/core/fpga/core.h | 2 - .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 1582 -------------------- .../net/ethernet/mellanox/mlx5/core/fpga/ipsec.h | 62 - drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 9 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 - include/linux/mlx5/mlx5_ifc_fpga.h | 148 -- 13 files changed, 5 insertions(+), 1906 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 0c82b376416b..e34e64a9ff4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -22,7 +22,6 @@ config MLX5_ACCEL config MLX5_FPGA bool "Mellanox Technologies Innova support" depends on MLX5_CORE - select MLX5_ACCEL help Build support for the Innova family of network cards by Mellanox Technologies. Innova network cards are comprised of a ConnectX chip @@ -143,17 +142,6 @@ config MLX5_CORE_IPOIB help MLX5 IPoIB offloads & acceleration support. -config MLX5_FPGA_IPSEC - bool "Mellanox Technologies IPsec Innova support" - depends on MLX5_CORE - depends on MLX5_FPGA - help - Build IPsec support for the Innova family of network cards by Mellanox - Technologies. Innova network cards are comprised of a ConnectX chip - and an FPGA chip on one board. 
If you select this option, the - mlx5_core driver will include the Innova FPGA core and allow building - sandbox-specific client drivers. - config MLX5_IPSEC bool "Mellanox Technologies IPsec Connect-X support" depends on MLX5_CORE_EN @@ -171,7 +159,7 @@ config MLX5_EN_IPSEC depends on MLX5_CORE_EN depends on XFRM_OFFLOAD depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - depends on MLX5_FPGA_IPSEC || MLX5_IPSEC + depends on MLX5_IPSEC help Build support for IPsec cryptography-offload acceleration in the NIC. Note: Support for hardware with this capability needs to be selected diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 44ff1623707a..e50361656305 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -89,7 +89,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # Accelerations & FPGA # mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o -mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 09f5ce97af46..45296ec2d055 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -35,7 +35,6 @@ #include "accel/ipsec.h" #include "mlx5_core.h" -#include "fpga/ipsec.h" #include "accel/ipsec_offload.h" void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) @@ -43,10 +42,7 @@ void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) const struct mlx5_accel_ipsec_ops *ipsec_ops; int err = 0; - ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ? - mlx5_ipsec_offload_ops(mdev) : - mlx5_fpga_ipsec_ops(mdev); - + ipsec_ops = mlx5_ipsec_offload_ops(mdev); if (!ipsec_ops || !ipsec_ops->init) { mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 7f76c4f9389b..ebb12817b795 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -6,7 +6,6 @@ #include "en/port.h" #include "en_accel/en_accel.h" #include "accel/ipsec.h" -#include "fpga/ipsec.h" static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) @@ -327,9 +326,6 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) return false; - if (mlx5_fpga_is_ipsec_device(mdev)) - return false; - if (params->xdp_prog) { /* XSK params are not considered here. 
If striding RQ is in use, * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will @@ -423,9 +419,6 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, int max_mtu; int i; - if (mlx5_fpga_is_ipsec_device(mdev)) - byte_count += MLX5E_METADATA_ETHER_LEN; - if (mlx5e_rx_is_linear_skb(params, xsk)) { int frag_stride; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 5cb936541b9e..1607c305d3ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -38,7 +38,6 @@ #include "accel/ipsec.h" #include "fpga/sdk.h" #include "en_accel/ipsec.h" -#include "fpga/ipsec.h" static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) }, @@ -105,7 +104,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw) { - return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0; + return 0; } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) @@ -121,25 +120,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw) { - unsigned int i; - - if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) - for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - mlx5e_ipsec_hw_stats_desc[i].format); - return idx; } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw) { - int i; - - if (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) - for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->ipsec->stats, - mlx5e_ipsec_hw_stats_desc, - i); return idx; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 89a85030b0eb..0a303879d0f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -67,7 +67,6 @@ #include "en/ptp.h" #include "qos.h" #include "en/trap.h" -#include "fpga/ipsec.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { @@ -4467,12 +4466,6 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return -EINVAL; } - if (mlx5_fpga_is_ipsec_device(priv->mdev)) { - netdev_warn(netdev, - "XDP is not available on Innova cards with IPsec support\n"); - return -EINVAL; - } - new_params = priv->channels.params; new_params.xdp_prog = prog; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 84cebf4c5ada..a180c80e9f68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -49,7 +49,6 @@ #include "en/rep/tc.h" #include "ipoib/ipoib.h" #include "accel/ipsec.h" -#include "fpga/ipsec.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ktls_txrx.h" #include "en/xdp.h" @@ -2384,46 +2383,6 @@ const struct mlx5e_rx_handlers mlx5i_rx_handlers = { }; #endif /* CONFIG_MLX5_CORE_IPOIB */ -#ifdef CONFIG_MLX5_EN_IPSEC - -static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) -{ - struct mlx5_wq_cyc *wq = &rq->wqe.wq; - struct mlx5e_wqe_frag_info *wi; - struct sk_buff *skb; - u32 cqe_bcnt; - u16 ci; - - ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); - wi = get_frag(rq, ci); - cqe_bcnt = 
be32_to_cpu(cqe->byte_cnt); - - if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { - rq->stats->wqe_err++; - goto wq_free_wqe; - } - - skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, - mlx5e_skb_from_cqe_linear, - mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); - if (unlikely(!skb)) /* a DROP, save the page-reuse checks */ - goto wq_free_wqe; - - skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); - if (unlikely(!skb)) - goto wq_free_wqe; - - mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); - napi_gro_receive(rq->cq.napi, skb); - -wq_free_wqe: - mlx5e_free_rx_wqe(rq, wi, true); - mlx5_wq_cyc_pop(wq); -} - -#endif /* CONFIG_MLX5_EN_IPSEC */ - int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) { struct net_device *netdev = rq->netdev; @@ -2440,10 +2399,6 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - if (mlx5_fpga_is_ipsec_device(mdev)) { - netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n"); - return -EINVAL; - } if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo; if (!rq->handle_rx_cqe) { @@ -2467,14 +2422,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool mlx5e_skb_from_cqe_nonlinear; rq->post_wqes = mlx5e_post_rx_wqes; rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; - -#ifdef CONFIG_MLX5_EN_IPSEC - if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && - priv->ipsec) - rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; - else -#endif - rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; if (!rq->handle_rx_cqe) { netdev_err(netdev, "RX handler of RQ is not set\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index e9e72d260681..750c32050165 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -57,8 +57,6 @@ struct mlx5_fpga_device { u32 mkey; struct mlx5_uars_page *uar; } conn_res; - - struct mlx5_fpga_ipsec *ipsec; }; #define mlx5_fpga_dbg(__adev, format, ...) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c deleted file mode 100644 index 8ec148010d62..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ /dev/null @@ -1,1582 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include -#include -#include -#include -#include - -#include "mlx5_core.h" -#include "fs_cmd.h" -#include "fpga/ipsec.h" -#include "fpga/sdk.h" -#include "fpga/core.h" - -enum mlx5_fpga_ipsec_cmd_status { - MLX5_FPGA_IPSEC_CMD_PENDING, - MLX5_FPGA_IPSEC_CMD_SEND_FAIL, - MLX5_FPGA_IPSEC_CMD_COMPLETE, -}; - -struct mlx5_fpga_ipsec_cmd_context { - struct mlx5_fpga_dma_buf buf; - enum mlx5_fpga_ipsec_cmd_status status; - struct mlx5_ifc_fpga_ipsec_cmd_resp resp; - int status_code; - struct completion complete; - struct mlx5_fpga_device *dev; - struct list_head list; /* Item in pending_cmds */ - u8 command[]; -}; - -struct mlx5_fpga_esp_xfrm; - -struct mlx5_fpga_ipsec_sa_ctx { - struct rhash_head hash; - struct mlx5_ifc_fpga_ipsec_sa hw_sa; - u32 sa_handle; - struct mlx5_core_dev *dev; - struct mlx5_fpga_esp_xfrm *fpga_xfrm; -}; - -struct mlx5_fpga_esp_xfrm { - unsigned int num_rules; - struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; - struct mutex lock; /* xfrm lock */ - struct mlx5_accel_esp_xfrm accel_xfrm; -}; - -struct mlx5_fpga_ipsec_rule { - struct rb_node node; - struct fs_fte *fte; - struct mlx5_fpga_ipsec_sa_ctx *ctx; -}; - -static const struct rhashtable_params rhash_sa = { - /* Keep out "cmd" field from the key as it's - * value is not constant during the lifetime - * of the key object. 
- */ - .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - - sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), - .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + - sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), - .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), - .automatic_shrinking = true, - .min_size = 1, -}; - -struct mlx5_fpga_ipsec { - struct mlx5_fpga_device *fdev; - struct list_head pending_cmds; - spinlock_t pending_cmds_lock; /* Protects pending_cmds */ - u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)]; - struct mlx5_fpga_conn *conn; - - struct notifier_block fs_notifier_ingress_bypass; - struct notifier_block fs_notifier_egress; - - /* Map hardware SA --> SA context - * (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx) - * We will use this hash to avoid SAs duplication in fpga which - * aren't allowed - */ - struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */ - struct mutex sa_hash_lock; - - /* Tree holding all rules for this fpga device - * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id) - */ - struct rb_root rules_rb; - struct mutex rules_rb_lock; /* rules lock */ - - struct ida halloc; -}; - -bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) -{ - if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) - return false; - - if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != - MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) - return false; - - if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC) - return false; - - return true; -} - -static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn, - struct mlx5_fpga_device *fdev, - struct mlx5_fpga_dma_buf *buf, - u8 status) -{ - struct mlx5_fpga_ipsec_cmd_context *context; - - if (status) { - context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context, - buf); - mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n", - status); - context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL; - complete(&context->complete); - } -} - -static inline -int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome) -{ - switch (syndrome) { - case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS: - return 0; - case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE: - return -EEXIST; - case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST: - return -EINVAL; - case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE: - return -EIO; - } - return -EIO; -} - -static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) -{ - struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data; - struct mlx5_fpga_ipsec_cmd_context *context; - enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome; - struct mlx5_fpga_device *fdev = cb_arg; - unsigned long flags; - - if (buf->sg[0].size < sizeof(*resp)) { - mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n", - buf->sg[0].size, sizeof(*resp)); - return; - } - - mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n", - ntohl(resp->syndrome)); - - spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); - context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, - struct mlx5_fpga_ipsec_cmd_context, - list); - if (context) - list_del(&context->list); - spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); - - if (!context) { - mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n"); - return; - } - mlx5_fpga_dbg(fdev, "Handling response for %p\n", context); - - syndrome = ntohl(resp->syndrome); - context->status_code = syndrome_to_errno(syndrome); - context->status 
= MLX5_FPGA_IPSEC_CMD_COMPLETE; - memcpy(&context->resp, resp, sizeof(*resp)); - - if (context->status_code) - mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n", - syndrome); - - complete(&context->complete); -} - -static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, - const void *cmd, int cmd_size) -{ - struct mlx5_fpga_ipsec_cmd_context *context; - struct mlx5_fpga_device *fdev = mdev->fpga; - unsigned long flags; - int res; - - if (!fdev || !fdev->ipsec) - return ERR_PTR(-EOPNOTSUPP); - - if (cmd_size & 3) - return ERR_PTR(-EINVAL); - - context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC); - if (!context) - return ERR_PTR(-ENOMEM); - - context->status = MLX5_FPGA_IPSEC_CMD_PENDING; - context->dev = fdev; - context->buf.complete = mlx5_fpga_ipsec_send_complete; - init_completion(&context->complete); - memcpy(&context->command, cmd, cmd_size); - context->buf.sg[0].size = cmd_size; - context->buf.sg[0].data = &context->command; - - spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); - res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); - if (!res) - list_add_tail(&context->list, &fdev->ipsec->pending_cmds); - spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); - - if (res) { - mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res); - kfree(context); - return ERR_PTR(res); - } - - /* Context should be freed by the caller after completion. */ - return context; -} - -static int mlx5_fpga_ipsec_cmd_wait(void *ctx) -{ - struct mlx5_fpga_ipsec_cmd_context *context = ctx; - unsigned long timeout = - msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC); - int res; - - res = wait_for_completion_timeout(&context->complete, timeout); - if (!res) { - mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n"); - return -ETIMEDOUT; - } - - if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE) - res = context->status_code; - else - res = -EIO; - - return res; -} - -static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec) -{ - if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command)) - return true; - return false; -} - -static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev, - struct mlx5_ifc_fpga_ipsec_sa *hw_sa, - int opcode) -{ - struct mlx5_core_dev *dev = fdev->mdev; - struct mlx5_ifc_fpga_ipsec_sa *sa; - struct mlx5_fpga_ipsec_cmd_context *cmd_context; - size_t sa_cmd_size; - int err; - - hw_sa->ipsec_sa_v1.cmd = htonl(opcode); - if (is_v2_sadb_supported(fdev->ipsec)) - sa_cmd_size = sizeof(*hw_sa); - else - sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1); - - cmd_context = (struct mlx5_fpga_ipsec_cmd_context *) - mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size); - if (IS_ERR(cmd_context)) - return PTR_ERR(cmd_context); - - err = mlx5_fpga_ipsec_cmd_wait(cmd_context); - if (err) - goto out; - - sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command; - if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) { - mlx5_fpga_err(fdev, "mismatch SA handle. 
cmd 0x%08x vs resp 0x%08x\n", - ntohl(sa->ipsec_sa_v1.sw_sa_handle), - ntohl(cmd_context->resp.sw_sa_handle)); - err = -EIO; - } - -out: - kfree(cmd_context); - return err; -} - -u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - u32 ret = 0; - - if (mlx5_fpga_is_ipsec_device(mdev)) { - ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE; - ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA; - } else { - return ret; - } - - if (!fdev->ipsec) - return ret; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp)) - ret |= MLX5_ACCEL_IPSEC_CAP_ESP; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6)) - ret |= MLX5_ACCEL_IPSEC_CAP_IPV6; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso)) - ret |= MLX5_ACCEL_IPSEC_CAP_LSO; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer)) - ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER; - - if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) { - ret |= MLX5_ACCEL_IPSEC_CAP_ESN; - ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; - } - - return ret; -} - -static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - - if (!fdev || !fdev->ipsec) - return 0; - - return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, - number_of_ipsec_counters); -} - -static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int counters_count) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - unsigned int i; - __be32 *data; - u32 count; - u64 addr; - int ret; - - if (!fdev || !fdev->ipsec) - return 0; - - addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, - ipsec_counters_addr_low) + - ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, - ipsec_counters_addr_high) << 32); - - count = mlx5_fpga_ipsec_counters_count(mdev); - - data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL); - if (!data) { - ret = -ENOMEM; - goto out; - } - - ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data, - MLX5_FPGA_ACCESS_TYPE_DONTCARE); - if (ret < 0) { - mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n", - ret); - goto out; - } - ret = 0; - - if (count > counters_count) - count = counters_count; - - /* Each counter is low word, then high. But each word is big-endian */ - for (i = 0; i < count; i++) - counters[i] = (u64)ntohl(data[i * 2]) | - ((u64)ntohl(data[i * 2 + 1]) << 32); - -out: - kfree(data); - return ret; -} - -static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) -{ - struct mlx5_fpga_ipsec_cmd_context *context; - struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0}; - int err; - - cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); - cmd.flags = htonl(flags); - context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); - if (IS_ERR(context)) - return PTR_ERR(context); - - err = mlx5_fpga_ipsec_cmd_wait(context); - if (err) - goto out; - - if ((context->resp.flags & cmd.flags) != cmd.flags) { - mlx5_fpga_err(context->dev, "Failed to set capabilities. 
cmd 0x%08x vs resp 0x%08x\n", - cmd.flags, - context->resp.flags); - err = -EIO; - } - -out: - kfree(context); - return err; -} - -static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev) -{ - u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev); - u32 flags = 0; - - if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER) - flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER; - - return mlx5_fpga_ipsec_set_caps(mdev, flags); -} - -static void -mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, - struct mlx5_ifc_fpga_ipsec_sa *hw_sa) -{ - const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; - - /* key */ - memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key, - aes_gcm->key_len / 8); - /* Duplicate 128 bit key twice according to HW layout */ - if (aes_gcm->key_len == 128) - memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], - aes_gcm->aes_key, aes_gcm->key_len / 8); - - /* salt and seq_iv */ - memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv, - sizeof(aes_gcm->seq_iv)); - memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt, - sizeof(aes_gcm->salt)); - - /* esn */ - if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN; - hw_sa->ipsec_sa_v1.flags |= - (xfrm_attrs->flags & - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? - MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; - hw_sa->esn = htonl(xfrm_attrs->esn); - } else { - hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN; - hw_sa->ipsec_sa_v1.flags &= - ~(xfrm_attrs->flags & - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ? - MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0; - hw_sa->esn = 0; - } - - /* rx handle */ - hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle); - - /* enc mode */ - switch (aes_gcm->key_len) { - case 128: - hw_sa->ipsec_sa_v1.enc_mode = - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128; - break; - case 256: - hw_sa->ipsec_sa_v1.enc_mode = - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128; - break; - } - - /* flags */ - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID | - MLX5_FPGA_IPSEC_SA_SPI_EN | - MLX5_FPGA_IPSEC_SA_IP_ESP; - - if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT) - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX; - else - hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX; -} - -static void -mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs, - const __be32 saddr[4], - const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - struct mlx5_ifc_fpga_ipsec_sa *hw_sa) -{ - mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa); - - /* IPs */ - memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip)); - memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip)); - - /* SPI */ - hw_sa->ipsec_sa_v1.spi = spi; - - /* flags */ - if (is_ipv6) - hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6; -} - -static bool is_full_mask(const void *p, size_t len) -{ - WARN_ON(len % 4); - - return !memchr_inv(p, 0xff, len); -} - -static bool validate_fpga_full_mask(struct mlx5_core_dev *dev, - const u32 *match_c, - const u32 *match_v) -{ - const void *misc_params_c = MLX5_ADDR_OF(fte_match_param, - match_c, - misc_parameters); - const void *headers_c = MLX5_ADDR_OF(fte_match_param, - match_c, - outer_headers); - const void *headers_v = MLX5_ADDR_OF(fte_match_param, - match_v, - outer_headers); - - if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) { - const void *s_ipv4_c = 
MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - src_ipv4_src_ipv6.ipv4_layout.ipv4); - const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4); - - if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, - ipv4)) || - !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout, - ipv4))) - return false; - } else { - const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - src_ipv4_src_ipv6.ipv6_layout.ipv6); - const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4, - headers_c, - dst_ipv4_dst_ipv6.ipv6_layout.ipv6); - - if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, - ipv6)) || - !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout, - ipv6))) - return false; - } - - if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c, - outer_esp_spi), - MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi))) - return false; - - return true; -} - -static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev, - u8 match_criteria_enable, - const u32 *match_c, - const u32 *match_v) -{ - u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev); - bool ipv6_flow; - - ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v); - - if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) || - mlx5_fs_is_outer_udp_flow(match_c, match_v) || - mlx5_fs_is_outer_tcp_flow(match_c, match_v) || - mlx5_fs_is_vxlan_flow(match_c) || - !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) || - ipv6_flow)) - return false; - - if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE)) - return false; - - if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) && - mlx5_fs_is_outer_ipsec_flow(match_c)) - return false; - - if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) && - ipv6_flow) - return false; - - if (!validate_fpga_full_mask(dev, match_c, match_v)) - return false; - - return true; -} - -static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, - u8 match_criteria_enable, - const u32 *match_c, - const u32 *match_v, - struct mlx5_flow_act *flow_act, - struct mlx5_flow_context *flow_context) -{ - const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c, - outer_headers); - bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) || - MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0); - bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) || - MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0); - int ret; - - ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c, - match_v); - if (!ret) - return ret; - - if (is_dmac || is_smac || - (match_criteria_enable & - ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) || - (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) || - (flow_context->flags & FLOW_CONTEXT_HAS_TAG)) - return false; - - return true; -} - -static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *accel_xfrm, - const __be32 saddr[4], const __be32 daddr[4], - const __be32 spi, bool is_ipv6, u32 *sa_handle) -{ - struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; - struct mlx5_fpga_esp_xfrm *fpga_xfrm = - container_of(accel_xfrm, typeof(*fpga_xfrm), - accel_xfrm); - struct mlx5_fpga_device *fdev = mdev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - int opcode, err; - void *context; - - /* alloc SA */ - sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); - if (!sa_ctx) - return ERR_PTR(-ENOMEM); - - sa_ctx->dev = mdev; - - /* build candidate SA */ - mlx5_fpga_ipsec_build_hw_sa(mdev, 
&accel_xfrm->attrs, - saddr, daddr, spi, is_ipv6, - &sa_ctx->hw_sa); - - mutex_lock(&fpga_xfrm->lock); - - if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */ - /* all rules must be with same IPs and SPI */ - if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa, - sizeof(sa_ctx->hw_sa))) { - context = ERR_PTR(-EINVAL); - goto exists; - } - - ++fpga_xfrm->num_rules; - context = fpga_xfrm->sa_ctx; - goto exists; - } - - if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) { - err = ida_alloc_min(&fipsec->halloc, 1, GFP_KERNEL); - if (err < 0) { - context = ERR_PTR(err); - goto exists; - } - - sa_ctx->sa_handle = err; - if (sa_handle) - *sa_handle = sa_ctx->sa_handle; - } - /* This is unbounded fpga_xfrm, try to add to hash */ - mutex_lock(&fipsec->sa_hash_lock); - - err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash, - rhash_sa); - if (err) { - /* Can't bound different accel_xfrm to already existing sa_ctx. - * This is because we can't support multiple ketmats for - * same IPs and SPI - */ - context = ERR_PTR(-EEXIST); - goto unlock_hash; - } - - /* Bound accel_xfrm to sa_ctx */ - opcode = is_v2_sadb_supported(fdev->ipsec) ? - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 : - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA; - err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); - sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; - if (err) { - context = ERR_PTR(err); - goto delete_hash; - } - - mutex_unlock(&fipsec->sa_hash_lock); - - ++fpga_xfrm->num_rules; - fpga_xfrm->sa_ctx = sa_ctx; - sa_ctx->fpga_xfrm = fpga_xfrm; - - mutex_unlock(&fpga_xfrm->lock); - - return sa_ctx; - -delete_hash: - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, - rhash_sa)); -unlock_hash: - mutex_unlock(&fipsec->sa_hash_lock); - if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) - ida_free(&fipsec->halloc, sa_ctx->sa_handle); -exists: - mutex_unlock(&fpga_xfrm->lock); - kfree(sa_ctx); - return context; -} - -static void * -mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev, - struct fs_fte *fte, - bool is_egress) -{ - struct mlx5_accel_esp_xfrm *accel_xfrm; - __be32 saddr[4], daddr[4], spi; - struct mlx5_flow_group *fg; - bool is_ipv6 = false; - - fs_get_obj(fg, fte->node.parent); - /* validate */ - if (is_egress && - !mlx5_is_fpga_egress_ipsec_rule(mdev, - fg->mask.match_criteria_enable, - fg->mask.match_criteria, - fte->val, - &fte->action, - &fte->flow_context)) - return ERR_PTR(-EINVAL); - else if (!mlx5_is_fpga_ipsec_rule(mdev, - fg->mask.match_criteria_enable, - fg->mask.match_criteria, - fte->val)) - return ERR_PTR(-EINVAL); - - /* get xfrm context */ - accel_xfrm = - (struct mlx5_accel_esp_xfrm *)fte->action.esp_id; - - /* IPs */ - if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria, - fte->val)) { - memcpy(&saddr[3], - MLX5_ADDR_OF(fte_match_set_lyr_2_4, - fte->val, - src_ipv4_src_ipv6.ipv4_layout.ipv4), - sizeof(saddr[3])); - memcpy(&daddr[3], - MLX5_ADDR_OF(fte_match_set_lyr_2_4, - fte->val, - dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - sizeof(daddr[3])); - } else { - memcpy(saddr, - MLX5_ADDR_OF(fte_match_param, - fte->val, - outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), - sizeof(saddr)); - memcpy(daddr, - MLX5_ADDR_OF(fte_match_param, - fte->val, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - sizeof(daddr)); - is_ipv6 = true; - } - - /* SPI */ - spi = MLX5_GET_BE(typeof(spi), - fte_match_param, fte->val, - misc_parameters.outer_esp_spi); - - /* create */ - return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm, - saddr, daddr, - spi, 
is_ipv6, NULL); -} - -static void -mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) -{ - struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - int opcode = is_v2_sadb_supported(fdev->ipsec) ? - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 : - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA; - int err; - - err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode); - sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; - if (err) { - WARN_ON(err); - return; - } - - if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action == - MLX5_ACCEL_ESP_ACTION_DECRYPT) - ida_free(&fipsec->halloc, sa_ctx->sa_handle); - - mutex_lock(&fipsec->sa_hash_lock); - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash, - rhash_sa)); - mutex_unlock(&fipsec->sa_hash_lock); -} - -static void mlx5_fpga_ipsec_delete_sa_ctx(void *context) -{ - struct mlx5_fpga_esp_xfrm *fpga_xfrm = - ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm; - - mutex_lock(&fpga_xfrm->lock); - if (!--fpga_xfrm->num_rules) { - mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); - kfree(fpga_xfrm->sa_ctx); - fpga_xfrm->sa_ctx = NULL; - } - mutex_unlock(&fpga_xfrm->lock); -} - -static inline struct mlx5_fpga_ipsec_rule * -_rule_search(struct rb_root *root, struct fs_fte *fte) -{ - struct rb_node *node = root->rb_node; - - while (node) { - struct mlx5_fpga_ipsec_rule *rule = - container_of(node, struct mlx5_fpga_ipsec_rule, - node); - - if (rule->fte < fte) - node = node->rb_left; - else if (rule->fte > fte) - node = node->rb_right; - else - return rule; - } - return NULL; -} - -static struct mlx5_fpga_ipsec_rule * -rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte) -{ - struct mlx5_fpga_ipsec_rule *rule; - - mutex_lock(&ipsec_dev->rules_rb_lock); - rule = _rule_search(&ipsec_dev->rules_rb, fte); - mutex_unlock(&ipsec_dev->rules_rb_lock); - - return rule; -} - -static inline int _rule_insert(struct rb_root *root, - struct mlx5_fpga_ipsec_rule *rule) -{ - struct rb_node **new = &root->rb_node, *parent = NULL; - - /* Figure out where to put new node */ - while (*new) { - struct mlx5_fpga_ipsec_rule *this = - container_of(*new, struct mlx5_fpga_ipsec_rule, - node); - - parent = *new; - if (rule->fte < this->fte) - new = &((*new)->rb_left); - else if (rule->fte > this->fte) - new = &((*new)->rb_right); - else - return -EEXIST; - } - - /* Add new node and rebalance tree. 
*/ - rb_link_node(&rule->node, parent, new); - rb_insert_color(&rule->node, root); - - return 0; -} - -static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev, - struct mlx5_fpga_ipsec_rule *rule) -{ - int ret; - - mutex_lock(&ipsec_dev->rules_rb_lock); - ret = _rule_insert(&ipsec_dev->rules_rb, rule); - mutex_unlock(&ipsec_dev->rules_rb_lock); - - return ret; -} - -static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, - struct mlx5_fpga_ipsec_rule *rule) -{ - struct rb_root *root = &ipsec_dev->rules_rb; - - mutex_lock(&ipsec_dev->rules_rb_lock); - rb_erase(&rule->node, root); - mutex_unlock(&ipsec_dev->rules_rb_lock); -} - -static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev, - struct mlx5_fpga_ipsec_rule *rule) -{ - _rule_delete(ipsec_dev, rule); - kfree(rule); -} - -struct mailbox_mod { - uintptr_t saved_esp_id; - u32 saved_action; - u32 saved_outer_esp_spi_value; -}; - -static void restore_spec_mailbox(struct fs_fte *fte, - struct mailbox_mod *mbox_mod) -{ - char *misc_params_v = MLX5_ADDR_OF(fte_match_param, - fte->val, - misc_parameters); - - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, - mbox_mod->saved_outer_esp_spi_value); - fte->action.action |= mbox_mod->saved_action; - fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id; -} - -static void modify_spec_mailbox(struct mlx5_core_dev *mdev, - struct fs_fte *fte, - struct mailbox_mod *mbox_mod) -{ - char *misc_params_v = MLX5_ADDR_OF(fte_match_param, - fte->val, - misc_parameters); - - mbox_mod->saved_esp_id = fte->action.esp_id; - mbox_mod->saved_action = fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT); - mbox_mod->saved_outer_esp_spi_value = - MLX5_GET(fte_match_set_misc, misc_params_v, - outer_esp_spi); - - fte->action.esp_id = 0; - fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT); - if (!MLX5_CAP_FLOWTABLE(mdev, - flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0); -} - -static enum fs_flow_table_type egress_to_fs_ft(bool egress) -{ - return egress ? 
FS_FT_NIC_TX : FS_FT_NIC_RX; -} - -static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - u32 *in, - struct mlx5_flow_group *fg, - bool is_egress) -{ - int (*create_flow_group)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, u32 *in, - struct mlx5_flow_group *fg) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group; - char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in, - match_criteria.misc_parameters); - struct mlx5_core_dev *dev = ns->dev; - u32 saved_outer_esp_spi_mask; - u8 match_criteria_enable; - int ret; - - if (MLX5_CAP_FLOWTABLE(dev, - flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) - return create_flow_group(ns, ft, in, fg); - - match_criteria_enable = - MLX5_GET(create_flow_group_in, in, match_criteria_enable); - saved_outer_esp_spi_mask = - MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); - if (!match_criteria_enable || !saved_outer_esp_spi_mask) - return create_flow_group(ns, ft, in, fg); - - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0); - - if (!(*misc_params_c) && - !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1)) - MLX5_SET(create_flow_group_in, in, match_criteria_enable, - match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS); - - ret = create_flow_group(ns, ft, in, fg); - - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask); - MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable); - - return ret; -} - -static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte, - bool is_egress) -{ - int (*create_fte)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte; - struct mlx5_core_dev *dev = ns->dev; - struct mlx5_fpga_device *fdev = dev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - struct mlx5_fpga_ipsec_rule *rule; - bool is_esp = fte->action.esp_id; - struct mailbox_mod mbox_mod; - int ret; - - if (!is_esp || - !(fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return create_fte(ns, ft, fg, fte); - - rule = kzalloc(sizeof(*rule), GFP_KERNEL); - if (!rule) - return -ENOMEM; - - rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress); - if (IS_ERR(rule->ctx)) { - int err = PTR_ERR(rule->ctx); - - kfree(rule); - return err; - } - - rule->fte = fte; - WARN_ON(rule_insert(fipsec, rule)); - - modify_spec_mailbox(dev, fte, &mbox_mod); - ret = create_fte(ns, ft, fg, fte); - restore_spec_mailbox(fte, &mbox_mod); - if (ret) { - _rule_delete(fipsec, rule); - mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); - kfree(rule); - } - - return ret; -} - -static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte, - bool is_egress) -{ - int (*update_fte)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte; - struct mlx5_core_dev *dev = ns->dev; - bool is_esp = fte->action.esp_id; - struct mailbox_mod mbox_mod; - int ret; - - if (!is_esp || - !(fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | 
- MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return update_fte(ns, ft, fg, modify_mask, fte); - - modify_spec_mailbox(dev, fte, &mbox_mod); - ret = update_fte(ns, ft, fg, modify_mask, fte); - restore_spec_mailbox(fte, &mbox_mod); - - return ret; -} - -static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte, - bool is_egress) -{ - int (*delete_fte)(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte) = - mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte; - struct mlx5_core_dev *dev = ns->dev; - struct mlx5_fpga_device *fdev = dev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - struct mlx5_fpga_ipsec_rule *rule; - bool is_esp = fte->action.esp_id; - struct mailbox_mod mbox_mod; - int ret; - - if (!is_esp || - !(fte->action.action & - (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT))) - return delete_fte(ns, ft, fte); - - rule = rule_search(fipsec, fte); - if (!rule) - return -ENOENT; - - mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx); - rule_delete(fipsec, rule); - - modify_spec_mailbox(dev, fte, &mbox_mod); - ret = delete_fte(ns, ft, fte); - restore_spec_mailbox(fte, &mbox_mod); - - return ret; -} - -static int -mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - u32 *in, - struct mlx5_flow_group *fg) -{ - return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true); -} - -static int -mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true); -} - -static int -mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, - true); -} - -static int -mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_delete_fte(ns, ft, fte, true); -} - -static int -mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - u32 *in, - struct mlx5_flow_group *fg) -{ - return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false); -} - -static int -mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false); -} - -static int -mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *fg, - int modify_mask, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte, - false); -} - -static int -mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct fs_fte *fte) -{ - return fpga_ipsec_fs_delete_fte(ns, ft, fte, false); -} - -static struct mlx5_flow_cmds fpga_ipsec_ingress; -static struct mlx5_flow_cmds fpga_ipsec_egress; - -const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) -{ - switch (type) { - case FS_FT_NIC_RX: - return &fpga_ipsec_ingress; - case FS_FT_NIC_TX: - return &fpga_ipsec_egress; - default: - WARN_ON(true); - return NULL; - } -} - -static int 
mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_conn_attr init_attr = {0}; - struct mlx5_fpga_device *fdev = mdev->fpga; - struct mlx5_fpga_conn *conn; - int err; - - if (!mlx5_fpga_is_ipsec_device(mdev)) - return 0; - - fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL); - if (!fdev->ipsec) - return -ENOMEM; - - fdev->ipsec->fdev = fdev; - - err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps), - fdev->ipsec->caps); - if (err) { - mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n", - err); - goto error; - } - - INIT_LIST_HEAD(&fdev->ipsec->pending_cmds); - spin_lock_init(&fdev->ipsec->pending_cmds_lock); - - init_attr.rx_size = SBU_QP_QUEUE_SIZE; - init_attr.tx_size = SBU_QP_QUEUE_SIZE; - init_attr.recv_cb = mlx5_fpga_ipsec_recv; - init_attr.cb_arg = fdev; - conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); - if (IS_ERR(conn)) { - err = PTR_ERR(conn); - mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n", - err); - goto error; - } - fdev->ipsec->conn = conn; - - err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa); - if (err) - goto err_destroy_conn; - mutex_init(&fdev->ipsec->sa_hash_lock); - - fdev->ipsec->rules_rb = RB_ROOT; - mutex_init(&fdev->ipsec->rules_rb_lock); - - err = mlx5_fpga_ipsec_enable_supported_caps(mdev); - if (err) { - mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n", - err); - goto err_destroy_hash; - } - - ida_init(&fdev->ipsec->halloc); - - return 0; - -err_destroy_hash: - rhashtable_destroy(&fdev->ipsec->sa_hash); - -err_destroy_conn: - mlx5_fpga_sbu_conn_destroy(conn); - -error: - kfree(fdev->ipsec); - fdev->ipsec = NULL; - return err; -} - -static void destroy_rules_rb(struct rb_root *root) -{ - struct mlx5_fpga_ipsec_rule *r, *tmp; - - rbtree_postorder_for_each_entry_safe(r, tmp, root, node) { - rb_erase(&r->node, root); - mlx5_fpga_ipsec_delete_sa_ctx(r->ctx); - kfree(r); - } -} - -static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - struct mlx5_fpga_device *fdev = mdev->fpga; - - if (!mlx5_fpga_is_ipsec_device(mdev)) - return; - - ida_destroy(&fdev->ipsec->halloc); - destroy_rules_rb(&fdev->ipsec->rules_rb); - rhashtable_destroy(&fdev->ipsec->sa_hash); - - mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn); - kfree(fdev->ipsec); - fdev->ipsec = NULL; -} - -void mlx5_fpga_ipsec_build_fs_cmds(void) -{ - /* ingress */ - fpga_ipsec_ingress.create_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table; - fpga_ipsec_ingress.destroy_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table; - fpga_ipsec_ingress.modify_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table; - fpga_ipsec_ingress.create_flow_group = - mlx5_fpga_ipsec_fs_create_flow_group_ingress; - fpga_ipsec_ingress.destroy_flow_group = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group; - fpga_ipsec_ingress.create_fte = - mlx5_fpga_ipsec_fs_create_fte_ingress; - fpga_ipsec_ingress.update_fte = - mlx5_fpga_ipsec_fs_update_fte_ingress; - fpga_ipsec_ingress.delete_fte = - mlx5_fpga_ipsec_fs_delete_fte_ingress; - fpga_ipsec_ingress.update_root_ft = - mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft; - - /* egress */ - fpga_ipsec_egress.create_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table; - fpga_ipsec_egress.destroy_flow_table = - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table; - fpga_ipsec_egress.modify_flow_table = - 
mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table; - fpga_ipsec_egress.create_flow_group = - mlx5_fpga_ipsec_fs_create_flow_group_egress; - fpga_ipsec_egress.destroy_flow_group = - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group; - fpga_ipsec_egress.create_fte = - mlx5_fpga_ipsec_fs_create_fte_egress; - fpga_ipsec_egress.update_fte = - mlx5_fpga_ipsec_fs_update_fte_egress; - fpga_ipsec_egress.delete_fte = - mlx5_fpga_ipsec_fs_delete_fte_egress; - fpga_ipsec_egress.update_root_ft = - mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft; -} - -static int -mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - if (attrs->tfc_pad) { - mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n"); - return -EOPNOTSUPP; - } - - if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { - mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n"); - return -EOPNOTSUPP; - } - - if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) { - mlx5_core_err(mdev, "Only aes gcm keymat is supported\n"); - return -EOPNOTSUPP; - } - - if (attrs->keymat.aes_gcm.iv_algo != - MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) { - mlx5_core_err(mdev, "Only iv sequence algo is supported\n"); - return -EOPNOTSUPP; - } - - if (attrs->keymat.aes_gcm.icv_len != 128) { - mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n"); - return -EOPNOTSUPP; - } - - if (attrs->keymat.aes_gcm.key_len != 128 && - attrs->keymat.aes_gcm.key_len != 256) { - mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n"); - return -EOPNOTSUPP; - } - - if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) && - (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps, - v2_command))) { - mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n"); - return -EOPNOTSUPP; - } - - return 0; -} - -static struct mlx5_accel_esp_xfrm * -mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags) -{ - struct mlx5_fpga_esp_xfrm *fpga_xfrm; - - if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) { - mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n"); - return ERR_PTR(-EINVAL); - } - - if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { - mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); - return ERR_PTR(-EOPNOTSUPP); - } - - fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL); - if (!fpga_xfrm) - return ERR_PTR(-ENOMEM); - - mutex_init(&fpga_xfrm->lock); - memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs, - sizeof(fpga_xfrm->accel_xfrm.attrs)); - - return &fpga_xfrm->accel_xfrm; -} - -static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) -{ - struct mlx5_fpga_esp_xfrm *fpga_xfrm = - container_of(xfrm, struct mlx5_fpga_esp_xfrm, - accel_xfrm); - /* assuming no sa_ctx are connected to this xfrm_ctx */ - kfree(fpga_xfrm); -} - -static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - struct mlx5_core_dev *mdev = xfrm->mdev; - struct mlx5_fpga_device *fdev = mdev->fpga; - struct mlx5_fpga_ipsec *fipsec = fdev->ipsec; - struct mlx5_fpga_esp_xfrm *fpga_xfrm; - struct mlx5_ifc_fpga_ipsec_sa org_hw_sa; - - int err = 0; - - if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) - return 0; - - if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { - 
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); - return -EOPNOTSUPP; - } - - if (is_v2_sadb_supported(fipsec)) { - mlx5_core_warn(mdev, "Modify esp is not supported\n"); - return -EOPNOTSUPP; - } - - fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm); - - mutex_lock(&fpga_xfrm->lock); - - if (!fpga_xfrm->sa_ctx) - /* Unbounded xfrm, change only sw attrs */ - goto change_sw_xfrm_attrs; - - /* copy original hw sa */ - memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa)); - mutex_lock(&fipsec->sa_hash_lock); - /* remove original hw sa from hash */ - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, rhash_sa)); - /* update hw_sa with new xfrm attrs*/ - mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs, - &fpga_xfrm->sa_ctx->hw_sa); - /* try to insert new hw_sa to hash */ - err = rhashtable_insert_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, rhash_sa); - if (err) - goto rollback_sa; - - /* modify device with new hw_sa */ - err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa, - MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2); - fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0; - if (err) - WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, - rhash_sa)); -rollback_sa: - if (err) { - /* return original hw_sa to hash */ - memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa, - sizeof(org_hw_sa)); - WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash, - &fpga_xfrm->sa_ctx->hash, - rhash_sa)); - } - mutex_unlock(&fipsec->sa_hash_lock); - -change_sw_xfrm_attrs: - if (!err) - memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); - mutex_unlock(&fpga_xfrm->lock); - return err; -} - -static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = { - .device_caps = mlx5_fpga_ipsec_device_caps, - .counters_count = mlx5_fpga_ipsec_counters_count, - .counters_read = mlx5_fpga_ipsec_counters_read, - .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx, - .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx, - .init = mlx5_fpga_ipsec_init, - .cleanup = mlx5_fpga_ipsec_cleanup, - .esp_create_xfrm = mlx5_fpga_esp_create_xfrm, - .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm, - .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm, -}; - -const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) -{ - if (!mlx5_fpga_is_ipsec_device(mdev)) - return NULL; - - return &fpga_ipsec_ops; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h deleted file mode 100644 index 8931b5584477..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef __MLX5_FPGA_IPSEC_H__ -#define __MLX5_FPGA_IPSEC_H__ - -#include "accel/ipsec.h" -#include "fs_cmd.h" - -#ifdef CONFIG_MLX5_FPGA_IPSEC -const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev); -u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); -const struct mlx5_flow_cmds * -mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); -void mlx5_fpga_ipsec_build_fs_cmds(void); -bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev); -#else -static inline -const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) -{ return NULL; } -static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } -static inline const struct mlx5_flow_cmds * -mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) -{ - return mlx5_fs_cmd_get_default(type); -} - -static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {}; -static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; } - -#endif /* CONFIG_MLX5_FPGA_IPSEC */ -#endif /* __MLX5_FPGA_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 816d991f7621..fe7bdccea301 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -40,8 +40,6 @@ #include "fs_cmd.h" #include "fs_ft_pool.h" #include "diag/fs_tracepoint.h" -#include "accel/ipsec.h" -#include "fpga/ipsec.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) 
(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) @@ -2519,10 +2517,6 @@ static struct mlx5_flow_root_namespace struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_namespace *ns; - if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE && - (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX)) - cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type); - /* Create the root namespace */ root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) @@ -3172,8 +3166,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } - if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE || - MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { + if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) { err = init_egress_root_ns(steering); if (err) goto err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 7f287e300fb4..387602bbfecc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,7 +62,6 @@ #include "lib/mlx5.h" #include "lib/tout.h" #include "fpga/core.h" -#include "fpga/ipsec.h" #include "accel/ipsec.h" #include "lib/clock.h" #include "lib/vxlan.h" @@ -1937,7 +1936,6 @@ static int __init init(void) get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); - mlx5_fpga_ipsec_build_fs_cmds(); mlx5_register_debugfs(); err = pci_register_driver(&mlx5_core_driver); diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h index e3d824f6a309..45c7c0d67635 100644 --- a/include/linux/mlx5/mlx5_ifc_fpga.h +++ b/include/linux/mlx5/mlx5_ifc_fpga.h @@ -386,68 +386,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits { u8 reserved_at_40[0x40]; }; -struct mlx5_ifc_ipsec_extended_cap_bits { - u8 encapsulation[0x20]; - - u8 reserved_0[0x12]; - u8 v2_command[0x1]; - u8 udp_encap[0x1]; - u8 rx_no_trailer[0x1]; - u8 ipv4_fragment[0x1]; - u8 ipv6[0x1]; - u8 esn[0x1]; - u8 lso[0x1]; - u8 transport_and_tunnel_mode[0x1]; - u8 tunnel_mode[0x1]; - u8 transport_mode[0x1]; - u8 ah_esp[0x1]; - u8 esp[0x1]; - u8 ah[0x1]; - u8 ipv4_options[0x1]; - - u8 auth_alg[0x20]; - - u8 enc_alg[0x20]; - - u8 sa_cap[0x20]; - - u8 reserved_1[0x10]; - u8 number_of_ipsec_counters[0x10]; - - u8 ipsec_counters_addr_low[0x20]; - u8 ipsec_counters_addr_high[0x20]; -}; - -struct mlx5_ifc_ipsec_counters_bits { - u8 dec_in_packets[0x40]; - - u8 dec_out_packets[0x40]; - - u8 dec_bypass_packets[0x40]; - - u8 enc_in_packets[0x40]; - - u8 enc_out_packets[0x40]; - - u8 enc_bypass_packets[0x40]; - - u8 drop_dec_packets[0x40]; - - u8 failed_auth_dec_packets[0x40]; - - u8 drop_enc_packets[0x40]; - - u8 success_add_sa[0x40]; - - u8 fail_add_sa[0x40]; - - u8 success_delete_sa[0x40]; - - u8 fail_delete_sa[0x40]; - - u8 dropped_cmd[0x40]; -}; - enum { MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1, MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2, @@ -464,90 +402,4 @@ struct mlx5_ifc_fpga_qp_error_event_bits { u8 reserved_at_c0[0x8]; u8 fpga_qpn[0x18]; }; -enum mlx5_ifc_fpga_ipsec_response_syndrome { - MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0, - MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1, - MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2, - MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3, -}; - -struct mlx5_ifc_fpga_ipsec_cmd_resp { - __be32 syndrome; - union { - __be32 sw_sa_handle; - __be32 flags; - }; - u8 reserved[24]; -} __packed; - -enum mlx5_ifc_fpga_ipsec_cmd_opcode { - 
MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0, - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1, - MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2, - MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3, - MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4, - MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5, -}; - -enum mlx5_ifc_fpga_ipsec_cap { - MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0), -}; - -struct mlx5_ifc_fpga_ipsec_cmd_cap { - __be32 cmd; - __be32 flags; - u8 reserved[24]; -} __packed; - -enum mlx5_ifc_fpga_ipsec_sa_flags { - MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0), - MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1), - MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2), - MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3), - MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4), - MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5), - MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6), - MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7), -}; - -enum mlx5_ifc_fpga_ipsec_sa_enc_mode { - MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0, - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1, - MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3, -}; - -struct mlx5_ifc_fpga_ipsec_sa_v1 { - __be32 cmd; - u8 key_enc[32]; - u8 key_auth[32]; - __be32 sip[4]; - __be32 dip[4]; - union { - struct { - __be32 reserved; - u8 salt_iv[8]; - __be32 salt; - } __packed gcm; - struct { - u8 salt[16]; - } __packed cbc; - }; - __be32 spi; - __be32 sw_sa_handle; - __be16 tfclen; - u8 enc_mode; - u8 reserved1[2]; - u8 flags; - u8 reserved2[2]; -}; - -struct mlx5_ifc_fpga_ipsec_sa { - struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1; - __be16 udp_sp; - __be16 udp_dp; - u8 reserved1[4]; - __be32 esn; - __be16 vid; /* only 12 bits, rest is reserved */ - __be16 reserved2; -} __packed; #endif /* MLX5_IFC_FPGA_H */ -- cgit v1.2.3 From df439fcb1cd4fe8e8b1c4065db4ef1b544aa5d9e Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:37 +0300 Subject: net/mlx5: Delete metadata handling logic Remove the FPGA IPsec-specific metadata handling logic, which is not required for mlx5 NIC devices.
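For context, a condensed sketch of what "metadata handling" means here, based on the driver helpers deleted in this patch (struct and variable names simplified; only the FPGA path ever used this): the device expected a small pseudo-header, tagged with a dedicated EtherType, squeezed in between the MAC addresses and the real EtherType, and the driver had to build it on TX and strip it on RX.

  /* simplified from the removed mlx5e_ipsec_metadata and add/remove helpers */
  struct ipsec_metadata {
          unsigned char syndrome;
          unsigned char content[5];  /* RX: next header + SA handle; TX: 1/MSS, seq, proto */
          __be16 ethertype;          /* MLX5E_METADATA_ETHER_TYPE */
  } __packed;

  struct ethhdr *eth;

  /* TX: open a gap in front of the frame, slide the MAC addresses up,
   * and let the metadata take the place of the original EtherType. */
  eth = (struct ethhdr *)skb_push(skb, sizeof(struct ipsec_metadata));
  memmove(skb->data, skb->data + sizeof(struct ipsec_metadata), 2 * ETH_ALEN);
  eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

  /* RX: slide the MAC addresses back down and drop the metadata. */
  memmove(skb->data + MLX5E_METADATA_ETHER_LEN, skb->data, 2 * ETH_ALEN);
  skb_pull(skb, MLX5E_METADATA_ETHER_LEN);

mlx5 NIC devices never use this scheme, hence the whole thing can go.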
Link: https://lore.kernel.org/r/fe67a1de4fc6032a940e18c8a6461a1ccf902fc4.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- .../net/ethernet/mellanox/mlx5/core/accel/accel.h | 36 ---- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 6 - .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 199 --------------------- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 3 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 - 5 files changed, 245 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h deleted file mode 100644 index 82b185121edb..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef __MLX5E_ACCEL_H__ -#define __MLX5E_ACCEL_H__ - -#ifdef CONFIG_MLX5_ACCEL - -#include -#include - -static inline bool is_metadata_hdr_valid(struct sk_buff *skb) -{ - __be16 *ethtype; - - if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN)) - return false; - ethtype = (__be16 *)(skb->data + ETH_ALEN * 2); - if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE)) - return false; - return true; -} - -static inline void remove_metadata_hdr(struct sk_buff *skb) -{ - struct ethhdr *old_eth; - struct ethhdr *new_eth; - - /* Remove the metadata from the buffer */ - old_eth = (struct ethhdr *)skb->data; - new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN); - memmove(new_eth, old_eth, 2 * ETH_ALEN); - /* Ethertype is already in its new place */ - skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN); -} - -#endif /* CONFIG_MLX5_ACCEL */ - -#endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 6164c7f59efb..282d3abab8c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -116,7 +116,6 @@ struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_rule ipsec_rule; }; -void mlx5e_ipsec_build_inverse_table(void); int mlx5e_ipsec_init(struct mlx5e_priv *priv); void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); @@ -125,11 +124,6 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev, unsigned int handle); #else - -static inline void mlx5e_ipsec_build_inverse_table(void) -{ -} - static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv) { return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index b56fea142c24..28e0500d4a48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -37,75 +37,13 @@ #include "accel/ipsec_offload.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec.h" -#include "accel/accel.h" #include "en.h" -enum { - MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11, - MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12, - MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17, -}; - -struct mlx5e_ipsec_rx_metadata { - unsigned char nexthdr; - __be32 sa_handle; -} __packed; - enum { MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8, MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9, }; -struct mlx5e_ipsec_tx_metadata { - __be16 mss_inv; /* 1/MSS in 16bit fixed point, only for LSO */ - __be16 seq; /* LSBs of the first 
TCP seq, only for LSO */ - u8 esp_next_proto; /* Next protocol of ESP */ -} __packed; - -struct mlx5e_ipsec_metadata { - unsigned char syndrome; - union { - unsigned char raw[5]; - /* from FPGA to host, on successful decrypt */ - struct mlx5e_ipsec_rx_metadata rx; - /* from host to FPGA */ - struct mlx5e_ipsec_tx_metadata tx; - } __packed content; - /* packet type ID field */ - __be16 ethertype; -} __packed; - -#define MAX_LSO_MSS 2048 - -/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */ -static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS]; - -static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb) -{ - return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size]; -} - -static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb) -{ - struct mlx5e_ipsec_metadata *mdata; - struct ethhdr *eth; - - if (unlikely(skb_cow_head(skb, sizeof(*mdata)))) - return ERR_PTR(-ENOMEM); - - eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata)); - skb->mac_header -= sizeof(*mdata); - mdata = (struct mlx5e_ipsec_metadata *)(eth + 1); - - memmove(skb->data, skb->data + sizeof(*mdata), - 2 * ETH_ALEN); - - eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); - - memset(mdata->content.raw, 0, sizeof(mdata->content.raw)); - return mdata; -} - static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x) { unsigned int alen = crypto_aead_authsize(x->data); @@ -244,40 +182,6 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, skb_store_bits(skb, iv_offset, &seqno, 8); } -static void mlx5e_ipsec_set_metadata(struct sk_buff *skb, - struct mlx5e_ipsec_metadata *mdata, - struct xfrm_offload *xo) -{ - struct ip_esp_hdr *esph; - struct tcphdr *tcph; - - if (skb_is_gso(skb)) { - /* Add LSO metadata indication */ - esph = ip_esp_hdr(skb); - tcph = inner_tcp_hdr(skb); - netdev_dbg(skb->dev, " Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n", - skb->network_header, - skb->transport_header, - skb->inner_network_header, - skb->inner_transport_header); - netdev_dbg(skb->dev, " Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n", - skb->len, skb_shinfo(skb)->gso_size, - ntohs(tcph->source), ntohs(tcph->dest), - ntohl(tcph->seq), ntohl(esph->seq_no)); - mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP; - mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb); - mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF); - } else { - mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD; - } - mdata->content.tx.esp_next_proto = xo->proto; - - netdev_dbg(skb->dev, " TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n", - mdata->syndrome, mdata->content.tx.esp_next_proto, - ntohs(mdata->content.tx.mss_inv), - ntohs(mdata->content.tx.seq)); -} - void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe, struct mlx5e_accel_tx_ipsec_state *ipsec_st, struct mlx5_wqe_inline_seg *inlseg) @@ -363,7 +267,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct xfrm_offload *xo = xfrm_offload(skb); struct mlx5e_ipsec_sa_entry *sa_entry; - struct mlx5e_ipsec_metadata *mdata; struct xfrm_state *x; struct sec_path *sp; @@ -392,19 +295,8 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, goto drop; } - if (MLX5_CAP_GEN(priv->mdev, fpga)) { - mdata = mlx5e_ipsec_add_metadata(skb); - if (IS_ERR(mdata)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata); - goto drop; - } - } - sa_entry = (struct mlx5e_ipsec_sa_entry 
*)x->xso.offload_handle; sa_entry->set_iv_op(skb, x, xo); - if (MLX5_CAP_GEN(priv->mdev, fpga)) - mlx5e_ipsec_set_metadata(skb, mdata, xo); - mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st); return true; @@ -414,79 +306,6 @@ drop: return false; } -static inline struct xfrm_state * -mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, - struct mlx5e_ipsec_metadata *mdata) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct xfrm_offload *xo; - struct xfrm_state *xs; - struct sec_path *sp; - u32 sa_handle; - - sp = secpath_set(skb); - if (unlikely(!sp)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc); - return NULL; - } - - sa_handle = be32_to_cpu(mdata->content.rx.sa_handle); - xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle); - if (unlikely(!xs)) { - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss); - return NULL; - } - - sp = skb_sec_path(skb); - sp->xvec[sp->len++] = xs; - sp->olen++; - - xo = xfrm_offload(skb); - xo->flags = CRYPTO_DONE; - switch (mdata->syndrome) { - case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED: - xo->status = CRYPTO_SUCCESS; - if (likely(priv->ipsec->no_trailer)) { - xo->flags |= XFRM_ESP_NO_TRAILER; - xo->proto = mdata->content.rx.nexthdr; - } - break; - case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED: - xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; - break; - case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO: - xo->status = CRYPTO_INVALID_PROTOCOL; - break; - default: - atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); - return NULL; - } - return xs; -} - -struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb, u32 *cqe_bcnt) -{ - struct mlx5e_ipsec_metadata *mdata; - struct xfrm_state *xs; - - if (!is_metadata_hdr_valid(skb)) - return skb; - - /* Use the metadata */ - mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN); - xs = mlx5e_ipsec_build_sp(netdev, skb, mdata); - if (unlikely(!xs)) { - kfree_skb(skb); - return NULL; - } - - remove_metadata_hdr(skb); - *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; - - return skb; -} - enum { MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED, MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED, @@ -541,21 +360,3 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); } } - -void mlx5e_ipsec_build_inverse_table(void) -{ - u16 mss_inv; - u32 mss; - - /* Calculate 1/x inverse table for use in GSO data path. - * Using this table, we provide the IPSec accelerator with the value of - * 1/gso_size so that it can infer the position of each segment inside - * the GSO, and increment the ESP sequence number, and generate the IV. 
- * The HW needs this value in Q0.16 fixed-point number format - */ - mlx5e_ipsec_inverse_table[1] = htons(0xFFFF); - for (mss = 2; mss < MAX_LSO_MSS; mss++) { - mss_inv = div_u64(1ULL << 32, mss) >> 16; - mlx5e_ipsec_inverse_table[mss] = htons(mss_inv); - } -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 428881e0adcb..0ae4e12ce528 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -53,9 +53,6 @@ struct mlx5e_accel_tx_ipsec_state { #ifdef CONFIG_MLX5_EN_IPSEC -struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, - struct sk_buff *skb, u32 *cqe_bcnt); - void mlx5e_ipsec_inverse_table_init(void); void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_offload *xo); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0a303879d0f4..83365d04050b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5693,7 +5693,6 @@ int mlx5e_init(void) { int ret; - mlx5e_ipsec_build_inverse_table(); mlx5e_build_ptys2ethtool_map(); ret = auxiliary_driver_register(&mlx5e_driver); if (ret) -- cgit v1.2.3 From 501a9b23b23cb356362740347f40f7a46a9e866f Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:38 +0300 Subject: net/mlx5: Remove not-used IDA field from IPsec struct The IDA halloc variable is not needed and can be removed. Link: https://lore.kernel.org/r/cbecfbe01621e1b8bde746aa7f6c08497e656a25.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 3 +-- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 299e3f0fcb5c..213fbf63dde9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -425,7 +425,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) hash_init(ipsec->sadb_rx); spin_lock_init(&ipsec->sadb_rx_lock); - ida_init(&ipsec->halloc); ipsec->en_priv = priv; ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER); @@ -452,7 +451,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) mlx5e_accel_ipsec_fs_cleanup(priv); destroy_workqueue(ipsec->wq); - ida_destroy(&ipsec->halloc); kfree(ipsec); priv->ipsec = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 282d3abab8c5..51ae6145b6fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -82,8 +82,7 @@ struct mlx5e_ipsec { struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); bool no_trailer; - spinlock_t sadb_rx_lock; /* Protects sadb_rx and halloc */ - struct ida halloc; + spinlock_t sadb_rx_lock; /* Protects sadb_rx */ struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_stats stats; struct workqueue_struct *wq; -- cgit v1.2.3 From 3c811a6b45524a64fb9bc7e1c3292f7216279a75 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 
6 Apr 2022 11:25:39 +0300 Subject: net/mlx5: Remove XFRM no_trailer flag Only FPGA needed this NO_TRAILER flag, so remove this assignment. Link: https://lore.kernel.org/r/636d75421e1ca4254a062537eea001ab0e50e19b.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 2 -- 3 files changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 213fbf63dde9..13f6fed74950 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -426,8 +426,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) hash_init(ipsec->sadb_rx); spin_lock_init(&ipsec->sadb_rx_lock); ipsec->en_priv = priv; - ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) & - MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER); ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, priv->netdev->name); if (!ipsec->wq) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 51ae6145b6fe..6e4f0dbbd4e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -81,7 +81,6 @@ struct mlx5e_ipsec_tx; struct mlx5e_ipsec { struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); - bool no_trailer; spinlock_t sadb_rx_lock; /* Protects sadb_rx */ struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_stats stats; struct workqueue_struct *wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 28e0500d4a48..8e0cf5e65100 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -347,8 +347,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) { case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED: xo->status = CRYPTO_SUCCESS; - if (WARN_ON_ONCE(priv->ipsec->no_trailer)) - xo->flags |= XFRM_ESP_NO_TRAILER; break; case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED: xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; -- cgit v1.2.3 From 0d90bd551446b0a3c7e8777b120c67a7d741657c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:40 +0300 Subject: net/mlx5: Remove FPGA IPsec-specific statistics Delete the statistics that are not used anymore.
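As a reminder of how these statistics groups are wired up (the hunks below only delete entries from this machinery), each group is a table of counter descriptors carrying an ethtool string plus the byte offset of the corresponding field in the stats structure, and the FILL_STATS callback walks that table. A minimal sketch of the pattern, with illustrative names rather than the driver's exact macros:

  struct counter_desc {
          char format[32];     /* ethtool counter name */
          size_t offset;       /* offset of the field in the stats struct */
  };

  #define DECLARE_SW_STAT(fld) \
          { #fld, offsetof(struct mlx5e_ipsec_sw_stats, fld) }

  static const struct counter_desc sw_stats_desc[] = {
          DECLARE_SW_STAT(ipsec_rx_drop_sp_alloc),
          DECLARE_SW_STAT(ipsec_tx_drop_trailer),
  };

  /* the fill callback reads each atomic64_t counter through its offset */
  for (i = 0; i < ARRAY_SIZE(sw_stats_desc); i++)
          data[idx++] = atomic64_read((atomic64_t *)
                        ((char *)sw_stats + sw_stats_desc[i].offset));

The software counters keep following this pattern; only the FPGA hardware group, whose values came from device counters that no longer exist, is dropped.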
Link: https://lore.kernel.org/r/3f194752881e095910c887dd5cede1dcba6acaf3.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 19 --------- .../mellanox/mlx5/core/en_accel/ipsec_stats.c | 46 ---------------------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 1 - 5 files changed, 68 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 6e4f0dbbd4e4..ee50052cbcb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -55,24 +55,6 @@ struct mlx5e_ipsec_sw_stats { atomic64_t ipsec_tx_drop_no_state; atomic64_t ipsec_tx_drop_not_ip; atomic64_t ipsec_tx_drop_trailer; - atomic64_t ipsec_tx_drop_metadata; -}; - -struct mlx5e_ipsec_stats { - u64 ipsec_dec_in_packets; - u64 ipsec_dec_out_packets; - u64 ipsec_dec_bypass_packets; - u64 ipsec_enc_in_packets; - u64 ipsec_enc_out_packets; - u64 ipsec_enc_bypass_packets; - u64 ipsec_dec_drop_packets; - u64 ipsec_dec_auth_fail_packets; - u64 ipsec_enc_drop_packets; - u64 ipsec_add_sa_success; - u64 ipsec_add_sa_fail; - u64 ipsec_del_sa_success; - u64 ipsec_del_sa_fail; - u64 ipsec_cmd_drop; }; struct mlx5e_accel_fs_esp; @@ -83,7 +65,6 @@ struct mlx5e_ipsec { DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); spinlock_t sadb_rx_lock; /* Protects sadb_rx */ struct mlx5e_ipsec_sw_stats sw_stats; - struct mlx5e_ipsec_stats stats; struct workqueue_struct *wq; struct mlx5e_accel_fs_esp *rx_fs; struct mlx5e_ipsec_tx *tx_fs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 1607c305d3ab..80886290fd22 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -39,23 +39,6 @@ #include "fpga/sdk.h" #include "en_accel/ipsec.h" -static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_in_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_out_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_bypass_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_in_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_out_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_bypass_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_drop_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_dec_auth_fail_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_enc_drop_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_success) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_add_sa_fail) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_success) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_del_sa_fail) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_stats, ipsec_cmd_drop) }, -}; - static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) }, @@ -64,13 +47,11 @@ static const struct 
counter_desc mlx5e_ipsec_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_trailer) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_metadata) }, }; #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) -#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc) #define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw) @@ -102,31 +83,4 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw) return idx; } -static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw) -{ - return 0; -} - -static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) -{ - int ret = 0; - - if (priv->ipsec) - ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats, - NUM_IPSEC_HW_COUNTERS); - if (ret) - memset(&priv->ipsec->stats, 0, sizeof(priv->ipsec->stats)); -} - -static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw) -{ - return idx; -} - -static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw) -{ - return idx; -} - MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0); -MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 6b7e7ea6ded2..47f7b4c034cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1112,7 +1112,6 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = { &MLX5E_STATS_GRP(per_port_buff_congest), #ifdef CONFIG_MLX5_EN_IPSEC &MLX5E_STATS_GRP(ipsec_sw), - &MLX5E_STATS_GRP(ipsec_hw), #endif &MLX5E_STATS_GRP(ptp), }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 5123a220d7a4..57fa0489eeb8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -2443,7 +2443,6 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(pme), #ifdef CONFIG_MLX5_EN_IPSEC &MLX5E_STATS_GRP(ipsec_sw), - &MLX5E_STATS_GRP(ipsec_hw), #endif &MLX5E_STATS_GRP(tls), &MLX5E_STATS_GRP(channels), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a7a025d15c14..e48b15b55b6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -482,7 +482,6 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio); extern MLX5E_DECLARE_STATS_GRP(pme); extern MLX5E_DECLARE_STATS_GRP(channels); extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest); -extern MLX5E_DECLARE_STATS_GRP(ipsec_hw); extern MLX5E_DECLARE_STATS_GRP(ipsec_sw); extern MLX5E_DECLARE_STATS_GRP(ptp); -- cgit v1.2.3 From 74ec29bdb0ebe09d5490cd50df5ab3c309d6b276 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:41 +0300 Subject: RDMA/mlx5: Delete never supported IPsec flow action The IPSEC_REQUIRED_METADATA capability bit is never set, and can be safely removed from the flow action flags. 
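The hunk below is the only consumer of that capability bit. The pattern is a plain bitmask test on the accel layer's capability word, so once no backend ever sets the bit the branch is provably dead; condensed from the removed code (identifiers as in the hunk):

  u32 caps = mlx5_accel_ipsec_device_caps(dev->mdev);

  /* never true: no remaining backend reports CAP_REQUIRED_METADATA */
  if (caps & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
          resp->flow_action_flags |=
                  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;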
Link: https://lore.kernel.org/r/697cd60bd5c9b6a004c449c1a41c2798fac844ff.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/infiniband/hw/mlx5/main.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 32a0ea820573..d2e67ecc5479 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1797,15 +1797,11 @@ static int set_ucontext_resp(struct ib_ucontext *uctx, if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS)) resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM; - if (mlx5_accel_ipsec_device_caps(dev->mdev) & - MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA) - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA; if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING; if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN) resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN; - /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */ } resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 : -- cgit v1.2.3 From de8bdb476908e64805df4bfbad20618cbb1f9ffa Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:42 +0300 Subject: RDMA/mlx5: Drop crypto flow steering API The mlx5 flow steering crypto API was intended to be used with FPGA devices, which have not been supported for years. The removal of the mlx5 crypto FPGA code, together with the inability to configure encryption keys, makes the flow steering API completely unusable. So delete the code; any ESP flow steering request will now fail with a not-supported error, which is what happens anyway, since no device supports this type of API. Link: https://lore.kernel.org/r/634a5face7734381463d809bfb89850f6998deac.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/infiniband/hw/mlx5/fs.c | 223 +-------------------- drivers/infiniband/hw/mlx5/main.c | 27 --- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 5 +- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 4 +- include/linux/mlx5/accel.h | 13 +- 5 files changed, 10 insertions(+), 262 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 661ed2b44508..9c2886bc72cb 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include "mlx5_ib.h" @@ -148,16 +147,6 @@ int parse_flow_flow_action(struct mlx5_ib_flow_action *maction, { switch (maction->ib_action.type) { - case IB_FLOW_ACTION_ESP: - if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT)) - return -EINVAL; - /* Currently only AES_GCM keymat is supported by the driver */ - action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx; - action->action |= is_egress ?
- MLX5_FLOW_CONTEXT_ACTION_ENCRYPT : - MLX5_FLOW_CONTEXT_ACTION_DECRYPT; - return 0; case IB_FLOW_ACTION_UNSPECIFIED: if (maction->flow_action_raw.sub_type == MLX5_IB_FLOW_ACTION_MODIFY_HEADER) { @@ -368,14 +357,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, ib_spec->type & IB_FLOW_SPEC_INNER); break; case IB_FLOW_SPEC_ESP: - if (ib_spec->esp.mask.seq) - return -EOPNOTSUPP; - - MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, - ntohl(ib_spec->esp.mask.spi)); - MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, - ntohl(ib_spec->esp.val.spi)); - break; + return -EOPNOTSUPP; case IB_FLOW_SPEC_TCP: if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD)) @@ -587,47 +569,6 @@ static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr) return false; } -enum valid_spec { - VALID_SPEC_INVALID, - VALID_SPEC_VALID, - VALID_SPEC_NA, -}; - -static enum valid_spec -is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev, - const struct mlx5_flow_spec *spec, - const struct mlx5_flow_act *flow_act, - bool egress) -{ - const u32 *match_c = spec->match_criteria; - bool is_crypto = - (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | - MLX5_FLOW_CONTEXT_ACTION_DECRYPT)); - bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c); - bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP; - - /* - * Currently only crypto is supported in egress, when regular egress - * rules would be supported, always return VALID_SPEC_NA. - */ - if (!is_crypto) - return VALID_SPEC_NA; - - return is_crypto && is_ipsec && - (!egress || (!is_drop && - !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ? - VALID_SPEC_VALID : VALID_SPEC_INVALID; -} - -static bool is_valid_spec(struct mlx5_core_dev *mdev, - const struct mlx5_flow_spec *spec, - const struct mlx5_flow_act *flow_act, - bool egress) -{ - /* We curretly only support ipsec egress flow */ - return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID; -} - static bool is_valid_ethertype(struct mlx5_core_dev *mdev, const struct ib_flow_attr *flow_attr, bool check_inner) @@ -1154,8 +1095,7 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); - if (is_egress && - !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) { + if (is_egress) { err = -EINVAL; goto free; } @@ -1740,149 +1680,6 @@ unlock: return ERR_PTR(err); } -static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags) -{ - u32 flags = 0; - - if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA) - flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA; - - return flags; -} - -#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED \ - MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA -static struct ib_flow_action * -mlx5_ib_create_flow_action_esp(struct ib_device *device, - const struct ib_flow_action_attrs_esp *attr, - struct uverbs_attr_bundle *attrs) -{ - struct mlx5_ib_dev *mdev = to_mdev(device); - struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm; - struct mlx5_accel_esp_xfrm_attrs accel_attrs = {}; - struct mlx5_ib_flow_action *action; - u64 action_flags; - u64 flags; - int err = 0; - - err = uverbs_get_flags64( - &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, - ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1)); - if (err) - return ERR_PTR(err); - - flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags); - - /* We current only support a subset of the standard features. 
Only a - * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn - * (with overlap). Full offload mode isn't supported. - */ - if (!attr->keymat || attr->replay || attr->encap || - attr->spi || attr->seq || attr->tfc_pad || - attr->hard_limit_pkts || - (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | - IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT))) - return ERR_PTR(-EOPNOTSUPP); - - if (attr->keymat->protocol != - IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM) - return ERR_PTR(-EOPNOTSUPP); - - aes_gcm = &attr->keymat->keymat.aes_gcm; - - if (aes_gcm->icv_len != 16 || - aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) - return ERR_PTR(-EOPNOTSUPP); - - action = kmalloc(sizeof(*action), GFP_KERNEL); - if (!action) - return ERR_PTR(-ENOMEM); - - action->esp_aes_gcm.ib_flags = attr->flags; - memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key, - sizeof(accel_attrs.keymat.aes_gcm.aes_key)); - accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8; - memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt, - sizeof(accel_attrs.keymat.aes_gcm.salt)); - memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv, - sizeof(accel_attrs.keymat.aes_gcm.seq_iv)); - accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8; - accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ; - accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; - - accel_attrs.esn = attr->esn; - if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) - accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED; - if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) - accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; - - if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT) - accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT; - - action->esp_aes_gcm.ctx = - mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags); - if (IS_ERR(action->esp_aes_gcm.ctx)) { - err = PTR_ERR(action->esp_aes_gcm.ctx); - goto err_parse; - } - - action->esp_aes_gcm.ib_flags = attr->flags; - - return &action->ib_action; - -err_parse: - kfree(action); - return ERR_PTR(err); -} - -static int -mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action, - const struct ib_flow_action_attrs_esp *attr, - struct uverbs_attr_bundle *attrs) -{ - struct mlx5_ib_flow_action *maction = to_mflow_act(action); - struct mlx5_accel_esp_xfrm_attrs accel_attrs; - int err = 0; - - if (attr->keymat || attr->replay || attr->encap || - attr->spi || attr->seq || attr->tfc_pad || - attr->hard_limit_pkts || - (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | - IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS | - IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))) - return -EOPNOTSUPP; - - /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can - * be modified. 
- */ - if (!(maction->esp_aes_gcm.ib_flags & - IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) && - attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED | - IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)) - return -EINVAL; - - memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs, - sizeof(accel_attrs)); - - accel_attrs.esn = attr->esn; - if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW) - accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; - else - accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; - - err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx, - &accel_attrs); - if (err) - return err; - - maction->esp_aes_gcm.ib_flags &= - ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; - maction->esp_aes_gcm.ib_flags |= - attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW; - - return 0; -} - static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction) { switch (maction->flow_action_raw.sub_type) { @@ -1906,13 +1703,6 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action) struct mlx5_ib_flow_action *maction = to_mflow_act(action); switch (action->type) { - case IB_FLOW_ACTION_ESP: - /* - * We only support aes_gcm by now, so we implicitly know this is - * the underline crypto. - */ - mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx); - break; case IB_FLOW_ACTION_UNSPECIFIED: destroy_flow_action_raw(maction); break; @@ -2709,11 +2499,6 @@ static const struct ib_device_ops flow_ops = { .destroy_flow_action = mlx5_ib_destroy_flow_action, }; -static const struct ib_device_ops flow_ipsec_ops = { - .create_flow_action_esp = mlx5_ib_create_flow_action_esp, - .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp, -}; - int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) { dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL); @@ -2724,9 +2509,5 @@ int mlx5_ib_fs_init(struct mlx5_ib_dev *dev) mutex_init(&dev->flow_db->lock); ib_set_device_ops(&dev->ib_dev, &flow_ops); - if (mlx5_accel_ipsec_device_caps(dev->mdev) & - MLX5_ACCEL_IPSEC_CAP_DEVICE) - ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops); - return 0; } diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d2e67ecc5479..61aa196d6484 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -41,7 +41,6 @@ #include "wr.h" #include "restrack.h" #include "counters.h" -#include #include #include #include @@ -906,10 +905,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, MLX5_RX_HASH_SRC_PORT_UDP | MLX5_RX_HASH_DST_PORT_UDP | MLX5_RX_HASH_INNER; - if (mlx5_accel_ipsec_device_caps(dev->mdev) & - MLX5_ACCEL_IPSEC_CAP_DEVICE) - resp.rss_caps.rx_hash_fields_mask |= - MLX5_RX_HASH_IPSEC_SPI; resp.response_length += sizeof(resp.rss_caps); } } else { @@ -1791,19 +1786,6 @@ static int set_ucontext_resp(struct ib_ucontext *uctx, resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? 
MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1; - - if (mlx5_accel_ipsec_device_caps(dev->mdev) & - MLX5_ACCEL_IPSEC_CAP_DEVICE) { - if (mlx5_get_flow_namespace(dev->mdev, - MLX5_FLOW_NAMESPACE_EGRESS)) - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM; - if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi)) - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING; - if (mlx5_accel_ipsec_device_caps(dev->mdev) & - MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN) - resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN; - } - resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 : bfregi->total_num_bfregs - bfregi->num_dyn_bfregs; resp->num_ports = dev->num_ports; @@ -3600,13 +3582,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR, &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC), &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY)); -ADD_UVERBS_ATTRIBUTES_SIMPLE( - mlx5_ib_flow_action, - UVERBS_OBJECT_FLOW_ACTION, - UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, - UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, - enum mlx5_ib_uapi_flow_action_flags)); - ADD_UVERBS_ATTRIBUTES_SIMPLE( mlx5_ib_query_context, UVERBS_OBJECT_DEVICE, @@ -3624,8 +3599,6 @@ static const struct uapi_definition mlx5_ib_defs[] = { UAPI_DEF_CHAIN(mlx5_ib_std_types_defs), UAPI_DEF_CHAIN(mlx5_ib_dm_defs), - UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, - &mlx5_ib_flow_action), UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context), UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR, UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 45296ec2d055..387be13b2f1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -133,8 +133,7 @@ void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags) + const struct mlx5_accel_esp_xfrm_attrs *attrs) { const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; struct mlx5_accel_esp_xfrm *xfrm; @@ -142,7 +141,7 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) return ERR_PTR(-EOPNOTSUPP); - xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags); + xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, 0); if (IS_ERR(xfrm)) return xfrm; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 13f6fed74950..f6e3b549424f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -333,9 +333,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) /* create xfrm */ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); - sa_entry->xfrm = - mlx5_accel_esp_create_xfrm(priv->mdev, &attrs, - MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA); + sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs); if (IS_ERR(sa_entry->xfrm)) { err = PTR_ERR(sa_entry->xfrm); goto err_sa_entry; diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index dacf69516002..af67d51308cf 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -111,10 +111,6 @@ struct 
mlx5_accel_esp_xfrm { struct mlx5_accel_esp_xfrm_attrs attrs; }; -enum { - MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0, -}; - enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, @@ -132,8 +128,7 @@ u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags); + const struct mlx5_accel_esp_xfrm_attrs *attrs); void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs); @@ -144,8 +139,10 @@ static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { ret static inline struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags) { return ERR_PTR(-EOPNOTSUPP); } + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + return ERR_PTR(-EOPNOTSUPP); +} static inline void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} static inline int -- cgit v1.2.3 From 32313c6ae622ed7aa553fa1ee22d8e5f24146f0e Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:43 +0300 Subject: RDMA/core: Delete IPsec flow action logic from the core The removal of the mlx5 flow steering logic left the kernel without any RDMA driver that implements the flow action callbacks supplied by RDMA/core. Any user access to them already resulted in an EOPNOTSUPP error, which can be achieved just as well by simply removing the ioctl implementation. Link: https://lore.kernel.org/r/a638e376314a2eb1c66f597c0bbeeab2e5de7faf.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Acked-by: Jason Gunthorpe Signed-off-by: Leon Romanovsky --- drivers/infiniband/core/device.c | 2 - .../infiniband/core/uverbs_std_types_flow_action.c | 383 +-------------------- include/rdma/ib_verbs.h | 8 - 3 files changed, 1 insertion(+), 392 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index a311df07b1bd..4deb60a3b43f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2613,7 +2613,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, create_counters); SET_DEVICE_OP(dev_ops, create_cq); SET_DEVICE_OP(dev_ops, create_flow); - SET_DEVICE_OP(dev_ops, create_flow_action_esp); SET_DEVICE_OP(dev_ops, create_qp); SET_DEVICE_OP(dev_ops, create_rwq_ind_table); SET_DEVICE_OP(dev_ops, create_srq); @@ -2676,7 +2675,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, modify_ah); SET_DEVICE_OP(dev_ops, modify_cq); SET_DEVICE_OP(dev_ops, modify_device); - SET_DEVICE_OP(dev_ops, modify_flow_action_esp); SET_DEVICE_OP(dev_ops, modify_hw_stat); SET_DEVICE_OP(dev_ops, modify_port); SET_DEVICE_OP(dev_ops, modify_qp); diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c index d42ed7ff223e..0ddcf6da66c4 100644 --- a/drivers/infiniband/core/uverbs_std_types_flow_action.c +++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c @@ -46,385 +46,6 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject, return action->device->ops.destroy_flow_action(action); } -static u64 esp_flags_uverbs_to_verbs(struct uverbs_attr_bundle *attrs, - u32 flags, bool is_modify) -{ -
u64 verbs_flags = flags; - - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ESN)) - verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED; - - if (is_modify && uverbs_attr_is_valid(attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) - verbs_flags |= IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS; - - return verbs_flags; -}; - -static int validate_flow_action_esp_keymat_aes_gcm(struct ib_flow_action_attrs_esp_keymats *keymat) -{ - struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm = - &keymat->keymat.aes_gcm; - - if (aes_gcm->iv_algo > IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ) - return -EOPNOTSUPP; - - if (aes_gcm->key_len != 32 && - aes_gcm->key_len != 24 && - aes_gcm->key_len != 16) - return -EINVAL; - - if (aes_gcm->icv_len != 16 && - aes_gcm->icv_len != 8 && - aes_gcm->icv_len != 12) - return -EINVAL; - - return 0; -} - -static int (* const flow_action_esp_keymat_validate[])(struct ib_flow_action_attrs_esp_keymats *keymat) = { - [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = validate_flow_action_esp_keymat_aes_gcm, -}; - -static int flow_action_esp_replay_none(struct ib_flow_action_attrs_esp_replays *replay, - bool is_modify) -{ - /* This is used in order to modify an esp flow action with an enabled - * replay protection to a disabled one. This is only supported via - * modify, as in create verb we can simply drop the REPLAY attribute and - * achieve the same thing. - */ - return is_modify ? 0 : -EINVAL; -} - -static int flow_action_esp_replay_def_ok(struct ib_flow_action_attrs_esp_replays *replay, - bool is_modify) -{ - /* Some replay protections could always be enabled without validating - * anything. - */ - return 0; -} - -static int (* const flow_action_esp_replay_validate[])(struct ib_flow_action_attrs_esp_replays *replay, - bool is_modify) = { - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = flow_action_esp_replay_none, - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = flow_action_esp_replay_def_ok, -}; - -static int parse_esp_ip(enum ib_flow_spec_type proto, - const void __user *val_ptr, - size_t len, union ib_flow_spec *out) -{ - int ret; - const struct ib_uverbs_flow_ipv4_filter ipv4 = { - .src_ip = cpu_to_be32(0xffffffffUL), - .dst_ip = cpu_to_be32(0xffffffffUL), - .proto = 0xff, - .tos = 0xff, - .ttl = 0xff, - .flags = 0xff, - }; - const struct ib_uverbs_flow_ipv6_filter ipv6 = { - .src_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - .dst_ip = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - .flow_label = cpu_to_be32(0xffffffffUL), - .next_hdr = 0xff, - .traffic_class = 0xff, - .hop_limit = 0xff, - }; - union { - struct ib_uverbs_flow_ipv4_filter ipv4; - struct ib_uverbs_flow_ipv6_filter ipv6; - } user_val = {}; - const void *user_pmask; - size_t val_len; - - /* If the flow IPv4/IPv6 flow specifications are extended, the mask - * should be changed as well. 
- */ - BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv4_filter, flags) + - sizeof(ipv4.flags) != sizeof(ipv4)); - BUILD_BUG_ON(offsetof(struct ib_uverbs_flow_ipv6_filter, reserved) + - sizeof(ipv6.reserved) != sizeof(ipv6)); - - switch (proto) { - case IB_FLOW_SPEC_IPV4: - if (len > sizeof(user_val.ipv4) && - !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv4), - len - sizeof(user_val.ipv4))) - return -EOPNOTSUPP; - - val_len = min_t(size_t, len, sizeof(user_val.ipv4)); - ret = copy_from_user(&user_val.ipv4, val_ptr, - val_len); - if (ret) - return -EFAULT; - - user_pmask = &ipv4; - break; - case IB_FLOW_SPEC_IPV6: - if (len > sizeof(user_val.ipv6) && - !ib_is_buffer_cleared(val_ptr + sizeof(user_val.ipv6), - len - sizeof(user_val.ipv6))) - return -EOPNOTSUPP; - - val_len = min_t(size_t, len, sizeof(user_val.ipv6)); - ret = copy_from_user(&user_val.ipv6, val_ptr, - val_len); - if (ret) - return -EFAULT; - - user_pmask = &ipv6; - break; - default: - return -EOPNOTSUPP; - } - - return ib_uverbs_kern_spec_to_ib_spec_filter(proto, user_pmask, - &user_val, - val_len, out); -} - -static int flow_action_esp_get_encap(struct ib_flow_spec_list *out, - struct uverbs_attr_bundle *attrs) -{ - struct ib_uverbs_flow_action_esp_encap uverbs_encap; - int ret; - - ret = uverbs_copy_from(&uverbs_encap, attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP); - if (ret) - return ret; - - /* We currently support only one encap */ - if (uverbs_encap.next_ptr) - return -EOPNOTSUPP; - - if (uverbs_encap.type != IB_FLOW_SPEC_IPV4 && - uverbs_encap.type != IB_FLOW_SPEC_IPV6) - return -EOPNOTSUPP; - - return parse_esp_ip(uverbs_encap.type, - u64_to_user_ptr(uverbs_encap.val_ptr), - uverbs_encap.len, - &out->spec); -} - -struct ib_flow_action_esp_attr { - struct ib_flow_action_attrs_esp hdr; - struct ib_flow_action_attrs_esp_keymats keymat; - struct ib_flow_action_attrs_esp_replays replay; - /* We currently support only one spec */ - struct ib_flow_spec_list encap; -}; - -#define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW -static int parse_flow_action_esp(struct ib_device *ib_dev, - struct uverbs_attr_bundle *attrs, - struct ib_flow_action_esp_attr *esp_attr, - bool is_modify) -{ - struct ib_uverbs_flow_action_esp uverbs_esp = {}; - int ret; - - /* Optional param, if it doesn't exist, we get -ENOENT and skip it */ - ret = uverbs_copy_from(&esp_attr->hdr.esn, attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_ESN); - if (IS_UVERBS_COPY_ERR(ret)) - return ret; - - /* This can be called from FLOW_ACTION_ESP_MODIFY where - * UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS is optional - */ - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS)) { - ret = uverbs_copy_from_or_zero(&uverbs_esp, attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS); - if (ret) - return ret; - - if (uverbs_esp.flags & ~((ESP_LAST_SUPPORTED_FLAG << 1) - 1)) - return -EOPNOTSUPP; - - esp_attr->hdr.spi = uverbs_esp.spi; - esp_attr->hdr.seq = uverbs_esp.seq; - esp_attr->hdr.tfc_pad = uverbs_esp.tfc_pad; - esp_attr->hdr.hard_limit_pkts = uverbs_esp.hard_limit_pkts; - } - esp_attr->hdr.flags = esp_flags_uverbs_to_verbs(attrs, uverbs_esp.flags, - is_modify); - - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT)) { - esp_attr->keymat.protocol = - uverbs_attr_get_enum_id(attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT); - ret = uverbs_copy_from_or_zero(&esp_attr->keymat.keymat, - attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT); - if (ret) - return ret; - - ret = flow_action_esp_keymat_validate[esp_attr->keymat.protocol](&esp_attr->keymat); - if 
(ret) - return ret; - - esp_attr->hdr.keymat = &esp_attr->keymat; - } - - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY)) { - esp_attr->replay.protocol = - uverbs_attr_get_enum_id(attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY); - - ret = uverbs_copy_from_or_zero(&esp_attr->replay.replay, - attrs, - UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY); - if (ret) - return ret; - - ret = flow_action_esp_replay_validate[esp_attr->replay.protocol](&esp_attr->replay, - is_modify); - if (ret) - return ret; - - esp_attr->hdr.replay = &esp_attr->replay; - } - - if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP)) { - ret = flow_action_esp_get_encap(&esp_attr->encap, attrs); - if (ret) - return ret; - - esp_attr->hdr.encap = &esp_attr->encap; - } - - return 0; -} - -static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)( - struct uverbs_attr_bundle *attrs) -{ - struct ib_uobject *uobj = uverbs_attr_get_uobject( - attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE); - struct ib_device *ib_dev = attrs->context->device; - int ret; - struct ib_flow_action *action; - struct ib_flow_action_esp_attr esp_attr = {}; - - if (!ib_dev->ops.create_flow_action_esp) - return -EOPNOTSUPP; - - ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false); - if (ret) - return ret; - - /* No need to check as this attribute is marked as MANDATORY */ - action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr, - attrs); - if (IS_ERR(action)) - return PTR_ERR(action); - - uverbs_flow_action_fill_action(action, uobj, ib_dev, - IB_FLOW_ACTION_ESP); - - return 0; -} - -static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)( - struct uverbs_attr_bundle *attrs) -{ - struct ib_uobject *uobj = uverbs_attr_get_uobject( - attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE); - struct ib_flow_action *action = uobj->object; - int ret; - struct ib_flow_action_esp_attr esp_attr = {}; - - if (!action->device->ops.modify_flow_action_esp) - return -EOPNOTSUPP; - - ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true); - if (ret) - return ret; - - if (action->type != IB_FLOW_ACTION_ESP) - return -EINVAL; - - return action->device->ops.modify_flow_action_esp(action, - &esp_attr.hdr, - attrs); -} - -static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { - [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = { - .type = UVERBS_ATTR_TYPE_PTR_IN, - UVERBS_ATTR_STRUCT( - struct ib_uverbs_flow_action_esp_keymat_aes_gcm, - aes_key), - }, -}; - -static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = { - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = { - .type = UVERBS_ATTR_TYPE_PTR_IN, - UVERBS_ATTR_NO_DATA(), - }, - [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = { - .type = UVERBS_ATTR_TYPE_PTR_IN, - UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, - size), - }, -}; - -DECLARE_UVERBS_NAMED_METHOD( - UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, - UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE, - UVERBS_OBJECT_FLOW_ACTION, - UVERBS_ACCESS_NEW, - UA_MANDATORY), - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, - UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, - hard_limit_pkts), - UA_MANDATORY), - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, - UVERBS_ATTR_TYPE(__u32), - UA_OPTIONAL), - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, - uverbs_flow_action_esp_keymat, - UA_MANDATORY), - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, - uverbs_flow_action_esp_replay, - UA_OPTIONAL), - UVERBS_ATTR_PTR_IN( - 
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, - UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap), - UA_OPTIONAL)); - -DECLARE_UVERBS_NAMED_METHOD( - UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY, - UVERBS_ATTR_IDR(UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE, - UVERBS_OBJECT_FLOW_ACTION, - UVERBS_ACCESS_WRITE, - UA_MANDATORY), - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, - UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp, - hard_limit_pkts), - UA_OPTIONAL), - UVERBS_ATTR_PTR_IN(UVERBS_ATTR_FLOW_ACTION_ESP_ESN, - UVERBS_ATTR_TYPE(__u32), - UA_OPTIONAL), - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, - uverbs_flow_action_esp_keymat, - UA_OPTIONAL), - UVERBS_ATTR_ENUM_IN(UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, - uverbs_flow_action_esp_replay, - UA_OPTIONAL), - UVERBS_ATTR_PTR_IN( - UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, - UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_encap), - UA_OPTIONAL)); - DECLARE_UVERBS_NAMED_METHOD_DESTROY( UVERBS_METHOD_FLOW_ACTION_DESTROY, UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, @@ -435,9 +56,7 @@ DECLARE_UVERBS_NAMED_METHOD_DESTROY( DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_FLOW_ACTION, UVERBS_TYPE_ALLOC_IDR(uverbs_free_flow_action), - &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE), - &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY), - &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)); + &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY)); const struct uapi_definition uverbs_def_obj_flow_action[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED( diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 69d883f7fb41..11ee4eaf84bd 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2497,15 +2497,7 @@ struct ib_device_ops { struct ib_flow_attr *flow_attr, struct ib_udata *udata); int (*destroy_flow)(struct ib_flow *flow_id); - struct ib_flow_action *(*create_flow_action_esp)( - struct ib_device *device, - const struct ib_flow_action_attrs_esp *attr, - struct uverbs_attr_bundle *attrs); int (*destroy_flow_action)(struct ib_flow_action *action); - int (*modify_flow_action_esp)( - struct ib_flow_action *action, - const struct ib_flow_action_attrs_esp *attr, - struct uverbs_attr_bundle *attrs); int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port, int state); int (*get_vf_config)(struct ib_device *device, int vf, u32 port, -- cgit v1.2.3 From 7e4e849121398ac4f7c7c2cf1d878c2ca6f62929 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:44 +0300 Subject: net/mlx5: Remove ipsec vs. ipsec offload file separation IPsec won't be initialized at all if the device doesn't support IPsec offload, which means the ipsec.c and ipsec_offload.c files can be combined into one file. Such a change will allow us to remove the ipsec_ops indirection.
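For context, a condensed sketch of the ipsec_ops indirection this patch makes removable (simplified from the accel/ipsec.c wrapper deleted below): every accel entry point bounced through a per-device ops table, even though only the ipsec_offload implementation remains.

  struct mlx5_accel_ipsec_ops {
          u32 (*device_caps)(struct mlx5_core_dev *mdev);
          /* ... init, cleanup, esp_create_xfrm, create_hw_context, ... */
  };

  /* before: thin wrapper + indirect call through mdev->ipsec_ops */
  u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
  {
          const struct mlx5_accel_ipsec_ops *ops = mdev->ipsec_ops;

          if (!ops || !ops->device_caps)
                  return 0;
          return ops->device_caps(mdev);
  }

  /* after the merge, the one remaining implementation can be called
   * directly, and both the ops table and the wrapper file disappear. */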
Link: https://lore.kernel.org/r/d0ac1fb7b14c10ae20a21ae17a393ee860c72ac3.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.c | 174 --------------------- .../net/ethernet/mellanox/mlx5/core/accel/ipsec.h | 96 ------------ .../mellanox/mlx5/core/accel/ipsec_offload.c | 145 ++++++++++++++++- .../mellanox/mlx5/core/accel/ipsec_offload.h | 61 +++++++- .../net/ethernet/mellanox/mlx5/core/en/params.c | 2 +- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 2 +- .../mellanox/mlx5/core/en_accel/ipsec_stats.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 11 files changed, 205 insertions(+), 285 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index e50361656305..ad852703a3cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -89,7 +89,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # Accelerations & FPGA # mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o -mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/ipsec.o +mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c deleted file mode 100644 index 387be13b2f1f..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#include - -#include "accel/ipsec.h" -#include "mlx5_core.h" -#include "accel/ipsec_offload.h" - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops; - int err = 0; - - ipsec_ops = mlx5_ipsec_offload_ops(mdev); - if (!ipsec_ops || !ipsec_ops->init) { - mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); - return; - } - - err = ipsec_ops->init(mdev); - if (err) { - mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err); - return; - } - - mdev->ipsec_ops = ipsec_ops; -} - -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->cleanup) - return; - - ipsec_ops->cleanup(mdev); -} - -u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->device_caps) - return 0; - - return ipsec_ops->device_caps(mdev); -} -EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_count) - return -EOPNOTSUPP; - - return ipsec_ops->counters_count(mdev); -} - -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_read) - return -EOPNOTSUPP; - - return ipsec_ops->counters_read(mdev, counters, count); -} - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - __be32 saddr[4] = {}, daddr[4] = {}; - - if (!ipsec_ops || !ipsec_ops->create_hw_context) - return ERR_PTR(-EOPNOTSUPP); - - if (!xfrm->attrs.is_ipv6) { - saddr[3] = xfrm->attrs.saddr.a4; - daddr[3] = xfrm->attrs.daddr.a4; - } else { - memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); - memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); - } - - return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi, - xfrm->attrs.is_ipv6, sa_handle); -} - -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->free_hw_context) - return; - - ipsec_ops->free_hw_context(context); -} - -struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - struct mlx5_accel_esp_xfrm *xfrm; - - if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) - return ERR_PTR(-EOPNOTSUPP); - - xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, 0); - if (IS_ERR(xfrm)) - return xfrm; - - xfrm->mdev = mdev; - return xfrm; -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); - -void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) - return; - - ipsec_ops->esp_destroy_xfrm(xfrm); -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); - -int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if 
(!ipsec_ops || !ipsec_ops->esp_modify_xfrm) - return -EOPNOTSUPP; - - return ipsec_ops->esp_modify_xfrm(xfrm, attrs); -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h deleted file mode 100644 index fbb9c5415d53..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - */ - -#ifndef __MLX5_ACCEL_IPSEC_H__ -#define __MLX5_ACCEL_IPSEC_H__ - -#include -#include - -#ifdef CONFIG_MLX5_ACCEL - -#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ - MLX5_ACCEL_IPSEC_CAP_DEVICE) - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count); - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle); -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); - -struct mlx5_accel_ipsec_ops { - u32 (*device_caps)(struct mlx5_core_dev *mdev); - unsigned int (*counters_count)(struct mlx5_core_dev *mdev); - int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); - void* (*create_hw_context)(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - const __be32 saddr[4], const __be32 daddr[4], - const __be32 spi, bool is_ipv6, u32 *sa_handle); - void (*free_hw_context)(void *context); - int (*init)(struct mlx5_core_dev *mdev); - void (*cleanup)(struct mlx5_core_dev *mdev); - struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags); - int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); - void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); -}; - -#else - -#define MLX5_IPSEC_DEV(mdev) false - -static inline void * -mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle) -{ - return NULL; -} - -static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {} - -static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} - -static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} - -#endif /* CONFIG_MLX5_ACCEL */ - -#endif /* __MLX5_ACCEL_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c index d6667d38e1de..3a85157f9f07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ +/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. 
*/ #include "mlx5_core.h" -#include "ipsec_offload.h" +#include "accel/ipsec_offload.h" #include "lib/mlx5.h" #include "en_accel/ipsec_fs.h" @@ -376,10 +376,149 @@ static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm, }; -const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) +static const struct mlx5_accel_ipsec_ops * +mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { if (!mlx5_ipsec_offload_device_caps(mdev)) return NULL; return &ipsec_offload_ops; } + +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops; + int err = 0; + + ipsec_ops = mlx5_ipsec_offload_ops(mdev); + if (!ipsec_ops || !ipsec_ops->init) { + mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); + return; + } + + err = ipsec_ops->init(mdev); + if (err) { + mlx5_core_warn_once( + mdev, "Failed to start IPsec device, err = %d\n", err); + return; + } + + mdev->ipsec_ops = ipsec_ops; +} + +void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->cleanup) + return; + + ipsec_ops->cleanup(mdev); +} + +u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->device_caps) + return 0; + + return ipsec_ops->device_caps(mdev); +} +EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); + +unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_count) + return -EOPNOTSUPP; + + return ipsec_ops->counters_count(mdev); +} + +int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int count) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_read) + return -EOPNOTSUPP; + + return ipsec_ops->counters_read(mdev, counters, count); +} + +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + __be32 saddr[4] = {}, daddr[4] = {}; + + if (!ipsec_ops || !ipsec_ops->create_hw_context) + return ERR_PTR(-EOPNOTSUPP); + + if (!xfrm->attrs.is_ipv6) { + saddr[3] = xfrm->attrs.saddr.a4; + daddr[3] = xfrm->attrs.daddr.a4; + } else { + memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); + memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); + } + + return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, + xfrm->attrs.spi, + xfrm->attrs.is_ipv6, sa_handle); +} + +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->free_hw_context) + return; + + ipsec_ops->free_hw_context(context); +} + +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + struct mlx5_accel_esp_xfrm *xfrm; + + if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) + return ERR_PTR(-EOPNOTSUPP); + + xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, 0); + if (IS_ERR(xfrm)) + return xfrm; + + xfrm->mdev = mdev; + return xfrm; +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); + +void mlx5_accel_esp_destroy_xfrm(struct 
mlx5_accel_esp_xfrm *xfrm) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) + return; + + ipsec_ops->esp_destroy_xfrm(xfrm); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); + +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) + return -EOPNOTSUPP; + + return ipsec_ops->esp_modify_xfrm(xfrm, attrs); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h index 970c66d19c1d..4a7d49ed5604 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h @@ -5,11 +5,46 @@ #define __MLX5_IPSEC_OFFLOAD_H__ #include -#include "accel/ipsec.h" +#include #ifdef CONFIG_MLX5_IPSEC -const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev); +#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ + MLX5_ACCEL_IPSEC_CAP_DEVICE) + +unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); +int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int count); + +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle); +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); + +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); +void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); + +struct mlx5_accel_ipsec_ops { + u32 (*device_caps)(struct mlx5_core_dev *mdev); + unsigned int (*counters_count)(struct mlx5_core_dev *mdev); + int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int count); + void *(*create_hw_context)(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, + u32 *sa_handle); + void (*free_hw_context)(void *context); + int (*init)(struct mlx5_core_dev *mdev); + void (*cleanup)(struct mlx5_core_dev *mdev); + struct mlx5_accel_esp_xfrm *(*esp_create_xfrm)( + struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags); + int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); + void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); +}; + static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) { if (!MLX5_CAP_GEN(mdev, ipsec_offload)) @@ -25,10 +60,26 @@ static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) && MLX5_CAP_ETH(mdev, insert_trailer); } - #else -static inline const struct mlx5_accel_ipsec_ops * -mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; } + +#define MLX5_IPSEC_DEV(mdev) false + +static inline void * +mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle) +{ + return NULL; +} + +static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, + void *context) +{ +} + +static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} + +static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} static inline bool mlx5_is_ipsec_device(struct 
mlx5_core_dev *mdev) { return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index ebb12817b795..d2ec0961fe9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -5,7 +5,7 @@ #include "en/txrx.h" #include "en/port.h" #include "en_accel/en_accel.h" -#include "accel/ipsec.h" +#include "accel/ipsec_offload.h" static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index ee50052cbcb8..eccc13b1338f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -40,7 +40,7 @@ #include #include -#include "accel/ipsec.h" +#include "accel/ipsec_offload.h" #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 80886290fd22..87c42df3ee20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -35,7 +35,7 @@ #include #include "en.h" -#include "accel/ipsec.h" +#include "accel/ipsec_offload.h" #include "fpga/sdk.h" #include "en_accel/ipsec.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 83365d04050b..346f7034fec8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -48,7 +48,7 @@ #include "en_accel/ipsec.h" #include "en_accel/en_accel.h" #include "en_accel/ktls.h" -#include "accel/ipsec.h" +#include "accel/ipsec_offload.h" #include "lib/vxlan.h" #include "lib/clock.h" #include "en/port.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index a180c80e9f68..f85eefaad6ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -48,7 +48,7 @@ #include "en_rep.h" #include "en/rep/tc.h" #include "ipoib/ipoib.h" -#include "accel/ipsec.h" +#include "accel/ipsec_offload.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ktls_txrx.h" #include "en/xdp.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 387602bbfecc..8fcbb1032b07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,7 +62,7 @@ #include "lib/mlx5.h" #include "lib/tout.h" #include "fpga/core.h" -#include "accel/ipsec.h" +#include "accel/ipsec_offload.h" #include "lib/clock.h" #include "lib/vxlan.h" #include "lib/geneve.h" -- cgit v1.2.3 From 5a985aa3c922b680cc5bdfb347fee42961dc0e51 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:45 +0300 Subject: net/mlx5: Remove useless IPsec device checks The mlx5_is_ipsec_device() check was to distinguish ConnectX device related ops from FPGA, so post removing FPGA IPsec code this check can be removed as no other device implements it. It is safe to do it as there is already embedded check of IPsec device in mlx5_accel_ipsec_device_caps(). 
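To make the simplification concrete, this is how one caller ends up after the patch (mirroring the mlx5e_xfrm_fs_add_rule() hunk below, with an explanatory comment added for this write-up only):

  static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
  				  struct mlx5e_ipsec_sa_entry *sa_entry)
  {
  	/* The old "if (!mlx5_is_ipsec_device(priv->mdev)) return 0;" early
  	 * return is dropped; per the commit message, device support is
  	 * already checked once via the IPsec caps helper at init time.
  	 */
  	return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
  					     sa_entry->ipsec_obj_id,
  					     &sa_entry->ipsec_rule);
  }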
Link: https://lore.kernel.org/r/e45362abfcabe18e8af20ec8d1acdc99355978f3.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 10 +----- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 3 -- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 42 ++++++++++------------ 3 files changed, 20 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index f6e3b549424f..1391a0c84f16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -286,9 +286,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv, struct mlx5e_ipsec_sa_entry *sa_entry) { - if (!mlx5_is_ipsec_device(priv->mdev)) - return 0; - return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs, sa_entry->ipsec_obj_id, &sa_entry->ipsec_rule); @@ -297,9 +294,6 @@ static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv, static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv, struct mlx5e_ipsec_sa_entry *sa_entry) { - if (!mlx5_is_ipsec_device(priv->mdev)) - return; - mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, &sa_entry->ipsec_rule); } @@ -550,9 +544,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) return; } - if (mlx5_is_ipsec_device(mdev)) - netdev->gso_partial_features |= NETIF_F_GSO_ESP; - + netdev->gso_partial_features |= NETIF_F_GSO_ESP; mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n"); netdev->features |= NETIF_F_GSO_ESP; netdev->hw_features |= NETIF_F_GSO_ESP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 17da23dff0ed..32093497292f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -700,9 +700,6 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { int err; - if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec) - return -EOPNOTSUPP; - err = fs_init_tx(priv); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 8e0cf5e65100..08a6dd4b7662 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -202,16 +202,14 @@ static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv, ipsec_st->x = x; ipsec_st->xo = xo; - if (mlx5_is_ipsec_device(priv->mdev)) { - aead = x->data; - alen = crypto_aead_authsize(aead); - blksize = ALIGN(crypto_aead_blocksize(aead), 4); - clen = ALIGN(skb->len + 2, blksize); - plen = max_t(u32, clen - skb->len, 4); - tailen = plen + alen; - ipsec_st->plen = plen; - ipsec_st->tailen = tailen; - } + aead = x->data; + alen = crypto_aead_authsize(aead); + blksize = ALIGN(crypto_aead_blocksize(aead), 4); + clen = ALIGN(skb->len + 2, blksize); + plen = max_t(u32, clen - skb->len, 4); + tailen = plen + alen; + ipsec_st->plen = plen; + ipsec_st->tailen = tailen; return 0; } @@ -244,19 +242,17 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb, ((struct iphdr *)skb_network_header(skb))->protocol : ((struct ipv6hdr *)skb_network_header(skb))->nexthdr; - if (mlx5_is_ipsec_device(priv->mdev)) { - 
eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); - eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER); - encap = x->encap; - if (!encap) { - eseg->trailer |= (l3_proto == IPPROTO_ESP) ? - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) : - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC); - } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) { - eseg->trailer |= (l3_proto == IPPROTO_ESP) ? - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) : - cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC); - } + eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC); + eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER); + encap = x->encap; + if (!encap) { + eseg->trailer |= (l3_proto == IPPROTO_ESP) ? + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) : + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC); + } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) { + eseg->trailer |= (l3_proto == IPPROTO_ESP) ? + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) : + cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC); } } -- cgit v1.2.3 From 2451da081a343e079d9f5a7b063fcdf0bc439aa8 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:46 +0300 Subject: net/mlx5: Unify device IPsec capabilities check Merge two different function to one in order to provide coherent picture if the device is IPsec capable or not. Link: https://lore.kernel.org/r/8f10ea06ad19c6f651e9fb33921009658f01e1d5.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- .../mellanox/mlx5/core/accel/ipsec_offload.c | 38 +++++++++++----------- .../mellanox/mlx5/core/accel/ipsec_offload.h | 26 --------------- .../net/ethernet/mellanox/mlx5/core/en/params.c | 4 +-- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 12 +++---- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- include/linux/mlx5/accel.h | 7 ++-- 6 files changed, 32 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c index 3a85157f9f07..9dbebef19ff0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c @@ -6,9 +6,6 @@ #include "lib/mlx5.h" #include "en_accel/ipsec_fs.h" -#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \ - MLX5_ACCEL_IPSEC_CAP_LSO) - struct mlx5_ipsec_sa_ctx { struct rhash_head hash; u32 enc_key_id; @@ -25,17 +22,31 @@ struct mlx5_ipsec_esp_xfrm { struct mlx5_accel_esp_xfrm accel_xfrm; }; -static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev) +u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) { - u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS; + u32 caps; + + if (!MLX5_CAP_GEN(mdev, ipsec_offload)) + return 0; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return 0; + + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) + return 0; - if (!mlx5_is_ipsec_device(mdev)) + if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) || + !MLX5_CAP_ETH(mdev, insert_trailer)) return 0; if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) || !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt)) return 0; + caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | + MLX5_ACCEL_IPSEC_CAP_LSO; + if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) caps |= 
MLX5_ACCEL_IPSEC_CAP_ESP; @@ -52,6 +63,7 @@ static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev) WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24); return caps; } +EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); static int mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, @@ -367,7 +379,6 @@ change_sw_xfrm_attrs: } static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { - .device_caps = mlx5_ipsec_offload_device_caps, .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx, .init = mlx5_ipsec_offload_init, @@ -379,7 +390,7 @@ static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { static const struct mlx5_accel_ipsec_ops * mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { - if (!mlx5_ipsec_offload_device_caps(mdev)) + if (!mlx5_ipsec_device_caps(mdev)) return NULL; return &ipsec_offload_ops; @@ -416,17 +427,6 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) ipsec_ops->cleanup(mdev); } -u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->device_caps) - return 0; - - return ipsec_ops->device_caps(mdev); -} -EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); - unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) { const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h index 4a7d49ed5604..3d13e2c136b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h @@ -9,9 +9,6 @@ #ifdef CONFIG_MLX5_IPSEC -#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ - MLX5_ACCEL_IPSEC_CAP_DEVICE) - unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); @@ -25,7 +22,6 @@ void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); struct mlx5_accel_ipsec_ops { - u32 (*device_caps)(struct mlx5_core_dev *mdev); unsigned int (*counters_count)(struct mlx5_core_dev *mdev); int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); @@ -45,25 +41,8 @@ struct mlx5_accel_ipsec_ops { void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); }; -static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) -{ - if (!MLX5_CAP_GEN(mdev, ipsec_offload)) - return false; - - if (!MLX5_CAP_GEN(mdev, log_max_dek)) - return false; - - if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) - return false; - - return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) && - MLX5_CAP_ETH(mdev, insert_trailer); -} #else -#define MLX5_IPSEC_DEV(mdev) false - static inline void * mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *xfrm, @@ -80,10 +59,5 @@ static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} -static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) -{ - return false; -} - #endif /* CONFIG_MLX5_IPSEC */ #endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index d2ec0961fe9e..9f4ae8bc09b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -689,8 +689,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev, void *wq = MLX5_ADDR_OF(sqc, sqc, wq); bool allow_swp; - allow_swp = mlx5_geneve_tx_allowed(mdev) || - !!MLX5_IPSEC_DEV(mdev); + allow_swp = + mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev); mlx5e_build_sq_param_common(mdev, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); MLX5_SET(sqc, sqc, allow_swp, allow_swp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 1391a0c84f16..c280a18ff002 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -226,8 +226,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return -EINVAL; } if (x->props.flags & XFRM_STATE_ESN && - !(mlx5_accel_ipsec_device_caps(priv->mdev) & - MLX5_ACCEL_IPSEC_CAP_ESN)) { + !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) { netdev_info(netdev, "Cannot offload ESN xfrm states\n"); return -EINVAL; } @@ -275,8 +274,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return -EINVAL; } if (x->props.family == AF_INET6 && - !(mlx5_accel_ipsec_device_caps(priv->mdev) & - MLX5_ACCEL_IPSEC_CAP_IPV6)) { + !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) { netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n"); return -EINVAL; } @@ -406,7 +404,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) { struct mlx5e_ipsec *ipsec = NULL; - if (!MLX5_IPSEC_DEV(priv->mdev)) { + if (!mlx5_ipsec_device_caps(priv->mdev)) { netdev_dbg(priv->netdev, "Not an IPSec offload device\n"); return 0; } @@ -519,7 +517,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; struct net_device *netdev = priv->netdev; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || !MLX5_CAP_ETH(mdev, swp)) { mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n"); return; @@ -538,7 +536,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) netdev->features |= NETIF_F_HW_ESP_TX_CSUM; netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM; - if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || !MLX5_CAP_ETH(mdev, swp_lso)) { mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 346f7034fec8..6a3a08fd8910 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1329,7 +1329,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); - if (MLX5_IPSEC_DEV(c->priv->mdev)) + if (mlx5_ipsec_device_caps(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); if (param->is_mpw) set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 
af67d51308cf..9145e2d37c0e 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -124,7 +124,7 @@ enum mlx5_accel_ipsec_cap { #ifdef CONFIG_MLX5_ACCEL -u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); +u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev); struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, @@ -135,7 +135,10 @@ int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, #else -static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } +static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) +{ + return 0; +} static inline struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, -- cgit v1.2.3 From a6a9eaf14222434ba1bdfcdaa2c5a27dbd126972 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:47 +0300 Subject: net/mlx5: Align flow steering allocation namespace to common style Flow steering is a low level internal driver API, as such it relies on the callers to check if namespace is supported and not rely on some compilation flag. Link: https://lore.kernel.org/r/cfb411a8a9ed2a1471810af254bdc0f03469f79c.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2 -- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6 ------ 2 files changed, 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a0ac17c3f12f..33e9f86cf7d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -878,9 +878,7 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns, table_type = FS_FT_NIC_RX; break; case MLX5_FLOW_NAMESPACE_EGRESS: -#ifdef CONFIG_MLX5_IPSEC case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL: -#endif max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions); table_type = FS_FT_NIC_TX; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fe7bdccea301..297e6a468a3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -186,24 +186,18 @@ static struct init_tree_node { static struct init_tree_node egress_root_fs = { .type = FS_TYPE_NAMESPACE, -#ifdef CONFIG_MLX5_IPSEC .ar_size = 2, -#else - .ar_size = 1, -#endif .children = (struct init_tree_node[]) { ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, FS_CHAINING_CAPS_EGRESS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), -#ifdef CONFIG_MLX5_IPSEC ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0, FS_CHAINING_CAPS_EGRESS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS, KERNEL_TX_IPSEC_NUM_LEVELS))), -#endif } }; -- cgit v1.2.3 From 54deb0e77561973f4ca4515e18ab972c281eea1d Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:48 +0300 Subject: net/mlx5: Remove not-needed IPsec config In current code, the CONFIG_MLX5_IPSEC and CONFIG_MLX5_EN_IPSEC are the same. So remove useless indirection. 
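As a hedged illustration of what the consolidation means for guarded code (the declarations match the ipsec_offload.h hunk below; only the gating symbol changes):

  #ifdef CONFIG_MLX5_EN_IPSEC		/* previously CONFIG_MLX5_IPSEC */
  void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
  void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
  #else
  static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
  static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
  #endif /* CONFIG_MLX5_EN_IPSEC */

Users now toggle a single Kconfig symbol, CONFIG_MLX5_EN_IPSEC, and the build either compiles the offload objects or falls back to these empty stubs.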
Link: https://lore.kernel.org/r/fd14492cbc01a0d51a5bfedde02bcd2154123fde.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 16 +--------------- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4 ++-- .../ethernet/mellanox/mlx5/core/accel/ipsec_offload.h | 18 ++---------------- include/linux/mlx5/accel.h | 4 ++-- include/linux/mlx5/driver.h | 2 +- 5 files changed, 8 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index e34e64a9ff4a..176883cf2827 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -142,28 +142,14 @@ config MLX5_CORE_IPOIB help MLX5 IPoIB offloads & acceleration support. -config MLX5_IPSEC +config MLX5_EN_IPSEC bool "Mellanox Technologies IPsec Connect-X support" depends on MLX5_CORE_EN depends on XFRM_OFFLOAD depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD select MLX5_ACCEL - help - Build IPsec support for the Connect-X family of network cards by Mellanox - Technologies. - Note: If you select this option, the mlx5_core driver will include - IPsec support for the Connect-X family. - -config MLX5_EN_IPSEC - bool "IPSec XFRM cryptography-offload acceleration" - depends on MLX5_CORE_EN - depends on XFRM_OFFLOAD - depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - depends on MLX5_IPSEC help Build support for IPsec cryptography-offload acceleration in the NIC. - Note: Support for hardware with this capability needs to be selected - for this option to become available. config MLX5_EN_TLS bool "Mellanox Technologies TLS Connect-X support" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index ad852703a3cb..9e715a1056f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -88,13 +88,13 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # # Accelerations & FPGA # -mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ - en_accel/ipsec_stats.o en_accel/ipsec_fs.o + en_accel/ipsec_stats.o en_accel/ipsec_fs.o \ + accel/ipsec_offload.o mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \ en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h index 3d13e2c136b1..36e700b596d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h @@ -7,7 +7,7 @@ #include #include -#ifdef CONFIG_MLX5_IPSEC +#ifdef CONFIG_MLX5_EN_IPSEC unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, @@ -42,22 +42,8 @@ struct mlx5_accel_ipsec_ops { }; #else - -static inline void * -mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle) -{ - return NULL; -} - -static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, - void *context) -{ -} - static inline void 
mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} -#endif /* CONFIG_MLX5_IPSEC */ +#endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 9145e2d37c0e..73e4d50a9f02 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -122,7 +122,7 @@ enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; -#ifdef CONFIG_MLX5_ACCEL +#ifdef CONFIG_MLX5_EN_IPSEC u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev); @@ -152,5 +152,5 @@ static inline int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } -#endif /* CONFIG_MLX5_ACCEL */ +#endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5_ACCEL_H__ */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9424503eb8d3..5af53c035949 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -778,7 +778,7 @@ struct mlx5_core_dev { #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif -#ifdef CONFIG_MLX5_ACCEL +#ifdef CONFIG_MLX5_EN_IPSEC const struct mlx5_accel_ipsec_ops *ipsec_ops; #endif struct mlx5_clock clock; -- cgit v1.2.3 From 16fe5a1c5c074a836626e3bd9560d3c4a39a3fcf Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:49 +0300 Subject: net/mlx5: Move IPsec file to relevant directory IPsec is part of ethernet side of mlx5 driver and needs to be placed in en_accel folder. Link: https://lore.kernel.org/r/a0ca88f4d9c602c574106c0de0511803e7dcbdff.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../mellanox/mlx5/core/accel/ipsec_offload.c | 524 --------------------- .../mellanox/mlx5/core/accel/ipsec_offload.h | 49 -- .../net/ethernet/mellanox/mlx5/core/en/params.c | 2 +- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 2 +- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 2 +- .../mellanox/mlx5/core/en_accel/ipsec_fs.h | 2 +- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 524 +++++++++++++++++++++ .../mellanox/mlx5/core/en_accel/ipsec_offload.h | 49 ++ .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 2 +- .../mellanox/mlx5/core/en_accel/ipsec_stats.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 14 files changed, 583 insertions(+), 583 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9e715a1056f8..f7aafbfcdb61 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -94,7 +94,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o en_accel/ipsec_fs.o \ - accel/ipsec_offload.o + en_accel/ipsec_offload.o mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \ 
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c deleted file mode 100644 index 9dbebef19ff0..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c +++ /dev/null @@ -1,524 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */ - -#include "mlx5_core.h" -#include "accel/ipsec_offload.h" -#include "lib/mlx5.h" -#include "en_accel/ipsec_fs.h" - -struct mlx5_ipsec_sa_ctx { - struct rhash_head hash; - u32 enc_key_id; - u32 ipsec_obj_id; - /* hw ctx */ - struct mlx5_core_dev *dev; - struct mlx5_ipsec_esp_xfrm *mxfrm; -}; - -struct mlx5_ipsec_esp_xfrm { - /* reference counter of SA ctx */ - struct mlx5_ipsec_sa_ctx *sa_ctx; - struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */ - struct mlx5_accel_esp_xfrm accel_xfrm; -}; - -u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - u32 caps; - - if (!MLX5_CAP_GEN(mdev, ipsec_offload)) - return 0; - - if (!MLX5_CAP_GEN(mdev, log_max_dek)) - return 0; - - if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & - MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) - return 0; - - if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) || - !MLX5_CAP_ETH(mdev, insert_trailer)) - return 0; - - if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) || - !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt)) - return 0; - - caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | - MLX5_ACCEL_IPSEC_CAP_LSO; - - if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && - MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) - caps |= MLX5_ACCEL_IPSEC_CAP_ESP; - - if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) { - caps |= MLX5_ACCEL_IPSEC_CAP_ESN; - caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; - } - - /* We can accommodate up to 2^24 different IPsec objects - * because we use up to 24 bit in flow table metadata - * to hold the IPsec Object unique handle. 
- */ - WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24); - return caps; -} -EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); - -static int -mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { - mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n", - attrs->replay_type); - return -EOPNOTSUPP; - } - - if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) { - mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n", - attrs->keymat_type); - return -EOPNOTSUPP; - } - - if (attrs->keymat.aes_gcm.iv_algo != - MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) { - mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n", - attrs->keymat.aes_gcm.iv_algo); - return -EOPNOTSUPP; - } - - if (attrs->keymat.aes_gcm.key_len != 128 && - attrs->keymat.aes_gcm.key_len != 256) { - mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n", - attrs->keymat.aes_gcm.key_len); - return -EOPNOTSUPP; - } - - if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) && - !MLX5_CAP_IPSEC(mdev, ipsec_esn)) { - mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n"); - return -EOPNOTSUPP; - } - - return 0; -} - -static struct mlx5_accel_esp_xfrm * -mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags) -{ - struct mlx5_ipsec_esp_xfrm *mxfrm; - int err = 0; - - err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs); - if (err) - return ERR_PTR(err); - - mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL); - if (!mxfrm) - return ERR_PTR(-ENOMEM); - - mutex_init(&mxfrm->lock); - memcpy(&mxfrm->accel_xfrm.attrs, attrs, - sizeof(mxfrm->accel_xfrm.attrs)); - - return &mxfrm->accel_xfrm; -} - -static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) -{ - struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, - accel_xfrm); - - /* assuming no sa_ctx are connected to this xfrm_ctx */ - WARN_ON(mxfrm->sa_ctx); - kfree(mxfrm); -} - -struct mlx5_ipsec_obj_attrs { - const struct aes_gcm_keymat *aes_gcm; - u32 accel_flags; - u32 esn_msb; - u32 enc_key_id; -}; - -static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev, - struct mlx5_ipsec_obj_attrs *attrs, - u32 *ipsec_id) -{ - const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm; - u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; - u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {}; - void *obj, *salt_p, *salt_iv_p; - int err; - - obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object); - - /* salt and seq_iv */ - salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt); - memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt)); - - switch (aes_gcm->icv_len) { - case 64: - MLX5_SET(ipsec_obj, obj, icv_length, - MLX5_IPSEC_OBJECT_ICV_LEN_8B); - break; - case 96: - MLX5_SET(ipsec_obj, obj, icv_length, - MLX5_IPSEC_OBJECT_ICV_LEN_12B); - break; - case 128: - MLX5_SET(ipsec_obj, obj, icv_length, - MLX5_IPSEC_OBJECT_ICV_LEN_16B); - break; - default: - return -EINVAL; - } - salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv); - memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv)); - /* esn */ - if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { - MLX5_SET(ipsec_obj, obj, esn_en, 1); - MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); - if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) - 
MLX5_SET(ipsec_obj, obj, esn_overlap, 1); - } - - MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id); - - /* general object fields set */ - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, - MLX5_CMD_OP_CREATE_GENERAL_OBJECT); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, - MLX5_GENERAL_OBJECT_TYPES_IPSEC); - - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); - if (!err) - *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); - - return err; -} - -static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id) -{ - u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; - u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; - - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, - MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, - MLX5_GENERAL_OBJECT_TYPES_IPSEC); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); - - mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); -} - -static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *accel_xfrm, - const __be32 saddr[4], const __be32 daddr[4], - const __be32 spi, bool is_ipv6, u32 *hw_handle) -{ - struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs; - struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; - struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; - struct mlx5_ipsec_esp_xfrm *mxfrm; - struct mlx5_ipsec_sa_ctx *sa_ctx; - int err; - - /* alloc SA context */ - sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); - if (!sa_ctx) - return ERR_PTR(-ENOMEM); - - sa_ctx->dev = mdev; - - mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); - mutex_lock(&mxfrm->lock); - sa_ctx->mxfrm = mxfrm; - - /* key */ - err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key, - aes_gcm->key_len / BITS_PER_BYTE, - MLX5_ACCEL_OBJ_IPSEC_KEY, - &sa_ctx->enc_key_id); - if (err) { - mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err); - goto err_sa_ctx; - } - - ipsec_attrs.aes_gcm = aes_gcm; - ipsec_attrs.accel_flags = accel_xfrm->attrs.flags; - ipsec_attrs.esn_msb = accel_xfrm->attrs.esn; - ipsec_attrs.enc_key_id = sa_ctx->enc_key_id; - err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs, - &sa_ctx->ipsec_obj_id); - if (err) { - mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err); - goto err_enc_key; - } - - *hw_handle = sa_ctx->ipsec_obj_id; - mxfrm->sa_ctx = sa_ctx; - mutex_unlock(&mxfrm->lock); - - return sa_ctx; - -err_enc_key: - mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id); -err_sa_ctx: - mutex_unlock(&mxfrm->lock); - kfree(sa_ctx); - return ERR_PTR(err); -} - -static void mlx5_ipsec_offload_delete_sa_ctx(void *context) -{ - struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context; - struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm; - - mutex_lock(&mxfrm->lock); - mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id); - mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id); - kfree(sa_ctx); - mxfrm->sa_ctx = NULL; - mutex_unlock(&mxfrm->lock); -} - -static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev) -{ - return 0; -} - -static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, - struct mlx5_ipsec_obj_attrs *attrs, - u32 ipsec_id) -{ - u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {}; - u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)]; - u64 modify_field_select = 0; - u64 general_obj_types; - void *obj; - int err; - - if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED)) - return 0; - - general_obj_types = MLX5_CAP_GEN_64(mdev, 
general_obj_types); - if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) - return -EINVAL; - - /* general object fields set */ - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); - if (err) { - mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n", - ipsec_id, err); - return err; - } - - obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object); - modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select); - - /* esn */ - if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) || - !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB)) - return -EOPNOTSUPP; - - obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object); - MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); - if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) - MLX5_SET(ipsec_obj, obj, esn_overlap, 1); - - /* general object fields set */ - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); - - return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); -} - -static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; - struct mlx5_core_dev *mdev = xfrm->mdev; - struct mlx5_ipsec_esp_xfrm *mxfrm; - - int err = 0; - - if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) - return 0; - - if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs)) - return -EOPNOTSUPP; - - mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); - - mutex_lock(&mxfrm->lock); - - if (!mxfrm->sa_ctx) - /* Not bound xfrm, change only sw attrs */ - goto change_sw_xfrm_attrs; - - /* need to add find and replace in ipsec_rhash_sa the sa_ctx */ - /* modify device with new hw_sa */ - ipsec_attrs.accel_flags = attrs->flags; - ipsec_attrs.esn_msb = attrs->esn; - err = mlx5_modify_ipsec_obj(mdev, - &ipsec_attrs, - mxfrm->sa_ctx->ipsec_obj_id); - -change_sw_xfrm_attrs: - if (!err) - memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); - - mutex_unlock(&mxfrm->lock); - return err; -} - -static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { - .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, - .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx, - .init = mlx5_ipsec_offload_init, - .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm, - .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm, - .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm, -}; - -static const struct mlx5_accel_ipsec_ops * -mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) -{ - if (!mlx5_ipsec_device_caps(mdev)) - return NULL; - - return &ipsec_offload_ops; -} - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops; - int err = 0; - - ipsec_ops = mlx5_ipsec_offload_ops(mdev); - if (!ipsec_ops || !ipsec_ops->init) { - mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); - return; - } - - err = ipsec_ops->init(mdev); - if (err) { - mlx5_core_warn_once( - mdev, "Failed to start IPsec device, err = %d\n", err); - return; - } - - mdev->ipsec_ops = ipsec_ops; -} - -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->cleanup) - return; - - 
ipsec_ops->cleanup(mdev); -} - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_count) - return -EOPNOTSUPP; - - return ipsec_ops->counters_count(mdev); -} - -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_read) - return -EOPNOTSUPP; - - return ipsec_ops->counters_read(mdev, counters, count); -} - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - __be32 saddr[4] = {}, daddr[4] = {}; - - if (!ipsec_ops || !ipsec_ops->create_hw_context) - return ERR_PTR(-EOPNOTSUPP); - - if (!xfrm->attrs.is_ipv6) { - saddr[3] = xfrm->attrs.saddr.a4; - daddr[3] = xfrm->attrs.daddr.a4; - } else { - memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); - memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); - } - - return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, - xfrm->attrs.spi, - xfrm->attrs.is_ipv6, sa_handle); -} - -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->free_hw_context) - return; - - ipsec_ops->free_hw_context(context); -} - -struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - struct mlx5_accel_esp_xfrm *xfrm; - - if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) - return ERR_PTR(-EOPNOTSUPP); - - xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, 0); - if (IS_ERR(xfrm)) - return xfrm; - - xfrm->mdev = mdev; - return xfrm; -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); - -void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) - return; - - ipsec_ops->esp_destroy_xfrm(xfrm); -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); - -int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) - return -EOPNOTSUPP; - - return ipsec_ops->esp_modify_xfrm(xfrm, attrs); -} -EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h deleted file mode 100644 index 36e700b596d8..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ - -#ifndef __MLX5_IPSEC_OFFLOAD_H__ -#define __MLX5_IPSEC_OFFLOAD_H__ - -#include -#include - -#ifdef CONFIG_MLX5_EN_IPSEC - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count); - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle); -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); - -struct mlx5_accel_ipsec_ops { - unsigned int (*counters_count)(struct mlx5_core_dev *mdev); - int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count); - void *(*create_hw_context)(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - const __be32 saddr[4], const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - u32 *sa_handle); - void (*free_hw_context)(void *context); - int (*init)(struct mlx5_core_dev *mdev); - void (*cleanup)(struct mlx5_core_dev *mdev); - struct mlx5_accel_esp_xfrm *(*esp_create_xfrm)( - struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags); - int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); - void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); -}; - -#else -static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} - -static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} -#endif /* CONFIG_MLX5_EN_IPSEC */ -#endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 9f4ae8bc09b9..1e8700957280 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -5,7 +5,7 @@ #include "en/txrx.h" #include "en/port.h" #include "en_accel/en_accel.h" -#include "accel/ipsec_offload.h" +#include "en_accel/ipsec_offload.h" static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index eccc13b1338f..a0e9dade09e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -40,7 +40,7 @@ #include #include -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 32093497292f..66b529e36ea1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -2,7 +2,7 @@ /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ #include -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #include "ipsec_fs.h" #include "fs_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h index 3389b3bb3ef8..b3e23aa5beeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h @@ -6,7 +6,7 @@ #include "en.h" #include "ipsec.h" -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #include "en/fs.h" #ifdef CONFIG_MLX5_EN_IPSEC diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c new file mode 100644 index 000000000000..7ae2d308139e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -0,0 +1,524 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */ + +#include "mlx5_core.h" +#include "ipsec_offload.h" +#include "lib/mlx5.h" +#include "en_accel/ipsec_fs.h" + +struct mlx5_ipsec_sa_ctx { + struct rhash_head hash; + u32 enc_key_id; + u32 ipsec_obj_id; + /* hw ctx */ + struct mlx5_core_dev *dev; + struct mlx5_ipsec_esp_xfrm *mxfrm; +}; + +struct mlx5_ipsec_esp_xfrm { + /* reference counter of SA ctx */ + struct mlx5_ipsec_sa_ctx *sa_ctx; + struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */ + struct mlx5_accel_esp_xfrm accel_xfrm; +}; + +u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) +{ + u32 caps; + + if (!MLX5_CAP_GEN(mdev, ipsec_offload)) + return 0; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return 0; + + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) + return 0; + + if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) || + !MLX5_CAP_ETH(mdev, insert_trailer)) + return 0; + + if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) || + !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt)) + return 0; + + caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | + MLX5_ACCEL_IPSEC_CAP_LSO; + + if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && + MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) + caps |= MLX5_ACCEL_IPSEC_CAP_ESP; + + if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) { + caps |= MLX5_ACCEL_IPSEC_CAP_ESN; + caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; + } + + /* We can accommodate up to 2^24 different IPsec objects + * because we use up to 24 bit in flow table metadata + * to hold the IPsec Object unique handle. 
+ */ + WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24); + return caps; +} +EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); + +static int +mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { + mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n", + attrs->replay_type); + return -EOPNOTSUPP; + } + + if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) { + mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n", + attrs->keymat_type); + return -EOPNOTSUPP; + } + + if (attrs->keymat.aes_gcm.iv_algo != + MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) { + mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n", + attrs->keymat.aes_gcm.iv_algo); + return -EOPNOTSUPP; + } + + if (attrs->keymat.aes_gcm.key_len != 128 && + attrs->keymat.aes_gcm.key_len != 256) { + mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n", + attrs->keymat.aes_gcm.key_len); + return -EOPNOTSUPP; + } + + if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) && + !MLX5_CAP_IPSEC(mdev, ipsec_esn)) { + mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static struct mlx5_accel_esp_xfrm * +mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) +{ + struct mlx5_ipsec_esp_xfrm *mxfrm; + int err = 0; + + err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs); + if (err) + return ERR_PTR(err); + + mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL); + if (!mxfrm) + return ERR_PTR(-ENOMEM); + + mutex_init(&mxfrm->lock); + memcpy(&mxfrm->accel_xfrm.attrs, attrs, + sizeof(mxfrm->accel_xfrm.attrs)); + + return &mxfrm->accel_xfrm; +} + +static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ + struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, + accel_xfrm); + + /* assuming no sa_ctx are connected to this xfrm_ctx */ + WARN_ON(mxfrm->sa_ctx); + kfree(mxfrm); +} + +struct mlx5_ipsec_obj_attrs { + const struct aes_gcm_keymat *aes_gcm; + u32 accel_flags; + u32 esn_msb; + u32 enc_key_id; +}; + +static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_obj_attrs *attrs, + u32 *ipsec_id) +{ + const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {}; + void *obj, *salt_p, *salt_iv_p; + int err; + + obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object); + + /* salt and seq_iv */ + salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt); + memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt)); + + switch (aes_gcm->icv_len) { + case 64: + MLX5_SET(ipsec_obj, obj, icv_length, + MLX5_IPSEC_OBJECT_ICV_LEN_8B); + break; + case 96: + MLX5_SET(ipsec_obj, obj, icv_length, + MLX5_IPSEC_OBJECT_ICV_LEN_12B); + break; + case 128: + MLX5_SET(ipsec_obj, obj, icv_length, + MLX5_IPSEC_OBJECT_ICV_LEN_16B); + break; + default: + return -EINVAL; + } + salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv); + memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv)); + /* esn */ + if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { + MLX5_SET(ipsec_obj, obj, esn_en, 1); + MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); + if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) + 
MLX5_SET(ipsec_obj, obj, esn_overlap, 1); + } + + MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id); + + /* general object fields set */ + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, + MLX5_GENERAL_OBJECT_TYPES_IPSEC); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (!err) + *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return err; +} + +static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, + MLX5_GENERAL_OBJECT_TYPES_IPSEC); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); + + mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, u32 *hw_handle) +{ + struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs; + struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; + struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; + struct mlx5_ipsec_esp_xfrm *mxfrm; + struct mlx5_ipsec_sa_ctx *sa_ctx; + int err; + + /* alloc SA context */ + sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); + if (!sa_ctx) + return ERR_PTR(-ENOMEM); + + sa_ctx->dev = mdev; + + mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); + mutex_lock(&mxfrm->lock); + sa_ctx->mxfrm = mxfrm; + + /* key */ + err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key, + aes_gcm->key_len / BITS_PER_BYTE, + MLX5_ACCEL_OBJ_IPSEC_KEY, + &sa_ctx->enc_key_id); + if (err) { + mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err); + goto err_sa_ctx; + } + + ipsec_attrs.aes_gcm = aes_gcm; + ipsec_attrs.accel_flags = accel_xfrm->attrs.flags; + ipsec_attrs.esn_msb = accel_xfrm->attrs.esn; + ipsec_attrs.enc_key_id = sa_ctx->enc_key_id; + err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs, + &sa_ctx->ipsec_obj_id); + if (err) { + mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err); + goto err_enc_key; + } + + *hw_handle = sa_ctx->ipsec_obj_id; + mxfrm->sa_ctx = sa_ctx; + mutex_unlock(&mxfrm->lock); + + return sa_ctx; + +err_enc_key: + mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id); +err_sa_ctx: + mutex_unlock(&mxfrm->lock); + kfree(sa_ctx); + return ERR_PTR(err); +} + +static void mlx5_ipsec_offload_delete_sa_ctx(void *context) +{ + struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context; + struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm; + + mutex_lock(&mxfrm->lock); + mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id); + mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id); + kfree(sa_ctx); + mxfrm->sa_ctx = NULL; + mutex_unlock(&mxfrm->lock); +} + +static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev) +{ + return 0; +} + +static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_obj_attrs *attrs, + u32 ipsec_id) +{ + u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {}; + u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)]; + u64 modify_field_select = 0; + u64 general_obj_types; + void *obj; + int err; + + if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED)) + return 0; + + general_obj_types = MLX5_CAP_GEN_64(mdev, 
general_obj_types); + if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) + return -EINVAL; + + /* general object fields set */ + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) { + mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n", + ipsec_id, err); + return err; + } + + obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object); + modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select); + + /* esn */ + if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) || + !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB)) + return -EOPNOTSUPP; + + obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object); + MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); + if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) + MLX5_SET(ipsec_obj, obj, esn_overlap, 1); + + /* general object fields set */ + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; + struct mlx5_core_dev *mdev = xfrm->mdev; + struct mlx5_ipsec_esp_xfrm *mxfrm; + + int err = 0; + + if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) + return 0; + + if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs)) + return -EOPNOTSUPP; + + mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); + + mutex_lock(&mxfrm->lock); + + if (!mxfrm->sa_ctx) + /* Not bound xfrm, change only sw attrs */ + goto change_sw_xfrm_attrs; + + /* need to add find and replace in ipsec_rhash_sa the sa_ctx */ + /* modify device with new hw_sa */ + ipsec_attrs.accel_flags = attrs->flags; + ipsec_attrs.esn_msb = attrs->esn; + err = mlx5_modify_ipsec_obj(mdev, + &ipsec_attrs, + mxfrm->sa_ctx->ipsec_obj_id); + +change_sw_xfrm_attrs: + if (!err) + memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); + + mutex_unlock(&mxfrm->lock); + return err; +} + +static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { + .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, + .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx, + .init = mlx5_ipsec_offload_init, + .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm, + .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm, + .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm, +}; + +static const struct mlx5_accel_ipsec_ops * +mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) +{ + if (!mlx5_ipsec_device_caps(mdev)) + return NULL; + + return &ipsec_offload_ops; +} + +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops; + int err = 0; + + ipsec_ops = mlx5_ipsec_offload_ops(mdev); + if (!ipsec_ops || !ipsec_ops->init) { + mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); + return; + } + + err = ipsec_ops->init(mdev); + if (err) { + mlx5_core_warn_once( + mdev, "Failed to start IPsec device, err = %d\n", err); + return; + } + + mdev->ipsec_ops = ipsec_ops; +} + +void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->cleanup) + return; + + 
ipsec_ops->cleanup(mdev); +} + +unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_count) + return -EOPNOTSUPP; + + return ipsec_ops->counters_count(mdev); +} + +int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int count) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_read) + return -EOPNOTSUPP; + + return ipsec_ops->counters_read(mdev, counters, count); +} + +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + __be32 saddr[4] = {}, daddr[4] = {}; + + if (!ipsec_ops || !ipsec_ops->create_hw_context) + return ERR_PTR(-EOPNOTSUPP); + + if (!xfrm->attrs.is_ipv6) { + saddr[3] = xfrm->attrs.saddr.a4; + daddr[3] = xfrm->attrs.daddr.a4; + } else { + memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); + memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); + } + + return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, + xfrm->attrs.spi, + xfrm->attrs.is_ipv6, sa_handle); +} + +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->free_hw_context) + return; + + ipsec_ops->free_hw_context(context); +} + +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + struct mlx5_accel_esp_xfrm *xfrm; + + if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) + return ERR_PTR(-EOPNOTSUPP); + + xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, 0); + if (IS_ERR(xfrm)) + return xfrm; + + xfrm->mdev = mdev; + return xfrm; +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); + +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) + return; + + ipsec_ops->esp_destroy_xfrm(xfrm); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); + +int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) + return -EOPNOTSUPP; + + return ipsec_ops->esp_modify_xfrm(xfrm, attrs); +} +EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h new file mode 100644 index 000000000000..36e700b596d8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_IPSEC_OFFLOAD_H__ +#define __MLX5_IPSEC_OFFLOAD_H__ + +#include +#include + +#ifdef CONFIG_MLX5_EN_IPSEC + +unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); +int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int count); + +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle); +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); + +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); +void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); + +struct mlx5_accel_ipsec_ops { + unsigned int (*counters_count)(struct mlx5_core_dev *mdev); + int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int count); + void *(*create_hw_context)(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, + u32 *sa_handle); + void (*free_hw_context)(void *context); + int (*init)(struct mlx5_core_dev *mdev); + void (*cleanup)(struct mlx5_core_dev *mdev); + struct mlx5_accel_esp_xfrm *(*esp_create_xfrm)( + struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags); + int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); + void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); +}; + +#else +static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} + +static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} +#endif /* CONFIG_MLX5_EN_IPSEC */ +#endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 08a6dd4b7662..9b65c765cbd9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -34,7 +34,7 @@ #include #include #include -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ipsec.h" #include "en.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 87c42df3ee20..3aace1c2a763 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -35,7 +35,7 @@ #include #include "en.h" -#include "accel/ipsec_offload.h" +#include "ipsec_offload.h" #include "fpga/sdk.h" #include "en_accel/ipsec.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6a3a08fd8910..12b72a0bcb1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -48,7 +48,7 @@ #include "en_accel/ipsec.h" #include "en_accel/en_accel.h" #include "en_accel/ktls.h" -#include "accel/ipsec_offload.h" +#include "en_accel/ipsec_offload.h" #include "lib/vxlan.h" #include "lib/clock.h" #include "en/port.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f85eefaad6ab..a5f6fd16b665 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -48,7 +48,7 @@ #include "en_rep.h" #include "en/rep/tc.h" #include "ipoib/ipoib.h" -#include "accel/ipsec_offload.h" 
+#include "en_accel/ipsec_offload.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ktls_txrx.h" #include "en/xdp.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8fcbb1032b07..032de078723c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,7 +62,7 @@ #include "lib/mlx5.h" #include "lib/tout.h" #include "fpga/core.h" -#include "accel/ipsec_offload.h" +#include "en_accel/ipsec_offload.h" #include "lib/clock.h" #include "lib/vxlan.h" #include "lib/geneve.h" -- cgit v1.2.3 From f03c7b183ef93032582131cd25940245fbee433a Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:50 +0300 Subject: net/mlx5: Reduce kconfig complexity while building crypto support Both IPsec and kTLS need two functions declared in the lib/crypto.c file. These functions are advertised through general mlx5.h file and don't have any protection from attempts to call them without proper config option. Instead of creating stubs just for two functions, simply build that *.c file as part of regular mlx5_eth build and rely on compiler to throw them away if no callers exist in produced code. Link: https://lore.kernel.org/r/37f02171da06886c1b403d44dd18b2a56b19219d.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 5 ----- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4 +--- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 176883cf2827..bfc0cd5ec423 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -16,9 +16,6 @@ config MLX5_CORE Core driver for low level functionality of the ConnectX-4 and Connect-IB cards by Mellanox Technologies. -config MLX5_ACCEL - bool - config MLX5_FPGA bool "Mellanox Technologies Innova support" depends on MLX5_CORE @@ -147,7 +144,6 @@ config MLX5_EN_IPSEC depends on MLX5_CORE_EN depends on XFRM_OFFLOAD depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - select MLX5_ACCEL help Build support for IPsec cryptography-offload acceleration in the NIC. @@ -156,7 +152,6 @@ config MLX5_EN_TLS depends on TLS_DEVICE depends on TLS=y || MLX5_CORE=m depends on MLX5_CORE_EN - select MLX5_ACCEL help Build support for TLS cryptography-offload acceleration in the NIC. 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index f7aafbfcdb61..81620c25c77e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -28,7 +28,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \ - en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o + en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o # # Netdev extra @@ -88,8 +88,6 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # # Accelerations & FPGA # -mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o - mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ -- cgit v1.2.3 From f2b41b32cde8453a0a26875261f0e26809c2805a Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:51 +0300 Subject: net/mlx5: Remove ipsec_ops function table There is only one IPsec implementation and ipsec_ops is not needed at all in this situation. Together with removal of ipsec_ops, we can drop the entry checks as these functions are called for IPsec devices only. Link: https://lore.kernel.org/r/bc8dd1c8a77b65dbf5e2cf92c813ffaca2505c5f.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- .../mellanox/mlx5/core/en_accel/ipsec_fs.h | 5 - .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 118 ++------------------- .../mellanox/mlx5/core/en_accel/ipsec_offload.h | 35 ------ drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 - include/linux/mlx5/driver.h | 3 - 5 files changed, 8 insertions(+), 157 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h index b3e23aa5beeb..b70953979709 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h @@ -9,7 +9,6 @@ #include "ipsec_offload.h" #include "en/fs.h" -#ifdef CONFIG_MLX5_EN_IPSEC void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv); int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, @@ -19,8 +18,4 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, struct mlx5_accel_esp_xfrm_attrs *attrs, struct mlx5e_ipsec_rule *ipsec_rule); -#else -static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {} -static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; } -#endif #endif /* __MLX5_IPSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 7ae2d308139e..f0f44bd95cc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -106,8 +106,7 @@ mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, static struct mlx5_accel_esp_xfrm * mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags) + const struct mlx5_accel_esp_xfrm_attrs 
*attrs) { struct mlx5_ipsec_esp_xfrm *mxfrm; int err = 0; @@ -286,11 +285,6 @@ static void mlx5_ipsec_offload_delete_sa_ctx(void *context) mutex_unlock(&mxfrm->lock); } -static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev) -{ - return 0; -} - static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, struct mlx5_ipsec_obj_attrs *attrs, u32 ipsec_id) @@ -378,86 +372,12 @@ change_sw_xfrm_attrs: return err; } -static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { - .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, - .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx, - .init = mlx5_ipsec_offload_init, - .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm, - .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm, - .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm, -}; - -static const struct mlx5_accel_ipsec_ops * -mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) -{ - if (!mlx5_ipsec_device_caps(mdev)) - return NULL; - - return &ipsec_offload_ops; -} - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops; - int err = 0; - - ipsec_ops = mlx5_ipsec_offload_ops(mdev); - if (!ipsec_ops || !ipsec_ops->init) { - mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); - return; - } - - err = ipsec_ops->init(mdev); - if (err) { - mlx5_core_warn_once( - mdev, "Failed to start IPsec device, err = %d\n", err); - return; - } - - mdev->ipsec_ops = ipsec_ops; -} - -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->cleanup) - return; - - ipsec_ops->cleanup(mdev); -} - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_count) - return -EOPNOTSUPP; - - return ipsec_ops->counters_count(mdev); -} - -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count) -{ - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->counters_read) - return -EOPNOTSUPP; - - return ipsec_ops->counters_read(mdev, counters, count); -} - void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *xfrm, u32 *sa_handle) { - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; __be32 saddr[4] = {}, daddr[4] = {}; - if (!ipsec_ops || !ipsec_ops->create_hw_context) - return ERR_PTR(-EOPNOTSUPP); - if (!xfrm->attrs.is_ipv6) { saddr[3] = xfrm->attrs.saddr.a4; daddr[3] = xfrm->attrs.daddr.a4; @@ -466,59 +386,37 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); } - return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, - xfrm->attrs.spi, - xfrm->attrs.is_ipv6, sa_handle); + return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr, + xfrm->attrs.spi, + xfrm->attrs.is_ipv6, sa_handle); } void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) { - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->free_hw_context) - return; - - ipsec_ops->free_hw_context(context); + mlx5_ipsec_offload_delete_sa_ctx(context); } struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs) { - const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; struct mlx5_accel_esp_xfrm *xfrm; 
- if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) - return ERR_PTR(-EOPNOTSUPP); - - xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, 0); + xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs); if (IS_ERR(xfrm)) return xfrm; xfrm->mdev = mdev; return xfrm; } -EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) { - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) - return; - - ipsec_ops->esp_destroy_xfrm(xfrm); + mlx5_ipsec_offload_esp_destroy_xfrm(xfrm); } -EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { - const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; - - if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) - return -EOPNOTSUPP; - - return ipsec_ops->esp_modify_xfrm(xfrm, attrs); + return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs); } -EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h index 36e700b596d8..7dac104e6ef1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h @@ -7,43 +7,8 @@ #include #include -#ifdef CONFIG_MLX5_EN_IPSEC - -unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev); -int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count); - void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *xfrm, u32 *sa_handle); void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); - -void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); - -struct mlx5_accel_ipsec_ops { - unsigned int (*counters_count)(struct mlx5_core_dev *mdev); - int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int count); - void *(*create_hw_context)(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - const __be32 saddr[4], const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - u32 *sa_handle); - void (*free_hw_context)(void *context); - int (*init)(struct mlx5_core_dev *mdev); - void (*cleanup)(struct mlx5_core_dev *mdev); - struct mlx5_accel_esp_xfrm *(*esp_create_xfrm)( - struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags); - int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); - void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); -}; - -#else -static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} - -static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} -#endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 032de078723c..d504c8cb8f96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1181,8 +1181,6 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_fpga_start; } - mlx5_accel_ipsec_init(dev); - err = mlx5_init_fs(dev); if (err) { mlx5_core_err(dev, "Failed to init flow steering\n"); @@ -1230,7 +1228,6 @@ err_vhca: err_set_hca: mlx5_cleanup_fs(dev); 
err_fs: - mlx5_accel_ipsec_cleanup(dev); mlx5_fpga_device_stop(dev); err_fpga_start: mlx5_rsc_dump_cleanup(dev); @@ -1256,7 +1253,6 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_sf_hw_table_destroy(dev); mlx5_vhca_event_stop(dev); mlx5_cleanup_fs(dev); - mlx5_accel_ipsec_cleanup(dev); mlx5_fpga_device_stop(dev); mlx5_rsc_dump_cleanup(dev); mlx5_hv_vhca_cleanup(dev->hv_vhca); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5af53c035949..ff47d49d8be4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -777,9 +777,6 @@ struct mlx5_core_dev { } roce; #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; -#endif -#ifdef CONFIG_MLX5_EN_IPSEC - const struct mlx5_accel_ipsec_ops *ipsec_ops; #endif struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; -- cgit v1.2.3 From 2984287c4c19949d7eb451dcad0bd5c54a2a376f Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 11:25:52 +0300 Subject: net/mlx5: Remove not-implemented IPsec capabilities Clean a capabilities enum to remove not-implemented bits. Link: https://lore.kernel.org/r/1044bb7b779107ff38e48e3f6553421104f3f819.1649232994.git.leonro@nvidia.com Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky --- .../net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 4 +--- include/linux/mlx5/accel.h | 11 ++++------- 2 files changed, 5 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index f0f44bd95cc9..37c9880719cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -51,10 +51,8 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) caps |= MLX5_ACCEL_IPSEC_CAP_ESP; - if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) { + if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) caps |= MLX5_ACCEL_IPSEC_CAP_ESN; - caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; - } /* We can accommodate up to 2^24 different IPsec objects * because we use up to 24 bit in flow table metadata diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 73e4d50a9f02..0f2596297f6a 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -113,13 +113,10 @@ struct mlx5_accel_esp_xfrm { enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, - MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1, - MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2, - MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3, - MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4, - MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5, - MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6, - MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, + MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1, + MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 2, + MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 3, + MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 4, }; #ifdef CONFIG_MLX5_EN_IPSEC -- cgit v1.2.3 From b559edfaf3f3f3eb726f99f65b181ab310b3d4bc Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Fri, 8 Apr 2022 11:22:46 +0800 Subject: net: ethernet: mtk_eth_soc: fix return value check in mtk_wed_add_hw() If syscon_regmap_lookup_by_phandle() fails, it never return NULL pointer, change the check to IS_ERR(). Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") Reported-by: Hulk Robot Signed-off-by: Yang Yingliang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_wed.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index f0eacf819cd9..5d510ac24f2a 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -813,7 +813,7 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, return; regs = syscon_regmap_lookup_by_phandle(np, NULL); - if (!regs) + if (IS_ERR(regs)) return; rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); -- cgit v1.2.3 From 4d65f9b6869a756e89172d858671688349e77671 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 8 Apr 2022 10:59:45 +0200 Subject: net: ethernet: mtk_eth_soc/wed: fix sparse endian warnings Descriptor fields are little-endian Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") Reported-by: kernel test robot Signed-off-by: Felix Fietkau Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_wed.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 5d510ac24f2a..5530f7991d1d 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -144,16 +144,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev) for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { u32 txd_size; + u32 ctrl; txd_size = dev->wlan.init_buf(buf, buf_phys, token++); - desc->buf0 = buf_phys; - desc->buf1 = buf_phys + txd_size; - desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, - txd_size) | - FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, - MTK_WED_BUF_SIZE - txd_size) | - MTK_WDMA_DESC_CTRL_LAST_SEG1; + desc->buf0 = cpu_to_le32(buf_phys); + desc->buf1 = cpu_to_le32(buf_phys + txd_size); + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, + MTK_WED_BUF_SIZE - txd_size) | + MTK_WDMA_DESC_CTRL_LAST_SEG1; + desc->ctrl = cpu_to_le32(ctrl); desc->info = 0; desc++; @@ -184,12 +185,14 @@ mtk_wed_free_buffer(struct mtk_wed_device *dev) for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { void *page = page_list[page_idx++]; + dma_addr_t buf_addr; if (!page) break; - dma_unmap_page(dev->hw->dev, desc[i].buf0, - PAGE_SIZE, DMA_BIDIRECTIONAL); + buf_addr = le32_to_cpu(desc[i].buf0); + dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); __free_page(page); } -- cgit v1.2.3 From a21437d2b4855980a15cc9e5dc230f95c7563772 Mon Sep 17 00:00:00 2001 From: Lv Ruyi Date: Fri, 8 Apr 2022 09:49:01 +0000 Subject: bnx2x: Fix spelling mistake "regiser" -> "register" There are some spelling mistakes in the comments for macro. Fix it. Reported-by: Zeal Robot Signed-off-by: Lv Ruyi Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 881ac33fe914..4e9215bce4ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2212,7 +2212,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. 
Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; @@ -2381,7 +2381,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; @@ -2493,7 +2493,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; @@ -2529,7 +2529,7 @@ * MAC DA 2. The reset default is set to mask out all parameters. */ #define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8 -/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; -- cgit v1.2.3 From d072c88c28e1e2c886681bbb0f1748b8e6ff105f Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 8 Apr 2022 16:48:38 +0300 Subject: net: ethernet: ti: cpsw: drop CPSW_HEADROOM define Since commit 1771afd47430 ("net: cpsw: avoid alignment faults by taking NET_IP_ALIGN into account") the TI CPSW driver was switched to use correct define CPSW_HEADROOM_NA to avoid alignment faults, but there are two places left where CPSW_HEADROOM is still used (without causing issues). Hence, completely drop CPSW_HEADROOM define and use CPSW_HEADROOM_NA everywhere to avoid further mistakes in code. Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- drivers/net/ethernet/ti/cpsw_new.c | 2 +- drivers/net/ethernet/ti/cpsw_priv.h | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 03575c017500..e4edcc8c6889 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -335,7 +335,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) static unsigned int cpsw_rxbuf_total_len(unsigned int len) { - len += CPSW_HEADROOM; + len += CPSW_HEADROOM_NA; len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); return SKB_DATA_ALIGN(len); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index bd4b1528cf99..394145d06d54 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -273,7 +273,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) static unsigned int cpsw_rxbuf_total_len(unsigned int len) { - len += CPSW_HEADROOM; + len += CPSW_HEADROOM_NA; len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); return SKB_DATA_ALIGN(len); diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 74555970730c..86a0a1a093b5 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -411,7 +411,6 @@ struct __aligned(sizeof(long)) cpsw_meta_xdp { /* The buf includes headroom compatible with both skb and xdpf */ #define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN) -#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long)) static inline int cpsw_is_xdpf_handle(void *handle) { -- cgit v1.2.3 From 6d945a33f2b0aa24fc210dadaa0af3e8218e7002 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 25 Mar 2022 11:42:41 +0100 Subject: mac80211: introduce BSS color collision detection Add ieee80211_rx_check_bss_color_collision routine in order to introduce BSS color collision detection in mac80211 if it is not supported in HW/FW (e.g. for mt7915 chipset). Add IEEE80211_HW_DETECTS_COLOR_COLLISION flag to let the driver notify BSS color collision detection is supported in HW/FW. Set this for ath11k which apparently didn't need this code. 
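For a driver whose firmware already performs the detection, the intent is to advertise both the nl80211 feature and the new mac80211 flag at registration time, so that the software check in ieee80211_rx_check_bss_color_collision() bails out early. A minimal sketch, assuming a made-up firmware capability test fw_reports_color_collision() and the driver's usual ar->hw pointer (not the ath11k hunk below):

	/* Sketch only: fw_reports_color_collision() stands in for whatever
	 * capability bit the firmware actually exposes.
	 */
	if (fw_reports_color_collision(ar)) {
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_BSS_COLOR);
		ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION);
	}
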
Tested-by: Peter Chiu Co-developed-by: Ryder Lee Signed-off-by: Ryder Lee Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/a05eeeb1841a84560dc5aaec77894fcb69a54f27.1648204871.git.lorenzo@kernel.org [clarify commit message a bit, move flag to mac80211] Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath11k/mac.c | 5 +++- include/net/mac80211.h | 4 +++ net/mac80211/debugfs.c | 1 + net/mac80211/rx.c | 46 +++++++++++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 175a4ae752f3..49421d91d39c 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8720,9 +8720,12 @@ static int __ath11k_mac_register(struct ath11k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); - if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map)) + if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, + ar->ab->wmi_ab.svc_map)) { wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_BSS_COLOR); + ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION); + } ar->hw->wiphy->cipher_suites = cipher_suites; ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index db992f71604d..0943b2626f4d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2434,6 +2434,9 @@ struct ieee80211_txq { * usage and 802.11 frames with %RX_FLAG_ONLY_MONITOR set for monitor to * the stack. * + * @IEEE80211_HW_DETECTS_COLOR_COLLISION: HW/driver has support for BSS color + * collision detection and doesn't need it in software. 
+ * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2489,6 +2492,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD, IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD, IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP, + IEEE80211_HW_DETECTS_COLOR_COLLISION, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index f4c9a92f50f9..1fe43b264d75 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -504,6 +504,7 @@ static const char *hw_flag_names[] = { FLAG(SUPPORTS_TX_ENCAP_OFFLOAD), FLAG(SUPPORTS_RX_DECAP_OFFLOAD), FLAG(SUPPORTS_CONC_MON_RX_DECAP), + FLAG(DETECTS_COLOR_COLLISION), #undef FLAG }; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index beb6b92eb780..03ee1d9b6d08 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3178,6 +3178,49 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, ieee80211_tx_skb(sdata, skb); } +static void +ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) +{ + struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + const struct element *ie; + size_t baselen; + + if (!wiphy_ext_feature_isset(rx->local->hw.wiphy, + NL80211_EXT_FEATURE_BSS_COLOR)) + return; + + if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) + return; + + if (rx->sdata->vif.csa_active) + return; + + baselen = mgmt->u.beacon.variable - rx->skb->data; + if (baselen > rx->skb->len) + return; + + ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, + mgmt->u.beacon.variable, + rx->skb->len - baselen); + if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && + ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { + struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf; + const struct ieee80211_he_operation *he_oper; + u8 color; + + he_oper = (void *)(ie->data + 1); + if (le32_get_bits(he_oper->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED)) + return; + + color = le32_get_bits(he_oper->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_MASK); + if (color == bss_conf->he_bss_color.color) + ieeee80211_obss_color_collision_notify(&rx->sdata->vif, + BIT_ULL(color)); + } +} + static ieee80211_rx_result debug_noinline ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) { @@ -3203,6 +3246,9 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { int sig = 0; + /* sw bss color collision detection */ + ieee80211_rx_check_bss_color_collision(rx); + if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) sig = status->signal; -- cgit v1.2.3 From 046d2e7c50e3087a32a85fd384c21f896dbccf83 Mon Sep 17 00:00:00 2001 From: Sriram R Date: Mon, 4 Apr 2022 21:11:23 +0530 Subject: mac80211: prepare sta handling for MLO support Currently in mac80211 each STA object is represented using sta_info datastructure with the associated STA specific information and drivers access ieee80211_sta part of it. With MLO (Multi Link Operation) support being added in 802.11be standard, though the association is logically with a single Multi Link capable STA, at the physical level communication can happen via different advertised links (uniquely identified by Channel, operating class, BSSID) and hence the need to handle multiple link STA parameters within a composite sta_info object called the MLD STA. 
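A simplified sketch of the layout this describes (field names taken from the conversion below; the real structures carry many more members): per-link capabilities move into a link sub-structure, and the MLD-level object embeds one default instance of it.

	/* Simplified illustration, not the complete mac80211 definitions. */
	struct ieee80211_link_sta {
		struct ieee80211_sta_ht_cap ht_cap;
		struct ieee80211_sta_vht_cap vht_cap;
		enum ieee80211_sta_rx_bandwidth bandwidth;
		u8 rx_nss;
		/* ... other per-link parameters ... */
	};

	struct ieee80211_sta {
		u8 addr[ETH_ALEN];	/* MLD STA address */
		/* ... MLD-level state ... */
		struct ieee80211_link_sta deflink;	/* default (only) link */
	};

Non-MLO drivers keep working against the single embedded 'deflink' instance, as detailed below.
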
The different link STA part of MLD STA are identified using the link address which can be same or different as the MLD STA address and unique link id based on the link vif. To support extension of such a model, the sta_info datastructure is modified to hold multiple link STA objects with link specific params currently within sta_info moved to this new structure. Similarly this is done for ieee80211_sta as well which will be accessed within mac80211 as well as by drivers, hence trivial driver changes are expected to support this. For current non MLO supported drivers, only one link STA is present and link information is accessed via 'deflink' member. For MLO drivers, we still need to define the APIs etc. to get the correct link ID and access the correct part of the station info. Currently in mac80211, all link STA info are accessed directly via deflink. These will be updated to access via link pointers indexed by link id with MLO support patches, with link id being 0 for non MLO supported cases. Except for couple of macro related changes, below spatch takes care of updating mac80211 and driver code to access to the link STA info via deflink. @ieee80211_sta@ struct ieee80211_sta *s; struct sta_info *si; identifier var = {supp_rates, ht_cap, vht_cap, he_cap, he_6ghz_capa, eht_cap, rx_nss, bandwidth, txpwr}; @@ ( s-> - var + deflink.var | si->sta. - var + deflink.var ) @sta_info@ struct sta_info *si; identifier var = {gtk, pcpu_rx_stats, rx_stats, rx_stats_avg, status_stats, tx_stats, cur_max_bandwidth}; @@ ( si-> - var + deflink.var ) Signed-off-by: Sriram R Link: https://lore.kernel.org/r/1649086883-13246-1-git-send-email-quic_srirrama@quicinc.com [remove MLO-drivers notes from commit message, not clear yet; run spatch] Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ar5523/ar5523.c | 4 +- drivers/net/wireless/ath/ath10k/mac.c | 76 +++++----- drivers/net/wireless/ath/ath11k/mac.c | 154 ++++++++++---------- drivers/net/wireless/ath/ath9k/debug_sta.c | 4 +- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 20 +-- drivers/net/wireless/ath/ath9k/main.c | 2 +- drivers/net/wireless/ath/ath9k/xmit.c | 6 +- drivers/net/wireless/ath/carl9170/main.c | 8 +- drivers/net/wireless/ath/carl9170/tx.c | 5 +- drivers/net/wireless/ath/wcn36xx/main.c | 18 +-- drivers/net/wireless/ath/wcn36xx/smd.c | 35 ++--- .../broadcom/brcm80211/brcmsmac/mac80211_if.c | 2 +- drivers/net/wireless/intel/iwlegacy/3945-rs.c | 6 +- drivers/net/wireless/intel/iwlegacy/4965-rs.c | 22 +-- drivers/net/wireless/intel/iwlegacy/common.c | 6 +- drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 22 +-- drivers/net/wireless/intel/iwlwifi/dvm/rxon.c | 2 +- drivers/net/wireless/intel/iwlwifi/dvm/sta.c | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 38 ++--- drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 38 ++--- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 35 ++--- drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 8 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 31 ++--- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 6 +- drivers/net/wireless/mac80211_hwsim.c | 4 +- drivers/net/wireless/marvell/mwl8k.c | 48 +++---- drivers/net/wireless/mediatek/mt76/mt7603/mac.c | 16 ++- .../net/wireless/mediatek/mt76/mt76_connac_mcu.c | 83 +++++------ drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 4 +- .../net/wireless/mediatek/mt76/mt7915/debugfs.c | 4 +- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 140 +++++++++---------- 
drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 2 +- drivers/net/wireless/mediatek/mt7601u/mac.c | 2 +- drivers/net/wireless/mediatek/mt7601u/tx.c | 4 +- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 8 +- drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 2 +- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 42 +++--- drivers/net/wireless/realtek/rtlwifi/base.c | 31 +++-- drivers/net/wireless/realtek/rtlwifi/core.c | 40 +++--- drivers/net/wireless/realtek/rtlwifi/rc.c | 20 +-- .../net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 26 ++-- .../net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 4 +- .../net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 26 ++-- .../net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 4 +- .../net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 26 ++-- .../net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 26 ++-- .../net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 4 +- .../net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 12 +- .../net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 4 +- .../net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 26 ++-- .../net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 2 +- .../net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 26 ++-- .../net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 4 +- .../net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 12 +- .../net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 4 +- .../net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 30 ++-- .../net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 2 +- drivers/net/wireless/realtek/rtw88/bf.c | 2 +- drivers/net/wireless/realtek/rtw88/main.c | 50 +++---- drivers/net/wireless/realtek/rtw88/tx.c | 14 +- drivers/net/wireless/realtek/rtw89/coex.c | 12 +- drivers/net/wireless/realtek/rtw89/core.c | 6 +- drivers/net/wireless/realtek/rtw89/core.h | 10 +- drivers/net/wireless/realtek/rtw89/fw.c | 15 +- drivers/net/wireless/realtek/rtw89/mac.c | 18 +-- drivers/net/wireless/realtek/rtw89/phy.c | 68 ++++----- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 12 +- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 8 +- drivers/net/wireless/st/cw1200/sta.c | 4 +- drivers/net/wireless/ti/wlcore/cmd.c | 8 +- drivers/net/wireless/ti/wlcore/main.c | 16 ++- drivers/staging/wfx/sta.c | 8 +- include/net/mac80211.h | 80 ++++++++--- net/mac80211/agg-rx.c | 12 +- net/mac80211/agg-tx.c | 6 +- net/mac80211/airtime.c | 4 +- net/mac80211/cfg.c | 11 +- net/mac80211/chan.c | 8 +- net/mac80211/debugfs_sta.c | 12 +- net/mac80211/eht.c | 6 +- net/mac80211/ethtool.c | 4 +- net/mac80211/he.c | 8 +- net/mac80211/ht.c | 8 +- net/mac80211/ibss.c | 26 ++-- net/mac80211/key.c | 9 +- net/mac80211/mesh_hwmp.c | 2 +- net/mac80211/mesh_plink.c | 24 ++-- net/mac80211/mlme.c | 18 +-- net/mac80211/ocb.c | 2 +- net/mac80211/rate.c | 8 +- net/mac80211/rc80211_minstrel_ht.c | 20 +-- net/mac80211/rx.c | 85 +++++------ net/mac80211/s1g.c | 4 +- net/mac80211/sta_info.c | 110 ++++++++------- net/mac80211/sta_info.h | 155 +++++++++++++-------- net/mac80211/status.c | 41 +++--- net/mac80211/tdls.c | 26 ++-- net/mac80211/trace.h | 4 +- net/mac80211/tx.c | 26 ++-- net/mac80211/vht.c | 78 +++++------ 103 files changed, 1200 insertions(+), 1094 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 9cabd342d156..00ddeee123c2 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1160,7 +1160,7 @@ static int ar5523_get_wlan_mode(struct ar5523 *ar, ar5523_info(ar, "STA not found!\n"); return 
WLAN_MODE_11b; } - sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band]; + sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band]; for (bit = 0; bit < band->n_bitrates; bit++) { if (sta_rate_set & 1) { @@ -1198,7 +1198,7 @@ static void ar5523_create_rateset(struct ar5523 *ar, ar5523_info(ar, "STA not found. Cannot set rates\n"); sta_rate_set = bss_conf->basic_rates; } else - sta_rate_set = sta->supp_rates[ar->hw->conf.chandef.chan->band]; + sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band]; ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b11aaee8b8c0..d804e19a742a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2251,7 +2251,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar, band = def.chan->band; sband = ar->hw->wiphy->bands[band]; - ratemask = sta->supp_rates[band]; + ratemask = sta->deflink.supp_rates[band]; ratemask &= arvif->bitrate_mask.control[band].legacy; rates = sband->bitrates; @@ -2296,7 +2296,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; @@ -2335,7 +2335,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) arg->peer_flags |= ar->wmi.peer_flags->ldbc; - if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { arg->peer_flags |= ar->wmi.peer_flags->bw40; arg->peer_rate_caps |= WMI_RC_CW40_FLAG; } @@ -2388,7 +2388,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_ht_rates.rates[i] = i; } else { arg->peer_ht_rates.num_rates = n; - arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); + arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, + max_nss); } ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", @@ -2545,7 +2546,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_hw_params *hw = &ar->hw_params; struct cfg80211_chan_def def; @@ -2587,10 +2588,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1); - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) arg->peer_flags |= ar->wmi.peer_flags->bw80; - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) arg->peer_flags |= ar->wmi.peer_flags->bw160; /* Calculate peer NSS capability from VHT capabilities if STA @@ -2604,7 +2605,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, vht_mcs_mask[i]) max_nss = i + 1; } - arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); + arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss); arg->peer_vht_rates.rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->peer_vht_rates.rx_mcs_set = @@ -2684,15 +2685,15 @@ static void ath10k_peer_assoc_h_qos(struct ath10k 
*ar, static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) { - return sta->supp_rates[NL80211_BAND_2GHZ] >> + return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> ATH10K_MAC_FIRST_OFDM_RATE_IDX; } static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, struct ieee80211_sta *sta) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { - switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { + switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: return MODE_11AC_VHT160; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: @@ -2703,13 +2704,13 @@ static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, } } - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AC_VHT80; - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AC_VHT40; - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AC_VHT20; return MODE_UNKNOWN; @@ -2736,15 +2737,15 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, switch (band) { case NL80211_BAND_2GHZ: - if (sta->vht_cap.vht_supported && + if (sta->deflink.vht_cap.vht_supported && !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AC_VHT40; else phymode = MODE_11AC_VHT20; - } else if (sta->ht_cap.ht_supported && + } else if (sta->deflink.ht_cap.ht_supported && !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NG_HT40; else phymode = MODE_11NG_HT20; @@ -2759,12 +2760,12 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, /* * Check VHT first. */ - if (sta->vht_cap.vht_supported && + if (sta->deflink.vht_cap.vht_supported && !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { phymode = ath10k_mac_get_phymode_vht(ar, sta); - } else if (sta->ht_cap.ht_supported && + } else if (sta->deflink.ht_cap.ht_supported && !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { - if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) phymode = MODE_11NA_HT40; else phymode = MODE_11NA_HT20; @@ -3079,8 +3080,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, /* ap_sta must be accessed only within rcu section which must be left * before calling ath10k_setup_peer_smps() which might sleep. 
*/ - ht_cap = ap_sta->ht_cap; - vht_cap = ap_sta->vht_cap; + ht_cap = ap_sta->deflink.ht_cap; + vht_cap = ap_sta->deflink.vht_cap; ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); if (ret) { @@ -3278,7 +3279,7 @@ static int ath10k_station_assoc(struct ath10k *ar, */ if (!reassoc) { ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, - &sta->ht_cap); + &sta->deflink.ht_cap); if (ret) { ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -6787,10 +6788,10 @@ static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw, int ret = 0; s16 txpwr; - if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) { + if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { txpwr = 0; } else { - txpwr = sta->txpwr.power; + txpwr = sta->deflink.txpwr.power; if (!txpwr) return -EINVAL; } @@ -6910,26 +6911,26 @@ static int ath10k_mac_validate_rate_mask(struct ath10k *ar, struct ieee80211_sta *sta, u32 rate_ctrl_flag, u8 nss) { - if (nss > sta->rx_nss) { + if (nss > sta->deflink.rx_nss) { ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n", - nss, sta->rx_nss); + nss, sta->deflink.rx_nss); return -EINVAL; } if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) { - if (!sta->vht_cap.vht_supported) { + if (!sta->deflink.vht_cap.vht_supported) { ath10k_warn(ar, "Invalid VHT rate for sta %pM\n", sta->addr); return -EINVAL; } } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) { - if (!sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) { + if (!sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported) { ath10k_warn(ar, "Invalid HT rate for sta %pM\n", sta->addr); return -EINVAL; } } else { - if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) + if (sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported) return -EINVAL; } @@ -8272,7 +8273,7 @@ static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar, u8 rate = arvif->vht_pfr; /* skip non vht and multiple rate peers */ - if (!sta->vht_cap.vht_supported || arvif->vht_num_rates != 1) + if (!sta->deflink.vht_cap.vht_supported || arvif->vht_num_rates != 1) return false; err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, @@ -8313,7 +8314,7 @@ static void ath10k_mac_clr_bitrate_mask_iter(void *data, int err; /* clear vht peers only */ - if (arsta->arvif != arvif || !sta->vht_cap.vht_supported) + if (arsta->arvif != arvif || !sta->deflink.vht_cap.vht_supported) return; err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, @@ -8457,13 +8458,14 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", - sta->addr, changed, sta->bandwidth, sta->rx_nss, + sta->addr, changed, sta->deflink.bandwidth, + sta->deflink.rx_nss, sta->smps_mode); if (changed & IEEE80211_RC_BW_CHANGED) { bw = WMI_PEER_CHWIDTH_20MHZ; - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_20: bw = WMI_PEER_CHWIDTH_20MHZ; break; @@ -8478,7 +8480,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, break; default: ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", - sta->bandwidth, sta->addr); + sta->deflink.bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; break; } @@ -8487,7 +8489,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, } if (changed & IEEE80211_RC_NSS_CHANGED) - arsta->nss = sta->rx_nss; + arsta->nss = sta->deflink.rx_nss; if (changed & IEEE80211_RC_SMPS_CHANGED) { smps = 
WMI_PEER_SMPS_PS_NONE; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 49421d91d39c..a7ee570c1e9c 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1636,7 +1636,7 @@ static void ath11k_peer_assoc_h_rates(struct ath11k *ar, band = def.chan->band; sband = ar->hw->wiphy->bands[band]; - ratemask = sta->supp_rates[band]; + ratemask = sta->deflink.supp_rates[band]; ratemask &= arvif->bitrate_mask.control[band].legacy; rates = sband->bitrates; @@ -1681,7 +1681,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; struct ath11k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; @@ -1718,7 +1718,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar, if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) arg->ldpc_flag = true; - if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { arg->bw_40 = true; arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; } @@ -1776,7 +1776,7 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar, arg->peer_ht_rates.rates[i] = i; } else { arg->peer_ht_rates.num_rates = n; - arg->peer_nss = min(sta->rx_nss, max_nss); + arg->peer_nss = min(sta->deflink.rx_nss, max_nss); } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", @@ -1878,7 +1878,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; struct ath11k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; @@ -1924,17 +1924,17 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1); - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) arg->bw_80 = true; - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask); - if (vht_nss > sta->rx_nss) { + if (vht_nss > sta->deflink.rx_nss) { user_rate_valid = false; - for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) { + for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) { if (vht_mcs_mask[nss_idx]) { user_rate_valid = true; break; @@ -1944,8 +1944,8 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, if (!user_rate_valid) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting vht range mcs value to peer supported nss %d for peer %pM\n", - sta->rx_nss, sta->addr); - vht_mcs_mask[sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1]; + sta->deflink.rx_nss, sta->addr); + vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1]; } /* Calculate peer NSS capability from VHT capabilities if STA @@ -1959,7 +1959,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, vht_mcs_mask[i]) max_nss = i + 1; } - arg->peer_nss = min(sta->rx_nss, max_nss); + arg->peer_nss = min(sta->deflink.rx_nss, max_nss); arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); @@ -2078,7 +2078,7 @@ static void 
ath11k_peer_assoc_h_he(struct ath11k *ar, { struct ath11k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; enum nl80211_band band; u16 *he_mcs_mask; u8 max_nss, he_mcs; @@ -2135,7 +2135,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, else max_nss = rx_mcs_80; - arg->peer_nss = min(sta->rx_nss, max_nss); + arg->peer_nss = min(sta->deflink.rx_nss, max_nss); memcpy_and_pad(&arg->peer_he_cap_macinfo, sizeof(arg->peer_he_cap_macinfo), @@ -2167,10 +2167,10 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); if (ampdu_factor) { - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; - else if (sta->ht_cap.ht_supported) + else if (sta->deflink.ht_cap.ht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; } @@ -2213,9 +2213,9 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, he_nss = ath11k_mac_max_he_nss(he_mcs_mask); - if (he_nss > sta->rx_nss) { + if (he_nss > sta->deflink.rx_nss) { user_rate_valid = false; - for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) { + for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) { if (he_mcs_mask[nss_idx]) { user_rate_valid = true; break; @@ -2225,11 +2225,11 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, if (!user_rate_valid) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting he range mcs value to peer supported nss %d for peer %pM\n", - sta->rx_nss, sta->addr); - he_mcs_mask[sta->rx_nss - 1] = he_mcs_mask[he_nss - 1]; + sta->deflink.rx_nss, sta->addr); + he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1]; } - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { @@ -2283,7 +2283,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, he_mcs_mask[i]) max_nss = i + 1; } - arg->peer_nss = min(sta->rx_nss, max_nss); + arg->peer_nss = min(sta->deflink.rx_nss, max_nss); if (arg->peer_phymode == MODE_11AX_HE160 || arg->peer_phymode == MODE_11AX_HE80_80) { @@ -2316,7 +2316,7 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; struct cfg80211_chan_def def; enum nl80211_band band; u8 ampdu_factor; @@ -2326,19 +2326,19 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar, band = def.chan->band; - if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa) + if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa) return; - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) arg->bw_40 = true; - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) arg->bw_80 = true; - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; - arg->peer_he_caps_6ghz = le16_to_cpu(sta->he_6ghz_capa.capa); + arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa); arg->peer_mpdu_density = 
ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START, arg->peer_he_caps_6ghz)); @@ -2364,17 +2364,17 @@ static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar, static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta, struct peer_assoc_params *arg) { - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; int smps; - if (!ht_cap->ht_supported && !sta->he_6ghz_capa.capa) + if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa) return; if (ht_cap->ht_supported) { smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; } else { - smps = le16_get_bits(sta->he_6ghz_capa.capa, + smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_SM_PS); } @@ -2498,15 +2498,15 @@ err: static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) { - return sta->supp_rates[NL80211_BAND_2GHZ] >> + return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> ATH11K_MAC_FIRST_OFDM_RATE_IDX; } static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar, struct ieee80211_sta *sta) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { - switch (sta->vht_cap.cap & + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { + switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: return MODE_11AC_VHT160; @@ -2518,13 +2518,13 @@ static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar, } } - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AC_VHT80; - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AC_VHT40; - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AC_VHT20; return MODE_UNKNOWN; @@ -2533,24 +2533,24 @@ static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar, static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar, struct ieee80211_sta *sta) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { - if (sta->he_cap.he_cap_elem.phy_cap_info[0] & + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) return MODE_11AX_HE160; - else if (sta->he_cap.he_cap_elem.phy_cap_info[0] & - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) + else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) return MODE_11AX_HE80_80; /* not sure if this is a valid case? 
*/ return MODE_11AX_HE160; } - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AX_HE80; - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AX_HE40; - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AX_HE20; return MODE_UNKNOWN; @@ -2579,23 +2579,23 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, switch (band) { case NL80211_BAND_2GHZ: - if (sta->he_cap.has_he && + if (sta->deflink.he_cap.has_he && !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) phymode = MODE_11AX_HE80_2G; - else if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AX_HE40_2G; else phymode = MODE_11AX_HE20_2G; - } else if (sta->vht_cap.vht_supported && - !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + } else if (sta->deflink.vht_cap.vht_supported && + !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AC_VHT40; else phymode = MODE_11AC_VHT20; - } else if (sta->ht_cap.ht_supported && + } else if (sta->deflink.ht_cap.ht_supported && !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NG_HT40; else phymode = MODE_11NG_HT20; @@ -2608,15 +2608,15 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: /* Check HE first */ - if (sta->he_cap.has_he && + if (sta->deflink.he_cap.has_he && !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { phymode = ath11k_mac_get_phymode_he(ar, sta); - } else if (sta->vht_cap.vht_supported && - !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + } else if (sta->deflink.vht_cap.vht_supported && + !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { phymode = ath11k_mac_get_phymode_vht(ar, sta); - } else if (sta->ht_cap.ht_supported && + } else if (sta->deflink.ht_cap.ht_supported && !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { - if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) + if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) phymode = MODE_11NA_HT40; else phymode = MODE_11NA_HT20; @@ -2739,8 +2739,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, } ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid, - &ap_sta->ht_cap, - le16_to_cpu(ap_sta->he_6ghz_capa.capa)); + &ap_sta->deflink.ht_cap, + le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -4001,7 +4001,7 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif, } /* Avoid updating invalid nss as fixed rate*/ - if (nss > sta->rx_nss) + if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, @@ -4051,7 +4051,7 @@ ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif, } /* Avoid updating invalid nss as fixed rate */ - if (nss > sta->rx_nss) + if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, @@ -4118,12 +4118,12 @@ static int ath11k_station_assoc(struct ath11k *ar, * fixed param. * Note that all other rates and NSS will be disabled for this peer. 
*/ - if (sta->vht_cap.vht_supported && num_vht_rates == 1) { + if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, band); if (ret) return ret; - } else if (sta->he_cap.has_he && num_he_rates == 1) { + } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) { ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, band); if (ret) @@ -4137,7 +4137,8 @@ static int ath11k_station_assoc(struct ath11k *ar, return 0; ret = ath11k_setup_peer_smps(ar, arvif, sta->addr, - &sta->ht_cap, le16_to_cpu(sta->he_6ghz_capa.capa)); + &sta->deflink.ht_cap, + le16_to_cpu(sta->deflink.he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -4299,10 +4300,10 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk) * TODO: Check RATEMASK_CMDID to support auto rates selection * across HT/VHT and for multiple VHT MCS support. */ - if (sta->vht_cap.vht_supported && num_vht_rates == 1) { + if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, band); - } else if (sta->he_cap.has_he && num_he_rates == 1) { + } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) { ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, band); } else { @@ -4624,10 +4625,10 @@ static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, int ret = 0; s16 txpwr; - if (sta->txpwr.type == NL80211_TX_POWER_AUTOMATIC) { + if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { txpwr = 0; } else { - txpwr = sta->txpwr.power; + txpwr = sta->deflink.txpwr.power; if (!txpwr) return -EINVAL; } @@ -4688,7 +4689,8 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", - sta->addr, changed, sta->bandwidth, sta->rx_nss, + sta->addr, changed, sta->deflink.bandwidth, + sta->deflink.rx_nss, sta->smps_mode); spin_lock_bh(&ar->data_lock); @@ -4696,7 +4698,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, if (changed & IEEE80211_RC_BW_CHANGED) { bw = WMI_PEER_CHWIDTH_20MHZ; - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_20: bw = WMI_PEER_CHWIDTH_20MHZ; break; @@ -4711,7 +4713,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, break; default: ath11k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n", - sta->bandwidth, sta->addr); + sta->deflink.bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; break; } @@ -4720,7 +4722,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, } if (changed & IEEE80211_RC_NSS_CHANGED) - arsta->nss = sta->rx_nss; + arsta->nss = sta->deflink.rx_nss; if (changed & IEEE80211_RC_SMPS_CHANGED) { smps = WMI_PEER_SMPS_PS_NONE; @@ -7755,13 +7757,13 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b spin_lock_bh(&ar->ab->base_lock); list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) { if (peer->sta) { - if (vht_fixed_rate && (!peer->sta->vht_cap.vht_supported || - peer->sta->rx_nss < vht_nss)) { + if (vht_fixed_rate && (!peer->sta->deflink.vht_cap.vht_supported || + peer->sta->deflink.rx_nss < vht_nss)) { ret = false; goto out; } - if (he_fixed_rate && (!peer->sta->he_cap.has_he || - peer->sta->rx_nss < he_nss)) { + if (he_fixed_rate && (!peer->sta->deflink.he_cap.has_he || + peer->sta->deflink.rx_nss < he_nss)) { ret = false; goto out; } diff --git 
a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index d95cabddce33..1e2a30019fb6 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -36,7 +36,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, if (buf == NULL) return -ENOMEM; - if (!an->sta->ht_cap.ht_supported) { + if (!an->sta->deflink.ht_cap.ht_supported) { len = scnprintf(buf, size, "%s\n", "HT not supported"); goto exit; @@ -186,7 +186,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf, band = ah->curchan->chan->band; rstats = &an->rx_rate_stats; - if (!sta->ht_cap.ht_supported) + if (!sta->deflink.ht_cap.ht_supported) goto legacy; len += scnprintf(buf + len, size - len, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 72ef319feeda..cfee732a89b1 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -491,7 +491,7 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, ista->index = sta_idx; tsta.is_vif_sta = 0; maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + - sta->ht_cap.ampdu_factor); + sta->deflink.ht_cap.ampdu_factor); tsta.maxampdu = cpu_to_be16(maxampdu); } else { memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); @@ -602,7 +602,7 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band]; for (i = 0, j = 0; i < sband->n_bitrates; i++) { - if (sta->supp_rates[sband->band] & BIT(i)) { + if (sta->deflink.supp_rates[sband->band] & BIT(i)) { trate->rates.legacy_rates.rs_rates[j] = (sband->bitrates[i].bitrate * 2) / 10; j++; @@ -610,9 +610,9 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, } trate->rates.legacy_rates.rs_nrates = j; - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { for (i = 0, j = 0; i < 77; i++) { - if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8))) + if (sta->deflink.ht_cap.mcs.rx_mask[i/8] & (1<<(i%8))) trate->rates.ht_rates.rs_rates[j++] = i; if (j == ATH_HTC_RATE_MAX) break; @@ -620,18 +620,18 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv, trate->rates.ht_rates.rs_nrates = j; caps = WLAN_RC_HT_FLAG; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) caps |= ATH_RC_TX_STBC_FLAG; - if (sta->ht_cap.mcs.rx_mask[1]) + if (sta->deflink.ht_cap.mcs.rx_mask[1]) caps |= WLAN_RC_DS_FLAG; - if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && - (conf_is_ht40(&priv->hw->conf))) + if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && + (conf_is_ht40(&priv->hw->conf))) caps |= WLAN_RC_40_FLAG; if (conf_is_ht40(&priv->hw->conf) && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) caps |= WLAN_RC_SGI_FLAG; else if (conf_is_ht20(&priv->hw->conf) && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)) + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)) caps |= WLAN_RC_SGI_FLAG; } diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 98090e40e1cf..eac34063af5a 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2048,7 +2048,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_OPERATIONAL: atid = ath_node_to_tid(an, tid); atid->baw_size = IEEE80211_MIN_AMPDU_BUF << - 
sta->ht_cap.ampdu_factor; + sta->deflink.ht_cap.ampdu_factor; break; default: ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n"); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d0caf1de2bde..09516a86ef9c 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1574,10 +1574,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, * in HT IBSS when a beacon with HT-info is received after the station * has already been added. */ - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { an->maxampdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + - sta->ht_cap.ampdu_factor)) - 1; - density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density); + sta->deflink.ht_cap.ampdu_factor)) - 1; + density = ath9k_parse_mpdudensity(sta->deflink.ht_cap.ampdu_density); an->mpdudensity = density; } diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 76e84adf57c1..101295162967 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1306,8 +1306,8 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw, atomic_set(&sta_info->pending_frames, 0); - if (sta->ht_cap.ht_supported) { - if (sta->ht_cap.ampdu_density > 6) { + if (sta->deflink.ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ampdu_density > 6) { /* * HW does support 16us AMPDU density. * No HT-Xmit for station. @@ -1319,7 +1319,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw, for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) RCU_INIT_POINTER(sta_info->agg[i], NULL); - sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor); + sta_info->ampdu_max_len = 1 << (3 + sta->deflink.ht_cap.ampdu_factor); sta_info->ht_sta = true; } @@ -1335,7 +1335,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw, unsigned int i; bool cleanup = false; - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { sta_info->ht_sta = false; diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 1b76f4434c06..39b3c1d9c9da 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -1044,8 +1044,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, if (unlikely(!sta || !cvif)) goto err_out; - factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor); - density = sta->ht_cap.ampdu_density; + factor = min_t(unsigned int, 1u, + sta->deflink.ht_cap.ampdu_factor); + density = sta->deflink.ht_cap.ampdu_density; if (density) { /* diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 61c47f958e4a..e34d3d0b7082 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -792,7 +792,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, int i, size; u16 *rates_table; struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); - u32 rates = sta->supp_rates[band]; + u32 rates = sta->deflink.supp_rates[band]; memset(&sta_priv->supported_rates, 0, sizeof(sta_priv->supported_rates)); @@ -818,20 +818,20 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, } } - if (sta->ht_cap.ht_supported) { - BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) > - sizeof(sta_priv->supported_rates.supported_mcs_set)); + if (sta->deflink.ht_cap.ht_supported) { + BUILD_BUG_ON(sizeof(sta->deflink.ht_cap.mcs.rx_mask) > + 
sizeof(sta_priv->supported_rates.supported_mcs_set)); memcpy(sta_priv->supported_rates.supported_mcs_set, - sta->ht_cap.mcs.rx_mask, - sizeof(sta->ht_cap.mcs.rx_mask)); + sta->deflink.ht_cap.mcs.rx_mask, + sizeof(sta->deflink.ht_cap.mcs.rx_mask)); } - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { sta_priv->supported_rates.op_rate_mode = STA_11ac; sta_priv->supported_rates.vht_rx_mcs_map = - sta->vht_cap.vht_mcs.rx_mcs_map; + sta->deflink.vht_cap.vht_mcs.rx_mcs_map; sta_priv->supported_rates.vht_tx_mcs_map = - sta->vht_cap.vht_mcs.tx_mcs_map; + sta->deflink.vht_cap.vht_mcs.tx_mcs_map; } } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 872b37875c57..dc3805609284 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -208,9 +208,9 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn, { if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn)) bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE; - else if (sta && sta->ht_cap.ht_supported) + else if (sta && sta->deflink.ht_cap.ht_supported) bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE; - else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f)) + else if (sta && (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0x7f)) bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE; else bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE; @@ -225,9 +225,10 @@ static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wcn36xx_hal_config_bss_params *bss_params) { - if (sta && sta->ht_cap.ht_supported) { - unsigned long caps = sta->ht_cap.cap; - bss_params->ht = sta->ht_cap.ht_supported; + if (sta && sta->deflink.ht_cap.ht_supported) { + unsigned long caps = sta->deflink.ht_cap.cap; + + bss_params->ht = sta->deflink.ht_cap.ht_supported; bss_params->tx_channel_width_set = is_cap_supported(caps, IEEE80211_HT_CAP_SUP_WIDTH_20_40); bss_params->lsig_tx_op_protection_full_support = @@ -250,23 +251,24 @@ wcn36xx_smd_set_bss_vht_params(struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct wcn36xx_hal_config_bss_params_v1 *bss) { - if (sta && sta->vht_cap.vht_supported) + if (sta && sta->deflink.vht_cap.vht_supported) bss->vht_capable = 1; } static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta, struct wcn36xx_hal_config_sta_params *sta_params) { - if (sta->ht_cap.ht_supported) { - unsigned long caps = sta->ht_cap.cap; - sta_params->ht_capable = sta->ht_cap.ht_supported; + if (sta->deflink.ht_cap.ht_supported) { + unsigned long caps = sta->deflink.ht_cap.cap; + + sta_params->ht_capable = sta->deflink.ht_cap.ht_supported; sta_params->tx_channel_width_set = is_cap_supported(caps, IEEE80211_HT_CAP_SUP_WIDTH_20_40); sta_params->lsig_txop_protection = is_cap_supported(caps, IEEE80211_HT_CAP_LSIG_TXOP_PROT); - sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor; - sta_params->max_ampdu_density = sta->ht_cap.ampdu_density; + sta_params->max_ampdu_size = sta->deflink.ht_cap.ampdu_factor; + sta_params->max_ampdu_density = sta->deflink.ht_cap.ampdu_density; /* max_amsdu_size: 1 : 3839 bytes, 0 : 7935 bytes (max) */ sta_params->max_amsdu_size = !is_cap_supported(caps, IEEE80211_HT_CAP_MAX_AMSDU); @@ -287,10 +289,10 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn, struct ieee80211_sta *sta, struct wcn36xx_hal_config_sta_params_v1 *sta_params) { - if (sta->vht_cap.vht_supported) { - unsigned long caps = sta->vht_cap.cap; + if (sta->deflink.vht_cap.vht_supported) { + unsigned long caps = 
sta->deflink.vht_cap.cap; - sta_params->vht_capable = sta->vht_cap.vht_supported; + sta_params->vht_capable = sta->deflink.vht_cap.vht_supported; sta_params->vht_ldpc_enabled = is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC); if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) { @@ -308,9 +310,10 @@ static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn, static void wcn36xx_smd_set_sta_ht_ldpc_params(struct ieee80211_sta *sta, struct wcn36xx_hal_config_sta_params_v1 *sta_params) { - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { sta_params->ht_ldpc_enabled = - is_cap_supported(sta->ht_cap.cap, IEEE80211_HT_CAP_LDPC_CODING); + is_cap_supported(sta->deflink.ht_cap.cap, + IEEE80211_HT_CAP_LDPC_CODING); } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index eadac0f5590f..8c741b98d8e5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -868,7 +868,7 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, spin_lock_bh(&wl->lock); brcms_c_ampdu_tx_operational(wl->wlc, tid, buf_size, (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + - sta->ht_cap.ampdu_factor)) - 1); + sta->deflink.ht_cap.ampdu_factor)) - 1); spin_unlock_bh(&wl->lock); /* Power save wakeup */ break; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c index b2478cbe558e..0eaad980c85c 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c @@ -354,13 +354,13 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) * after assoc.. */ for (i = sband->n_bitrates - 1; i >= 0; i--) { - if (sta->supp_rates[sband->band] & (1 << i)) { + if (sta->deflink.supp_rates[sband->band] & (1 << i)) { rs_sta->last_txrate_idx = i; break; } } - il->_3945.sta_supp_rates = sta->supp_rates[sband->band]; + il->_3945.sta_supp_rates = sta->deflink.supp_rates[sband->band]; /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */ if (sband->band == NL80211_BAND_5GHZ) { rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; @@ -631,7 +631,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, il_sta = NULL; } - rate_mask = sta->supp_rates[sband->band]; + rate_mask = sta->deflink.supp_rates[sband->band]; /* get user max rate if set */ max_rate_idx = fls(txrc->rate_idx_mask) - 1; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 9a491e5db75b..9dd2d890e35f 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -627,7 +627,7 @@ il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, static bool il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta) { - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + return (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && !il->ht.non_gf_sta_present; } @@ -970,7 +970,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband, lq_sta->last_rate_n_flags = tx_rate; done: /* See if there's a better rate or modulation mode to try. 
*/ - if (sta->supp_rates[sband->band]) + if (sta->deflink.supp_rates[sband->band]) il4965_rs_rate_scale_perform(il, skb, sta, lq_sta); } @@ -1164,7 +1164,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta, s32 rate; s8 is_green = lq_sta->is_green; - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported) return -1; if (sta->smps_mode == IEEE80211_SMPS_STATIC) @@ -1182,7 +1182,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta, tbl->max_search = IL_MAX_SEARCH; rate_mask = lq_sta->active_mimo2_rate; - if (il_is_ht40_tx_allowed(il, &sta->ht_cap)) + if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap)) tbl->is_ht40 = 1; else tbl->is_ht40 = 0; @@ -1217,7 +1217,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta, u8 is_green = lq_sta->is_green; s32 rate; - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported) return -1; D_RATE("LQ: try to switch to SISO\n"); @@ -1228,7 +1228,7 @@ il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta, tbl->max_search = IL_MAX_SEARCH; rate_mask = lq_sta->active_siso_rate; - if (il_is_ht40_tx_allowed(il, &sta->ht_cap)) + if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap)) tbl->is_ht40 = 1; else tbl->is_ht40 = 0; @@ -1384,7 +1384,7 @@ il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta, struct il_scale_tbl_info *search_tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); struct il_rate_scale_data *win = &(tbl->win[idx]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; u32 sz = (sizeof(struct il_scale_tbl_info) - (sizeof(struct il_rate_scale_data) * RATE_COUNT)); @@ -1507,7 +1507,7 @@ il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta, struct il_scale_tbl_info *search_tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); struct il_rate_scale_data *win = &(tbl->win[idx]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; u32 sz = (sizeof(struct il_scale_tbl_info) - (sizeof(struct il_rate_scale_data) * RATE_COUNT)); @@ -1760,7 +1760,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb, (info->flags & IEEE80211_TX_CTL_NO_ACK)) return; - lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + lq_sta->supp_rates = sta->deflink.supp_rates[lq_sta->band]; tid = il4965_rs_tl_add_packet(lq_sta, hdr); if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) { @@ -2271,7 +2271,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) int i, j; struct ieee80211_hw *hw = il->hw; struct ieee80211_conf *conf = &il->hw->conf; - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; struct il_station_priv *sta_priv; struct il_lq_sta *lq_sta; struct ieee80211_supported_band *sband; @@ -2288,7 +2288,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) win[i]); lq_sta->flush_timer = 0; - lq_sta->supp_rates = sta->supp_rates[sband->band]; + lq_sta->supp_rates = sta->deflink.supp_rates[sband->band]; for (j = 0; j < LQ_SIZE; j++) for (i = 0; i < RATE_COUNT; i++) il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j]. 
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 683b632981ed..8299d89e7505 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -1863,7 +1863,7 @@ EXPORT_SYMBOL(il_send_add_sta); static void il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) { - struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; + struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap; __le32 sta_flags; if (!sta || !sta_ht_inf->ht_supported) @@ -1900,7 +1900,7 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) cpu_to_le32((u32) sta_ht_inf-> ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); - if (il_is_ht40_tx_allowed(il, &sta->ht_cap)) + if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap)) sta_flags |= STA_FLG_HT40_EN_MSK; else sta_flags &= ~STA_FLG_HT40_EN_MSK; @@ -5222,7 +5222,7 @@ il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif) rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); if (sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; int maxstreams; maxstreams = diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index b7c8b209bfea..baffa1cbe8fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -1039,7 +1039,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, lq_sta->last_rate_n_flags = tx_rate; done: /* See if there's a better rate or modulation mode to try. */ - if (sta && sta->supp_rates[sband->band]) + if (sta && sta->deflink.supp_rates[sband->band]) rs_rate_scale_perform(priv, skb, sta, lq_sta); if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) @@ -1239,7 +1239,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv, struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; struct iwl_rxon_context *ctx = sta_priv->ctx; - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported) return -1; if (sta->smps_mode == IEEE80211_SMPS_STATIC) @@ -1294,7 +1294,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv, struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; struct iwl_rxon_context *ctx = sta_priv->ctx; - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported) return -1; if (sta->smps_mode == IEEE80211_SMPS_STATIC) @@ -1350,7 +1350,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv, struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; struct iwl_rxon_context *ctx = sta_priv->ctx; - if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported) return -1; IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); @@ -1570,7 +1570,7 @@ static void rs_move_siso_to_other(struct iwl_priv *priv, struct iwl_scale_tbl_info *search_tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; u32 sz = (sizeof(struct iwl_scale_tbl_info) - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); u8 start_action; @@ -1740,7 +1740,7 @@ static void rs_move_mimo2_to_other(struct iwl_priv *priv, struct iwl_scale_tbl_info *search_tbl = &(lq_sta->lq_info[(1 - 
lq_sta->active_tbl)]); struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; u32 sz = (sizeof(struct iwl_scale_tbl_info) - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); u8 start_action; @@ -1908,7 +1908,7 @@ static void rs_move_mimo3_to_other(struct iwl_priv *priv, struct iwl_scale_tbl_info *search_tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); struct iwl_rate_scale_data *window = &(tbl->win[index]); - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; u32 sz = (sizeof(struct iwl_scale_tbl_info) - (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); u8 start_action; @@ -2212,7 +2212,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, info->flags & IEEE80211_TX_CTL_NO_ACK) return; - lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + lq_sta->supp_rates = sta->deflink.supp_rates[lq_sta->band]; tid = rs_tl_add_packet(lq_sta, hdr); if ((tid != IWL_MAX_TID_COUNT) && @@ -2763,7 +2763,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i int i, j; struct ieee80211_hw *hw = priv->hw; struct ieee80211_conf *conf = &priv->hw->conf; - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; struct iwl_station_priv *sta_priv; struct iwl_lq_sta *lq_sta; struct ieee80211_supported_band *sband; @@ -2781,7 +2781,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); lq_sta->flush_timer = 0; - lq_sta->supp_rates = sta->supp_rates[sband->band]; + lq_sta->supp_rates = sta->deflink.supp_rates[sband->band]; IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n", sta_id); @@ -2798,7 +2798,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i /* * active legacy rates as per supported rates bitmap */ - supp = sta->supp_rates[sband->band]; + supp = sta->deflink.supp_rates[sband->band]; lq_sta->active_legacy_rate = 0; for_each_set_bit(i, &supp, BITS_PER_LONG) lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index 70338bc7bb54..5dd2d43a01d8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -1280,7 +1280,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv, break; } - ht_cap = &sta->ht_cap; + ht_cap = &sta->deflink.ht_cap; need_multiple = true; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c index 8f7a0f36c276..476068c0abb7 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -139,7 +139,7 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, if (!sta) return true; - return sta->bandwidth >= IEEE80211_STA_RX_BW_40; + return sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40; } static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, @@ -147,7 +147,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, struct iwl_rxon_context *ctx, __le32 *flags, __le32 *mask) { - struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; + struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap; *mask = STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK | diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index a995bba0ba81..bcc4ed20fe5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -915,7 +915,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ wowlan_config_cmd->is_11n_connection = - ap_sta->ht_cap.ht_supported; + ap_sta->deflink.ht_cap.ht_supported; wowlan_config_cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 784d91281c02..4fda6c3ba9f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1877,8 +1877,8 @@ static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_he_pkt_ext_v2 *pkt_ext) { - u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; - u8 *ppe = &sta->he_cap.ppe_thres[0]; + u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &sta->deflink.he_cap.ppe_thres[0]; u8 ru_index_bitmap = u8_get_bits(*ppe, IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); @@ -1993,7 +1993,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, return; } - if (!sta->he_cap.has_he) { + if (!sta->deflink.he_cap.has_he) { rcu_read_unlock(); return; } @@ -2005,17 +2005,17 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ - if (sta->he_cap.he_cap_elem.mac_cap_info[0] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); - if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & + if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || - (sta->he_cap.he_cap_elem.mac_cap_info[2] & + (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { u8 link_adap = - ((sta->he_cap.he_cap_elem.mac_cap_info[2] & + ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + - (sta->he_cap.he_cap_elem.mac_cap_info[1] & + (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); if (link_adap == 2) @@ -2025,12 +2025,12 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); - if (sta->he_cap.he_cap_elem.mac_cap_info[3] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); - if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); /* @@ -2041,7 +2041,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, sizeof(sta_ctxt_cmd.pkt_ext)); /* If PPE Thresholds exist, parse them into a FW-familiar format. 
*/ - if (sta->he_cap.he_cap_elem.phy_cap_info[6] & + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, &sta_ctxt_cmd.pkt_ext); @@ -2050,7 +2050,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, * according to Common Nominal Packet Padding fiels. */ } else { u8 nominal_padding = - u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], + u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9], IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, @@ -2058,11 +2058,11 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, &flags); } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) flags |= STA_CTXT_HE_32BIT_BA_BITMAP; - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN) flags |= STA_CTXT_HE_ACK_ENABLED; @@ -3157,7 +3157,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, } if (vif->type == NL80211_IFTYPE_STATION) - vif->bss_conf.he_support = sta->he_cap.has_he; + vif->bss_conf.he_support = sta->deflink.he_cap.has_he; if (sta->tdls && (vif->p2p || @@ -3189,17 +3189,17 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { if (vif->type == NL80211_IFTYPE_AP) { - vif->bss_conf.he_support = sta->he_cap.has_he; + vif->bss_conf.he_support = sta->deflink.he_cap.has_he; mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); } else if (vif->type == NL80211_IFTYPE_STATION) { - vif->bss_conf.he_support = sta->he_cap.has_he; + vif->bss_conf.he_support = sta->deflink.he_cap.has_he; mvmvif->he_ru_2mhz_block = false; - if (sta->he_cap.has_he) + if (sta->deflink.he_cap.has_he) iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 9830d2663689..d8c3d7ff4f44 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -11,7 +11,7 @@ static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) { - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: return IWL_TLC_MNG_CH_WIDTH_160MHZ; case IEEE80211_STA_RX_BW_80: @@ -38,9 +38,9 @@ static u8 rs_fw_set_active_chains(u8 chains) static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; u8 supp = 0; if (he_cap->has_he) @@ -62,9 +62,9 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct 
ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; bool vht_ena = vht_cap->vht_supported; u16 flags = 0; @@ -136,7 +136,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, { u16 supp; int i, highest_mcs; - u8 max_nss = sta->rx_nss; + u8 max_nss = sta->deflink.rx_nss; struct ieee80211_vht_cap ieee_vht_cap = { .vht_cap_info = cpu_to_le32(vht_cap->cap), .supp_mcs = vht_cap->vht_mcs, @@ -154,7 +154,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, continue; supp = BIT(highest_mcs + 1) - 1; - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp); @@ -163,7 +163,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, * configuration is supported - only for MCS 0 since we already * decoded the MCS bits anyway ourselves. */ - if (sta->bandwidth == IEEE80211_STA_RX_BW_160 && + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160 && ieee80211_get_vht_max_nss(&ieee_vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true, nss) >= nss) @@ -194,7 +194,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct iwl_tlc_config_cmd_v4 *cmd) { - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); u16 tx_mcs_80 = @@ -202,7 +202,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, u16 tx_mcs_160 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); int i; - u8 nss = sta->rx_nss; + u8 nss = sta->deflink.rx_nss; /* the station support only a single receive chain */ if (sta->smps_mode == IEEE80211_SMPS_STATIC) @@ -245,12 +245,12 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, int i; u16 supp = 0; unsigned long tmp; /* must be unsigned long for for_each_set_bit */ - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; /* non HT rates */ - tmp = sta->supp_rates[sband->band]; + tmp = sta->deflink.supp_rates[sband->band]; for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value); @@ -378,11 +378,11 @@ out: u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; if (mvmsta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { - switch (le16_get_bits(sta->he_6ghz_capa.capa, + switch (le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 62114616317c..974eeecc9153 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -135,7 +135,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { - if (!sta->ht_cap.ht_supported) + if (!sta->deflink.ht_cap.ht_supported) return false; if (sta->smps_mode == IEEE80211_SMPS_STATIC) @@ -157,7 +157,7 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { - if (!sta->ht_cap.ht_supported) + if (!sta->deflink.ht_cap.ht_supported) return false; return true; @@ -167,8 +167,8 @@ static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct rs_rate *rate, const struct rs_tx_column *next_col) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; if (is_ht20(rate) && (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)) @@ -1369,13 +1369,13 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm, static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) { - struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->vht_cap; + struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->deflink.vht_cap; struct ieee80211_vht_cap vht_cap = { .vht_cap_info = cpu_to_le32(sta_vht_cap->cap), .supp_mcs = sta_vht_cap->vht_mcs, }; - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: /* * Don't use 160 MHz if VHT extended NSS support @@ -1388,7 +1388,7 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) if (ieee80211_get_vht_max_nss(&vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true, - sta->rx_nss) < sta->rx_nss) + sta->deflink.rx_nss) < sta->deflink.rx_nss) return RATE_MCS_CHAN_WIDTH_80; return RATE_MCS_CHAN_WIDTH_160; case IEEE80211_STA_RX_BW_80: @@ -2537,7 +2537,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, * In case of VHT/HT when the rssi is low fallback to the case of * legacy rates. 
*/ - if (sta->vht_cap.vht_supported && + if (sta->deflink.vht_cap.vht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { /* * In AP mode, when a new station associates, rs is initialized @@ -2563,14 +2563,15 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); break; default: - IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth); + IWL_ERR(mvm, "Invalid BW %d\n", + sta->deflink.bandwidth); goto out; } active_rate = lq_sta->active_siso_rate; rate->type = LQ_VHT_SISO; rate->bw = bw; - } else if (sta->ht_cap.ht_supported && + } else if (sta->deflink.ht_cap.ht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { initial_rates = rs_optimal_rates_ht; nentries = ARRAY_SIZE(rs_optimal_rates_ht); @@ -2761,14 +2762,14 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta, /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ if (i == IWL_RATE_MCS_9_INDEX && - sta->bandwidth == IEEE80211_STA_RX_BW_20) + sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) continue; lq_sta->active_siso_rate |= BIT(i); } } - if (sta->rx_nss < 2) + if (sta->deflink.rx_nss < 2) return; highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2); @@ -2779,7 +2780,7 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta, /* VHT MCS9 isn't valid for 20Mhz for NSS=1,2 */ if (i == IWL_RATE_MCS_9_INDEX && - sta->bandwidth == IEEE80211_STA_RX_BW_20) + sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) continue; lq_sta->active_mimo2_rate |= BIT(i); @@ -2916,8 +2917,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, { int i, j; struct ieee80211_hw *hw = mvm->hw; - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; struct ieee80211_supported_band *sband; @@ -2953,7 +2954,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* * active legacy rates as per supported rates bitmap */ - supp = sta->supp_rates[sband->band]; + supp = sta->deflink.supp_rates[sband->band]; lq_sta->active_legacy_rate = 0; for_each_set_bit(i, &supp, BITS_PER_LONG) lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value); @@ -3246,7 +3247,7 @@ static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); done: /* See if there's a better rate or modulation mode to try. */ - if (sta->supp_rates[info->band]) + if (sta->deflink.supp_rates[info->band]) rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index 655da8856c75..693752d8f65b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -106,10 +106,10 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, * capabilities of the AP station, and choose the watermark accordingly. 
*/ if (sta) { - if (sta->ht_cap.ht_supported || - sta->vht_cap.vht_supported || - sta->he_cap.has_he) { - switch (sta->rx_nss) { + if (sta->deflink.ht_cap.ht_supported || + sta->deflink.vht_cap.vht_supported || + sta->deflink.he_cap.has_he) { + switch (sta->deflink.rx_nss) { case 1: watermark = SF_W_MARK_SISO; break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index c7f9d3870f21..406f0a50a5bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -86,7 +86,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } } - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_320: case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); @@ -98,13 +98,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); fallthrough; case IEEE80211_STA_RX_BW_20: - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ); break; } - switch (sta->rx_nss) { + switch (sta->deflink.rx_nss) { case 1: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; @@ -134,12 +134,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, break; } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); - mpdu_dens = sta->ht_cap.ampdu_density; + mpdu_dens = sta->deflink.ht_cap.ampdu_density; } if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { @@ -147,18 +147,17 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); - mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa, + mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); - agg_size = le16_get_bits(sta->he_6ghz_capa.capa, - IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); - } else - if (sta->vht_cap.vht_supported) { - agg_size = sta->vht_cap.cap & + agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + } else if (sta->deflink.vht_cap.vht_supported) { + agg_size = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; agg_size >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; - } else if (sta->ht_cap.ht_supported) { - agg_size = sta->ht_cap.ampdu_factor; + } else if (sta->deflink.ht_cap.ht_supported) { + agg_size = sta->deflink.ht_cap.ampdu_factor; } /* D6.0 10.12.2 A-MPDU length limit rules @@ -169,8 +168,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * Maximum AMPDU Length Exponent Extension field in its HE * Capabilities element */ - if (sta->he_cap.has_he) - agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3], + if (sta->deflink.he_cap.has_he) + agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3], IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); /* Limit to max A-MPDU supported by FW */ @@ -782,7 +781,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, /* this queue isn't used for traffic (cab_queue) */ if (IS_ERR_OR_NULL(sta)) { size = IWL_MGMT_QUEUE_SIZE; - } else if (sta->he_cap.has_he) { + } else if (sta->deflink.he_cap.has_he) { /* support for 256 ba size 
*/ size = IWL_DEFAULT_QUEUE_SIZE_HE; } else { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 7763037b93ed..8125bb76f59e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -794,7 +794,7 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, int lmac = iwl_mvm_get_lmac_id(mvm->fw, band); /* For HE redirect to trigger based fifos */ - if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) + if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) ac += 4; txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); @@ -935,7 +935,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * section 8.7.3 NOTE 3). */ if (info->flags & IEEE80211_TX_CTL_AMPDU && - !sta->vht_cap.vht_supported) + !sta->deflink.vht_cap.vht_supported) max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); /* Sub frame header + SNAP + IP header + TCP header + MSS */ @@ -1083,7 +1083,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; - if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he) + if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) return -1; if (unlikely(ieee80211_is_probe_resp(fc))) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 28bfa7b7b73c..afdf48550588 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2189,7 +2189,7 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw, u32 bw = U32_MAX; enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT; - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { #define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break C(20); C(40); @@ -2211,7 +2211,7 @@ mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw, WARN(bw > hwsim_get_chanwidth(confbw), "intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n", - vif->addr, sta->addr, bw, sta->bandwidth, + vif->addr, sta->addr, bw, sta->deflink.bandwidth, hwsim_get_chanwidth(data->bw), data->bw); } diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 864a2ba9efee..36c24d17136c 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -1985,7 +1985,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, txpriority = index; - if (priv->ap_fw && sta && sta->ht_cap.ht_supported && !eapol_frame && + if (priv->ap_fw && sta && sta->deflink.ht_cap.ht_supported && !eapol_frame && ieee80211_is_data_qos(wh->frame_control)) { tid = qos & 0xf; mwl8k_tx_count_packet(sta, tid); @@ -4027,9 +4027,9 @@ mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream, cmd->create_params.reset_seq_no_flag = 1; cmd->create_params.param_info = - (stream->sta->ht_cap.ampdu_factor & + (stream->sta->deflink.ht_cap.ampdu_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) | - ((stream->sta->ht_cap.ampdu_density << 2) & + ((stream->sta->deflink.ht_cap.ampdu_density << 2) & IEEE80211_HT_AMPDU_PARM_DENSITY); cmd->create_params.flags = @@ -4113,18 +4113,18 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw, cmd->stn_id = cpu_to_le16(sta->aid); cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD); if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) - rates = sta->supp_rates[NL80211_BAND_2GHZ]; + rates = sta->deflink.supp_rates[NL80211_BAND_2GHZ]; else - rates = 
sta->supp_rates[NL80211_BAND_5GHZ] << 5; + rates = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 5; cmd->legacy_rates = cpu_to_le32(rates); - if (sta->ht_cap.ht_supported) { - cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0]; - cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1]; - cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2]; - cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3]; - cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap); - cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) | - ((sta->ht_cap.ampdu_density & 7) << 2); + if (sta->deflink.ht_cap.ht_supported) { + cmd->ht_rates[0] = sta->deflink.ht_cap.mcs.rx_mask[0]; + cmd->ht_rates[1] = sta->deflink.ht_cap.mcs.rx_mask[1]; + cmd->ht_rates[2] = sta->deflink.ht_cap.mcs.rx_mask[2]; + cmd->ht_rates[3] = sta->deflink.ht_cap.mcs.rx_mask[3]; + cmd->ht_capabilities_info = cpu_to_le16(sta->deflink.ht_cap.cap); + cmd->mac_ht_param_info = (sta->deflink.ht_cap.ampdu_factor & 3) | + ((sta->deflink.ht_cap.ampdu_density & 7) << 2); cmd->is_qos_sta = 1; } @@ -4545,16 +4545,16 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw, p = &cmd->peer_info; p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT; p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability); - p->ht_support = sta->ht_cap.ht_supported; - p->ht_caps = cpu_to_le16(sta->ht_cap.cap); - p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) | - ((sta->ht_cap.ampdu_density & 7) << 2); + p->ht_support = sta->deflink.ht_cap.ht_supported; + p->ht_caps = cpu_to_le16(sta->deflink.ht_cap.cap); + p->extended_ht_caps = (sta->deflink.ht_cap.ampdu_factor & 3) | + ((sta->deflink.ht_cap.ampdu_density & 7) << 2); if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) - rates = sta->supp_rates[NL80211_BAND_2GHZ]; + rates = sta->deflink.supp_rates[NL80211_BAND_2GHZ]; else - rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5; + rates = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 5; legacy_rate_mask_to_array(p->legacy_rates, rates); - memcpy(p->ht_rates, &sta->ht_cap.mcs, 16); + memcpy(p->ht_rates, &sta->deflink.ht_cap.mcs, 16); p->interop = 1; p->amsdu_enabled = 0; @@ -5031,12 +5031,12 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) { - ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ]; + ap_legacy_rates = ap->deflink.supp_rates[NL80211_BAND_2GHZ]; } else { ap_legacy_rates = - ap->supp_rates[NL80211_BAND_5GHZ] << 5; + ap->deflink.supp_rates[NL80211_BAND_5GHZ] << 5; } - memcpy(ap_mcs_rates, &ap->ht_cap.mcs, 16); + memcpy(ap_mcs_rates, &ap->deflink.ht_cap.mcs, 16); rcu_read_unlock(); @@ -5347,7 +5347,7 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw, ret = mwl8k_cmd_update_stadb_add(hw, vif, sta); if (ret >= 0) { MWL8K_STA(sta)->peer_id = ret; - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) MWL8K_STA(sta)->is_ampdu_allowed = true; ret = 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index 17713c821d80..49a511ae8161 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -326,19 +326,21 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta) addr = mt7603_wtbl1_addr(idx); - ampdu_density = sta->ht_cap.ampdu_density; + ampdu_density = sta->deflink.ht_cap.ampdu_density; if (ampdu_density < IEEE80211_HT_MPDU_DENSITY_4) ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; val = mt76_rr(dev, addr + 2 * 4); val &= MT_WTBL1_W2_KEY_TYPE | 
MT_WTBL1_W2_ADMISSION_CONTROL; - val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) | - FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) | + val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, + sta->deflink.ht_cap.ampdu_factor) | + FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, + sta->deflink.ht_cap.ampdu_density) | MT_WTBL1_W2_TXS_BAF_REPORT; - if (sta->ht_cap.cap) + if (sta->deflink.ht_cap.cap) val |= MT_WTBL1_W2_HT; - if (sta->vht_cap.cap) + if (sta->deflink.vht_cap.cap) val |= MT_WTBL1_W2_VHT; mt76_wr(dev, addr + 2 * 4, val); @@ -347,9 +349,9 @@ void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta) val = mt76_rr(dev, addr + 9 * 4); val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 | MT_WTBL2_W9_SHORT_GI_80); - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) val |= MT_WTBL2_W9_SHORT_GI_20; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) val |= MT_WTBL2_W9_SHORT_GI_40; mt76_wr(dev, addr + 9 * 4, val); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 7cb17bf40e35..51a9b5d60c7a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -610,7 +610,7 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, static void mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) { - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; struct sta_rec_he *he; struct tlv *tlv; @@ -698,7 +698,7 @@ mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) he->he_cap = cpu_to_le32(cap); - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: if (elem->phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) @@ -750,9 +750,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, u8 mode = 0; if (sta) { - ht_cap = &sta->ht_cap; - vht_cap = &sta->vht_cap; - he_cap = &sta->he_cap; + ht_cap = &sta->deflink.ht_cap; + vht_cap = &sta->deflink.vht_cap; + he_cap = &sta->deflink.he_cap; } else { struct ieee80211_supported_band *sband; @@ -801,25 +801,25 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, u16 supp_rates; /* starec ht */ - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { struct sta_rec_ht *ht; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); ht = (struct sta_rec_ht *)tlv; - ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); + ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); } /* starec vht */ - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { struct sta_rec_vht *vht; int len; len = is_mt7921(dev) ? 
sizeof(*vht) : sizeof(*vht) - 4; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len); vht = (struct sta_rec_vht *)tlv; - vht->vht_cap = cpu_to_le32(sta->vht_cap.cap); - vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map; - vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map; + vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); + vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; } /* starec uapsd */ @@ -828,11 +828,11 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, if (!is_mt7921(dev)) return; - if (sta->ht_cap.ht_supported || sta->he_cap.has_he) + if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he) mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif); /* starec he */ - if (sta->he_cap.has_he) { + if (sta->deflink.he_cap.has_he) { mt76_connac_mcu_sta_he_tlv(skb, sta); if (band == NL80211_BAND_6GHZ && sta_state == MT76_STA_INFO_STATE_ASSOC) { @@ -841,7 +841,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g_capa)); he_6g_capa = (struct sta_rec_he_6g_capa *)tlv; - he_6g_capa->capa = sta->he_6ghz_capa.capa; + he_6g_capa->capa = sta->deflink.he_6ghz_capa.capa; } } @@ -851,14 +851,14 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); phy->rcpi = rcpi; phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR, - sta->ht_cap.ampdu_factor) | + sta->deflink.ht_cap.ampdu_factor) | FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY, - sta->ht_cap.ampdu_density); + sta->deflink.ht_cap.ampdu_density); tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); ra_info = (struct sta_rec_ra_info *)tlv; - supp_rates = sta->supp_rates[band]; + supp_rates = sta->deflink.supp_rates[band]; if (band == NL80211_BAND_2GHZ) supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) | FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf); @@ -867,17 +867,18 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, ra_info->legacy = cpu_to_le16(supp_rates); - if (sta->ht_cap.ht_supported) - memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask, + if (sta->deflink.ht_cap.ht_supported) + memcpy(ra_info->rx_mcs_bitmask, + sta->deflink.ht_cap.mcs.rx_mask, HT_MCS_MASK_NUM); tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_STATE, sizeof(*state)); state = (struct sta_rec_state *)tlv; state->state = sta_state; - if (sta->vht_cap.vht_supported) { - state->vht_opmode = sta->bandwidth; - state->vht_opmode |= (sta->rx_nss - 1) << + if (sta->deflink.vht_cap.vht_supported) { + state->vht_opmode = sta->deflink.bandwidth; + state->vht_opmode |= (sta->deflink.rx_nss - 1) << IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; } } @@ -905,27 +906,27 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct tlv *tlv; u32 flags = 0; - if (sta->ht_cap.ht_supported || sta->he_6ghz_capa.capa) { + if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_6ghz_capa.capa) { tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht), wtbl_tlv, sta_wtbl); ht = (struct wtbl_ht *)tlv; ht->ldpc = ht_ldpc && - !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING); + !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING); - if (sta->ht_cap.ht_supported) { - ht->af = sta->ht_cap.ampdu_factor; - ht->mm = sta->ht_cap.ampdu_density; + if (sta->deflink.ht_cap.ht_supported) { + ht->af = sta->deflink.ht_cap.ampdu_factor; + ht->mm = 
sta->deflink.ht_cap.ampdu_density; } else { - ht->af = le16_get_bits(sta->he_6ghz_capa.capa, + ht->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); - ht->mm = le16_get_bits(sta->he_6ghz_capa.capa, + ht->mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); } ht->ht = true; } - if (sta->vht_cap.vht_supported || sta->he_6ghz_capa.capa) { + if (sta->deflink.vht_cap.vht_supported || sta->deflink.he_6ghz_capa.capa) { struct wtbl_vht *vht; u8 af; @@ -934,18 +935,18 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb, sta_wtbl); vht = (struct wtbl_vht *)tlv; vht->ldpc = vht_ldpc && - !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); + !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); vht->vht = true; af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, - sta->vht_cap.cap); + sta->deflink.vht_cap.cap); if (ht) ht->af = max(ht->af, af); } mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv); - if (is_connac_v1(dev) && sta->ht_cap.ht_supported) { + if (is_connac_v1(dev) && sta->deflink.ht_cap.ht_supported) { /* sgi */ u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 | MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160; @@ -955,15 +956,15 @@ void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb, sizeof(*raw), wtbl_tlv, sta_wtbl); - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) flags |= MT_WTBL_W5_SHORT_GI_20; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) flags |= MT_WTBL_W5_SHORT_GI_40; - if (sta->vht_cap.vht_supported) { - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) + if (sta->deflink.vht_cap.vht_supported) { + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) flags |= MT_WTBL_W5_SHORT_GI_80; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) flags |= MT_WTBL_W5_SHORT_GI_160; } raw = (struct wtbl_raw *)tlv; @@ -1231,9 +1232,9 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, return 0x38; if (sta) { - ht_cap = &sta->ht_cap; - vht_cap = &sta->vht_cap; - he_cap = &sta->he_cap; + ht_cap = &sta->deflink.ht_cap; + vht_cap = &sta->deflink.vht_cap; + he_cap = &sta->deflink.he_cap; } else { struct ieee80211_supported_band *sband; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 2afad8c76ca6..cf4d4110cc99 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -412,9 +412,9 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { u8 ba_size = IEEE80211_MIN_AMPDU_BUF; - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; - ba_size <<= sta->ht_cap.ampdu_factor; + ba_size <<= sta->deflink.ht_cap.ampdu_factor; ba_size = min_t(int, 63, ba_size - 1); if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ba_size = 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 4e1ecaec8f4f..e9cab1165f38 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -1017,8 +1017,8 @@ static ssize_t 
mt7915_sta_fixed_rate_set(struct file *file, phy.ldpc = (phy.bw || phy.ldpc) * GENMASK(2, 0); for (i = 0; i <= phy.bw; i++) { - phy.sgi |= gi << (i << sta->he_cap.has_he); - phy.he_ltf |= he_ltf << (i << sta->he_cap.has_he); + phy.sgi |= gi << (i << sta->deflink.he_cap.has_he); + phy.he_ltf |= he_ltf << (i << sta->deflink.he_cap.has_he); } field = RATE_PARAM_FIXED; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index e9e7efbf350d..bab70cf981bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -1354,7 +1354,7 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) u16 fc, tid; u32 val; - if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he)) + if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) return; tid = le32_get_bits(txwi[1], MT_TXD1_TID); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index e7a6f80e7755..df31084e860f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -112,7 +112,7 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs, struct mt7915_dev *dev = msta->vif->phy->dev; enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band; const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs; - int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss; + int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss; for (nss = 0; nss < max_nss; nss++) { int mcs; @@ -152,7 +152,7 @@ mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs, /* only support 2ss on 160MHz for mt7915 */ if (is_mt7915(&dev->mt76) && nss > 1 && - sta->bandwidth == IEEE80211_STA_RX_BW_160) + sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) break; } @@ -165,8 +165,8 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs, { struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; struct mt7915_dev *dev = msta->vif->phy->dev; - u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map); - int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss; + u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); + int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss; u16 mcs; for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) { @@ -188,7 +188,7 @@ mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs, /* only support 2ss on 160MHz for mt7915 */ if (is_mt7915(&dev->mt76) && nss > 1 && - sta->bandwidth == IEEE80211_STA_RX_BW_160) + sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) break; } } @@ -197,10 +197,10 @@ static void mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs, const u8 *mask) { - int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss; + int nss, max_nss = sta->deflink.rx_nss > 3 ? 
4 : sta->deflink.rx_nss; for (nss = 0; nss < max_nss; nss++) - ht_mcs[nss] = sta->ht_cap.mcs.rx_mask[nss] & mask[nss]; + ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss]; } static int @@ -788,13 +788,13 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem; + struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; struct ieee80211_he_mcs_nss_supp mcs_map; struct sta_rec_he *he; struct tlv *tlv; u32 cap = 0; - if (!sta->he_cap.has_he) + if (!sta->deflink.he_cap.has_he) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he)); @@ -880,8 +880,8 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, he->he_cap = cpu_to_le32(cap); - mcs_map = sta->he_cap.he_mcs_nss_supp; - switch (sta->bandwidth) { + mcs_map = sta->deflink.he_cap.he_mcs_nss_supp; + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: if (elem->phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) @@ -931,7 +931,7 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem; + struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; struct sta_rec_muru *muru; struct tlv *tlv; @@ -949,11 +949,11 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, muru->cfg.mimo_ul_en = true; muru->cfg.ofdma_dl_en = true; - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) muru->mimo_dl.vht_mu_bfee = - !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); - if (!sta->he_cap.has_he) + if (!sta->deflink.he_cap.has_he) return; muru->mimo_dl.partial_bw_dl_mimo = @@ -987,13 +987,13 @@ mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) struct sta_rec_ht *ht; struct tlv *tlv; - if (!sta->ht_cap.ht_supported) + if (!sta->deflink.ht_cap.ht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); ht = (struct sta_rec_ht *)tlv; - ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); + ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); } static void @@ -1002,15 +1002,15 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) struct sta_rec_vht *vht; struct tlv *tlv; - if (!sta->vht_cap.vht_supported) + if (!sta->deflink.vht_cap.vht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); vht = (struct sta_rec_vht *)tlv; - vht->vht_cap = cpu_to_le32(sta->vht_cap.cap); - vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map; - vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map; + vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); + vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; } static void @@ -1097,8 +1097,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif, if (!bfee && tx_ant < 2) return false; - if (sta->he_cap.has_he) { - struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem; + if (sta->deflink.he_cap.has_he) { + struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; if (bfee) return mvif->cap.he_su_ebfee && @@ -1108,8 +1108,8 @@ mt7915_is_ebf_supported(struct mt7915_phy *phy, 
struct ieee80211_vif *vif, HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]); } - if (sta->vht_cap.vht_supported) { - u32 cap = sta->vht_cap.cap; + if (sta->deflink.vht_cap.vht_supported) { + u32 cap = sta->deflink.vht_cap.cap; if (bfee) return mvif->cap.vht_su_ebfee && @@ -1135,7 +1135,7 @@ static void mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy, struct sta_rec_bf *bf) { - struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; + struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs; u8 n = 0; bf->tx_mode = MT_PHY_TYPE_HT; @@ -1160,7 +1160,7 @@ static void mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy, struct sta_rec_bf *bf, bool explicit) { - struct ieee80211_sta_vht_cap *pc = &sta->vht_cap; + struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap; struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap; u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map); u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); @@ -1181,14 +1181,14 @@ mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy, bf->ncol = min_t(u8, nss_mcs, bf->nrow); bf->ibf_ncol = bf->ncol; - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) bf->nrow = 1; } else { bf->nrow = tx_ant; bf->ncol = min_t(u8, nss_mcs, bf->nrow); bf->ibf_ncol = nss_mcs; - if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) bf->ibf_nrow = 1; } } @@ -1197,7 +1197,7 @@ static void mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, struct mt7915_phy *phy, struct sta_rec_bf *bf) { - struct ieee80211_sta_he_cap *pc = &sta->he_cap; + struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap; struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem; const struct ieee80211_sta_he_cap *vc = mt76_connac_get_he_phy_cap(phy->mt76, vif); @@ -1222,7 +1222,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, bf->ncol = min_t(u8, nss_mcs, bf->nrow); bf->ibf_ncol = bf->ncol; - if (sta->bandwidth != IEEE80211_STA_RX_BW_160) + if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160) return; /* go over for 160MHz and 80p80 */ @@ -1270,7 +1270,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, }; bool ebf; - if (!(sta->ht_cap.ht_supported || sta->he_cap.has_he)) + if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) return; ebf = mt7915_is_ebf_supported(phy, vif, sta, false); @@ -1284,21 +1284,21 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, * vht: support eBF and iBF * ht: iBF only, since mac80211 lacks of eBF support */ - if (sta->he_cap.has_he && ebf) + if (sta->deflink.he_cap.has_he && ebf) mt7915_mcu_sta_bfer_he(sta, vif, phy, bf); - else if (sta->vht_cap.vht_supported) + else if (sta->deflink.vht_cap.vht_supported) mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf); - else if (sta->ht_cap.ht_supported) + else if (sta->deflink.ht_cap.ht_supported) mt7915_mcu_sta_bfer_ht(sta, phy, bf); else return; bf->bf_cap = ebf ? 
ebf : dev->ibf << 1; - bf->bw = sta->bandwidth; - bf->ibf_dbw = sta->bandwidth; + bf->bw = sta->deflink.bandwidth; + bf->ibf_dbw = sta->deflink.bandwidth; bf->ibf_nrow = tx_ant; - if (!ebf && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol) + if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol) bf->ibf_timeout = 0x48; else bf->ibf_timeout = 0x18; @@ -1308,7 +1308,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, else bf->mem_20m = matrix[bf->nrow][bf->ncol]; - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: case IEEE80211_STA_RX_BW_80: bf->mem_total = bf->mem_20m * 2; @@ -1333,7 +1333,7 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb, struct tlv *tlv; u8 nrow = 0; - if (!(sta->vht_cap.vht_supported || sta->he_cap.has_he)) + if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he)) return; if (!mt7915_is_ebf_supported(phy, vif, sta, true)) @@ -1342,13 +1342,13 @@ mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb, tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee)); bfee = (struct sta_rec_bfee *)tlv; - if (sta->he_cap.has_he) { - struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem; + if (sta->deflink.he_cap.has_he) { + struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, pe->phy_cap_info[5]); - } else if (sta->vht_cap.vht_supported) { - struct ieee80211_sta_vht_cap *pc = &sta->vht_cap; + } else if (sta->deflink.vht_cap.vht_supported) { + struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap; nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, pc->cap); @@ -1464,7 +1464,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev, do { \ u8 i, gi = mask->control[band]._gi; \ gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \ - for (i = 0; i <= sta->bandwidth; i++) { \ + for (i = 0; i <= sta->deflink.bandwidth; i++) { \ phy.sgi |= gi << (i << (_he)); \ phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\ } \ @@ -1476,11 +1476,11 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev, } \ } while (0) - if (sta->he_cap.has_he) { + if (sta->deflink.he_cap.has_he) { __sta_phy_bitrate_mask_check(he_mcs, he_gi, 1); - } else if (sta->vht_cap.vht_supported) { + } else if (sta->deflink.vht_cap.vht_supported) { __sta_phy_bitrate_mask_check(vht_mcs, gi, 0); - } else if (sta->ht_cap.ht_supported) { + } else if (sta->deflink.ht_cap.ht_supported) { __sta_phy_bitrate_mask_check(ht_mcs, gi, 0); } else { nrates = hweight32(mask->control[band].legacy); @@ -1514,7 +1514,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev, * actual txrate hardware sends out. */ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7); - if (sta->he_cap.has_he) + if (sta->deflink.he_cap.has_he) mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi); else mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); @@ -1547,7 +1547,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, enum nl80211_band band = chandef->chan->band; struct sta_rec_ra *ra; struct tlv *tlv; - u32 supp_rate = sta->supp_rates[band]; + u32 supp_rate = sta->deflink.supp_rates[band]; u32 cap = sta->wme ? 
STA_CAP_WMM : 0; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); @@ -1557,8 +1557,8 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, ra->auto_rate = true; ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta); ra->channel = chandef->chan->hw_value; - ra->bw = sta->bandwidth; - ra->phy.bw = sta->bandwidth; + ra->bw = sta->deflink.bandwidth; + ra->phy.bw = sta->deflink.bandwidth; ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode); if (supp_rate) { @@ -1579,22 +1579,22 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, } } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { ra->supp_mode |= MODE_HT; - ra->af = sta->ht_cap.ampdu_factor; - ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); + ra->af = sta->deflink.ht_cap.ampdu_factor; + ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); cap |= STA_CAP_HT; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) cap |= STA_CAP_SGI_20; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) cap |= STA_CAP_SGI_40; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC) cap |= STA_CAP_TX_STBC; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) cap |= STA_CAP_RX_STBC; if (mvif->cap.ht_ldpc && - (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) cap |= STA_CAP_LDPC; mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, @@ -1602,37 +1602,37 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs; } - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { u8 af; ra->supp_mode |= MODE_VHT; af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, - sta->vht_cap.cap); + sta->deflink.vht_cap.cap); ra->af = max_t(u8, ra->af, af); cap |= STA_CAP_VHT; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) cap |= STA_CAP_VHT_SGI_80; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) cap |= STA_CAP_VHT_SGI_160; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC) cap |= STA_CAP_VHT_TX_STBC; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) cap |= STA_CAP_VHT_RX_STBC; if (mvif->cap.vht_ldpc && - (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)) + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)) cap |= STA_CAP_VHT_LDPC; mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, mask->control[band].vht_mcs); } - if (sta->he_cap.has_he) { + if (sta->deflink.he_cap.has_he) { ra->supp_mode |= MODE_HE; cap |= STA_CAP_HE; - if (sta->he_6ghz_capa.capa) - ra->af = le16_get_bits(sta->he_6ghz_capa.capa, + if (sta->deflink.he_6ghz_capa.capa) + ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 233998ca4857..b67615487910 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -1023,7 +1023,7 @@ void mt7921_tx_check_aggr(struct 
ieee80211_sta *sta, __le32 *txwi) u16 fc, tid; u32 val; - if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he)) + if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) return; tid = le32_get_bits(txwi[1], MT_TXD1_TID); diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c index d2ee1aaa3c81..ca9cf628eb10 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.c +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -385,7 +385,7 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev) msta = container_of(wcid, struct mt76_sta, wcid); sta = container_of(msta, struct ieee80211_sta, drv_priv); - min_factor = min(min_factor, sta->ht_cap.ampdu_factor); + min_factor = min(min_factor, sta->deflink.ht_cap.ampdu_factor); } rcu_read_unlock(); diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c index f3dff8319a4c..f1fa0442a57f 100644 --- a/drivers/net/wireless/mediatek/mt7601u/tx.c +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c @@ -163,7 +163,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb, if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { u8 ba_size = IEEE80211_MIN_AMPDU_BUF; - ba_size <<= sta->ht_cap.ampdu_factor; + ba_size <<= sta->deflink.ht_cap.ampdu_factor; ba_size = min_t(int, 63, ba_size); if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ba_size = 0; @@ -172,7 +172,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb, txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU | FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, - sta->ht_cap.ampdu_density)); + sta->deflink.ht_cap.ampdu_density)); if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) txwi->flags = 0; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index deddb0afd312..cbdaf7992f98 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1801,8 +1801,8 @@ int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, * do not have a choice if some connected STA is not capable to * receive the same amount of data like the others. 
*/ - if (sta->ht_cap.ht_supported) { - drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]++; + if (sta->deflink.ht_cap.ht_supported) { + drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]++; rt2800_set_max_psdu_len(rt2x00dev); } @@ -1847,8 +1847,8 @@ int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); int wcid = sta_priv->wcid; - if (sta->ht_cap.ht_supported) { - drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]--; + if (sta->deflink.ht_cap.ht_supported) { + drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]--; rt2800_set_max_psdu_len(rt2x00dev); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index fb1d31b2d52a..aa6b2f3d2eff 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -303,7 +303,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, if (sta) { sta_priv = sta_to_rt2x00_sta(sta); txdesc->u.ht.wcid = sta_priv->wcid; - density = sta->ht_cap.ampdu_density; + density = sta->deflink.ht_cap.ampdu_density; } /* diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 6d9b5cf01b11..8b2ca9e8eac6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4533,21 +4533,21 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta) u16 network_type = WIRELESS_MODE_UNKNOWN; if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) { - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) network_type = WIRELESS_MODE_AC; - else if (sta->ht_cap.ht_supported) + else if (sta->deflink.ht_cap.ht_supported) network_type = WIRELESS_MODE_N_5G; network_type |= WIRELESS_MODE_A; } else { - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) network_type = WIRELESS_MODE_AC; - else if (sta->ht_cap.ht_supported) + else if (sta->deflink.ht_cap.ht_supported) network_type = WIRELESS_MODE_N_24G; - if (sta->supp_rates[0] <= 0xf) + if (sta->deflink.supp_rates[0] <= 0xf) network_type |= WIRELESS_MODE_B; - else if (sta->supp_rates[0] & 0xf) + else if (sta->deflink.supp_rates[0] & 0xf) network_type |= (WIRELESS_MODE_B | WIRELESS_MODE_G); else network_type |= WIRELESS_MODE_G; @@ -4591,16 +4591,16 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto error; } - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) dev_info(dev, "%s: HT supported\n", __func__); - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) dev_info(dev, "%s: VHT supported\n", __func__); /* TODO: Set bits 28-31 for rate adaptive id */ - ramask = (sta->supp_rates[0] & 0xfff) | - sta->ht_cap.mcs.rx_mask[0] << 12 | - sta->ht_cap.mcs.rx_mask[1] << 20; - if (sta->ht_cap.cap & + ramask = (sta->deflink.supp_rates[0] & 0xfff) | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12 | + sta->deflink.ht_cap.mcs.rx_mask[1] << 20; + if (sta->deflink.ht_cap.cap & (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)) sgi = 1; rcu_read_unlock(); @@ -5095,12 +5095,12 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, /* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */ ampdu_enable = false; if (ieee80211_is_data_qos(hdr->frame_control) && sta) { - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { u32 ampdu, val32; 
u8 *qc = ieee80211_get_qos_ctl(hdr); u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - ampdu = (u32)sta->ht_cap.ampdu_density; + ampdu = (u32)sta->deflink.ht_cap.ampdu_density; val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT; tx_desc->txdw2 |= cpu_to_le32(val32); @@ -5115,7 +5115,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (rate_flag & IEEE80211_TX_RC_SHORT_GI || (ieee80211_is_data_qos(hdr->frame_control) && - sta && sta->ht_cap.cap & + sta && sta->deflink.ht_cap.cap & (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) sgi = true; @@ -6162,8 +6162,8 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, switch (action) { case IEEE80211_AMPDU_TX_START: dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__); - ampdu_factor = sta->ht_cap.ampdu_factor; - ampdu_density = sta->ht_cap.ampdu_density; + ampdu_factor = sta->deflink.ht_cap.ampdu_factor; + ampdu_density = sta->deflink.ht_cap.ampdu_density; rtl8xxxu_set_ampdu_factor(priv, ampdu_factor); rtl8xxxu_set_ampdu_min_space(priv, ampdu_density); dev_dbg(dev, @@ -6255,10 +6255,10 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv, u32 rate_bitmap = 0; rcu_read_lock(); - rate_bitmap = (sta->supp_rates[0] & 0xfff) | - (sta->ht_cap.mcs.rx_mask[0] << 12) | - (sta->ht_cap.mcs.rx_mask[1] << 20); - if (sta->ht_cap.cap & + rate_bitmap = (sta->deflink.supp_rates[0] & 0xfff) | + (sta->deflink.ht_cap.mcs.rx_mask[0] << 12) | + (sta->deflink.ht_cap.mcs.rx_mask[1] << 20); + if (sta->deflink.ht_cap.cap & (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)) sgi = 1; rcu_read_unlock(); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index a7ef84f55939..9e7e98b55eff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -629,11 +629,12 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw, if (sta == NULL) return; - sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; - sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; - sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80; + sgi_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + sgi_20 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20; + sgi_80 = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80; - if ((!sta->ht_cap.ht_supported) && (!sta->vht_cap.vht_supported)) + if (!sta->deflink.ht_cap.ht_supported && + !sta->deflink.vht_cap.vht_supported) return; if (!sgi_40 && !sgi_20) @@ -645,8 +646,8 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_MESH_POINT) { - bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; - bw_80 = sta->vht_cap.vht_supported; + bw_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; + bw_80 = sta->deflink.vht_cap.vht_supported; } if (bw_80) { @@ -864,11 +865,11 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_MESH_POINT) { - if (!(sta->ht_cap.ht_supported) || - !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + if (!(sta->deflink.ht_cap.ht_supported) || + !(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) return; } else if (mac->opmode == NL80211_IFTYPE_STATION) { - if (!mac->bw_40 || !(sta->ht_cap.ht_supported)) + if (!mac->bw_40 || !(sta->deflink.ht_cap.ht_supported)) return; } if (tcb_desc->multicast || tcb_desc->broadcast) 
@@ -884,11 +885,11 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_MESH_POINT) { - if (!(sta->vht_cap.vht_supported)) + if (!(sta->deflink.vht_cap.vht_supported)) return; } else if (mac->opmode == NL80211_IFTYPE_STATION) { if (!mac->bw_80 || - !(sta->vht_cap.vht_supported)) + !(sta->deflink.vht_cap.vht_supported)) return; } if (tcb_desc->hw_rate <= @@ -904,7 +905,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u8 hw_rate; - u16 tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map); + u16 tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map); if ((get_rf_type(rtlphy) == RF_2T2R) && (tx_mcs_map & 0x000c) != 0x000c) { @@ -944,7 +945,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw, u8 hw_rate; if (get_rf_type(rtlphy) == RF_2T2R && - sta->ht_cap.mcs.rx_mask[1] != 0) + sta->deflink.ht_cap.mcs.rx_mask[1] != 0) hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15]; else hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7]; @@ -1271,11 +1272,11 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, *and N rate will all be controlled by FW *when tcb_desc->use_driver_rate = false */ - if (sta && sta->vht_cap.vht_supported) { + if (sta && sta->deflink.vht_cap.vht_supported) { tcb_desc->hw_rate = _rtl_get_vht_highest_n_rate(hw, sta); } else { - if (sta && sta->ht_cap.ht_supported) { + if (sta && sta->deflink.ht_cap.ht_supported) { tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta); } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 8efe2f5e5b9f..99a1d91ced5a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -903,18 +903,18 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw, spin_unlock_bh(&rtlpriv->locks.entry_list_lock); if (rtlhal->current_bandtype == BAND_ON_2_4G) { sta_entry->wireless_mode = WIRELESS_MODE_G; - if (sta->supp_rates[0] <= 0xf) + if (sta->deflink.supp_rates[0] <= 0xf) sta_entry->wireless_mode = WIRELESS_MODE_B; - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) sta_entry->wireless_mode = WIRELESS_MODE_N_24G; if (vif->type == NL80211_IFTYPE_ADHOC) sta_entry->wireless_mode = WIRELESS_MODE_G; } else if (rtlhal->current_bandtype == BAND_ON_5G) { sta_entry->wireless_mode = WIRELESS_MODE_A; - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) sta_entry->wireless_mode = WIRELESS_MODE_N_5G; - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) sta_entry->wireless_mode = WIRELESS_MODE_AC_5G; if (vif->type == NL80211_IFTYPE_ADHOC) @@ -922,7 +922,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw, } /*disable cck rate for p2p*/ if (mac->p2p) - sta->supp_rates[0] &= 0xfffffff0; + sta->deflink.supp_rates[0] &= 0xfffffff0; memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN); rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG, @@ -1126,7 +1126,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD, "send PS STATIC frame\n"); if (rtlpriv->dm.supp_phymode_switch) { - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) rtl_send_smps_action(hw, sta, IEEE80211_SMPS_STATIC); } @@ -1134,20 +1134,20 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (rtlhal->current_bandtype == BAND_ON_5G) { 
mac->mode = WIRELESS_MODE_A; } else { - if (sta->supp_rates[0] <= 0xf) + if (sta->deflink.supp_rates[0] <= 0xf) mac->mode = WIRELESS_MODE_B; else mac->mode = WIRELESS_MODE_G; } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { if (rtlhal->current_bandtype == BAND_ON_2_4G) mac->mode = WIRELESS_MODE_N_24G; else mac->mode = WIRELESS_MODE_N_5G; } - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { if (rtlhal->current_bandtype == BAND_ON_5G) mac->mode = WIRELESS_MODE_AC_5G; else @@ -1256,14 +1256,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, rcu_read_lock(); sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid); if (sta) { - if (sta->ht_cap.ampdu_density > + if (sta->deflink.ht_cap.ampdu_density > mac->current_ampdu_density) mac->current_ampdu_density = - sta->ht_cap.ampdu_density; - if (sta->ht_cap.ampdu_factor < + sta->deflink.ht_cap.ampdu_density; + if (sta->deflink.ht_cap.ampdu_factor < mac->current_ampdu_factor) mac->current_ampdu_factor = - sta->ht_cap.ampdu_factor; + sta->deflink.ht_cap.ampdu_factor; } rcu_read_unlock(); @@ -1298,20 +1298,20 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, if (rtlhal->current_bandtype == BAND_ON_5G) { mac->mode = WIRELESS_MODE_A; } else { - if (sta->supp_rates[0] <= 0xf) + if (sta->deflink.supp_rates[0] <= 0xf) mac->mode = WIRELESS_MODE_B; else mac->mode = WIRELESS_MODE_G; } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { if (rtlhal->current_bandtype == BAND_ON_2_4G) mac->mode = WIRELESS_MODE_N_24G; else mac->mode = WIRELESS_MODE_N_5G; } - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { if (rtlhal->current_bandtype == BAND_ON_5G) mac->mode = WIRELESS_MODE_AC_5G; else @@ -1327,7 +1327,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, sta_entry->wireless_mode = mac->mode; } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { mac->ht_enable = true; /* @@ -1338,16 +1338,16 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, * */ } - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) mac->vht_enable = true; if (changed & BSS_CHANGED_BASIC_RATES) { /* for 5G must << RATE_6M_INDEX = 4, * because 5G have no cck rate*/ if (rtlhal->current_bandtype == BAND_ON_5G) - basic_rates = sta->supp_rates[1] << 4; + basic_rates = sta->deflink.supp_rates[1] << 4; else - basic_rates = sta->supp_rates[0]; + basic_rates = sta->deflink.supp_rates[0]; mac->basic_rates = basic_rates; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 4b5ea0ec9109..a164364109ba 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -66,7 +66,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, else return N_MODE_MCS15_RIX; } else if (wireless_mode == WIRELESS_MODE_AC_24G) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) { ieee80211_rate_set_vht(&rate, AC_MODE_MCS8_RIX, nss); @@ -88,7 +88,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, else return N_MODE_MCS15_RIX; } else if (wireless_mode == WIRELESS_MODE_AC_5G) { - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) { ieee80211_rate_set_vht(&rate, AC_MODE_MCS8_RIX, nss); @@ -121,9 +121,9 @@ static void _rtl_rc_rate_set_series(struct 
rtl_priv *rtlpriv, u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0; if (sta) { - sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; - sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; - sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80; + sgi_20 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20; + sgi_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + sgi_80 = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80; sta_entry = (struct rtl_sta_info *)sta->drv_priv; wireless_mode = sta_entry->wireless_mode; } @@ -135,10 +135,10 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { - if (sta && (sta->ht_cap.cap & + if (sta && (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - if (sta && sta->vht_cap.vht_supported) + if (sta && sta->deflink.vht_cap.vht_supported) rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; } else { if (mac->bw_80) @@ -149,11 +149,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, if (sgi_20 || sgi_40 || sgi_80) rate->flags |= IEEE80211_TX_RC_SHORT_GI; - if (sta && sta->ht_cap.ht_supported && + if (sta && sta->deflink.ht_cap.ht_supported && (wireless_mode == WIRELESS_MODE_N_5G || wireless_mode == WIRELESS_MODE_N_24G)) rate->flags |= IEEE80211_TX_RC_MCS; - if (sta && sta->vht_cap.vht_supported && + if (sta && sta->deflink.vht_cap.vht_supported && (wireless_mode == WIRELESS_MODE_AC_5G || wireless_mode == WIRELESS_MODE_AC_24G || wireless_mode == WIRELESS_MODE_AC_ONLY)) @@ -229,7 +229,7 @@ static void rtl_tx_status(void *ppriv, if (sta) { /* Check if aggregation has to be enabled for this tid */ sta_entry = (struct rtl_sta_info *)sta->drv_priv; - if (sta->ht_cap.ht_supported && + if (sta->deflink.ht_cap.ht_supported && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { if (ieee80211_is_data_qos(fc)) { u8 tid = rtl_get_tid(skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index bf686a916acb..58c2ab3d44be 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -1975,21 +1975,21 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw, u16 shortgi_rate; u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
1 : 0; enum wireless_mode wirelessmode = mac->mode; u32 ratr_mask; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; + ratr_value = sta->deflink.supp_rates[1] << 4; else - ratr_value = sta->supp_rates[0]; + ratr_value = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -2061,11 +2061,11 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = 0; bool b_shortgi = false; @@ -2083,13 +2083,13 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw, macid = sta->aid + 1; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->supp_rates[1] << 4; + ratr_bitmap = sta->deflink.supp_rates[1] << 4; else - ratr_bitmap = sta->supp_rates[0]; + ratr_bitmap = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index c948dafa0c80..257816563fa0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -504,7 +504,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & + bw_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; @@ -591,7 +591,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, set_tx_desc_linip(pdesc, 0); set_tx_desc_pkt_size(pdesc, (u16)skb_len); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(pdesc, ampdu_density); } if (info->control.hw_key) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index bb5a0c4aec93..b9c62640d2cb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -1765,22 +1765,22 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw, u16 shortgi_rate; u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 
1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = mac->mode; u32 ratr_mask; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; + ratr_value = sta->deflink.supp_rates[1] << 4; else - ratr_value = sta->supp_rates[0]; + ratr_value = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -1853,11 +1853,11 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & + u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; - u8 curshortgi_40mhz = (sta->ht_cap.cap & + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = 0; bool shortgi = false; @@ -1874,13 +1874,13 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, macid = sta->aid + 1; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->supp_rates[1] << 4; + ratr_bitmap = sta->deflink.supp_rates[1] << 4; else - ratr_bitmap = sta->supp_rates[0]; + ratr_bitmap = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 4165175cf5c0..fdbd3758cde1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -379,7 +379,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_MESH_POINT) { if (sta) - bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; + bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; @@ -441,7 +441,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, set_tx_desc_pkt_size(pdesc, (u16)skb->len); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(pdesc, ampdu_density); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index eaba66113328..9bd02c3c3adc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1918,21 +1918,21 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, u16 shortgi_rate; u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 
+ u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = mac->mode; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; + ratr_value = sta->deflink.supp_rates[1] << 4; else - ratr_value = sta->supp_rates[0]; + ratr_value = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -2003,11 +2003,11 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; + u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; u8 curshortgi_40mhz = curtxbw_40mhz && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = 0; bool shortgi = false; @@ -2025,13 +2025,13 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, macid = sta->aid + 1; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->supp_rates[1] << 4; + ratr_bitmap = sta->deflink.supp_rates[1] << 4; else - ratr_bitmap = sta->supp_rates[0]; + ratr_bitmap = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 87f959d5d861..ae3c4f97637e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -540,7 +540,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, rcu_read_lock(); sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(txdesc, ampdu_density); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index f849291cc587..2aecb2583f75 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -1802,18 +1802,18 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw, u16 shortgi_rate; u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
1 : 0; enum wireless_mode wirelessmode = mac->mode; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; + ratr_value = sta->deflink.supp_rates[1] << 4; else - ratr_value = sta->supp_rates[0]; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value = sta->deflink.supp_rates[0]; + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_A: ratr_value &= 0x00000FF0; @@ -1880,10 +1880,10 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = 0; bool shortgi = false; @@ -1901,11 +1901,11 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw, macid = sta->aid + 1; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->supp_rates[1] << 4; + ratr_bitmap = sta->deflink.supp_rates[1] << 4; else - ratr_bitmap = sta->supp_rates[0]; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap = sta->deflink.supp_rates[0]; + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index c02813fba934..807b66c16e11 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -498,7 +498,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; + bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); @@ -586,7 +586,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, set_tx_desc_linip(pdesc, 0); set_tx_desc_pkt_size(pdesc, (u16)skb_len); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(pdesc, ampdu_density); } if (info->control.hw_key) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 76189283104c..47d8999e31c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2256,11 +2256,11 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; - u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 
+ u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = 0; bool b_shortgi = false; @@ -2276,12 +2276,12 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw, mac->opmode == NL80211_IFTYPE_ADHOC) macid = sta->aid + 1; - ratr_bitmap = sta->supp_rates[0]; + ratr_bitmap = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index eef7a041e80d..272750612abf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -665,7 +665,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & + bw_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; @@ -759,7 +759,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, set_tx_desc_linip(pdesc, 0); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(pdesc, ampdu_density); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 91199262aaca..4ca299c9de77 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -2017,20 +2017,20 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw, u16 shortgi_rate = 0; u32 tmp_ratr_value = 0; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = mac->mode; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; + ratr_value = sta->deflink.supp_rates[1] << 4; else - ratr_value = sta->supp_rates[0]; + ratr_value = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_value &= 0x0000000D; @@ -2115,10 +2115,10 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index = 0; - u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 
1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = 0; bool shortgi = false; @@ -2139,13 +2139,13 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw, macid = sta->aid + 1; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->supp_rates[1] << 4; + ratr_bitmap = sta->deflink.supp_rates[1] << 4; else - ratr_bitmap = sta->supp_rates[0]; + ratr_bitmap = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: band |= WIRELESS_11B; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index e474b4ec17f3..a5853a170b58 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -342,7 +342,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->bandwidth >= IEEE80211_STA_RX_BW_40; + bw_40 = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index c98f2216734f..965d98b9b09f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1841,21 +1841,21 @@ static void rtl8723e_update_hal_rate_table(struct ieee80211_hw *hw, u16 shortgi_rate; u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = mac->mode; u32 ratr_mask; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; + ratr_value = sta->deflink.supp_rates[1] << 4; else - ratr_value = sta->supp_rates[0]; + ratr_value = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -1928,11 +1928,11 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
1 : 0; enum wireless_mode wirelessmode = 0; bool shortgi = false; @@ -1949,13 +1949,13 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw, macid = sta->aid + 1; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->supp_rates[1] << 4; + ratr_bitmap = sta->deflink.supp_rates[1] << 4; else - ratr_bitmap = sta->supp_rates[0]; + ratr_bitmap = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 340b3d68a54e..3797a1e2af82 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -376,7 +376,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & + bw_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; } @@ -442,7 +442,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, set_tx_desc_pkt_size(pdesc, (u16)skb->len); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(pdesc, ampdu_density); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 0748aedce2ad..189cc6437600 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2315,11 +2315,11 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw, struct rtl_sta_info *sta_entry = NULL; u32 ratr_bitmap; u8 ratr_index; - u8 curtxbw_40mhz = (sta->ht_cap.cap & + u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; - u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
1 : 0; enum wireless_mode wirelessmode = 0; bool shortgi = false; @@ -2335,13 +2335,13 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw, mac->opmode == NL80211_IFTYPE_ADHOC) macid = sta->aid + 1; - ratr_bitmap = sta->supp_rates[0]; + ratr_bitmap = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: ratr_index = RATR_INX_WIRELESS_B; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 5a7cd270575a..c08f57e487e5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -429,7 +429,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, } else if (mac->opmode == NL80211_IFTYPE_AP || mac->opmode == NL80211_IFTYPE_ADHOC) { if (sta) - bw_40 = sta->ht_cap.cap & + bw_40 = sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; } seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; @@ -516,7 +516,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, set_tx_desc_linip(pdesc, 0); set_tx_desc_pkt_size(pdesc, (u16)skb_len); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(pdesc, ampdu_density); } if (info->control.hw_key) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 33ffc24d3675..7e0f62d59fe1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -3300,20 +3300,20 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw, u16 shortgi_rate; u32 tmp_ratr_value; u8 curtxbw_40mhz = mac->bw_40; - u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; enum wireless_mode wirelessmode = mac->mode; if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->supp_rates[1] << 4; + ratr_value = sta->deflink.supp_rates[1] << 4; else - ratr_value = sta->supp_rates[0]; + ratr_value = sta->deflink.supp_rates[0]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_value = 0xfff; - ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); switch (wirelessmode) { case WIRELESS_MODE_B: if (ratr_value & 0x0000000c) @@ -3484,12 +3484,12 @@ static bool _rtl8821ae_get_ra_shortgi(struct ieee80211_hw *hw, struct ieee80211_ u8 mac_id) { bool b_short_gi = false; - u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + u8 b_curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? 1 : 0; - u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + u8 b_curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 1 : 0; u8 b_curshortgi_80mhz = 0; - b_curshortgi_80mhz = (sta->vht_cap.cap & + b_curshortgi_80mhz = (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) ? 
1 : 0; if (mac_id == MAC_ID_STATIC_FOR_BROADCAST_MULTICAST) @@ -3512,7 +3512,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw, u32 ratr_bitmap; u8 ratr_index; enum wireless_mode wirelessmode = 0; - u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + u8 curtxbw_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; bool b_shortgi = false; u8 rate_mask[7]; @@ -3534,22 +3534,22 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw, if (wirelessmode == WIRELESS_MODE_N_5G || wirelessmode == WIRELESS_MODE_AC_5G || wirelessmode == WIRELESS_MODE_A) - ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4; + ratr_bitmap = sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; else - ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ]; + ratr_bitmap = sta->deflink.supp_rates[NL80211_BAND_2GHZ]; if (mac->opmode == NL80211_IFTYPE_ADHOC) ratr_bitmap = 0xfff; if (wirelessmode == WIRELESS_MODE_N_24G || wirelessmode == WIRELESS_MODE_N_5G) - ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | - sta->ht_cap.mcs.rx_mask[0] << 12); + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); else if (wirelessmode == WIRELESS_MODE_AC_24G || wirelessmode == WIRELESS_MODE_AC_5G || wirelessmode == WIRELESS_MODE_AC_ONLY) ratr_bitmap |= _rtl8821ae_rate_to_bitmap_2ssvht( - sta->vht_cap.vht_mcs.rx_mcs_map) << 12; + sta->deflink.vht_cap.vht_mcs.rx_mcs_map) << 12; b_shortgi = _rtl8821ae_get_ra_shortgi(hw, sta, macid); rf_type = _rtl8821ae_get_ra_rftype(hw, wirelessmode, ratr_bitmap); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 9d6f8dcbf2d6..8c48108960b9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -761,7 +761,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, set_tx_desc_linip(pdesc, 0); set_tx_desc_pkt_size(pdesc, (u16)skb_len); if (sta) { - u8 ampdu_density = sta->ht_cap.ampdu_density; + u8 ampdu_density = sta->deflink.ht_cap.ampdu_density; set_tx_desc_ampdu_density(pdesc, ampdu_density); } diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c index df750b3a35e9..e76841d3417b 100644 --- a/drivers/net/wireless/realtek/rtw88/bf.c +++ b/drivers/net/wireless/realtek/rtw88/bf.c @@ -55,7 +55,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, } ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap; - vht_cap = &sta->vht_cap; + vht_cap = &sta->deflink.vht_cap; if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) && (vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index b57f9262f590..1a4c8c989ce9 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -904,7 +904,7 @@ static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num) static u64 get_vht_ra_mask(struct ieee80211_sta *sta) { u64 ra_mask = 0; - u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map); + u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); u8 vht_mcs_cap; int i, nss; @@ -1123,19 +1123,19 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) bool is_vht_enable = false; bool is_support_sgi = false; - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { 
is_vht_enable = true; ra_mask |= get_vht_ra_mask(sta); - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) stbc_en = VHT_STBC_EN; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; - } else if (sta->ht_cap.ht_supported) { - ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | - (sta->ht_cap.mcs.rx_mask[0] << 12); - if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + } else if (sta->deflink.ht_cap.ht_supported) { + ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) | + (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = HT_STBC_EN; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = HT_LDPC_EN; } @@ -1143,12 +1143,12 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; if (hal->current_band_type == RTW_BAND_5G) { - ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4; + ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; ra_mask_bak = ra_mask; - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_OFDM | WIRELESS_VHT; - } else if (sta->ht_cap.ht_supported) { + } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G; wireless_set = WIRELESS_OFDM | WIRELESS_HT; } else { @@ -1156,19 +1156,19 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) } dm_info->rrsr_val_init = RRSR_INIT_5G; } else if (hal->current_band_type == RTW_BAND_2G) { - ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ]; + ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ]; ra_mask_bak = ra_mask; - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT | WIRELESS_VHT; - } else if (sta->ht_cap.ht_supported) { + } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT | RA_MASK_OFDM_IN_HT_2G; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT; - } else if (sta->supp_rates[0] <= 0xf) { + } else if (sta->deflink.supp_rates[0] <= 0xf) { wireless_set = WIRELESS_CCK; } else { ra_mask &= RA_MASK_OFDM_RATES | RA_MASK_CCK_IN_BG; @@ -1181,28 +1181,28 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) wireless_set = 0; } - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_80: bw_mode = RTW_CHANNEL_WIDTH_80; - is_support_sgi = sta->vht_cap.vht_supported && - (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); + is_support_sgi = sta->deflink.vht_cap.vht_supported && + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW_CHANNEL_WIDTH_40; - is_support_sgi = sta->ht_cap.ht_supported && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); + is_support_sgi = sta->deflink.ht_cap.ht_supported && + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW_CHANNEL_WIDTH_20; - is_support_sgi = sta->ht_cap.ht_supported && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + is_support_sgi = sta->deflink.ht_cap.ht_supported && + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } - 
if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) { + if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) { tx_num = 2; rf_type = RF_2T2R; - } else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) { + } else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) { tx_num = 2; rf_type = RF_2T2R; } diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index 94d1089f4022..83248da0237f 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -72,7 +72,7 @@ EXPORT_SYMBOL(rtw_tx_fill_tx_desc); static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta) { - u8 exp = sta->ht_cap.ampdu_factor; + u8 exp = sta->deflink.ht_cap.ampdu_factor; /* the least ampdu factor is 8K, and the value in the tx desc is the * max aggregation num, which represents val * 2 packets can be @@ -83,7 +83,7 @@ static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta) static u8 get_tx_ampdu_density(struct ieee80211_sta *sta) { - return sta->ht_cap.ampdu_density; + return sta->deflink.ht_cap.ampdu_density; } static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev, @@ -91,7 +91,7 @@ static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev, { u8 rate; - if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0) + if (rtwdev->hal.rf_type == RF_2T2R && sta->deflink.ht_cap.mcs.rx_mask[1] != 0) rate = DESC_RATEMCS15; else rate = DESC_RATEMCS7; @@ -106,7 +106,7 @@ static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev, u8 rate; u16 tx_mcs_map; - tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map); + tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map); if (efuse->hw_cap.nss == 1) { switch (tx_mcs_map & 0x3) { case IEEE80211_VHT_MCS_SUPPORT_0_7: @@ -340,11 +340,11 @@ static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev, if (info->control.use_rts || skb->len > hw->wiphy->rts_threshold) pkt_info->rts = true; - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) rate = get_highest_vht_tx_rate(rtwdev, sta); - else if (sta->ht_cap.ht_supported) + else if (sta->deflink.ht_cap.ht_supported) rate = get_highest_ht_tx_rate(rtwdev, sta); - else if (sta->supp_rates[0] <= 0xf) + else if (sta->deflink.supp_rates[0] <= 0xf) rate = DESC_RATE11M; else rate = DESC_RATE54M; diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index 3d5664cc3a20..683854bba217 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -4179,14 +4179,14 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], STA support HE=%d VHT=%d HT=%d\n", - sta->he_cap.has_he, - sta->vht_cap.vht_supported, - sta->ht_cap.ht_supported); - if (sta->he_cap.has_he) + sta->deflink.he_cap.has_he, + sta->deflink.vht_cap.vht_supported, + sta->deflink.ht_cap.ht_supported); + if (sta->deflink.he_cap.has_he) mode |= BIT(BTC_WL_MODE_HE); - if (sta->vht_cap.vht_supported) + if (sta->deflink.vht_cap.vht_supported) mode |= BIT(BTC_WL_MODE_VHT); - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) mode |= BIT(BTC_WL_MODE_HT); r.mode = mode; diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 89506daa54cc..6e8a65b021ce 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -424,10 +424,10 @@ rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev, 
ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ? rtwsta->ampdu_params[tid].agg_num : - 4 << sta->ht_cap.ampdu_factor) - 1); + 4 << sta->deflink.ht_cap.ampdu_factor) - 1); desc_info->agg_en = true; - desc_info->ampdu_density = sta->ht_cap.ampdu_density; + desc_info->ampdu_density = sta->deflink.ht_cap.ampdu_density; desc_info->ampdu_num = ampdu_num; } @@ -612,7 +612,7 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev, if (pkt_type < PACKET_MAX) return false; - if (!sta || !sta->he_cap.has_he) + if (!sta || !sta->deflink.he_cap.has_he) return false; if (!ieee80211_is_data_qos(fc)) diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index ee2edd9e9173..fdf593e68660 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -3601,10 +3601,12 @@ static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr) static inline bool rtw89_sta_has_beamformer_cap(struct ieee80211_sta *sta) { - if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || - (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) || - (sta->he_cap.he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || - (sta->he_cap.he_cap_elem.phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) + if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) || + (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || + (sta->deflink.he_cap.he_cap_elem.phy_cap_info[4] & + IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) return true; return false; } diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index fc77d9bfd626..986d6249c108 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -831,23 +831,23 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, { bool ppe_th; u8 ppe16, ppe8; - u8 nss = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1; - u8 ppe_thres_hdr = sta->he_cap.ppe_thres[0]; + u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; + u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; u8 ru_bitmap; u8 n, idx, sh; u16 ppe; int i; - if (!sta->he_cap.has_he) + if (!sta->deflink.he_cap.has_he) return; ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, - sta->he_cap.he_cap_elem.phy_cap_info[6]); + sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); if (!ppe_th) { u8 pad; pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, - sta->he_cap.he_cap_elem.phy_cap_info[9]); + sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); for (i = 0; i < RTW89_PPE_BW_NUM; i++) pads[i] = pad; @@ -867,7 +867,7 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, sh = n & 7; n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; - ppe = le16_to_cpu(*((__le16 *)&sta->he_cap.ppe_thres[idx])); + ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; @@ -921,7 +921,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); if (sta) - SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he); + SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, + sta->deflink.he_cap.has_he); if 
(rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) SET_CMC_TBL_DATA_DCM(skb->data, 0); diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 34e14a53a585..c28f5a7c23b5 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -3995,7 +3995,7 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; u8 port_sel = rtwvif->port; u8 sound_dim = 3, t; - u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info; + u8 *phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info; u32 reg; u16 val; int ret; @@ -4012,12 +4012,12 @@ static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, phy_cap[5]); sound_dim = min(sound_dim, t); } - if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || - (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { - ldpc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); - stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); + if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { + ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); + stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, - sta->vht_cap.cap); + sta->deflink.vht_cap.cap); sound_dim = min(sound_dim, t); } nc = min(nc, sound_dim); @@ -4058,17 +4058,17 @@ static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, if (ret) return ret; - if (sta->he_cap.has_he) { + if (sta->deflink.he_cap.has_he) { rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); } - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 193afb1f53f5..994869f72080 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -76,10 +76,10 @@ static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap) static u64 get_he_ra_mask(struct ieee80211_sta *sta) { - struct ieee80211_sta_he_cap cap = sta->he_cap; + struct ieee80211_sta_he_cap cap = sta->deflink.he_cap; u16 mcs_map; - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: if (cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) @@ -172,17 +172,17 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw return -1; } - if (sta->he_cap.has_he) { + if (sta->deflink.he_cap.has_he) { cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0], RA_MASK_HE_1SS_RATES); cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1], RA_MASK_HE_2SS_RATES); - } else if (sta->vht_cap.vht_supported) { + } else if (sta->deflink.vht_cap.vht_supported) { cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0], RA_MASK_VHT_1SS_RATES); cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1], RA_MASK_VHT_2SS_RATES); - } else if (sta->ht_cap.ht_supported) { + } else if (sta->deflink.ht_cap.ht_supported) { 
cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0], RA_MASK_HT_1SS_RATES); cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1], @@ -223,57 +223,57 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, memset(ra, 0, sizeof(*ra)); /* Set the ra mask from sta's capability */ - if (sta->he_cap.has_he) { + if (sta->deflink.he_cap.has_he) { mode |= RTW89_RA_MODE_HE; csi_mode = RTW89_RA_RPT_MODE_HE; ra_mask |= get_he_ra_mask(sta); high_rate_masks = rtw89_ra_mask_he_rates; - if (sta->he_cap.he_cap_elem.phy_cap_info[2] & + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) stbc_en = 1; - if (sta->he_cap.he_cap_elem.phy_cap_info[1] & + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) ldpc_en = 1; - } else if (sta->vht_cap.vht_supported) { - u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map); + } else if (sta->deflink.vht_cap.vht_supported) { + u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); mode |= RTW89_RA_MODE_VHT; csi_mode = RTW89_RA_RPT_MODE_VHT; /* MCS9, MCS8, MCS7 */ ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1); high_rate_masks = rtw89_ra_mask_vht_rates; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) stbc_en = 1; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = 1; - } else if (sta->ht_cap.ht_supported) { + } else if (sta->deflink.ht_cap.ht_supported) { mode |= RTW89_RA_MODE_HT; csi_mode = RTW89_RA_RPT_MODE_HT; - ra_mask |= ((u64)sta->ht_cap.mcs.rx_mask[3] << 48) | - ((u64)sta->ht_cap.mcs.rx_mask[2] << 36) | - (sta->ht_cap.mcs.rx_mask[1] << 24) | - (sta->ht_cap.mcs.rx_mask[0] << 12); + ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) | + ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) | + (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) | + (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); high_rate_masks = rtw89_ra_mask_ht_rates; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = 1; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = 1; } switch (rtwdev->hal.current_band_type) { case RTW89_BAND_2G: - ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ]; - if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf) + ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ]; + if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] <= 0xf) mode |= RTW89_RA_MODE_CCK; else mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM; break; case RTW89_BAND_5G: - ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4; + ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; mode |= RTW89_RA_MODE_OFDM; break; case RTW89_BAND_6G: - ra_mask |= (u64)sta->supp_rates[NL80211_BAND_6GHZ] << 4; + ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4; mode |= RTW89_RA_MODE_OFDM; break; default: @@ -302,30 +302,30 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak); ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta); - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: bw_mode = RTW89_CHANNEL_WIDTH_160; - sgi = sta->vht_cap.vht_supported && - (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160); + sgi = sta->deflink.vht_cap.vht_supported && + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160); 
break; case IEEE80211_STA_RX_BW_80: bw_mode = RTW89_CHANNEL_WIDTH_80; - sgi = sta->vht_cap.vht_supported && - (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); + sgi = sta->deflink.vht_cap.vht_supported && + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW89_CHANNEL_WIDTH_40; - sgi = sta->ht_cap.ht_supported && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); + sgi = sta->deflink.ht_cap.ht_supported && + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW89_CHANNEL_WIDTH_20; - sgi = sta->ht_cap.ht_supported && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + sgi = sta->deflink.ht_cap.ht_supported && + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } - if (sta->he_cap.he_cap_elem.phy_cap_info[3] & + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM) ra->dcm_cap = 1; @@ -340,7 +340,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra->macid = rtwsta->mac_id; ra->stbc_cap = stbc_en; ra->ldpc_cap = ldpc_en; - ra->ss_num = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1; + ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; ra->en_sgi = sgi; ra->ra_mask = ra_mask; diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 913e11fb3807..f01e82b90c07 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1490,13 +1490,13 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, if ((vif->type == NL80211_IFTYPE_STATION) || (vif->type == NL80211_IFTYPE_P2P_CLIENT)) { - common->bitrate_mask[common->band] = sta->supp_rates[common->band]; - common->vif_info[0].is_ht = sta->ht_cap.ht_supported; - if (sta->ht_cap.ht_supported) { + common->bitrate_mask[common->band] = sta->deflink.supp_rates[common->band]; + common->vif_info[0].is_ht = sta->deflink.ht_cap.ht_supported; + if (sta->deflink.ht_cap.ht_supported) { common->bitrate_mask[NL80211_BAND_2GHZ] = - sta->supp_rates[NL80211_BAND_2GHZ]; - if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + sta->deflink.supp_rates[NL80211_BAND_2GHZ]; + if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) common->vif_info[0].sgi = true; ieee80211_start_tx_ba_session(sta, 0, 0); } diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 0848f7a7e76c..c14689266fec 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1357,10 +1357,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common, is_ht = common->vif_info[0].is_ht; is_sgi = common->vif_info[0].sgi; } else { - rate_bitmap = sta->supp_rates[band]; - is_ht = sta->ht_cap.ht_supported; - if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + rate_bitmap = sta->deflink.supp_rates[band]; + is_ht = sta->deflink.ht_cap.ht_supported; + if ((sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) is_sgi = true; } diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index 236022d4ae2a..321df124d449 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -1907,10 +1907,10 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev, if (info->bssid && !info->ibss_joined) 
sta = ieee80211_find_sta(vif, info->bssid); if (sta) { - priv->ht_info.ht_cap = sta->ht_cap; + priv->ht_info.ht_cap = sta->deflink.ht_cap; priv->bss_params.operational_rate_set = cw1200_rate_mask_to_wsm(priv, - sta->supp_rates[priv->channel->band]); + sta->deflink.supp_rates[priv->channel->band]); priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef); priv->ht_info.operation_mode = info->ht_operation_mode; } else { diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 8b798b5fcaf5..09eb1116be0e 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1558,11 +1558,11 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, WL1271_PSD_LEGACY; - sta_rates = sta->supp_rates[wlvif->band]; - if (sta->ht_cap.ht_supported) + sta_rates = sta->deflink.supp_rates[wlvif->band]; + if (sta->deflink.ht_cap.ht_supported) sta_rates |= - (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) | - (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET); + (sta->deflink.ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) | + (sta->deflink.ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET); cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 5669f17b395f..8440e0942674 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -4439,15 +4439,15 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl, rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); if (sta) { - u8 *rx_mask = sta->ht_cap.mcs.rx_mask; + u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask; /* save the supp_rates of the ap */ - sta_rate_set = sta->supp_rates[wlvif->band]; - if (sta->ht_cap.ht_supported) + sta_rate_set = sta->deflink.supp_rates[wlvif->band]; + if (sta->deflink.ht_cap.ht_supported) sta_rate_set |= (rx_mask[0] << HW_HT_RATES_OFFSET) | (rx_mask[1] << HW_MIMO_RATES_OFFSET); - sta_ht_cap = sta->ht_cap; + sta_ht_cap = sta->deflink.ht_cap; sta_exists = true; } @@ -5225,7 +5225,8 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, if (ret < 0) return ret; - ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, + ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap, + true, wl_sta->hlid); if (ret) return ret; @@ -5786,8 +5787,9 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw, return; /* this callback is atomic, so schedule a new work */ - wlvif->rc_update_bw = sta->bandwidth; - memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap)); + wlvif->rc_update_bw = sta->deflink.bandwidth; + memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap, + sizeof(sta->deflink.ht_cap)); ieee80211_queue_work(hw, &wlvif->rc_update_work); } diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c index b1e9fb14d2b4..3297d73c327a 100644 --- a/drivers/staging/wfx/sta.c +++ b/drivers/staging/wfx/sta.c @@ -441,11 +441,11 @@ static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *i rcu_read_lock(); /* protect sta */ if (info->bssid && !info->ibss_joined) sta = ieee80211_find_sta(wvif->vif, info->bssid); - if (sta && sta->ht_cap.ht_supported) - ampdu_density = sta->ht_cap.ampdu_density; - if (sta && sta->ht_cap.ht_supported && + if (sta && sta->deflink.ht_cap.ht_supported) + ampdu_density = sta->deflink.ht_cap.ampdu_density; + if (sta && sta->deflink.ht_cap.ht_supported && !(info->ht_operation_mode & 
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)) - greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); + greenfield = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); rcu_read_unlock(); wvif->join_in_progress = false; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0943b2626f4d..75880fc70700 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2056,6 +2056,45 @@ struct ieee80211_sta_txpwr { enum nl80211_tx_power_setting type; }; +#define MAX_STA_LINKS 15 + +/** + * struct ieee80211_link_sta - station Link specific info + * All link specific info for a STA link for a non MLD STA(single) + * or a MLD STA(multiple entries) are stored here. + * + * @addr: MAC address of the Link STA. For non-MLO STA this is same as the addr + * in ieee80211_sta. For MLO Link STA this addr can be same or different + * from addr in ieee80211_sta (representing MLD STA addr) + * @supp_rates: Bitmap of supported rates + * @ht_cap: HT capabilities of this STA; restricted to our own capabilities + * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities + * @he_cap: HE capabilities of this STA + * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities + * @eht_cap: EHT capabilities of this STA + * @bandwidth: current bandwidth the station can receive with + * @rx_nss: in HT/VHT, the maximum number of spatial streams the + * station can receive at the moment, changed by operating mode + * notifications and capabilities. The value is only valid after + * the station moves to associated state. + * @txpwr: the station tx power configuration + * + */ +struct ieee80211_link_sta { + u8 addr[ETH_ALEN]; + + u32 supp_rates[NUM_NL80211_BANDS]; + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct ieee80211_sta_eht_cap eht_cap; + + u8 rx_nss; + enum ieee80211_sta_rx_bandwidth bandwidth; + struct ieee80211_sta_txpwr txpwr; +}; + /** * struct ieee80211_sta - station table entry * @@ -2065,15 +2104,11 @@ struct ieee80211_sta_txpwr { * either be protected by rcu_read_lock() explicitly or implicitly, * or you must take good care to not use such a pointer after a * call to your sta_remove callback that removed it. + * This also represents the MLD STA in case of MLO association + * and holds pointers to various link STA's * * @addr: MAC address * @aid: AID we assigned to the station if we're an AP - * @supp_rates: Bitmap of supported rates (per band) - * @ht_cap: HT capabilities of this STA; restricted to our own capabilities - * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities - * @he_cap: HE capabilities of this STA - * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities - * @eht_cap: EHT capabilities of this STA * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU * that this station is allowed to transmit to us. * Can be modified by driver. @@ -2085,11 +2120,6 @@ struct ieee80211_sta_txpwr { * if wme is supported. The bits order is like in * IEEE80211_WMM_IE_STA_QOSINFO_AC_*. * @max_sp: max Service Period. Only valid if wme is supported. - * @bandwidth: current bandwidth the station can receive with - * @rx_nss: in HT/VHT, the maximum number of spatial streams the - * station can receive at the moment, changed by operating mode - * notifications and capabilities. The value is only valid after - * the station moves to associated state. 
* @smps_mode: current SMPS mode (off, static or dynamic) * @rates: rate control selection table * @tdls: indicates whether the STA is a TDLS peer @@ -2102,25 +2132,28 @@ struct ieee80211_sta_txpwr { * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not. * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control. * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID - * @txpwr: the station tx power configuration * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames + * @multi_link_sta: Identifies if this sta is a MLD STA + * @deflink: This holds the default link STA information, for non MLO STA all link + * specific STA information is accessed through @deflink or through + * link[0] which points to address of @deflink. For MLO Link STA + * the first added link STA will point to deflink. + * @link: reference to Link Sta entries. For Non MLO STA, except 1st link, + * i.e link[0] all links would be assigned to NULL by default and + * would access link information via @deflink or link[0]. For MLO + * STA, first link STA being added will point its link pointer to + * @deflink address and remaining would be allocated and the address + * would be assigned to link[link_id] where link_id is the id assigned + * by the AP. */ struct ieee80211_sta { - u32 supp_rates[NUM_NL80211_BANDS]; u8 addr[ETH_ALEN]; u16 aid; - struct ieee80211_sta_ht_cap ht_cap; - struct ieee80211_sta_vht_cap vht_cap; - struct ieee80211_sta_he_cap he_cap; - struct ieee80211_he_6ghz_capa he_6ghz_capa; - struct ieee80211_sta_eht_cap eht_cap; u16 max_rx_aggregation_subframes; bool wme; u8 uapsd_queues; u8 max_sp; - u8 rx_nss; - enum ieee80211_sta_rx_bandwidth bandwidth; enum ieee80211_smps_mode smps_mode; struct ieee80211_sta_rates __rcu *rates; bool tdls; @@ -2147,10 +2180,13 @@ struct ieee80211_sta { bool support_p2p_ps; u16 max_rc_amsdu_len; u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS]; - struct ieee80211_sta_txpwr txpwr; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1]; + bool multi_link_sta; + struct ieee80211_link_sta deflink; + struct ieee80211_link_sta *link[MAX_STA_LINKS]; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; @@ -6371,7 +6407,7 @@ static inline int rate_supported(struct ieee80211_sta *sta, enum nl80211_band band, int index) { - return (sta == NULL || sta->supp_rates[band] & BIT(index)); + return (sta == NULL || sta->deflink.supp_rates[band] & BIT(index)); } static inline s8 diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 218cdc554d71..bfab39320004 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -263,7 +263,7 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid, mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); - if (sta->sta.he_cap.has_he && addbaext) + if (sta->sta.deflink.he_cap.has_he && addbaext) ieee80211_add_addbaext(sdata, skb, addbaext, buf_size); ieee80211_tx_skb(sdata, skb); @@ -296,7 +296,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, goto end; } - if (!sta->sta.ht_cap.ht_supported && + if (!sta->sta.deflink.ht_cap.ht_supported && sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ) { ht_dbg(sta->sdata, "STA %pM erroneously requests BA session on tid %d w/o QoS\n", @@ -312,9 +312,9 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, goto end; } - if (sta->sta.eht_cap.has_eht) + 
if (sta->sta.deflink.eht_cap.has_eht) max_buf_size = IEEE80211_MAX_AMPDU_BUF_EHT; - else if (sta->sta.he_cap.has_he) + else if (sta->sta.deflink.he_cap.has_he) max_buf_size = IEEE80211_MAX_AMPDU_BUF_HE; else max_buf_size = IEEE80211_MAX_AMPDU_BUF_HT; @@ -324,7 +324,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, * and if buffer size does not exceeds max value */ /* XXX: check own ht delayed BA capability?? */ if (((ba_policy != 1) && - (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || + (!(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || (buf_size > max_buf_size)) { status = WLAN_STATUS_INVALID_QOS_PARAM; ht_dbg_ratelimited(sta->sdata, @@ -507,7 +507,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, goto free; } - if (sta->sta.eht_cap.has_eht && elems && elems->addba_ext_ie) { + if (sta->sta.deflink.eht_cap.has_eht && elems && elems->addba_ext_ie) { u8 buf_size_1k = u8_get_bits(elems->addba_ext_ie->data, IEEE80211_ADDBA_EXT_BUF_SIZE_MASK); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 1deb3d874a4b..91878ed5ec46 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -467,7 +467,7 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta, sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); - if (sta->sta.he_cap.has_he) { + if (sta->sta.deflink.he_cap.has_he) { buf_size = local->hw.max_tx_aggregation_subframes; } else { /* @@ -594,7 +594,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, "Requested to start BA session on reserved tid=%d", tid)) return -EINVAL; - if (!pubsta->ht_cap.ht_supported && + if (!pubsta->deflink.ht_cap.ht_supported && sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ) return -EINVAL; @@ -647,7 +647,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, * is set when we receive a bss info from a probe response or a beacon. 
*/ if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC && - !sta->sta.ht_cap.ht_supported) { + !sta->sta.deflink.ht_cap.ht_supported) { ht_dbg(sdata, "BA request denied - IBSS STA %pM does not advertise HT support\n", pubsta->addr); diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c index 2619e12c8bda..4bab1683652d 100644 --- a/net/mac80211/airtime.c +++ b/net/mac80211/airtime.c @@ -647,8 +647,8 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_rx_status stat; - struct ieee80211_tx_rate *tx_rate = &sta->tx_stats.last_rate; - struct rate_info *ri = &sta->tx_stats.last_rate_info; + struct ieee80211_tx_rate *tx_rate = &sta->deflink.tx_stats.last_rate; + struct rate_info *ri = &sta->deflink.tx_stats.last_rate_info; u32 duration, overhead; u8 agg_shift; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 8e14ff53e4bd..f1d211e61e49 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -570,7 +570,8 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, if (pairwise) key = key_mtx_dereference(local, sta->ptk[key_idx]); else - key = key_mtx_dereference(local, sta->gtk[key_idx]); + key = key_mtx_dereference(local, + sta->deflink.gtk[key_idx]); } else key = key_mtx_dereference(local, sdata->keys[key_idx]); @@ -620,7 +621,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, else if (!pairwise && key_idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS) - key = rcu_dereference(sta->gtk[key_idx]); + key = rcu_dereference(sta->deflink.gtk[key_idx]); } else key = rcu_dereference(sdata->keys[key_idx]); @@ -1728,9 +1729,9 @@ static int sta_apply_parameters(struct ieee80211_local *local, sta->listen_interval = params->listen_interval; if (params->sta_modify_mask & STATION_PARAM_APPLY_STA_TXPOWER) { - sta->sta.txpwr.type = params->txpwr.type; + sta->sta.deflink.txpwr.type = params->txpwr.type; if (params->txpwr.type == NL80211_TX_POWER_LIMITED) - sta->sta.txpwr.power = params->txpwr.power; + sta->sta.deflink.txpwr.power = params->txpwr.power; ret = drv_sta_set_txpwr(local, sdata, sta); if (ret) return ret; @@ -1740,7 +1741,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef, sband, params->supported_rates, params->supported_rates_len, - &sta->sta.supp_rates[sband->band]); + &sta->sta.deflink.supp_rates[sband->band]); } if (params->ht_capa) diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e26d42de14ec..e3452445b363 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -199,7 +199,7 @@ static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta) switch (width) { case IEEE80211_STA_RX_BW_20: - if (sta->sta.ht_cap.ht_supported) + if (sta->sta.deflink.ht_cap.ht_supported) return NL80211_CHAN_WIDTH_20; else return NL80211_CHAN_WIDTH_20_NOHT; @@ -375,15 +375,15 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local, new_sta_bw = ieee80211_sta_cur_vht_bw(sta); /* nothing change */ - if (new_sta_bw == sta->sta.bandwidth) + if (new_sta_bw == sta->sta.deflink.bandwidth) continue; /* vif changed to narrow BW and narrow BW for station wasn't * requested or vise versa */ - if ((new_sta_bw < sta->sta.bandwidth) == !narrowed) + if ((new_sta_bw < sta->sta.deflink.bandwidth) == !narrowed) continue; - sta->sta.bandwidth = new_sta_bw; + sta->sta.deflink.bandwidth = new_sta_bw; rate_control_rate_update(local, sband, sta, 
IEEE80211_RC_BW_CHANGED); } diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 9479f2787ea7..46d08861ca65 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -447,7 +447,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, int i; ssize_t bufsz = 512; struct sta_info *sta = file->private_data; - struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; + struct ieee80211_sta_ht_cap *htc = &sta->sta.deflink.ht_cap; ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); @@ -531,7 +531,7 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf, { char *buf, *p; struct sta_info *sta = file->private_data; - struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap; + struct ieee80211_sta_vht_cap *vhtc = &sta->sta.deflink.vht_cap; ssize_t ret; ssize_t bufsz = 512; @@ -646,7 +646,7 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf, char *buf, *p; size_t buf_sz = PAGE_SIZE; struct sta_info *sta = file->private_data; - struct ieee80211_sta_he_cap *hec = &sta->sta.he_cap; + struct ieee80211_sta_he_cap *hec = &sta->sta.deflink.he_cap; struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp; u8 ppe_size; u8 *cap; @@ -1052,9 +1052,9 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) DEBUGFS_ADD(vht_capa); DEBUGFS_ADD(he_capa); - DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates); - DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments); - DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered); + DEBUGFS_ADD_COUNTER(rx_duplicates, deflink.rx_stats.num_duplicates); + DEBUGFS_ADD_COUNTER(rx_fragments, deflink.rx_stats.fragments); + DEBUGFS_ADD_COUNTER(tx_filtered, deflink.status_stats.filtered); if (local->ops->wake_tx_queue) { DEBUGFS_ADD(aqm); diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c index 364ad0ef7692..96c9486bf2fe 100644 --- a/net/mac80211/eht.c +++ b/net/mac80211/eht.c @@ -14,7 +14,7 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, const struct ieee80211_eht_cap_elem *eht_cap_ie_elem, u8 eht_cap_len, struct sta_info *sta) { - struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.eht_cap; + struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.deflink.eht_cap; struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; u8 eht_ppe_size = 0; u8 mcs_nss_size; @@ -71,6 +71,6 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, eht_cap->has_eht = true; - sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta); - sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta); + sta->deflink.cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta); + sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta); } diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c index b2253df54413..31cd3c1ac07f 100644 --- a/net/mac80211/ethtool.c +++ b/net/mac80211/ethtool.c @@ -114,7 +114,7 @@ static void ieee80211_get_stats(struct net_device *dev, sta_set_sinfo(sta, &sinfo, false); i = 0; - ADD_STA_STATS(sta); + ADD_STA_STATS(sta->link[0]); data[i++] = sta->sta_state; @@ -140,7 +140,7 @@ static void ieee80211_get_stats(struct net_device *dev, memset(&sinfo, 0, sizeof(sinfo)); sta_set_sinfo(sta, &sinfo, false); i = 0; - ADD_STA_STATS(sta); + ADD_STA_STATS(sta->link[0]); } } diff --git a/net/mac80211/he.c b/net/mac80211/he.c index c05af7018f79..1a61f7552edd 100644 --- a/net/mac80211/he.c +++ b/net/mac80211/he.c @@ -49,7 +49,7 @@ ieee80211_update_from_he_6ghz_capa(const struct ieee80211_he_6ghz_capa *he_6ghz_ break; } - sta->sta.he_6ghz_capa = *he_6ghz_capa; + 
sta->sta.deflink.he_6ghz_capa = *he_6ghz_capa; } static void ieee80211_he_mcs_disable(__le16 *he_mcs) @@ -110,7 +110,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, const struct ieee80211_he_6ghz_capa *he_6ghz_capa, struct sta_info *sta) { - struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap; struct ieee80211_sta_he_cap own_he_cap; struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; u8 he_ppe_size; @@ -153,8 +153,8 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, he_cap->has_he = true; - sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta); - sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta); + sta->deflink.cur_max_bandwidth = ieee80211_sta_cap_rx_bw(sta); + sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta); if (sband->band == NL80211_BAND_6GHZ && he_6ghz_capa) ieee80211_update_from_he_6ghz_capa(he_6ghz_capa, sta); diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 2eb7641f5556..171bd16b13f3 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -243,9 +243,9 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839; apply: - changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); + changed = memcmp(&sta->sta.deflink.ht_cap, &ht_cap, sizeof(ht_cap)); - memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); + memcpy(&sta->sta.deflink.ht_cap, &ht_cap, sizeof(ht_cap)); switch (sdata->vif.bss_conf.chandef.width) { default: @@ -264,9 +264,9 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, break; } - sta->sta.bandwidth = bw; + sta->sta.deflink.bandwidth = bw; - sta->cur_max_bandwidth = + sta->deflink.cur_max_bandwidth = ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? 
IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 0416c4d22292..14c04fd48b7a 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -637,7 +637,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; - sta->sta.supp_rates[band] = supp_rates | + sta->sta.deflink.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); return ieee80211_ibss_finish_sta(sta); @@ -1005,7 +1005,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, if (sta) { u32 prev_rates; - prev_rates = sta->sta.supp_rates[band]; + prev_rates = sta->sta.deflink.supp_rates[band]; /* make sure mandatory rates are always added */ scan_width = NL80211_BSS_CHAN_WIDTH_20; if (rx_status->bw == RATE_INFO_BW_5) @@ -1013,13 +1013,13 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, else if (rx_status->bw == RATE_INFO_BW_10) scan_width = NL80211_BSS_CHAN_WIDTH_10; - sta->sta.supp_rates[band] = supp_rates | + sta->sta.deflink.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); - if (sta->sta.supp_rates[band] != prev_rates) { + if (sta->sta.deflink.supp_rates[band] != prev_rates) { ibss_dbg(sdata, "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n", sta->sta.addr, prev_rates, - sta->sta.supp_rates[band]); + sta->sta.deflink.supp_rates[band]); rates_updated = true; } } else { @@ -1043,7 +1043,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, /* we both use HT */ struct ieee80211_ht_cap htcap_ie; struct cfg80211_chan_def chandef; - enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth; + enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); ieee80211_chandef_ht_oper(elems->ht_operation, &chandef); @@ -1058,7 +1058,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_40) { /* we both use VHT */ struct ieee80211_vht_cap cap_ie; - struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap; + struct ieee80211_sta_vht_cap cap = sta->sta.deflink.vht_cap; u32 vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info); @@ -1069,11 +1069,11 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie)); ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, &cap_ie, sta); - if (memcmp(&cap, &sta->sta.vht_cap, sizeof(cap))) + if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap))) rates_updated |= true; } - if (bw != sta->sta.bandwidth) + if (bw != sta->sta.deflink.bandwidth) rates_updated |= true; if (!cfg80211_chandef_compatible(&sdata->u.ibss.chandef, @@ -1083,12 +1083,12 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, if (sta && rates_updated) { u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED; - u8 rx_nss = sta->sta.rx_nss; + u8 rx_nss = sta->sta.deflink.rx_nss; /* Force rx_nss recalculation */ - sta->sta.rx_nss = 0; + sta->sta.deflink.rx_nss = 0; rate_control_rate_init(sta); - if (sta->sta.rx_nss != rx_nss) + if (sta->sta.deflink.rx_nss != rx_nss) changed |= IEEE80211_RC_NSS_CHANGED; drv_sta_rc_update(local, sdata, &sta->sta, changed); @@ -1235,7 +1235,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, /* make sure mandatory rates are always added */ sband = 
local->hw.wiphy->bands[band]; - sta->sta.supp_rates[band] = supp_rates | + sta->sta.deflink.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband, scan_width); spin_lock(&ifibss->incomplete_lock); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index f695fc80088b..0fcf8aebedc4 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -476,7 +476,7 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) _ieee80211_set_tx_key(new, true); } else { - rcu_assign_pointer(sta->gtk[idx], new); + rcu_assign_pointer(sta->deflink.gtk[idx], new); } /* Only needed for transition from no key -> key. * Still triggers unnecessary when using Extended Key ID @@ -826,7 +826,8 @@ int ieee80211_key_link(struct ieee80211_key *key, (old_key && old_key->conf.cipher != key->conf.cipher)) goto out; } else if (sta) { - old_key = key_mtx_dereference(sdata->local, sta->gtk[idx]); + old_key = key_mtx_dereference(sdata->local, + sta->deflink.gtk[idx]); } else { old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); } @@ -1076,8 +1077,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, int i; mutex_lock(&local->key_mtx); - for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) { - key = key_mtx_dereference(local, sta->gtk[i]); + for (i = 0; i < ARRAY_SIZE(sta->deflink.gtk); i++) { + key = key_mtx_dereference(local, sta->deflink.gtk[i]); if (!key) continue; ieee80211_key_replace(key->sdata, key->sta, diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 44a6fdb6efbd..58ebdcd69d05 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -310,7 +310,7 @@ void ieee80211s_update_metric(struct ieee80211_local *local, LINK_FAIL_THRESH) mesh_plink_broken(sta); - sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo); + sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo); ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg, cfg80211_calculate_bitrate(&rinfo)); } diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index a829470dd59e..42ba7424589e 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -61,8 +61,8 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold; return rssi_threshold == 0 || (sta && - (s8)-ewma_signal_read(&sta->rx_stats_avg.signal) > - rssi_threshold); + (s8)-ewma_signal_read(&sta->deflink.rx_stats_avg.signal) > + rssi_threshold); } /** @@ -125,7 +125,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) continue; short_slot = false; - if (erp_rates & sta->sta.supp_rates[sband->band]) + if (erp_rates & sta->sta.deflink.supp_rates[sband->band]) short_slot = true; else break; @@ -175,10 +175,10 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) sta->mesh->plink_state != NL80211_PLINK_ESTAB) continue; - if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20) + if (sta->sta.deflink.bandwidth > IEEE80211_STA_RX_BW_20) continue; - if (!sta->sta.ht_cap.ht_supported) { + if (!sta->sta.deflink.ht_cap.ht_supported) { mpl_dbg(sdata, "nonHT sta (%pM) is present\n", sta->sta.addr); non_ht_sta = true; @@ -415,7 +415,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; u32 rates, basic_rates = 0, changed = 0; - enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth; + enum ieee80211_sta_rx_bandwidth bw = 
sta->sta.deflink.bandwidth; sband = ieee80211_get_sband(sdata); if (!sband) @@ -425,7 +425,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, &basic_rates); spin_lock_bh(&sta->mesh->plink_lock); - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; /* rates and capabilities don't change during peering */ if (sta->mesh->plink_state == NL80211_PLINK_ESTAB && @@ -433,9 +433,9 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, goto out; sta->mesh->processed_beacon = true; - if (sta->sta.supp_rates[sband->band] != rates) + if (sta->sta.deflink.supp_rates[sband->band] != rates) changed |= IEEE80211_RC_SUPP_RATES_CHANGED; - sta->sta.supp_rates[sband->band] = rates; + sta->sta.deflink.supp_rates[sband->band] = rates; if (ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems->ht_cap_elem, sta)) @@ -449,16 +449,16 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, elems->he_6ghz_capa, sta); - if (bw != sta->sta.bandwidth) + if (bw != sta->sta.deflink.bandwidth) changed |= IEEE80211_RC_BW_CHANGED; /* HT peer is operating 20MHz-only */ if (elems->ht_operation && !(elems->ht_operation->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { - if (sta->sta.bandwidth != IEEE80211_STA_RX_BW_20) + if (sta->sta.deflink.bandwidth != IEEE80211_STA_RX_BW_20) changed |= IEEE80211_RC_BW_CHANGED; - sta->sta.bandwidth = IEEE80211_STA_RX_BW_20; + sta->sta.deflink.bandwidth = IEEE80211_STA_RX_BW_20; } if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 1b30c724ca8d..b857915881e0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3342,7 +3342,7 @@ static bool ieee80211_twt_req_supported(const struct sta_info *sta, if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT)) return false; - return sta->sta.he_cap.he_cap_elem.mac_cap_info[0] & + return sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES; } @@ -3369,7 +3369,7 @@ static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata, ieee80211_vif_type_p2p(&sdata->vif)); return bss_conf->he_support && - (sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & + (sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BCAST_TWT) && own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] & @@ -3587,7 +3587,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, elems->he_6ghz_capa, sta); - bss_conf->he_support = sta->sta.he_cap.has_he; + bss_conf->he_support = sta->sta.deflink.he_cap.has_he; if (elems->rsnx && elems->rsnx_len && (elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) && wiphy_ext_feature_isset(local->hw.wiphy, @@ -3607,7 +3607,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, elems->eht_cap_len, sta); - bss_conf->eht_support = sta->sta.eht_cap.has_eht; + bss_conf->eht_support = sta->sta.deflink.eht_cap.has_eht; } else { bss_conf->eht_support = false; } @@ -3678,7 +3678,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK; nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; nss += 1; - sta->sta.rx_nss = nss; + sta->sta.deflink.rx_nss = nss; } rate_control_rate_init(sta); @@ -4836,9 +4836,9 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t) if (!sta) return; - timeout = sta->status_stats.last_ack; - if (time_before(sta->status_stats.last_ack, sta->rx_stats.last_rx)) - timeout = sta->rx_stats.last_rx; + 
timeout = sta->deflink.status_stats.last_ack; + if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx)) + timeout = sta->deflink.rx_stats.last_rx; timeout += IEEE80211_CONNECTION_IDLE_TIME; /* If timeout is after now, then update timer to fire at @@ -5638,7 +5638,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, } if (rates) - new_sta->sta.supp_rates[cbss->channel->band] = rates; + new_sta->sta.deflink.supp_rates[cbss->channel->band] = rates; else sdata_info(sdata, "No rates found, keeping mandatory only\n"); diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c index 7c1a735b9eee..f97cb4c453d3 100644 --- a/net/mac80211/ocb.c +++ b/net/mac80211/ocb.c @@ -74,7 +74,7 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, /* Add only mandatory rates for now */ sband = local->hw.wiphy->bands[band]; - sta->sta.supp_rates[band] = + sta->sta.deflink.supp_rates[band] = ieee80211_mandatory_rates(sband, scan_width); spin_lock(&ifocb->incomplete_lock); diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 8c6416129d5b..ae9700e0a1a5 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -371,7 +371,7 @@ static void __rate_control_send_low(struct ieee80211_hw *hw, WARN_ONCE(i == sband->n_bitrates, "no supported rates for sta %pM (0x%x, band %d) in rate_mask 0x%x with flags 0x%x\n", sta ? sta->addr : NULL, - sta ? sta->supp_rates[sband->band] : -1, + sta ? sta->deflink.supp_rates[sband->band] : -1, sband->band, rate_mask, rate_flags); @@ -781,11 +781,11 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata, u16 sta_vht_mask[NL80211_VHT_NSS_MAX]; /* Filter out rates that the STA does not support */ - *mask &= sta->supp_rates[sband->band]; + *mask &= sta->deflink.supp_rates[sband->band]; for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) - mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i]; + mcs_mask[i] &= sta->deflink.ht_cap.mcs.rx_mask[i]; - sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map; + sta_vht_cap = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask); for (i = 0; i < NL80211_VHT_NSS_MAX; i++) vht_mask[i] &= sta_vht_mask[i]; diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 5a6bf46a4248..7b1f5c045e06 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -606,7 +606,7 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi) int tmp_max_streams, group, tmp_idx, tmp_prob; int tmp_tp = 0; - if (!mi->sta->ht_cap.ht_supported) + if (!mi->sta->deflink.ht_cap.ht_supported) return; group = MI_RATE_GROUP(mi->max_tp_rate[0]); @@ -996,7 +996,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES]; u16 tmp_legacy_tp_rate[MAX_THR_RATES], tmp_max_prob_rate; u16 index; - bool ht_supported = mi->sta->ht_cap.ht_supported; + bool ht_supported = mi->sta->deflink.ht_cap.ht_supported; if (mi->ampdu_packets > 0) { if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN)) @@ -1419,7 +1419,7 @@ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi) * the limit here to avoid the complexity of having to de-aggregate * packets in the queue. 
*/ - if (!mi->sta->vht_cap.vht_supported) + if (!mi->sta->deflink.vht_cap.vht_supported) return IEEE80211_MAX_MPDU_LEN_HT_BA; /* unlimited */ @@ -1536,7 +1536,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, if (sband->band != NL80211_BAND_2GHZ) return; - if (sta->ht_cap.ht_supported && + if (sta->deflink.ht_cap.ht_supported && !ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES)) return; @@ -1559,7 +1559,7 @@ minstrel_ht_update_ofdm(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, const u8 *rates; int i; - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) return; rates = mp->ofdm_rates[sband->band]; @@ -1579,9 +1579,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, { struct minstrel_priv *mp = priv; struct minstrel_ht_sta *mi = priv_sta; - struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; - u16 ht_cap = sta->ht_cap.cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs; + u16 ht_cap = sta->deflink.ht_cap.cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; const struct ieee80211_rate *ctl_rate; bool ldpc, erp; int use_vht; @@ -1653,7 +1653,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, } if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH && - sta->bandwidth < IEEE80211_STA_RX_BW_40) + sta->deflink.bandwidth < IEEE80211_STA_RX_BW_40) continue; nss = minstrel_mcs_groups[i].streams; @@ -1680,7 +1680,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, continue; if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) { - if (sta->bandwidth < IEEE80211_STA_RX_BW_80 || + if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_80 || ((gflags & IEEE80211_TX_RC_SHORT_GI) && !(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) { continue; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 03ee1d9b6d08..959a36fd658b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -221,7 +221,7 @@ static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, skb_queue_tail(&sdata->skb_queue, skb); ieee80211_queue_work(&sdata->local->hw, &sdata->work); if (sta) - sta->rx_stats.packets++; + sta->deflink.rx_stats.packets++; } static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, @@ -1465,7 +1465,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) if (unlikely(ieee80211_has_retry(hdr->frame_control) && rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); - rx->sta->rx_stats.num_duplicates++; + rx->sta->deflink.rx_stats.num_duplicates++; return RX_DROP_UNUSABLE; } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; @@ -1761,46 +1761,47 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) NL80211_IFTYPE_ADHOC); if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1)) - sta->rx_stats.last_rate = + sta->deflink.rx_stats.last_rate = sta_stats_encode_rate(status); } } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1)) { /* * Mesh beacons will update last_rx when if they are 
found to * match the current local configuration when processed. */ - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control)) - sta->rx_stats.last_rate = sta_stats_encode_rate(status); + sta->deflink.rx_stats.last_rate = sta_stats_encode_rate(status); } - sta->rx_stats.fragments++; + sta->deflink.rx_stats.fragments++; - u64_stats_update_begin(&rx->sta->rx_stats.syncp); - sta->rx_stats.bytes += rx->skb->len; - u64_stats_update_end(&rx->sta->rx_stats.syncp); + u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp); + sta->deflink.rx_stats.bytes += rx->skb->len; + u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp); if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { - sta->rx_stats.last_signal = status->signal; - ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); + sta->deflink.rx_stats.last_signal = status->signal; + ewma_signal_add(&sta->deflink.rx_stats_avg.signal, + -status->signal); } if (status->chains) { - sta->rx_stats.chains = status->chains; + sta->deflink.rx_stats.chains = status->chains; for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { int signal = status->chain_signal[i]; if (!(status->chains & BIT(i))) continue; - sta->rx_stats.chain_signal_last[i] = signal; - ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], + sta->deflink.rx_stats.chain_signal_last[i] = signal; + ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i], -signal); } } @@ -1861,7 +1862,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * Update counter and free packet here to avoid * counting this as a dropped packed. */ - sta->rx_stats.packets++; + sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@ -1893,11 +1894,11 @@ ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) } if (rx->sta) - key = rcu_dereference(rx->sta->gtk[idx]); + key = rcu_dereference(rx->sta->deflink.gtk[idx]); if (!key) key = rcu_dereference(sdata->keys[idx]); if (!key && rx->sta) - key = rcu_dereference(rx->sta->gtk[idx2]); + key = rcu_dereference(rx->sta->deflink.gtk[idx2]); if (!key) key = rcu_dereference(sdata->keys[idx2]); @@ -2012,7 +2013,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) test_sta_flag(rx->sta, WLAN_STA_MFP)) return RX_DROP_MONITOR; - rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); + rx->key = rcu_dereference(rx->sta->deflink.gtk[mmie_keyidx]); } if (!rx->key) rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); @@ -2035,7 +2036,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) } else { if (rx->sta) { for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(rx->sta->gtk[i]); + key = rcu_dereference(rx->sta->deflink.gtk[i]); if (key) break; } @@ -2072,7 +2073,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) /* check per-station GTK first, if multicast packet */ if (is_multicast_ether_addr(hdr->addr1) && rx->sta) - rx->key = rcu_dereference(rx->sta->gtk[keyidx]); + rx->key = rcu_dereference(rx->sta->deflink.gtk[keyidx]); /* if not found, try default key */ if (!rx->key) { @@ -2398,7 +2399,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) out: ieee80211_led_rx(rx->local); if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; return RX_CONTINUE; } @@ -2645,9 +2646,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) * for non-QoS-data frames. Here we know it's a data * frame, so count MSDUs. 
*/ - u64_stats_update_begin(&rx->sta->rx_stats.syncp); - rx->sta->rx_stats.msdu[rx->seqno_idx]++; - u64_stats_update_end(&rx->sta->rx_stats.syncp); + u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp); + rx->sta->deflink.rx_stats.msdu[rx->seqno_idx]++; + u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp); } if ((sdata->vif.type == NL80211_IFTYPE_AP || @@ -3342,7 +3343,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.category) { case WLAN_CATEGORY_HT: /* reject HT action frames from stations not supporting HT */ - if (!rx->sta->sta.ht_cap.ht_supported) + if (!rx->sta->sta.deflink.ht_cap.ht_supported) goto invalid; if (sdata->vif.type != NL80211_IFTYPE_STATION && @@ -3406,7 +3407,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) struct sta_opmode_info sta_opmode = {}; /* If it doesn't support 40 MHz it can't change ... */ - if (!(rx->sta->sta.ht_cap.cap & + if (!(rx->sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) goto handled; @@ -3416,13 +3417,13 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) max_bw = ieee80211_sta_cap_rx_bw(rx->sta); /* set cur_max_bandwidth and recalc sta bw */ - rx->sta->cur_max_bandwidth = max_bw; + rx->sta->deflink.cur_max_bandwidth = max_bw; new_bw = ieee80211_sta_cur_vht_bw(rx->sta); - if (rx->sta->sta.bandwidth == new_bw) + if (rx->sta->sta.deflink.bandwidth == new_bw) goto handled; - rx->sta->sta.bandwidth = new_bw; + rx->sta->sta.deflink.bandwidth = new_bw; sband = rx->local->hw.wiphy->bands[status->band]; sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(rx->sta); @@ -3619,7 +3620,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) handled: if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; @@ -3653,7 +3654,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) ieee80211_rx_status_to_khz(status), sig, rx->skb->data, rx->skb->len, 0)) { if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@ -3691,7 +3692,7 @@ ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) handled: if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@ -3911,7 +3912,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, case RX_DROP_MONITOR: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) - rx->sta->rx_stats.dropped++; + rx->sta->deflink.rx_stats.dropped++; fallthrough; case RX_CONTINUE: { struct ieee80211_rate *rate = NULL; @@ -3930,7 +3931,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, case RX_DROP_UNUSABLE: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) - rx->sta->rx_stats.dropped++; + rx->sta->deflink.rx_stats.dropped++; dev_kfree_skb(rx->skb); break; case RX_QUEUED: @@ -4482,15 +4483,15 @@ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, void *sa = skb->data + ETH_ALEN; void *da = skb->data; - stats = &sta->rx_stats; + stats = &sta->deflink.rx_stats; if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->pcpu_rx_stats); + stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats); /* statistics part of ieee80211_rx_h_sta_process() */ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->rx_stats_avg.signal, + ewma_signal_add(&sta->deflink.rx_stats_avg.signal, -status->signal); } @@ -4506,7 +4507,7 @@ static 
void ieee80211_rx_8023(struct ieee80211_rx_data *rx, stats->chain_signal_last[i] = signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], + ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i], -signal); } } @@ -4582,7 +4583,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; } addrs __aligned(2); - struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; + struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write * to a common data structure; drivers can implement that per queue @@ -4684,7 +4685,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, drop: dev_kfree_skb(skb); if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->pcpu_rx_stats); + stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats); stats->dropped++; return true; diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c index 4141bc80cdfd..8ca7d45d6daa 100644 --- a/net/mac80211/s1g.c +++ b/net/mac80211/s1g.c @@ -11,8 +11,8 @@ void ieee80211_s1g_sta_rate_init(struct sta_info *sta) { /* avoid indicating legacy bitrates for S1G STAs */ - sta->tx_stats.last_rate.flags |= IEEE80211_TX_RC_S1G_MCS; - sta->rx_stats.last_rate = + sta->deflink.tx_stats.last_rate.flags |= IEEE80211_TX_RC_S1G_MCS; + sta->deflink.rx_stats.last_rate = STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_S1G); } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 91fbb1ee5c38..e04a0905e941 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -287,7 +287,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) #ifdef CONFIG_MAC80211_MESH kfree(sta->mesh); #endif - free_percpu(sta->pcpu_rx_stats); + free_percpu(sta->deflink.pcpu_rx_stats); kfree(sta); } @@ -346,9 +346,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, return NULL; if (ieee80211_hw_check(hw, USES_RSS)) { - sta->pcpu_rx_stats = + sta->deflink.pcpu_rx_stats = alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); - if (!sta->pcpu_rx_stats) + if (!sta->deflink.pcpu_rx_stats) goto free; } @@ -376,6 +376,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->sta.max_rx_aggregation_subframes = local->hw.max_rx_aggregation_subframes; + /* TODO link specific alloc and assignments for MLO Link STA */ + + /* For non MLO STA, link info can be accessed either via deflink + * or link[0] + */ + sta->link[0] = &sta->deflink; + sta->sta.link[0] = &sta->sta.deflink; + /* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only. * The Tx path starts to use a key as soon as the key slot ptk_idx * references to is not NULL. 
To not use the initial Rx-only key @@ -387,9 +395,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->local = local; sta->sdata = sdata; - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; - u64_stats_init(&sta->rx_stats.syncp); + u64_stats_init(&sta->deflink.rx_stats.syncp); ieee80211_init_frag_cache(&sta->frags); @@ -399,10 +407,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->reserved_tid = IEEE80211_TID_UNRESERVED; sta->last_connected = ktime_get_seconds(); - ewma_signal_init(&sta->rx_stats_avg.signal); - ewma_avg_signal_init(&sta->status_stats.avg_ack_signal); - for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++) - ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]); + ewma_signal_init(&sta->deflink.rx_stats_avg.signal); + ewma_avg_signal_init(&sta->deflink.status_stats.avg_ack_signal); + for (i = 0; i < ARRAY_SIZE(sta->deflink.rx_stats_avg.chain_signal); i++) + ewma_signal_init(&sta->deflink.rx_stats_avg.chain_signal[i]); if (local->ops->wake_tx_queue) { void *txq_data; @@ -472,7 +480,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, if (!(rate->flags & mandatory)) continue; - sta->sta.supp_rates[i] |= BIT(r); + sta->sta.deflink.supp_rates[i] |= BIT(r); } } @@ -524,7 +532,7 @@ free_txq: if (sta->sta.txq[0]) kfree(to_txq_info(sta->sta.txq[0])); free: - free_percpu(sta->pcpu_rx_stats); + free_percpu(sta->deflink.pcpu_rx_stats); #ifdef CONFIG_MAC80211_MESH kfree(sta->mesh); #endif @@ -2087,16 +2095,16 @@ int sta_info_move_state(struct sta_info *sta, u8 sta_info_tx_streams(struct sta_info *sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.deflink.ht_cap; u8 rx_streams; - if (!sta->sta.ht_cap.ht_supported) + if (!sta->sta.deflink.ht_cap.ht_supported) return 1; - if (sta->sta.vht_cap.vht_supported) { + if (sta->sta.deflink.vht_cap.vht_supported) { int i; u16 tx_mcs_map = - le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map); + le16_to_cpu(sta->sta.deflink.vht_cap.vht_mcs.tx_mcs_map); for (i = 7; i >= 0; i--) if ((tx_mcs_map & (0x3 << (i * 2))) != @@ -2123,16 +2131,16 @@ u8 sta_info_tx_streams(struct sta_info *sta) static struct ieee80211_sta_rx_stats * sta_get_last_rx_stats(struct sta_info *sta) { - struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; + struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats; int cpu; - if (!sta->pcpu_rx_stats) + if (!sta->deflink.pcpu_rx_stats) return stats; for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpustats; - cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); if (time_after(cpustats->last_rx, stats->last_rx)) stats = cpustats; @@ -2226,13 +2234,15 @@ static void sta_set_tidstats(struct sta_info *sta, int cpu; if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) { - tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid); + tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats, + tid); - if (sta->pcpu_rx_stats) { + if (sta->deflink.pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; - cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, + cpu); tidstats->rx_msdu += sta_get_tidstats_msdu(cpurxs, tid); } @@ -2243,19 +2253,19 @@ static void sta_set_tidstats(struct sta_info *sta, if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU); - 
tidstats->tx_msdu = sta->tx_stats.msdu[tid]; + tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid]; } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES); - tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid]; + tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid]; } if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED); - tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid]; + tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid]; } if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) { @@ -2326,26 +2336,27 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { sinfo->tx_bytes = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - sinfo->tx_bytes += sta->tx_stats.bytes[ac]; + sinfo->tx_bytes += sta->deflink.tx_stats.bytes[ac]; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) { sinfo->tx_packets = 0; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - sinfo->tx_packets += sta->tx_stats.packets[ac]; + sinfo->tx_packets += sta->deflink.tx_stats.packets[ac]; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); } if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) | BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) { - sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats); + sinfo->rx_bytes += sta_get_stats_bytes(&sta->deflink.rx_stats); - if (sta->pcpu_rx_stats) { + if (sta->deflink.pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; - cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, + cpu); sinfo->rx_bytes += sta_get_stats_bytes(cpurxs); } } @@ -2354,12 +2365,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) { - sinfo->rx_packets = sta->rx_stats.packets; - if (sta->pcpu_rx_stats) { + sinfo->rx_packets = sta->deflink.rx_stats.packets; + if (sta->deflink.pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; - cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats, + cpu); sinfo->rx_packets += cpurxs->packets; } } @@ -2367,12 +2379,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) { - sinfo->tx_retries = sta->status_stats.retry_count; + sinfo->tx_retries = sta->deflink.status_stats.retry_count; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) { - sinfo->tx_failed = sta->status_stats.retry_failed; + sinfo->tx_failed = sta->deflink.status_stats.retry_failed; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); } @@ -2393,12 +2405,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); } - sinfo->rx_dropped_misc = sta->rx_stats.dropped; - if (sta->pcpu_rx_stats) { + sinfo->rx_dropped_misc = sta->deflink.rx_stats.dropped; + if (sta->deflink.pcpu_rx_stats) { for_each_possible_cpu(cpu) { struct ieee80211_sta_rx_stats *cpurxs; - cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); + cpurxs = 
per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu); sinfo->rx_dropped_misc += cpurxs->dropped; } } @@ -2417,10 +2429,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } - if (!sta->pcpu_rx_stats && + if (!sta->deflink.pcpu_rx_stats && !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) { sinfo->signal_avg = - -ewma_signal_read(&sta->rx_stats_avg.signal); + -ewma_signal_read(&sta->deflink.rx_stats_avg.signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } } @@ -2433,7 +2445,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) | BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) { sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); - if (!sta->pcpu_rx_stats) + if (!sta->deflink.pcpu_rx_stats) sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); sinfo->chains = last_rxstats->chains; @@ -2442,12 +2454,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->chain_signal[i] = last_rxstats->chain_signal_last[i]; sinfo->chain_signal_avg[i] = - -ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]); + -ewma_signal_read(&sta->deflink.rx_stats_avg.chain_signal[i]); } } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) { - sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, + sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &sinfo->txrate); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } @@ -2529,16 +2541,16 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) && - sta->status_stats.ack_signal_filled) { - sinfo->ack_signal = sta->status_stats.last_ack_signal; + sta->deflink.status_stats.ack_signal_filled) { + sinfo->ack_signal = sta->deflink.status_stats.last_ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); } if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) && - sta->status_stats.ack_signal_filled) { + sta->deflink.status_stats.ack_signal_filled) { sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read( - &sta->status_stats.avg_ack_signal); + &sta->deflink.status_stats.avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } @@ -2573,10 +2585,10 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta) { struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); - if (!sta->status_stats.last_ack || - time_after(stats->last_rx, sta->status_stats.last_ack)) + if (!sta->deflink.status_stats.last_ack || + time_after(stats->last_rx, sta->deflink.status_stats.last_ack)) return stats->last_rx; - return sta->status_stats.last_ack; + return sta->deflink.status_stats.last_ack; } static void sta_update_codel_params(struct sta_info *sta, u32 thr) diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 379fd367197f..35c390bedfba 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -483,6 +483,86 @@ struct ieee80211_fragment_cache { */ #define STA_SLOW_THRESHOLD 6000 /* 6 Mbps */ +/** + * struct link_sta_info - Link STA information + * All link specific sta info are stored here for reference. This can be + * a single entry for non-MLD STA or multiple entries for MLD STA + * @addr: Link MAC address - Can be same as MLD STA mac address and is always + * same for non-MLD STA. This is used as key for searching link STA + * @link_id: Link ID uniquely identifying the link STA. 
This is 0 for non-MLD + * and set to the corresponding vif LinkId for MLD STA + * @sta: Points to the STA info + * @gtk: group keys negotiated with this station, if any + * @tx_stats: TX statistics + * @tx_stats.packets: # of packets transmitted + * @tx_stats.bytes: # of bytes in all packets transmitted + * @tx_stats.last_rate: last TX rate + * @tx_stats.msdu: # of transmitted MSDUs per TID + * @rx_stats: RX statistics + * @rx_stats_avg: averaged RX statistics + * @rx_stats_avg.signal: averaged signal + * @rx_stats_avg.chain_signal: averaged per-chain signal + * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs + * this (by advertising the USES_RSS hw flag) + * @status_stats: TX status statistics + * @status_stats.filtered: # of filtered frames + * @status_stats.retry_failed: # of frames that failed after retry + * @status_stats.retry_count: # of retries attempted + * @status_stats.lost_packets: # of lost packets + * @status_stats.last_pkt_time: timestamp of last ACKed packet + * @status_stats.msdu_retries: # of MSDU retries + * @status_stats.msdu_failed: # of failed MSDUs + * @status_stats.last_ack: last ack timestamp (jiffies) + * @status_stats.last_ack_signal: last ACK signal + * @status_stats.ack_signal_filled: last ACK signal validity + * @status_stats.avg_ack_signal: average ACK signal + * TODO Move other link params from sta_info as required for MLD operation + */ +struct link_sta_info { + u8 addr[ETH_ALEN]; + u8 link_id; + + /* TODO rhash head/node for finding link_sta based on addr */ + + struct sta_info *sta; + struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + + NUM_DEFAULT_MGMT_KEYS + + NUM_DEFAULT_BEACON_KEYS]; + struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats; + + /* Updated from RX path only, no locking requirements */ + struct ieee80211_sta_rx_stats rx_stats; + struct { + struct ewma_signal signal; + struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS]; + } rx_stats_avg; + + /* Updated from TX status path only, no locking requirements */ + struct { + unsigned long filtered; + unsigned long retry_failed, retry_count; + unsigned int lost_packets; + unsigned long last_pkt_time; + u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; + u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; + unsigned long last_ack; + s8 last_ack_signal; + bool ack_signal_filled; + struct ewma_avg_signal avg_ack_signal; + } status_stats; + + /* Updated from TX path only, no locking requirements */ + struct { + u64 packets[IEEE80211_NUM_ACS]; + u64 bytes[IEEE80211_NUM_ACS]; + struct ieee80211_tx_rate last_rate; + struct rate_info last_rate_info; + u64 msdu[IEEE80211_NUM_TIDS + 1]; + } tx_stats; + + enum ieee80211_sta_rx_bandwidth cur_max_bandwidth; +}; + /** * struct sta_info - STA information * @@ -498,7 +578,6 @@ struct ieee80211_fragment_cache { * @sdata: virtual interface this station belongs to * @ptk: peer keys negotiated with this station, if any * @ptk_idx: last installed peer key index - * @gtk: group keys negotiated with this station, if any * @rate_ctrl: rate control algorithm reference * @rate_ctrl_lock: spinlock used to protect rate control data * (data inside the algorithm, so serializes calls there) @@ -544,30 +623,19 @@ struct ieee80211_fragment_cache { * @fast_rx: RX fastpath information * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to * the BSS one. 
- * @tx_stats: TX statistics - * @tx_stats.packets: # of packets transmitted - * @tx_stats.bytes: # of bytes in all packets transmitted - * @tx_stats.last_rate: last TX rate - * @tx_stats.msdu: # of transmitted MSDUs per TID - * @rx_stats: RX statistics - * @rx_stats_avg: averaged RX statistics - * @rx_stats_avg.signal: averaged signal - * @rx_stats_avg.chain_signal: averaged per-chain signal - * @pcpu_rx_stats: per-CPU RX statistics, assigned only if the driver needs - * this (by advertising the USES_RSS hw flag) - * @status_stats: TX status statistics - * @status_stats.filtered: # of filtered frames - * @status_stats.retry_failed: # of frames that failed after retry - * @status_stats.retry_count: # of retries attempted - * @status_stats.lost_packets: # of lost packets - * @status_stats.last_pkt_time: timestamp of last ACKed packet - * @status_stats.msdu_retries: # of MSDU retries - * @status_stats.msdu_failed: # of failed MSDUs - * @status_stats.last_ack: last ack timestamp (jiffies) - * @status_stats.last_ack_signal: last ACK signal - * @status_stats.ack_signal_filled: last ACK signal validity - * @status_stats.avg_ack_signal: average ACK signal * @frags: fragment cache + * @multi_link_sta: Identifies if this sta is a MLD STA or regular STA + * @deflink: This is the default link STA information, for non MLO STA all link + * specific STA information is accessed through @deflink or through + * link[0] which points to address of @deflink. For MLO Link STA + * the first added link STA will point to deflink. + * @link: reference to Link Sta entries. For Non MLO STA, except 1st link, + * i.e link[0] all links would be assigned to NULL by default and + * would access link information via @deflink or link[0]. For MLO + * STA, first link STA being added will point its link pointer to + * @deflink address and remaining would be allocated and the address + * would be assigned to link[link_id] where link_id is the id assigned + * by the AP. 
*/ struct sta_info { /* General information, mostly static */ @@ -577,9 +645,6 @@ struct sta_info { u8 addr[ETH_ALEN]; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; - struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + - NUM_DEFAULT_MGMT_KEYS + - NUM_DEFAULT_BEACON_KEYS]; struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS]; u8 ptk_idx; struct rate_control_ref *rate_ctrl; @@ -589,7 +654,6 @@ struct sta_info { struct ieee80211_fast_tx __rcu *fast_tx; struct ieee80211_fast_rx __rcu *fast_rx; - struct ieee80211_sta_rx_stats __percpu *pcpu_rx_stats; #ifdef CONFIG_MAC80211_MESH struct mesh_sta *mesh; @@ -619,38 +683,9 @@ struct sta_info { u64 assoc_at; long last_connected; - /* Updated from RX path only, no locking requirements */ - struct ieee80211_sta_rx_stats rx_stats; - struct { - struct ewma_signal signal; - struct ewma_signal chain_signal[IEEE80211_MAX_CHAINS]; - } rx_stats_avg; - /* Plus 1 for non-QoS frames */ __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; - /* Updated from TX status path only, no locking requirements */ - struct { - unsigned long filtered; - unsigned long retry_failed, retry_count; - unsigned int lost_packets; - unsigned long last_pkt_time; - u64 msdu_retries[IEEE80211_NUM_TIDS + 1]; - u64 msdu_failed[IEEE80211_NUM_TIDS + 1]; - unsigned long last_ack; - s8 last_ack_signal; - bool ack_signal_filled; - struct ewma_avg_signal avg_ack_signal; - } status_stats; - - /* Updated from TX path only, no locking requirements */ - struct { - u64 packets[IEEE80211_NUM_ACS]; - u64 bytes[IEEE80211_NUM_ACS]; - struct ieee80211_tx_rate last_rate; - struct rate_info last_rate_info; - u64 msdu[IEEE80211_NUM_TIDS + 1]; - } tx_stats; u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; struct airtime_info airtime[IEEE80211_NUM_ACS]; @@ -664,8 +699,6 @@ struct sta_info { struct dentry *debugfs_dir; #endif - enum ieee80211_sta_rx_bandwidth cur_max_bandwidth; - enum ieee80211_smps_mode known_smps_mode; const struct ieee80211_cipher_scheme *cipher_scheme; @@ -677,6 +710,10 @@ struct sta_info { struct ieee80211_fragment_cache frags; + bool multi_link_sta; + struct link_sta_info deflink; + struct link_sta_info *link[MAX_STA_LINKS]; + /* keep last! */ struct ieee80211_sta sta; }; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index e81e8a5bb774..c563fa718d84 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -72,7 +72,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, info->flags |= IEEE80211_TX_INTFL_RETRANSMISSION; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; - sta->status_stats.filtered++; + sta->deflink.status_stats.filtered++; /* * Clear more-data bit on filtered frames, it might be set @@ -776,7 +776,7 @@ static void ieee80211_lost_packet(struct sta_info *sta, !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; - sta->status_stats.lost_packets++; + sta->deflink.status_stats.lost_packets++; if (sta->sta.tdls) { pkt_time = STA_LOST_TDLS_PKT_TIME; pkt_thr = STA_LOST_PKT_THRESHOLD; @@ -789,13 +789,14 @@ static void ieee80211_lost_packet(struct sta_info *sta, * mechanism. 
* For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME */ - if (sta->status_stats.lost_packets < pkt_thr || - !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time)) + if (sta->deflink.status_stats.lost_packets < pkt_thr || + !time_after(jiffies, sta->deflink.status_stats.last_pkt_time + pkt_time)) return; cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr, - sta->status_stats.lost_packets, GFP_ATOMIC); - sta->status_stats.lost_packets = 0; + sta->deflink.status_stats.lost_packets, + GFP_ATOMIC); + sta->deflink.status_stats.lost_packets = 0; } static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, @@ -930,7 +931,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && (ieee80211_is_data(hdr->frame_control)) && (rates_idx != -1)) - sta->tx_stats.last_rate = + sta->deflink.tx_stats.last_rate = info->status.rates[rates_idx]; if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && @@ -976,9 +977,9 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, return; } else if (ieee80211_is_data_present(fc)) { if (!acked && !noack_success) - sta->status_stats.msdu_failed[tid]++; + sta->deflink.status_stats.msdu_failed[tid]++; - sta->status_stats.msdu_retries[tid] += + sta->deflink.status_stats.msdu_retries[tid] += retry_count; } @@ -1111,7 +1112,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, sta = container_of(pubsta, struct sta_info, sta); if (status->rate) - sta->tx_stats.last_rate_info = *status->rate; + sta->deflink.tx_stats.last_rate_info = *status->rate; } if (skb && (tx_time_est = @@ -1142,8 +1143,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, struct ieee80211_sub_if_data *sdata = sta->sdata; if (!acked && !noack_success) - sta->status_stats.retry_failed++; - sta->status_stats.retry_count += retry_count; + sta->deflink.status_stats.retry_failed++; + sta->deflink.status_stats.retry_count += retry_count; if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { if (sdata->vif.type == NL80211_IFTYPE_STATION && @@ -1152,13 +1153,13 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, acked, info->status.tx_time); if (acked) { - sta->status_stats.last_ack = jiffies; + sta->deflink.status_stats.last_ack = jiffies; - if (sta->status_stats.lost_packets) - sta->status_stats.lost_packets = 0; + if (sta->deflink.status_stats.lost_packets) + sta->deflink.status_stats.lost_packets = 0; /* Track when last packet was ACKed */ - sta->status_stats.last_pkt_time = jiffies; + sta->deflink.status_stats.last_pkt_time = jiffies; /* Reset connection monitor */ if (sdata->vif.type == NL80211_IFTYPE_STATION && @@ -1166,10 +1167,10 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw, sdata->u.mgd.probe_send_count = 0; if (ack_signal_valid) { - sta->status_stats.last_ack_signal = + sta->deflink.status_stats.last_ack_signal = (s8)info->status.ack_signal; - sta->status_stats.ack_signal_filled = true; - ewma_avg_signal_add(&sta->status_stats.avg_ack_signal, + sta->deflink.status_stats.ack_signal_filled = true; + ewma_avg_signal_add(&sta->deflink.status_stats.avg_ack_signal, -info->status.ack_signal); } } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) { @@ -1235,7 +1236,7 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw, rate_control_tx_status(local, sband, &status); if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) - sta->tx_stats.last_rate = info->status.rates[0]; + sta->deflink.tx_stats.last_rate = info->status.rates[0]; } EXPORT_SYMBOL(ieee80211_tx_rate_update); diff --git 
a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 137be9ec94af..4e2d22e47429 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -459,9 +459,9 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && - ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) { + ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) { /* the peer caps are already intersected with our own */ - memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap)); + memcpy(&ht_cap, &sta->sta.deflink.ht_cap, sizeof(ht_cap)); pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); @@ -510,9 +510,9 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && - vht_cap.vht_supported && sta->sta.vht_cap.vht_supported) { + vht_cap.vht_supported && sta->sta.deflink.vht_cap.vht_supported) { /* the peer caps are already intersected with our own */ - memcpy(&vht_cap, &sta->sta.vht_cap, sizeof(vht_cap)); + memcpy(&vht_cap, &sta->sta.deflink.vht_cap, sizeof(vht_cap)); /* the AID is present only when VHT is implemented */ ieee80211_tdls_add_aid(sdata, skb); @@ -603,13 +603,13 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, * if HT support is only added in TDLS, we need an HT-operation IE. * add the IE as required by IEEE802.11-2012 9.23.3.2. */ - if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) { + if (!ap_sta->sta.deflink.ht_cap.ht_supported && sta->sta.deflink.ht_cap.ht_supported) { u16 prot = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT; pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); - ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap, + ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap, &sdata->vif.bss_conf.chandef, prot, true); } @@ -618,7 +618,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, /* only include VHT-operation if not on the 2.4GHz band */ if (sband->band != NL80211_BAND_2GHZ && - sta->sta.vht_cap.vht_supported) { + sta->sta.deflink.vht_cap.vht_supported) { /* * if both peers support WIDER_BW, we can expand the chandef to * a wider compatible one, up to 80MHz @@ -627,7 +627,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, ieee80211_tdls_chandef_vht_upgrade(sdata, sta); pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); - ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap, + ieee80211_ie_build_vht_oper(pos, &sta->sta.deflink.vht_cap, &sta->tdls_chandef); } @@ -1269,8 +1269,8 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata, bw = ieee80211_chan_width_to_rx_bw(conf->def.width); bw = min(bw, ieee80211_sta_cap_rx_bw(sta)); - if (bw != sta->sta.bandwidth) { - sta->sta.bandwidth = bw; + if (bw != sta->sta.deflink.bandwidth) { + sta->sta.deflink.bandwidth = bw; rate_control_rate_update(local, sband, sta, IEEE80211_RC_BW_CHANGED); /* @@ -1296,7 +1296,7 @@ static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata) if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !test_sta_flag(sta, 
WLAN_STA_TDLS_PEER_AUTH) || - !sta->sta.ht_cap.ht_supported) + !sta->sta.deflink.ht_cap.ht_supported) continue; result = true; break; @@ -1321,7 +1321,7 @@ iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata, if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) return; - tdls_ht = (sta && sta->sta.ht_cap.ht_supported) || + tdls_ht = (sta && sta->sta.deflink.ht_cap.ht_supported) || iee80211_tdls_have_ht_peers(sdata); opmode = sdata->vif.bss_conf.ht_operation_mode; @@ -1900,7 +1900,7 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, } /* peer should have known better */ - if (!sta->sta.ht_cap.ht_supported && elems->sec_chan_offs && + if (!sta->sta.deflink.ht_cap.ht_supported && elems->sec_chan_offs && elems->sec_chan_offs->sec_chan_offs) { tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n"); ret = -ENOTSUPP; diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index d91498f77796..743adfbb9b15 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -860,8 +860,8 @@ TRACE_EVENT(drv_sta_set_txpwr, LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; - __entry->txpwr = sta->txpwr.power; - __entry->type = sta->txpwr.type; + __entry->txpwr = sta->deflink.txpwr.power; + __entry->type = sta->deflink.txpwr.type; ), TP_printk( diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b6b20f38de0e..13253eb39d09 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -768,9 +768,9 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) if (txrc.reported_rate.idx < 0) { txrc.reported_rate = tx->rate; if (tx->sta && ieee80211_is_tx_data(tx->skb)) - tx->sta->tx_stats.last_rate = txrc.reported_rate; + tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate; } else if (tx->sta) - tx->sta->tx_stats.last_rate = txrc.reported_rate; + tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate; if (ratetbl) return TX_CONTINUE; @@ -837,7 +837,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); tx->sdata->sequence_number += 0x10; if (tx->sta) - tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++; + tx->sta->deflink.tx_stats.msdu[IEEE80211_NUM_TIDS]++; return TX_CONTINUE; } @@ -851,7 +851,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) /* include per-STA, per-TID sequence counter */ tid = ieee80211_get_tid(hdr); - tx->sta->tx_stats.msdu[tid]++; + tx->sta->deflink.tx_stats.msdu[tid]++; hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); @@ -1004,10 +1004,10 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) skb_queue_walk(&tx->skbs, skb) { ac = skb_get_queue_mapping(skb); - tx->sta->tx_stats.bytes[ac] += skb->len; + tx->sta->deflink.tx_stats.bytes[ac] += skb->len; } if (ac >= 0) - tx->sta->tx_stats.packets[ac]++; + tx->sta->deflink.tx_stats.packets[ac]++; return TX_CONTINUE; } @@ -1159,7 +1159,7 @@ ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER)) return; - if (!sta || !sta->sta.ht_cap.ht_supported || + if (!sta || !sta->sta.deflink.ht_cap.ht_supported || !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO || skb->protocol == sdata->control_port_protocol) return; @@ -3462,18 +3462,18 @@ ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, } if (skb_shinfo(skb)->gso_size) - sta->tx_stats.msdu[tid] += + sta->deflink.tx_stats.msdu[tid] += DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); else - sta->tx_stats.msdu[tid]++; + sta->deflink.tx_stats.msdu[tid]++; info->hw_queue = 
sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; /* statistics normally done by ieee80211_tx_h_stats (but that * has to consider fragmentation, so is more complex) */ - sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; - sta->tx_stats.packets[skb_get_queue_mapping(skb)]++; + sta->deflink.tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; + sta->deflink.tx_stats.packets[skb_get_queue_mapping(skb)]++; if (pn_offs) { u64 pn; @@ -4481,8 +4481,8 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, dev_sw_netstats_tx_add(dev, 1, skb->len); - sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; - sta->tx_stats.packets[skb_get_queue_mapping(skb)]++; + sta->deflink.tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; + sta->deflink.tx_stats.packets[skb_get_queue_mapping(skb)]++; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 8f16aa9c725d..ff26e0c4787b 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -118,14 +118,14 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, const struct ieee80211_vht_cap *vht_cap_ie, struct sta_info *sta) { - struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap; struct ieee80211_sta_vht_cap own_cap; u32 cap_info, i; bool have_80mhz; memset(vht_cap, 0, sizeof(*vht_cap)); - if (!sta->sta.ht_cap.ht_supported) + if (!sta->sta.deflink.ht_cap.ht_supported) return; if (!vht_cap_ie || !sband->vht_cap.vht_supported) @@ -295,10 +295,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160; break; default: - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_80; if (!(vht_cap->vht_mcs.tx_highest & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE))) @@ -310,10 +310,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, * above) between 160 and 80+80 yet. */ if (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160; } - sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta); + sta->sta.deflink.bandwidth = ieee80211_sta_cur_vht_bw(sta); switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: @@ -332,9 +332,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, /* FIXME: move this to some better location - parses HE/EHT now */ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta) { - struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap; - struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.eht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap; + struct ieee80211_sta_eht_cap *eht_cap = &sta->sta.deflink.eht_cap; u32 cap_width; if (he_cap->has_he) { @@ -369,7 +369,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta) } if (!vht_cap->vht_supported) - return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? 
+ return sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? IEEE80211_STA_RX_BW_40 : IEEE80211_STA_RX_BW_20; @@ -392,14 +392,14 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta) enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta) { - struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap; u32 cap_width; if (!vht_cap->vht_supported) { - if (!sta->sta.ht_cap.ht_supported) + if (!sta->sta.deflink.ht_cap.ht_supported) return NL80211_CHAN_WIDTH_20_NOHT; - return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? + return sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20; } @@ -416,13 +416,13 @@ enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta) enum nl80211_chan_width ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta) { - enum ieee80211_sta_rx_bandwidth cur_bw = sta->sta.bandwidth; - struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; + enum ieee80211_sta_rx_bandwidth cur_bw = sta->sta.deflink.bandwidth; + struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.deflink.vht_cap; u32 cap_width; switch (cur_bw) { case IEEE80211_STA_RX_BW_20: - if (!sta->sta.ht_cap.ht_supported) + if (!sta->sta.deflink.ht_cap.ht_supported) return NL80211_CHAN_WIDTH_20_NOHT; else return NL80211_CHAN_WIDTH_20; @@ -473,7 +473,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width; bw = ieee80211_sta_cap_rx_bw(sta); - bw = min(bw, sta->cur_max_bandwidth); + bw = min(bw, sta->deflink.cur_max_bandwidth); /* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of * IEEE80211-2016 specification makes higher bandwidth operation @@ -501,12 +501,12 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta) bool support_160; /* if we received a notification already don't overwrite it */ - if (sta->sta.rx_nss) + if (sta->sta.deflink.rx_nss) return; - if (sta->sta.eht_cap.has_eht) { + if (sta->sta.deflink.eht_cap.has_eht) { int i; - const u8 *rx_nss_mcs = (void *)&sta->sta.eht_cap.eht_mcs_nss_supp; + const u8 *rx_nss_mcs = (void *)&sta->sta.deflink.eht_cap.eht_mcs_nss_supp; /* get the max nss for EHT over all possible bandwidths and mcs */ for (i = 0; i < sizeof(struct ieee80211_eht_mcs_nss_supp); i++) @@ -515,10 +515,10 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta) IEEE80211_EHT_MCS_NSS_RX)); } - if (sta->sta.he_cap.has_he) { + if (sta->sta.deflink.he_cap.has_he) { int i; u8 rx_mcs_80 = 0, rx_mcs_160 = 0; - const struct ieee80211_sta_he_cap *he_cap = &sta->sta.he_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->sta.deflink.he_cap; u16 mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); u16 mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); @@ -549,23 +549,23 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta) he_rx_nss = rx_mcs_80; } - if (sta->sta.ht_cap.ht_supported) { - if (sta->sta.ht_cap.mcs.rx_mask[0]) + if (sta->sta.deflink.ht_cap.ht_supported) { + if (sta->sta.deflink.ht_cap.mcs.rx_mask[0]) ht_rx_nss++; - if (sta->sta.ht_cap.mcs.rx_mask[1]) + if (sta->sta.deflink.ht_cap.mcs.rx_mask[1]) ht_rx_nss++; - if (sta->sta.ht_cap.mcs.rx_mask[2]) + if (sta->sta.deflink.ht_cap.mcs.rx_mask[2]) ht_rx_nss++; - if (sta->sta.ht_cap.mcs.rx_mask[3]) + if (sta->sta.deflink.ht_cap.mcs.rx_mask[3]) ht_rx_nss++; /* FIXME: consider rx_highest? 
*/ } - if (sta->sta.vht_cap.vht_supported) { + if (sta->sta.deflink.vht_cap.vht_supported) { int i; u16 rx_mcs_map; - rx_mcs_map = le16_to_cpu(sta->sta.vht_cap.vht_mcs.rx_mcs_map); + rx_mcs_map = le16_to_cpu(sta->sta.deflink.vht_cap.vht_mcs.rx_mcs_map); for (i = 7; i >= 0; i--) { u8 mcs = (rx_mcs_map >> (2 * i)) & 3; @@ -581,7 +581,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta) rx_nss = max(vht_rx_nss, ht_rx_nss); rx_nss = max(he_rx_nss, rx_nss); rx_nss = max(eht_rx_nss, rx_nss); - sta->sta.rx_nss = max_t(u8, 1, rx_nss); + sta->sta.deflink.rx_nss = max_t(u8, 1, rx_nss); } u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, @@ -601,8 +601,8 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; nss += 1; - if (sta->sta.rx_nss != nss) { - sta->sta.rx_nss = nss; + if (sta->sta.deflink.rx_nss != nss) { + sta->sta.deflink.rx_nss = nss; sta_opmode.rx_nss = nss; changed |= IEEE80211_RC_NSS_CHANGED; sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED; @@ -611,27 +611,27 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: /* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */ - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_20; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: /* ignore IEEE80211_OPMODE_NOTIF_BW_160_80P80 must not be set */ - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_40; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_40; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: if (opmode & IEEE80211_OPMODE_NOTIF_BW_160_80P80) - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160; else - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_80; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_80; break; case IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: /* legacy only, no longer used by newer spec */ - sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_160; + sta->deflink.cur_max_bandwidth = IEEE80211_STA_RX_BW_160; break; } new_bw = ieee80211_sta_cur_vht_bw(sta); - if (new_bw != sta->sta.bandwidth) { - sta->sta.bandwidth = new_bw; + if (new_bw != sta->sta.deflink.bandwidth) { + sta->sta.deflink.bandwidth = new_bw; sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(sta); changed |= IEEE80211_RC_BW_CHANGED; sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED; -- cgit v1.2.3 From 888ade8f90d7dbbdc8552ae9b23d311f9e61ab0e Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 8 Apr 2022 22:08:37 +0200 Subject: ipv4: Use dscp_t in struct fib_rt_info Use the new dscp_t type to replace the tos field of struct fib_rt_info. This ensures ECN bits are ignored and makes it compatible with the fa_dscp field of struct fib_alias. This also allows sparse to flag potential incorrect uses of DSCP and ECN bits. 
Signed-off-by: Guillaume Nault Reviewed-by: Ido Schimmel Reviewed-by: David Ahern Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/prestera/prestera_router.c | 3 ++- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 7 ++++--- drivers/net/netdevsim/fib.c | 5 +++-- include/net/ip_fib.h | 2 +- net/ipv4/fib_semantics.c | 4 ++-- net/ipv4/fib_trie.c | 6 +++--- net/ipv4/route.c | 4 ++-- 7 files changed, 17 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 6c5618cf4f08..99d60c9ae54d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -132,7 +133,7 @@ __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw, fri.tb_id = fc->key.kern_tb_id; fri.dst = fc->key.addr.u.ipv4; fri.dst_len = fc->key.prefix_len; - fri.tos = fc->kern_tos; + fri.dscp = inet_dsfield_to_dscp(fc->kern_tos); fri.type = fc->kern_type; /* flags begin */ fri.offload = offload; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 79fd486e29e3..596516ba73c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -5620,7 +5621,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fen_info->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = fen_info->dst_len; - fri.tos = fen_info->tos; + fri.dscp = inet_dsfield_to_dscp(fen_info->tos); fri.type = fen_info->type; fri.offload = false; fri.trap = false; @@ -5645,7 +5646,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.tos = fib4_entry->tos; + fri.dscp = inet_dsfield_to_dscp(fib4_entry->tos); fri.type = fib4_entry->type; fri.offload = should_offload; fri.trap = !should_offload; @@ -5668,7 +5669,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.tos = fib4_entry->tos; + fri.dscp = inet_dsfield_to_dscp(fib4_entry->tos); fri.type = fib4_entry->type; fri.offload = false; fri.trap = false; diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 378ee779061c..31e73709aac5 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -322,7 +323,7 @@ nsim_fib4_rt_offload_failed_flag_set(struct net *net, fri.tb_id = fen_info->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = fen_info->dst_len; - fri.tos = fen_info->tos; + fri.dscp = inet_dsfield_to_dscp(fen_info->tos); fri.type = fen_info->type; fri.offload = false; fri.trap = false; @@ -342,7 +343,7 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net, fri.tb_id = fib4_rt->common.key.tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.tos = fib4_rt->tos; + fri.dscp = inet_dsfield_to_dscp(fib4_rt->tos); fri.type = fib4_rt->type; fri.offload = false; fri.trap = trap; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 6a82bcb8813b..f08ba531ac08 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -212,7 +212,7 @@ 
struct fib_rt_info { u32 tb_id; __be32 dst; int dst_len; - u8 tos; + dscp_t dscp; u8 type; u8 offload:1, trap:1, diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ccb62038f6a4..a57ba23571c9 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -524,7 +524,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, fri.tb_id = tb_id; fri.dst = key; fri.dst_len = dst_len; - fri.tos = inet_dscp_to_dsfield(fa->fa_dscp); + fri.dscp = fa->fa_dscp; fri.type = fa->fa_type; fri.offload = READ_ONCE(fa->offload); fri.trap = READ_ONCE(fa->trap); @@ -1781,7 +1781,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, rtm->rtm_family = AF_INET; rtm->rtm_dst_len = fri->dst_len; rtm->rtm_src_len = 0; - rtm->rtm_tos = fri->tos; + rtm->rtm_tos = inet_dscp_to_dsfield(fri->dscp); if (tb_id < 256) rtm->rtm_table = tb_id; else diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index fb0e49c36c2e..e96f02f0ab93 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1032,8 +1032,8 @@ fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri) hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { if (fa->fa_slen == slen && fa->tb_id == fri->tb_id && - fa->fa_dscp == inet_dsfield_to_dscp(fri->tos) && - fa->fa_info == fri->fi && fa->fa_type == fri->type) + fa->fa_dscp == fri->dscp && fa->fa_info == fri->fi && + fa->fa_type == fri->type) return fa; } @@ -2305,7 +2305,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, fri.tb_id = tb->tb_id; fri.dst = xkey; fri.dst_len = KEYLENGTH - fa->fa_slen; - fri.tos = inet_dscp_to_dsfield(fa->fa_dscp); + fri.dscp = fa->fa_dscp; fri.type = fa->fa_type; fri.offload = READ_ONCE(fa->offload); fri.trap = READ_ONCE(fa->trap); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 98c6f3429593..80f96170876c 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3394,7 +3394,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, fri.tb_id = table_id; fri.dst = res.prefix; fri.dst_len = res.prefixlen; - fri.tos = fl4.flowi4_tos; + fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos); fri.type = rt->rt_type; fri.offload = 0; fri.trap = 0; @@ -3407,7 +3407,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (fa->fa_slen == slen && fa->tb_id == fri.tb_id && - fa->fa_dscp == inet_dsfield_to_dscp(fri.tos) && + fa->fa_dscp == fri.dscp && fa->fa_info == res.fi && fa->fa_type == fri.type) { fri.offload = READ_ONCE(fa->offload); -- cgit v1.2.3 From 568a3f33b4273833f1b1e4a39d4a3410c4770c32 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 8 Apr 2022 22:08:40 +0200 Subject: ipv4: Use dscp_t in struct fib_entry_notifier_info Use the new dscp_t type to replace the tos field of struct fib_entry_notifier_info. This ensures ECN bits are ignored and makes it compatible with the dscp field of struct fib_rt_info. This also allows sparse to flag potential incorrect uses of DSCP and ECN bits. 
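To illustrate what the type change buys at the driver boundary, consider a hypothetical fib notifier handler; the snippet is illustrative only and not taken from any driver touched by this series:

  struct fib_entry_notifier_info *fen_info = container_of(info,
  			struct fib_entry_notifier_info, info);
  u8 tos;

  tos = fen_info->dscp;				/* sparse: wrong type */
  tos = inet_dscp_to_dsfield(fen_info->dscp);	/* explicit conversion, OK */

Drivers that keep a u8 field internally (as mlxsw and netdevsim still do at this point in the series) must convert explicitly, while drivers that adopt dscp_t themselves can compare the field directly against fa_dscp or fri.dscp without any conversion.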
Signed-off-by: Guillaume Nault Reviewed-by: Ido Schimmel Reviewed-by: David Ahern Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/prestera/prestera_router.c | 6 +++--- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6 +++--- drivers/net/netdevsim/fib.c | 4 ++-- include/net/ip_fib.h | 2 +- net/ipv4/fib_trie.c | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 99d60c9ae54d..a6b608ade2b9 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -89,7 +89,7 @@ prestera_kern_fib_cache_destroy(struct prestera_switch *sw, static struct prestera_kern_fib_cache * prestera_kern_fib_cache_create(struct prestera_switch *sw, struct prestera_kern_fib_cache_key *key, - struct fib_info *fi, u8 tos, u8 type) + struct fib_info *fi, dscp_t dscp, u8 type) { struct prestera_kern_fib_cache *fib_cache; int err; @@ -101,7 +101,7 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw, memcpy(&fib_cache->key, key, sizeof(*key)); fib_info_hold(fi); fib_cache->fi = fi; - fib_cache->kern_tos = tos; + fib_cache->kern_tos = inet_dscp_to_dsfield(dscp); fib_cache->kern_type = type; err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht, @@ -306,7 +306,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw, if (replace) { fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, fen_info->fi, - fen_info->tos, + fen_info->dscp, fen_info->type); if (!fib_cache) { dev_err(sw->dev->dev, "fib_cache == NULL"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 596516ba73c1..1c451d648302 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5621,7 +5621,7 @@ mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fen_info->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = fen_info->dst_len; - fri.dscp = inet_dsfield_to_dscp(fen_info->tos); + fri.dscp = fen_info->dscp; fri.type = fen_info->type; fri.offload = false; fri.trap = false; @@ -6251,7 +6251,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, fib_info_hold(fib4_entry->fi); fib4_entry->tb_id = fen_info->tb_id; fib4_entry->type = fen_info->type; - fib4_entry->tos = fen_info->tos; + fib4_entry->tos = inet_dscp_to_dsfield(fen_info->dscp); fib_entry->fib_node = fib_node; @@ -6305,7 +6305,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, fib4_entry = container_of(fib_node->fib_entry, struct mlxsw_sp_fib4_entry, common); if (fib4_entry->tb_id == fen_info->tb_id && - fib4_entry->tos == fen_info->tos && + fib4_entry->tos == inet_dscp_to_dsfield(fen_info->dscp) && fib4_entry->type == fen_info->type && fib4_entry->fi == fen_info->fi) return fib4_entry; diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 31e73709aac5..fb9af26122ac 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -284,7 +284,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data, fib4_rt->fi = fen_info->fi; fib_info_hold(fib4_rt->fi); - fib4_rt->tos = fen_info->tos; + fib4_rt->tos = inet_dscp_to_dsfield(fen_info->dscp); fib4_rt->type = fen_info->type; return fib4_rt; @@ -323,7 +323,7 @@ nsim_fib4_rt_offload_failed_flag_set(struct net *net, fri.tb_id = fen_info->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len 
= fen_info->dst_len; - fri.dscp = inet_dsfield_to_dscp(fen_info->tos); + fri.dscp = fen_info->dscp; fri.type = fen_info->type; fri.offload = false; fri.trap = false; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index f08ba531ac08..a378eff827c7 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -225,7 +225,7 @@ struct fib_entry_notifier_info { u32 dst; int dst_len; struct fib_info *fi; - u8 tos; + dscp_t dscp; u8 type; u32 tb_id; }; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index e96f02f0ab93..1f7f25532fa1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -82,7 +82,7 @@ static int call_fib_entry_notifier(struct notifier_block *nb, .dst = dst, .dst_len = dst_len, .fi = fa->fa_info, - .tos = inet_dscp_to_dsfield(fa->fa_dscp), + .dscp = fa->fa_dscp, .type = fa->fa_type, .tb_id = fa->tb_id, }; @@ -99,7 +99,7 @@ static int call_fib_entry_notifiers(struct net *net, .dst = dst, .dst_len = dst_len, .fi = fa->fa_info, - .tos = inet_dscp_to_dsfield(fa->fa_dscp), + .dscp = fa->fa_dscp, .type = fa->fa_type, .tb_id = fa->tb_id, }; -- cgit v1.2.3 From 20bbf32efe1e3b937e7d3a53604dd643b686af3c Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 8 Apr 2022 22:08:43 +0200 Subject: netdevsim: Use dscp_t in struct nsim_fib4_rt Use the new dscp_t type to replace the tos field of struct nsim_fib4_rt. This ensures ECN bits are ignored and makes it compatible with the dscp fields of struct fib_entry_notifier_info and struct fib_rt_info. This also allows sparse to flag potential incorrect uses of DSCP and ECN bits. Signed-off-by: Guillaume Nault Reviewed-by: Ido Schimmel Reviewed-by: David Ahern Signed-off-by: Jakub Kicinski --- drivers/net/netdevsim/fib.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index fb9af26122ac..c8f398f5bc5b 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -79,7 +79,7 @@ struct nsim_fib_rt { struct nsim_fib4_rt { struct nsim_fib_rt common; struct fib_info *fi; - u8 tos; + dscp_t dscp; u8 type; }; @@ -284,7 +284,7 @@ nsim_fib4_rt_create(struct nsim_fib_data *data, fib4_rt->fi = fen_info->fi; fib_info_hold(fib4_rt->fi); - fib4_rt->tos = inet_dscp_to_dsfield(fen_info->dscp); + fib4_rt->dscp = fen_info->dscp; fib4_rt->type = fen_info->type; return fib4_rt; @@ -343,7 +343,7 @@ static void nsim_fib4_rt_hw_flags_set(struct net *net, fri.tb_id = fib4_rt->common.key.tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.dscp = inet_dsfield_to_dscp(fib4_rt->tos); + fri.dscp = fib4_rt->dscp; fri.type = fib4_rt->type; fri.offload = false; fri.trap = trap; -- cgit v1.2.3 From 046eabbf1991c65d50d1d842f1a3011b53ca9b0a Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 8 Apr 2022 22:08:46 +0200 Subject: mlxsw: Use dscp_t in struct mlxsw_sp_fib4_entry Use the new dscp_t type to replace the tos field of struct mlxsw_sp_fib4_entry. This ensures ECN bits are ignored and makes it compatible with the dscp fields of fib_entry_notifier_info and fib_rt_info. This also allows sparse to flag potential incorrect uses of DSCP and ECN bits. 
Signed-off-by: Guillaume Nault Reviewed-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1c451d648302..dc820d9f2696 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -508,7 +508,7 @@ struct mlxsw_sp_fib4_entry { struct mlxsw_sp_fib_entry common; struct fib_info *fi; u32 tb_id; - u8 tos; + dscp_t dscp; u8 type; }; @@ -5560,7 +5560,7 @@ mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, common); - return !fib4_entry->tos; + return !fib4_entry->dscp; } static bool @@ -5646,7 +5646,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.dscp = inet_dsfield_to_dscp(fib4_entry->tos); + fri.dscp = fib4_entry->dscp; fri.type = fib4_entry->type; fri.offload = should_offload; fri.trap = !should_offload; @@ -5669,7 +5669,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp, fri.tb_id = fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; - fri.dscp = inet_dsfield_to_dscp(fib4_entry->tos); + fri.dscp = fib4_entry->dscp; fri.type = fib4_entry->type; fri.offload = false; fri.trap = false; @@ -6251,7 +6251,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, fib_info_hold(fib4_entry->fi); fib4_entry->tb_id = fen_info->tb_id; fib4_entry->type = fen_info->type; - fib4_entry->tos = inet_dscp_to_dsfield(fen_info->dscp); + fib4_entry->dscp = fen_info->dscp; fib_entry->fib_node = fib_node; @@ -6305,7 +6305,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, fib4_entry = container_of(fib_node->fib_entry, struct mlxsw_sp_fib4_entry, common); if (fib4_entry->tb_id == fen_info->tb_id && - fib4_entry->tos == inet_dscp_to_dsfield(fen_info->dscp) && + fib4_entry->dscp == fen_info->dscp && fib4_entry->type == fen_info->type && fib4_entry->fi == fen_info->fi) return fib4_entry; -- cgit v1.2.3 From 9f6982e9a3c26a9db226cefd91c21a3e325ca397 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 8 Apr 2022 22:08:50 +0200 Subject: net: marvell: prestera: Use dscp_t in struct prestera_kern_fib_cache Use the new dscp_t type to replace the kern_tos field of struct prestera_kern_fib_cache. This ensures ECN bits are ignored and makes it compatible with the dscp fields of struct fib_entry_notifier_info and struct fib_rt_info. This also allows sparse to flag potential incorrect uses of DSCP and ECN bits. 
Signed-off-by: Guillaume Nault Reviewed-by: Yevhen Orlov Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/prestera/prestera_router.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index a6b608ade2b9..3754d8aec76d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -27,7 +27,7 @@ struct prestera_kern_fib_cache { /* Indicate if route is not overlapped by another table */ struct rhash_head ht_node; /* node of prestera_router */ struct fib_info *fi; - u8 kern_tos; + dscp_t kern_dscp; u8 kern_type; bool reachable; }; @@ -101,7 +101,7 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw, memcpy(&fib_cache->key, key, sizeof(*key)); fib_info_hold(fi); fib_cache->fi = fi; - fib_cache->kern_tos = inet_dscp_to_dsfield(dscp); + fib_cache->kern_dscp = dscp; fib_cache->kern_type = type; err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht, @@ -133,7 +133,7 @@ __prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw, fri.tb_id = fc->key.kern_tb_id; fri.dst = fc->key.addr.u.ipv4; fri.dst_len = fc->key.prefix_len; - fri.dscp = inet_dsfield_to_dscp(fc->kern_tos); + fri.dscp = fc->kern_dscp; fri.type = fc->kern_type; /* flags begin */ fri.offload = offload; -- cgit v1.2.3 From 1cb9d3b6185b2a4d1d592632a7faf5d8c8e5f9b3 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Thu, 7 Apr 2022 13:21:34 -0700 Subject: hv_netvsc: Add support for XDP_REDIRECT Handle XDP_REDIRECT action in netvsc driver. Also, transparently pass ndo_xdp_xmit to VF when available. Signed-off-by: Haiyang Zhang Link: https://lore.kernel.org/r/1649362894-20077-1-git-send-email-haiyangz@microsoft.com Signed-off-by: Jakub Kicinski --- drivers/net/hyperv/hyperv_net.h | 69 +++++++++++++++++- drivers/net/hyperv/netvsc.c | 8 ++- drivers/net/hyperv/netvsc_bpf.c | 95 ++++++++++++++++++++++++- drivers/net/hyperv/netvsc_drv.c | 150 ++++++++++++++++------------------------ 4 files changed, 228 insertions(+), 94 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index cf69da0e296c..25b38a374e3c 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -15,6 +15,7 @@ #include #include #include +#include /* RSS related */ #define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203 /* query only */ @@ -237,6 +238,7 @@ int netvsc_recv_callback(struct net_device *net, void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); +void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev); u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, struct xdp_buff *xdp); unsigned int netvsc_xdp_fraglen(unsigned int len); @@ -246,6 +248,8 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netvsc_device *nvdev); int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog); int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf); +int netvsc_ndoxdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags); int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev, @@ -942,12 +946,21 @@ struct nvsc_rsc { #define NVSC_RSC_CSUM_INFO BIT(1) /* valid/present bit for 'csum_info' */ #define NVSC_RSC_HASH_INFO BIT(2) /* valid/present bit for 'hash_info' */ -struct netvsc_stats 
{ +struct netvsc_stats_tx { + u64 packets; + u64 bytes; + u64 xdp_xmit; + struct u64_stats_sync syncp; +}; + +struct netvsc_stats_rx { u64 packets; u64 bytes; u64 broadcast; u64 multicast; u64 xdp_drop; + u64 xdp_redirect; + u64 xdp_tx; struct u64_stats_sync syncp; }; @@ -1046,6 +1059,55 @@ struct net_device_context { struct netvsc_device_info *saved_netvsc_dev_info; }; +/* Azure hosts don't support non-TCP port numbers in hashing for fragmented + * packets. We can use ethtool to change UDP hash level when necessary. + */ +static inline u32 netvsc_get_hash(struct sk_buff *skb, + const struct net_device_context *ndc) +{ + struct flow_keys flow; + u32 hash, pkt_proto = 0; + static u32 hashrnd __read_mostly; + + net_get_random_once(&hashrnd, sizeof(hashrnd)); + + if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) + return 0; + + switch (flow.basic.ip_proto) { + case IPPROTO_TCP: + if (flow.basic.n_proto == htons(ETH_P_IP)) + pkt_proto = HV_TCP4_L4HASH; + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + pkt_proto = HV_TCP6_L4HASH; + + break; + + case IPPROTO_UDP: + if (flow.basic.n_proto == htons(ETH_P_IP)) + pkt_proto = HV_UDP4_L4HASH; + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + pkt_proto = HV_UDP6_L4HASH; + + break; + } + + if (pkt_proto & ndc->l4_hash) { + return skb_get_hash(skb); + } else { + if (flow.basic.n_proto == htons(ETH_P_IP)) + hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd); + else if (flow.basic.n_proto == htons(ETH_P_IPV6)) + hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); + else + return 0; + + __skb_set_sw_hash(skb, hash, false); + } + + return hash; +} + /* Per channel data */ struct netvsc_channel { struct vmbus_channel *channel; @@ -1060,9 +1122,10 @@ struct netvsc_channel { struct bpf_prog __rcu *bpf_prog; struct xdp_rxq_info xdp_rxq; + bool xdp_flush; - struct netvsc_stats tx_stats; - struct netvsc_stats rx_stats; + struct netvsc_stats_tx tx_stats; + struct netvsc_stats_rx rx_stats; }; /* Per netvsc device */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 4061af5baaea..6e42cb03e226 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -805,7 +806,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev, struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)skb->cb; u32 send_index = packet->send_buf_index; - struct netvsc_stats *tx_stats; + struct netvsc_stats_tx *tx_stats; if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); @@ -1670,12 +1671,17 @@ int netvsc_poll(struct napi_struct *napi, int budget) if (!nvchan->desc) nvchan->desc = hv_pkt_iter_first(channel); + nvchan->xdp_flush = false; + while (nvchan->desc && work_done < budget) { work_done += netvsc_process_raw_pkt(device, nvchan, net_device, ndev, nvchan->desc, budget); nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } + if (nvchan->xdp_flush) + xdp_do_flush(); + /* Send any pending receive completions */ ret = send_recv_completions(ndev, net_device, nvchan); diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 232c4a0efd7b..4a9522689fa4 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -23,11 +24,13 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, struct xdp_buff *xdp) { + struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats; void 
*data = nvchan->rsc.data[0]; u32 len = nvchan->rsc.len[0]; struct page *page = NULL; struct bpf_prog *prog; u32 act = XDP_PASS; + bool drop = true; xdp->data_hard_start = NULL; @@ -60,9 +63,34 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, switch (act) { case XDP_PASS: case XDP_TX: + drop = false; + break; + case XDP_DROP: break; + case XDP_REDIRECT: + if (!xdp_do_redirect(ndev, xdp, prog)) { + nvchan->xdp_flush = true; + drop = false; + + u64_stats_update_begin(&rx_stats->syncp); + + rx_stats->xdp_redirect++; + rx_stats->packets++; + rx_stats->bytes += nvchan->rsc.pktlen; + + u64_stats_update_end(&rx_stats->syncp); + + break; + } else { + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->xdp_drop++; + u64_stats_update_end(&rx_stats->syncp); + } + + fallthrough; + case XDP_ABORTED: trace_xdp_exception(ndev, prog, act); break; @@ -74,7 +102,7 @@ u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, out: rcu_read_unlock(); - if (page && act != XDP_PASS && act != XDP_TX) { + if (page && drop) { __free_page(page); xdp->data_hard_start = NULL; } @@ -197,3 +225,68 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) return -EINVAL; } } + +static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev, + struct xdp_frame *frame, u16 q_idx) +{ + struct sk_buff *skb; + + skb = xdp_build_skb_from_frame(frame, ndev); + if (unlikely(!skb)) + return -ENOMEM; + + netvsc_get_hash(skb, netdev_priv(ndev)); + + skb_record_rx_queue(skb, q_idx); + + netvsc_xdp_xmit(skb, ndev); + + return 0; +} + +int netvsc_ndoxdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + const struct net_device_ops *vf_ops; + struct netvsc_stats_tx *tx_stats; + struct netvsc_device *nvsc_dev; + struct net_device *vf_netdev; + int i, count = 0; + u16 q_idx; + + /* Don't transmit if netvsc_device is gone */ + nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev); + if (unlikely(!nvsc_dev || nvsc_dev->destroy)) + return 0; + + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. + * If netpoll is in uses, then VF can not be used either. + */ + vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev); + if (vf_netdev && netif_running(vf_netdev) && + netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) && + vf_netdev->netdev_ops->ndo_xdp_xmit && + ndev_ctx->data_path_is_vf) { + vf_ops = vf_netdev->netdev_ops; + return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags); + } + + q_idx = smp_processor_id() % ndev->real_num_tx_queues; + + for (i = 0; i < n; i++) { + if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx)) + break; + + count++; + } + + tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats; + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->xdp_xmit += count; + u64_stats_update_end(&tx_stats->syncp); + + return count; +} diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fde1c492ca02..27f6bbca6619 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -242,56 +242,6 @@ static inline void *init_ppi_data(struct rndis_message *msg, return ppi + 1; } -/* Azure hosts don't support non-TCP port numbers in hashing for fragmented - * packets. We can use ethtool to change UDP hash level when necessary. 
- */ -static inline u32 netvsc_get_hash( - struct sk_buff *skb, - const struct net_device_context *ndc) -{ - struct flow_keys flow; - u32 hash, pkt_proto = 0; - static u32 hashrnd __read_mostly; - - net_get_random_once(&hashrnd, sizeof(hashrnd)); - - if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) - return 0; - - switch (flow.basic.ip_proto) { - case IPPROTO_TCP: - if (flow.basic.n_proto == htons(ETH_P_IP)) - pkt_proto = HV_TCP4_L4HASH; - else if (flow.basic.n_proto == htons(ETH_P_IPV6)) - pkt_proto = HV_TCP6_L4HASH; - - break; - - case IPPROTO_UDP: - if (flow.basic.n_proto == htons(ETH_P_IP)) - pkt_proto = HV_UDP4_L4HASH; - else if (flow.basic.n_proto == htons(ETH_P_IPV6)) - pkt_proto = HV_UDP6_L4HASH; - - break; - } - - if (pkt_proto & ndc->l4_hash) { - return skb_get_hash(skb); - } else { - if (flow.basic.n_proto == htons(ETH_P_IP)) - hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd); - else if (flow.basic.n_proto == htons(ETH_P_IPV6)) - hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd); - else - return 0; - - __skb_set_sw_hash(skb, hash, false); - } - - return hash; -} - static inline int netvsc_get_tx_queue(struct net_device *ndev, struct sk_buff *skb, int old_idx) { @@ -804,7 +754,7 @@ void netvsc_linkstatus_callback(struct net_device *net, } /* This function should only be called after skb_record_rx_queue() */ -static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev) +void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev) { int rc; @@ -925,7 +875,7 @@ int netvsc_recv_callback(struct net_device *net, struct vmbus_channel *channel = nvchan->channel; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct sk_buff *skb; - struct netvsc_stats *rx_stats = &nvchan->rx_stats; + struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats; struct xdp_buff xdp; u32 act; @@ -934,6 +884,9 @@ int netvsc_recv_callback(struct net_device *net, act = netvsc_run_xdp(net, nvchan, &xdp); + if (act == XDP_REDIRECT) + return NVSP_STAT_SUCCESS; + if (act != XDP_PASS && act != XDP_TX) { u64_stats_update_begin(&rx_stats->syncp); rx_stats->xdp_drop++; @@ -958,6 +911,9 @@ int netvsc_recv_callback(struct net_device *net, * statistics will not work correctly. 
*/ u64_stats_update_begin(&rx_stats->syncp); + if (act == XDP_TX) + rx_stats->xdp_tx++; + rx_stats->packets++; rx_stats->bytes += nvchan->rsc.pktlen; @@ -1353,28 +1309,29 @@ static void netvsc_get_pcpu_stats(struct net_device *net, /* fetch percpu stats of netvsc */ for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; - const struct netvsc_stats *stats; + const struct netvsc_stats_tx *tx_stats; + const struct netvsc_stats_rx *rx_stats; struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[nvchan->channel->target_cpu]; u64 packets, bytes; unsigned int start; - stats = &nvchan->tx_stats; + tx_stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + packets = tx_stats->packets; + bytes = tx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); this_tot->tx_bytes += bytes; this_tot->tx_packets += packets; - stats = &nvchan->rx_stats; + rx_stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + packets = rx_stats->packets; + bytes = rx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); this_tot->rx_bytes += bytes; this_tot->rx_packets += packets; @@ -1406,27 +1363,28 @@ static void netvsc_get_stats64(struct net_device *net, for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; - const struct netvsc_stats *stats; + const struct netvsc_stats_tx *tx_stats; + const struct netvsc_stats_rx *rx_stats; u64 packets, bytes, multicast; unsigned int start; - stats = &nvchan->tx_stats; + tx_stats = &nvchan->tx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + packets = tx_stats->packets; + bytes = tx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); t->tx_bytes += bytes; t->tx_packets += packets; - stats = &nvchan->rx_stats; + rx_stats = &nvchan->rx_stats; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - packets = stats->packets; - bytes = stats->bytes; - multicast = stats->multicast + stats->broadcast; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + packets = rx_stats->packets; + bytes = rx_stats->bytes; + multicast = rx_stats->multicast + rx_stats->broadcast; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); t->rx_bytes += bytes; t->rx_packets += packets; @@ -1515,8 +1473,8 @@ static const struct { /* statistics per queue (rx/tx packets/bytes) */ #define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats)) -/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */ -#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5) +/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */ +#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8) static int netvsc_get_sset_count(struct net_device *dev, int string_set) { @@ -1543,12 +1501,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct net_device_context *ndc = netdev_priv(dev); struct 
netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); const void *nds = &ndc->eth_stats; - const struct netvsc_stats *qstats; + const struct netvsc_stats_tx *tx_stats; + const struct netvsc_stats_rx *rx_stats; struct netvsc_vf_pcpu_stats sum; struct netvsc_ethtool_pcpu_stats *pcpu_sum; unsigned int start; u64 packets, bytes; u64 xdp_drop; + u64 xdp_redirect; + u64 xdp_tx; + u64 xdp_xmit; int i, j, cpu; if (!nvdev) @@ -1562,26 +1524,32 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); for (j = 0; j < nvdev->num_chn; j++) { - qstats = &nvdev->chan_table[j].tx_stats; + tx_stats = &nvdev->chan_table[j].tx_stats; do { - start = u64_stats_fetch_begin_irq(&qstats->syncp); - packets = qstats->packets; - bytes = qstats->bytes; - } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + packets = tx_stats->packets; + bytes = tx_stats->bytes; + xdp_xmit = tx_stats->xdp_xmit; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; + data[i++] = xdp_xmit; - qstats = &nvdev->chan_table[j].rx_stats; + rx_stats = &nvdev->chan_table[j].rx_stats; do { - start = u64_stats_fetch_begin_irq(&qstats->syncp); - packets = qstats->packets; - bytes = qstats->bytes; - xdp_drop = qstats->xdp_drop; - } while (u64_stats_fetch_retry_irq(&qstats->syncp, start)); + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + packets = rx_stats->packets; + bytes = rx_stats->bytes; + xdp_drop = rx_stats->xdp_drop; + xdp_redirect = rx_stats->xdp_redirect; + xdp_tx = rx_stats->xdp_tx; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); data[i++] = packets; data[i++] = bytes; data[i++] = xdp_drop; + data[i++] = xdp_redirect; + data[i++] = xdp_tx; } pcpu_sum = kvmalloc_array(num_possible_cpus(), @@ -1622,9 +1590,12 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) for (i = 0; i < nvdev->num_chn; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i); ethtool_sprintf(&p, "rx_queue_%u_packets", i); ethtool_sprintf(&p, "rx_queue_%u_bytes", i); ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i); + ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i); + ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i); } for_each_present_cpu(cpu) { @@ -2057,6 +2028,7 @@ static const struct net_device_ops device_ops = { .ndo_select_queue = netvsc_select_queue, .ndo_get_stats64 = netvsc_get_stats64, .ndo_bpf = netvsc_bpf, + .ndo_xdp_xmit = netvsc_ndoxdp_xmit, }; /* -- cgit v1.2.3 From e2d0acd40c876ad0cd3e8805adcf2325b910360c Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Fri, 8 Apr 2022 08:12:50 +0000 Subject: net: stmmac: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Link: https://lore.kernel.org/r/20220408081250.2494588-1-chi.minghao@zte.com.cn Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 24 ++++++++--------------- 1 file changed, 8 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index a5d150c5f3d8..9bc625fccca0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -88,11 +88,9 @@ static int 
stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) u32 tmp, addr, value = MII_XGMAC_BUSY; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, @@ -156,11 +154,9 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr, u32 addr, tmp, value = MII_XGMAC_BUSY; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } /* Wait until any existing MII operation is complete */ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp, @@ -229,11 +225,9 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) int data = 0; u32 v; - data = pm_runtime_get_sync(priv->device); - if (data < 0) { - pm_runtime_put_noidle(priv->device); + data = pm_runtime_resume_and_get(priv->device); + if (data < 0) return data; - } value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; @@ -297,11 +291,9 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, u32 value = MII_BUSY; u32 v; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; -- cgit v1.2.3 From bfa323c659b1016c8e896920ba08cd6914cc3b0c Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Sat, 9 Apr 2022 18:59:31 +0800 Subject: net: ethernet: ti: am65-cpsw: Fix build error without PHYLINK If PHYLINK is n, build fails: drivers/net/ethernet/ti/am65-cpsw-ethtool.o: In function `am65_cpsw_set_link_ksettings': am65-cpsw-ethtool.c:(.text+0x118): undefined reference to `phylink_ethtool_ksettings_set' drivers/net/ethernet/ti/am65-cpsw-ethtool.o: In function `am65_cpsw_get_link_ksettings': am65-cpsw-ethtool.c:(.text+0x138): undefined reference to `phylink_ethtool_ksettings_get' drivers/net/ethernet/ti/am65-cpsw-ethtool.o: In function `am65_cpsw_set_eee': am65-cpsw-ethtool.c:(.text+0x158): undefined reference to `phylink_ethtool_set_eee' Select PHYLINK for TI_K3_AM65_CPSW_NUSS to fix this. Fixes: e8609e69470f ("net: ethernet: ti: am65-cpsw: Convert to PHYLINK") Signed-off-by: YueHaibing Reviewed-by: Russell King (Oracle) Link: https://lore.kernel.org/r/20220409105931.9080-1-yuehaibing@huawei.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ti/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index affcf92cd3aa..fb30bc5d56cb 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -94,6 +94,7 @@ config TI_K3_AM65_CPSW_NUSS depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER select NET_DEVLINK select TI_DAVINCI_MDIO + select PHYLINK imply PHY_TI_GMII_SEL depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS help -- cgit v1.2.3 From d6967d04145eb5471ce8b3c75dfc563a03bd3377 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Sat, 9 Apr 2022 20:21:45 +0200 Subject: net: calxedaxgmac: Fix typo (doubled "the") MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a doubled word in the comment above xgmac_poll. 
Signed-off-by: Jonathan Neuschäfer Link: https://lore.kernel.org/r/20220409182147.2509788-1-j.neuschaefer@gmx.net Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/calxeda/xgmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 457cb7121000..1281d1565ef8 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) * @budget : maximum number of packets that the current CPU can receive from * all interfaces. * Description : - * This function implements the the reception process. + * This function implements the reception process. * Also it runs the TX completion thread */ static int xgmac_poll(struct napi_struct *napi, int budget) -- cgit v1.2.3 From fdb2981c00bb6ec82d3f96940a2f751ef9ca6bb4 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Fri, 8 Apr 2022 09:03:54 +0200 Subject: net: lan966x: Add registers that are used for FDMA. Add the registers that are used to configure the FDMA. Signed-off-by: Horatiu Vultur Signed-off-by: Jakub Kicinski --- .../net/ethernet/microchip/lan966x/lan966x_main.c | 1 + .../net/ethernet/microchip/lan966x/lan966x_regs.h | 106 +++++++++++++++++++++ 2 files changed, 107 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 1f8c67f0261b..83f5208529ef 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -43,6 +43,7 @@ struct lan966x_main_io_resource { static const struct lan966x_main_io_resource lan966x_main_iomap[] = { { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */ + { TARGET_FDMA, 0xc0400, 0 }, /* 0xe00c0400 */ { TARGET_ORG, 0, 1 }, /* 0xe2000000 */ { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */ { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h index 0c0b3e173d53..2f59285bef29 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -17,6 +17,7 @@ enum lan966x_target { TARGET_CHIP_TOP = 5, TARGET_CPU = 6, TARGET_DEV = 13, + TARGET_FDMA = 21, TARGET_GCB = 27, TARGET_ORG = 36, TARGET_PTP = 41, @@ -578,6 +579,111 @@ enum lan966x_target { #define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\ FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x) +/* FDMA:FDMA:FDMA_CH_ACTIVATE */ +#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4) + +#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\ + FIELD_PREP(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) +#define FDMA_CH_ACTIVATE_CH_ACTIVATE_GET(x)\ + FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x) + +/* FDMA:FDMA:FDMA_CH_RELOAD */ +#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4) + +#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0) +#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\ + FIELD_PREP(FDMA_CH_RELOAD_CH_RELOAD, x) +#define FDMA_CH_RELOAD_CH_RELOAD_GET(x)\ + FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x) + +/* FDMA:FDMA:FDMA_CH_DISABLE */ +#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4) + +#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0) +#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\ + FIELD_PREP(FDMA_CH_DISABLE_CH_DISABLE, x) +#define 
FDMA_CH_DISABLE_CH_DISABLE_GET(x)\ + FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x) + +/* FDMA:FDMA:FDMA_CH_DB_DISCARD */ +#define FDMA_CH_DB_DISCARD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 16, 0, 1, 4) + +#define FDMA_CH_DB_DISCARD_DB_DISCARD GENMASK(7, 0) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_SET(x)\ + FIELD_PREP(FDMA_CH_DB_DISCARD_DB_DISCARD, x) +#define FDMA_CH_DB_DISCARD_DB_DISCARD_GET(x)\ + FIELD_GET(FDMA_CH_DB_DISCARD_DB_DISCARD, x) + +/* FDMA:FDMA:FDMA_DCB_LLP */ +#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4) + +/* FDMA:FDMA:FDMA_DCB_LLP1 */ +#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4) + +/* FDMA:FDMA:FDMA_CH_ACTIVE */ +#define FDMA_CH_ACTIVE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 180, 0, 1, 4) + +/* FDMA:FDMA:FDMA_CH_CFG */ +#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4) + +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY BIT(4) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) +#define FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY, x) + +#define FDMA_CH_CFG_CH_INJ_PORT BIT(3) +#define FDMA_CH_CFG_CH_INJ_PORT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_INJ_PORT, x) +#define FDMA_CH_CFG_CH_INJ_PORT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_INJ_PORT, x) + +#define FDMA_CH_CFG_CH_DCB_DB_CNT GENMASK(2, 1) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_DCB_DB_CNT, x) +#define FDMA_CH_CFG_CH_DCB_DB_CNT_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_DCB_DB_CNT, x) + +#define FDMA_CH_CFG_CH_MEM BIT(0) +#define FDMA_CH_CFG_CH_MEM_SET(x)\ + FIELD_PREP(FDMA_CH_CFG_CH_MEM, x) +#define FDMA_CH_CFG_CH_MEM_GET(x)\ + FIELD_GET(FDMA_CH_CFG_CH_MEM, x) + +/* FDMA:FDMA:FDMA_PORT_CTRL */ +#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4) + +#define FDMA_PORT_CTRL_INJ_STOP BIT(4) +#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_INJ_STOP, x) +#define FDMA_PORT_CTRL_INJ_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_INJ_STOP, x) + +#define FDMA_PORT_CTRL_XTR_STOP BIT(2) +#define FDMA_PORT_CTRL_XTR_STOP_SET(x)\ + FIELD_PREP(FDMA_PORT_CTRL_XTR_STOP, x) +#define FDMA_PORT_CTRL_XTR_STOP_GET(x)\ + FIELD_GET(FDMA_PORT_CTRL_XTR_STOP, x) + +/* FDMA:FDMA:FDMA_INTR_DB */ +#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4) + +/* FDMA:FDMA:FDMA_INTR_DB_ENA */ +#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4) + +#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\ + FIELD_PREP(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) +#define FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(x)\ + FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x) + +/* FDMA:FDMA:FDMA_INTR_ERR */ +#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4) + +/* FDMA:FDMA:FDMA_ERRORS */ +#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4) + /* PTP:PTP_CFG:PTP_DOM_CFG */ #define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4) -- cgit v1.2.3 From 8f2c7d9ad778e38c38a43932869ee6fc971b2b6c Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Fri, 8 Apr 2022 09:03:55 +0200 Subject: net: lan966x: Expose functions that are needed by FDMA Expose the following functions 'lan966x_hw_offload', 'lan966x_ifh_get_src_port' and 'lan966x_ifh_get_timestamp' in lan966x_main.h so they can be accessed by FDMA. 
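For illustration, a minimal sketch of how the FDMA extraction path is expected to use these helpers once they are exported. The wrapper function name is made up for the example, and the skb handling is reduced to the essentials of the driver's own receive path.

static void lan966x_fdma_rx_example(struct lan966x *lan966x, struct sk_buff *skb)
{
	u64 src_port, timestamp;

	/* Every extracted frame starts with an injected frame header (IFH)
	 * of IFH_LEN 32-bit words; pull the source port and RX timestamp
	 * out of it with the newly exported helpers.
	 */
	lan966x_ifh_get_src_port(skb->data, &src_port);
	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN * sizeof(u32));

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		/* Drop the mark again if lan966x_hw_offload() says this
		 * frame still needs software forwarding.
		 */
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}
}
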
Signed-off-by: Horatiu Vultur Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 10 +++------- drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 8 ++++++++ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 83f5208529ef..4ebc31c1d423 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -24,9 +24,6 @@ #define XTR_NOT_READY 0x07000080U #define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) -#define READL_SLEEP_US 10 -#define READL_TIMEOUT_US 100000000 - #define IO_RANGES 2 static const struct of_device_id lan966x_match[] = { @@ -433,8 +430,7 @@ bool lan966x_netdevice_check(const struct net_device *dev) return dev->netdev_ops == &lan966x_port_netdev_ops; } -static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, - struct sk_buff *skb) +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb) { u32 val; @@ -515,7 +511,7 @@ static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval) } } -static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port) +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port) { packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1, IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0); @@ -527,7 +523,7 @@ static void lan966x_ifh_get_len(void *ifh, u64 *len) IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0); } -static void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp) +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp) { packing(ifh, timestamp, IFH_POS_TIMESTAMP + IFH_WID_TIMESTAMP - 1, IFH_POS_TIMESTAMP, IFH_LEN * 4, UNPACK, 0); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index ae282da1da74..b692c612f235 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -17,6 +17,9 @@ #define TABLE_UPDATE_SLEEP_US 10 #define TABLE_UPDATE_TIMEOUT_US 100000 +#define READL_SLEEP_US 10 +#define READL_TIMEOUT_US 100000000 + #define LAN966X_BUFFER_CELL_SZ 64 #define LAN966X_BUFFER_MEMORY (160 * 1024) #define LAN966X_BUFFER_MIN_SZ 60 @@ -195,6 +198,11 @@ bool lan966x_netdevice_check(const struct net_device *dev); void lan966x_register_notifier_blocks(void); void lan966x_unregister_notifier_blocks(void); +bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb); + +void lan966x_ifh_get_src_port(void *ifh, u64 *src_port); +void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp); + void lan966x_stats_get(struct net_device *dev, struct rtnl_link_stats64 *stats); int lan966x_stats_init(struct lan966x *lan966x); -- cgit v1.2.3 From c8349639324a79f60106126c8193bd2fcd574fe8 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Fri, 8 Apr 2022 09:03:56 +0200 Subject: net: lan966x: Add FDMA functionality Ethernet frames can be extracted or injected to or from the device's DDR memory. There is one channel for injection and one channel for extraction. Each of these channels contain a linked list of DCBs which contains DB. The DCB contains only 1 DB for both the injection and extraction. Each DB contains a frame. Every time when a frame is received or transmitted an interrupt is generated. It is not possible to use both the FDMA and the manual injection/extraction of the frames. 
Therefore the FDMA has priority over the manual because of better performance values. FDMA: iperf -c 192.168.1.1 [ 5] 0.00-10.02 sec 420 MBytes 352 Mbits/sec 0 sender [ 5] 0.00-10.03 sec 420 MBytes 351 Mbits/sec receiver iperf -c 192.168.1.1 -R [ 5] 0.00-10.01 sec 528 MBytes 442 Mbits/sec 0 sender [ 5] 0.00-10.00 sec 524 MBytes 440 Mbits/sec receiver Manual: iperf -c 192.168.1.1 [ 5] 0.00-10.02 sec 93.8 MBytes 78.5 Mbits/sec 0 sender [ 5] 0.00-10.03 sec 93.8 MBytes 78.4 Mbits/sec receiver ipers -c 192.168.1.1 -R [ 5] 0.00-10.03 sec 121 MBytes 101 Mbits/sec 0 sender [ 5] 0.00-10.01 sec 118 MBytes 99.0 Mbits/sec receiver Signed-off-by: Horatiu Vultur Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/microchip/lan966x/Makefile | 2 +- .../net/ethernet/microchip/lan966x/lan966x_fdma.c | 708 +++++++++++++++++++++ .../net/ethernet/microchip/lan966x/lan966x_main.c | 34 +- .../net/ethernet/microchip/lan966x/lan966x_main.h | 108 ++++ .../net/ethernet/microchip/lan966x/lan966x_port.c | 3 + 5 files changed, 851 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile index a9ffc719aa0e..fd2e0ebb2427 100644 --- a/drivers/net/ethernet/microchip/lan966x/Makefile +++ b/drivers/net/ethernet/microchip/lan966x/Makefile @@ -8,4 +8,4 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \ - lan966x_ptp.o + lan966x_ptp.o lan966x_fdma.o diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c new file mode 100644 index 000000000000..f5fdb7455a41 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -0,0 +1,708 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include "lan966x_main.h" + +static int lan966x_fdma_channel_active(struct lan966x *lan966x) +{ + return lan_rd(lan966x, FDMA_CH_ACTIVE); +} + +static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx, + struct lan966x_db *db) +{ + struct lan966x *lan966x = rx->lan966x; + dma_addr_t dma_addr; + struct page *page; + + page = dev_alloc_pages(rx->page_order); + if (unlikely(!page)) + return NULL; + + dma_addr = dma_map_page(lan966x->dev, page, 0, + PAGE_SIZE << rx->page_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(lan966x->dev, dma_addr))) + goto free_page; + + db->dataptr = dma_addr; + + return page; + +free_page: + __free_pages(page, rx->page_order); + return NULL; +} + +static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + struct lan966x_rx_dcb *dcb; + struct lan966x_db *db; + int i, j; + + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &rx->dcbs[i]; + + for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { + db = &dcb->db[j]; + dma_unmap_single(lan966x->dev, + (dma_addr_t)db->dataptr, + PAGE_SIZE << rx->page_order, + DMA_FROM_DEVICE); + __free_pages(rx->page[i][j], rx->page_order); + } + } +} + +static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx, + struct lan966x_rx_dcb *dcb, + u64 nextptr) +{ + struct lan966x_db *db; + int i; + + for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) { + db = &dcb->db[i]; + db->status = FDMA_DCB_STATUS_INTR; + } + + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order); + + 
rx->last_entry->nextptr = nextptr; + rx->last_entry = dcb; +} + +static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + struct lan966x_rx_dcb *dcb; + struct lan966x_db *db; + struct page *page; + int i, j; + int size; + + /* calculate how many pages are needed to allocate the dcbs */ + size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + + rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL); + if (!rx->dcbs) + return -ENOMEM; + + rx->last_entry = rx->dcbs; + rx->db_index = 0; + rx->dcb_index = 0; + + /* Now for each dcb allocate the dbs */ + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &rx->dcbs[i]; + dcb->info = 0; + + /* For each db allocate a page and map it to the DB dataptr. */ + for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) { + db = &dcb->db[j]; + page = lan966x_fdma_rx_alloc_page(rx, db); + if (!page) + return -ENOMEM; + + db->status = 0; + rx->page[i][j] = page; + } + + lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i); + } + + return 0; +} + +static void lan966x_fdma_rx_free(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u32 size; + + /* Now it is possible to do the cleanup of dcb */ + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma); +} + +static void lan966x_fdma_rx_start(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u32 mask; + + /* When activating a channel, first is required to write the first DCB + * address and then to activate it + */ + lan_wr(lower_32_bits((u64)rx->dma), lan966x, + FDMA_DCB_LLP(rx->channel_id)); + lan_wr(upper_32_bits((u64)rx->dma), lan966x, + FDMA_DCB_LLP1(rx->channel_id)); + + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(0) | + FDMA_CH_CFG_CH_MEM_SET(1), + lan966x, FDMA_CH_CFG(rx->channel_id)); + + /* Start fdma */ + lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), + FDMA_PORT_CTRL_XTR_STOP, + lan966x, FDMA_PORT_CTRL(0)); + + /* Enable interrupts */ + mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); + mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); + mask |= BIT(rx->channel_id); + lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), + FDMA_INTR_DB_ENA_INTR_DB_ENA, + lan966x, FDMA_INTR_DB_ENA); + + /* Activate the channel */ + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)), + FDMA_CH_ACTIVATE_CH_ACTIVATE, + lan966x, FDMA_CH_ACTIVATE); +} + +static void lan966x_fdma_rx_disable(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u32 val; + + /* Disable the channel */ + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)), + FDMA_CH_DISABLE_CH_DISABLE, + lan966x, FDMA_CH_DISABLE); + + readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, + val, !(val & BIT(rx->channel_id)), + READL_SLEEP_US, READL_TIMEOUT_US); + + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)), + FDMA_CH_DB_DISCARD_DB_DISCARD, + lan966x, FDMA_CH_DB_DISCARD); +} + +static void lan966x_fdma_rx_reload(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)), + FDMA_CH_RELOAD_CH_RELOAD, + lan966x, FDMA_CH_RELOAD); +} + +static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx, + struct lan966x_tx_dcb *dcb) +{ + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = 0; +} + +static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx) +{ + struct lan966x 
*lan966x = tx->lan966x; + struct lan966x_tx_dcb *dcb; + struct lan966x_db *db; + int size; + int i, j; + + tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf), + GFP_KERNEL); + if (!tx->dcbs_buf) + return -ENOMEM; + + /* calculate how many pages are needed to allocate the dcbs */ + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL); + if (!tx->dcbs) + goto out; + + /* Now for each dcb allocate the db */ + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb = &tx->dcbs[i]; + + for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) { + db = &dcb->db[j]; + db->dataptr = 0; + db->status = 0; + } + + lan966x_fdma_tx_add_dcb(tx, dcb); + } + + return 0; + +out: + kfree(tx->dcbs_buf); + return -ENOMEM; +} + +static void lan966x_fdma_tx_free(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + int size; + + kfree(tx->dcbs_buf); + + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma); +} + +static void lan966x_fdma_tx_activate(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + u32 mask; + + /* When activating a channel, first is required to write the first DCB + * address and then to activate it + */ + lan_wr(lower_32_bits((u64)tx->dma), lan966x, + FDMA_DCB_LLP(tx->channel_id)); + lan_wr(upper_32_bits((u64)tx->dma), lan966x, + FDMA_DCB_LLP1(tx->channel_id)); + + lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(0) | + FDMA_CH_CFG_CH_MEM_SET(1), + lan966x, FDMA_CH_CFG(tx->channel_id)); + + /* Start fdma */ + lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), + FDMA_PORT_CTRL_INJ_STOP, + lan966x, FDMA_PORT_CTRL(0)); + + /* Enable interrupts */ + mask = lan_rd(lan966x, FDMA_INTR_DB_ENA); + mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask); + mask |= BIT(tx->channel_id); + lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask), + FDMA_INTR_DB_ENA_INTR_DB_ENA, + lan966x, FDMA_INTR_DB_ENA); + + /* Activate the channel */ + lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)), + FDMA_CH_ACTIVATE_CH_ACTIVATE, + lan966x, FDMA_CH_ACTIVATE); +} + +static void lan966x_fdma_tx_disable(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + u32 val; + + /* Disable the channel */ + lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)), + FDMA_CH_DISABLE_CH_DISABLE, + lan966x, FDMA_CH_DISABLE); + + readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x, + val, !(val & BIT(tx->channel_id)), + READL_SLEEP_US, READL_TIMEOUT_US); + + lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)), + FDMA_CH_DB_DISCARD_DB_DISCARD, + lan966x, FDMA_CH_DB_DISCARD); + + tx->activated = false; +} + +static void lan966x_fdma_tx_reload(struct lan966x_tx *tx) +{ + struct lan966x *lan966x = tx->lan966x; + + /* Write the registers to reload the channel */ + lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)), + FDMA_CH_RELOAD_CH_RELOAD, + lan966x, FDMA_CH_RELOAD); +} + +static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + port = lan966x->ports[i]; + if (!port) + continue; + + if (netif_queue_stopped(port->dev)) + netif_wake_queue(port->dev); + } +} + +static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) +{ + struct lan966x_tx *tx = &lan966x->tx; + struct lan966x_tx_dcb_buf 
*dcb_buf; + struct lan966x_db *db; + unsigned long flags; + bool clear = false; + int i; + + spin_lock_irqsave(&lan966x->tx_lock, flags); + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb_buf = &tx->dcbs_buf[i]; + + if (!dcb_buf->used) + continue; + + db = &tx->dcbs[i].db[0]; + if (!(db->status & FDMA_DCB_STATUS_DONE)) + continue; + + dcb_buf->dev->stats.tx_packets++; + dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len; + + dcb_buf->used = false; + dma_unmap_single(lan966x->dev, + dcb_buf->dma_addr, + dcb_buf->skb->len, + DMA_TO_DEVICE); + if (!dcb_buf->ptp) + dev_kfree_skb_any(dcb_buf->skb); + + clear = true; + } + + if (clear) + lan966x_fdma_wakeup_netdev(lan966x); + + spin_unlock_irqrestore(&lan966x->tx_lock, flags); +} + +static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx) +{ + struct lan966x_db *db; + + /* Check if there is any data */ + db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; + if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE))) + return false; + + return true; +} + +static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx) +{ + struct lan966x *lan966x = rx->lan966x; + u64 src_port, timestamp; + struct lan966x_db *db; + struct sk_buff *skb; + struct page *page; + + /* Get the received frame and unmap it */ + db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; + page = rx->page[rx->dcb_index][rx->db_index]; + skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order); + if (unlikely(!skb)) + goto unmap_page; + + dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr, + FDMA_DCB_STATUS_BLOCKL(db->status), + DMA_FROM_DEVICE); + skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status)); + + lan966x_ifh_get_src_port(skb->data, &src_port); + lan966x_ifh_get_timestamp(skb->data, ×tamp); + + WARN_ON(src_port >= lan966x->num_phys_ports); + + skb->dev = lan966x->ports[src_port]->dev; + skb_pull(skb, IFH_LEN * sizeof(u32)); + + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + + lan966x_ptp_rxtstamp(lan966x, skb, timestamp); + skb->protocol = eth_type_trans(skb, skb->dev); + + if (lan966x->bridge_mask & BIT(src_port)) { + skb->offload_fwd_mark = 1; + + skb_reset_network_header(skb); + if (!lan966x_hw_offload(lan966x, src_port, skb)) + skb->offload_fwd_mark = 0; + } + + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + + return skb; + +unmap_page: + dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr, + FDMA_DCB_STATUS_BLOCKL(db->status), + DMA_FROM_DEVICE); + __free_pages(page, rx->page_order); + + return NULL; +} + +static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan966x *lan966x = container_of(napi, struct lan966x, napi); + struct lan966x_rx *rx = &lan966x->rx; + int dcb_reload = rx->dcb_index; + struct lan966x_rx_dcb *old_dcb; + struct lan966x_db *db; + struct sk_buff *skb; + struct page *page; + int counter = 0; + u64 nextptr; + + lan966x_fdma_tx_clear_buf(lan966x, weight); + + /* Get all received skb */ + while (counter < weight) { + if (!lan966x_fdma_rx_more_frames(rx)) + break; + + skb = lan966x_fdma_rx_get_frame(rx); + + rx->page[rx->dcb_index][rx->db_index] = NULL; + rx->dcb_index++; + rx->dcb_index &= FDMA_DCB_MAX - 1; + + if (!skb) + break; + + napi_gro_receive(&lan966x->napi, skb); + counter++; + } + + /* Allocate new pages and map them */ + while (dcb_reload != rx->dcb_index) { + db = &rx->dcbs[dcb_reload].db[rx->db_index]; + page = lan966x_fdma_rx_alloc_page(rx, db); + if (unlikely(!page)) + break; + rx->page[dcb_reload][rx->db_index] = page; + + old_dcb = 
&rx->dcbs[dcb_reload]; + dcb_reload++; + dcb_reload &= FDMA_DCB_MAX - 1; + + nextptr = rx->dma + ((unsigned long)old_dcb - + (unsigned long)rx->dcbs); + lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr); + lan966x_fdma_rx_reload(rx); + } + + if (counter < weight && napi_complete_done(napi, counter)) + lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA); + + return counter; +} + +irqreturn_t lan966x_fdma_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + u32 db, err, err_type; + + db = lan_rd(lan966x, FDMA_INTR_DB); + err = lan_rd(lan966x, FDMA_INTR_ERR); + + if (db) { + lan_wr(0, lan966x, FDMA_INTR_DB_ENA); + lan_wr(db, lan966x, FDMA_INTR_DB); + + napi_schedule(&lan966x->napi); + } + + if (err) { + err_type = lan_rd(lan966x, FDMA_ERRORS); + + WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type); + + lan_wr(err, lan966x, FDMA_INTR_ERR); + lan_wr(err_type, lan966x, FDMA_ERRORS); + } + + return IRQ_HANDLED; +} + +static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx) +{ + struct lan966x_tx_dcb_buf *dcb_buf; + int i; + + for (i = 0; i < FDMA_DCB_MAX; ++i) { + dcb_buf = &tx->dcbs_buf[i]; + if (!dcb_buf->used && i != tx->last_in_use) + return i; + } + + return -1; +} + +int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) +{ + struct lan966x_port *port = netdev_priv(dev); + struct lan966x *lan966x = port->lan966x; + struct lan966x_tx_dcb_buf *next_dcb_buf; + struct lan966x_tx_dcb *next_dcb, *dcb; + struct lan966x_tx *tx = &lan966x->tx; + struct lan966x_db *next_db; + int needed_headroom; + int needed_tailroom; + dma_addr_t dma_addr; + int next_to_use; + int err; + + /* Get next index */ + next_to_use = lan966x_fdma_get_next_dcb(tx); + if (next_to_use < 0) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + if (skb_put_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + /* skb processing */ + needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0); + needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0); + if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) { + err = pskb_expand_head(skb, needed_headroom, needed_tailroom, + GFP_ATOMIC); + if (unlikely(err)) { + dev->stats.tx_dropped++; + err = NETDEV_TX_OK; + goto release; + } + } + + skb_tx_timestamp(skb); + skb_push(skb, IFH_LEN * sizeof(u32)); + memcpy(skb->data, ifh, IFH_LEN * sizeof(u32)); + skb_put(skb, 4); + + dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(lan966x->dev, dma_addr)) { + dev->stats.tx_dropped++; + err = NETDEV_TX_OK; + goto release; + } + + /* Setup next dcb */ + next_dcb = &tx->dcbs[next_to_use]; + next_dcb->nextptr = FDMA_DCB_INVALID_DATA; + + next_db = &next_dcb->db[0]; + next_db->dataptr = dma_addr; + next_db->status = FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_INTR | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len); + + /* Fill up the buffer */ + next_dcb_buf = &tx->dcbs_buf[next_to_use]; + next_dcb_buf->skb = skb; + next_dcb_buf->dma_addr = dma_addr; + next_dcb_buf->used = true; + next_dcb_buf->ptp = false; + next_dcb_buf->dev = dev; + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + next_dcb_buf->ptp = true; + + if (likely(lan966x->tx.activated)) { + /* Connect current dcb to the next db */ + dcb = &tx->dcbs[tx->last_in_use]; + dcb->nextptr = tx->dma + (next_to_use * + sizeof(struct lan966x_tx_dcb)); + + lan966x_fdma_tx_reload(tx); + } 
else { + /* Because it is first time, then just activate */ + lan966x->tx.activated = true; + lan966x_fdma_tx_activate(tx); + } + + /* Move to next dcb because this last in use */ + tx->last_in_use = next_to_use; + + return NETDEV_TX_OK; + +release: + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP) + lan966x_ptp_txtstamp_release(port, skb); + + dev_kfree_skb_any(skb); + return err; +} + +void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev) + return; + + lan966x->fdma_ndev = dev; + netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll, + NAPI_POLL_WEIGHT); + napi_enable(&lan966x->napi); +} + +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev) +{ + if (lan966x->fdma_ndev == dev) { + netif_napi_del(&lan966x->napi); + lan966x->fdma_ndev = NULL; + } +} + +int lan966x_fdma_init(struct lan966x *lan966x) +{ + int err; + + if (!lan966x->fdma) + return 0; + + lan966x->rx.lan966x = lan966x; + lan966x->rx.channel_id = FDMA_XTR_CHANNEL; + lan966x->tx.lan966x = lan966x; + lan966x->tx.channel_id = FDMA_INJ_CHANNEL; + lan966x->tx.last_in_use = -1; + + err = lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + return err; + + err = lan966x_fdma_tx_alloc(&lan966x->tx); + if (err) { + lan966x_fdma_rx_free(&lan966x->rx); + return err; + } + + lan966x_fdma_rx_start(&lan966x->rx); + + return 0; +} + +void lan966x_fdma_deinit(struct lan966x *lan966x) +{ + if (!lan966x->fdma) + return; + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_tx_disable(&lan966x->tx); + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x_fdma_rx_free(&lan966x->rx); + lan966x_fdma_tx_free(&lan966x->tx); +} diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 4ebc31c1d423..d7134a427640 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -341,7 +341,10 @@ static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev) } spin_lock(&lan966x->tx_lock); - err = lan966x_port_ifh_xmit(skb, ifh, dev); + if (port->lan966x->fdma) + err = lan966x_fdma_xmit(skb, ifh, dev); + else + err = lan966x_port_ifh_xmit(skb, ifh, dev); spin_unlock(&lan966x->tx_lock); return err; @@ -643,6 +646,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x) if (port->dev) unregister_netdev(port->dev); + if (lan966x->fdma && lan966x->fdma_ndev == port->dev) + lan966x_fdma_netdev_deinit(lan966x, port->dev); + if (port->phylink) { rtnl_lock(); lan966x_port_stop(port->dev); @@ -662,6 +668,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x) disable_irq(lan966x->ana_irq); lan966x->ana_irq = -ENXIO; } + + if (lan966x->fdma) + devm_free_irq(lan966x->dev, lan966x->fdma_irq, lan966x); } static int lan966x_probe_port(struct lan966x *lan966x, u32 p, @@ -790,12 +799,12 @@ static void lan966x_init(struct lan966x *lan966x) /* Do byte-swap and expect status after last data word * Extraction: Mode: manual extraction) | Byte_swap */ - lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) | + lan_wr(QS_XTR_GRP_CFG_MODE_SET(lan966x->fdma ? 2 : 1) | QS_XTR_GRP_CFG_BYTE_SWAP_SET(1), lan966x, QS_XTR_GRP_CFG(0)); /* Injection: Mode: manual injection | Byte_swap */ - lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) | + lan_wr(QS_INJ_GRP_CFG_MODE_SET(lan966x->fdma ? 
2 : 1) | QS_INJ_GRP_CFG_BYTE_SWAP_SET(1), lan966x, QS_INJ_GRP_CFG(0)); @@ -1017,6 +1026,17 @@ static int lan966x_probe(struct platform_device *pdev) lan966x->ptp = 1; } + lan966x->fdma_irq = platform_get_irq_byname(pdev, "fdma"); + if (lan966x->fdma_irq > 0) { + err = devm_request_irq(&pdev->dev, lan966x->fdma_irq, + lan966x_fdma_irq_handler, 0, + "fdma irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, "Unable to use fdma irq"); + + lan966x->fdma = true; + } + /* init switch */ lan966x_init(lan966x); lan966x_stats_init(lan966x); @@ -1055,8 +1075,15 @@ static int lan966x_probe(struct platform_device *pdev) if (err) goto cleanup_fdb; + err = lan966x_fdma_init(lan966x); + if (err) + goto cleanup_ptp; + return 0; +cleanup_ptp: + lan966x_ptp_deinit(lan966x); + cleanup_fdb: lan966x_fdb_deinit(lan966x); @@ -1076,6 +1103,7 @@ static int lan966x_remove(struct platform_device *pdev) { struct lan966x *lan966x = platform_get_drvdata(pdev); + lan966x_fdma_deinit(lan966x); lan966x_cleanup_ports(lan966x); cancel_delayed_work_sync(&lan966x->stats_work); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index b692c612f235..1ab20b6ec278 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -61,6 +61,22 @@ #define IFH_REW_OP_ONE_STEP_PTP 0x3 #define IFH_REW_OP_TWO_STEP_PTP 0x4 +#define FDMA_RX_DCB_MAX_DBS 1 +#define FDMA_TX_DCB_MAX_DBS 1 +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 +#define FDMA_DCB_MAX 512 + /* MAC table entry types. * ENTRYTYPE_NORMAL is subject to aging. * ENTRYTYPE_LOCKED is not subject to aging. @@ -76,6 +92,83 @@ enum macaccess_entry_type { struct lan966x_port; +struct lan966x_db { + u64 dataptr; + u64 status; +}; + +struct lan966x_rx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct lan966x_tx_dcb { + u64 nextptr; + u64 info; + struct lan966x_db db[FDMA_TX_DCB_MAX_DBS]; +}; + +struct lan966x_rx { + struct lan966x *lan966x; + + /* Pointer to the array of hardware dcbs. */ + struct lan966x_rx_dcb *dcbs; + + /* Pointer to the last address in the dcbs. */ + struct lan966x_rx_dcb *last_entry; + + /* For each DB, there is a page */ + struct page *page[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + + /* Represents the db_index, it can have a value between 0 and + * FDMA_RX_DCB_MAX_DBS, once it reaches the value of FDMA_RX_DCB_MAX_DBS + * it means that the DCB can be reused. + */ + int db_index; + + /* Represents the index in the dcbs. It has a value between 0 and + * FDMA_DCB_MAX + */ + int dcb_index; + + /* Represents the dma address to the dcbs array */ + dma_addr_t dma; + + /* Represents the page order that is used to allocate the pages for the + * RX buffers. This value is calculated based on max MTU of the devices. 
+ */ + u8 page_order; + + u8 channel_id; +}; + +struct lan966x_tx_dcb_buf { + struct net_device *dev; + struct sk_buff *skb; + dma_addr_t dma_addr; + bool used; + bool ptp; +}; + +struct lan966x_tx { + struct lan966x *lan966x; + + /* Pointer to the dcb list */ + struct lan966x_tx_dcb *dcbs; + u16 last_in_use; + + /* Represents the DMA address to the first entry of the dcb entries. */ + dma_addr_t dma; + + /* Array of dcbs that are given to the HW */ + struct lan966x_tx_dcb_buf *dcbs_buf; + + u8 channel_id; + + bool activated; +}; + struct lan966x_stat_layout { u32 offset; char name[ETH_GSTRING_LEN]; @@ -137,6 +230,7 @@ struct lan966x { int xtr_irq; int ana_irq; int ptp_irq; + int fdma_irq; /* worqueue for fdb */ struct workqueue_struct *fdb_work; @@ -153,6 +247,13 @@ struct lan966x { spinlock_t ptp_ts_id_lock; /* lock for ts_id */ struct mutex ptp_lock; /* lock for ptp interface state */ u16 ptp_skbs; + + /* fdma */ + bool fdma; + struct net_device *fdma_ndev; + struct lan966x_rx rx; + struct lan966x_tx tx; + struct napi_struct napi; }; struct lan966x_port_config { @@ -292,6 +393,13 @@ void lan966x_ptp_txtstamp_release(struct lan966x_port *port, struct sk_buff *skb); irqreturn_t lan966x_ptp_irq_handler(int irq, void *args); +int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev); +void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev); +void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev); +int lan966x_fdma_init(struct lan966x *lan966x); +void lan966x_fdma_deinit(struct lan966x *lan966x); +irqreturn_t lan966x_fdma_irq_handler(int irq, void *args); + static inline void __iomem *lan_addr(void __iomem *base[], int id, int tinst, int tcnt, int gbase, int ginst, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 237555845a52..f141644e4372 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -393,6 +393,9 @@ void lan966x_port_init(struct lan966x_port *port) lan966x_port_config_down(port); + if (lan966x->fdma) + lan966x_fdma_netdev_init(lan966x, port->dev); + if (config->portmode != PHY_INTERFACE_MODE_QSGMII) return; -- cgit v1.2.3 From 2ea1cbac267e2a39546982612fc00084b6d93e40 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Fri, 8 Apr 2022 09:03:57 +0200 Subject: net: lan966x: Update FDMA to change MTU. When changing the MTU, it is required to change also the size of the DBs. In case those frames will arrive to CPU. 
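As a rough sketch of the sizing logic this adds (reconstructed from the patch, with an illustrative helper name): the RX data blocks are whole pages, so the new buffer size follows from the largest MTU on any port plus the IFH that precedes every frame handed to the CPU.

static u8 lan966x_example_rx_page_order(struct lan966x *lan966x)
{
	/* largest MTU currently configured on any front port */
	int max_mtu = lan966x_fdma_get_max_mtu(lan966x);

	/* each frame delivered to the CPU is preceded by the IFH */
	max_mtu += IFH_LEN * sizeof(u32);

	/* express the required buffer as a page order; the FDMA is only
	 * reloaded when this value actually changes
	 */
	return round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
}
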
Signed-off-by: Horatiu Vultur Signed-off-by: Jakub Kicinski --- .../net/ethernet/microchip/lan966x/lan966x_fdma.c | 134 +++++++++++++++++++++ .../net/ethernet/microchip/lan966x/lan966x_main.c | 14 ++- .../net/ethernet/microchip/lan966x/lan966x_main.h | 1 + 3 files changed, 148 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index f5fdb7455a41..9e2a7323eaf0 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -336,6 +336,20 @@ static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x) } } +static void lan966x_fdma_stop_netdev(struct lan966x *lan966x) +{ + struct lan966x_port *port; + int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + port = lan966x->ports[i]; + if (!port) + continue; + + netif_stop_queue(port->dev); + } +} + static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) { struct lan966x_tx *tx = &lan966x->tx; @@ -644,6 +658,126 @@ release: return err; } +static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x) +{ + int max_mtu = 0; + int i; + + for (i = 0; i < lan966x->num_phys_ports; ++i) { + int mtu; + + if (!lan966x->ports[i]) + continue; + + mtu = lan966x->ports[i]->dev->mtu; + if (mtu > max_mtu) + max_mtu = mtu; + } + + return max_mtu; +} + +static int lan966x_qsys_sw_status(struct lan966x *lan966x) +{ + return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT)); +} + +static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) +{ + void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf; + dma_addr_t rx_dma, tx_dma; + u32 size; + int err; + + /* Store these for later to free them */ + rx_dma = lan966x->rx.dma; + tx_dma = lan966x->tx.dma; + rx_dcbs = lan966x->rx.dcbs; + tx_dcbs = lan966x->tx.dcbs; + tx_dcbs_buf = lan966x->tx.dcbs_buf; + + napi_synchronize(&lan966x->napi); + napi_disable(&lan966x->napi); + lan966x_fdma_stop_netdev(lan966x); + + lan966x_fdma_rx_disable(&lan966x->rx); + lan966x_fdma_rx_free_pages(&lan966x->rx); + lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1; + err = lan966x_fdma_rx_alloc(&lan966x->rx); + if (err) + goto restore; + lan966x_fdma_rx_start(&lan966x->rx); + + size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); + + lan966x_fdma_tx_disable(&lan966x->tx); + err = lan966x_fdma_tx_alloc(&lan966x->tx); + if (err) + goto restore_tx; + + size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma); + + kfree(tx_dcbs_buf); + + lan966x_fdma_wakeup_netdev(lan966x); + napi_enable(&lan966x->napi); + + return err; +restore: + lan966x->rx.dma = rx_dma; + lan966x->tx.dma = tx_dma; + lan966x_fdma_rx_start(&lan966x->rx); + +restore_tx: + lan966x->rx.dcbs = rx_dcbs; + lan966x->tx.dcbs = tx_dcbs; + lan966x->tx.dcbs_buf = tx_dcbs_buf; + + return err; +} + +int lan966x_fdma_change_mtu(struct lan966x *lan966x) +{ + int max_mtu; + int err; + u32 val; + + max_mtu = lan966x_fdma_get_max_mtu(lan966x); + max_mtu += IFH_LEN * sizeof(u32); + + if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 == + lan966x->rx.page_order) + return 0; + + /* Disable the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Flush the CPU queues */ + readx_poll_timeout(lan966x_qsys_sw_status, lan966x, + val, 
!(QSYS_SW_STATUS_EQ_AVAIL_GET(val)), + READL_SLEEP_US, READL_TIMEOUT_US); + + /* Add a sleep in case there are frames between the queues and the CPU + * port + */ + usleep_range(1000, 2000); + + err = lan966x_fdma_reload(lan966x, max_mtu); + + /* Enable back the CPU port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1), + QSYS_SW_PORT_MODE_PORT_ENA, + lan966x, QSYS_SW_PORT_MODE(CPU_PORT)); + + return err; +} + void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev) { if (lan966x->fdma_ndev) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index d7134a427640..e8751970f2b1 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -354,12 +354,24 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) { struct lan966x_port *port = netdev_priv(dev); struct lan966x *lan966x = port->lan966x; + int old_mtu = dev->mtu; + int err; lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu), lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); dev->mtu = new_mtu; - return 0; + if (!lan966x->fdma) + return 0; + + err = lan966x_fdma_change_mtu(lan966x); + if (err) { + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu), + lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); + dev->mtu = old_mtu; + } + + return err; } static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 1ab20b6ec278..5213263c4e87 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -394,6 +394,7 @@ void lan966x_ptp_txtstamp_release(struct lan966x_port *port, irqreturn_t lan966x_ptp_irq_handler(int irq, void *args); int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev); +int lan966x_fdma_change_mtu(struct lan966x *lan966x); void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev); void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev); int lan966x_fdma_init(struct lan966x *lan966x); -- cgit v1.2.3 From b8ff3395fbdf3b79a99d0ef410fc34c51044121e Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Mon, 11 Apr 2022 09:32:37 +0800 Subject: sfc: ef10: Fix assigning negative value to unsigned variable fix warning reported by smatch: 251 drivers/net/ethernet/sfc/ef10.c:2259 efx_ef10_tx_tso_desc() warn: assigning (-208) to unsigned variable 'ip_tot_len' Signed-off-by: Haowen Bai Acked-by: Edward Cree Link: https://lore.kernel.org/r/1649640757-30041-1-git-send-email-baihaowen@meizu.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/ef10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 50d535981a35..c9ee5011803f 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2256,7 +2256,7 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, * guaranteed to satisfy the second as we only attempt TSO if * inner_network_header <= 208. 
*/ - ip_tot_len = -EFX_TSO2_MAX_HDRLEN; + ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN; EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN + (tcp->doff << 2u) > ip_tot_len); -- cgit v1.2.3 From b66bfc131c69bd9a5ed3ae90be4cf47ec46c1526 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 11 Apr 2022 01:38:12 +0000 Subject: net/cadence: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get is more appropriate and simplifies the code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Reviewed-by: Claudiu Beznea Link: https://lore.kernel.org/r/20220411013812.2517212-1-chi.minghao@zte.com.cn Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cadence/macb_main.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 800d5ced5800..5555daee6f13 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -337,11 +337,9 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) struct macb *bp = bus->priv; int status; - status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) { - pm_runtime_put_noidle(&bp->pdev->dev); + status = pm_runtime_resume_and_get(&bp->pdev->dev); + if (status < 0) goto mdio_pm_exit; - } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -391,11 +389,9 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, struct macb *bp = bus->priv; int status; - status = pm_runtime_get_sync(&bp->pdev->dev); - if (status < 0) { - pm_runtime_put_noidle(&bp->pdev->dev); + status = pm_runtime_resume_and_get(&bp->pdev->dev); + if (status < 0) goto mdio_pm_exit; - } status = macb_mdio_wait_for_idle(bp); if (status < 0) @@ -2745,9 +2741,9 @@ static int macb_open(struct net_device *dev) netdev_dbg(bp->dev, "open\n"); - err = pm_runtime_get_sync(&bp->pdev->dev); + err = pm_runtime_resume_and_get(&bp->pdev->dev); if (err < 0) - goto pm_exit; + return err; /* RX buffers initialization */ macb_init_rx_buffer_size(bp, bufsz); @@ -4134,11 +4130,9 @@ static int at91ether_open(struct net_device *dev) u32 ctl; int ret; - ret = pm_runtime_get_sync(&lp->pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&lp->pdev->dev); + ret = pm_runtime_resume_and_get(&lp->pdev->dev); + if (ret < 0) return ret; - } /* Clear internal statistics */ ctl = macb_readl(lp, NCR); -- cgit v1.2.3 From ac6bef064f716ba67ab81054d53856f285bec307 Mon Sep 17 00:00:00 2001 From: Lv Ruyi Date: Mon, 11 Apr 2022 03:25:46 +0000 Subject: sfc: Fix spelling mistake "writting" -> "writing" There are some spelling mistakes in the comment. Fix them. Reported-by: Zeal Robot Signed-off-by: Lv Ruyi Link: https://lore.kernel.org/r/20220411032546.2517628-1-lv.ruyi@zte.com.cn Signed-off-by: Paolo Abeni --- drivers/net/ethernet/sfc/mcdi_pcol.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index d3fcbf930dba..ff617b1b38d3 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -73,8 +73,8 @@ * \------------------------------ Resync (always set) * * The client writes it's request into MC shared memory, and rings the - * doorbell.
Each request is completed by either by the MC writing + * back into shared memory, or by writing out an event. * * All MCDI commands support completion by shared memory response. Each * request may also contain additional data (accounted for by HEADER.LEN), -- cgit v1.2.3 From 66f862563ed68717dfd84e808ca12705ed275ced Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:45:56 +0100 Subject: net: dsa: mt7530: 1G can also support 1000BASE-X link mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using an external PHY connected using RGMII to mt7531 port 5, the PHY can be used to support 1000BASE-X connections. Moreover, if 1000BASE-T is supported, then we should allow 1000BASE-X as well, since which of these modes is supported is a property of the PHY. Therefore, it makes no sense to exclude this from the linkmodes when 1000BASE-T is supported. Fixes: c288575f7810 ("net: dsa: mt7530: Add the support of MT7531 switch") Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 19f0035d4410..854859e8cb75 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2539,13 +2539,7 @@ static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port, /* Port5 supports ethier RGMII or SGMII. * Port6 supports SGMII only. */ - switch (port) { - case 5: - if (mt7531_is_rgmii_port(priv, port)) - break; - fallthrough; - case 6: - phylink_set(supported, 1000baseX_Full); + if (port == 6) { phylink_set(supported, 2500baseX_Full); phylink_set(supported, 2500baseT_Full); } @@ -2913,8 +2907,6 @@ static void mt7530_mac_port_validate(struct dsa_switch *ds, int port, unsigned long *supported) { - if (port == 5) - phylink_set(supported, 1000baseX_Full); } static void mt7531_mac_port_validate(struct dsa_switch *ds, int port, @@ -2951,8 +2943,10 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, } /* This switch only supports 1G full-duplex. */ - if (state->interface != PHY_INTERFACE_MODE_MII) + if (state->interface != PHY_INTERFACE_MODE_MII) { phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } priv->info->mac_port_validate(ds, port, mask); -- cgit v1.2.3 From 59c2215f36040ac0040f9cfb6fbcc4cb2a705f11 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:01 +0100 Subject: net: dsa: mt7530: populate supported_interfaces and mac_capabilities MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Populate the supported interfaces and MAC capabilities for mt7530, mt7531 and mt7621 DSA switches. Filling this in will enable phylink to pre-check the PHY interface mode against the supported interfaces bitmap prior to calling the validate function, and will eventually allow us to convert to using the generic validation.
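The general shape of such a callback, as a minimal sketch (assuming <linux/phylink.h> and <net/dsa.h>; the port split and interface choices here are placeholders, not the mt753x specifics in the diff below):

#include <linux/phylink.h>
#include <net/dsa.h>

static void example_phylink_get_caps(struct dsa_switch *ds, int port,
				     struct phylink_config *config)
{
	/* Capabilities of the MAC itself, independent of the attached PHY */
	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;

	/* PHY interface modes this port can use; phylink checks a requested
	 * mode against this bitmap before the validate callback is run.
	 */
	if (port < 5)		/* placeholder: ports with internal PHYs */
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  config->supported_interfaces);
	else			/* placeholder: CPU/external port */
		phy_interface_set_rgmii(config->supported_interfaces);
}
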
Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/mt7530.h | 2 ++ 2 files changed, 76 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 854859e8cb75..659aa687fe8c 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2389,6 +2389,32 @@ mt7531_setup(struct dsa_switch *ds) return 0; } +static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + switch (port) { + case 0 ... 4: /* Internal phy */ + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + break; + + case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ + phy_interface_set_rgmii(config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + break; + + case 6: /* 1st cpu port */ + __set_bit(PHY_INTERFACE_MODE_RGMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_TRGMII, + config->supported_interfaces); + break; + } +} + static bool mt7530_phy_mode_supported(struct dsa_switch *ds, int port, const struct phylink_link_state *state) @@ -2425,6 +2451,37 @@ static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port) return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII); } +static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct mt7530_priv *priv = ds->priv; + + switch (port) { + case 0 ... 4: /* Internal phy */ + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + break; + + case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */ + if (mt7531_is_rgmii_port(priv, port)) { + phy_interface_set_rgmii(config->supported_interfaces); + break; + } + fallthrough; + + case 6: /* 1st cpu port supports sgmii/8023z only */ + __set_bit(PHY_INTERFACE_MODE_SGMII, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + config->supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + config->supported_interfaces); + + config->mac_capabilities |= MAC_2500FD; + break; + } +} + static bool mt7531_phy_mode_supported(struct dsa_switch *ds, int port, const struct phylink_link_state *state) @@ -2903,6 +2960,18 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port) return 0; } +static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct mt7530_priv *priv = ds->priv; + + /* This switch only supports full-duplex at 1Gbps */ + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000FD; + + priv->info->mac_port_get_caps(ds, port, config); +} + static void mt7530_mac_port_validate(struct dsa_switch *ds, int port, unsigned long *supported) @@ -3138,6 +3207,7 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_vlan_del = mt7530_port_vlan_del, .port_mirror_add = mt753x_port_mirror_add, .port_mirror_del = mt753x_port_mirror_del, + .phylink_get_caps = mt753x_phylink_get_caps, .phylink_validate = mt753x_phylink_validate, .phylink_mac_link_state = mt753x_phylink_mac_link_state, .phylink_mac_config = mt753x_phylink_mac_config, @@ -3155,6 +3225,7 @@ static const struct mt753x_info mt753x_table[] = { .phy_read = mt7530_phy_read, .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, + .mac_port_get_caps = 
mt7530_mac_port_get_caps, .phy_mode_supported = mt7530_phy_mode_supported, .mac_port_validate = mt7530_mac_port_validate, .mac_port_get_state = mt7530_phylink_mac_link_state, @@ -3166,6 +3237,7 @@ static const struct mt753x_info mt753x_table[] = { .phy_read = mt7530_phy_read, .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, + .mac_port_get_caps = mt7530_mac_port_get_caps, .phy_mode_supported = mt7530_phy_mode_supported, .mac_port_validate = mt7530_mac_port_validate, .mac_port_get_state = mt7530_phylink_mac_link_state, @@ -3178,6 +3250,7 @@ static const struct mt753x_info mt753x_table[] = { .phy_write = mt7531_ind_phy_write, .pad_setup = mt7531_pad_setup, .cpu_port_config = mt7531_cpu_port_config, + .mac_port_get_caps = mt7531_mac_port_get_caps, .phy_mode_supported = mt7531_phy_mode_supported, .mac_port_validate = mt7531_mac_port_validate, .mac_port_get_state = mt7531_phylink_mac_link_state, @@ -3240,6 +3313,7 @@ mt7530_probe(struct mdio_device *mdiodev) */ if (!priv->info->sw_setup || !priv->info->pad_setup || !priv->info->phy_read || !priv->info->phy_write || + !priv->info->mac_port_get_caps || !priv->info->phy_mode_supported || !priv->info->mac_port_validate || !priv->info->mac_port_get_state || !priv->info->mac_port_config) diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 91508e2feef9..e285b68ba354 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -769,6 +769,8 @@ struct mt753x_info { int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val); int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface); int (*cpu_port_config)(struct dsa_switch *ds, int port); + void (*mac_port_get_caps)(struct dsa_switch *ds, int port, + struct phylink_config *config); bool (*phy_mode_supported)(struct dsa_switch *ds, int port, const struct phylink_link_state *state); void (*mac_port_validate)(struct dsa_switch *ds, int port, -- cgit v1.2.3 From 26f6d881028281094203a49869121f727ec52bfa Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:06 +0100 Subject: net: dsa: mt7530: remove interface checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As phylink checks the interface mode against the supported_interfaces bitmap, we no longer need to validate the interface mode, nor handle PHY_INTERFACE_MODE_NA in the validation function. Remove these to simplify the implementation. Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 82 ------------------------------------------------ drivers/net/dsa/mt7530.h | 2 -- 2 files changed, 84 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 659aa687fe8c..4fdd75e88b1b 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2415,37 +2415,6 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, } } -static bool -mt7530_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) -{ - struct mt7530_priv *priv = ds->priv; - - switch (port) { - case 0 ... 
4: /* Internal phy */ - if (state->interface != PHY_INTERFACE_MODE_GMII) - return false; - break; - case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ - if (!phy_interface_mode_is_rgmii(state->interface) && - state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_GMII) - return false; - break; - case 6: /* 1st cpu port */ - if (state->interface != PHY_INTERFACE_MODE_RGMII && - state->interface != PHY_INTERFACE_MODE_TRGMII) - return false; - break; - default: - dev_err(priv->dev, "%s: unsupported port: %i\n", __func__, - port); - return false; - } - - return true; -} - static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port) { return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII); @@ -2482,44 +2451,6 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, } } -static bool -mt7531_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) -{ - struct mt7530_priv *priv = ds->priv; - - switch (port) { - case 0 ... 4: /* Internal phy */ - if (state->interface != PHY_INTERFACE_MODE_GMII) - return false; - break; - case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */ - if (mt7531_is_rgmii_port(priv, port)) - return phy_interface_mode_is_rgmii(state->interface); - fallthrough; - case 6: /* 1st cpu port supports sgmii/8023z only */ - if (state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_8023z(state->interface)) - return false; - break; - default: - dev_err(priv->dev, "%s: unsupported port: %i\n", __func__, - port); - return false; - } - - return true; -} - -static bool -mt753x_phy_mode_supported(struct dsa_switch *ds, int port, - const struct phylink_link_state *state) -{ - struct mt7530_priv *priv = ds->priv; - - return priv->info->phy_mode_supported(ds, port, state); -} - static int mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state) { @@ -2774,9 +2705,6 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, struct mt7530_priv *priv = ds->priv; u32 mcr_cur, mcr_new; - if (!mt753x_phy_mode_supported(ds, port, state)) - goto unsupported; - switch (port) { case 0 ... 
4: /* Internal phy */ if (state->interface != PHY_INTERFACE_MODE_GMII) @@ -2994,12 +2922,6 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct mt7530_priv *priv = ds->priv; - if (state->interface != PHY_INTERFACE_MODE_NA && - !mt753x_phy_mode_supported(ds, port, state)) { - linkmode_zero(supported); - return; - } - phylink_set_port_modes(mask); if (state->interface != PHY_INTERFACE_MODE_TRGMII && @@ -3226,7 +3148,6 @@ static const struct mt753x_info mt753x_table[] = { .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, - .phy_mode_supported = mt7530_phy_mode_supported, .mac_port_validate = mt7530_mac_port_validate, .mac_port_get_state = mt7530_phylink_mac_link_state, .mac_port_config = mt7530_mac_config, @@ -3238,7 +3159,6 @@ static const struct mt753x_info mt753x_table[] = { .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, - .phy_mode_supported = mt7530_phy_mode_supported, .mac_port_validate = mt7530_mac_port_validate, .mac_port_get_state = mt7530_phylink_mac_link_state, .mac_port_config = mt7530_mac_config, @@ -3251,7 +3171,6 @@ static const struct mt753x_info mt753x_table[] = { .pad_setup = mt7531_pad_setup, .cpu_port_config = mt7531_cpu_port_config, .mac_port_get_caps = mt7531_mac_port_get_caps, - .phy_mode_supported = mt7531_phy_mode_supported, .mac_port_validate = mt7531_mac_port_validate, .mac_port_get_state = mt7531_phylink_mac_link_state, .mac_port_config = mt7531_mac_config, @@ -3314,7 +3233,6 @@ mt7530_probe(struct mdio_device *mdiodev) if (!priv->info->sw_setup || !priv->info->pad_setup || !priv->info->phy_read || !priv->info->phy_write || !priv->info->mac_port_get_caps || - !priv->info->phy_mode_supported || !priv->info->mac_port_validate || !priv->info->mac_port_get_state || !priv->info->mac_port_config) return -EINVAL; diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index e285b68ba354..cbebbcc76509 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -771,8 +771,6 @@ struct mt753x_info { int (*cpu_port_config)(struct dsa_switch *ds, int port); void (*mac_port_get_caps)(struct dsa_switch *ds, int port, struct phylink_config *config); - bool (*phy_mode_supported)(struct dsa_switch *ds, int port, - const struct phylink_link_state *state); void (*mac_port_validate)(struct dsa_switch *ds, int port, unsigned long *supported); int (*mac_port_get_state)(struct dsa_switch *ds, int port, -- cgit v1.2.3 From fd301137e6b30c836c710001f9915f8776d7955e Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:12 +0100 Subject: net: dsa: mt7530: drop use of phylink_helper_basex_speed() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have a better method to select SFP interface modes, we no longer need to use phylink_helper_basex_speed() in a driver's validation function. 
Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 4fdd75e88b1b..de9cfd20ccbf 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2946,11 +2946,6 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, linkmode_and(supported, supported, mask); linkmode_and(state->advertising, state->advertising, mask); - - /* We can only operate at 2500BaseX or 1000BaseX. If requested - * to advertise both, only report advertising at 2500BaseX. - */ - phylink_helper_basex_speed(state); } static int -- cgit v1.2.3 From 7c04c8489115a46cc2ad6769f2e0c96237b6e75c Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:17 +0100 Subject: net: dsa: mt7530: only indicate linkmodes that can be supported MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that mt7530 is not using the basex helper, it becomes unnecessary to indicate support for both 1000baseX and 2500baseX when one of the 802.3z PHY interface modes is being selected. Ensure that the driver indicates only those linkmodes that can actually be supported by the PHY interface mode. Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 12 ++++++++---- drivers/net/dsa/mt7530.h | 1 + 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index de9cfd20ccbf..dfe3cd6d6b56 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2522,12 +2522,13 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, } static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port, + phy_interface_t interface, unsigned long *supported) { /* Port5 supports ethier RGMII or SGMII. * Port6 supports SGMII only. */ - if (port == 6) { + if (port == 6 && interface == PHY_INTERFACE_MODE_2500BASEX) { phylink_set(supported, 2500baseX_Full); phylink_set(supported, 2500baseT_Full); } @@ -2902,16 +2903,18 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, priv->info->mac_port_get_caps(ds, port, config); } static void mt7530_mac_port_validate(struct dsa_switch *ds, int port, + phy_interface_t interface, unsigned long *supported) { } static void mt7531_mac_port_validate(struct dsa_switch *ds, int port, + phy_interface_t interface, unsigned long *supported) { struct mt7530_priv *priv = ds->priv; - mt7531_sgmii_validate(priv, port, supported); + mt7531_sgmii_validate(priv, port, interface, supported); } static void @@ -2934,12 +2937,13 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, } /* This switch only supports 1G full-duplex.
*/ - if (state->interface != PHY_INTERFACE_MODE_MII) { + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_2500BASEX) { phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); } - priv->info->mac_port_validate(ds, port, mask); + priv->info->mac_port_validate(ds, port, state->interface, mask); phylink_set(mask, Pause); phylink_set(mask, Asym_Pause); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index cbebbcc76509..73cfd29fbb17 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -772,6 +772,7 @@ struct mt753x_info { void (*mac_port_get_caps)(struct dsa_switch *ds, int port, struct phylink_config *config); void (*mac_port_validate)(struct dsa_switch *ds, int port, + phy_interface_t interface, unsigned long *supported); int (*mac_port_get_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); -- cgit v1.2.3 From 6789d6d76e8183a0cd19f4a45fc265de81b5c7e6 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:22 +0100 Subject: net: dsa: mt7530: switch to use phylink_get_linkmodes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch mt7530 to use phylink_get_linkmodes() to generate the ethtool linkmodes that can be supported. We are unable to use the generic helper for this as pause modes are dependent on the interface as the Autoneg bit depends on the interface mode. Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 57 +++++------------------------------------------- 1 file changed, 5 insertions(+), 52 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index dfe3cd6d6b56..aa7f21683a07 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2521,19 +2521,6 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, return 0; } -static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port, - phy_interface_t interface, - unsigned long *supported) -{ - /* Port5 supports ethier RGMII or SGMII. - * Port6 supports SGMII only. 
- */ - if (port == 6 && interface == PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(supported, 2500baseX_Full); - phylink_set(supported, 2500baseT_Full); - } -} - static void mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, @@ -2901,52 +2888,22 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, priv->info->mac_port_get_caps(ds, port, config); } -static void -mt7530_mac_port_validate(struct dsa_switch *ds, int port, - phy_interface_t interface, - unsigned long *supported) -{ -} - -static void mt7531_mac_port_validate(struct dsa_switch *ds, int port, - phy_interface_t interface, - unsigned long *supported) -{ - struct mt7530_priv *priv = ds->priv; - - mt7531_sgmii_validate(priv, port, interface, supported); -} - static void mt753x_phylink_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - struct mt7530_priv *priv = ds->priv; + u32 caps; + + caps = dsa_to_port(ds, port)->pl_config.mac_capabilities; phylink_set_port_modes(mask); + phylink_get_linkmodes(mask, state->interface, caps); if (state->interface != PHY_INTERFACE_MODE_TRGMII && - !phy_interface_mode_is_8023z(state->interface)) { - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); + !phy_interface_mode_is_8023z(state->interface)) phylink_set(mask, Autoneg); - } - - /* This switch only supports 1G full-duplex. */ - if (state->interface != PHY_INTERFACE_MODE_MII && - state->interface != PHY_INTERFACE_MODE_2500BASEX) { - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); - } - - priv->info->mac_port_validate(ds, port, state->interface, mask); - - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); linkmode_and(supported, supported, mask); linkmode_and(state->advertising, state->advertising, mask); @@ -3147,7 +3104,6 @@ static const struct mt753x_info mt753x_table[] = { .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, - .mac_port_validate = mt7530_mac_port_validate, .mac_port_get_state = mt7530_phylink_mac_link_state, .mac_port_config = mt7530_mac_config, }, @@ -3158,7 +3114,6 @@ static const struct mt753x_info mt753x_table[] = { .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, - .mac_port_validate = mt7530_mac_port_validate, .mac_port_get_state = mt7530_phylink_mac_link_state, .mac_port_config = mt7530_mac_config, }, @@ -3170,7 +3125,6 @@ static const struct mt753x_info mt753x_table[] = { .pad_setup = mt7531_pad_setup, .cpu_port_config = mt7531_cpu_port_config, .mac_port_get_caps = mt7531_mac_port_get_caps, - .mac_port_validate = mt7531_mac_port_validate, .mac_port_get_state = mt7531_phylink_mac_link_state, .mac_port_config = mt7531_mac_config, .mac_pcs_an_restart = mt7531_sgmii_restart_an, @@ -3232,7 +3186,6 @@ mt7530_probe(struct mdio_device *mdiodev) if (!priv->info->sw_setup || !priv->info->pad_setup || !priv->info->phy_read || !priv->info->phy_write || !priv->info->mac_port_get_caps || - !priv->info->mac_port_validate || !priv->info->mac_port_get_state || !priv->info->mac_port_config) return -EINVAL; -- cgit v1.2.3 From cbd1f243bc41056c76fcfc5f3380cfac1f00d37b Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:27 +0100 Subject: net: dsa: mt7530: partially convert to phylink_pcs 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Partially convert the mt7530 driver to use phylink's PCS support. This is a partial implementation as we don't move anything into the pcs_config method yet - this driver supports SGMII or 1000BASE-X without in-band. Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 144 ++++++++++++++++++++++++++++------------------- drivers/net/dsa/mt7530.h | 21 +++---- 2 files changed, 95 insertions(+), 70 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index aa7f21683a07..1c0d931973e5 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -24,6 +24,11 @@ #include "mt7530.h" +static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mt753x_pcs, pcs); +} + /* String, offset, and register size in bytes if different from 4 bytes */ static const struct mt7530_mib_desc mt7530_mib[] = { MIB_DESC(1, 0x00, "TxDrop"), @@ -2521,12 +2526,11 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, return 0; } -static void -mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, - int speed, int duplex) +static void mt7531_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int port = pcs_to_mt753x_pcs(pcs)->port; unsigned int val; /* For adjusting speed and duplex of SGMII force mode. */ @@ -2552,6 +2556,9 @@ mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port, /* MT7531 SGMII 1G force mode can only work in full duplex mode, * no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not. + * + * The speed check is unnecessary as the MAC capabilities apply + * this restriction. 
--rmk */ if ((speed == SPEED_10 || speed == SPEED_100) && duplex != DUPLEX_FULL) @@ -2627,9 +2634,10 @@ static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port, return 0; } -static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port) +static void mt7531_pcs_an_restart(struct phylink_pcs *pcs) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int port = pcs_to_mt753x_pcs(pcs)->port; u32 val; /* Only restart AN when AN is enabled */ @@ -2686,6 +2694,24 @@ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode, return priv->info->mac_port_config(ds, port, mode, state->interface); } +static struct phylink_pcs * +mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port, + phy_interface_t interface) +{ + struct mt7530_priv *priv = ds->priv; + + switch (interface) { + case PHY_INTERFACE_MODE_TRGMII: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + return &priv->pcs[port].pcs; + + default: + return NULL; + } +} + static void mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) @@ -2747,17 +2773,6 @@ unsupported: mt7530_write(priv, MT7530_PMCR_P(port), mcr_new); } -static void -mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port) -{ - struct mt7530_priv *priv = ds->priv; - - if (!priv->info->mac_pcs_an_restart) - return; - - priv->info->mac_pcs_an_restart(ds, port); -} - static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface) @@ -2767,16 +2782,13 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); } -static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, - int speed, int duplex) +static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex) { - struct mt7530_priv *priv = ds->priv; - - if (!priv->info->mac_pcs_link_up) - return; - - priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex); + if (pcs->ops->pcs_link_up) + pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex); } static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -2789,8 +2801,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, struct mt7530_priv *priv = ds->priv; u32 mcr; - mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex); - mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; /* MT753x MAC works in 1G full duplex mode for all up-clocked @@ -2870,6 +2880,8 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port) return ret; mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPU_PORT_SETTING(priv->id)); + mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED, + interface, speed, DUPLEX_FULL); mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL, speed, DUPLEX_FULL, true, true); @@ -2909,16 +2921,13 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, linkmode_and(state->advertising, state->advertising, mask); } -static int -mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) +static void mt7530_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int 
port = pcs_to_mt753x_pcs(pcs)->port; u32 pmsr; - if (port < 0 || port >= MT7530_NUM_PORTS) - return -EINVAL; - pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); state->link = (pmsr & PMSR_LINK); @@ -2945,8 +2954,6 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port, state->pause |= MLO_PAUSE_RX; if (pmsr & PMSR_TX_FC) state->pause |= MLO_PAUSE_TX; - - return 1; } static int @@ -2988,32 +2995,49 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port, return 0; } -static int -mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) +static void mt7531_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) { - struct mt7530_priv *priv = ds->priv; + struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv; + int port = pcs_to_mt753x_pcs(pcs)->port; if (state->interface == PHY_INTERFACE_MODE_SGMII) - return mt7531_sgmii_pcs_get_state_an(priv, port, state); - - return -EOPNOTSUPP; + mt7531_sgmii_pcs_get_state_an(priv, port, state); + else + state->link = false; } -static int -mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port, - struct phylink_link_state *state) +static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) { - struct mt7530_priv *priv = ds->priv; + return 0; +} - return priv->info->mac_port_get_state(ds, port, state); +static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) +{ } +static const struct phylink_pcs_ops mt7530_pcs_ops = { + .pcs_get_state = mt7530_pcs_get_state, + .pcs_config = mt753x_pcs_config, + .pcs_an_restart = mt7530_pcs_an_restart, +}; + +static const struct phylink_pcs_ops mt7531_pcs_ops = { + .pcs_get_state = mt7531_pcs_get_state, + .pcs_config = mt753x_pcs_config, + .pcs_an_restart = mt7531_pcs_an_restart, + .pcs_link_up = mt7531_pcs_link_up, +}; + static int mt753x_setup(struct dsa_switch *ds) { struct mt7530_priv *priv = ds->priv; int ret = priv->info->sw_setup(ds); + int i; if (ret) return ret; @@ -3026,6 +3050,13 @@ mt753x_setup(struct dsa_switch *ds) if (ret && priv->irq) mt7530_free_irq_common(priv); + /* Initialise the PCS devices */ + for (i = 0; i < priv->ds->num_ports; i++) { + priv->pcs[i].pcs.ops = priv->info->pcs_ops; + priv->pcs[i].priv = priv; + priv->pcs[i].port = i; + } + return ret; } @@ -3087,9 +3118,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_mirror_del = mt753x_port_mirror_del, .phylink_get_caps = mt753x_phylink_get_caps, .phylink_validate = mt753x_phylink_validate, - .phylink_mac_link_state = mt753x_phylink_mac_link_state, + .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, .phylink_mac_config = mt753x_phylink_mac_config, - .phylink_mac_an_restart = mt753x_phylink_mac_an_restart, .phylink_mac_link_down = mt753x_phylink_mac_link_down, .phylink_mac_link_up = mt753x_phylink_mac_link_up, .get_mac_eee = mt753x_get_mac_eee, @@ -3099,36 +3129,34 @@ static const struct dsa_switch_ops mt7530_switch_ops = { static const struct mt753x_info mt753x_table[] = { [ID_MT7621] = { .id = ID_MT7621, + .pcs_ops = &mt7530_pcs_ops, .sw_setup = mt7530_setup, .phy_read = mt7530_phy_read, .phy_write = mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, - .mac_port_get_state = mt7530_phylink_mac_link_state, .mac_port_config = mt7530_mac_config, }, [ID_MT7530] = { .id = ID_MT7530, + .pcs_ops = &mt7530_pcs_ops, .sw_setup = mt7530_setup, .phy_read = mt7530_phy_read, .phy_write = 
mt7530_phy_write, .pad_setup = mt7530_pad_clk_setup, .mac_port_get_caps = mt7530_mac_port_get_caps, - .mac_port_get_state = mt7530_phylink_mac_link_state, .mac_port_config = mt7530_mac_config, }, [ID_MT7531] = { .id = ID_MT7531, + .pcs_ops = &mt7531_pcs_ops, .sw_setup = mt7531_setup, .phy_read = mt7531_ind_phy_read, .phy_write = mt7531_ind_phy_write, .pad_setup = mt7531_pad_setup, .cpu_port_config = mt7531_cpu_port_config, .mac_port_get_caps = mt7531_mac_port_get_caps, - .mac_port_get_state = mt7531_phylink_mac_link_state, .mac_port_config = mt7531_mac_config, - .mac_pcs_an_restart = mt7531_sgmii_restart_an, - .mac_pcs_link_up = mt7531_sgmii_link_up_force, }, }; @@ -3186,7 +3214,7 @@ mt7530_probe(struct mdio_device *mdiodev) if (!priv->info->sw_setup || !priv->info->pad_setup || !priv->info->phy_read || !priv->info->phy_write || !priv->info->mac_port_get_caps || - !priv->info->mac_port_get_state || !priv->info->mac_port_config) + !priv->info->mac_port_config) return -EINVAL; priv->id = priv->info->id; diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 73cfd29fbb17..71e36b69b96d 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -741,6 +741,12 @@ static const char *p5_intf_modes(unsigned int p5_interface) struct mt7530_priv; +struct mt753x_pcs { + struct phylink_pcs pcs; + struct mt7530_priv *priv; + int port; +}; + /* struct mt753x_info - This is the main data structure for holding the specific * part for each supported device * @sw_setup: Holding the handler to a device initialization @@ -752,18 +758,14 @@ struct mt7530_priv; * port * @mac_port_validate: Holding the way to set addition validate type for a * certan MAC port - * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain - * MAC port * @mac_port_config: Holding the way setting up the PHY attribute to a * certain MAC port - * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a - * certain MAC port - * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs - * of the certain MAC port */ struct mt753x_info { enum mt753x_id id; + const struct phylink_pcs_ops *pcs_ops; + int (*sw_setup)(struct dsa_switch *ds); int (*phy_read)(struct mt7530_priv *priv, int port, int regnum); int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val); @@ -774,15 +776,9 @@ struct mt753x_info { void (*mac_port_validate)(struct dsa_switch *ds, int port, phy_interface_t interface, unsigned long *supported); - int (*mac_port_get_state)(struct dsa_switch *ds, int port, - struct phylink_link_state *state); int (*mac_port_config)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); - void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port); - void (*mac_pcs_link_up)(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, - int speed, int duplex); }; /* struct mt7530_priv - This is the main data structure for holding the state @@ -824,6 +820,7 @@ struct mt7530_priv { u8 mirror_tx; struct mt7530_port ports[MT7530_NUM_PORTS]; + struct mt753x_pcs pcs[MT7530_NUM_PORTS]; /* protect among processes for registers access*/ struct mutex reg_mutex; int irq; -- cgit v1.2.3 From 9d0df207c002e1532c109fb06c8895d1855fba40 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:32 +0100 Subject: net: dsa: mt7530: move autoneg handling to PCS validation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the autoneg bit handling to the 
PCS validation, which allows us to get rid of mt753x_phylink_validate() and rely on the default phylink_generic_validate() implementation for the MAC side. Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1c0d931973e5..0ee2cb7e5664 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2900,25 +2900,16 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, priv->info->mac_port_get_caps(ds, port, config); } -static void -mt753x_phylink_validate(struct dsa_switch *ds, int port, - unsigned long *supported, - struct phylink_link_state *state) +static int mt753x_pcs_validate(struct phylink_pcs *pcs, + unsigned long *supported, + const struct phylink_link_state *state) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - u32 caps; - - caps = dsa_to_port(ds, port)->pl_config.mac_capabilities; - - phylink_set_port_modes(mask); - phylink_get_linkmodes(mask, state->interface, caps); + /* Autonegotiation is not supported in TRGMII nor 802.3z modes */ + if (state->interface == PHY_INTERFACE_MODE_TRGMII || + phy_interface_mode_is_8023z(state->interface)) + phylink_clear(supported, Autoneg); - if (state->interface != PHY_INTERFACE_MODE_TRGMII && - !phy_interface_mode_is_8023z(state->interface)) - phylink_set(mask, Autoneg); - - linkmode_and(supported, supported, mask); - linkmode_and(state->advertising, state->advertising, mask); + return 0; } static void mt7530_pcs_get_state(struct phylink_pcs *pcs, @@ -3020,12 +3011,14 @@ static void mt7530_pcs_an_restart(struct phylink_pcs *pcs) } static const struct phylink_pcs_ops mt7530_pcs_ops = { + .pcs_validate = mt753x_pcs_validate, .pcs_get_state = mt7530_pcs_get_state, .pcs_config = mt753x_pcs_config, .pcs_an_restart = mt7530_pcs_an_restart, }; static const struct phylink_pcs_ops mt7531_pcs_ops = { + .pcs_validate = mt753x_pcs_validate, .pcs_get_state = mt7531_pcs_get_state, .pcs_config = mt753x_pcs_config, .pcs_an_restart = mt7531_pcs_an_restart, @@ -3117,7 +3110,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_mirror_add = mt753x_port_mirror_add, .port_mirror_del = mt753x_port_mirror_del, .phylink_get_caps = mt753x_phylink_get_caps, - .phylink_validate = mt753x_phylink_validate, .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, .phylink_mac_config = mt753x_phylink_mac_config, .phylink_mac_link_down = mt753x_phylink_mac_link_down, -- cgit v1.2.3 From 7b972512ec0e16df298366b31326f3707f5dbbdd Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 11 Apr 2022 10:46:37 +0100 Subject: net: dsa: mt7530: mark as non-legacy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The mt7530 driver does not make use of the speed, duplex, pause or advertisement in its phylink_mac_config() implementation, so it can be marked as a non-legacy driver. 
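The contract behind that flag, in a minimal hypothetical sketch (the example_* function and EXAMPLE_* register bits are placeholders, not mt7530 code): a non-legacy driver applies the resolved speed, duplex and pause settings in phylink_mac_link_up(), leaving mac_config() to act only on the interface mode.

#include <linux/bits.h>
#include <linux/phy.h>
#include <net/dsa.h>

#define EXAMPLE_FORCE_SPEED_1000	BIT(3)	/* placeholder register bit */
#define EXAMPLE_FORCE_FDX		BIT(2)	/* placeholder register bit */
#define EXAMPLE_FORCE_TX_FC		BIT(1)	/* placeholder register bit */
#define EXAMPLE_FORCE_RX_FC		BIT(0)	/* placeholder register bit */

static void example_phylink_mac_link_up(struct dsa_switch *ds, int port,
					unsigned int mode,
					phy_interface_t interface,
					struct phy_device *phydev,
					int speed, int duplex,
					bool tx_pause, bool rx_pause)
{
	u32 mcr = 0;

	/* Resolved link parameters are applied here, not in mac_config() */
	if (speed == SPEED_1000)
		mcr |= EXAMPLE_FORCE_SPEED_1000;
	if (duplex == DUPLEX_FULL)
		mcr |= EXAMPLE_FORCE_FDX;
	if (tx_pause)
		mcr |= EXAMPLE_FORCE_TX_FC;
	if (rx_pause)
		mcr |= EXAMPLE_FORCE_RX_FC;

	/* a real driver would now write mcr to the port's MAC control register */
	(void)mcr;
}

mt7530 already follows this split, which is why the single flag assignment in the diff below is sufficient.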
Tested-by: Marek Behún Signed-off-by: Russell King (Oracle) Signed-off-by: Paolo Abeni --- drivers/net/dsa/mt7530.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 0ee2cb7e5664..c4ea73d996e8 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2897,6 +2897,12 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; + /* This driver does not make use of the speed, duplex, pause or the + * advertisement in its mac_config, so it is safe to mark this driver + * as non-legacy. + */ + config->legacy_pre_march2020 = false; + priv->info->mac_port_get_caps(ds, port, config); } -- cgit v1.2.3 From 54fccfdd7c663a2e23de85ea4ed6a123b6abcc35 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 11 Apr 2022 12:27:10 +0100 Subject: sfc: efx_default_channel_type APIs can be static This means we can remove them from efx_channel.h and avoid naming conflicts later. efx_channel_dummy_op_void() cannot be static as it is used in ef100_nic.c. Signed-off-by: Martin Habets Signed-off-by: Paolo Abeni --- drivers/net/ethernet/sfc/efx_channels.c | 52 ++++++++++++++++++--------------- drivers/net/ethernet/sfc/efx_channels.h | 4 --- 2 files changed, 28 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 377df8b7f015..eec80b024195 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -51,28 +51,7 @@ MODULE_PARM_DESC(irq_adapt_high_thresh, */ static int napi_weight = 64; -/*************** - * Housekeeping - ***************/ - -int efx_channel_dummy_op_int(struct efx_channel *channel) -{ - return 0; -} - -void efx_channel_dummy_op_void(struct efx_channel *channel) -{ -} - -static const struct efx_channel_type efx_default_channel_type = { - .pre_probe = efx_channel_dummy_op_int, - .post_remove = efx_channel_dummy_op_void, - .get_name = efx_get_channel_name, - .copy = efx_copy_channel, - .want_txqs = efx_default_channel_want_txqs, - .keep_eventq = false, - .want_pio = true, -}; +static const struct efx_channel_type efx_default_channel_type; /************* * INTERRUPTS @@ -619,6 +598,7 @@ void efx_fini_channels(struct efx_nic *efx) /* Allocate and initialise a channel structure, copying parameters * (but not resources) from an old channel structure. 
*/ +static struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) { struct efx_rx_queue *rx_queue; @@ -696,7 +676,8 @@ fail: return rc; } -void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) +static void efx_get_channel_name(struct efx_channel *channel, char *buf, + size_t len) { struct efx_nic *efx = channel->efx; const char *type; @@ -1004,7 +985,7 @@ int efx_set_channels(struct efx_nic *efx) return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); } -bool efx_default_channel_want_txqs(struct efx_channel *channel) +static bool efx_default_channel_want_txqs(struct efx_channel *channel) { return channel->channel - channel->efx->tx_channel_offset < channel->efx->n_tx_channels; @@ -1362,3 +1343,26 @@ void efx_fini_napi(struct efx_nic *efx) efx_for_each_channel(channel, efx) efx_fini_napi_channel(channel); } + +/*************** + * Housekeeping + ***************/ + +static int efx_channel_dummy_op_int(struct efx_channel *channel) +{ + return 0; +} + +void efx_channel_dummy_op_void(struct efx_channel *channel) +{ +} + +static const struct efx_channel_type efx_default_channel_type = { + .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_channel_dummy_op_void, + .get_name = efx_get_channel_name, + .copy = efx_copy_channel, + .want_txqs = efx_default_channel_want_txqs, + .keep_eventq = false, + .want_pio = true, +}; diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h index d77ec1f77fb1..64abb99a56b8 100644 --- a/drivers/net/ethernet/sfc/efx_channels.h +++ b/drivers/net/ethernet/sfc/efx_channels.h @@ -32,16 +32,13 @@ void efx_fini_eventq(struct efx_channel *channel); void efx_remove_eventq(struct efx_channel *channel); int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); -void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len); void efx_set_channel_names(struct efx_nic *efx); int efx_init_channels(struct efx_nic *efx); int efx_probe_channels(struct efx_nic *efx); int efx_set_channels(struct efx_nic *efx); -bool efx_default_channel_want_txqs(struct efx_channel *channel); void efx_remove_channel(struct efx_channel *channel); void efx_remove_channels(struct efx_nic *efx); void efx_fini_channels(struct efx_nic *efx); -struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel); void efx_start_channels(struct efx_nic *efx); void efx_stop_channels(struct efx_nic *efx); @@ -50,7 +47,6 @@ void efx_init_napi(struct efx_nic *efx); void efx_fini_napi_channel(struct efx_channel *channel); void efx_fini_napi(struct efx_nic *efx); -int efx_channel_dummy_op_int(struct efx_channel *channel); void efx_channel_dummy_op_void(struct efx_channel *channel); #endif -- cgit v1.2.3 From cc42e4e3f1014f2d24437955bc1e90b77cef343e Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 11 Apr 2022 12:27:15 +0100 Subject: sfc: Remove duplicate definition of efx_xmit_done It is defined both in efx.h and tx_common.h. Remove the definition in efx.h. 
Signed-off-by: Martin Habets Signed-off-by: Paolo Abeni --- drivers/net/ethernet/sfc/efx.h | 1 - drivers/net/ethernet/sfc/farch.c | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index daf0c00c1242..c05a83da9e44 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -28,7 +28,6 @@ static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct ef100_enqueue_skb, __efx_enqueue_skb, tx_queue, skb); } -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 148dcd48b58d..9599123bc28d 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -16,6 +16,7 @@ #include "bitfield.h" #include "efx.h" #include "rx_common.h" +#include "tx_common.h" #include "nic.h" #include "farch_regs.h" #include "sriov.h" -- cgit v1.2.3 From d78eaf06b5d9f4686846ca4e35207025c415ba85 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 11 Apr 2022 12:27:20 +0100 Subject: sfc: Remove global definition of efx_reset_type_names The strings are only used in efx_common.c so the definitions can be static in there. Signed-off-by: Martin Habets Signed-off-by: Paolo Abeni --- drivers/net/ethernet/sfc/efx_common.c | 4 ++-- drivers/net/ethernet/sfc/net_driver.h | 5 ----- 2 files changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index af37c990217e..f6577e74d6e6 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -51,8 +51,8 @@ static unsigned int efx_monitor_interval = 1 * HZ; /* Default stats update time */ #define STATS_PERIOD_MS_DEFAULT 1000 -const unsigned int efx_reset_type_max = RESET_TYPE_MAX; -const char *const efx_reset_type_names[] = { +static const unsigned int efx_reset_type_max = RESET_TYPE_MAX; +static const char *const efx_reset_type_names[] = { [RESET_TYPE_INVISIBLE] = "INVISIBLE", [RESET_TYPE_ALL] = "ALL", [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index c75dc75e2857..318db906a154 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -612,11 +612,6 @@ extern const unsigned int efx_loopback_mode_max; #define LOOPBACK_MODE(efx) \ STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) -extern const char *const efx_reset_type_names[]; -extern const unsigned int efx_reset_type_max; -#define RESET_TYPE(type) \ - STRING_TABLE_LOOKUP(type, efx_reset_type) - enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, -- cgit v1.2.3 From f01598090048f5f732ea7aa64b2f194131ce60d2 Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Mon, 11 Apr 2022 15:59:43 +0200 Subject: net: usb: qmi_wwan: add Telit 0x1057 composition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the following Telit FN980 composition: 0x1057: tty, adb, rmnet, tty, tty, tty, tty, tty Signed-off-by: Daniele Palmas Acked-by: Bjørn Mork Link: https://lore.kernel.org/r/20220411135943.4067264-1-dnlplm@gmail.com Signed-off-by: Paolo Abeni --- drivers/net/usb/qmi_wwan.c | 
1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 17cf0d10fef9..9e2f48c2e85e 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1357,6 +1357,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ -- cgit v1.2.3 From ec095263a965720e1ca39db1d9c5cd47846c789b Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Mon, 11 Apr 2022 14:49:55 +0200 Subject: net: remove noblock parameter from recvmsg() entities The internal recvmsg() functions have two parameters 'flags' and 'noblock' that were merged inside skb_recv_datagram(). As a follow up patch to commit f4b41f062c42 ("net: remove noblock parameter from skb_recv_datagram()") this patch removes the separate 'noblock' parameter for recvmsg(). Analogue to the referenced patch for skb_recv_datagram() the 'flags' and 'noblock' parameters are unnecessarily split up with e.g. err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); or in err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg, sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); instead of simply using only flags all the time and check for MSG_DONTWAIT where needed (to preserve for the formerly separated no(n)block condition). Signed-off-by: Oliver Hartkopp Link: https://lore.kernel.org/r/20220411124955.154876-1-socketcan@hartkopp.net Signed-off-by: Paolo Abeni --- .../ethernet/chelsio/inline_crypto/chtls/chtls.h | 2 +- .../chelsio/inline_crypto/chtls/chtls_io.c | 22 ++++++++++------------ include/net/ping.h | 2 +- include/net/sctp/sctp.h | 2 +- include/net/sock.h | 3 +-- include/net/tcp.h | 2 +- include/net/tls.h | 2 +- include/net/udp.h | 8 ++++---- net/core/sock.c | 3 +-- net/dccp/dccp.h | 4 ++-- net/dccp/proto.c | 6 +++--- net/ieee802154/socket.c | 6 ++---- net/ipv4/af_inet.c | 5 ++--- net/ipv4/ping.c | 5 ++--- net/ipv4/raw.c | 3 +-- net/ipv4/tcp.c | 19 ++++++++----------- net/ipv4/tcp_bpf.c | 15 +++++++-------- net/ipv4/udp.c | 11 +++++------ net/ipv4/udp_bpf.c | 17 ++++++++--------- net/ipv4/udp_impl.h | 4 ++-- net/ipv6/af_inet6.c | 5 ++--- net/ipv6/raw.c | 3 +-- net/ipv6/udp.c | 4 ++-- net/ipv6/udp_impl.h | 4 ++-- net/l2tp/l2tp_ip.c | 3 +-- net/l2tp/l2tp_ip6.c | 3 +-- net/mptcp/protocol.c | 4 ++-- net/phonet/datagram.c | 3 +-- net/phonet/pep.c | 3 +-- net/sctp/socket.c | 16 +++++++--------- net/sctp/ulpevent.c | 2 +- net/sunrpc/svcsock.c | 2 +- net/sunrpc/xprtsock.c | 2 +- net/tls/tls_sw.c | 3 --- net/unix/af_unix.c | 6 ++---- net/unix/unix_bpf.c | 5 ++--- net/xfrm/espintcp.c | 4 +--- 37 files changed, 91 insertions(+), 122 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h index 9e2378013642..41714203ace8 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h @@ -567,7 +567,7 @@ void chtls_shutdown(struct sock *sk, int how); void chtls_destroy_sock(struct sock *sk); int chtls_sendmsg(struct sock *sk, 
struct msghdr *msg, size_t size); int chtls_recvmsg(struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags, int *addr_len); + size_t len, int flags, int *addr_len); int chtls_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); int send_tx_flowc_wr(struct sock *sk, int compl, diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c index c320cc8ca68d..539992dad8ba 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c @@ -1426,7 +1426,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied) } static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct chtls_hws *hws = &csk->tlshws; @@ -1441,7 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, buffers_freed = 0; - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) @@ -1616,7 +1616,7 @@ skip_copy: * Peek at data in a socket's receive buffer. */ static int peekmsg(struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags) + size_t len, int flags) { struct tcp_sock *tp = tcp_sk(sk); u32 peek_seq, offset; @@ -1626,7 +1626,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, long timeo; lock_sock(sk); - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); peek_seq = tp->copied_seq; do { @@ -1737,7 +1737,7 @@ found_ok_skb: } int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct tcp_sock *tp = tcp_sk(sk); struct chtls_sock *csk; @@ -1750,25 +1750,23 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, buffers_freed = 0; if (unlikely(flags & MSG_OOB)) - return tcp_prot.recvmsg(sk, msg, len, nonblock, flags, - addr_len); + return tcp_prot.recvmsg(sk, msg, len, flags, addr_len); if (unlikely(flags & MSG_PEEK)) - return peekmsg(sk, msg, len, nonblock, flags); + return peekmsg(sk, msg, len, flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) && sk->sk_state == TCP_ESTABLISHED) - sk_busy_loop(sk, nonblock); + sk_busy_loop(sk, flags & MSG_DONTWAIT); lock_sock(sk); csk = rcu_dereference_sk_user_data(sk); if (is_tls_rx(csk)) - return chtls_pt_recvmsg(sk, msg, len, nonblock, - flags, addr_len); + return chtls_pt_recvmsg(sk, msg, len, flags, addr_len); - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) diff --git a/include/net/ping.h b/include/net/ping.h index b68fbfdb606f..e4ff3911cbf5 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -71,7 +71,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info); int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *); -int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, +int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len); int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, void *user_icmph, size_t icmph_len); 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index bf3716fe83e0..a04999ee99b0 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -103,7 +103,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc); extern struct percpu_counter sctp_sockets_allocated; int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); -struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); +struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int *); typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *); void sctp_transport_walk_start(struct rhashtable_iter *iter); diff --git a/include/net/sock.h b/include/net/sock.h index 1a988e605f09..a01d6c421aa2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1202,8 +1202,7 @@ struct proto { int (*sendmsg)(struct sock *sk, struct msghdr *msg, size_t len); int (*recvmsg)(struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, - int *addr_len); + size_t len, int flags, int *addr_len); int (*sendpage)(struct sock *sk, struct page *page, int offset, size_t size, int flags); int (*bind)(struct sock *sk, diff --git a/include/net/tcp.h b/include/net/tcp.h index 6d50a662bf89..679b1964d494 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -407,7 +407,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); void tcp_syn_ack_timeout(const struct request_sock *req); -int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, +int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len); int tcp_set_rcvlowat(struct sock *sk, int val); int tcp_set_window_clamp(struct sock *sk, int val); diff --git a/include/net/tls.h b/include/net/tls.h index 6fe78361c8c8..b59f0a63292b 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -371,7 +371,7 @@ void tls_sw_free_resources_rx(struct sock *sk); void tls_sw_release_resources_rx(struct sock *sk); void tls_sw_free_ctx_rx(struct tls_context *tls_ctx); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len); + int flags, int *addr_len); bool tls_sw_sock_is_readable(struct sock *sk); ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, diff --git a/include/net/udp.h b/include/net/udp.h index f1c2a88c9005..b83a00330566 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -250,14 +250,14 @@ void udp_destruct_sock(struct sock *sk); void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len); int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb); void udp_skb_destructor(struct sock *sk, struct sk_buff *skb); -struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, - int noblock, int *off, int *err); +struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off, + int *err); static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags, - int noblock, int *err) + int *err) { int off = 0; - return __skb_recv_udp(sk, flags, noblock, &off, err); + return __skb_recv_udp(sk, flags, &off, err); } int udp_v4_early_demux(struct sk_buff *skb); diff --git a/net/core/sock.c b/net/core/sock.c index 05e0cfa064b6..29abec3eabd8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3506,8 +3506,7 @@ int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int 
addr_len = 0; int err; - err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, - flags & ~MSG_DONTWAIT, &addr_len); + err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; return err; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 671c377f0889..7dfc00c9fb32 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -293,8 +293,8 @@ int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); -int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, - int flags, int *addr_len); +int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len); void dccp_shutdown(struct sock *sk, int how); int inet_dccp_listen(struct socket *sock, int backlog); __poll_t dccp_poll(struct file *file, struct socket *sock, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index a976b4d29892..58421f94427e 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -791,8 +791,8 @@ out_discard: EXPORT_SYMBOL_GPL(dccp_sendmsg); -int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, - int flags, int *addr_len) +int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len) { const struct dccp_hdr *dh; long timeo; @@ -804,7 +804,7 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, goto out; } - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index a725dd9bbda8..f24852814fa3 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -308,13 +308,12 @@ out: } static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; - flags |= (noblock ? MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; @@ -696,7 +695,7 @@ out: } static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { size_t copied = 0; int err = -EOPNOTSUPP; @@ -704,7 +703,6 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, struct dgram_sock *ro = dgram_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ieee802154 *, saddr, msg->msg_name); - flags |= (noblock ? 
MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 72fde2888ad2..195ecfa2f000 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -836,7 +836,7 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, EXPORT_SYMBOL(inet_sendpage); INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *, - size_t, int, int, int *)); + size_t, int, int *)); int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { @@ -848,8 +848,7 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, sock_rps_record_flow(sk); err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg, - sk, msg, size, flags & MSG_DONTWAIT, - flags & ~MSG_DONTWAIT, &addr_len); + sk, msg, size, flags, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; return err; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 2897fcf71211..319c181bfbb6 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -844,8 +844,8 @@ do_confirm: goto out; } -int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; @@ -861,7 +861,6 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, if (flags & MSG_ERRQUEUE) return inet_recv_error(sk, msg, len, addr_len); - flags |= (noblock ? MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index c9dd9603f2e7..4056b0da85ea 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -753,7 +753,7 @@ out: */ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; @@ -769,7 +769,6 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, goto out; } - flags |= (noblock ? 
MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e31cf137c614..e20b87b3bf90 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1877,8 +1877,7 @@ static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, } static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, - struct scm_timestamping_internal *tss, + int flags, struct scm_timestamping_internal *tss, int *cmsg_flags); static int receive_fallback_to_copy(struct sock *sk, struct tcp_zerocopy_receive *zc, int inq, @@ -1900,7 +1899,7 @@ static int receive_fallback_to_copy(struct sock *sk, if (err) return err; - err = tcp_recvmsg_locked(sk, &msg, inq, /*nonblock=*/1, /*flags=*/0, + err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, tss, &zc->msg_flags); if (err < 0) return err; @@ -2316,8 +2315,7 @@ static int tcp_inq_hint(struct sock *sk) */ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, - struct scm_timestamping_internal *tss, + int flags, struct scm_timestamping_internal *tss, int *cmsg_flags) { struct tcp_sock *tp = tcp_sk(sk); @@ -2337,7 +2335,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, if (tp->recvmsg_inq) *cmsg_flags = TCP_CMSG_INQ; - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); /* Urgent data needs to be handled specially. */ if (flags & MSG_OOB) @@ -2556,8 +2554,8 @@ recv_sndq: goto out; } -int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, - int flags, int *addr_len) +int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len) { int cmsg_flags = 0, ret, inq; struct scm_timestamping_internal tss; @@ -2568,11 +2566,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) && sk->sk_state == TCP_ESTABLISHED) - sk_busy_loop(sk, nonblock); + sk_busy_loop(sk, flags & MSG_DONTWAIT); lock_sock(sk); - ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss, - &cmsg_flags); + ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); release_sock(sk); sk_defer_free_flush(sk); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 1cdcb4df0eb7..be3947e70fec 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -174,7 +174,6 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock, static int tcp_bpf_recvmsg_parser(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) { @@ -186,7 +185,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, psock = sk_psock_get(sk); if (unlikely(!psock)) - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + return tcp_recvmsg(sk, msg, len, flags, addr_len); lock_sock(sk); msg_bytes_ready: @@ -211,7 +210,7 @@ msg_bytes_ready: goto out; } - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); if (!timeo) { copied = -EAGAIN; goto out; @@ -234,7 +233,7 @@ out: } static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct sk_psock *psock; int copied, ret; @@ -244,11 +243,11 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, psock = sk_psock_get(sk); if (unlikely(!psock)) - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + return 
tcp_recvmsg(sk, msg, len, flags, addr_len); if (!skb_queue_empty(&sk->sk_receive_queue) && sk_psock_queue_empty(psock)) { sk_psock_put(sk, psock); - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + return tcp_recvmsg(sk, msg, len, flags, addr_len); } lock_sock(sk); msg_bytes_ready: @@ -257,14 +256,14 @@ msg_bytes_ready: long timeo; int data; - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); data = tcp_msg_wait_data(sk, psock, timeo); if (data) { if (!sk_psock_queue_empty(psock)) goto msg_bytes_ready; release_sock(sk); sk_psock_put(sk, psock); - return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + return tcp_recvmsg(sk, msg, len, flags, addr_len); } copied = -EAGAIN; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6b4d8361560f..aa8545ca6964 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1726,7 +1726,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) EXPORT_SYMBOL(udp_ioctl); struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, - int noblock, int *off, int *err) + int *off, int *err) { struct sk_buff_head *sk_queue = &sk->sk_receive_queue; struct sk_buff_head *queue; @@ -1735,7 +1735,6 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int error; queue = &udp_sk(sk)->reader_queue; - flags |= noblock ? MSG_DONTWAIT : 0; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { struct sk_buff *skb; @@ -1805,7 +1804,7 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc, struct sk_buff *skb; int err, used; - skb = skb_recv_udp(sk, 0, 1, &err); + skb = skb_recv_udp(sk, MSG_DONTWAIT, &err); if (!skb) return err; @@ -1843,8 +1842,8 @@ EXPORT_SYMBOL(udp_read_sock); * return it, otherwise we block. */ -int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len) { struct inet_sock *inet = inet_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); @@ -1859,7 +1858,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, try_again: off = sk_peek_offset(sk, flags); - skb = __skb_recv_udp(sk, flags, noblock, &off, &err); + skb = __skb_recv_udp(sk, flags, &off, &err); if (!skb) return err; diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c index bbe6569c9ad3..ff15918b7bdc 100644 --- a/net/ipv4/udp_bpf.c +++ b/net/ipv4/udp_bpf.c @@ -11,14 +11,13 @@ static struct proto *udpv6_prot_saved __read_mostly; static int sk_udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) - return udpv6_prot_saved->recvmsg(sk, msg, len, noblock, flags, - addr_len); + return udpv6_prot_saved->recvmsg(sk, msg, len, flags, addr_len); #endif - return udp_prot.recvmsg(sk, msg, len, noblock, flags, addr_len); + return udp_prot.recvmsg(sk, msg, len, flags, addr_len); } static bool udp_sk_has_data(struct sock *sk) @@ -61,7 +60,7 @@ static int udp_msg_wait_data(struct sock *sk, struct sk_psock *psock, } static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct sk_psock *psock; int copied, ret; @@ -71,10 +70,10 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, psock = sk_psock_get(sk); if (unlikely(!psock)) - return sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + 
return sk_udp_recvmsg(sk, msg, len, flags, addr_len); if (!psock_has_data(psock)) { - ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len); goto out; } @@ -84,12 +83,12 @@ msg_bytes_ready: long timeo; int data; - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); data = udp_msg_wait_data(sk, psock, timeo); if (data) { if (psock_has_data(psock)) goto msg_bytes_ready; - ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len); + ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len); goto out; } copied = -EAGAIN; diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 2878d8285caf..4ba7a88a1b1d 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -17,8 +17,8 @@ int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len); +int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); void udp_destroy_sock(struct sock *sk); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 7d7b7523d126..6595a78672c8 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -654,7 +654,7 @@ int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } INDIRECT_CALLABLE_DECLARE(int udpv6_recvmsg(struct sock *, struct msghdr *, - size_t, int, int, int *)); + size_t, int, int *)); int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { @@ -669,8 +669,7 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, /* IPV6_ADDRFORM can change sk->sk_prot under us. */ prot = READ_ONCE(sk->sk_prot); err = INDIRECT_CALL_2(prot->recvmsg, tcp_recvmsg, udpv6_recvmsg, - sk, msg, size, flags & MSG_DONTWAIT, - flags & ~MSG_DONTWAIT, &addr_len); + sk, msg, size, flags, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; return err; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 8bb41f3b246a..0d7c13d33d1a 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -460,7 +460,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) */ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); @@ -477,7 +477,6 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len, addr_len); - flags |= (noblock ? 
MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7f0fa9bd9ffe..db9449b52dbe 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -322,7 +322,7 @@ static int udp6_skb_len(struct sk_buff *skb) */ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); @@ -342,7 +342,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, try_again: off = sk_peek_offset(sk, flags); - skb = __skb_recv_udp(sk, flags, noblock, &off, &err); + skb = __skb_recv_udp(sk, flags, &off, &err); if (!skb) return err; diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index b2fcc46c1630..4251e49d32a0 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -20,8 +20,8 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname, int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); -int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len); +int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len); void udpv6_destroy_sock(struct sock *sk); #ifdef CONFIG_PROC_FS diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index c6a5cc2d88e7..6af09e188e52 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -515,7 +515,7 @@ no_route: } static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, int *addr_len) + size_t len, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; @@ -526,7 +526,6 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg, if (flags & MSG_OOB) goto out; - flags |= (noblock ? MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 97fde8a9209b..217c7192691e 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -657,7 +657,7 @@ do_confirm: } static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); @@ -671,7 +671,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len, addr_len); - flags |= (noblock ? 
MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 8f54293c1d88..0492aa9308c7 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2012,7 +2012,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk) } static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct mptcp_sock *msk = mptcp_sk(sk); struct scm_timestamping_internal tss; @@ -2030,7 +2030,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, goto out_err; } - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); len = min_t(size_t, len, INT_MAX); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index 3f2e62b63dd4..ff5f49ab236e 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c @@ -112,7 +112,7 @@ static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { struct sk_buff *skb = NULL; struct sockaddr_pn sa; @@ -123,7 +123,6 @@ static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, MSG_CMSG_COMPAT)) goto out_nofree; - flags |= (noblock ? MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &rval); if (skb == NULL) goto out_nofree; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 441a26706592..83ea13a50690 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -1239,7 +1239,7 @@ struct sk_buff *pep_read(struct sock *sk) } static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { struct sk_buff *skb; int err; @@ -1268,7 +1268,6 @@ static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, return -EINVAL; } - flags |= (noblock ? MSG_DONTWAIT : 0); skb = skb_recv_datagram(sk, flags, &err); lock_sock(sk); if (skb == NULL) { diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 3e1a9600be5e..4c7865f14064 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2084,7 +2084,7 @@ static int sctp_skb_pull(struct sk_buff *skb, int len) * 5 for complete description of the flags. */ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int noblock, int flags, int *addr_len) + int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; struct sctp_sock *sp = sctp_sk(sk); @@ -2093,9 +2093,8 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int err = 0; int skb_len; - pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " - "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, - addr_len); + pr_debug("%s: sk:%p, msghdr:%p, len:%zd, flags:0x%x, addr_len:%p)\n", + __func__, sk, msg, len, flags, addr_len); lock_sock(sk); @@ -2105,7 +2104,7 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, goto out; } - skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); + skb = sctp_skb_recv_datagram(sk, flags, &err); if (!skb) goto out; @@ -8978,14 +8977,13 @@ out: * Note: This is pretty much the same routine as in core/datagram.c * with a few changes to make lksctp work. 
*/ -struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, - int noblock, int *err) +struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int *err) { int error; struct sk_buff *skb; long timeo; - timeo = sock_rcvtimeo(sk, noblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, MAX_SCHEDULE_TIMEOUT); @@ -9018,7 +9016,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, break; if (sk_can_busy_loop(sk)) { - sk_busy_loop(sk, noblock); + sk_busy_loop(sk, flags & MSG_DONTWAIT); if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) continue; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 0c3d2b4d7321..8920ca92a011 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -1063,7 +1063,7 @@ void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, struct sk_buff *skb; int err; - skb = sctp_skb_recv_datagram(sk, MSG_PEEK, 1, &err); + skb = sctp_skb_recv_datagram(sk, MSG_PEEK | MSG_DONTWAIT, &err); if (skb != NULL) { __sctp_ulpevent_read_nxtinfo(sctp_skb2event(skb), msghdr, skb); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 478f857cdaed..a21f5684345f 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -464,7 +464,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) 0, 0, MSG_PEEK | MSG_DONTWAIT); if (err < 0) goto out_recv_err; - skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err); + skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err); if (!skb) goto out_recv_err; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 78af7518f263..d11aa2b7c532 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1337,7 +1337,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport) if (sk == NULL) goto out; for (;;) { - skb = skb_recv_udp(sk, 0, 1, &err); + skb = skb_recv_udp(sk, MSG_DONTWAIT, &err); if (skb == NULL) break; xs_udp_data_read_skb(&transport->xprt, sk, skb); diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 2e8a896af81a..17c4e236ec8b 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1722,7 +1722,6 @@ static int process_rx_list(struct tls_sw_context_rx *ctx, int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) { @@ -1744,8 +1743,6 @@ int tls_sw_recvmsg(struct sock *sk, bool bpf_strp_enabled; bool zc_capable; - flags |= nonblock; - if (unlikely(flags & MSG_ERRQUEUE)) return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fecbd95da918..e1dd9e9c8452 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2484,8 +2484,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t si const struct proto *prot = READ_ONCE(sk->sk_prot); if (prot != &unix_dgram_proto) - return prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, - flags & ~MSG_DONTWAIT, NULL); + return prot->recvmsg(sk, msg, size, flags, NULL); #endif return __unix_dgram_recvmsg(sk, msg, size, flags); } @@ -2917,8 +2916,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, const struct proto *prot = READ_ONCE(sk->sk_prot); if (prot != &unix_stream_proto) - return prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, - flags & ~MSG_DONTWAIT, NULL); + return prot->recvmsg(sk, msg, size, flags, NULL); #endif return unix_stream_read_generic(&state, true); } diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c index 452376c6f419..7cf14c6b1725 100644 --- a/net/unix/unix_bpf.c +++ 
b/net/unix/unix_bpf.c @@ -48,8 +48,7 @@ static int __unix_recvmsg(struct sock *sk, struct msghdr *msg, } static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg, - size_t len, int nonblock, int flags, - int *addr_len) + size_t len, int flags, int *addr_len) { struct unix_sock *u = unix_sk(sk); struct sk_psock *psock; @@ -73,7 +72,7 @@ msg_bytes_ready: long timeo; int data; - timeo = sock_rcvtimeo(sk, nonblock); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); data = unix_msg_wait_data(sk, psock, timeo); if (data) { if (!sk_psock_queue_empty(psock)) diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 1f08ebf7d80c..82d14eea1b5a 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -131,7 +131,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb) } static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int nonblock, int flags, int *addr_len) + int flags, int *addr_len) { struct espintcp_ctx *ctx = espintcp_getctx(sk); struct sk_buff *skb; @@ -139,8 +139,6 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int copied; int off = 0; - flags |= nonblock ? MSG_DONTWAIT : 0; - skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err); if (!skb) { if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) -- cgit v1.2.3 From 24584d4f0afc75faa57883dd694e0df61af0ad40 Mon Sep 17 00:00:00 2001 From: Peter Seiderer Date: Sat, 2 Apr 2022 17:30:13 +0200 Subject: ath9k: fix ath_get_rate_txpower() to respect the rate list end tag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop reading (and copying) from ieee80211_tx_rate to ath_tx_info.rates after list end tag (count == 0, idx < 0), prevents copying of garbage to card registers. Note: no need to write to the remaining ath_tx_info.rates entries as the complete ath_tx_info struct is already initialized to zero from both call sites. Signed-off-by: Peter Seiderer Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220402153014.31332-1-ps.report@gmx.net --- drivers/net/wireless/ath/ath9k/xmit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d0caf1de2bde..ec9bad2d9510 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1271,7 +1271,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int phy; if (!rates[i].count || (rates[i].idx < 0)) - continue; + break; rix = rates[i].idx; info->rates[i].Tries = rates[i].count; -- cgit v1.2.3 From 405342ebea2ab776df070ee54c308f1f59723844 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 7 Apr 2022 11:28:20 +0100 Subject: ath11k: Fix spelling mistake "reseting" -> "resetting" There is a spelling mistake in an ath11k_warn message. Fix it. 
Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407102820.613881-1-colin.i.king@gmail.com --- drivers/net/wireless/ath/ath11k/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index cbac1919867f..1537ec0ae2e7 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1567,7 +1567,7 @@ static void ath11k_core_reset(struct work_struct *work) * completed, then the second reset worker will destroy the previous one, * thus below is to avoid that. */ - ath11k_warn(ab, "already reseting count %d\n", reset_count); + ath11k_warn(ab, "already resetting count %d\n", reset_count); reinit_completion(&ab->reset_complete); time_left = wait_for_completion_timeout(&ab->reset_complete, -- cgit v1.2.3 From 2578171ff85ed71d2575cd6abcd9c37c4b642b52 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Fri, 8 Apr 2022 08:01:12 +0800 Subject: wcn36xx: clean up some inconsistent indenting Eliminate the follow smatch warning: drivers/net/wireless/ath/wcn36xx/smd.c:3151 wcn36xx_smd_gtk_offload_get_info_rsp() warn: inconsistent indenting Reported-by: Abaci Robot Signed-off-by: Yang Li Acked-by: Loic Poulain Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408000113.129906-1-yang.lee@linux.alibaba.com --- drivers/net/wireless/ath/wcn36xx/smd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 872b37875c57..691e637b94f1 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -3148,9 +3148,9 @@ static int wcn36xx_smd_gtk_offload_get_info_rsp(struct wcn36xx *wcn, cpu_to_le64(rsp->key_replay_counter); ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); - wcn36xx_dbg(WCN36XX_DBG_HAL, - "GTK replay counter increment %llu\n", - rsp->key_replay_counter); + wcn36xx_dbg(WCN36XX_DBG_HAL, + "GTK replay counter increment %llu\n", + rsp->key_replay_counter); } wcn36xx_dbg(WCN36XX_DBG_HAL, -- cgit v1.2.3 From d7ceee8051ba226f1a481edadd06e34e200fcbab Mon Sep 17 00:00:00 2001 From: Yang Li Date: Fri, 8 Apr 2022 08:01:13 +0800 Subject: ath9k: Remove unnecessary print function dev_err() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The print function dev_err() is redundant because platform_get_irq_byname() already prints an error. 
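For context, platform_get_irq() — the call actually used in this probe path — already emits a device-specific "IRQ index not found" style error when the lookup fails, so the caller only needs to propagate the negative return value. A minimal illustrative sketch of the resulting pattern (the foo_probe() name is hypothetical, not from this patch):

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	/* platform_get_irq() logs its own error message on failure */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* just propagate; no extra dev_err() needed */

	/* ... request the IRQ and finish the rest of device setup ... */
	return 0;
}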
Eliminate the follow coccicheck warning: ./drivers/net/wireless/ath/ath9k/ahb.c:103:2-9: line 103 is redundant because platform_get_irq() already prints an error Reported-by: Abaci Robot Signed-off-by: Yang Li Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408000113.129906-2-yang.lee@linux.alibaba.com --- drivers/net/wireless/ath/ath9k/ahb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index c9b853af41d1..9cd12b20b18d 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -99,10 +99,8 @@ static int ath_ahb_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "no IRQ resource found\n"); + if (irq < 0) return irq; - } ath9k_fill_chanctx_ops(); hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); -- cgit v1.2.3 From 5ddfffd6da9b94e5f6397843ad1a54d6a211f652 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Fri, 8 Apr 2022 08:13:41 +0800 Subject: rtw89: ser: fix unannotated fall-through add `break` to fix warning of unannotated fall-through between switch labels. Fixes: 14f9f4790048 ("rtw89: ser: control hci interrupts on/off by state") Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/ser.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 25d1df10f226..5aebd6839d29 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -394,6 +394,7 @@ static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt) break; case SER_EV_STATE_OUT: rtw89_hci_recovery_start(rtwdev); + break; default: break; } -- cgit v1.2.3 From eeadcd2a47f855d14ba85f46a8b9206182ea110c Mon Sep 17 00:00:00 2001 From: Chia-Yuan Li Date: Fri, 8 Apr 2022 08:13:42 +0800 Subject: rtw89: ser: configure D-MAC interrupt mask These interrupts are used by firmware to recover hardware. Create functions to set specific D-MAC masks to replace plain register settings. 
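For reference, the patch routes all of these masks through a per-chip struct rtw89_imr_info reached via rtwdev->chip->imr_info, so RTL8852A and RTL8852C can supply different bit sets while sharing the same enable helpers. A condensed sketch of the resulting clear-then-set idiom (the helper name below is illustrative; the struct, accessor and register names are taken from the diff that follows):

static void rtw89_example_imr_enable(struct rtw89_dev *rtwdev)
{
	const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;

	/* first clear the interrupt-enable bits this chip wants masked out... */
	rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_clr);
	/* ...then turn on the chip-specific error interrupt sources */
	rtw89_write32_set(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_set);
}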
Signed-off-by: Chia-Yuan Li Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 29 + drivers/net/wireless/realtek/rtw89/mac.c | 160 ++++- drivers/net/wireless/realtek/rtw89/reg.h | 863 ++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852a.c | 29 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 29 + 5 files changed, 1091 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index fdf593e68660..63aa88b705bc 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2293,6 +2293,34 @@ struct rtw89_page_regs { u32 wp_page_info1; }; +struct rtw89_imr_info { + u32 wdrls_imr_set; + u32 wsec_imr_reg; + u32 wsec_imr_set; + u32 mpdu_tx_imr_set; + u32 mpdu_rx_imr_set; + u32 sta_sch_imr_set; + u32 txpktctl_imr_b0_reg; + u32 txpktctl_imr_b0_clr; + u32 txpktctl_imr_b0_set; + u32 txpktctl_imr_b1_reg; + u32 txpktctl_imr_b1_clr; + u32 txpktctl_imr_b1_set; + u32 wde_imr_clr; + u32 wde_imr_set; + u32 ple_imr_clr; + u32 ple_imr_set; + u32 host_disp_imr_clr; + u32 host_disp_imr_set; + u32 cpu_disp_imr_clr; + u32 cpu_disp_imr_set; + u32 other_disp_imr_clr; + u32 other_disp_imr_set; + u32 bbrpt_chinfo_err_imr_reg; + u32 bbrpt_err_imr_set; + u32 bbrpt_dfs_err_imr_reg; +}; + struct rtw89_chip_info { enum rtw89_core_chip_id chip_id; const struct rtw89_chip_ops *ops; @@ -2378,6 +2406,7 @@ struct rtw89_chip_info { const struct rtw89_page_regs *page_regs; const struct rtw89_reg_def *dcfo_comp; u8 dcfo_comp_sft; + const struct rtw89_imr_info *imr_info; }; union rtw89_bus_info { diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index c28f5a7c23b5..77fb9b74019e 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2607,6 +2607,136 @@ static int band1_enable(struct rtw89_dev *rtwdev) return 0; } +static void rtw89_wdrls_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, B_AX_WDRLS_IMR_EN_CLR); + rtw89_write32_set(rtwdev, R_AX_WDRLS_ERR_IMR, imr->wdrls_imr_set); +} + +static void rtw89_wsec_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_set(rtwdev, imr->wsec_imr_reg, imr->wsec_imr_set); +} + +static void rtw89_mpdu_trx_imr_enable(struct rtw89_dev *rtwdev) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR, + B_AX_TX_GET_ERRPKTID_INT_EN | + B_AX_TX_NXT_ERRPKTID_INT_EN | + B_AX_TX_MPDU_SIZE_ZERO_INT_EN | + B_AX_TX_OFFSET_ERR_INT_EN | + B_AX_TX_HDR3_SIZE_ERR_INT_EN); + if (chip_id == RTL8852C) + rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR, + B_AX_TX_ETH_TYPE_ERR_EN | + B_AX_TX_LLC_PRE_ERR_EN | + B_AX_TX_NW_TYPE_ERR_EN | + B_AX_TX_KSRCH_ERR_EN); + rtw89_write32_set(rtwdev, R_AX_MPDU_TX_ERR_IMR, + imr->mpdu_tx_imr_set); + + rtw89_write32_clr(rtwdev, R_AX_MPDU_RX_ERR_IMR, + B_AX_GETPKTID_ERR_INT_EN | + B_AX_MHDRLEN_ERR_INT_EN | + B_AX_RPT_ERR_INT_EN); + rtw89_write32_set(rtwdev, R_AX_MPDU_RX_ERR_IMR, + imr->mpdu_rx_imr_set); +} + +static void rtw89_sta_sch_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + 
+ rtw89_write32_clr(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR, + B_AX_SEARCH_HANG_TIMEOUT_INT_EN | + B_AX_RPT_HANG_TIMEOUT_INT_EN | + B_AX_PLE_B_PKTID_ERR_INT_EN); + rtw89_write32_set(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR, + imr->sta_sch_imr_set); +} + +static void rtw89_txpktctl_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b0_reg, + imr->txpktctl_imr_b0_clr); + rtw89_write32_set(rtwdev, imr->txpktctl_imr_b0_reg, + imr->txpktctl_imr_b0_set); + rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b1_reg, + imr->txpktctl_imr_b1_clr); + rtw89_write32_set(rtwdev, imr->txpktctl_imr_b1_reg, + imr->txpktctl_imr_b1_set); +} + +static void rtw89_wde_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_clr(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_clr); + rtw89_write32_set(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_set); +} + +static void rtw89_ple_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_clr); + rtw89_write32_set(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_set); +} + +static void rtw89_pktin_imr_enable(struct rtw89_dev *rtwdev) +{ + rtw89_write32_set(rtwdev, R_AX_PKTIN_ERR_IMR, + B_AX_PKTIN_GETPKTID_ERR_INT_EN); +} + +static void rtw89_dispatcher_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, + imr->host_disp_imr_clr); + rtw89_write32_set(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, + imr->host_disp_imr_set); + rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, + imr->cpu_disp_imr_clr); + rtw89_write32_set(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, + imr->cpu_disp_imr_set); + rtw89_write32_clr(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR, + imr->other_disp_imr_clr); + rtw89_write32_set(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR, + imr->other_disp_imr_set); +} + +static void rtw89_cpuio_imr_enable(struct rtw89_dev *rtwdev) +{ + rtw89_write32_clr(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_CLR); + rtw89_write32_set(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_SET); +} + +static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + + rtw89_write32_set(rtwdev, R_AX_BBRPT_COM_ERR_IMR, + B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN); + rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg, + B_AX_BBRPT_CHINFO_IMR_CLR); + rtw89_write32_set(rtwdev, imr->bbrpt_chinfo_err_imr_reg, + imr->bbrpt_err_imr_set); + rtw89_write32_set(rtwdev, imr->bbrpt_dfs_err_imr_reg, + B_AX_BBRPT_DFS_TO_ERR_INT_EN); + rtw89_write32_set(rtwdev, R_AX_LA_ERRFLAG, B_AX_LA_IMR_DATA_LOSS_ERR); +} + static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, enum rtw89_mac_hwmod_sel sel) { @@ -2621,25 +2751,17 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, } if (sel == RTW89_DMAC_SEL) { - rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, - B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | - B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | - B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN); - rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1, - B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | - B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN); - rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, - B_AX_HDT_PKT_FAIL_DBG_INT_EN | - B_AX_HDT_OFFSET_UNMATCH_INT_EN); - rtw89_write32_clr(rtwdev, 
R_AX_CPU_DISPATCHER_ERR_IMR, - B_AX_CPU_SHIFT_EN_ERR_INT_EN); - rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, - B_AX_PLE_GETNPG_STRPG_ERR_INT_EN); - rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, - B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN); - rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN); - rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, - B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN); + rtw89_wdrls_imr_enable(rtwdev); + rtw89_wsec_imr_enable(rtwdev); + rtw89_mpdu_trx_imr_enable(rtwdev); + rtw89_sta_sch_imr_enable(rtwdev); + rtw89_txpktctl_imr_enable(rtwdev); + rtw89_wde_imr_enable(rtwdev); + rtw89_ple_imr_enable(rtwdev); + rtw89_pktin_imr_enable(rtwdev); + rtw89_dispatcher_imr_enable(rtwdev); + rtw89_cpuio_imr_enable(rtwdev); + rtw89_bbrpt_imr_enable(rtwdev); } else if (sel == RTW89_CMAC_SEL) { reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); rtw89_write32_clr(rtwdev, reg, diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index a0c60528b578..9e445bf9b9d4 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -527,13 +527,361 @@ #define B_AX_HOST_ADDR_INFO_8B_SEL BIT(0) #define R_AX_HOST_DISPATCHER_ERR_IMR 0x8850 +#define B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN BIT(31) +#define B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN BIT(30) +#define B_AX_HDT_CHKSUM_FSM_ERR_INT_EN BIT(29) +#define B_AX_HDT_SHIFT_DMA_CFG_ERR_INT_EN BIT(28) +#define B_AX_HDT_DMA_PROCESS_ERR_INT_EN BIT(27) +#define B_AX_HDT_TOTAL_LEN_ERR_INT_EN BIT(26) +#define B_AX_HDT_SHIFT_EN_ERR_INT_EN BIT(25) +#define B_AX_HDT_RXAGG_CFG_ERR_INT_EN BIT(24) +#define B_AX_HDT_OUTPUT_ERR_INT_EN BIT(21) +#define B_AX_HDT_RES_ERR_INT_EN BIT(20) +#define B_AX_HDT_BURST_NUM_ERR_INT_EN BIT(19) +#define B_AX_HDT_NULLPKT_ERR_INT_EN BIT(18) +#define B_AX_HDT_FLOW_CTRL_ERR_INT_EN BIT(17) +#define B_AX_HDT_PLD_CMD_UNDERFLOW_INT_EN BIT(16) +#define B_AX_HDT_PLD_CMD_OVERLOW_INT_EN BIT(15) +#define B_AX_HDT_TX_WRITE_UNDERFLOW_INT_EN BIT(14) +#define B_AX_HDT_TX_WRITE_OVERFLOW_INT_EN BIT(13) +#define B_AX_HDT_TCP_CHK_ERR_INT_EN BIT(12) +#define B_AX_HDT_TXPKTSIZE_ERR_INT_EN BIT(11) +#define B_AX_HDT_PRE_COST_ERR_INT_EN BIT(10) +#define B_AX_HDT_WD_CHK_ERR_INT_EN BIT(9) +#define B_AX_HDT_CHANNEL_DMA_ERR_INT_EN BIT(8) #define B_AX_HDT_OFFSET_UNMATCH_INT_EN BIT(7) +#define B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN BIT(6) +#define B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN BIT(5) +#define B_AX_HDT_PERMU_UNDERFLOW_INT_EN BIT(4) +#define B_AX_HDT_PERMU_OVERFLOW_INT_EN BIT(3) #define B_AX_HDT_PKT_FAIL_DBG_INT_EN BIT(2) +#define B_AX_HDT_CHANNEL_ID_ERR_INT_EN BIT(1) +#define B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN BIT(0) +#define B_AX_HOST_DISP_IMR_CLR (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \ + B_AX_HDT_CHANNEL_ID_ERR_INT_EN | \ + B_AX_HDT_PKT_FAIL_DBG_INT_EN | \ + B_AX_HDT_PERMU_OVERFLOW_INT_EN | \ + B_AX_HDT_PERMU_UNDERFLOW_INT_EN | \ + B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \ + B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \ + B_AX_HDT_OFFSET_UNMATCH_INT_EN | \ + B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \ + B_AX_HDT_WD_CHK_ERR_INT_EN | \ + B_AX_HDT_PRE_COST_ERR_INT_EN | \ + B_AX_HDT_TXPKTSIZE_ERR_INT_EN | \ + B_AX_HDT_TCP_CHK_ERR_INT_EN | \ + B_AX_HDT_TX_WRITE_OVERFLOW_INT_EN | \ + B_AX_HDT_TX_WRITE_UNDERFLOW_INT_EN | \ + B_AX_HDT_PLD_CMD_OVERLOW_INT_EN | \ + B_AX_HDT_PLD_CMD_UNDERFLOW_INT_EN | \ + B_AX_HDT_FLOW_CTRL_ERR_INT_EN | \ + B_AX_HDT_NULLPKT_ERR_INT_EN | \ + B_AX_HDT_BURST_NUM_ERR_INT_EN | \ + B_AX_HDT_RXAGG_CFG_ERR_INT_EN | \ + B_AX_HDT_SHIFT_EN_ERR_INT_EN | \ + B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \ + 
B_AX_HDT_DMA_PROCESS_ERR_INT_EN | \ + B_AX_HDT_SHIFT_DMA_CFG_ERR_INT_EN | \ + B_AX_HDT_CHKSUM_FSM_ERR_INT_EN | \ + B_AX_HDT_RX_WRITE_OVERFLOW_INT_EN | \ + B_AX_HDT_RX_WRITE_UNDERFLOW_INT_EN) +#define B_AX_HOST_DISP_IMR_SET (B_AX_HDT_CHANNEL_DIFF_ERR_INT_EN | \ + B_AX_HDT_PAYLOAD_OVERFLOW_INT_EN | \ + B_AX_HDT_PAYLOAD_UNDERFLOW_INT_EN | \ + B_AX_HDT_CHANNEL_DMA_ERR_INT_EN | \ + B_AX_HDT_TOTAL_LEN_ERR_INT_EN | \ + B_AX_HDT_DMA_PROCESS_ERR_INT_EN) + +#define B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN BIT(31) +#define B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN BIT(30) +#define B_AX_HR_CHKSUM_FSM_ERR_INT_EN BIT(29) +#define B_AX_HR_SHIFT_DMA_CFG_ERR_INT_EN BIT(28) +#define B_AX_HR_DMA_PROCESS_ERR_INT_EN BIT(27) +#define B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(26) +#define B_AX_HR_SHIFT_EN_ERR_INT_EN BIT(25) +#define B_AX_HR_AGG_CFG_ERR_INT_EN BIT(24) +#define B_AX_HR_DMA_RD_CNT_DEQ_ERR_INT_EN BIT(23) +#define B_AX_HR_PLD_LEN_ZERO_ERR_INT_EN BIT(22) +#define B_AX_HT_ILL_CH_ERR_INT_EN BIT(20) +#define B_AX_HT_ADDR_INFO_LEN_ERR_INT_EN BIT(18) +#define B_AX_HT_WD_LEN_OVER_ERR_INT_EN BIT(17) +#define B_AX_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(16) +#define B_AX_HT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(15) +#define B_AX_HT_WRFF_UNDERFLOW_ERR_INT_EN BIT(14) +#define B_AX_HT_WRFF_OVERFLOW_ERR_INT_EN BIT(13) +#define B_AX_HT_CHKSUM_FSM_ERR_INT_EN BIT(12) +#define B_AX_HT_TXPKTSIZE_ERR_INT_EN BIT(11) +#define B_AX_HT_PRE_SUB_ERR_INT_EN BIT(10) +#define B_AX_HT_WD_CHKSUM_ERR_INT_EN BIT(9) +#define B_AX_HT_CHANNEL_DMA_ERR_INT_EN BIT(8) +#define B_AX_HT_OFFSET_UNMATCH_ERR_INT_EN BIT(7) +#define B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN BIT(6) +#define B_AX_HT_PAYLOAD_OVER_ERR_INT_EN BIT(5) +#define B_AX_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4) +#define B_AX_HT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3) +#define B_AX_HT_PKT_FAIL_ERR_INT_EN BIT(2) +#define B_AX_HT_CH_ID_ERR_INT_EN BIT(1) +#define B_AX_HT_EP_CH_DIFF_ERR_INT_EN BIT(0) +#define B_AX_HOST_DISP_IMR_CLR_V1 (B_AX_HT_EP_CH_DIFF_ERR_INT_EN | \ + B_AX_HT_CH_ID_ERR_INT_EN | \ + B_AX_HT_PKT_FAIL_ERR_INT_EN | \ + B_AX_HT_PERMU_FF_OVERFLOW_ERR_INT_EN | \ + B_AX_HT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \ + B_AX_HT_PAYLOAD_OVER_ERR_INT_EN | \ + B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN | \ + B_AX_HT_OFFSET_UNMATCH_ERR_INT_EN | \ + B_AX_HT_CHANNEL_DMA_ERR_INT_EN | \ + B_AX_HT_WD_CHKSUM_ERR_INT_EN | \ + B_AX_HT_PRE_SUB_ERR_INT_EN | \ + B_AX_HT_TXPKTSIZE_ERR_INT_EN | \ + B_AX_HT_CHKSUM_FSM_ERR_INT_EN | \ + B_AX_HT_WRFF_OVERFLOW_ERR_INT_EN | \ + B_AX_HT_WRFF_UNDERFLOW_ERR_INT_EN | \ + B_AX_HT_PLD_CMD_OVERFLOW_ERR_INT_EN | \ + B_AX_HT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \ + B_AX_HT_WD_LEN_OVER_ERR_INT_EN | \ + B_AX_HT_ADDR_INFO_LEN_ERR_INT_EN | \ + B_AX_HT_ILL_CH_ERR_INT_EN | \ + B_AX_HR_PLD_LEN_ZERO_ERR_INT_EN | \ + B_AX_HR_DMA_RD_CNT_DEQ_ERR_INT_EN | \ + B_AX_HR_AGG_CFG_ERR_INT_EN | \ + B_AX_HR_SHIFT_EN_ERR_INT_EN | \ + B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \ + B_AX_HR_DMA_PROCESS_ERR_INT_EN | \ + B_AX_HR_SHIFT_DMA_CFG_ERR_INT_EN | \ + B_AX_HR_CHKSUM_FSM_ERR_INT_EN | \ + B_AX_HR_WRFF_OVERFLOW_ERR_INT_EN | \ + B_AX_HR_WRFF_UNDERFLOW_ERR_INT_EN) +#define B_AX_HOST_DISP_IMR_SET_V1 (B_AX_HT_PAYLOAD_OVER_ERR_INT_EN | \ + B_AX_HT_PAYLOAD_UNDER_ERR_INT_EN | \ + B_AX_HT_ILL_CH_ERR_INT_EN | \ + B_AX_HR_TOTAL_LEN_UNDER_ERR_INT_EN | \ + B_AX_HR_DMA_PROCESS_ERR_INT_EN) #define R_AX_CPU_DISPATCHER_ERR_IMR 0x8854 +#define B_AX_CPU_RX_WRITE_UNDERFLOW_INT_EN BIT(31) +#define B_AX_CPU_RX_WRITE_OVERFLOW_INT_EN BIT(30) +#define B_AX_CPU_CHKSUM_FSM_ERR_INT_EN BIT(29) +#define B_AX_CPU_SHIFT_DMA_CFG_ERR_INT_EN BIT(28) +#define 
B_AX_CPU_DMA_PROCESS_ERR_INT_EN BIT(27) +#define B_AX_CPU_TOTAL_LEN_ERR_INT_EN BIT(26) #define B_AX_CPU_SHIFT_EN_ERR_INT_EN BIT(25) +#define B_AX_CPU_RXAGG_CFG_ERR_INT_EN BIT(24) +#define B_AX_CPU_OUTPUT_ERR_INT_EN BIT(20) +#define B_AX_CPU_RESP_ERR_INT_EN BIT(19) +#define B_AX_CPU_BURST_NUM_ERR_INT_EN BIT(18) +#define B_AX_CPU_NULLPKT_ERR_INT_EN BIT(17) +#define B_AX_CPU_FLOW_CTRL_ERR_INT_EN BIT(16) +#define B_AX_CPU_F2P_SEQ_ERR_INT_EN BIT(15) +#define B_AX_CPU_F2P_QSEL_ERR_INT_EN BIT(14) +#define B_AX_CPU_PLD_CMD_UNDERFLOW_INT_EN BIT(13) +#define B_AX_CPU_PLD_CMD_OVERLOW_INT_EN BIT(12) +#define B_AX_CPU_PRE_COST_ERR_INT_EN BIT(11) +#define B_AX_CPU_WD_CHK_ERR_INT_EN BIT(10) +#define B_AX_CPU_CHANNEL_DMA_ERR_INT_EN BIT(9) +#define B_AX_CPU_OFFSET_UNMATCH_INT_EN BIT(8) +#define B_AX_CPU_PAYLOAD_CHKSUM_ERR_INT_EN BIT(7) +#define B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN BIT(6) +#define B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN BIT(5) +#define B_AX_CPU_PERMU_UNDERFLOW_INT_EN BIT(4) +#define B_AX_CPU_PERMU_OVERFLOW_INT_EN BIT(3) +#define B_AX_CPU_CHANNEL_ID_ERR_INT_EN BIT(2) +#define B_AX_CPU_PKT_FAIL_DBG_INT_EN BIT(1) +#define B_AX_CPU_CHANNEL_DIFF_ERR_INT_EN BIT(0) +#define B_AX_CPU_DISP_IMR_CLR (B_AX_CPU_CHANNEL_DIFF_ERR_INT_EN | \ + B_AX_CPU_PKT_FAIL_DBG_INT_EN | \ + B_AX_CPU_CHANNEL_ID_ERR_INT_EN | \ + B_AX_CPU_PERMU_OVERFLOW_INT_EN | \ + B_AX_CPU_PERMU_UNDERFLOW_INT_EN | \ + B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN | \ + B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN | \ + B_AX_CPU_PAYLOAD_CHKSUM_ERR_INT_EN | \ + B_AX_CPU_OFFSET_UNMATCH_INT_EN | \ + B_AX_CPU_CHANNEL_DMA_ERR_INT_EN | \ + B_AX_CPU_WD_CHK_ERR_INT_EN | \ + B_AX_CPU_PRE_COST_ERR_INT_EN | \ + B_AX_CPU_PLD_CMD_OVERLOW_INT_EN | \ + B_AX_CPU_PLD_CMD_UNDERFLOW_INT_EN | \ + B_AX_CPU_F2P_QSEL_ERR_INT_EN | \ + B_AX_CPU_F2P_SEQ_ERR_INT_EN | \ + B_AX_CPU_FLOW_CTRL_ERR_INT_EN | \ + B_AX_CPU_NULLPKT_ERR_INT_EN | \ + B_AX_CPU_BURST_NUM_ERR_INT_EN | \ + B_AX_CPU_RXAGG_CFG_ERR_INT_EN | \ + B_AX_CPU_SHIFT_EN_ERR_INT_EN | \ + B_AX_CPU_TOTAL_LEN_ERR_INT_EN | \ + B_AX_CPU_DMA_PROCESS_ERR_INT_EN | \ + B_AX_CPU_SHIFT_DMA_CFG_ERR_INT_EN | \ + B_AX_CPU_CHKSUM_FSM_ERR_INT_EN | \ + B_AX_CPU_RX_WRITE_OVERFLOW_INT_EN | \ + B_AX_CPU_RX_WRITE_UNDERFLOW_INT_EN) +#define B_AX_CPU_DISP_IMR_SET (B_AX_CPU_PKT_FAIL_DBG_INT_EN | \ + B_AX_CPU_PAYLOAD_OVERFLOW_INT_EN | \ + B_AX_CPU_PAYLOAD_UNDERFLOW_INT_EN | \ + B_AX_CPU_TOTAL_LEN_ERR_INT_EN) + +#define B_AX_CR_PLD_LEN_ERR_INT_EN BIT(30) +#define B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN BIT(29) +#define B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN BIT(28) +#define B_AX_CR_SHIFT_DMA_CFG_ERR_INT_EN BIT(27) +#define B_AX_CR_DMA_PROCESS_ERR_INT_EN BIT(26) +#define B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN BIT(25) +#define B_AX_CR_SHIFT_EN_ERR_INT_EN BIT(24) +#define B_AX_REUSE_FIFO_B_UNDER_ERR_INT_EN BIT(22) +#define B_AX_REUSE_FIFO_B_OVER_ERR_INT_EN BIT(21) +#define B_AX_REUSE_FIFO_A_UNDER_ERR_INT_EN BIT(20) +#define B_AX_REUSE_FIFO_A_OVER_ERR_INT_EN BIT(19) +#define B_AX_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN BIT(17) +#define B_AX_CT_WD_LEN_OVER_ERR_INT_EN BIT(16) +#define B_AX_CT_F2P_SEQ_ERR_INT_EN BIT(15) +#define B_AX_CT_F2P_QSEL_ERR_INT_EN BIT(14) +#define B_AX_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN BIT(13) +#define B_AX_CT_PLD_CMD_OVERFLOW_ERR_INT_EN BIT(12) +#define B_AX_CT_PRE_SUB_ERR_INT_EN BIT(11) +#define B_AX_CT_WD_CHKSUM_ERR_INT_EN BIT(10) +#define B_AX_CT_CHANNEL_DMA_ERR_INT_EN BIT(9) +#define B_AX_CT_OFFSET_UNMATCH_ERR_INT_EN BIT(8) +#define B_AX_CT_PAYLOAD_CHKSUM_ERR_INT_EN BIT(7) +#define B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN BIT(6) +#define B_AX_CT_PAYLOAD_OVER_ERR_INT_EN 
BIT(5) +#define B_AX_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN BIT(4) +#define B_AX_CT_PERMU_FF_OVERFLOW_ERR_INT_EN BIT(3) +#define B_AX_CT_CH_ID_ERR_INT_EN BIT(2) +#define B_AX_CT_EP_CH_DIFF_ERR_INT_EN BIT(0) +#define B_AX_CPU_DISP_IMR_CLR_V1 (B_AX_CT_EP_CH_DIFF_ERR_INT_EN | \ + B_AX_CT_CH_ID_ERR_INT_EN | \ + B_AX_CT_PERMU_FF_OVERFLOW_ERR_INT_EN | \ + B_AX_CT_PERMU_FF_UNDERFLOW_ERR_INT_EN | \ + B_AX_CT_PAYLOAD_OVER_ERR_INT_EN | \ + B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN | \ + B_AX_CT_PAYLOAD_CHKSUM_ERR_INT_EN | \ + B_AX_CT_OFFSET_UNMATCH_ERR_INT_EN | \ + B_AX_CT_CHANNEL_DMA_ERR_INT_EN | \ + B_AX_CT_WD_CHKSUM_ERR_INT_EN | \ + B_AX_CT_PRE_SUB_ERR_INT_EN | \ + B_AX_CT_PLD_CMD_OVERFLOW_ERR_INT_EN | \ + B_AX_CT_PLD_CMD_UNDERFLOW_ERR_INT_EN | \ + B_AX_CT_F2P_QSEL_ERR_INT_EN | \ + B_AX_CT_F2P_SEQ_ERR_INT_EN | \ + B_AX_CT_WD_LEN_OVER_ERR_INT_EN | \ + B_AX_CT_ADDR_INFO_LEN_MISS_ERR_INT_EN | \ + B_AX_REUSE_FIFO_A_OVER_ERR_INT_EN | \ + B_AX_REUSE_FIFO_A_UNDER_ERR_INT_EN | \ + B_AX_REUSE_FIFO_B_OVER_ERR_INT_EN | \ + B_AX_REUSE_FIFO_B_UNDER_ERR_INT_EN | \ + B_AX_CR_SHIFT_EN_ERR_INT_EN | \ + B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN | \ + B_AX_CR_DMA_PROCESS_ERR_INT_EN | \ + B_AX_CR_SHIFT_DMA_CFG_ERR_INT_EN | \ + B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN | \ + B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN | \ + B_AX_CR_PLD_LEN_ERR_INT_EN) +#define B_AX_CPU_DISP_IMR_SET_V1 (B_AX_CT_PAYLOAD_OVER_ERR_INT_EN | \ + B_AX_CT_PAYLOAD_UNDER_ERR_INT_EN | \ + B_AX_CR_TOTAL_LEN_UNDER_ERR_INT_EN | \ + B_AX_CR_DMA_PROCESS_ERR_INT_EN | \ + B_AX_CR_WRFF_OVERFLOW_ERR_INT_EN | \ + B_AX_CR_WRFF_UNDERFLOW_ERR_INT_EN) #define R_AX_OTHER_DISPATCHER_ERR_IMR 0x8858 +#define B_AX_OTHER_STF_WROQT_UNDERFLOW_INT_EN BIT(29) +#define B_AX_OTHER_STF_WROQT_OVERFLOW_INT_EN BIT(28) +#define B_AX_OTHER_STF_WRFF_UNDERFLOW_INT_EN BIT(27) +#define B_AX_OTHER_STF_WRFF_OVERFLOW_INT_EN BIT(26) +#define B_AX_OTHER_STF_CMD_UNDERFLOW_INT_EN BIT(25) +#define B_AX_OTHER_STF_CMD_OVERFLOW_INT_EN BIT(24) +#define B_AX_HOST_ADDR_INFO_LEN_ZERO_ERR_INT_EN BIT(17) +#define B_AX_CPU_ADDR_INFO_LEN_ZERO_ERR_INT_EN BIT(16) +#define B_AX_PLE_OUTPUT_ERR_INT_EN BIT(12) +#define B_AX_PLE_RESP_ERR_INT_EN BIT(11) +#define B_AX_PLE_BURST_NUM_ERR_INT_EN BIT(10) +#define B_AX_PLE_NULL_PKT_ERR_INT_EN BIT(9) +#define B_AX_PLE_FLOW_CTRL_ERR_INT_EN BIT(8) +#define B_AX_WDE_OUTPUT_ERR_INT_EN BIT(4) +#define B_AX_WDE_RESP_ERR_INT_EN BIT(3) +#define B_AX_WDE_BURST_NUM_ERR_INT_EN BIT(2) +#define B_AX_WDE_NULL_PKT_ERR_INT_EN BIT(1) +#define B_AX_WDE_FLOW_CTRL_ERR_INT_EN BIT(0) +#define B_AX_OTHER_DISP_IMR_CLR (B_AX_OTHER_STF_WROQT_UNDERFLOW_INT_EN | \ + B_AX_OTHER_STF_WROQT_OVERFLOW_INT_EN | \ + B_AX_OTHER_STF_WRFF_UNDERFLOW_INT_EN | \ + B_AX_OTHER_STF_WRFF_OVERFLOW_INT_EN | \ + B_AX_OTHER_STF_CMD_UNDERFLOW_INT_EN | \ + B_AX_OTHER_STF_CMD_OVERFLOW_INT_EN | \ + B_AX_HOST_ADDR_INFO_LEN_ZERO_ERR_INT_EN | \ + B_AX_CPU_ADDR_INFO_LEN_ZERO_ERR_INT_EN | \ + B_AX_PLE_OUTPUT_ERR_INT_EN | \ + B_AX_PLE_RESP_ERR_INT_EN | \ + B_AX_PLE_BURST_NUM_ERR_INT_EN | \ + B_AX_PLE_NULL_PKT_ERR_INT_EN | \ + B_AX_PLE_FLOW_CTRL_ERR_INT_EN | \ + B_AX_WDE_OUTPUT_ERR_INT_EN | \ + B_AX_WDE_RESP_ERR_INT_EN | \ + B_AX_WDE_BURST_NUM_ERR_INT_EN | \ + B_AX_WDE_NULL_PKT_ERR_INT_EN | \ + B_AX_WDE_FLOW_CTRL_ERR_INT_EN) + +#define B_AX_REUSE_SIZE_ERR_INT_EN BIT(31) +#define B_AX_REUSE_EN_ERR_INT_EN BIT(30) +#define B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN BIT(29) +#define B_AX_STF_OQT_OVERFLOW_ERR_INT_EN BIT(28) +#define B_AX_STF_WRFF_UNDERFLOW_ERR_INT_EN BIT(27) +#define B_AX_STF_WRFF_OVERFLOW_ERR_INT_EN BIT(26) +#define B_AX_STF_CMD_UNDERFLOW_ERR_INT_EN BIT(25) 
+#define B_AX_STF_CMD_OVERFLOW_ERR_INT_EN BIT(24) +#define B_AX_REUSE_SIZE_ZERO_ERR_INT_EN BIT(23) +#define B_AX_REUSE_PKT_CNT_ERR_INT_EN BIT(22) +#define B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN BIT(21) +#define B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN BIT(20) +#define B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN BIT(19) +#define B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN BIT(18) +#define B_AX_CDT_ADDR_INFO_LEN_ERR_INT_EN BIT(17) +#define B_AX_HDT_ADDR_INFO_LEN_ERR_INT_EN BIT(16) +#define B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN BIT(15) +#define B_AX_CDR_RX_TIMEOUT_ERR_INT_EN BIT(14) +#define B_AX_PLE_RESPOSE_ERR_INT_EN BIT(11) +#define B_AX_HDR_DMA_TIMEOUT_ERR_INT_EN BIT(7) +#define B_AX_HDR_RX_TIMEOUT_ERR_INT_EN BIT(6) +#define B_AX_WDE_RESPONSE_ERR_INT_EN BIT(3) +#define B_AX_OTHER_DISP_IMR_CLR_V1 (B_AX_CT_EP_CH_DIFF_ERR_INT_EN | \ + B_AX_WDE_FLOW_CTRL_ERR_INT_EN | \ + B_AX_WDE_NULL_PKT_ERR_INT_EN | \ + B_AX_WDE_BURST_NUM_ERR_INT_EN | \ + B_AX_WDE_RESPONSE_ERR_INT_EN | \ + B_AX_WDE_OUTPUT_ERR_INT_EN | \ + B_AX_HDR_RX_TIMEOUT_ERR_INT_EN | \ + B_AX_HDR_DMA_TIMEOUT_ERR_INT_EN | \ + B_AX_PLE_FLOW_CTRL_ERR_INT_EN | \ + B_AX_PLE_NULL_PKT_ERR_INT_EN | \ + B_AX_PLE_BURST_NUM_ERR_INT_EN | \ + B_AX_PLE_RESPOSE_ERR_INT_EN | \ + B_AX_PLE_OUTPUT_ERR_INT_EN | \ + B_AX_CDR_RX_TIMEOUT_ERR_INT_EN | \ + B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN | \ + B_AX_HDT_ADDR_INFO_LEN_ERR_INT_EN | \ + B_AX_CDT_ADDR_INFO_LEN_ERR_INT_EN | \ + B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN | \ + B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN | \ + B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN | \ + B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN | \ + B_AX_REUSE_PKT_CNT_ERR_INT_EN | \ + B_AX_REUSE_SIZE_ZERO_ERR_INT_EN | \ + B_AX_STF_CMD_OVERFLOW_ERR_INT_EN | \ + B_AX_STF_CMD_UNDERFLOW_ERR_INT_EN | \ + B_AX_STF_WRFF_OVERFLOW_ERR_INT_EN | \ + B_AX_STF_WRFF_UNDERFLOW_ERR_INT_EN | \ + B_AX_STF_OQT_OVERFLOW_ERR_INT_EN | \ + B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN | \ + B_AX_REUSE_EN_ERR_INT_EN | \ + B_AX_REUSE_SIZE_ERR_INT_EN) +#define B_AX_OTHER_DISP_IMR_SET_V1 (B_AX_CDR_RX_TIMEOUT_ERR_INT_EN | \ + B_AX_CDR_DMA_TIMEOUT_ERR_INT_EN | \ + B_AX_HDT_HCI_TIMEOUT_ERR_INT_EN | \ + B_AX_HDT_PTR_TIMEOUT_ERR_INT_EN | \ + B_AX_CDT_HCI_TIMEOUT_ERR_INT_EN | \ + B_AX_CDT_PTR_TIMEOUT_ERR_INT_EN | \ + B_AX_STF_OQT_OVERFLOW_ERR_INT_EN | \ + B_AX_STF_OQT_UNDERFLOW_ERR_INT_EN) #define R_AX_HCI_FC_CTRL 0x8A00 #define B_AX_HCI_FC_CH12_FULL_COND_MASK GENMASK(11, 10) @@ -612,9 +960,168 @@ #define B_AX_WDE_START_BOUND_MASK GENMASK(13, 8) #define B_AX_WDE_PAGE_SEL_MASK GENMASK(1, 0) #define B_AX_WDE_FREE_PAGE_NUM_MASK GENMASK(28, 16) + +#define R_AX_WDE_ERRFLAG_MSG 0x8C30 +#define B_AX_WDE_ERR_FLAG_MSG_MASK GENMASK(31, 0) + #define R_AX_WDE_ERR_FLAG_CFG 0x8C34 + #define R_AX_WDE_ERR_IMR 0x8C38 +#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27) +#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26) +#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25) +#define B_AX_WDE_DATCHN_ARBT_ERR_INT_EN BIT(24) +#define B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN BIT(19) +#define B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN BIT(18) +#define B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN BIT(17) +#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16) +#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15) +#define B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN BIT(14) +#define B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN BIT(13) +#define B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN BIT(12) +#define B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN BIT(7) +#define B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN BIT(6) +#define B_AX_WDE_GETNPG_STRPG_ERR_INT_EN BIT(5) +#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(4) +#define B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN BIT(3) +#define 
B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(2) +#define B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN BIT(1) +#define B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN BIT(0) +#define B_AX_WDE_IMR_CLR (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \ + B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \ + B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \ + B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \ + B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \ + B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN) +#define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_SIZE0_INT_EN | \ + B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \ + B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \ + B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \ + B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \ + B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \ + B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN) + +#define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29) +#define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28) +#define B_AX_WDE_DATCHN_RRDY_ERR_INT_EN BIT(27) +#define B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN BIT(26) +#define B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN BIT(25) +#define B_AX_WDE_DATCHN_ARBT_ERR_INT_EN BIT(24) +#define B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN BIT(19) +#define B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN BIT(18) +#define B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN BIT(17) +#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16) +#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15) +#define B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN BIT(14) +#define B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 BIT(9) +#define B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 BIT(8) +#define B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 BIT(7) +#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 BIT(6) +#define B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 BIT(5) +#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 BIT(4) +#define B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 BIT(3) +#define B_AX_WDE_BUFREQ_SIZELMT_INT_EN BIT(2) +#define B_AX_WDE_BUFREQ_SIZE0_INT_EN BIT(1) +#define B_AX_WDE_IMR_CLR_V1 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_SIZE0_INT_EN | \ + B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \ + B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \ + B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \ + B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \ + B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \ + 
B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \ + B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN) +#define B_AX_WDE_IMR_SET_V1 (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_WDE_BUFREQ_SIZE0_INT_EN | \ + B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \ + B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \ + B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \ + B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \ + B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \ + B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_WDE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_WDE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_WDE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \ + B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \ + B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \ + B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN) + #define R_AX_WDE_ERR_ISR 0x8C3C +#define B_AX_WDE_DATCHN_RRDY_ERR BIT(27) +#define B_AX_WDE_DATCHN_FRZTO_ERR BIT(26) +#define B_AX_WDE_DATCHN_NULLPG_ERR BIT(25) +#define B_AX_WDE_DATCHN_ARBT_ERR BIT(24) +#define B_AX_WDE_QUEMGN_FRZTO_ERR BIT(19) +#define B_AX_WDE_NXTPKTLL_AD_ERR BIT(18) +#define B_AX_WDE_PREPKTLLT_AD_ERR BIT(17) +#define B_AX_WDE_ENQ_PKTCNT_NVAL_ERR BIT(16) +#define B_AX_WDE_ENQ_PKTCNT_OVRF_ERR BIT(15) +#define B_AX_WDE_QUE_SRCQUEID_ERR BIT(14) +#define B_AX_WDE_QUE_DSTQUEID_ERR BIT(13) +#define B_AX_WDE_QUE_CMDTYPE_ERR BIT(12) +#define B_AX_WDE_BUFMGN_FRZTO_ERR BIT(7) +#define B_AX_WDE_GETNPG_PGOFST_ERR BIT(6) +#define B_AX_WDE_GETNPG_STRPG_ERR BIT(5) +#define B_AX_WDE_BUFREQ_SRCHTAILPG_ERR BIT(4) +#define B_AX_WDE_BUFRTN_SIZE_ERR BIT(3) +#define B_AX_WDE_BUFRTN_INVLD_PKTID_ERR BIT(2) +#define B_AX_WDE_BUFREQ_UNAVAL_ERR BIT(1) +#define B_AX_WDE_BUFREQ_QTAID_ERR BIT(0) #define B_AX_WDE_MAX_SIZE_MASK GENMASK(27, 16) #define B_AX_WDE_MIN_SIZE_MASK GENMASK(11, 0) @@ -649,7 +1156,123 @@ #define R_AX_PLE_ERR_FLAG_CFG 0x9034 #define R_AX_PLE_ERR_IMR 0x9038 +#define B_AX_PLE_DATCHN_RRDY_ERR_INT_EN BIT(27) +#define B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN BIT(26) +#define B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN BIT(25) +#define B_AX_PLE_DATCHN_ARBT_ERR_INT_EN BIT(24) +#define B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN BIT(19) +#define B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN BIT(18) +#define B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN BIT(17) +#define B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN BIT(16) +#define B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN BIT(15) +#define B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN BIT(14) +#define B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN BIT(13) +#define B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN BIT(12) +#define B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN BIT(7) +#define B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN BIT(6) #define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN BIT(5) +#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN BIT(4) +#define B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN BIT(3) +#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN BIT(2) +#define B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN BIT(1) +#define B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN BIT(0) +#define B_AX_PLE_IMR_CLR 
(B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \ + B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \ + B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN | \ + B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \ + B_AX_PLE_GETNPG_STRPG_ERR_INT_EN | \ + B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN | \ + B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN) +#define B_AX_PLE_IMR_SET (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN | \ + B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \ + B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN | \ + B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \ + B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN | \ + B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN) + +#define B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29) +#define B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28) +#define B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 BIT(9) +#define B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 BIT(8) +#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 BIT(7) +#define B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 BIT(6) +#define B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 BIT(5) +#define B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 BIT(4) +#define B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 BIT(3) +#define B_AX_PLE_BUFREQ_SIZELMT_INT_EN BIT(2) +#define B_AX_PLE_BUFREQ_SIZE0_INT_EN BIT(1) +#define B_AX_PLE_IMR_CLR_V1 (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_PLE_BUFREQ_SIZE0_INT_EN | \ + B_AX_PLE_BUFREQ_SIZELMT_INT_EN | \ + B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \ + B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 | \ + B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \ + B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \ + B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_DATCHN_RRDY_ERR_INT_EN | \ + B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN | \ + B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN) +#define B_AX_PLE_IMR_SET_V1 (B_AX_PLE_BUFREQ_QTAID_ERR_INT_EN | \ + B_AX_PLE_BUFREQ_SIZE0_INT_EN | \ + B_AX_PLE_BUFREQ_SIZELMT_INT_EN | \ + B_AX_PLE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFRTN_SIZE_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \ + B_AX_PLE_GETNPG_STRPG_ERR_INT_EN_V1 | \ + B_AX_PLE_GETNPG_PGOFST_ERR_INT_EN_V1 | \ + B_AX_PLE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \ + B_AX_PLE_QUE_CMDTYPE_ERR_INT_EN | \ + 
B_AX_PLE_QUE_DSTQUEID_ERR_INT_EN | \ + B_AX_PLE_QUE_SRCQUEID_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_OVRF_ERR_INT_EN | \ + B_AX_PLE_ENQ_PKTCNT_NVAL_ERR_INT_EN | \ + B_AX_PLE_PREPKTLLT_AD_ERR_INT_EN | \ + B_AX_PLE_NXTPKTLL_AD_ERR_INT_EN | \ + B_AX_PLE_QUEMGN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_DATCHN_ARBT_ERR_INT_EN | \ + B_AX_PLE_DATCHN_NULLPG_ERR_INT_EN | \ + B_AX_PLE_DATCHN_FRZTO_ERR_INT_EN | \ + B_AX_PLE_DATCHN_RRDY_ERR_INT_EN | \ + B_AX_PLE_DATCHN_ADRERR_ERR_INT_EN | \ + B_AX_PLE_DATCHN_CAMREQ_ERR_INT_EN) #define R_AX_PLE_ERR_FLAG_ISR 0x903C #define B_AX_PLE_MAX_SIZE_MASK GENMASK(27, 16) @@ -704,12 +1327,97 @@ #define B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN BIT(2) #define B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN BIT(1) #define B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN BIT(0) +#define B_AX_WDRLS_IMR_EN_CLR (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \ + B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN | \ + B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \ + B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \ + B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \ + B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN) +#define B_AX_WDRLS_IMR_SET (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \ + B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \ + B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \ + B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \ + B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN) +#define B_AX_WDRLS_IMR_SET_V1 (B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN | \ + B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN | \ + B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN | \ + B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN | \ + B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN | \ + B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN | \ + B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN) + #define R_AX_WDRLS_ERR_ISR 0x9434 +#define R_AX_BBRPT_COM_ERR_IMR 0x9608 +#define B_AX_BBRPT_COM_HANG_EN BIT(1) +#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN BIT(0) + #define R_AX_BBRPT_COM_ERR_IMR_ISR 0x960C +#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR BIT(16) +#define B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN BIT(0) + +#define R_AX_BBRPT_CHINFO_ERR_IMR 0x9628 +#define B_AX_BBPRT_CHIF_TO_ERR_INT_EN BIT(7) +#define B_AX_BBPRT_CHIF_NULL_ERR_INT_EN BIT(6) +#define B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN BIT(5) +#define B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN BIT(4) +#define B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN BIT(3) +#define B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN BIT(2) +#define B_AX_BBPRT_CHIF_OVF_ERR_INT_EN BIT(1) +#define B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN BIT(0) +#define R_AX_BBRPT_CHINFO_IMR_SET_V1 (B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_OVF_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_NULL_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_TO_ERR_INT_EN) + #define R_AX_BBRPT_CHINFO_ERR_IMR_ISR 0x962C +#define B_AX_BBPRT_CHIF_TO_ERR BIT(23) +#define B_AX_BBPRT_CHIF_NULL_ERR BIT(22) +#define B_AX_BBPRT_CHIF_LEFT2_ERR BIT(21) +#define B_AX_BBPRT_CHIF_LEFT1_ERR BIT(20) +#define B_AX_BBPRT_CHIF_HDRL_ERR BIT(19) +#define B_AX_BBPRT_CHIF_BOVF_ERR BIT(18) +#define B_AX_BBPRT_CHIF_OVF_ERR BIT(17) +#define B_AX_BBPRT_CHIF_BB_TO_ERR BIT(16) +#define B_AX_BBPRT_CHIF_TO_ERR_INT_EN BIT(7) +#define B_AX_BBPRT_CHIF_NULL_ERR_INT_EN BIT(6) +#define B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN 
BIT(5) +#define B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN BIT(4) +#define B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN BIT(3) +#define B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN BIT(2) +#define B_AX_BBPRT_CHIF_OVF_ERR_INT_EN BIT(1) +#define B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN BIT(0) +#define B_AX_BBRPT_CHINFO_IMR_CLR (B_AX_BBPRT_CHIF_BB_TO_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_OVF_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_BOVF_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_HDRL_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_LEFT1_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_LEFT2_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_NULL_ERR_INT_EN | \ + B_AX_BBPRT_CHIF_TO_ERR_INT_EN) + +#define R_AX_BBRPT_DFS_ERR_IMR 0x9638 +#define B_AX_BBRPT_DFS_TO_ERR_INT_EN BIT(0) + #define R_AX_BBRPT_DFS_ERR_IMR_ISR 0x963C +#define B_AX_BBRPT_DFS_TO_ERR BIT(16) +#define B_AX_BBRPT_DFS_TO_ERR_INT_EN BIT(0) + #define R_AX_LA_ERRFLAG 0x966C +#define B_AX_LA_ISR_DATA_LOSS_ERR BIT(16) +#define B_AX_LA_IMR_DATA_LOSS_ERR BIT(0) #define R_AX_WD_BUF_REQ 0x9800 #define R_AX_PL_BUF_REQ 0x9820 @@ -745,18 +1453,51 @@ #define R_AX_PL_CPUQ_OP_STATUS 0x983C #define B_AX_WD_CPUQ_OP_STAT_DONE BIT(31) #define B_AX_WD_CPUQ_OP_PKTID_MASK GENMASK(11, 0) + #define R_AX_CPUIO_ERR_IMR 0x9840 +#define B_AX_PLEQUE_OP_ERR_INT_EN BIT(12) +#define B_AX_PLEBUF_OP_ERR_INT_EN BIT(8) +#define B_AX_WDEQUE_OP_ERR_INT_EN BIT(4) +#define B_AX_WDEBUF_OP_ERR_INT_EN BIT(0) +#define B_AX_CPUIO_IMR_CLR (B_AX_WDEBUF_OP_ERR_INT_EN | \ + B_AX_WDEQUE_OP_ERR_INT_EN | \ + B_AX_PLEBUF_OP_ERR_INT_EN | \ + B_AX_PLEQUE_OP_ERR_INT_EN) +#define B_AX_CPUIO_IMR_SET (B_AX_WDEBUF_OP_ERR_INT_EN | \ + B_AX_WDEQUE_OP_ERR_INT_EN | \ + B_AX_PLEBUF_OP_ERR_INT_EN | \ + B_AX_PLEQUE_OP_ERR_INT_EN) + #define R_AX_CPUIO_ERR_ISR 0x9844 #define R_AX_SEC_ERR_IMR_ISR 0x991C #define R_AX_PKTIN_SETTING 0x9A00 #define B_AX_WD_ADDR_INFO_LENGTH BIT(1) + #define R_AX_PKTIN_ERR_IMR 0x9A20 +#define B_AX_PKTIN_GETPKTID_ERR_INT_EN BIT(0) + #define R_AX_PKTIN_ERR_ISR 0x9A24 #define R_AX_MPDU_TX_ERR_ISR 0x9BF0 #define R_AX_MPDU_TX_ERR_IMR 0x9BF4 +#define B_AX_TX_KSRCH_ERR_EN BIT(9) +#define B_AX_TX_NW_TYPE_ERR_EN BIT(8) +#define B_AX_TX_LLC_PRE_ERR_EN BIT(7) +#define B_AX_TX_ETH_TYPE_ERR_EN BIT(6) +#define B_AX_TX_HDR3_SIZE_ERR_INT_EN BIT(5) +#define B_AX_TX_OFFSET_ERR_INT_EN BIT(4) +#define B_AX_TX_MPDU_SIZE_ZERO_INT_EN BIT(3) +#define B_AX_TX_NXT_ERRPKTID_INT_EN BIT(2) +#define B_AX_TX_GET_ERRPKTID_INT_EN BIT(1) +#define B_AX_MPDU_TX_IMR_SET_V1 (B_AX_TX_GET_ERRPKTID_INT_EN | \ + B_AX_TX_NXT_ERRPKTID_INT_EN | \ + B_AX_TX_MPDU_SIZE_ZERO_INT_EN | \ + B_AX_TX_HDR3_SIZE_ERR_INT_EN | \ + B_AX_TX_ETH_TYPE_ERR_EN | \ + B_AX_TX_NW_TYPE_ERR_EN | \ + B_AX_TX_KSRCH_ERR_EN) #define R_AX_MPDU_PROC 0x9C00 #define B_AX_A_ICV_ERR BIT(1) @@ -778,6 +1519,10 @@ #define R_AX_MPDU_RX_ERR_ISR 0x9CF0 #define R_AX_MPDU_RX_ERR_IMR 0x9CF4 +#define B_AX_RPT_ERR_INT_EN BIT(3) +#define B_AX_MHDRLEN_ERR_INT_EN BIT(1) +#define B_AX_GETPKTID_ERR_INT_EN BIT(0) +#define B_AX_MPDU_RX_IMR_SET_V1 B_AX_RPT_ERR_INT_EN #define R_AX_SEC_ENG_CTRL 0x9D00 #define B_AX_TX_PARTIAL_MODE BIT(11) @@ -798,15 +1543,23 @@ #define R_AX_SEC_CAM_ACCESS 0x9D10 #define R_AX_SEC_CAM_RDATA 0x9D14 #define R_AX_SEC_CAM_WDATA 0x9D18 + #define R_AX_SEC_DEBUG 0x9D1C +#define B_AX_IMR_ERROR BIT(3) + #define R_AX_SEC_DEBUG1 0x9D1C #define B_AX_TX_TIMEOUT_SEL_MASK GENMASK(31, 30) #define AX_TX_TO_VAL 0x2 + #define R_AX_SEC_TX_DEBUG 0x9D20 #define R_AX_SEC_RX_DEBUG 0x9D24 #define R_AX_SEC_TRX_PKT_CNT 0x9D28 #define R_AX_SEC_TRX_BLK_CNT 0x9D2C +#define R_AX_SEC_ERROR_FLAG_IMR 0x9D2C +#define B_AX_RX_HANG_IMR BIT(1) +#define B_AX_TX_HANG_IMR BIT(0) + #define 
R_AX_SS_CTRL 0x9E10 #define B_AX_SS_INIT_DONE_1 BIT(31) #define B_AX_SS_WARM_INIT_FLG BIT(29) @@ -838,9 +1591,47 @@ #define B_AX_SS_MACID127_96_PAUSE_MASK GENMASK(31, 0) #define R_AX_STA_SCHEDULER_ERR_IMR 0x9EF0 +#define B_AX_PLE_B_PKTID_ERR_INT_EN BIT(2) +#define B_AX_RPT_HANG_TIMEOUT_INT_EN BIT(1) +#define B_AX_SEARCH_HANG_TIMEOUT_INT_EN BIT(0) +#define B_AX_STA_SCHEDULER_IMR_SET (B_AX_SEARCH_HANG_TIMEOUT_INT_EN | \ + B_AX_RPT_HANG_TIMEOUT_INT_EN | \ + B_AX_PLE_B_PKTID_ERR_INT_EN) + #define R_AX_STA_SCHEDULER_ERR_ISR 0x9EF4 #define R_AX_TXPKTCTL_ERR_IMR_ISR 0x9F1C +#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR BIT(25) +#define B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR BIT(24) +#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR BIT(19) +#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR BIT(18) +#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR BIT(17) +#define B_AX_TXPKTCTL_USRCTL_REINIT_ERR BIT(16) +#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9) +#define B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN BIT(8) +#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3) +#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN BIT(2) +#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN BIT(1) +#define B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN BIT(0) +#define B_AX_TXPKTCTL_IMR_B0_CLR (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \ + B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \ + B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | \ + B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | \ + B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \ + B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN) +#define B_AX_TXPKTCTL_IMR_B1_CLR (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \ + B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \ + B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | \ + B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | \ + B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \ + B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN) +#define B_AX_TXPKTCTL_IMR_B0_SET (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \ + B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN) +#define B_AX_TXPKTCTL_IMR_B1_SET (B_AX_TXPKTCTL_USRCTL_REINIT_ERR_INT_EN | \ + B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN | \ + B_AX_TXPKTCTL_CMDPSR_CMDTYPE_ERR_INT_EN | \ + B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN) + #define R_AX_TXPKTCTL_ERR_IMR_ISR_B1 0x9F2C #define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9) #define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3) @@ -867,6 +1658,42 @@ #define PRELD_NEXT_WND 1 #define B_AX_B0_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0) +#define R_AX_TXPKTCTL_B0_ERRFLAG_IMR 0x9F78 +#define B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG BIT(21) +#define B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR BIT(20) +#define B_AX_B0_IMR_ERR_MPDUIF_DATAERR BIT(18) +#define B_AX_B0_IMR_ERR_MPDUINFO_RECFG BIT(16) +#define B_AX_B0_IMR_ERR_CMDPSR_TBLSZ BIT(11) +#define B_AX_B0_IMR_ERR_CMDPSR_FRZTO BIT(10) +#define B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE BIT(9) +#define B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR BIT(8) +#define B_AX_B0_IMR_ERR_USRCTL_RLSBMPLEN BIT(3) +#define B_AX_B0_IMR_ERR_USRCTL_RDNRLSCMD BIT(2) +#define B_AX_B0_IMR_ERR_USRCTL_NOINIT BIT(1) +#define B_AX_B0_IMR_ERR_USRCTL_REINIT BIT(0) +#define B_AX_TXPKTCTL_IMR_B0_CLR_V1 (B_AX_B0_IMR_ERR_USRCTL_REINIT | \ + B_AX_B0_IMR_ERR_USRCTL_NOINIT | \ + B_AX_B0_IMR_ERR_USRCTL_RDNRLSCMD | \ + B_AX_B0_IMR_ERR_USRCTL_RLSBMPLEN | \ + B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \ + B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \ + B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \ + B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \ + B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \ + B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \ + B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR | \ + B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG) +#define B_AX_TXPKTCTL_IMR_B0_SET_V1 
(B_AX_B0_IMR_ERR_USRCTL_REINIT | \ + B_AX_B0_IMR_ERR_USRCTL_NOINIT | \ + B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \ + B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \ + B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \ + B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \ + B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \ + B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \ + B_AX_B0_IMR_ERR_PRELD_RLSPKTSZERR | \ + B_AX_B0_IMR_ERR_PRELD_ENTNUMCFG) + #define R_AX_TXPKTCTL_B1_PRELD_CFG0 0x9F88 #define B_AX_B1_PRELD_FEN BIT(31) #define B_AX_B1_PRELD_USEMAXSZ_MASK GENMASK(25, 16) @@ -878,6 +1705,42 @@ #define B_AX_B1_PRELD_NXT_TXENDWIN_MASK GENMASK(11, 8) #define B_AX_B1_PRELD_NXT_RSVMINSZ_MASK GENMASK(7, 0) +#define R_AX_TXPKTCTL_B1_ERRFLAG_IMR 0x9FB8 +#define B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG BIT(21) +#define B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR BIT(20) +#define B_AX_B1_IMR_ERR_MPDUIF_DATAERR BIT(18) +#define B_AX_B1_IMR_ERR_MPDUINFO_RECFG BIT(16) +#define B_AX_B1_IMR_ERR_CMDPSR_TBLSZ BIT(11) +#define B_AX_B1_IMR_ERR_CMDPSR_FRZTO BIT(10) +#define B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE BIT(9) +#define B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR BIT(8) +#define B_AX_B1_IMR_ERR_USRCTL_RLSBMPLEN BIT(3) +#define B_AX_B1_IMR_ERR_USRCTL_RDNRLSCMD BIT(2) +#define B_AX_B1_IMR_ERR_USRCTL_NOINIT BIT(1) +#define B_AX_B1_IMR_ERR_USRCTL_REINIT BIT(0) +#define B_AX_TXPKTCTL_IMR_B1_CLR_V1 (B_AX_B1_IMR_ERR_USRCTL_REINIT | \ + B_AX_B1_IMR_ERR_USRCTL_NOINIT | \ + B_AX_B1_IMR_ERR_USRCTL_RDNRLSCMD | \ + B_AX_B1_IMR_ERR_USRCTL_RLSBMPLEN | \ + B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR | \ + B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE | \ + B_AX_B1_IMR_ERR_CMDPSR_FRZTO | \ + B_AX_B1_IMR_ERR_CMDPSR_TBLSZ | \ + B_AX_B1_IMR_ERR_MPDUINFO_RECFG | \ + B_AX_B1_IMR_ERR_MPDUIF_DATAERR | \ + B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR | \ + B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG) +#define B_AX_TXPKTCTL_IMR_B1_SET_V1 (B_AX_B1_IMR_ERR_USRCTL_REINIT | \ + B_AX_B1_IMR_ERR_USRCTL_NOINIT | \ + B_AX_B1_IMR_ERR_CMDPSR_1STCMDERR | \ + B_AX_B1_IMR_ERR_CMDPSR_CMDTYPE | \ + B_AX_B1_IMR_ERR_CMDPSR_FRZTO | \ + B_AX_B1_IMR_ERR_CMDPSR_TBLSZ | \ + B_AX_B1_IMR_ERR_MPDUINFO_RECFG | \ + B_AX_B1_IMR_ERR_MPDUIF_DATAERR | \ + B_AX_B1_IMR_ERR_PRELD_RLSPKTSZERR | \ + B_AX_B1_IMR_ERR_PRELD_ENTNUMCFG) + #define R_AX_AFE_CTRL1 0x0024 #define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 501631b1f041..1af605748740 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -408,6 +408,34 @@ static const struct rtw89_reg_def rtw8852a_dcfo_comp = { R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK }; +static const struct rtw89_imr_info rtw8852a_imr_info = { + .wdrls_imr_set = B_AX_WDRLS_IMR_SET, + .wsec_imr_reg = R_AX_SEC_DEBUG, + .wsec_imr_set = B_AX_IMR_ERROR, + .mpdu_tx_imr_set = 0, + .mpdu_rx_imr_set = 0, + .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET, + .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_ERR_IMR_ISR, + .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR, + .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET, + .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_ERR_IMR_ISR_B1, + .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR, + .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET, + .wde_imr_clr = B_AX_WDE_IMR_CLR, + .wde_imr_set = B_AX_WDE_IMR_SET, + .ple_imr_clr = B_AX_PLE_IMR_CLR, + .ple_imr_set = B_AX_PLE_IMR_SET, + .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR, + .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET, + .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR, + .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET, + .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR, + .other_disp_imr_set = 0, + 
.bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR, + .bbrpt_err_imr_set = 0, + .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR, +}; + static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse, struct rtw8852a_efuse *map) { @@ -2112,6 +2140,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .page_regs = &rtw8852a_page_regs, .dcfo_comp = &rtw8852a_dcfo_comp, .dcfo_comp_sft = 3, + .imr_info = &rtw8852a_imr_info }; EXPORT_SYMBOL(rtw8852a_chip_info); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 5dbb711defc4..18c17b4bbc18 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -51,6 +51,34 @@ static const struct rtw89_reg_def rtw8852c_dcfo_comp = { R_DCFO_COMP_S0_V1, B_DCFO_COMP_S0_V1_MSK }; +static const struct rtw89_imr_info rtw8852c_imr_info = { + .wdrls_imr_set = B_AX_WDRLS_IMR_SET_V1, + .wsec_imr_reg = R_AX_SEC_ERROR_FLAG_IMR, + .wsec_imr_set = B_AX_TX_HANG_IMR | B_AX_RX_HANG_IMR, + .mpdu_tx_imr_set = B_AX_MPDU_TX_IMR_SET_V1, + .mpdu_rx_imr_set = B_AX_MPDU_RX_IMR_SET_V1, + .sta_sch_imr_set = B_AX_STA_SCHEDULER_IMR_SET, + .txpktctl_imr_b0_reg = R_AX_TXPKTCTL_B0_ERRFLAG_IMR, + .txpktctl_imr_b0_clr = B_AX_TXPKTCTL_IMR_B0_CLR_V1, + .txpktctl_imr_b0_set = B_AX_TXPKTCTL_IMR_B0_SET_V1, + .txpktctl_imr_b1_reg = R_AX_TXPKTCTL_B1_ERRFLAG_IMR, + .txpktctl_imr_b1_clr = B_AX_TXPKTCTL_IMR_B1_CLR_V1, + .txpktctl_imr_b1_set = B_AX_TXPKTCTL_IMR_B1_SET_V1, + .wde_imr_clr = B_AX_WDE_IMR_CLR_V1, + .wde_imr_set = B_AX_WDE_IMR_SET_V1, + .ple_imr_clr = B_AX_PLE_IMR_CLR_V1, + .ple_imr_set = B_AX_PLE_IMR_SET_V1, + .host_disp_imr_clr = B_AX_HOST_DISP_IMR_CLR_V1, + .host_disp_imr_set = B_AX_HOST_DISP_IMR_SET_V1, + .cpu_disp_imr_clr = B_AX_CPU_DISP_IMR_CLR_V1, + .cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET_V1, + .other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR_V1, + .other_disp_imr_set = B_AX_OTHER_DISP_IMR_SET_V1, + .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR, + .bbrpt_err_imr_set = R_AX_BBRPT_CHINFO_IMR_SET_V1, + .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR, +}; + static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev) { u32 val32; @@ -572,6 +600,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .page_regs = &rtw8852c_page_regs, .dcfo_comp = &rtw8852c_dcfo_comp, .dcfo_comp_sft = 5, + .imr_info = &rtw8852c_imr_info }; EXPORT_SYMBOL(rtw8852c_chip_info); -- cgit v1.2.3 From d86369e937f1ca4f87b9375d055c3ef9a6aa5360 Mon Sep 17 00:00:00 2001 From: Chia-Yuan Li Date: Fri, 8 Apr 2022 08:13:43 +0800 Subject: rtw89: ser: configure C-MAC interrupt mask Similarly, create functions to set specific C-MAC masks for firmware recovery. 
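The per-block helpers added below (scheduler, PTCL, CDMA, PHY interface, RMAC, TMAC) all follow the same shape: resolve the band-specific register with rtw89_mac_reg_by_idx(), clear the chip's full mask, then set only the wanted interrupt sources taken from struct rtw89_imr_info. A condensed sketch of that clear-then-set step, with a hypothetical rtw89_imr_apply() standing in for the individual per-block functions:

/* Illustrative only: one generic clear-then-set step, parameterized by the
 * chip-specific masks carried in struct rtw89_imr_info. The patch below adds
 * one such helper per C-MAC block rather than a single shared function. */
static void rtw89_imr_apply(struct rtw89_dev *rtwdev, u8 mac_idx,
			    u32 imr_reg, u32 imr_clr, u32 imr_set)
{
	u32 reg = rtw89_mac_reg_by_idx(imr_reg, mac_idx);

	rtw89_write32_clr(rtwdev, reg, imr_clr);	/* mask every source first */
	rtw89_write32_set(rtwdev, reg, imr_set);	/* then unmask the wanted ones */
}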
Signed-off-by: Chia-Yuan Li Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 17 ++ drivers/net/wireless/realtek/rtw89/mac.c | 108 +++++++--- drivers/net/wireless/realtek/rtw89/reg.h | 300 +++++++++++++++++++++++++- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 17 ++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 17 ++ 5 files changed, 423 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 63aa88b705bc..a6c075ac67d5 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2319,6 +2319,23 @@ struct rtw89_imr_info { u32 bbrpt_chinfo_err_imr_reg; u32 bbrpt_err_imr_set; u32 bbrpt_dfs_err_imr_reg; + u32 ptcl_imr_clr; + u32 ptcl_imr_set; + u32 cdma_imr_0_reg; + u32 cdma_imr_0_clr; + u32 cdma_imr_0_set; + u32 cdma_imr_1_reg; + u32 cdma_imr_1_clr; + u32 cdma_imr_1_set; + u32 phy_intf_imr_reg; + u32 phy_intf_imr_clr; + u32 phy_intf_imr_set; + u32 rmac_imr_reg; + u32 rmac_imr_clr; + u32 rmac_imr_set; + u32 tmac_imr_reg; + u32 tmac_imr_clr; + u32 tmac_imr_set; }; struct rtw89_chip_info { diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 77fb9b74019e..c10eca44dbe9 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2737,10 +2737,76 @@ static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev) rtw89_write32_set(rtwdev, R_AX_LA_ERRFLAG, B_AX_LA_IMR_DATA_LOSS_ERR); } +static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + u32 reg; + + reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); + rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN | + B_AX_FSM_TIMEOUT_ERR_INT_EN); + rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN); +} + +static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + u32 reg; + + reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx); + rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr); + rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set); +} + +static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + u32 reg; + + reg = rtw89_mac_reg_by_idx(imr->cdma_imr_0_reg, mac_idx); + rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr); + rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set); + + if (chip_id == RTL8852C) { + reg = rtw89_mac_reg_by_idx(imr->cdma_imr_1_reg, mac_idx); + rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr); + rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set); + } +} + +static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + u32 reg; + + reg = rtw89_mac_reg_by_idx(imr->phy_intf_imr_reg, mac_idx); + rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr); + rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set); +} + +static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + u32 reg; + + reg = rtw89_mac_reg_by_idx(imr->rmac_imr_reg, mac_idx); + rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr); + rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set); +} + 
+static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) +{ + const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; + u32 reg; + + reg = rtw89_mac_reg_by_idx(imr->tmac_imr_reg, mac_idx); + rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr); + rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set); +} + static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, enum rtw89_mac_hwmod_sel sel) { - u32 reg, val; int ret; ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel); @@ -2763,40 +2829,12 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, rtw89_cpuio_imr_enable(rtwdev); rtw89_bbrpt_imr_enable(rtwdev); } else if (sel == RTW89_CMAC_SEL) { - reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx); - rtw89_write32_clr(rtwdev, reg, - B_AX_SORT_NON_IDLE_ERR_INT_EN); - - reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx); - rtw89_write32_clr(rtwdev, reg, - B_AX_NO_RESERVE_PAGE_ERR_IMR | - B_AX_RXDATA_FSM_HANG_ERROR_IMR); - - reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx); - val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN | - B_AX_TX_RECORD_PKTID_ERR_INT_EN | - B_AX_FSM_TIMEOUT_ERR_INT_EN; - rtw89_write32(rtwdev, reg, val); - - reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx); - rtw89_write32_set(rtwdev, reg, - B_AX_PHY_TXON_TIMEOUT_INT_EN | - B_AX_CCK_CCA_TIMEOUT_INT_EN | - B_AX_OFDM_CCA_TIMEOUT_INT_EN | - B_AX_DATA_ON_TIMEOUT_INT_EN | - B_AX_STS_ON_TIMEOUT_INT_EN | - B_AX_CSI_ON_TIMEOUT_INT_EN); - - reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx); - val = rtw89_read32(rtwdev, reg); - val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN | - B_AX_RMAC_RX_TIMEOUT_INT_EN | - B_AX_RMAC_CSI_TIMEOUT_INT_EN); - val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN | - B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN | - B_AX_RMAC_CCA_TIMEOUT_INT_EN | - B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN); - rtw89_write32(rtwdev, reg, val); + rtw89_scheduler_imr_enable(rtwdev, mac_idx); + rtw89_ptcl_imr_enable(rtwdev, mac_idx); + rtw89_cdma_imr_enable(rtwdev, mac_idx); + rtw89_phy_intf_imr_enable(rtwdev, mac_idx); + rtw89_rmac_imr_enable(rtwdev, mac_idx); + rtw89_tmac_imr_enable(rtwdev, mac_idx); } else { return -EINVAL; } diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 9e445bf9b9d4..3d66d3579af4 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -1932,7 +1932,6 @@ #define R_AX_SCHEDULE_ERR_IMR 0xC3E8 #define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8 #define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1) -#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0) #define R_AX_SCHEDULE_ERR_ISR 0xC3EC #define R_AX_SCHEDULE_ERR_ISR_C1 0xE3EC @@ -2206,8 +2205,48 @@ #define R_AX_PTCL_IMR0 0xC6C0 #define R_AX_PTCL_IMR0_C1 0xE6C0 +#define B_AX_F2PCMD_PKTID_ERR_INT_EN BIT(31) +#define B_AX_F2PCMD_RD_PKTID_ERR_INT_EN BIT(30) +#define B_AX_F2PCMD_ASSIGN_PKTID_ERR_INT_EN BIT(29) #define B_AX_F2PCMD_USER_ALLC_ERR_INT_EN BIT(28) +#define B_AX_RX_SPF_U0_PKTID_ERR_INT_EN BIT(27) +#define B_AX_TX_SPF_U1_PKTID_ERR_INT_EN BIT(26) +#define B_AX_TX_SPF_U2_PKTID_ERR_INT_EN BIT(25) +#define B_AX_TX_SPF_U3_PKTID_ERR_INT_EN BIT(24) #define B_AX_TX_RECORD_PKTID_ERR_INT_EN BIT(23) +#define B_AX_F2PCMD_EMPTY_ERR_INT_EN BIT(15) +#define B_AX_TWTSP_QSEL_ERR_INT_EN BIT(14) +#define B_AX_BCNQ_ORDER_ERR_INT_EN BIT(12) +#define B_AX_Q_PKTID_ERR_INT_EN BIT(11) +#define B_AX_D_PKTID_ERR_INT_EN BIT(10) +#define B_AX_TXPRT_FULL_DROP_ERR_INT_EN BIT(9) +#define B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN BIT(8) +#define B_AX_FSM1_TIMEOUT_ERR_INT_EN BIT(1) +#define 
B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0) +#define B_AX_PTCL_IMR_CLR (B_AX_FSM_TIMEOUT_ERR_INT_EN | \ + B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN | \ + B_AX_TXPRT_FULL_DROP_ERR_INT_EN | \ + B_AX_D_PKTID_ERR_INT_EN | \ + B_AX_Q_PKTID_ERR_INT_EN | \ + B_AX_BCNQ_ORDER_ERR_INT_EN | \ + B_AX_TWTSP_QSEL_ERR_INT_EN | \ + B_AX_F2PCMD_EMPTY_ERR_INT_EN | \ + B_AX_TX_RECORD_PKTID_ERR_INT_EN | \ + B_AX_TX_SPF_U3_PKTID_ERR_INT_EN | \ + B_AX_TX_SPF_U2_PKTID_ERR_INT_EN | \ + B_AX_TX_SPF_U1_PKTID_ERR_INT_EN | \ + B_AX_RX_SPF_U0_PKTID_ERR_INT_EN | \ + B_AX_F2PCMD_USER_ALLC_ERR_INT_EN | \ + B_AX_F2PCMD_ASSIGN_PKTID_ERR_INT_EN | \ + B_AX_F2PCMD_RD_PKTID_ERR_INT_EN | \ + B_AX_F2PCMD_PKTID_ERR_INT_EN) +#define B_AX_PTCL_IMR_SET (B_AX_FSM_TIMEOUT_ERR_INT_EN | \ + B_AX_TX_RECORD_PKTID_ERR_INT_EN | \ + B_AX_F2PCMD_USER_ALLC_ERR_INT_EN) +#define B_AX_PTCL_IMR_CLR_V1 (B_AX_FSM1_TIMEOUT_ERR_INT_EN | \ + B_AX_FSM_TIMEOUT_ERR_INT_EN) +#define B_AX_PTCL_IMR_SET_V1 (B_AX_FSM1_TIMEOUT_ERR_INT_EN | \ + B_AX_FSM_TIMEOUT_ERR_INT_EN) #define R_AX_PTCL_ISR0 0xC6C4 #define R_AX_PTCL_ISR0_C1 0xE6C4 @@ -2234,10 +2273,142 @@ #define R_AX_DLE_CTRL_C1 0xE800 #define B_AX_NO_RESERVE_PAGE_ERR_IMR BIT(23) #define B_AX_RXDATA_FSM_HANG_ERROR_IMR BIT(15) +#define B_AX_RXSTS_FSM_HANG_ERROR_IMR BIT(14) +#define B_AX_DLE_IMR_CLR (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \ + B_AX_RXDATA_FSM_HANG_ERROR_IMR | \ + B_AX_NO_RESERVE_PAGE_ERR_IMR) +#define B_AX_DLE_IMR_SET (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \ + B_AX_RXDATA_FSM_HANG_ERROR_IMR) + #define R_AX_RXDMA_PKT_INFO_0 0xC814 #define R_AX_RXDMA_PKT_INFO_1 0xC818 #define R_AX_RXDMA_PKT_INFO_2 0xC81C +#define R_AX_RX_ERR_FLAG_IMR 0xC804 +#define R_AX_RX_ERR_FLAG_IMR_C1 0xE804 +#define B_AX_RX_GET_NULL_PKT_ERR_MSK BIT(30) +#define B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK BIT(29) +#define B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK BIT(28) +#define B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK BIT(27) +#define B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK BIT(26) +#define B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK BIT(25) +#define B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK BIT(24) +#define B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK BIT(23) +#define B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK BIT(22) +#define B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK BIT(21) +#define B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK BIT(20) +#define B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK BIT(19) +#define B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK BIT(18) +#define B_AX_RX_RU0_ZERO_LEN_ERR_MSK BIT(17) +#define B_AX_RX_RU1_ZERO_LEN_ERR_MSK BIT(16) +#define B_AX_RX_RU2_ZERO_LEN_ERR_MSK BIT(15) +#define B_AX_RX_RU3_ZERO_LEN_ERR_MSK BIT(14) +#define B_AX_RX_RU4_ZERO_LEN_ERR_MSK BIT(13) +#define B_AX_RX_RU5_ZERO_LEN_ERR_MSK BIT(12) +#define B_AX_RX_RU6_ZERO_LEN_ERR_MSK BIT(11) +#define B_AX_RX_RU7_ZERO_LEN_ERR_MSK BIT(10) +#define B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK BIT(9) +#define B_AX_RX_CSI_ZERO_LEN_ERR_MSK BIT(8) +#define B_AX_PLE_DATA_OPT_FSM_HANG_MSK BIT(7) +#define B_AX_PLE_RXDATA_REQ_BUF_FSM_HANG_MSK BIT(6) +#define B_AX_PLE_TXRPT_REQ_BUF_FSM_HANG_MSK BIT(5) +#define B_AX_PLE_WD_OPT_FSM_HANG_MSK BIT(4) +#define B_AX_PLE_ENQ_FSM_HANG_MSK BIT(3) +#define B_AX_RXDATA_ENQUE_ORDER_ERR_MSK BIT(2) +#define B_AX_RXSTS_ENQUE_ORDER_ERR_MSK BIT(1) +#define B_AX_RX_CSI_PKT_NUM_ERR_MSK BIT(0) +#define B_AX_RX_ERR_IMR_CLR_V1 (B_AX_RXSTS_ENQUE_ORDER_ERR_MSK | \ + B_AX_RXDATA_ENQUE_ORDER_ERR_MSK | \ + B_AX_RX_CSI_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU7_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU6_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU5_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU4_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU3_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU2_ZERO_LEN_ERR_MSK | \ 
+ B_AX_RX_RU1_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU0_ZERO_LEN_ERR_MSK | \ + B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_GET_NULL_PKT_ERR_MSK) +#define B_AX_RX_ERR_IMR_SET_V1 (B_AX_RXSTS_ENQUE_ORDER_ERR_MSK | \ + B_AX_RXDATA_ENQUE_ORDER_ERR_MSK | \ + B_AX_RX_CSI_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RXSTS_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU7_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU6_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU5_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU4_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU3_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU2_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU1_ZERO_LEN_ERR_MSK | \ + B_AX_RX_RU0_ZERO_LEN_ERR_MSK | \ + B_AX_RX_F2PCMD_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_TXRPT_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_CSI_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RXSTS_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU7_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU6_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU5_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU4_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU3_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU2_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU1_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_RU0_FSM_HANG_MSK_ERR_MSK | \ + B_AX_RX_GET_NULL_PKT_ERR_MSK) + +#define R_AX_TX_ERR_FLAG_IMR 0xC870 +#define R_AX_TX_ERR_FLAG_IMR_C1 0xE870 +#define B_AX_TX_RU0_FSM_HANG_ERR_MSK BIT(31) +#define B_AX_TX_RU1_FSM_HANG_ERR_MSK BIT(30) +#define B_AX_TX_RU2_FSM_HANG_ERR_MSK BIT(29) +#define B_AX_TX_RU3_FSM_HANG_ERR_MSK BIT(28) +#define B_AX_TX_RU4_FSM_HANG_ERR_MSK BIT(27) +#define B_AX_TX_RU5_FSM_HANG_ERR_MSK BIT(26) +#define B_AX_TX_RU6_FSM_HANG_ERR_MSK BIT(25) +#define B_AX_TX_RU7_FSM_HANG_ERR_MSK BIT(24) +#define B_AX_TX_RU8_FSM_HANG_ERR_MSK BIT(23) +#define B_AX_TX_RU9_FSM_HANG_ERR_MSK BIT(22) +#define B_AX_TX_RU10_FSM_HANG_ERR_MSK BIT(21) +#define B_AX_TX_RU11_FSM_HANG_ERR_MSK BIT(20) +#define B_AX_TX_RU12_FSM_HANG_ERR_MSK BIT(19) +#define B_AX_TX_RU13_FSM_HANG_ERR_MSK BIT(18) +#define B_AX_TX_RU14_FSM_HANG_ERR_MSK BIT(17) +#define B_AX_TX_RU15_FSM_HANG_ERR_MSK BIT(16) +#define B_AX_TX_CSI_FSM_HANG_ERR_MSK BIT(15) +#define B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK BIT(14) +#define B_AX_TX_ERR_IMR_CLR_V1 (B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK | \ + B_AX_TX_CSI_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU7_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU6_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU5_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU4_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU3_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU2_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU1_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU0_FSM_HANG_ERR_MSK) +#define B_AX_TX_ERR_IMR_SET_V1 (B_AX_TX_WD_PLD_ID_FSM_HANG_ERR_MSK | \ + B_AX_TX_CSI_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU7_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU6_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU5_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU4_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU3_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU2_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU1_FSM_HANG_ERR_MSK | \ + B_AX_TX_RU0_FSM_HANG_ERR_MSK) + #define R_AX_TCR0 0xCA00 #define R_AX_TCR0_C1 0xEA00 #define B_AX_TCR_ZLD_NUM_MASK GENMASK(31, 24) @@ -2370,6 +2541,38 @@ #define B_AX_RXTRIG_EN BIT(16) #define B_AX_RXTRIG_USERINFO_2_MASK GENMASK(15, 0) +#define R_AX_TRXPTCL_ERROR_INDICA_MASK 0xCCBC +#define R_AX_TRXPTCL_ERROR_INDICA_MASK_C1 0xECBC +#define 
B_AX_WMAC_MODE BIT(22) +#define B_AX_WMAC_TIMETOUT_THR_MASK GENMASK(21, 16) +#define B_AX_RMAC_FTM BIT(8) +#define B_AX_RMAC_CSI BIT(7) +#define B_AX_TMAC_MIMO_CTRL BIT(6) +#define B_AX_TMAC_RXTB BIT(5) +#define B_AX_TMAC_HWSIGB_GEN BIT(4) +#define B_AX_TMAC_TXPLCP BIT(3) +#define B_AX_TMAC_RESP BIT(2) +#define B_AX_TMAC_TXCTL BIT(1) +#define B_AX_TMAC_MACTX BIT(0) +#define B_AX_TMAC_IMR_CLR_V1 (B_AX_TMAC_MACTX | \ + B_AX_TMAC_TXCTL | \ + B_AX_TMAC_RESP | \ + B_AX_TMAC_TXPLCP | \ + B_AX_TMAC_HWSIGB_GEN | \ + B_AX_TMAC_RXTB | \ + B_AX_TMAC_MIMO_CTRL | \ + B_AX_RMAC_CSI | \ + B_AX_RMAC_FTM) +#define B_AX_TMAC_IMR_SET_V1 (B_AX_TMAC_MACTX | \ + B_AX_TMAC_TXCTL | \ + B_AX_TMAC_RESP | \ + B_AX_TMAC_TXPLCP | \ + B_AX_TMAC_HWSIGB_GEN | \ + B_AX_TMAC_RXTB | \ + B_AX_TMAC_MIMO_CTRL | \ + B_AX_RMAC_CSI | \ + B_AX_RMAC_FTM) + #define R_AX_WMAC_TX_TF_INFO_0 0xCCD0 #define R_AX_WMAC_TX_TF_INFO_0_C1 0xECD0 #define B_AX_WMAC_TX_TF_INFO_SEL_MASK GENMASK(2, 0) @@ -2384,11 +2587,55 @@ #define R_AX_TMAC_ERR_IMR_ISR 0xCCEC #define R_AX_TMAC_ERR_IMR_ISR_C1 0xECEC +#define B_AX_TMAC_TXPLCP_ERR_CLR BIT(19) +#define B_AX_TMAC_RESP_ERR_CLR BIT(18) +#define B_AX_TMAC_TXCTL_ERR_CLR BIT(17) +#define B_AX_TMAC_MACTX_ERR_CLR BIT(16) +#define B_AX_TMAC_TXPLCP_ERR BIT(14) +#define B_AX_TMAC_RESP_ERR BIT(13) +#define B_AX_TMAC_TXCTL_ERR BIT(12) +#define B_AX_TMAC_MACTX_ERR BIT(11) +#define B_AX_TMAC_TXPLCP_INT_EN BIT(10) +#define B_AX_TMAC_RESP_INT_EN BIT(9) +#define B_AX_TMAC_TXCTL_INT_EN BIT(8) +#define B_AX_TMAC_MACTX_INT_EN BIT(7) +#define B_AX_WMAC_INT_MODE BIT(6) +#define B_AX_TMAC_TIMETOUT_THR_MASK GENMASK(5, 0) +#define B_AX_TMAC_IMR_CLR (B_AX_TMAC_MACTX_INT_EN | \ + B_AX_TMAC_TXCTL_INT_EN | \ + B_AX_TMAC_RESP_INT_EN | \ + B_AX_TMAC_TXPLCP_INT_EN) +#define B_AX_TMAC_IMR_SET (B_AX_TMAC_MACTX_INT_EN | \ + B_AX_TMAC_TXCTL_INT_EN | \ + B_AX_TMAC_RESP_INT_EN | \ + B_AX_TMAC_TXPLCP_INT_EN) #define R_AX_DBGSEL_TRXPTCL 0xCCF4 #define R_AX_DBGSEL_TRXPTCL_C1 0xECF4 #define B_AX_DBGSEL_TRXPTCL_MASK GENMASK(7, 0) +#define R_AX_PHYINFO_ERR_IMR_V1 0xCCF8 +#define R_AX_PHYINFO_ERR_IMR_V1_C1 0xECF8 +#define B_AX_PHYINTF_TIMEOUT_THR_MSAK_V1 GENMASK(21, 16) +#define B_AX_CSI_ON_TIMEOUT_EN BIT(5) +#define B_AX_STS_ON_TIMEOUT_EN BIT(4) +#define B_AX_DATA_ON_TIMEOUT_EN BIT(3) +#define B_AX_OFDM_CCA_TIMEOUT_EN BIT(2) +#define B_AX_CCK_CCA_TIMEOUT_EN BIT(1) +#define B_AX_PHY_TXON_TIMEOUT_EN BIT(0) +#define B_AX_PHYINFO_IMR_CLR_V1 (B_AX_PHY_TXON_TIMEOUT_EN | \ + B_AX_CCK_CCA_TIMEOUT_EN | \ + B_AX_OFDM_CCA_TIMEOUT_EN | \ + B_AX_DATA_ON_TIMEOUT_EN | \ + B_AX_STS_ON_TIMEOUT_EN | \ + B_AX_CSI_ON_TIMEOUT_EN) +#define B_AX_PHYINFO_IMR_SET_V1 (B_AX_PHY_TXON_TIMEOUT_EN | \ + B_AX_CCK_CCA_TIMEOUT_EN | \ + B_AX_OFDM_CCA_TIMEOUT_EN | \ + B_AX_DATA_ON_TIMEOUT_EN | \ + B_AX_STS_ON_TIMEOUT_EN | \ + B_AX_CSI_ON_TIMEOUT_EN) + #define R_AX_PHYINFO_ERR_IMR 0xCCFC #define R_AX_PHYINFO_ERR_IMR_C1 0xECFC #define B_AX_CSI_ON_TIMEOUT BIT(29) @@ -2404,6 +2651,12 @@ #define B_AX_CCK_CCA_TIMEOUT_INT_EN BIT(17) #define B_AX_PHY_TXON_TIMEOUT_INT_EN BIT(16) #define B_AX_PHYINTF_TIMEOUT_THR_MSAK GENMASK(5, 0) +#define B_AX_PHYINFO_IMR_EN_ALL (B_AX_PHY_TXON_TIMEOUT_INT_EN | \ + B_AX_CCK_CCA_TIMEOUT_INT_EN | \ + B_AX_OFDM_CCA_TIMEOUT_INT_EN | \ + B_AX_DATA_ON_TIMEOUT_INT_EN | \ + B_AX_STS_ON_TIMEOUT_INT_EN | \ + B_AX_CSI_ON_TIMEOUT_INT_EN) #define R_AX_PHYINFO_ERR_ISR 0xCCFC #define R_AX_PHYINFO_ERR_ISR_C1 0xECFC @@ -2579,6 +2832,51 @@ #define B_AX_BMAC_DMA_TIMEOUT_FLAG BIT(2) #define B_AX_BMAC_DATA_ON_TO_IDLE_TIMEOUT_FLAG BIT(1) #define 
B_AX_BMAC_CCA_TO_IDLE_TIMEOUT_FLAG BIT(0) +#define B_AX_RMAC_IMR_CLR (B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN | \ + B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN | \ + B_AX_RMAC_DMA_TIMEOUT_INT_EN | \ + B_AX_RMAC_CCA_TIMEOUT_INT_EN | \ + B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN | \ + B_AX_RMAC_CSI_TIMEOUT_INT_EN | \ + B_AX_RMAC_RX_TIMEOUT_INT_EN | \ + B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN) +#define B_AX_RMAC_IMR_SET (B_AX_RMAC_DMA_TIMEOUT_INT_EN | \ + B_AX_RMAC_CSI_TIMEOUT_INT_EN | \ + B_AX_RMAC_RX_TIMEOUT_INT_EN | \ + B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN) + +#define R_AX_RX_ERR_IMR 0xCEF8 +#define R_AX_RX_ERR_IMR_C1 0xEEF8 +#define B_AX_RX_ERR_TRIG_ACT_TO_MSK BIT(9) +#define B_AX_RX_ERR_STS_ACT_TO_MSK BIT(8) +#define B_AX_RX_ERR_CSI_ACT_TO_MSK BIT(7) +#define B_AX_RX_ERR_ACT_TO_MSK BIT(6) +#define B_AX_CSI_DATAON_ASSERT_TO_MSK BIT(5) +#define B_AX_DATAON_ASSERT_TO_MSK BIT(4) +#define B_AX_CCA_ASSERT_TO_MSK BIT(3) +#define B_AX_RX_ERR_DMA_TO_MSK BIT(2) +#define B_AX_RX_ERR_DATA_TO_MSK BIT(1) +#define B_AX_RX_ERR_CCA_TO_MSK BIT(0) +#define B_AX_RMAC_IMR_CLR_V1 (B_AX_RX_ERR_CCA_TO_MSK | \ + B_AX_RX_ERR_DATA_TO_MSK | \ + B_AX_RX_ERR_DMA_TO_MSK | \ + B_AX_CCA_ASSERT_TO_MSK | \ + B_AX_DATAON_ASSERT_TO_MSK | \ + B_AX_CSI_DATAON_ASSERT_TO_MSK | \ + B_AX_RX_ERR_ACT_TO_MSK | \ + B_AX_RX_ERR_CSI_ACT_TO_MSK | \ + B_AX_RX_ERR_STS_ACT_TO_MSK | \ + B_AX_RX_ERR_TRIG_ACT_TO_MSK) +#define B_AX_RMAC_IMR_SET_V1 (B_AX_RX_ERR_CCA_TO_MSK | \ + B_AX_RX_ERR_DATA_TO_MSK | \ + B_AX_RX_ERR_DMA_TO_MSK | \ + B_AX_CCA_ASSERT_TO_MSK | \ + B_AX_DATAON_ASSERT_TO_MSK | \ + B_AX_CSI_DATAON_ASSERT_TO_MSK | \ + B_AX_RX_ERR_ACT_TO_MSK | \ + B_AX_RX_ERR_CSI_ACT_TO_MSK | \ + B_AX_RX_ERR_STS_ACT_TO_MSK | \ + B_AX_RX_ERR_TRIG_ACT_TO_MSK) #define R_AX_RMAC_PLCP_MON 0xCEF8 #define R_AX_RMAC_PLCP_MON_C1 0xEEF8 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 1af605748740..9871ed78e44c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -434,6 +434,23 @@ static const struct rtw89_imr_info rtw8852a_imr_info = { .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR, .bbrpt_err_imr_set = 0, .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR, + .ptcl_imr_clr = B_AX_PTCL_IMR_CLR, + .ptcl_imr_set = B_AX_PTCL_IMR_SET, + .cdma_imr_0_reg = R_AX_DLE_CTRL, + .cdma_imr_0_clr = B_AX_DLE_IMR_CLR, + .cdma_imr_0_set = B_AX_DLE_IMR_SET, + .cdma_imr_1_reg = 0, + .cdma_imr_1_clr = 0, + .cdma_imr_1_set = 0, + .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR, + .phy_intf_imr_clr = 0, + .phy_intf_imr_set = 0, + .rmac_imr_reg = R_AX_RMAC_ERR_ISR, + .rmac_imr_clr = B_AX_RMAC_IMR_CLR, + .rmac_imr_set = B_AX_RMAC_IMR_SET, + .tmac_imr_reg = R_AX_TMAC_ERR_IMR_ISR, + .tmac_imr_clr = B_AX_TMAC_IMR_CLR, + .tmac_imr_set = B_AX_TMAC_IMR_SET, }; static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 18c17b4bbc18..9689fc5a6372 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -77,6 +77,23 @@ static const struct rtw89_imr_info rtw8852c_imr_info = { .bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR, .bbrpt_err_imr_set = R_AX_BBRPT_CHINFO_IMR_SET_V1, .bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR, + .ptcl_imr_clr = B_AX_PTCL_IMR_CLR_V1, + .ptcl_imr_set = B_AX_PTCL_IMR_SET_V1, + .cdma_imr_0_reg = R_AX_RX_ERR_FLAG_IMR, + .cdma_imr_0_clr = B_AX_RX_ERR_IMR_CLR_V1, + .cdma_imr_0_set = 
B_AX_RX_ERR_IMR_SET_V1, + .cdma_imr_1_reg = R_AX_TX_ERR_FLAG_IMR, + .cdma_imr_1_clr = B_AX_TX_ERR_IMR_CLR_V1, + .cdma_imr_1_set = B_AX_TX_ERR_IMR_SET_V1, + .phy_intf_imr_reg = R_AX_PHYINFO_ERR_IMR_V1, + .phy_intf_imr_clr = B_AX_PHYINFO_IMR_CLR_V1, + .phy_intf_imr_set = B_AX_PHYINFO_IMR_SET_V1, + .rmac_imr_reg = R_AX_RX_ERR_IMR, + .rmac_imr_clr = B_AX_RMAC_IMR_CLR_V1, + .rmac_imr_set = B_AX_RMAC_IMR_SET_V1, + .tmac_imr_reg = R_AX_TRXPTCL_ERROR_INDICA_MASK, + .tmac_imr_clr = B_AX_TMAC_IMR_CLR_V1, + .tmac_imr_set = B_AX_TMAC_IMR_SET_V1, }; static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev) -- cgit v1.2.3 From 9f405b0162ba92bf8317ce1329aad96fecc89273 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 8 Apr 2022 08:13:44 +0800 Subject: rtw89: ser: configure top ERR IMR for firmware to recover Turn on ERR IMR, and then firmware can capture interrupts reflecting errors to recover hardware states. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 15 +++++++++++++++ drivers/net/wireless/realtek/rtw89/reg.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index c10eca44dbe9..234635098a13 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2842,6 +2842,19 @@ static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, return 0; } +static void rtw89_mac_err_imr_ctrl(struct rtw89_dev *rtwdev, bool en) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + + rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR, + en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS); + rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR, + en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS); + if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta) + rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1, + en ? 
CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS); +} + static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable) { int ret = 0; @@ -2923,6 +2936,8 @@ static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev) return ret; } + rtw89_mac_err_imr_ctrl(rtwdev, true); + ret = set_host_rpr(rtwdev); if (ret) { rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret); diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 3d66d3579af4..e5f8374f49ad 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -504,6 +504,21 @@ #define B_AX_WDE_EMPTY_QUE_CMAC0_MBH BIT(1) #define B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC BIT(0) +#define R_AX_DMAC_ERR_IMR 0x8520 +#define B_AX_DLE_CPUIO_ERR_INT_EN BIT(10) +#define B_AX_APB_BRIDGE_ERR_INT_EN BIT(9) +#define B_AX_DISPATCH_ERR_INT_EN BIT(8) +#define B_AX_PKTIN_ERR_INT_EN BIT(7) +#define B_AX_PLE_DLE_ERR_INT_EN BIT(6) +#define B_AX_TXPKTCTRL_ERR_INT_EN BIT(5) +#define B_AX_WDE_DLE_ERR_INT_EN BIT(4) +#define B_AX_STA_SCHEDULER_ERR_INT_EN BIT(3) +#define B_AX_MPDU_ERR_INT_EN BIT(2) +#define B_AX_WSEC_ERR_INT_EN BIT(1) +#define B_AX_WDRLS_ERR_INT_EN BIT(0) +#define DMAC_ERR_IMR_EN GENMASK(31, 0) +#define DMAC_ERR_IMR_DIS 0 + #define R_AX_DMAC_ERR_ISR 0x8524 #define B_AX_DLE_CPUIO_ERR_FLAG BIT(10) #define B_AX_APB_BRIDGE_ERR_FLAG BIT(9) @@ -1805,6 +1820,20 @@ #define B_AX_TXSC_40M_MASK GENMASK(7, 4) #define B_AX_TXSC_20M_MASK GENMASK(3, 0) +#define R_AX_CMAC_ERR_IMR 0xC160 +#define R_AX_CMAC_ERR_IMR_C1 0xE160 +#define B_AX_WMAC_TX_ERR_IND_EN BIT(7) +#define B_AX_WMAC_RX_ERR_IND_EN BIT(6) +#define B_AX_TXPWR_CTRL_ERR_IND_EN BIT(5) +#define B_AX_PHYINTF_ERR_IND_EN BIT(4) +#define B_AX_DMA_TOP_ERR_IND_EN BIT(3) +#define B_AX_PTCL_TOP_ERR_IND_EN BIT(1) +#define B_AX_SCHEDULE_TOP_ERR_IND_EN BIT(0) +#define CMAC0_ERR_IMR_EN GENMASK(31, 0) +#define CMAC1_ERR_IMR_EN GENMASK(31, 0) +#define CMAC0_ERR_IMR_DIS 0 +#define CMAC1_ERR_IMR_DIS 0 + #define R_AX_CMAC_ERR_ISR 0xC164 #define R_AX_CMAC_ERR_ISR_C1 0xE164 #define B_AX_WMAC_TX_ERR_IND BIT(7) -- cgit v1.2.3 From 9a1ab283c709a058593f1bf0d69a05a5e8b90f72 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 8 Apr 2022 08:13:45 +0800 Subject: rtw89: change station scheduler setting for hardware TX mode The bit B_AX_SS_NONEMPTY_SS2FINFO_EN should be clear, because we configure C-MAC as hardware TX/RX mode. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 234635098a13..b60b9255e3fa 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1593,8 +1593,8 @@ static int sta_sch_init(struct rtw89_dev *rtwdev) return ret; } - rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG | - B_AX_SS_NONEMPTY_SS2FINFO_EN); + rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG); + rtw89_write32_clr(rtwdev, R_AX_SS_CTRL, B_AX_SS_NONEMPTY_SS2FINFO_EN); _patch_ss2f_path(rtwdev); -- cgit v1.2.3 From 181751970107746c1b4e6870b01d74e4ac0f3fb8 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 8 Apr 2022 08:13:46 +0800 Subject: rtw89: reset BA CAM BA CAM is used to react receiving AMPDU packets, so reset them to be expected initial state. 
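The reset is a write-then-poll sequence: request a reset of all BA CAM entries through B_AX_BACAM_RST_MASK, then busy-wait (1 us polling interval, 1 ms timeout) until hardware clears the field. A minimal sketch of that sequence, mirroring the rst_bacam() helper introduced by the patch below (the sketch's function name is illustrative):

/* Sketch of the BA CAM reset; registers, masks and accessors are the ones
 * used by this patch. */
static void bacam_reset_sketch(struct rtw89_dev *rtwdev)
{
	u32 val32;
	int ret;

	/* request a reset of all BA CAM entries */
	rtw89_write32_mask(rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK,
			   S_AX_BACAM_RST_ALL);

	/* hardware clears the request field once the reset has completed */
	ret = read_poll_timeout_atomic(rtw89_read32_mask, val32, val32 == 0,
				       1, 1000, false, rtwdev,
				       R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK);
	if (ret)
		rtw89_warn(rtwdev, "failed to reset BA CAM\n");
}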
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 18 ++++++++++++++++++ drivers/net/wireless/realtek/rtw89/reg.h | 2 ++ 2 files changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index b60b9255e3fa..ba8c7d3e176c 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1964,6 +1964,21 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) return 0; } +static void rst_bacam(struct rtw89_dev *rtwdev) +{ + u32 val32; + int ret; + + rtw89_write32_mask(rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK, + S_AX_BACAM_RST_ALL); + + ret = read_poll_timeout_atomic(rtw89_read32_mask, val32, val32 == 0, + 1, 1000, false, + rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK); + if (ret) + rtw89_warn(rtwdev, "failed to reset BA CAM\n"); +} + static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) { #define TRXCFG_RMAC_CCA_TO 32 @@ -1978,6 +1993,9 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) if (ret) return ret; + if (mac_idx == RTW89_MAC_0) + rst_bacam(rtwdev); + reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx); rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index e5f8374f49ad..bc343e756aad 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2819,6 +2819,8 @@ #define R_AX_RESPBA_CAM_CTRL 0xCE3C #define R_AX_RESPBA_CAM_CTRL_C1 0xEE3C #define B_AX_SSN_SEL BIT(2) +#define B_AX_BACAM_RST_MASK GENMASK(1, 0) +#define S_AX_BACAM_RST_ALL 2 #define R_AX_PPDU_STAT 0xCE40 #define R_AX_PPDU_STAT_C1 0xEE40 -- cgit v1.2.3 From ec356ffb2917a29c6cb7c0d9030a072eb125c907 Mon Sep 17 00:00:00 2001 From: Chia-Yuan Li Date: Fri, 8 Apr 2022 08:13:47 +0800 Subject: rtw89: 8852c: disable firmware watchdog if CPU disabled Disable watchdog timer to prevent it timeout suddenly. 
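The watchdog registers sit in the WiFi CPU's local address space rather than in the normal MAC register map, so they are reached through an indirect access window: write the absolute address to R_AX_FILTER_MODEL_ADDR, then read or write R_AX_INDIR_ACCESS_ENTRY. A rough sketch of that access pattern (function names are hypothetical; the registers and base address are the ones added by the patch below):

/* Illustrative only: indirect access to a CPU-local register. */
static u32 example_cpu_local_read(struct rtw89_dev *rtwdev, u32 offset)
{
	u32 addr = CPU_LOCAL_BASE_ADDR + offset;

	/* select the target address, then access it through the window */
	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
	return rtw89_read32(rtwdev, R_AX_INDIR_ACCESS_ENTRY);
}

static void example_cpu_local_write(struct rtw89_dev *rtwdev, u32 offset,
				    u32 val)
{
	u32 addr = CPU_LOCAL_BASE_ADDR + offset;

	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr);
	rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, val);
}
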
Signed-off-by: Chia-Yuan Li Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 37 +++++++++++++++++++++++++++++++- drivers/net/wireless/realtek/rtw89/mac.h | 6 +++--- drivers/net/wireless/realtek/rtw89/reg.h | 16 ++++++++++++++ 3 files changed, 55 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index ba8c7d3e176c..a990fd845c25 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -10,7 +10,7 @@ #include "reg.h" #include "util.h" -const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_MAX] = { +const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = { [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, @@ -28,8 +28,27 @@ const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_MAX] = { [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR, [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR, [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR, + [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR, }; +static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset, + u32 val, enum rtw89_mac_mem_sel sel) +{ + u32 addr = rtw89_mac_mem_base_addrs[sel] + offset; + + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr); + rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, val); +} + +static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset, + enum rtw89_mac_mem_sel sel) +{ + u32 addr = rtw89_mac_mem_base_addrs[sel] + offset; + + rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, addr); + return rtw89_read32(rtwdev, R_AX_INDIR_ACCESS_ENTRY); +} + int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, enum rtw89_mac_hwmod_sel sel) { @@ -2965,6 +2984,19 @@ static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev) return 0; } +static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev) +{ + u32 val32; + + rtw89_mac_mem_write(rtwdev, R_AX_WDT_CTRL, + WDT_CTRL_ALL_DIS, RTW89_MAC_MEM_CPU_LOCAL); + + val32 = rtw89_mac_mem_read(rtwdev, R_AX_WDT_STATUS, RTW89_MAC_MEM_CPU_LOCAL); + val32 |= B_AX_FS_WDT_INT; + val32 &= ~B_AX_FS_WDT_INT_MSK; + rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL); +} + static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) { clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); @@ -2973,6 +3005,9 @@ static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev) rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY); rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); + + rtw89_disable_fw_watchdog(rtwdev); + rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); } diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index de65d9becb05..31d53de97cfc 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -245,6 +245,7 @@ enum rtw89_mac_dbg_port_sel { #define TXD_FIFO_1_BASE_ADDR 0x188A1080 #define TXDATA_FIFO_0_BASE_ADDR 0x18856000 #define TXDATA_FIFO_1_BASE_ADDR 0x188A1000 +#define CPU_LOCAL_BASE_ADDR 0x18003000 #define CCTL_INFO_SIZE 32 @@ -266,11 +267,10 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_TXD_FIFO_1, RTW89_MAC_MEM_TXDATA_FIFO_0, RTW89_MAC_MEM_TXDATA_FIFO_1, + 
RTW89_MAC_MEM_CPU_LOCAL, /* keep last */ - RTW89_MAC_MEM_LAST, - RTW89_MAC_MEM_MAX = RTW89_MAC_MEM_LAST, - RTW89_MAC_MEM_INVALID = RTW89_MAC_MEM_LAST, + RTW89_MAC_MEM_NUM, }; extern const u32 rtw89_mac_mem_base_addrs[]; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index bc343e756aad..15d29c226b0c 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3773,4 +3773,20 @@ #define B_IQKINF2_FCNT GENMASK(23, 16) #define B_IQKINF2_KCNT GENMASK(15, 8) #define B_IQKINF2_NCTLV GENMAKS(7, 0) + +/* WiFi CPU local domain */ +#define R_AX_WDT_CTRL 0x0040 +#define B_AX_WDT_EN BIT(31) +#define B_AX_WDT_OPT_RESET_PLATFORM_EN BIT(29) +#define B_AX_IO_HANG_IMR BIT(27) +#define B_AX_IO_HANG_CMAC_RDATA_EN BIT(26) +#define B_AX_IO_HANG_DMAC_EN BIT(25) +#define B_AX_WDT_CLR BIT(16) +#define B_AX_WDT_COUNT_MASK GENMASK(15, 0) +#define WDT_CTRL_ALL_DIS 0 + +#define R_AX_WDT_STATUS 0x0044 +#define B_AX_FS_WDT_INT BIT(8) +#define B_AX_FS_WDT_INT_MSK BIT(0) + #endif -- cgit v1.2.3 From d264edb1cc65df8e0054b275f7eaaddb795c1d18 Mon Sep 17 00:00:00 2001 From: Johnson Lin Date: Fri, 8 Apr 2022 08:13:48 +0800 Subject: rtw89: Skip useless dig gain and igi related settings for 8852C Separated DIG RX gain, IGI configurations from not supportted HW using "support_igi" capability flag. Signed-off-by: Johnson Lin Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/phy.c | 9 +++++++-- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 994869f72080..eed6dbd0d5db 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -2930,6 +2930,9 @@ static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev) u32 tmp; u8 i; + if (!rtwdev->hal.support_igi) + return; + tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW, B_PATH0_IB_PKPW_MSK); dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT); @@ -3184,6 +3187,9 @@ static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev) { struct rtw89_dig_info *dig = &rtwdev->dig; + if (!rtwdev->hal.support_igi) + return; + if (dig->force_gaincode_idx_en) { rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); rtw89_debug(rtwdev, RTW89_DBG_DIG, @@ -3309,8 +3315,7 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev) dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min, dig->igi_fa_rssi); - if (rtwdev->hal.support_igi) - rtw89_phy_dig_config_igi(rtwdev); + rtw89_phy_dig_config_igi(rtwdev); rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 9689fc5a6372..ca254339ea7a 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -598,6 +598,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .rf_base_addr = {0xe000, 0xf000}, .pwr_on_seq = NULL, .pwr_off_seq = NULL, + .dig_table = NULL, .hw_sec_hdr = true, .sec_ctrl_efuse_size = 4, .physical_efuse_size = 1216, -- cgit v1.2.3 From 065cf8f9777f28fcf1115a7e3a6cdc93ed2d30d1 Mon Sep 17 00:00:00 2001 From: Chia-Yuan Li Date: Fri, 8 Apr 2022 08:13:49 +0800 Subject: rtw89: 8852c: add 8852c specific BT-coexistence initial function 
Initialize registers to default values, such as PTA and GNT pin, and set pin MUX according to number of antenna on hardware module. Signed-off-by: Chia-Yuan Li Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408001353.17188-10-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 28 +++++++++++++ drivers/net/wireless/realtek/rtw89/mac.h | 2 + drivers/net/wireless/realtek/rtw89/reg.h | 33 +++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 59 +++++++++++++++++++++++++++ 4 files changed, 122 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index a990fd845c25..127d5d74c426 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -3973,6 +3973,34 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex } EXPORT_SYMBOL(rtw89_mac_coex_init); +int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev, + const struct rtw89_mac_ax_coex *coex) +{ + rtw89_write32_set(rtwdev, R_AX_BTC_CFG, + B_AX_BTC_EN | B_AX_BTG_LNA1_GAIN_SEL); + rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN); + rtw89_write16_set(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_EN); + rtw89_write16_clr(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_BRK_TXOP_EN); + + switch (coex->pta_mode) { + case RTW89_MAC_AX_COEX_RTK_MODE: + rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK, + MAC_AX_RTK_MODE); + rtw89_write32_mask(rtwdev, R_AX_RTK_MODE_CFG_V1, + B_AX_SAMPLE_CLK_MASK, MAC_AX_RTK_RATE); + break; + case RTW89_MAC_AX_COEX_CSR_MODE: + rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK, + MAC_AX_CSR_MODE); + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(rtw89_mac_coex_init_v1); + int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex_gnt *gnt_cfg) { diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 31d53de97cfc..9eb4afe348b3 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -829,6 +829,8 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable) void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx); void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop); int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex); +int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev, + const struct rtw89_mac_ax_coex *coex); int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex_gnt *gnt_cfg); int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 15d29c226b0c..98465d746989 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -87,6 +87,8 @@ #define B_AX_BTMODE_MASK GENMASK(7, 6) #define MAC_AX_BT_MODE_0_3 0 #define MAC_AX_BT_MODE_2 2 +#define MAC_AX_RTK_MODE 0 +#define MAC_AX_CSR_MODE 1 #define B_AX_ENBT BIT(5) #define B_AX_EROM_EN BIT(4) #define B_AX_ENUARTRX BIT(2) @@ -2962,11 +2964,40 @@ #define R_AX_TXPWR_ISR_C1 0xF9E4 #define R_AX_BTC_CFG 0xDA00 +#define B_AX_BTC_EN BIT(31) +#define B_AX_EN_EXT_BT_PINMUX BIT(29) +#define B_AX_BTC_RST BIT(28) +#define B_AX_BTC_DBG_SRC_SEL BIT(27) +#define B_AX_BTC_MODE_MASK GENMASK(25, 24) +#define B_AX_INV_WL_ACT2 BIT(17) +#define B_AX_BTG_LNA1_GAIN_SEL BIT(16) +#define 
B_AX_COEX_DLY_CLK_MASK GENMASK(15, 8) +#define B_AX_IGN_GNT_BT2_RX BIT(7) +#define B_AX_IGN_GNT_BT2_TX BIT(6) +#define B_AX_IGN_GNT_BT2 BIT(5) +#define B_AX_BTC_DBG_SEL_MASK GENMASK(4, 3) #define B_AX_DIS_BTC_CLK_G BIT(2) +#define B_AX_GNT_WL_RX_CTRL BIT(1) +#define B_AX_WL_SRC BIT(0) + +#define R_AX_RTK_MODE_CFG_V1 0xDA04 +#define R_AX_RTK_MODE_CFG_V1_C1 0xFA04 +#define B_AX_BT_BLE_EN_V1 BIT(24) +#define B_AX_BT_ULTRA_EN BIT(16) +#define B_AX_BT_L_RX_ULTRA_MASK GENMASK(15, 14) +#define B_AX_BT_L_TX_ULTRA_MASK GENMASK(13, 12) +#define B_AX_BT_H_RX_ULTRA_MASK GENMASK(11, 10) +#define B_AX_BT_H_TX_ULTRA_MASK GENMASK(9, 8) +#define B_AX_SAMPLE_CLK_MASK GENMASK(7, 0) #define R_AX_WL_PRI_MSK 0xDA10 #define B_AX_PTA_WL_PRI_MASK_BCNQ BIT(8) +#define R_AX_BT_CNT_CFG 0xDA10 +#define R_AX_BT_CNT_CFG_C1 0xFA10 +#define B_AX_BT_CNT_RST_V1 BIT(1) +#define B_AX_BT_CNT_EN BIT(0) + #define R_AX_BTC_FUNC_EN 0xDA20 #define R_AX_BTC_FUNC_EN_C1 0xFA20 #define B_AX_PTA_WL_TX_EN BIT(1) @@ -2999,6 +3030,8 @@ #define B_AX_WL_ACT_MASK_ENABLE BIT(1) #define B_AX_ENHANCED_BT BIT(0) +#define R_AX_BT_BREAK_TABLE 0xDA44 + #define R_AX_BT_STAST_HIGH 0xDA44 #define B_AX_STATIS_BT_HI_RX_MASK GENMASK(31, 16) #define B_AX_STATIS_BT_HI_TX_MASK GENMASK(15, 0) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index ca254339ea7a..38b138330716 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2022 Realtek Corporation */ +#include "coex.h" #include "debug.h" #include "fw.h" #include "mac.h" @@ -528,6 +529,62 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, } } +static +void rtw8852c_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) +{ + rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group); + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0); +} + +static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_module *module = &btc->mdinfo; + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_mac_ax_coex coex_params = { + .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE, + .direction = RTW89_MAC_AX_COEX_INNER, + }; + + /* PTA init */ + rtw89_mac_coex_init_v1(rtwdev, &coex_params); + + /* set WL Tx response = Hi-Pri */ + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true); + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true); + + /* set rf gnt debug off */ + rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0); + + /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */ + if (module->ant.type == BTC_ANT_SHARED) { + rtw8852c_set_trx_mask(rtwdev, + RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff); + rtw8852c_set_trx_mask(rtwdev, + RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff); + /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */ + rtw8852c_set_trx_mask(rtwdev, + RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); + } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */ + rtw8852c_set_trx_mask(rtwdev, + RF_PATH_A, BTC_BT_SS_GROUP, 0x5df); + rtw8852c_set_trx_mask(rtwdev, + RF_PATH_B, BTC_BT_SS_GROUP, 0x5df); + } + + /* set PTA break table */ + rtw89_write32(rtwdev, R_AX_BT_BREAK_TABLE, BTC_BREAK_PARAM); + + /* enable BT counter 0xda10[1:0] = 2b'11 */ + 
rtw89_write32_set(rtwdev, + R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN | + B_AX_BT_CNT_RST_V1); + btc->cx.wl.status.map.init_ok = true; +} + static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev) { int ret; @@ -588,6 +645,8 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1, .stop_sch_tx = rtw89_mac_stop_sch_tx_v1, .resume_sch_tx = rtw89_mac_resume_sch_tx_v1, + + .btc_init_cfg = rtw8852c_btc_init_cfg, }; const struct rtw89_chip_info rtw8852c_chip_info = { -- cgit v1.2.3 From af5175acc8e2fa7990c59a3e4420bbe74e047d40 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 7 Apr 2022 13:14:44 -0700 Subject: rtw89: rtw89_ser: add const to struct state_ent and event_ent Change the struct and the uses to const to reduce data. $ size drivers/net/wireless/realtek/rtw89/ser.o* (x86-64 defconfig w/ rtw89) text data bss dec hex filename 3741 8 0 3749 ea5 drivers/net/wireless/realtek/rtw89/ser.o.new 3437 312 0 3749 ea5 drivers/net/wireless/realtek/rtw89/ser.o.old Signed-off-by: Joe Perches Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/2fd88e6119f62b968477ef9781abb1832d399fd6.camel@perches.com --- drivers/net/wireless/realtek/rtw89/core.h | 4 ++-- drivers/net/wireless/realtek/rtw89/ser.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index a6c075ac67d5..ec181c1989bc 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2948,8 +2948,8 @@ struct rtw89_ser { struct work_struct ser_hdl_work; struct delayed_work ser_alarm_work; - struct state_ent *st_tbl; - struct event_ent *ev_tbl; + const struct state_ent *st_tbl; + const struct event_ent *ev_tbl; struct list_head msg_q; spinlock_t msg_q_lock; /* lock when read/write ser msg */ DECLARE_BITMAP(flags, RTW89_NUM_OF_SER_FLAGS); diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 5aebd6839d29..9e95ed972710 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -632,7 +632,7 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) } } -static struct event_ent ser_ev_tbl[] = { +static const struct event_ent ser_ev_tbl[] = { {SER_EV_NONE, "SER_EV_NONE"}, {SER_EV_STATE_IN, "SER_EV_STATE_IN"}, {SER_EV_STATE_OUT, "SER_EV_STATE_OUT"}, @@ -648,7 +648,7 @@ static struct event_ent ser_ev_tbl[] = { {SER_EV_MAXX, "SER_EV_MAX"} }; -static struct state_ent ser_st_tbl[] = { +static const struct state_ent ser_st_tbl[] = { {SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl}, {SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl}, {SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl}, -- cgit v1.2.3 From c1edc86472fc3a5aa3b5c5c53c4e20f6a24992a6 Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Thu, 7 Apr 2022 17:58:53 +0800 Subject: rtw88: add ieee80211:sta_rc_update ops Adding this allows us to get notification when bitrate configuration of the station changes. Update according parameters to firmware so the rate control algorithm can work properly. 
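mac80211 calls the sta_rc_update op with a bitmask describing which rate-control parameters changed, and the driver only has to act on the bits it cares about. A minimal sketch of such a handler (handler name hypothetical; the flag and the rtw88 helper are the ones used in the patch below; mac80211 reports other changes such as supported rates or NSS through the same bitmask):

/* Illustrative only: reacting to a station bandwidth change. */
static void example_sta_rc_update(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta, u32 changed)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	/* refresh the rate mask in firmware so rate control adapts
	 * to the station's new bandwidth
	 */
	if (changed & IEEE80211_RC_BW_CHANGED)
		rtw_update_sta_info(rtwdev, si, true);
}
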
Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407095858.46807-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 7 +++---- drivers/net/wireless/realtek/rtw88/fw.h | 3 ++- drivers/net/wireless/realtek/rtw88/mac80211.c | 14 +++++++++++++- drivers/net/wireless/realtek/rtw88/main.c | 7 ++++--- drivers/net/wireless/realtek/rtw88/main.h | 4 ++-- drivers/net/wireless/realtek/rtw88/phy.c | 2 +- 6 files changed, 25 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index aa2aeb5fb2cc..941df27f0b69 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -585,10 +585,10 @@ void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } -void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) +void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, + bool reset_ra_mask) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; - bool no_update = si->updated; bool disable_pt = true; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO); @@ -599,7 +599,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable); SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode); SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en); - SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update); + SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask); SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable); SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt); SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff)); @@ -608,7 +608,6 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24); si->init_ra_lv = 0; - si->updated = true; rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index b59d2cbad5d7..3486eb9585de 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -791,7 +791,8 @@ void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data); void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data); void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); -void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); +void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, + bool reset_ra_mask); void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn); void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev); void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 5cdc54c9a9aa..c9341af49364 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -694,7 +694,7 @@ static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta) } si->use_cfg_mask = true; - rtw_update_sta_info(br_data->rtwdev, si); + rtw_update_sta_info(br_data->rtwdev, si, true); } static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev, @@ -850,6 +850,17 @@ static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw, return 0; } +static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed) +{ + 
struct rtw_dev *rtwdev = hw->priv; + struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; + + if (changed & IEEE80211_RC_BW_CHANGED) + rtw_update_sta_info(rtwdev, si, true); +} + const struct ieee80211_ops rtw_ops = { .tx = rtw_ops_tx, .wake_tx_queue = rtw_ops_wake_tx_queue, @@ -879,6 +890,7 @@ const struct ieee80211_ops rtw_ops = { .reconfig_complete = rtw_reconfig_complete, .hw_scan = rtw_ops_hw_scan, .cancel_hw_scan = rtw_ops_cancel_hw_scan, + .sta_rc_update = rtw_ops_sta_rc_update, .set_sar_specs = rtw_ops_set_sar_specs, #ifdef CONFIG_PM .suspend = rtw_ops_suspend, diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 1a4c8c989ce9..66b6c140e69a 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -313,7 +313,7 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_init(rtwdev, sta->txq[i]); - rtw_update_sta_info(rtwdev, si); + rtw_update_sta_info(rtwdev, si, true); rtw_fw_media_status_report(rtwdev, si->mac_id, true); rtwdev->sta_cnt++; @@ -1105,7 +1105,8 @@ static u64 rtw_rate_mask_cfg(struct rtw_dev *rtwdev, struct rtw_sta_info *si, return ra_mask; } -void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) +void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, + bool reset_ra_mask) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct ieee80211_sta *sta = si->sta; @@ -1223,7 +1224,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) si->ra_mask = ra_mask; si->rate_id = rate_id; - rtw_fw_send_ra_info(rtwdev, si); + rtw_fw_send_ra_info(rtwdev, si, reset_ra_mask); } static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 17815af9dd4e..f694a2d86e0b 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -753,7 +753,6 @@ struct rtw_sta_info { u8 ldpc_en:2; bool sgi_enable; bool vht_enable; - bool updated; u8 init_ra_lv; u64 ra_mask; @@ -2145,7 +2144,8 @@ void rtw_chip_prepare_tx(struct rtw_dev *rtwdev); void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u32 config); void rtw_tx_report_purge_timer(struct timer_list *t); -void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si); +void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, + bool reset_ra_mask); void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, const u8 *mac_addr, bool hw_scan); void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index e505d17f107e..8982e0c98dac 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -536,7 +536,7 @@ static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta) struct rtw_dev *rtwdev = data; struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; - rtw_update_sta_info(rtwdev, si); + rtw_update_sta_info(rtwdev, si, false); } static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev) -- cgit v1.2.3 From 6723c0cde84fde582a261c186ce84100dcfa0019 Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Thu, 7 Apr 2022 17:58:54 +0800 Subject: rtw88: fix incorrect frequency reported We should only fill in frequency reported by 
firmware during scan. Add this so frames won't be dropped by mac80211 due to frequency mismatch. Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407095858.46807-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/rx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c index d2d607e22198..84aedabdf285 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.c +++ b/drivers/net/wireless/realtek/rtw88/rx.c @@ -158,7 +158,8 @@ void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev, memset(rx_status, 0, sizeof(*rx_status)); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; - if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD) && + test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtw_set_rx_freq_by_pktstat(pkt_stat, rx_status); if (pkt_stat->crc_err) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; -- cgit v1.2.3 From f2217968ffdae702c21cc00fa804fbbd9ee6bb4b Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Thu, 7 Apr 2022 17:58:55 +0800 Subject: rtw88: Add update beacon flow for AP mode To support stations in power saving mode, AP should notify stations that there are frames buffered at the AP via TIM during beacons. Driver used to transmit identical beacons that were downloaded to hardware during the initiation phase. This beacon will become obsolete over time. If the beacon does not contain sufficient information, station would not be able to percept that there is data to receive. Hence it won't wake up and start the PS-poll procedure, this could lead to timeout and/or lost data segments. In order to resolve this issue, driver will now download beacon to hardware whenever the content is updated. Enable hardware to update dtim_count for more efficiency, this reduces the overhead of downloading beacon at every beacon interval since most of the time only the dtim_count needs to be updated. Change queue mapping for broadcast/multicast frames to high queue, so these frames can be prioritized and sent when dtim_count is zero. 
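The key piece is that mac80211 can report the offset of the TIM element when it builds the beacon, so the driver knows where the hardware should patch in the current dtim_count and traffic bitmap. A minimal sketch of that step (the function name is hypothetical; ieee80211_beacon_get_tim() and the tim_offset field follow the patch below, which later copies the offset, minus the 802.11 header length, into the TX descriptor):

/* Illustrative only: fetch a beacon and record where its TIM IE starts. */
static struct sk_buff *example_get_beacon(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif,
					  struct rtw_rsvd_page *rsvd_pkt)
{
	u16 tim_offset;
	struct sk_buff *skb;

	/* mac80211 builds the beacon and reports the TIM IE offset */
	skb = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL);
	if (!skb)
		return NULL;

	/* remember the offset so the TX path can tell the hardware
	 * where to keep dtim_count up to date in every beacon
	 */
	rsvd_pkt->tim_offset = tim_offset;

	return skb;
}
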
Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407095858.46807-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 4 +++- drivers/net/wireless/realtek/rtw88/fw.h | 1 + drivers/net/wireless/realtek/rtw88/mac80211.c | 17 ++++++++++++++++- drivers/net/wireless/realtek/rtw88/main.c | 6 ++++++ drivers/net/wireless/realtek/rtw88/main.h | 2 ++ drivers/net/wireless/realtek/rtw88/pci.c | 3 +++ drivers/net/wireless/realtek/rtw88/reg.h | 2 ++ drivers/net/wireless/realtek/rtw88/tx.c | 17 +++++++++++++++++ drivers/net/wireless/realtek/rtw88/tx.h | 4 ++++ 9 files changed, 54 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 941df27f0b69..c1af2704bb86 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -1047,6 +1047,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw, struct rtw_vif *rtwvif; struct sk_buff *skb_new; struct cfg80211_ssid *ssid; + u16 tim_offset; if (rsvd_pkt->type == RSVD_DUMMY) { skb_new = alloc_skb(1, GFP_KERNEL); @@ -1065,7 +1066,8 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw, switch (rsvd_pkt->type) { case RSVD_BEACON: - skb_new = ieee80211_beacon_get(hw, vif); + skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL); + rsvd_pkt->tim_offset = tim_offset; break; case RSVD_PS_POLL: skb_new = ieee80211_pspoll_get(hw, vif); diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 3486eb9585de..734113fba184 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -172,6 +172,7 @@ struct rtw_rsvd_page { struct sk_buff *skb; enum rtw_rsvd_packet_type type; u8 page; + u16 tim_offset; bool add_txdesc; struct cfg80211_ssid *ssid; u16 probe_req_size; diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index c9341af49364..1ee41dfda5e1 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -402,8 +402,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, coex_stat->wl_beacon_interval = conf->beacon_int; } - if (changed & BSS_CHANGED_BEACON) + if (changed & BSS_CHANGED_BEACON) { + rtw_set_dtim_period(rtwdev, conf->dtim_period); rtw_fw_download_rsvd_page(rtwdev); + } if (changed & BSS_CHANGED_BEACON_ENABLED) { if (conf->enable_beacon) @@ -474,6 +476,18 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw, return 0; } +static int rtw_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) +{ + struct rtw_dev *rtwdev = hw->priv; + + mutex_lock(&rtwdev->mutex); + rtw_fw_download_rsvd_page(rtwdev); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) @@ -875,6 +889,7 @@ const struct ieee80211_ops rtw_ops = { .conf_tx = rtw_ops_conf_tx, .sta_add = rtw_ops_sta_add, .sta_remove = rtw_ops_sta_remove, + .set_tim = rtw_ops_set_tim, .set_key = rtw_ops_set_key, .ampdu_action = rtw_ops_ampdu_action, .can_aggregate_in_amsdu = rtw_ops_can_aggregate_in_amsdu, diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 66b6c140e69a..7431988b5985 100644 --- 
a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -664,6 +664,12 @@ void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel) } EXPORT_SYMBOL(rtw_set_rx_freq_band); +void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period) +{ + rtw_write32_set(rtwdev, REG_TCR, BIT_TCR_UPDATE_TIMIE); + rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period - 1); +} + void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *chan_params) { diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index f694a2d86e0b..2743074a4256 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -580,6 +580,7 @@ struct rtw_tx_pkt_info { u32 tx_pkt_size; u8 offset; u8 pkt_offset; + u8 tim_offset; u8 mac_id; u8 rate_id; u8 rate; @@ -2131,6 +2132,7 @@ static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev) } void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel); +void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period); void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *ch_param); bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index a0991d3f15c0..8e122e121e85 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -689,6 +689,9 @@ static u8 rtw_hw_queue_mapping(struct sk_buff *skb) queue = RTW_TX_QUEUE_BCN; else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))) queue = RTW_TX_QUEUE_MGMT; + else if (is_broadcast_ether_addr(hdr->addr1) || + is_multicast_ether_addr(hdr->addr1)) + queue = RTW_TX_QUEUE_HI0; else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq))) queue = ac_to_hwq[IEEE80211_AC_BE]; else diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 84ba9ec489c3..03bd8dc53f72 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -389,12 +389,14 @@ #define BIT_EN_FREE_CNT BIT(3) #define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1)) #define REG_HIQ_NO_LMT_EN 0x5A7 +#define REG_DTIM_COUNTER_ROOT 0x5A8 #define BIT_HIQ_NO_LMT_EN_ROOT BIT(0) #define REG_TIMER0_SRC_SEL 0x05B4 #define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6)) #define REG_TCR 0x0604 #define BIT_PWRMGT_HWDATA_EN BIT(7) +#define BIT_TCR_UPDATE_TIMIE BIT(5) #define REG_RCR 0x0608 #define BIT_APP_FCS BIT(31) #define BIT_APP_MIC BIT(30) diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index 83248da0237f..60d40a5c2c6a 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -67,6 +67,10 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel); SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr); SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null); + if (pkt_info->tim_offset) { + SET_TX_DESC_TIM_EN(txdesc, 1); + SET_TX_DESC_TIM_OFFSET(txdesc, pkt_info->tim_offset); + } } EXPORT_SYMBOL(rtw_tx_fill_tx_desc); @@ -448,6 +452,19 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, if (type == RSVD_QOS_NULL) pkt_info->bt_null = true; + if (type == RSVD_BEACON) { + struct rtw_rsvd_page *rsvd_pkt; + int hdr_len; + + rsvd_pkt = 
list_first_entry_or_null(&rtwdev->rsvd_page_list, + struct rtw_rsvd_page, + build_list); + if (rsvd_pkt && rsvd_pkt->tim_offset != 0) { + hdr_len = sizeof(struct ieee80211_hdr_3addr); + pkt_info->tim_offset = rsvd_pkt->tim_offset - hdr_len; + } + } + rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb); /* TODO: need to change hw port and hw ssn sel for multiple vifs */ diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h index 56371eff9f7f..8419603adce4 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.h +++ b/drivers/net/wireless/realtek/rtw88/tx.h @@ -33,6 +33,10 @@ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(6, 5)) #define SET_TX_DESC_SW_SEQ(txdesc, value) \ le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12)) +#define SET_TX_DESC_TIM_EN(txdesc, value) \ + le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, BIT(7)) +#define SET_TX_DESC_TIM_OFFSET(txdesc, value) \ + le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(6, 0)) #define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17)) #define SET_TX_DESC_USE_RTS(tx_desc, value) \ -- cgit v1.2.3 From f1c4dabfe68df7321cba0a07a30b2776a303abb2 Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Thu, 7 Apr 2022 17:58:56 +0800 Subject: rtw88: 8821c: Enable TX report for management frames Without this setting, hardware would not report to driver even if management frames are transmitted successfully. So we fix it. Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407095858.46807-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8821c_table.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c index 8e8915c5c498..6c82c4383497 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c @@ -13,7 +13,7 @@ static const u32 rtw8821c_mac[] = { 0x04F, 0x00000001, 0x029, 0x000000F9, 0x420, 0x00000080, - 0x421, 0x0000000F, + 0x421, 0x0000001F, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, -- cgit v1.2.3 From f5207c122102cac22c78f438610d937a924aee3a Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 7 Apr 2022 17:58:57 +0800 Subject: rtw88: do PHY calibration while starting AP Calling calibration to yield expected PHY performance. We do this in STA mode, so do this in AP mode as well. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407095858.46807-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/mac80211.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 1ee41dfda5e1..30903c567cd9 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -429,6 +429,18 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, mutex_unlock(&rtwdev->mutex); } +static int rtw_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct rtw_dev *rtwdev = hw->priv; + struct rtw_chip_info *chip = rtwdev->chip; + + mutex_lock(&rtwdev->mutex); + chip->ops->phy_calibration(rtwdev); + mutex_unlock(&rtwdev->mutex); + + return 0; +} + static int rtw_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params) @@ -886,6 +898,7 @@ const struct ieee80211_ops rtw_ops = { .change_interface = rtw_ops_change_interface, .configure_filter = rtw_ops_configure_filter, .bss_info_changed = rtw_ops_bss_info_changed, + .start_ap = rtw_ops_start_ap, .conf_tx = rtw_ops_conf_tx, .sta_add = rtw_ops_sta_add, .sta_remove = rtw_ops_sta_remove, -- cgit v1.2.3 From ece31c93d4d68f7eb8eea4431b052aacdb678de2 Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Thu, 7 Apr 2022 17:58:58 +0800 Subject: rtw88: 8821c: fix debugfs rssi value RSSI value per frame is reported to mac80211 but not maintained in our own statistics, add it back to help us debug. Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407095858.46807-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 99eee128ae94..ec38a7c84951 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -512,6 +512,7 @@ static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx) static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, struct rtw_rx_pkt_stat *pkt_stat) { + struct rtw_dm_info *dm_info = &rtwdev->dm_info; s8 rx_power; u8 lna_idx = 0; u8 vga_idx = 0; @@ -523,6 +524,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, pkt_stat->rx_power[RF_PATH_A] = rx_power; pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + dm_info->rssi[RF_PATH_A] = pkt_stat->rssi; pkt_stat->bw = RTW_CHANNEL_WIDTH_20; pkt_stat->signal_power = rx_power; } @@ -530,6 +532,7 @@ static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, struct rtw_rx_pkt_stat *pkt_stat) { + struct rtw_dm_info *dm_info = &rtwdev->dm_info; u8 rxsc, bw; s8 min_rx_power = -120; @@ -549,6 +552,7 @@ static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110; pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + dm_info->rssi[RF_PATH_A] = pkt_stat->rssi; pkt_stat->bw = bw; pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A], min_rx_power); -- cgit v1.2.3 From d5286826201ec226a683964122e207be60f62fbf Mon Sep 17 00:00:00 
2001 From: Jimmy Hon Date: Thu, 7 Apr 2022 02:51:22 -0500 Subject: rtw88: 8821ce: add support for device ID 0xb821 New device ID 0xb821 found on TP-Link T2E Tested it with c821 driver. 2.4GHz and 5GHz works. PCI id: 05:00.0 Network controller: Realtek Semiconductor Co., Ltd. Device b821 Subsystem: Realtek Semiconductor Co., Ltd. Device b821 Signed-off-by: Jimmy Hon Reviewed-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407075123.420696-2-honyuenkwun@gmail.com --- drivers/net/wireless/realtek/rtw88/rtw8821ce.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c index f34de115e4bc..56d22f9de904 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c @@ -8,6 +8,10 @@ #include "rtw8821ce.h" static const struct pci_device_id rtw_8821ce_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB821), + .driver_data = (kernel_ulong_t)&rtw8821c_hw_spec + }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC821), .driver_data = (kernel_ulong_t)&rtw8821c_hw_spec -- cgit v1.2.3 From b9eb5f0742d107ca9c0f33da58f61ce83e3ce2fc Mon Sep 17 00:00:00 2001 From: Jimmy Hon Date: Thu, 7 Apr 2022 02:51:23 -0500 Subject: rtw88: 8821ce: Disable PCIe ASPM L1 for 8821CE using chip ID Make workaround work for other 8821CE devices with different PCI ID Signed-off-by: Jimmy Hon Reviewed-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220407075123.420696-3-honyuenkwun@gmail.com --- drivers/net/wireless/realtek/rtw88/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 8e122e121e85..33042b63a151 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1773,7 +1773,7 @@ int rtw_pci_probe(struct pci_dev *pdev, } /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */ - if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL) + if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL) rtwpci->rx_no_aspm = true; rtw_pci_phy_cfg(rtwdev); -- cgit v1.2.3 From b2268fd81c18302ed3443db09b7f06d52e6dcf03 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Fri, 8 Apr 2022 08:12:05 +0000 Subject: wlcore: debugfs: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408081205.2494512-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/debugfs.c | 52 +++++++++++--------------------- 1 file changed, 17 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index cce8d75d8b81..eb3d3f0e0b4d 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -52,11 +52,9 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } if (!wl->plt && time_after(jiffies, wl->stats.fw_stats_update + @@ -108,12 +106,9 @@ static void chip_op_handler(struct wl1271 *wl, 
unsigned long value, return; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); - + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) return; - } chip_op = arg; chip_op(wl); @@ -279,11 +274,9 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } /* In case we're already in PSM, trigger it again to set new timeout * immediately without waiting for re-association @@ -349,11 +342,9 @@ static ssize_t forced_ps_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } /* In case we're already in PSM, trigger it again to switch mode * immediately without waiting for re-association @@ -831,11 +822,9 @@ static ssize_t rx_streaming_interval_write(struct file *file, wl->conf.rx_streaming.interval = value; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif_sta(wl, wlvif) { wl1271_recalc_rx_streaming(wl, wlvif); @@ -889,11 +878,9 @@ static ssize_t rx_streaming_always_write(struct file *file, wl->conf.rx_streaming.always = value; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif_sta(wl, wlvif) { wl1271_recalc_rx_streaming(wl, wlvif); @@ -939,11 +926,9 @@ static ssize_t beacon_filtering_write(struct file *file, mutex_lock(&wl->mutex); - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); @@ -1021,11 +1006,9 @@ static ssize_t sleep_auth_write(struct file *file, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl1271_acx_sleep_auth(wl, value); if (ret < 0) @@ -1254,9 +1237,8 @@ static ssize_t fw_logger_write(struct file *file, } mutex_lock(&wl->mutex); - ret = pm_runtime_get_sync(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) { - pm_runtime_put_noidle(wl->dev); count = ret; goto out; } -- cgit v1.2.3 From e8c241d4a7fa07c0a21a69eb68a98efd818f2a77 Mon Sep 17 00:00:00 2001 From: Lv Ruyi Date: Fri, 8 Apr 2022 09:58:03 +0000 Subject: rtlwifi: Fix spelling mistake "cacluated" -> "calculated" There are some spelling mistakes in the comments. Fix it. 
Reported-by: Zeal Robot Signed-off-by: Lv Ruyi Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408095803.2495336-1-lv.ruyi@zte.com.cn --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 4 ++-- 6 files changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 257816563fa0..6e4741e9483f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -58,7 +58,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, cck_agc_rpt = cck_buf->cck_agc_rpt; /* (1)Hardware does not provide RSSI for CCK - * (2)PWDB, Average PWDB cacluated by + * (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ if (ppsc->rfpwr_state == ERFON) @@ -187,7 +187,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index fdbd3758cde1..730c7e939bd2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -166,7 +166,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, pstats->rx_mimo_signalstrength[i] = (u8) rssi; } - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 272750612abf..8043d819fb85 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -55,7 +55,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw, cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a; /* (1)Hardware does not provide RSSI for CCK - * (2)PWDB, Average PWDB cacluated by + * (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ cck_highpwr = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, @@ -153,7 +153,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw, pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 3797a1e2af82..27fddbcade32 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -52,7 +52,7 @@ static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw, cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo; /* (1)Hardware does not provide RSSI for CCK */ - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, 
Average PWDB calculated by * hardware (for rate adaptive) */ if (ppsc->rfpwr_state == ERFON) @@ -170,7 +170,7 @@ static void _rtl8723e_query_rxphystatus(struct ieee80211_hw *hw, pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index c08f57e487e5..24ef7cc52e99 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -55,7 +55,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a; /* (1)Hardware does not provide RSSI for CCK */ - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BIT(9)); @@ -126,7 +126,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ rx_pwr_all = ((p_phystrpt->cck_sig_qual_ofdm_pwdb_all >> 1) & diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 8c48108960b9..d7cb3319d885 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -86,7 +86,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw, cck_agc_rpt = p_phystrpt->cfosho[0]; /* (1)Hardware does not provide RSSI for CCK - * (2)PWDB, Average PWDB cacluated by + * (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ cck_highpwr = (u8)rtlphy->cck_high_power; @@ -215,7 +215,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw, pstatus->rx_mimo_signalstrength[i] = (u8)rssi; } - /* (2)PWDB, Average PWDB cacluated by + /* (2)PWDB, Average PWDB calculated by * hardware (for rate adaptive) */ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; -- cgit v1.2.3 From 780d9c48a05a2945912a9f7e0f511c8024e38f40 Mon Sep 17 00:00:00 2001 From: Lv Ruyi Date: Mon, 11 Apr 2022 03:24:58 +0000 Subject: rtlwifi: rtl8192cu: Fix spelling mistake "writting" -> "writing" There are some spelling mistakes in the comments. Fix it. Reported-by: Zeal Robot Signed-off-by: Lv Ruyi Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220411032458.2517551-1-lv.ruyi@zte.com.cn --- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 9bd02c3c3adc..a040c07791d1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -520,7 +520,7 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw, * 2 out-ep. Remainder pages have assigned to High queue */ if (outepnum > 1 && txqremaininpage) numhq += txqremaininpage; - /* NOTE: This step done before writting REG_RQPN. */ + /* NOTE: This step done before writing REG_RQPN. 
*/ if (ischipn) { if (queue_sel & TX_SELE_NQ) numnq = txqpageunit; @@ -539,7 +539,7 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw, numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ : WMM_CHIP_A_PAGE_NUM_LPQ; } - /* NOTE: This step done before writting REG_RQPN. */ + /* NOTE: This step done before writing REG_RQPN. */ if (ischipn) { if (queue_sel & TX_SELE_NQ) numnq = WMM_CHIP_B_PAGE_NUM_NPQ; -- cgit v1.2.3 From b4fb2d33514a511312e1c0e470771eaa4a94d8ec Mon Sep 17 00:00:00 2001 From: Joe Damato Date: Wed, 2 Mar 2022 17:29:07 -0800 Subject: i40e: Add support for MPLS + TSO This change adds support for TSO of MPLS packets. In my tests with tcpdump it seems to work. Note this test setup has a 9000 byte MTU: MPLS (label 100, exp 0, [S], ttl 64) IP srcip.50086 > dstip.1234: Flags [P.], seq 593345:644401, ack 0, win 420, options [nop,nop,TS val 45022534 ecr 1722291395], length 51056 IP dstip.1234 > srcip.50086: Flags [.], ack 593345, win 122, options [nop,nop,TS val 1722291395 ecr 45022534], length 0 IP dstip.1234 > srcip.50086: Flags [.], ack 602289, win 105, options [nop,nop,TS val 1722291395 ecr 45022534], length 0 IP dstip.1234 > srcip.50086: Flags [.], ack 620177, win 71, options [nop,nop,TS val 1722291395 ecr 45022534], length 0 MPLS (label 100, exp 0, [S], ttl 64) IP srcip.50086 > dstip.1234: Flags [P.], seq 644401:655953, ack 0, win 420, options [nop,nop,TS val 45022534 ecr 1722291395], length 11552 IP dstip.1234 > srcip.50086: Flags [.], ack 638065, win 37, options [nop,nop,TS val 1722291395 ecr 45022534], length 0 IP dstip.1234 > srcip.50086: Flags [.], ack 644401, win 25, options [nop,nop,TS val 1722291395 ecr 45022534], length 0 IP dstip.1234 > srcip.50086: Flags [.], ack 653345, win 8, options [nop,nop,TS val 1722291395 ecr 45022534], length 0 IP dstip.1234 > srcip.50086: Flags [.], ack 655953, win 3, options [nop,nop,TS val 1722291395 ecr 45022534], length 0 Signed-off-by: Joe Damato Co-developed-by: Mike Gallo Signed-off-by: Mike Gallo Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_main.c | 20 ++++++++++-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 47 ++++++++++++++++++----------- 2 files changed, 48 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6778df2177a1..b7f11fd8297b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -13436,8 +13436,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np->vsi = vsi; hw_enc_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | + NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_SOFT_FEATURES | NETIF_F_TSO | @@ -13468,6 +13467,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; +#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + I40E_GSO_PARTIAL_FEATURES; + + netdev->mpls_features |= NETIF_F_SG; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->mpls_features |= NETIF_F_TSO; + netdev->mpls_features |= NETIF_F_TSO6; + netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES; + /* enable macvlan offloads */ 
netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0eae5858f2fe..8b844ad08a86 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3,6 +3,7 @@ #include #include +#include #include #include "i40e.h" #include "i40e_trace.h" @@ -3015,6 +3016,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, { struct sk_buff *skb = first->skb; u64 cd_cmd, cd_tso_len, cd_mss; + __be16 protocol; union { struct iphdr *v4; struct ipv6hdr *v6; @@ -3026,7 +3028,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, unsigned char *hdr; } l4; u32 paylen, l4_offset; - u16 gso_segs, gso_size; + u16 gso_size; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3039,15 +3041,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, if (err < 0) return err; - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* initialize outer IP header fields */ if (ip.v4->version == 4) { ip.v4->tot_len = 0; ip.v4->check = 0; + + first->tx_flags |= I40E_TX_FLAGS_TSO; } else { ip.v6->payload_len = 0; + first->tx_flags |= I40E_TX_FLAGS_TSO; } if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | @@ -3100,10 +3110,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* pull values out of skb_shinfo */ gso_size = skb_shinfo(skb)->gso_size; - gso_segs = skb_shinfo(skb)->gso_segs; /* update GSO size and bytecount with header size */ - first->gso_segs = gso_segs; + first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* find the field values */ @@ -3187,13 +3196,27 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *exthdr; u32 offset, cmd = 0; __be16 frag_off; + __be16 protocol; u8 l4_proto = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + + /* set the tx_flags to indicate the IP protocol type. this is + * required so that checksum header computation below is accurate. 
+ */ + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + else + *tx_flags |= I40E_TX_FLAGS_IPV6; /* compute outer L2 header size */ offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; @@ -3749,7 +3772,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, struct i40e_tx_buffer *first; u32 td_offset = 0; u32 tx_flags = 0; - __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; int tso, count; @@ -3791,15 +3813,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; - /* obtain protocol of skb */ - protocol = vlan_get_protocol(skb); - - /* setup IPv4/IPv6 offloads */ - if (protocol == htons(ETH_P_IP)) - tx_flags |= I40E_TX_FLAGS_IPV4; - else if (protocol == htons(ETH_P_IPV6)) - tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) -- cgit v1.2.3 From d30bed29a71806c6f686fc8776b5279e897cc742 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:50 +0300 Subject: mlxsw: reg: Extend MTMP register with new slot number field Extend MTMP (Management Temperature Register) with new field specifying the slot index. The purpose of this field is to support access to MTMP register for reading temperature sensors on modular systems. For non-modular systems the 'sensor_index' uniquely identifies the cage sensors, while 'slot_index' is always 0. For modular systems the sensors are identified by: - 'slot_index', specifying the slot index, where line card is located; - 'sensor_index', specifying cage sensor within the line card. Signed-off-by: Vadim Pasternak Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 11 ++++++----- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 6 +++--- drivers/net/ethernet/mellanox/mlxsw/reg.h | 11 +++++++++-- 4 files changed, 19 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 29a74b8bd5b5..3103fef43955 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -177,7 +177,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, int page; int err; - mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, + mlxsw_reg_mtmp_pack(mtmp_pl, 0, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, false); err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 8b170ad92302..71ca3b561e62 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -66,7 +66,7 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev, index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon->module_sensor_max); - mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); @@ -89,7 +89,7 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon->module_sensor_max); - 
mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); @@ -232,8 +232,9 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, - false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, + MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, + false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(dev, "Failed to query module temperature\n"); @@ -721,7 +722,7 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) while (index < max_index) { sensor_index = index % mlxsw_hwmon->module_sensor_max + MLXSW_REG_MTMP_GBOX_INDEX_MIN; - mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, true, true); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, sensor_index, true, true); err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 05f54bd982c0..65ab04e32972 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -271,7 +271,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev, int temp; int err; - mlxsw_reg_mtmp_pack(mtmp_pl, 0, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, 0, false, false); err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { @@ -431,7 +431,7 @@ mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core, int err; /* Read module temperature and thresholds. */ - mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, sensor_index, false, false); err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl); if (err) { /* Set temperature and thresholds to zero to avoid passing @@ -576,7 +576,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, int err; index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module; - mlxsw_reg_mtmp_pack(mtmp_pl, index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 67b1a2f8397f..5de77d6e08ba 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -9721,6 +9721,12 @@ MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7); MLXSW_REG_DEFINE(mtmp, MLXSW_REG_MTMP_ID, MLXSW_REG_MTMP_LEN); +/* reg_mtmp_slot_index + * Slot index (0: Main board). 
+ * Access: Index + */ +MLXSW_ITEM32(reg, mtmp, slot_index, 0x00, 16, 4); + #define MLXSW_REG_MTMP_MODULE_INDEX_MIN 64 #define MLXSW_REG_MTMP_GBOX_INDEX_MIN 256 /* reg_mtmp_sensor_index @@ -9810,11 +9816,12 @@ MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16); */ MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE); -static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index, - bool max_temp_enable, +static inline void mlxsw_reg_mtmp_pack(char *payload, u8 slot_index, + u16 sensor_index, bool max_temp_enable, bool max_temp_reset) { MLXSW_REG_ZERO(mtmp, payload); + mlxsw_reg_mtmp_slot_index_set(payload, slot_index); mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index); mlxsw_reg_mtmp_mte_set(payload, max_temp_enable); mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset); -- cgit v1.2.3 From c6e6ad703ed29af57af85ac8647b34763ae06cdf Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:51 +0300 Subject: mlxsw: reg: Extend MTBR register with new slot number field Extend MTBR (Management Temperature Bulk Register) with new field specifying the slot number. The purpose of this field is to support access to MTBR register for reading temperature sensors on modular system. For non-modular systems the 'sensor_index' uniquely identifies the cage sensors. For modular systems the sensors are identified by two indexes: - 'slot_index', specifying the slot number, where line card is located; - 'sensor_index', specifying cage sensor within the line card. Signed-off-by: Vadim Pasternak Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 11 +++++++++-- 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 3103fef43955..0c1306db7315 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -660,8 +660,8 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, u16 temp; int err; - mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, - 1); + mlxsw_reg_mtbr_pack(mtbr_pl, 0, + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 71ca3b561e62..f4bc711a16cf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -271,8 +271,8 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, - 1); + mlxsw_reg_mtbr_pack(mtbr_pl, 0, + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); if (err) { dev_err(dev, "Failed to query module temperature sensor\n"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5de77d6e08ba..958df69cccb5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -9887,6 +9887,12 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 
0x10, 1); MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN); +/* reg_mtbr_slot_index + * Slot index (0: Main board). + * Access: Index + */ +MLXSW_ITEM32(reg, mtbr, slot_index, 0x00, 16, 4); + /* reg_mtbr_base_sensor_index * Base sensors index to access (0 - ASIC sensor, 1-63 - ambient sensors, * 64-127 are mapped to the SFP+/QSFP modules sequentially). @@ -9919,10 +9925,11 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16, MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false); -static inline void mlxsw_reg_mtbr_pack(char *payload, u16 base_sensor_index, - u8 num_rec) +static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index, + u16 base_sensor_index, u8 num_rec) { MLXSW_REG_ZERO(mtbr, payload); + mlxsw_reg_mtbr_slot_index_set(payload, slot_index); mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index); mlxsw_reg_mtbr_num_rec_set(payload, num_rec); } -- cgit v1.2.3 From 89dd6fcd07f9adda02ea3be31966a9a7fc4eed2a Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:52 +0300 Subject: mlxsw: reg: Extend MCIA register with new slot number field Extend MCIA (Management Cable Info Access Register) with new field specifying the slot number. The purpose of this field is to support access to MCIA register for reading cage cable information on modular system. For non-modular systems the 'module' number uniquely identifies the transceiver location. For modular systems the transceivers are identified by two indexes: - 'slot_index', specifying the slot number, where line card is located; - 'module', specifying cage transceiver within the line card. Signed-off-by: Vadim Pasternak Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 13 +++++++------ drivers/net/ethernet/mellanox/mlxsw/reg.h | 14 +++++++++++--- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 0c1306db7315..eff9ced260ea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -69,8 +69,8 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp, if (err) return err; - mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1, - MLXSW_REG_MCIA_I2C_ADDR_LOW); + mlxsw_reg_mcia_pack(mcia_pl, 0, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, + 1, MLXSW_REG_MCIA_I2C_ADDR_LOW); err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); if (err) return err; @@ -145,7 +145,8 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, } } - mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr); + mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, page, offset, size, + i2c_addr); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl); if (err) @@ -219,12 +220,12 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM; else page = MLXSW_REG_MCIA_TH_PAGE_NUM; - mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, + mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, page, MLXSW_REG_MCIA_TH_PAGE_OFF + off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_LOW); } else { - mlxsw_reg_mcia_pack(mcia_pl, module, 0, + mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, MLXSW_REG_MCIA_PAGE0_LO, off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_HIGH); @@ 
-419,7 +420,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, size = min_t(u8, page->length - bytes_read, MLXSW_REG_MCIA_EEPROM_SIZE); - mlxsw_reg_mcia_pack(mcia_pl, module, 0, page->page, + mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, page->page, device_addr + bytes_read, size, page->i2c_address); mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 958df69cccb5..57eb6980bf8c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -9978,6 +9978,12 @@ MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1); */ MLXSW_ITEM32(reg, mcia, module, 0x00, 16, 8); +/* reg_mcia_slot_index + * Slot index (0: Main board) + * Access: Index + */ +MLXSW_ITEM32(reg, mcia, slot, 0x00, 12, 4); + enum { MLXSW_REG_MCIA_STATUS_GOOD = 0, /* No response from module's EEPROM. */ @@ -10077,11 +10083,13 @@ MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE); MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) / \ MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH + 1) -static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock, - u8 page_number, u16 device_addr, - u8 size, u8 i2c_device_addr) +static inline void mlxsw_reg_mcia_pack(char *payload, u8 slot_index, u8 module, + u8 lock, u8 page_number, + u16 device_addr, u8 size, + u8 i2c_device_addr) { MLXSW_REG_ZERO(mcia, payload); + mlxsw_reg_mcia_slot_set(payload, slot_index); mlxsw_reg_mcia_module_set(payload, module); mlxsw_reg_mcia_l_set(payload, lock); mlxsw_reg_mcia_page_number_set(payload, page_number); -- cgit v1.2.3 From 655cbb1d7530e0d13e0cbfc2a39cc3281a7596e2 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:53 +0300 Subject: mlxsw: reg: Extend MCION register with new slot number field Extend MCION (Management Cable IO and Notifications Register) with new field specifying the slot number. The purpose of this field is to support access to MCION register for query cage transceiver on modular system. For non-modular systems the 'module' number uniquely identifies the transceiver location. For modular systems the transceivers are identified by two indexes: - 'slot_index', specifying the slot number, where line card is located; - 'module', specifying cage transceiver within the line card. 
Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/reg.h | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index eff9ced260ea..602f0738deab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -520,7 +520,7 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, params->policy = mlxsw_env->module_info[module].power_mode_policy; - mlxsw_reg_mcion_pack(mcion_pl, module); + mlxsw_reg_mcion_pack(mcion_pl, 0, module); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 57eb6980bf8c..3695f8c7d143 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -10521,6 +10521,12 @@ MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN); */ MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8); +/* reg_mcion_slot_index + * Slot index. + * Access: Index + */ +MLXSW_ITEM32(reg, mcion, slot_index, 0x00, 12, 4); + enum { MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0), MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8), @@ -10532,9 +10538,10 @@ enum { */ MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16); -static inline void mlxsw_reg_mcion_pack(char *payload, u8 module) +static inline void mlxsw_reg_mcion_pack(char *payload, u8 slot_index, u8 module) { MLXSW_REG_ZERO(mcion, payload); + mlxsw_reg_mcion_slot_index_set(payload, slot_index); mlxsw_reg_mcion_module_set(payload, module); } -- cgit v1.2.3 From 7cb85d3c696ef16405db354c8ef00ea9c666fba8 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:54 +0300 Subject: mlxsw: reg: Extend PMMP register with new slot number field Extend PMMP (Port Module Memory Map Properties Register) with new field specifying the slot number. The purpose of this field is to enable overriding the cable/module memory map advertisement. For non-modular systems the 'module' number uniquely identifies the transceiver location. For modular systems the transceivers are identified by two indexes: - 'slot_index', specifying the slot number, where line card is located; - 'module', specifying cage transceiver within the line card. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/reg.h | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 602f0738deab..8cee3e317a5b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -563,7 +563,7 @@ static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core, u16 eeprom_override_mask, eeprom_override; char pmmp_pl[MLXSW_REG_PMMP_LEN]; - mlxsw_reg_pmmp_pack(pmmp_pl, module); + mlxsw_reg_pmmp_pack(pmmp_pl, 0, module); mlxsw_reg_pmmp_sticky_set(pmmp_pl, true); /* Mask all the bits except low power mode. 
*/ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 3695f8c7d143..5bf8ad32cb8e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5984,6 +5984,12 @@ MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN); */ MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8); +/* reg_pmmp_slot_index + * Slot index. + * Access: Index + */ +MLXSW_ITEM32(reg, pmmp, slot_index, 0x00, 24, 4); + /* reg_pmmp_sticky * When set, will keep eeprom_override values after plug-out event. * Access: OP @@ -6011,9 +6017,10 @@ enum { */ MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16); -static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module) +static inline void mlxsw_reg_pmmp_pack(char *payload, u8 slot_index, u8 module) { MLXSW_REG_ZERO(pmmp, payload); + mlxsw_reg_pmmp_slot_index_set(payload, slot_index); mlxsw_reg_pmmp_module_set(payload, module); } -- cgit v1.2.3 From b691602c6f96369542d8f9e078610f89af2ad673 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:55 +0300 Subject: mlxsw: reg: Extend MGPIR register with new slot fields Extend MGPIR (Management General Peripheral Information Register) with new fields specifying the slot number and number of the slots available on system. The purpose of these fields is: - to support access to MPGIR register on modular system for getting the number of cages, equipped on the line card, inserted at specified slot. In case slot number is set zero, MGPIR will provide the information for the main board. For Top of the Rack (non-modular) system it will provide the same as before. - to provide the number of slots supported by system. This data is relevant only in case slot number is set zero. 
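As a usage illustration (not part of the patch), a caller could combine the pack/unpack helpers as extended by this change roughly as sketched below; the function name example_read_slot_modules() is hypothetical and only meant to show the intended slot-aware query flow:

static int example_read_slot_modules(struct mlxsw_core *core, u8 slot,
				     u8 *num_modules)
{
	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
	u8 num_slots;
	int err;

	/* A query for slot 0 (main board) also reports how many slots
	 * the system has.
	 */
	mlxsw_reg_mgpir_pack(mgpir_pl, 0);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;
	mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, &num_slots);

	if (slot > num_slots)
		return -EINVAL;

	/* Re-issue the same query against the requested slot to count the
	 * cages equipped on that line card.
	 */
	mlxsw_reg_mgpir_pack(mgpir_pl, slot);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl);
	if (err)
		return err;
	mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, num_modules, NULL);

	return 0;
}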
Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 9 +++++---- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 8 ++++---- drivers/net/ethernet/mellanox/mlxsw/reg.h | 20 ++++++++++++++++++-- 4 files changed, 29 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 8cee3e317a5b..8b4205c098d2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -1060,12 +1060,12 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) u8 module_count; int i, err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; - mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count); + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count, NULL); env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL); if (!env) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index f4bc711a16cf..2bc4c4556895 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -656,13 +656,13 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) u8 module_sensor_max; int i, err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, - &module_sensor_max); + &module_sensor_max, NULL); /* Add extra attributes for module temperature. 
Sensor index is * assigned to sensor_count value, while all indexed before @@ -707,12 +707,13 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) u8 gbox_num; int err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; - mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL); + mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL, + NULL); if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || !gbox_num) return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 65ab04e32972..adb2820430b1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -746,13 +746,13 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, char mgpir_pl[MLXSW_REG_MGPIR_LEN]; int i, err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, - &thermal->tz_module_num); + &thermal->tz_module_num, NULL); thermal->tz_module_arr = kcalloc(thermal->tz_module_num, sizeof(*thermal->tz_module_arr), @@ -837,13 +837,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, int i; int err; - mlxsw_reg_mgpir_pack(mgpir_pl); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, - NULL); + NULL, NULL); if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || !gbox_num) return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5bf8ad32cb8e..f0c90e66aa32 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11362,6 +11362,12 @@ enum mlxsw_reg_mgpir_device_type { MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE, }; +/* mgpir_slot_index + * Slot index (0: Main board). + * Access: Index + */ +MLXSW_ITEM32(reg, mgpir, slot_index, 0x00, 28, 4); + /* mgpir_device_type * Access: RO */ @@ -11379,21 +11385,29 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8); */ MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8); +/* mgpir_num_of_slots + * Number of slots in the system. + * Access: RO + */ +MLXSW_ITEM32(reg, mgpir, num_of_slots, 0x04, 8, 8); + /* mgpir_num_of_modules * Number of modules. 
* Access: RO */ MLXSW_ITEM32(reg, mgpir, num_of_modules, 0x04, 0, 8); -static inline void mlxsw_reg_mgpir_pack(char *payload) +static inline void mlxsw_reg_mgpir_pack(char *payload, u8 slot_index) { MLXSW_REG_ZERO(mgpir, payload); + mlxsw_reg_mgpir_slot_index_set(payload, slot_index); } static inline void mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices, enum mlxsw_reg_mgpir_device_type *device_type, - u8 *devices_per_flash, u8 *num_of_modules) + u8 *devices_per_flash, u8 *num_of_modules, + u8 *num_of_slots) { if (num_of_devices) *num_of_devices = mlxsw_reg_mgpir_num_of_devices_get(payload); @@ -11404,6 +11418,8 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices, mlxsw_reg_mgpir_devices_per_flash_get(payload); if (num_of_modules) *num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload); + if (num_of_slots) + *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload); } /* MFDE - Monitoring FW Debug Register -- cgit v1.2.3 From 64e65a540e6df27944c61b03ed2ac8fe797a0afa Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:56 +0300 Subject: mlxsw: core_env: Pass slot index during PMAOS register write call Pass the slot index down to PMAOS pack helper alongside with the module. Signed-off-by: Jiri Pirko Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 6 +++--- drivers/net/ethernet/mellanox/mlxsw/reg.h | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 8b4205c098d2..f1bb243dfb8c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -448,7 +448,7 @@ static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, module); + mlxsw_reg_pmaos_pack(pmaos_pl, 0, module); mlxsw_reg_pmaos_rst_set(pmaos_pl, true); return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); @@ -548,7 +548,7 @@ static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core, enum mlxsw_reg_pmaos_admin_status admin_status; char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, module); + mlxsw_reg_pmaos_pack(pmaos_pl, 0, module); admin_status = enable ? 
MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED : MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED; mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status); @@ -931,7 +931,7 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core) for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, i); + mlxsw_reg_pmaos_pack(pmaos_pl, 0, i); mlxsw_reg_pmaos_e_set(pmaos_pl, MLXSW_REG_PMAOS_E_GENERATE_EVENT); mlxsw_reg_pmaos_ee_set(pmaos_pl, true); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index f0c90e66aa32..03ef975a37e4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5769,9 +5769,10 @@ enum mlxsw_reg_pmaos_e { */ MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2); -static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module) +static inline void mlxsw_reg_pmaos_pack(char *payload, u8 slot_index, u8 module) { MLXSW_REG_ZERO(pmaos, payload); + mlxsw_reg_pmaos_slot_index_set(payload, slot_index); mlxsw_reg_pmaos_module_set(payload, module); } -- cgit v1.2.3 From e94295e0ed274b86f42d93b8b63d207deb484459 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Mon, 11 Apr 2022 17:46:57 +0300 Subject: mlxsw: reg: Add new field to Management General Peripheral Information Register Add new field 'max_modules_per_slot' to provide maximum number of modules that can be connected per slot. This field will always be zero if 'slot_index' in the query request is set to a non-zero value; otherwise, this field provides the maximum number of modules that can be equipped on a device inserted at any slot. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 03ef975a37e4..b8a236872fea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11386,6 +11386,12 @@ MLXSW_ITEM32(reg, mgpir, devices_per_flash, 0x00, 16, 8); */ MLXSW_ITEM32(reg, mgpir, num_of_devices, 0x00, 0, 8); +/* max_modules_per_slot + * Maximum number of modules that can be connected per slot. + * Access: RO + */ +MLXSW_ITEM32(reg, mgpir, max_modules_per_slot, 0x04, 16, 8); + /* mgpir_num_of_slots * Number of slots in the system. * Access: RO -- cgit v1.2.3 From 69e66c04c672d8ff83f76e625c27a4d7cd717c08 Mon Sep 17 00:00:00 2001 From: Joe Damato Date: Thu, 17 Mar 2022 21:12:12 -0700 Subject: ice: Add mpls+tso support Attempt to add mpls+tso support. I don't have ice hardware available to test myself, but I just implemented this feature in i40e and thought it might be useful to implement for ice while this is fresh in my brain. Hoping someone at Intel will be able to test this on my behalf.
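For reference (not part of the patch), the header-location pattern that both the i40e and ice MPLS/TSO changes rely on can be sketched as below, assuming the usual skbuff/VLAN/MPLS kernel headers are available; example_locate_tso_headers() is a hypothetical name used only for illustration:

static void example_locate_tso_headers(struct sk_buff *skb,
				       unsigned char **ip_hdr,
				       unsigned char **l4_hdr)
{
	__be16 protocol = vlan_get_protocol(skb);

	/* For MPLS-encapsulated packets the IP header is the inner network
	 * header, and the L4 header is found via the checksum start rather
	 * than the transport header.
	 */
	if (eth_p_mpls(protocol))
		*ip_hdr = skb_inner_network_header(skb);
	else
		*ip_hdr = skb_network_header(skb);
	*l4_hdr = skb_checksum_start(skb);
}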
Signed-off-by: Joe Damato Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 4 +++- drivers/net/ethernet/intel/ice/ice_txrx.c | 29 +++++++++++++++++++++-------- 2 files changed, 24 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index d768925785ca..fbefcb08fa64 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3329,7 +3329,9 @@ static void ice_set_netdev_features(struct net_device *netdev) vlano_features | tso_features; /* add support for HW_CSUM on packets with MPLS header */ - netdev->mpls_features = NETIF_F_HW_CSUM; + netdev->mpls_features = NETIF_F_HW_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; /* enable features */ netdev->features |= netdev->hw_features; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index f9bf008471c9..3f8b7274ed2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "ice_txrx_lib.h" #include "ice_lib.h" @@ -1748,18 +1749,24 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* compute outer L2 header size */ l2_len = ip.hdr - skb->data; offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S; - protocol = vlan_get_protocol(skb); - - if (protocol == htons(ETH_P_IP)) + /* set the tx_flags to indicate the IP protocol type. this is + * required so that checksum header computation below is accurate. + */ + if (ip.v4->version == 4) first->tx_flags |= ICE_TX_FLAGS_IPV4; - else if (protocol == htons(ETH_P_IPV6)) + else if (ip.v6->version == 6) first->tx_flags |= ICE_TX_FLAGS_IPV6; if (skb->encapsulation) { @@ -1957,6 +1964,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) unsigned char *hdr; } l4; u64 cd_mss, cd_tso_len; + __be16 protocol; u32 paylen; u8 l4_start; int err; @@ -1972,8 +1980,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) return err; /* cppcheck-suppress unreadVariable */ - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); + protocol = vlan_get_protocol(skb); + + if (eth_p_mpls(protocol)) + ip.hdr = skb_inner_network_header(skb); + else + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); /* initialize outer IP header fields */ if (ip.v4->version == 4) { -- cgit v1.2.3 From f728fa01666952fc077801f2a12a088a49d3b543 Mon Sep 17 00:00:00 2001 From: Joe Damato Date: Thu, 24 Mar 2022 12:46:58 -0700 Subject: i40e: Add tx_stopped stat Track TX queue stop events and export the new stat with ethtool. 
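For reference (not part of the patch), once applied the counter is expected to show up in the per-netdev statistics dump, e.g. via something like: ethtool -S <iface> | grep tx_stopped (the interface name is only a placeholder).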
Signed-off-by: Joe Damato Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 5 +++-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++++ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2 ++ drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 + 6 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 55c6bce5da61..18558a019353 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -852,6 +852,7 @@ struct i40e_vsi { u64 tx_busy; u64 tx_linearize; u64 tx_force_wb; + u64 tx_stopped; u64 rx_buf_failed; u64 rx_page_failed; u64 rx_page_reuse; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index be7c6f34d45c..c9dcd6d92c83 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -309,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) tx_ring->stats.bytes, tx_ring->tx_stats.restart_queue); dev_info(&pf->pdev->dev, - " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", + " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n", i, tx_ring->tx_stats.tx_busy, - tx_ring->tx_stats.tx_done_old); + tx_ring->tx_stats.tx_done_old, + tx_ring->tx_stats.tx_stopped); dev_info(&pf->pdev->dev, " tx_rings[%i]: size = %i\n", i, tx_ring->size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e48499624d22..162bae158160 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -293,6 +293,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), I40E_VSI_STAT("tx_busy", tx_busy), + I40E_VSI_STAT("tx_stopped", tx_stopped), I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse), diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b7f11fd8297b..fea40efbd1ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -785,6 +785,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) unsigned int start; u64 tx_linearize; u64 tx_force_wb; + u64 tx_stopped; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -804,6 +805,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; + tx_stopped = 0; rx_page = 0; rx_buf = 0; rx_reuse = 0; @@ -828,6 +830,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; + tx_stopped += p->tx_stats.tx_stopped; /* locate Rx ring */ p = READ_ONCE(vsi->rx_rings[q]); @@ -872,6 +875,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; + vsi->tx_stopped = tx_stopped; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; vsi->rx_page_reuse = rx_reuse; diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 8b844ad08a86..7bc1174edf6b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3396,6 +3396,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) /* Memory barrier before checking head and tail */ smp_mb(); + ++tx_ring->tx_stats.tx_stopped; + /* Check again in a case another CPU has just made room available. */ if (likely(I40E_DESC_UNUSED(tx_ring) < size)) return -EBUSY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index c471c2da313c..41f86e9535a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -290,6 +290,7 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_stopped; int prev_pkt_ctr; }; -- cgit v1.2.3 From c8631e61f4d459ce9259ffddbb23baafd514c98d Mon Sep 17 00:00:00 2001 From: "Nabil S. Alramli" Date: Thu, 24 Mar 2022 20:03:38 +0000 Subject: i40e: Add vsi.tx_restart to i40e ethtool stats Add vsi.tx_restart to the i40e driver ethtool statistics. Signed-off-by: Nabil S. Alramli Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 162bae158160..610f00cbaff9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -300,6 +300,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc), I40E_VSI_STAT("rx_cache_waive", rx_page_waive), I40E_VSI_STAT("rx_cache_busy", rx_page_busy), + I40E_VSI_STAT("tx_restart", tx_restart), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, -- cgit v1.2.3 From a941d5ee4c579682e36063dd966e041bd8368ab8 Mon Sep 17 00:00:00 2001 From: Mateusz Palczewski Date: Tue, 29 Mar 2022 09:35:43 +0200 Subject: i40e: Add Ethernet Connection X722 for 10GbE SFP+ support Add support for Ethernet Connection X722 for 10GbE SFP+ cards. Make it possible for the driver to bind to the card.
Signed-off-by: Przemyslaw Patynowski Signed-off-by: Mateusz Palczewski Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_common.c | 1 + drivers/net/ethernet/intel/i40e/i40e_devids.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 3 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6aefffd83615..2819e261a126 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -47,6 +47,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_SFP_X722_A: hw->mac.type = I40E_MAC_X722; break; default: diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 1bcb0ec0f0c0..2610338002fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -33,6 +33,7 @@ #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_SFP_X722_A 0x0DDA #endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index fea40efbd1ca..358c2edc118d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -77,6 +77,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, -- cgit v1.2.3 From 2e5b3d4cb16e0be22f714475849bd14435b72583 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Tue, 12 Apr 2022 08:28:47 +0000 Subject: net: ethernet: ti: cpsw: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Use pm_runtime_resume_and_get() to replace the pm_runtime_get_sync() and pm_runtime_put_noidle() pair. This change just simplifies the code; there are no functional changes.
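For illustration (not taken from the patch), the transformation applied throughout cpsw.c follows this general pattern for some struct device *dev:

	/* before */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* after: pm_runtime_resume_and_get() drops the usage count itself on
	 * failure, so the explicit pm_runtime_put_noidle() cleanup goes away.
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;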
Reported-by: Zeal Robot Signed-off-by: Minghao Chi Reviewed-by: Grygorii Strashko Link: https://lore.kernel.org/r/20220412082847.2532584-1-chi.minghao@zte.com.cn Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ti/cpsw.c | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e4edcc8c6889..e6ad2e53f1cd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -756,11 +756,9 @@ static int cpsw_ndo_open(struct net_device *ndev) int ret; u32 reg; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } netif_carrier_off(ndev); @@ -968,11 +966,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (cpsw->data.dual_emac) { vid = cpsw->slaves[priv->emac_port].port_vlan; @@ -1052,11 +1048,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (cpsw->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for @@ -1090,11 +1084,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (cpsw->data.dual_emac) { int i; @@ -1567,11 +1559,9 @@ static int cpsw_probe(struct platform_device *pdev) /* Need to enable clocks with runtime PM api to access module * registers */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) goto clean_runtime_disable_ret; - } ret = cpsw_probe_dt(&cpsw->data, pdev); if (ret) @@ -1734,11 +1724,9 @@ static int cpsw_remove(struct platform_device *pdev) struct cpsw_common *cpsw = platform_get_drvdata(pdev); int i, ret; - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) return ret; - } for (i = 0; i < cpsw->data.slaves; i++) if (cpsw->slaves[i].ndev) -- cgit v1.2.3 From f45ba67eb74ab4b775616af731bdf8944afce3f1 Mon Sep 17 00:00:00 2001 From: Lv Ruyi Date: Tue, 12 Apr 2022 08:51:26 +0000 Subject: ixp4xx_eth: fix error check return value of platform_get_irq() platform_get_irq() return negative value on failure, so null check of return value is incorrect. Fix it by comparing whether it is less than zero. 
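As a brief illustration (not part of the patch), the correct error-handling pattern for platform_get_irq() looks like this:

	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* negative errno, e.g. -ENXIO or -EPROBE_DEFER */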
Fixes: 9055a2f59162 ("ixp4xx_eth: make ptp support a platform driver") Reported-by: Zeal Robot Signed-off-by: Lv Ruyi Reviewed-by: Linus Walleij Link: https://lore.kernel.org/r/20220412085126.2532924-1-lv.ruyi@zte.com.cn Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/xscale/ptp_ixp46x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index 1f382777aa5a..9abbdb71e629 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -271,7 +271,7 @@ static int ptp_ixp_probe(struct platform_device *pdev) ixp_clock.master_irq = platform_get_irq(pdev, 0); ixp_clock.slave_irq = platform_get_irq(pdev, 1); if (IS_ERR(ixp_clock.regs) || - !ixp_clock.master_irq || !ixp_clock.slave_irq) + ixp_clock.master_irq < 0 || ixp_clock.slave_irq < 0) return -ENXIO; ixp_clock.caps = ptp_ixp_caps; -- cgit v1.2.3 From aa7f148bedcaab5b6f6072de6154f36f8876e118 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Wed, 13 Apr 2022 09:08:01 +0800 Subject: rtw89: extend H2C of CMAC control info In order to support new chip that has capability of 160M, we need new format to fill new information, so add a new V1 ID for newer use. Since most fields are the same, fill fields according to the function ID of chip. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413010804.8941-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 3 +- drivers/net/wireless/realtek/rtw89/fw.c | 47 ++++++++++++++++++--------- drivers/net/wireless/realtek/rtw89/fw.h | 43 ++++++++++++++++++++---- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + 5 files changed, 72 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index ec181c1989bc..2715dc1ff73f 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -578,7 +578,7 @@ enum rtw89_ps_mode { #define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1) #define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) #define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) -#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1) +#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1) enum rtw89_ru_bandwidth { RTW89_RU26 = 0, @@ -2413,6 +2413,7 @@ struct rtw89_chip_info { const struct rtw89_btc_rf_trx_para *rf_para_dlink; u8 ps_mode_supported; + u32 h2c_cctl_func_id; u32 hci_func_en_addr; u32 h2c_desc_size; u32 txwd_body_size; diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 986d6249c108..bd07d2b83d07 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -780,6 +780,7 @@ fail: int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_hal *hal = &rtwdev->hal; struct sk_buff *skb; u8 ntx_path = hal->antenna_tx ? 
hal->antenna_tx : RF_B; @@ -794,16 +795,18 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, skb_put(skb, H2C_CMC_TBL_LEN); SET_CTRL_INFO_MACID(skb->data, macid); SET_CTRL_INFO_OPERATION(skb->data, 1); - SET_CMC_TBL_TXPWR_MODE(skb->data, 0); - SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); - SET_CMC_TBL_PATH_MAP_A(skb->data, 0); - SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); - SET_CMC_TBL_PATH_MAP_C(skb->data, 0); - SET_CMC_TBL_PATH_MAP_D(skb->data, 0); - SET_CMC_TBL_ANTSEL_A(skb->data, 0); - SET_CMC_TBL_ANTSEL_B(skb->data, 0); - SET_CMC_TBL_ANTSEL_C(skb->data, 0); - SET_CMC_TBL_ANTSEL_D(skb->data, 0); + if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { + SET_CMC_TBL_TXPWR_MODE(skb->data, 0); + SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); + SET_CMC_TBL_PATH_MAP_A(skb->data, 0); + SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); + SET_CMC_TBL_PATH_MAP_C(skb->data, 0); + SET_CMC_TBL_PATH_MAP_D(skb->data, 0); + SET_CMC_TBL_ANTSEL_A(skb->data, 0); + SET_CMC_TBL_ANTSEL_B(skb->data, 0); + SET_CMC_TBL_ANTSEL_C(skb->data, 0); + SET_CMC_TBL_ANTSEL_D(skb->data, 0); + } SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) @@ -811,7 +814,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, - H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, + chip->h2c_cctl_func_id, 0, 1, H2C_CMC_TBL_LEN); if (rtw89_h2c_tx(rtwdev, skb, false)) { @@ -851,6 +854,8 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, for (i = 0; i < RTW89_PPE_BW_NUM; i++) pads[i] = pad; + + return; } ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); @@ -885,6 +890,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { + const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; @@ -917,9 +923,17 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, else SET_CMC_TBL_ULDL(skb->data, 0); SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); - SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); - SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); - SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); + if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { + SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); + SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); + SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); + SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); + } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { + SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); + SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); + SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); + SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); + } if (sta) SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->deflink.he_cap.has_he); @@ -928,7 +942,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, - H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, + chip->h2c_cctl_func_id, 
0, 1, H2C_CMC_TBL_LEN); if (rtw89_h2c_tx(rtwdev, skb, false)) { @@ -946,6 +960,7 @@ fail: int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta) { + const struct rtw89_chip_info *chip = rtwdev->chip; struct sk_buff *skb; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); @@ -967,7 +982,7 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, - H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, + chip->h2c_cctl_func_id, 0, 1, H2C_CMC_TBL_LEN); if (rtw89_h2c_tx(rtwdev, skb, false)) { diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 2a010154a8e8..9cba8990b836 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -973,6 +973,36 @@ static inline void SET_CMC_TBL_ANTSEL_D(void *table, u32 val) le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D, BIT(31)); } + +#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0) +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(1, 0)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(1, 0)); +} + +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(3, 2)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(3, 2)); +} + +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(5, 4)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(5, 4)); +} + +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 6)); + le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(7, 6)); +} + #define SET_CMC_TBL_MASK_ADDR_CAM_INDEX GENMASK(7, 0) static inline void SET_CMC_TBL_ADDR_CAM_INDEX(void *table, u32 val) { @@ -1001,7 +1031,6 @@ static inline void SET_CMC_TBL_DOPPLER_CTRL(void *table, u32 val) le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL, GENMASK(19, 18)); } -#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0) static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING(void *table, u32 val) { le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20)); @@ -1106,13 +1135,14 @@ static inline void SET_CMC_TBL_CSI_GI_LTF(void *table, u32 val) le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF, GENMASK(27, 25)); } -#define SET_CMC_TBL_MASK_CSI_GID_SEL BIT(0) -static inline void SET_CMC_TBL_CSI_GID_SEL(void *table, u32 val) + +static inline void SET_CMC_TBL_NOMINAL_PKT_PADDING160(void *table, u32 val) { - le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29)); - le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL, - BIT(29)); + le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(29, 28)); + le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, + GENMASK(29, 28)); } + #define SET_CMC_TBL_MASK_CSI_BW GENMASK(1, 0) static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val) { @@ -2170,6 +2200,7 @@ struct rtw89_fw_h2c_rf_reg_info { #define H2C_CL_MAC_FR_EXCHG 0x5 #define H2C_FUNC_MAC_CCTLINFO_UD 0x2 #define H2C_FUNC_MAC_BCN_UPD 0x5 +#define 
H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa /* CLASS 6 - Address CAM */ #define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 9871ed78e44c..6371bbf7a2fd 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2147,6 +2147,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) | BIT(RTW89_PS_MODE_CLK_GATED) | BIT(RTW89_PS_MODE_PWR_GATED), + .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD, .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), .txwd_body_size = sizeof(struct rtw89_txwd_body), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 38b138330716..d900129c1a7c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -667,6 +667,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .dav_log_efuse_size = 16, .phycap_addr = 0x590, .phycap_size = 0x60, + .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1, .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1, .h2c_desc_size = sizeof(struct rtw89_rxdesc_short), .txwd_body_size = sizeof(struct rtw89_txwd_body_v1), -- cgit v1.2.3 From 04b5983ef700d49cc2b30beee95b7f0ba5693dd3 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Wed, 13 Apr 2022 09:08:02 +0800 Subject: rtw89: add new H2C to configure security CAM via DCTL for V1 chip DCTL is short for D-MAC control that V1 chip uses this H2C to configure security CAM. Implement the callers in next patch. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413010804.8941-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/cam.c | 28 +++ drivers/net/wireless/realtek/rtw89/cam.h | 4 + drivers/net/wireless/realtek/rtw89/fw.c | 35 ++++ drivers/net/wireless/realtek/rtw89/fw.h | 306 +++++++++++++++++++++++++++++++ 4 files changed, 373 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 34827f174ba1..08b9779163bb 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -710,3 +710,31 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, FWCMD_SET_ADDR_SEC_ENT5(cmd, addr_cam->sec_ent[5]); FWCMD_SET_ADDR_SEC_ENT6(cmd, addr_cam->sec_ent[6]); } + +void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta, + u8 *cmd) +{ + struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); + + SET_DCTL_MACID_V1(cmd, rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id); + SET_DCTL_OPERATION_V1(cmd, 1); + + SET_DCTL_SEC_ENT0_KEYID_V1(cmd, addr_cam->sec_ent_keyid[0]); + SET_DCTL_SEC_ENT1_KEYID_V1(cmd, addr_cam->sec_ent_keyid[1]); + SET_DCTL_SEC_ENT2_KEYID_V1(cmd, addr_cam->sec_ent_keyid[2]); + SET_DCTL_SEC_ENT3_KEYID_V1(cmd, addr_cam->sec_ent_keyid[3]); + SET_DCTL_SEC_ENT4_KEYID_V1(cmd, addr_cam->sec_ent_keyid[4]); + SET_DCTL_SEC_ENT5_KEYID_V1(cmd, addr_cam->sec_ent_keyid[5]); + SET_DCTL_SEC_ENT6_KEYID_V1(cmd, addr_cam->sec_ent_keyid[6]); + + SET_DCTL_SEC_ENT_VALID_V1(cmd, addr_cam->sec_cam_map[0] & 0xff); + SET_DCTL_SEC_ENT0_V1(cmd, addr_cam->sec_ent[0]); + SET_DCTL_SEC_ENT1_V1(cmd, addr_cam->sec_ent[1]); + SET_DCTL_SEC_ENT2_V1(cmd, addr_cam->sec_ent[2]); + SET_DCTL_SEC_ENT3_V1(cmd, addr_cam->sec_ent[3]); + SET_DCTL_SEC_ENT4_V1(cmd, addr_cam->sec_ent[4]); + SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]); + SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]); +} diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h index 3a6a786530d1..a3931d3e40d2 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.h +++ b/drivers/net/wireless/realtek/rtw89/cam.h @@ -355,6 +355,10 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, struct rtw89_vif *vif, struct rtw89_sta *rtwsta, const u8 *scan_mac_addr, u8 *cmd); +void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta, + u8 *cmd); int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev, struct rtw89_vif *vif, u8 *cmd); int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index bd07d2b83d07..7bef8b4288f0 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -599,6 +599,41 @@ fail: return -EBUSY; } +#define H2C_DCTL_SEC_CAM_LEN 68 +int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta) +{ + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); + return -ENOMEM; + } + skb_put(skb, H2C_DCTL_SEC_CAM_LEN); + + rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_FR_EXCHG, + H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, + H2C_DCTL_SEC_CAM_LEN); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} +EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); + #define H2C_BA_CAM_LEN 8 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, bool valid, struct ieee80211_ampdu_params *params) diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 9cba8990b836..aaabfc0dfd71 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -1151,6 +1151,308 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val) GENMASK(31, 30)); } +static inline void SET_DCTL_MACID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)); +} + +static inline void SET_DCTL_OPERATION_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7)); +} + +#define SET_DCTL_MASK_QOS_FIELD_V1 GENMASK(7, 0) +static inline void 
SET_DCTL_QOS_FIELD_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(7, 0)); + le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_FIELD_V1, + GENMASK(7, 0)); +} + +#define SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID GENMASK(6, 0) +static inline void SET_DCTL_HW_EXSEQ_MACID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 8)); + le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID, + GENMASK(14, 8)); +} + +#define SET_DCTL_MASK_QOS_DATA BIT(0) +static inline void SET_DCTL_QOS_DATA_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15)); + le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_DATA, + BIT(15)); +} + +#define SET_DCTL_MASK_AES_IV_L GENMASK(15, 0) +static inline void SET_DCTL_AES_IV_L_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 16)); + le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_AES_IV_L, + GENMASK(31, 16)); +} + +#define SET_DCTL_MASK_AES_IV_H GENMASK(31, 0) +static inline void SET_DCTL_AES_IV_H_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 0)); + le32p_replace_bits((__le32 *)(table) + 10, SET_DCTL_MASK_AES_IV_H, + GENMASK(31, 0)); +} + +#define SET_DCTL_MASK_SEQ0 GENMASK(11, 0) +static inline void SET_DCTL_SEQ0_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 0)); + le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ0, + GENMASK(11, 0)); +} + +#define SET_DCTL_MASK_SEQ1 GENMASK(11, 0) +static inline void SET_DCTL_SEQ1_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(23, 12)); + le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ1, + GENMASK(23, 12)); +} + +#define SET_DCTL_MASK_AMSDU_MAX_LEN GENMASK(2, 0) +static inline void SET_DCTL_AMSDU_MAX_LEN_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 24)); + le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_AMSDU_MAX_LEN, + GENMASK(26, 24)); +} + +#define SET_DCTL_MASK_STA_AMSDU_EN BIT(0) +static inline void SET_DCTL_STA_AMSDU_EN_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27)); + le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_STA_AMSDU_EN, + BIT(27)); +} + +#define SET_DCTL_MASK_CHKSUM_OFLD_EN BIT(0) +static inline void SET_DCTL_CHKSUM_OFLD_EN_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(28)); + le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_CHKSUM_OFLD_EN, + BIT(28)); +} + +#define SET_DCTL_MASK_WITH_LLC BIT(0) +static inline void SET_DCTL_WITH_LLC_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 3, val, BIT(29)); + le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_WITH_LLC, + BIT(29)); +} + +#define SET_DCTL_MASK_SEQ2 GENMASK(11, 0) +static inline void SET_DCTL_SEQ2_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(11, 0)); + le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ2, + GENMASK(11, 0)); +} + +#define SET_DCTL_MASK_SEQ3 GENMASK(11, 0) +static inline void SET_DCTL_SEQ3_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(23, 12)); + le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ3, + GENMASK(23, 12)); +} + +#define SET_DCTL_MASK_TGT_IND GENMASK(3, 0) +static inline void SET_DCTL_TGT_IND_V1(void *table, u32 val) +{ + 
le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 24)); + le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND, + GENMASK(27, 24)); +} + +#define SET_DCTL_MASK_TGT_IND_EN BIT(0) +static inline void SET_DCTL_TGT_IND_EN_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, BIT(28)); + le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND_EN, + BIT(28)); +} + +#define SET_DCTL_MASK_HTC_LB GENMASK(2, 0) +static inline void SET_DCTL_HTC_LB_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 29)); + le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_HTC_LB, + GENMASK(31, 29)); +} + +#define SET_DCTL_MASK_MHDR_LEN GENMASK(4, 0) +static inline void SET_DCTL_MHDR_LEN_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(4, 0)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_MHDR_LEN, + GENMASK(4, 0)); +} + +#define SET_DCTL_MASK_VLAN_TAG_VALID BIT(0) +static inline void SET_DCTL_VLAN_TAG_VALID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(5)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_VALID, + BIT(5)); +} + +#define SET_DCTL_MASK_VLAN_TAG_SEL GENMASK(1, 0) +static inline void SET_DCTL_VLAN_TAG_SEL_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 6)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_SEL, + GENMASK(7, 6)); +} + +#define SET_DCTL_MASK_HTC_ORDER BIT(0) +static inline void SET_DCTL_HTC_ORDER_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_HTC_ORDER, + BIT(8)); +} + +#define SET_DCTL_MASK_SEC_KEY_ID GENMASK(1, 0) +static inline void SET_DCTL_SEC_KEY_ID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(10, 9)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_KEY_ID, + GENMASK(10, 9)); +} + +#define SET_DCTL_MASK_WAPI BIT(0) +static inline void SET_DCTL_WAPI_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_WAPI, + BIT(15)); +} + +#define SET_DCTL_MASK_SEC_ENT_MODE GENMASK(1, 0) +static inline void SET_DCTL_SEC_ENT_MODE_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(17, 16)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENT_MODE, + GENMASK(17, 16)); +} + +#define SET_DCTL_MASK_SEC_ENTX_KEYID GENMASK(1, 0) +static inline void SET_DCTL_SEC_ENT0_KEYID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(19, 18)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, + GENMASK(19, 18)); +} + +static inline void SET_DCTL_SEC_ENT1_KEYID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(21, 20)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, + GENMASK(21, 20)); +} + +static inline void SET_DCTL_SEC_ENT2_KEYID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(23, 22)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, + GENMASK(23, 22)); +} + +static inline void SET_DCTL_SEC_ENT3_KEYID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(25, 24)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, + GENMASK(25, 24)); +} 
+ +static inline void SET_DCTL_SEC_ENT4_KEYID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(27, 26)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, + GENMASK(27, 26)); +} + +static inline void SET_DCTL_SEC_ENT5_KEYID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(29, 28)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, + GENMASK(29, 28)); +} + +static inline void SET_DCTL_SEC_ENT6_KEYID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 30)); + le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, + GENMASK(31, 30)); +} + +#define SET_DCTL_MASK_SEC_ENT_VALID GENMASK(7, 0) +static inline void SET_DCTL_SEC_ENT_VALID_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(7, 0)); + le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENT_VALID, + GENMASK(7, 0)); +} + +#define SET_DCTL_MASK_SEC_ENTX GENMASK(7, 0) +static inline void SET_DCTL_SEC_ENT0_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(15, 8)); + le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX, + GENMASK(15, 8)); +} + +static inline void SET_DCTL_SEC_ENT1_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 16)); + le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX, + GENMASK(23, 16)); +} + +static inline void SET_DCTL_SEC_ENT2_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(31, 24)); + le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX, + GENMASK(31, 24)); +} + +static inline void SET_DCTL_SEC_ENT3_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0)); + le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, + GENMASK(7, 0)); +} + +static inline void SET_DCTL_SEC_ENT4_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(15, 8)); + le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, + GENMASK(15, 8)); +} + +static inline void SET_DCTL_SEC_ENT5_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 16)); + le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, + GENMASK(23, 16)); +} + +static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val) +{ + le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 24)); + le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, + GENMASK(31, 24)); +} + static inline void SET_BCN_UPD_PORT(void *h2c, u32 val) { le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)); @@ -2200,6 +2502,7 @@ struct rtw89_fw_h2c_rf_reg_info { #define H2C_CL_MAC_FR_EXCHG 0x5 #define H2C_FUNC_MAC_CCTLINFO_UD 0x2 #define H2C_FUNC_MAC_BCN_UPD 0x5 +#define H2C_FUNC_MAC_DCTLINFO_UD_V1 0x9 #define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa /* CLASS 6 - Address CAM */ @@ -2267,6 +2570,9 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif, struct rtw89_sta *rtwsta, const u8 *scan_mac_addr); +int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta); void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h); void rtw89_fw_c2h_work(struct work_struct *work); int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, -- cgit v1.2.3 
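A note on the SET_DCTL_*_V1 helpers added above: the 68-byte DCTL payload pairs its data dwords (MACID and OPERATION in dword 0, the configurable fields in dwords 1-7) with mask dwords at offset +8 (dwords 9-15). Every setter writes the field value into its data dword and raises the same bit range in the paired mask dword, presumably so the firmware applies only the fields a given update actually carries. A minimal sketch of that pairing for a hypothetical 2-bit field, following the same le32p_replace_bits() convention as the patch (the example_* names are illustrative and are not part of the driver):

#include <linux/bits.h>
#include <linux/bitfield.h>

/* Hypothetical 2-bit DCTL field at bits 19:18 of data dword 5, mirroring the
 * data/mask pairing used by the SET_DCTL_*_V1 helpers: the value lands in
 * dword 5 and an all-ones mask for the same bit range lands in dword
 * 5 + 8 = 13, marking the field as valid for the firmware.
 */
#define EXAMPLE_DCTL_MASK_FIELD	GENMASK(1, 0)

static inline void example_set_dctl_field_v1(void *table, u32 val)
{
	le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(19, 18));
	le32p_replace_bits((__le32 *)(table) + 13, EXAMPLE_DCTL_MASK_FIELD,
			   GENMASK(19, 18));
}

With this convention the fill routine in cam.c only calls the setters for the security entries it needs, untouched fields keep zero mask bits, and the next change wires the resulting H2C up behind a per-chip operation.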
From 0a6f299b6782c522be67074d37e4e0da56389ddc Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Wed, 13 Apr 2022 09:08:03 +0800 Subject: rtw89: configure security CAM for V1 chip Add to configure security CAM while mac80211 calls set_key and del_key. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413010804.8941-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/cam.c | 9 +++++++++ drivers/net/wireless/realtek/rtw89/core.h | 15 +++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + 4 files changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 08b9779163bb..8a26adeb23fb 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -244,6 +244,12 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx; addr_cam->sec_entries[key_idx] = sec_cam; set_bit(key_idx, addr_cam->sec_cam_map); + ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta); + if (ret) { + rtw89_err(rtwdev, "failed to update dctl cam sec entry: %d\n", + ret); + return ret; + } ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); if (ret) { rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n", @@ -398,6 +404,9 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev, clear_bit(key_idx, addr_cam->sec_cam_map); addr_cam->sec_entries[key_idx] = NULL; if (inform_fw) { + ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta); + if (ret) + rtw89_err(rtwdev, "failed to update dctl cam del key: %d\n", ret); ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); if (ret) rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 2715dc1ff73f..090d41656901 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2106,6 +2106,9 @@ struct rtw89_chip_ops { int (*stop_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 *tx_en, enum rtw89_sch_tx_sel sel); int (*resume_sch_tx)(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en); + int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta); void (*btc_set_rfe)(struct rtw89_dev *rtwdev); void (*btc_init_cfg)(struct rtw89_dev *rtwdev); @@ -3634,6 +3637,18 @@ int rtw89_chip_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en) return chip->ops->resume_sch_tx(rtwdev, mac_idx, tx_en); } +static inline +int rtw89_chip_h2c_dctl_sec_cam(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + struct rtw89_sta *rtwsta) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (!chip->ops->h2c_dctl_sec_cam) + return 0; + return chip->ops->h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta); +} + static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr) { __le16 fc = hdr->frame_control; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 6371bbf7a2fd..975c50495304 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2075,6 +2075,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .mac_cfg_gnt = rtw89_mac_cfg_gnt, .stop_sch_tx = rtw89_mac_stop_sch_tx, .resume_sch_tx = rtw89_mac_resume_sch_tx, + .h2c_dctl_sec_cam = NULL, .btc_set_rfe = 
rtw8852a_btc_set_rfe, .btc_init_cfg = rtw8852a_btc_init_cfg, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index d900129c1a7c..d56d65661ce0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -645,6 +645,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .mac_cfg_gnt = rtw89_mac_cfg_gnt_v1, .stop_sch_tx = rtw89_mac_stop_sch_tx_v1, .resume_sch_tx = rtw89_mac_resume_sch_tx_v1, + .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1, .btc_init_cfg = rtw8852c_btc_init_cfg, }; -- cgit v1.2.3 From dc4246eff026699c48b27d5bae944b664a5c8035 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Wed, 13 Apr 2022 09:08:04 +0800 Subject: rtw89: pci: correct return value handling of rtw89_write16_mdio_mask() Fix wrong checking statement. Fortunately, this wrong code doesn't affect existing chip. Fixes: 740c431c22fe ("rtw89: pci: add register definition to rtw89_pci_info to generalize pci code") Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413010804.8941-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index dcf907b81cff..ecb419010f0c 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -1522,7 +1522,7 @@ rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u u16 val; ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); - if (!ret) + if (ret) return ret; shift = __ffs(mask); @@ -1530,7 +1530,7 @@ rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u val |= ((data << shift) & mask); ret = rtw89_write16_mdio(rtwdev, addr, val, speed); - if (!ret) + if (ret) return ret; return 0; -- cgit v1.2.3 From 4263f77a5144f708a00aa8e9570b392777ab2482 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 11 Apr 2022 12:13:25 +0200 Subject: net: ethernet: mtk_eth_soc: use standard property for cci-control-port Rely on standard cci-control-port property to identify CCI port reference. Update mt7622 dts binding. Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- arch/arm64/boot/dts/mediatek/mt7622.dtsi | 2 +- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index 47d223e28f8d..f232f8baf4e8 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -970,7 +970,7 @@ power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; mediatek,ethsys = <ðsys>; mediatek,sgmiisys = <&sgmiisys>; - mediatek,cci-control = <&cci_control2>; + cci-control-port = <&cci_control2>; mediatek,wed = <&wed0>, <&wed1>; mediatek,pcie-mirror = <&pcie_mirror>; mediatek,hifsys = <&hifsys>; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 209d00f56f62..18eebcaa6a76 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3149,7 +3149,7 @@ static int mtk_probe(struct platform_device *pdev) struct regmap *cci; cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "mediatek,cci-control"); + "cci-control-port"); /* enable CPU/bus coherency */ if (!IS_ERR(cci)) regmap_write(cci, 0, 3); -- cgit v1.2.3 From 2240514cb6441641111ac1fabab03e9df9972d7f Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Tue, 12 Apr 2022 09:05:15 +0000 Subject: net: ethernet: ti: am65-cpsw-nuss: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 33 +++++++++++--------------------- 1 file changed, 11 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index d2747e9db286..b7ebd741f284 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -173,11 +173,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev, if (!netif_running(ndev) || !vid) return 0; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } port_mask = BIT(port->port_id) | ALE_PORT_HOST; if (!vid) @@ -203,11 +201,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev, if (!netif_running(ndev) || !vid) return 0; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid); ret = cpsw_ale_del_vlan(common->ale, vid, @@ -557,11 +553,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) struct am65_cpsw_port *port = am65_ndev_to_port(ndev); int ret, i; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } /* Notify the stack of the actual queue counts. 
*/ ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num); @@ -1214,11 +1208,9 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev, if (ret < 0) return ret; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { - pm_runtime_put_noidle(common->dev); + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) return ret; - } cpsw_ale_del_ucast(common->ale, ndev->dev_addr, HOST_PORT_NUM, 0, 0); @@ -2692,9 +2684,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) common->bus_freq = clk_get_rate(clk); pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } @@ -2789,11 +2780,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev) common = dev_get_drvdata(dev); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) return ret; - } am65_cpsw_nuss_phylink_cleanup(common); am65_cpsw_unregister_devlink(common); -- cgit v1.2.3 From 17a5f6a78dc7b8db385de346092d7d9f9dc24df6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 12 Apr 2022 12:24:19 +0300 Subject: net: ethernet: mtk_eth_soc: use after free in __mtk_ppe_check_skb() The __mtk_foe_entry_clear() function frees "entry" so we have to use the _safe() version of hlist_for_each_entry() to prevent a use after free. Fixes: 33fc42de3327 ("net: ethernet: mtk_eth_soc: support creating mac address based offload entries") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_ppe.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 282a1f34a88a..683f89f8e3b2 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -600,6 +600,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) struct mtk_foe_entry *hwe = &ppe->foe_table[hash]; struct mtk_flow_entry *entry; struct mtk_foe_bridge key = {}; + struct hlist_node *n; struct ethhdr *eh; bool found = false; u8 *tag; @@ -609,7 +610,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) goto out; - hlist_for_each_entry(entry, head, list) { + hlist_for_each_entry_safe(entry, n, head, list) { if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)) -- cgit v1.2.3 From 1a95e04e29a116c3424988c70c441ca8ec2779ff Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Tue, 12 Apr 2022 11:24:00 +0100 Subject: net: phylink: remove phylink_helper_basex_speed() As there are now no users of phylink_helper_basex_speed(), we can remove this obsolete functionality. Signed-off-by: Russell King (Oracle) Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 28 ---------------------------- include/linux/phylink.h | 6 ------ 2 files changed, 34 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 06943889d747..33c285252584 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -2778,34 +2778,6 @@ static const struct sfp_upstream_ops sfp_phylink_ops = { /* Helpers for MAC drivers */ -/** - * phylink_helper_basex_speed() - 1000BaseX/2500BaseX helper - * @state: a pointer to a &struct phylink_link_state - * - * Inspect the interface mode, advertising mask or forced speed and - * decide whether to run at 2.5Gbit or 1Gbit appropriately, switching - * the interface mode to suit. @state->interface is appropriately - * updated, and the advertising mask has the "other" baseX_Full flag - * cleared. - */ -void phylink_helper_basex_speed(struct phylink_link_state *state) -{ - if (phy_interface_mode_is_8023z(state->interface)) { - bool want_2500 = state->an_enabled ? - phylink_test(state->advertising, 2500baseX_Full) : - state->speed == SPEED_2500; - - if (want_2500) { - phylink_clear(state->advertising, 1000baseX_Full); - state->interface = PHY_INTERFACE_MODE_2500BASEX; - } else { - phylink_clear(state->advertising, 2500baseX_Full); - state->interface = PHY_INTERFACE_MODE_1000BASEX; - } - } -} -EXPORT_SYMBOL_GPL(phylink_helper_basex_speed); - static void phylink_decode_c37_word(struct phylink_link_state *state, uint16_t config_reg, int speed) { diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 223781622b33..6d06896fc20d 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -160,11 +160,6 @@ struct phylink_mac_ops { * clearing unsupported speeds and duplex settings. The port modes * should not be cleared; phylink_set_port_modes() will help with this. * - * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX - * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode - * based on @state->advertising and/or @state->speed and update - * @state->interface accordingly. See phylink_helper_basex_speed(). - * * When @config->supported_interfaces has been set, phylink will iterate * over the supported interfaces to determine the full capability of the * MAC. The validation function must not print errors if @state->interface @@ -579,7 +574,6 @@ int phylink_speed_up(struct phylink *pl); #define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) void phylink_set_port_modes(unsigned long *bits); -void phylink_helper_basex_speed(struct phylink_link_state *state); void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, u16 bmsr, u16 lpa); -- cgit v1.2.3 From e3a5e33fae99b69d72296b2c6afea1db4a397779 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 12 Apr 2022 13:29:27 +0300 Subject: drivers: net: cpsw: ale: add broadcast/multicast rate limit support The CPSW ALE supports feature to rate limit number ingress broadcast(BC)/multicast(MC) packets per/sec which main purpose is BC/MC storm prevention. The ALE BC/MC packet rate limit configuration consist of two parts: - global ALE_CONTROL.ENABLE_RATE_LIMIT bit 0 which enables rate limiting globally ALE_PRESCALE.PRESCALE specifies rate limiting interval - per-port ALE_PORTCTLx.BCASTMCAST/_LIMIT specifies number of BC/MC packets allowed per rate limiting interval. When port.BCASTMCAST/_LIMIT is 0 rate limiting is disabled for Port. 
When BC/MC packet rate limiting is enabled the number of allowed packets per/sec is defined as: number_of_packets/sec = (Fclk / ALE_PRESCALE) * port.BCASTMCAST/_LIMIT Hence, the ALE_PRESCALE configuration is common for all ports the 1ms interval is selected and configured during ALE initialization while port.BCAST/MCAST_LIMIT are configured per-port. This allows to achieve: - min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1 - max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF The ALE_CONTROL.ENABLE_RATE_LIMIT can also be enabled once during ALE initialization as rate limiting enabled by non zero port.BCASTMCAST/_LIMIT values. This patch implements above logic in ALE and adds new ALE APIs cpsw_ale_rx_ratelimit_bc(); cpsw_ale_rx_ratelimit_mc(); Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw_ale.c | 66 ++++++++++++++++++++++++++++++++++++++ drivers/net/ethernet/ti/cpsw_ale.h | 2 ++ 2 files changed, 68 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 1ef0aaef5c61..231370e9a801 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -50,6 +50,8 @@ /* ALE_AGING_TIMER */ #define ALE_AGING_TIMER_MASK GENMASK(23, 0) +#define ALE_RATE_LIMIT_MIN_PPS 1000 + /** * struct ale_entry_fld - The ALE tbl entry field description * @start_bit: field start bit @@ -1136,6 +1138,50 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) return tmp & BITMASK(info->bits); } +int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps) + +{ + int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS; + u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS; + + if (ratelimit_pps && !val) { + dev_err(ale->params.dev, "ALE MC port:%d ratelimit min value 1000pps\n", port); + return -EINVAL; + } + + if (remainder) + dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n", + port, ratelimit_pps - remainder, ratelimit_pps); + + cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val); + + dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n", + port, val * ALE_RATE_LIMIT_MIN_PPS); + return 0; +} + +int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps) + +{ + int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS; + u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS; + + if (ratelimit_pps && !val) { + dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port); + return -EINVAL; + } + + if (remainder) + dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n", + port, ratelimit_pps - remainder, ratelimit_pps); + + cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val); + + dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n", + port, val * ALE_RATE_LIMIT_MIN_PPS); + return 0; +} + static void cpsw_ale_timer(struct timer_list *t) { struct cpsw_ale *ale = from_timer(ale, t, timer); @@ -1199,6 +1245,26 @@ static void cpsw_ale_aging_stop(struct cpsw_ale *ale) void cpsw_ale_start(struct cpsw_ale *ale) { + unsigned long ale_prescale; + + /* configure Broadcast and Multicast Rate Limit + * number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT + * ALE_PRESCALE width is 19bit and min value 0x10 + * port.BCAST/MCAST_LIMIT is 8bit + * + * For multi port configuration support the ALE_PRESCALE is configured to 1ms interval, + * which allows to configure 
port.BCAST/MCAST_LIMIT per port and achieve: + * min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1 + * max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF + */ + ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS; + writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE); + + /* Allow MC/BC rate limiting globally. + * The actual Rate Limit cfg enabled per-port by port.BCAST/MCAST_LIMIT + */ + cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1); + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 13fe47687fde..aba4572cfa3b 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -120,6 +120,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, int reg_mcast, int unreg_mcast); int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port); +int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps); +int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps); int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); int cpsw_ale_control_set(struct cpsw_ale *ale, int port, -- cgit v1.2.3 From 5ec836be11b3f7dfbdeaee8dd86137f134df6f6f Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Tue, 12 Apr 2022 13:29:28 +0300 Subject: net: ethernet: ti: am65-cpsw: enable bc/mc storm prevention support This patch enables support for ingress broadcast(BC)/multicast(MC) packets rate limiting in TI AM65x CPSW driver (the corresponding ALE support was added in previous patch) by implementing HW offload for simple tc-flower with policer action with matches on dst_mac/mask: - ff:ff:ff:ff:ff:ff/ff:ff:ff:ff:ff:ff has to be used for BC packets rate limiting (exact match) - 01:00:00:00:00:00/01:00:00:00:00:00 fixed value has to be used for MC packets rate limiting The CPSW supports MC/BC packets rate limiting in packets/sec and affects all ingress MC/BC packets and serves as BC/MC storm prevention feature. Examples: - BC rate limit to 1000pps: tc qdisc add dev eth0 clsact tc filter add dev eth0 ingress flower skip_sw dst_mac ff:ff:ff:ff:ff:ff \ action police pkts_rate 1000 pkts_burst 1 drop - MC rate limit to 20000pps: tc qdisc add dev eth0 clsact tc filter add dev eth0 ingress flower skip_sw dst_mac 01:00:00:00:00:00/01:00:00:00:00:00 \ action police rate pkts_rate 20000 pkts_burst 1 drop pkts_burst - not used. Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/am65-cpsw-qos.c | 180 ++++++++++++++++++++++++++++++++ drivers/net/ethernet/ti/am65-cpsw-qos.h | 8 ++ 2 files changed, 188 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index ebcc6386cc34..aa32dd905e2b 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -8,10 +8,12 @@ #include #include +#include #include "am65-cpsw-nuss.h" #include "am65-cpsw-qos.h" #include "am65-cpts.h" +#include "cpsw_ale.h" #define AM65_CPSW_REG_CTL 0x004 #define AM65_CPSW_PN_REG_CTL 0x004 @@ -588,12 +590,190 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data) return am65_cpsw_set_taprio(ndev, type_data); } +static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port, + struct netlink_ext_ack *extack, + struct flow_cls_offload *cls, + u64 rate_pkt_ps) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct am65_cpsw_qos *qos = &port->qos; + struct flow_match_eth_addrs match; + int ret; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address"); + return -EOPNOTSUPP; + } + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on source MAC not supported"); + return -EOPNOTSUPP; + } + + if (is_broadcast_ether_addr(match.key->dst) && + is_broadcast_ether_addr(match.mask->dst)) { + ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps); + if (ret) + return ret; + + qos->ale_bc_ratelimit.cookie = cls->cookie; + qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) && + ether_addr_equal_unaligned(match.mask->dst, mc_mac)) { + ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps); + if (ret) + return ret; + + qos->ale_mc_ratelimit.cookie = cls->cookie; + qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else { + NL_SET_ERR_MSG_MOD(extack, "Not supported matching key"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when bytes per 
second/peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + const struct flow_action_entry *act; + int i, ret; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_POLICE: + ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack); + if (ret) + return ret; + + return am65_cpsw_qos_clsflower_add_policer(port, extack, cls, + act->police.rate_pkt_ps); + default: + NL_SET_ERR_MSG_MOD(extack, + "Action not supported"); + return -EOPNOTSUPP; + } + } + return -EOPNOTSUPP; +} + +static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls) +{ + struct am65_cpsw_qos *qos = &port->qos; + + if (cls->cookie == qos->ale_bc_ratelimit.cookie) { + qos->ale_bc_ratelimit.cookie = 0; + qos->ale_bc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0); + } + + if (cls->cookie == qos->ale_mc_ratelimit.cookie) { + qos->ale_mc_ratelimit.cookie = 0; + qos->ale_mc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0); + } + + return 0; +} + +static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port, + struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return am65_cpsw_qos_configure_clsflower(port, cls_flower); + case FLOW_CLS_DESTROY: + return am65_cpsw_qos_delete_clsflower(port, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct am65_cpsw_port *port = cb_priv; + + if (!tc_cls_can_offload_and_chain0(port->ndev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return am65_cpsw_qos_setup_tc_clsflower(port, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(am65_cpsw_qos_block_cb_list); + +static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f) +{ + struct am65_cpsw_port *port = am65_ndev_to_port(ndev); + + return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list, + am65_cpsw_qos_setup_tc_block_cb, + port, port, true); +} + int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data) { switch (type) { case TC_SETUP_QDISC_TAPRIO: return am65_cpsw_setup_taprio(ndev, type_data); + case TC_SETUP_BLOCK: + return am65_cpsw_qos_setup_tc_block(ndev, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h index e8f1b6b59e93..fb223b43b196 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.h +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h @@ -14,11 +14,19 @@ struct am65_cpsw_est { struct tc_taprio_qopt_offload taprio; }; +struct am65_cpsw_ale_ratelimit { + unsigned long cookie; + u64 rate_packet_ps; +}; + struct am65_cpsw_qos { struct am65_cpsw_est *est_admin; struct am65_cpsw_est *est_oper; ktime_t link_down_time; int link_speed; + + struct am65_cpsw_ale_ratelimit ale_bc_ratelimit; + struct am65_cpsw_ale_ratelimit ale_mc_ratelimit; }; int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, -- cgit v1.2.3 From 127c9e970f59edaaaa7a7134e55752314c9691da Mon Sep 17 00:00:00 2001 From: 
Grygorii Strashko Date: Tue, 12 Apr 2022 13:29:29 +0300 Subject: net: ethernet: ti: cpsw_new: enable bc/mc storm prevention support This patch enables support for ingress broadcast(BC)/multicast(MC) packets rate limiting in TI CPSW switchdev driver (the corresponding ALE support was added in previous patch) by implementing HW offload for simple tc-flower with policer action with matches on dst_mac: - ff:ff:ff:ff:ff:ff/ff:ff:ff:ff:ff:ff has to be used for BC packets rate limiting (exact match) - 01:00:00:00:00:00/01:00:00:00:00:00 fixed value has to be used for MC packets rate limiting The CPSW supports MC/BC packets rate limiting in packets/sec and affects all ingress MC/BC packets and serves as BC/MC storm prevention feature. Examples: - BC rate limit to 1000pps: tc qdisc add dev eth0 clsact tc filter add dev eth0 ingress flower skip_sw dst_mac ff:ff:ff:ff:ff:ff \ action police pkts_rate 1000 pkts_burst 1 drop - MC rate limit to 20000pps: tc qdisc add dev eth0 clsact tc filter add dev eth0 ingress flower skip_sw dst_mac 01:00:00:00:00:00/01:00:00:00:00:00 \ action police rate pkts_rate 10000 pkts_burst 1 drop pkts_burst - not used. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw_new.c | 4 +- drivers/net/ethernet/ti/cpsw_priv.c | 205 ++++++++++++++++++++++++++++++++++++ drivers/net/ethernet/ti/cpsw_priv.h | 8 ++ 3 files changed, 216 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 394145d06d54..dfa0a9cf9d89 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -498,6 +498,8 @@ static void cpsw_restore(struct cpsw_priv *priv) /* restore CBS offload */ cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv); + + cpsw_qos_clsflower_resume(priv); } static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw) @@ -1407,7 +1409,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) cpsw->slaves[i].ndev = ndev; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL; + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 8f6817f346ba..4dbd327c6a4d 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -502,6 +502,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, ale_params.ale_ageout = ale_ageout; ale_params.ale_ports = CPSW_ALE_PORTS_NUM; ale_params.dev_id = "cpsw"; + ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000; cpsw->ale = cpsw_ale_create(&ale_params); if (IS_ERR(cpsw->ale)) { @@ -1048,6 +1049,8 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) return 0; } +static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f); + int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, void *type_data) { @@ -1058,6 +1061,9 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, case TC_SETUP_QDISC_MQPRIO: return cpsw_set_mqprio(ndev, type_data); + case TC_SETUP_BLOCK: + return cpsw_qos_setup_tc_block(ndev, type_data); + default: return -EOPNOTSUPP; } @@ -1381,3 +1387,202 @@ drop: page_pool_recycle_direct(cpsw->page_pool[ch], page); return ret; } + +static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv, + struct netlink_ext_ack *extack, + 
struct flow_cls_offload *cls, + u64 rate_pkt_ps) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct flow_match_eth_addrs match; + u32 port_id; + int ret; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported keys used"); + return -EOPNOTSUPP; + } + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address"); + return -EOPNOTSUPP; + } + + flow_rule_match_eth_addrs(rule, &match); + + if (!is_zero_ether_addr(match.mask->src)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on source MAC not supported"); + return -EOPNOTSUPP; + } + + port_id = cpsw_slave_index(priv->cpsw, priv) + 1; + + if (is_broadcast_ether_addr(match.key->dst) && + is_broadcast_ether_addr(match.mask->dst)) { + ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps); + if (ret) + return ret; + + priv->ale_bc_ratelimit.cookie = cls->cookie; + priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) && + ether_addr_equal_unaligned(match.mask->dst, mc_mac)) { + ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps); + if (ret) + return ret; + + priv->ale_mc_ratelimit.cookie = cls->cookie; + priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps; + } else { + NL_SET_ERR_MSG_MOD(extack, "Not supported matching key"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action, + const struct flow_action_entry *act, + struct netlink_ext_ack *extack) +{ + if (act->police.exceed.act_id != FLOW_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when exceed action is not drop"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id != FLOW_ACTION_PIPE && + act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is not pipe or ok"); + return -EOPNOTSUPP; + } + + if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && + !flow_action_is_last_entry(action, act)) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when conform action is ok, but action is not last"); + return -EOPNOTSUPP; + } + + if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps || + act->police.avrate || act->police.overhead) { + NL_SET_ERR_MSG_MOD(extack, + "Offload not supported when bytes per second/peakrate/avrate/overhead is configured"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + const struct flow_action_entry *act; + int i, ret; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_POLICE: + ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack); + if (ret) + return ret; + + return cpsw_qos_clsflower_add_policer(priv, extack, cls, + act->police.rate_pkt_ps); + default: + NL_SET_ERR_MSG_MOD(extack, "Action not supported"); + return -EOPNOTSUPP; + } + } + return -EOPNOTSUPP; +} + +static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls) +{ + u32 port_id = 
cpsw_slave_index(priv->cpsw, priv) + 1; + + if (cls->cookie == priv->ale_bc_ratelimit.cookie) { + priv->ale_bc_ratelimit.cookie = 0; + priv->ale_bc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0); + } + + if (cls->cookie == priv->ale_mc_ratelimit.cookie) { + priv->ale_mc_ratelimit.cookie = 0; + priv->ale_mc_ratelimit.rate_packet_ps = 0; + cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0); + } + + return 0; +} + +static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower) +{ + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return cpsw_qos_configure_clsflower(priv, cls_flower); + case FLOW_CLS_DESTROY: + return cpsw_qos_delete_clsflower(priv, cls_flower); + default: + return -EOPNOTSUPP; + } +} + +static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct cpsw_priv *priv = cb_priv; + int ret; + + if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data)) + return -EOPNOTSUPP; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + switch (type) { + case TC_SETUP_CLSFLOWER: + ret = cpsw_qos_setup_tc_clsflower(priv, type_data); + break; + default: + ret = -EOPNOTSUPP; + } + + pm_runtime_put(priv->dev); + return ret; +} + +static LIST_HEAD(cpsw_qos_block_cb_list); + +static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list, + cpsw_qos_setup_tc_block_cb, + priv, priv, true); +} + +void cpsw_qos_clsflower_resume(struct cpsw_priv *priv) +{ + u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1; + + if (priv->ale_bc_ratelimit.cookie) + cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, + priv->ale_bc_ratelimit.rate_packet_ps); + + if (priv->ale_mc_ratelimit.cookie) + cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, + priv->ale_mc_ratelimit.rate_packet_ps); +} diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 86a0a1a093b5..fc591f5ebe18 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -364,6 +364,11 @@ struct cpsw_common { u8 base_mac[ETH_ALEN]; }; +struct cpsw_ale_ratelimit { + unsigned long cookie; + u64 rate_packet_ps; +}; + struct cpsw_priv { struct net_device *ndev; struct device *dev; @@ -384,6 +389,8 @@ struct cpsw_priv { struct cpsw_common *cpsw; int offload_fwd_mark; u32 tx_packet_min; + struct cpsw_ale_ratelimit ale_bc_ratelimit; + struct cpsw_ale_ratelimit ale_mc_ratelimit; }; #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) @@ -461,6 +468,7 @@ int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, bool cpsw_shp_is_off(struct cpsw_priv *priv); void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); +void cpsw_qos_clsflower_resume(struct cpsw_priv *priv); /* ethtool */ u32 cpsw_get_msglevel(struct net_device *ndev); -- cgit v1.2.3 From 862cd659a6fbac664f1fcdd7149046040a7a7e9c Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 12 Apr 2022 20:34:57 -0700 Subject: octeon_ep: Add driver framework and device initialization Add driver framework and device setup and initialization for Octeon PCI Endpoint NIC. Add implementation to load module, initilaize, register network device, cleanup and unload module. 
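For orientation, the framework introduced here follows the usual PCI network driver shape: probe enables the PCI device, maps resources, allocates and registers a net_device, and remove undoes this in reverse. The sketch below shows only that generic shape under those assumptions; the example_* names are illustrative and are not the octep_* symbols added by this patch (those live in octep_main.c further down).

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *ndev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* Real drivers also request BAR regions, set DMA masks and fill in
	 * ndev->netdev_ops, ethtool_ops and a MAC address before registering.
	 */
	ndev = alloc_etherdev(0);
	if (!ndev) {
		err = -ENOMEM;
		goto err_disable;
	}
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, ndev);

	err = register_netdev(ndev);
	if (err)
		goto err_free;

	return 0;

err_free:
	free_netdev(ndev);
err_disable:
	pci_disable_device(pdev);
	return err;
}

static void example_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	pci_disable_device(pdev);
}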
Signed-off-by: Veerasenareddy Burru Signed-off-by: Abhijit Ayarekar Signed-off-by: Satananda Burla Signed-off-by: David S. Miller --- .../networking/device_drivers/ethernet/index.rst | 1 + .../device_drivers/ethernet/marvell/octeon_ep.rst | 35 ++ MAINTAINERS | 7 + drivers/net/ethernet/marvell/Kconfig | 1 + drivers/net/ethernet/marvell/Makefile | 1 + drivers/net/ethernet/marvell/octeon_ep/Kconfig | 20 + drivers/net/ethernet/marvell/octeon_ep/Makefile | 9 + .../net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 241 ++++++++++ .../net/ethernet/marvell/octeon_ep/octep_config.h | 204 ++++++++ .../ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 64 +++ .../ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 170 +++++++ .../ethernet/marvell/octeon_ep/octep_ctrl_net.c | 42 ++ .../ethernet/marvell/octeon_ep/octep_ctrl_net.h | 299 ++++++++++++ .../net/ethernet/marvell/octeon_ep/octep_main.c | 511 +++++++++++++++++++++ .../net/ethernet/marvell/octeon_ep/octep_main.h | 364 +++++++++++++++ .../marvell/octeon_ep/octep_regs_cn9k_pf.h | 367 +++++++++++++++ drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 42 ++ drivers/net/ethernet/marvell/octeon_ep/octep_rx.h | 199 ++++++++ drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 43 ++ drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 284 ++++++++++++ 20 files changed, 2904 insertions(+) create mode 100644 Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst create mode 100644 drivers/net/ethernet/marvell/octeon_ep/Kconfig create mode 100644 drivers/net/ethernet/marvell/octeon_ep/Makefile create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_config.h create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_main.c create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_main.h create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_rx.c create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_rx.h create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_tx.c create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_tx.h (limited to 'drivers') diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index 6b5dc203da2b..21a97703421d 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -39,6 +39,7 @@ Contents: intel/iavf intel/ice marvell/octeontx2 + marvell/octeon_ep mellanox/mlx5 microsoft/netvsc neterion/s2io diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst new file mode 100644 index 000000000000..bc562c49011b --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst @@ -0,0 +1,35 @@ +.. 
SPDX-License-Identifier: GPL-2.0+ + +==================================================================== +Linux kernel networking driver for Marvell's Octeon PCI Endpoint NIC +==================================================================== + +Network driver for Marvell's Octeon PCI EndPoint NIC. +Copyright (c) 2020 Marvell International Ltd. + +Contents +======== + +- `Overview`_ +- `Supported Devices`_ +- `Interface Control`_ + +Overview +======== +This driver implements the networking functionality of Marvell's Octeon PCI +EndPoint NIC. + +Supported Devices +================= +Currently, this driver supports the following devices: + * Network controller: Cavium, Inc. Device b200 + +Interface Control +================= +Network interface control operations such as changing the MTU, link speed and +link down/up are done by writing commands to a mailbox command queue, a +mailbox interface implemented through a reserved region in BAR4. +This driver writes the commands into the mailbox and the firmware on the +Octeon device processes them. The firmware also sends unsolicited notifications +to the driver for events such as link change, through a notification queue +implemented as part of the mailbox interface.
diff --git a/MAINTAINERS b/MAINTAINERS index fd768d43e048..9b0480f1b153 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11828,6 +11828,13 @@ S: Supported F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt F: drivers/mmc/host/sdhci-xenon* +MARVELL OCTEON ENDPOINT DRIVER +M: Veerasenareddy Burru +M: Abhijit Ayarekar +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/ethernet/marvell/octeon_ep + MATROX FRAMEBUFFER DRIVER L: linux-fbdev@vger.kernel.org S: Orphan
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index fe0989c0fc25..4cb55724001b 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -177,6 +177,7 @@ config SKY2_DEBUG source "drivers/net/ethernet/marvell/octeontx2/Kconfig" +source "drivers/net/ethernet/marvell/octeon_ep/Kconfig" source "drivers/net/ethernet/marvell/prestera/Kconfig" endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 9f88fe822555..ceba4aa4f026 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -11,5 +11,6 @@ obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o +obj-y += octeon_ep/ obj-y += octeontx2/ obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig new file mode 100644 index 000000000000..0d7db815340e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Marvell's Octeon PCI Endpoint NIC Driver Configuration +# + +config OCTEON_EP + tristate "Marvell Octeon PCI Endpoint NIC Driver" + depends on 64BIT + depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL + help + This driver supports the networking functionality of Marvell's + Octeon PCI Endpoint NIC. + + To know the list of devices supported by this driver, refer to the + documentation in + . + + To compile this driver as a module, choose M here. The name of the + module is octeon_ep.
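To make the mailbox flow described under "Interface Control" above more concrete, the sketch below shows how a host-to-firmware request could be framed using the structures this patch introduces (struct octep_ctrl_net_h2f_req, struct octep_ctrl_mbox_msg, octep_ctrl_mbox_send()). It is only an illustration: octep_ctrl_net.c and octep_ctrl_mbox.c are still stubs in this commit, and the example_set_mtu() helper is hypothetical, not part of the driver.

/* Illustrative only: frame a "set MTU" control request and hand it to the
 * BAR4-backed mailbox send helper declared in octep_ctrl_mbox.h.
 */
#include <linux/etherdevice.h>

#include "octep_config.h"
#include "octep_main.h"
#include "octep_ctrl_net.h"

static int example_set_mtu(struct octep_device *oct, u16 new_mtu)
{
	struct octep_ctrl_net_h2f_req req = {};
	struct octep_ctrl_mbox_msg msg = {};

	/* Request header: which control command this is. */
	req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
	/* Command payload: a SET operation carrying the new MTU value. */
	req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
	req.mtu.val = new_mtu;

	/* Mailbox header: mark it as a request and give its size in words. */
	msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
	msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW;
	msg.msg = &req;

	/* Enqueue into the host-to-firmware queue in the reserved BAR4 region. */
	return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg);
}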
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile new file mode 100644 index 000000000000..6e2db8e80b4a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Network driver for Marvell's Octeon PCI Endpoint NIC +# + +obj-$(CONFIG_OCTEON_EP) += octeon_ep.o + +octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \ + octep_ctrl_mbox.o octep_ctrl_net.o diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c new file mode 100644 index 000000000000..a38b52788619 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include +#include +#include + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_regs_cn9k_pf.h" + +/* Names of Hardware non-queue generic interrupts */ +static char *cn93_non_ioq_msix_names[] = { + "epf_ire_rint", + "epf_ore_rint", + "epf_vfire_rint0", + "epf_vfire_rint1", + "epf_vfore_rint0", + "epf_vfore_rint1", + "epf_mbox_rint0", + "epf_mbox_rint1", + "epf_oei_rint", + "epf_dma_rint", + "epf_dma_vf_rint0", + "epf_dma_vf_rint1", + "epf_pp_vf_rint0", + "epf_pp_vf_rint1", + "epf_misc_rint", + "epf_rsvd", +}; + +/* Reset all hardware Tx/Rx queues */ +static void octep_reset_io_queues_cn93_pf(struct octep_device *oct) +{ +} + +/* Initialize windowed addresses to access some hardware registers */ +static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct) +{ +} + +/* Configure Hardware mapping: inform hardware which rings belong to PF. */ +static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct) +{ +} + +/* Initialize configuration limits and initial active config 93xx PF. 
*/ +static void octep_init_config_cn93_pf(struct octep_device *oct) +{ + struct octep_config *conf = oct->conf; + struct pci_dev *pdev = oct->pdev; + u64 val; + + /* Read ring configuration: + * PF ring count, number of VFs and rings per VF supported + */ + val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO); + conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val); + conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf; + conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val); + conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs; + conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val); + + val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port)); + conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val); + conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val); + conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings; + dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n", + conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf, + conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings); + + conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS; + conf->iq.instr_type = OCTEP_64BYTE_INSTR; + conf->iq.pkind = 0; + conf->iq.db_min = OCTEP_DB_MIN; + conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD; + + conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS; + conf->oq.buf_size = OCTEP_OQ_BUF_SIZE; + conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD; + conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD; + conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD; + + conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR; + conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings; + conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names; + + conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + (0x400000ull * 7); +} + +/* Setup registers for a hardware Tx Queue */ +static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no) +{ +} + +/* Setup registers for a hardware Rx Queue */ +static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no) +{ +} + +/* Setup registers for a PF mailbox */ +static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) +{ +} + +/* Interrupts handler for all non-queue generic interrupts. */ +static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) +{ + return IRQ_HANDLED; +} + +/* Tx/Rx queue interrupt handler */ +static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data) +{ + return IRQ_HANDLED; +} + +/* soft reset of 93xx */ +static int octep_soft_reset_cn93_pf(struct octep_device *oct) +{ + dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n"); + + octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF); + + /* Set core domain reset bit */ + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1); + /* Wait for 100ms as Octeon resets. */ + mdelay(100); + /* clear core domain reset bit */ + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1); + + return 0; +} + +/* Re-initialize Octeon hardware registers */ +static void octep_reinit_regs_cn93_pf(struct octep_device *oct) +{ +} + +/* Enable all interrupts */ +static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) +{ +} + +/* Disable all interrupts */ +static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) +{ +} + +/* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
*/ +static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq) +{ + return 0; +} + +/* Enable a hardware Tx Queue */ +static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no) +{ +} + +/* Enable a hardware Rx Queue */ +static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no) +{ +} + +/* Enable all hardware Tx/Rx Queues assined to PF */ +static void octep_enable_io_queues_cn93_pf(struct octep_device *oct) +{ +} + +/* Disable a hardware Tx Queue assined to PF */ +static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no) +{ +} + +/* Disable a hardware Rx Queue assined to PF */ +static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no) +{ +} + +/* Disable all hardware Tx/Rx Queues assined to PF */ +static void octep_disable_io_queues_cn93_pf(struct octep_device *oct) +{ +} + +/* Dump hardware registers (including Tx/Rx queues) for debugging. */ +static void octep_dump_registers_cn93_pf(struct octep_device *oct) +{ +} + +/** + * octep_device_setup_cn93_pf() - Setup Octeon device. + * + * @oct: Octeon device private data structure. + * + * - initialize hardware operations. + * - get target side pcie port number for the device. + * - setup window access to hardware registers. + * - set initial configuration and max limits. + * - setup hardware mapping of rings to the PF device. + */ +void octep_device_setup_cn93_pf(struct octep_device *oct) +{ + oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf; + oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; + oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; + + oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; + oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; + oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; + oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; + + oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf; + oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf; + + oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf; + + oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf; + oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf; + oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf; + + oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf; + oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf; + oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf; + oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf; + + oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf; + + octep_setup_pci_window_regs_cn93_pf(oct); + + oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff; + dev_info(&oct->pdev->dev, + "Octeon device using PCIE Port %d\n", oct->pcie_port); + + octep_init_config_cn93_pf(oct); + octep_configure_ring_mapping_cn93_pf(oct); +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h new file mode 100644 index 000000000000..f208f3f9a447 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ + +#ifndef _OCTEP_CONFIG_H_ +#define _OCTEP_CONFIG_H_ + +/* Tx instruction types by length */ +#define OCTEP_32BYTE_INSTR 32 +#define OCTEP_64BYTE_INSTR 64 + +/* Tx Queue: maximum descriptors per ring */ +#define OCTEP_IQ_MAX_DESCRIPTORS 1024 +/* Minimum input (Tx) requests to be enqueued to ring doorbell */ +#define OCTEP_DB_MIN 1 +/* Packet threshold for Tx queue interrupt */ +#define OCTEP_IQ_INTR_THRESHOLD 0x0 + +/* Rx Queue: maximum descriptors per ring */ +#define OCTEP_OQ_MAX_DESCRIPTORS 1024 + +/* Rx buffer size: Use page size buffers. + * Build skb from allocated page buffer once the packet is received. + * When a gathered packet is received, make head page as skb head and + * page buffers in consecutive Rx descriptors as fragments. + */ +#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE)) +#define OCTEP_OQ_PKTS_PER_INTR 128 +#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4) + +#define OCTEP_OQ_INTR_PKT_THRESHOLD 1 +#define OCTEP_OQ_INTR_TIME_THRESHOLD 10 + +#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32) + +/* Tx Queue wake threshold + * wakeup a stopped Tx queue if minimum 2 descriptors are available. + * Even a skb with fragments consume only one Tx queue descriptor entry. + */ +#define OCTEP_WAKE_QUEUE_THRESHOLD 2 + +/* Minimum MTU supported by Octeon network interface */ +#define OCTEP_MIN_MTU ETH_MIN_MTU +/* Maximum MTU supported by Octeon interface*/ +#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN)) +/* Default MTU */ +#define OCTEP_DEFAULT_MTU 1500 + +/* Macros to get octeon config params */ +#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) +#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs) +#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type) +#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind) +#define CFG_GET_IQ_INSTR_SIZE(cfg) (64) +#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min) +#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold) + +#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs) +#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size) +#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold) +#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt) +#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time) + +#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings) +#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings) +#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn) + +#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind) +#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us) +#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us) + +#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs) +#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs) +#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf) +#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf) +#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn) + +#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix) +#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix) +#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names) + +#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr) + +/* Hardware Tx Queue configuration. 
*/ +struct octep_iq_config { + /* Size of the Input queue (number of commands) */ + u16 num_descs; + + /* Command size - 32 or 64 bytes */ + u16 instr_type; + + /* pkind for packets sent to Octeon */ + u16 pkind; + + /* Minimum number of commands pending to be posted to Octeon before driver + * hits the Input queue doorbell. + */ + u16 db_min; + + /* Trigger the IQ interrupt when processed cmd count reaches + * this level. + */ + u32 intr_threshold; +}; + +/* Hardware Rx Queue configuration. */ +struct octep_oq_config { + /* Size of Output queue (number of descriptors) */ + u16 num_descs; + + /* Size of buffer in this Output queue. */ + u16 buf_size; + + /* The number of buffers that were consumed during packet processing + * by the driver on this Output queue before the driver attempts to + * replenish the descriptor ring with new buffers. + */ + u16 refill_threshold; + + /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host + * only if it sent as many packets as specified by this field. + * The driver usually does not use packet count interrupt coalescing. + */ + u32 oq_intr_pkt; + + /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host + * if at least one packet was sent in the time interval specified by + * this field. The driver uses time interval interrupt coalescing by + * default. The time is specified in microseconds. + */ + u32 oq_intr_time; +}; + +/* Tx/Rx configuration */ +struct octep_pf_ring_config { + /* Max number of IOQs */ + u16 max_io_rings; + + /* Number of active IOQs */ + u16 active_io_rings; + + /* Starting IOQ number: this changes based on which PEM is used */ + u16 srn; +}; + +/* Octeon Hardware SRIOV config */ +struct octep_sriov_config { + /* Max number of VF devices supported */ + u16 max_vfs; + + /* Number of VF devices enabled */ + u16 active_vfs; + + /* Max number of rings assigned to VF */ + u8 max_rings_per_vf; + + /* Number of rings enabled per VF */ + u8 active_rings_per_vf; + + /* starting ring number of VF's: ring-0 of VF-0 of the PF */ + u16 vf_srn; +}; + +/* Octeon MSI-x config. */ +struct octep_msix_config { + /* Number of IOQ interrupts */ + u16 ioq_msix; + + /* Number of Non IOQ interrupts */ + u16 non_ioq_msix; + + /* Names of Non IOQ interrupts */ + char **non_ioq_msix_names; +}; + +struct octep_ctrl_mbox_config { + /* Barmem address for control mbox */ + void __iomem *barmem_addr; +}; + +/* Data Structure to hold configuration limits and active config */ +struct octep_config { + /* Input Queue attributes. */ + struct octep_iq_config iq; + + /* Output Queue attributes. */ + struct octep_oq_config oq; + + /* NIC Port Configuration */ + struct octep_pf_ring_config pf_ring_cfg; + + /* SRIOV configuration of the PF */ + struct octep_sriov_config sriov_cfg; + + /* MSI-X interrupt config */ + struct octep_msix_config msix_cfg; + + /* ctrl mbox config */ + struct octep_ctrl_mbox_config ctrl_mbox_cfg; +}; +#endif /* _OCTEP_CONFIG_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c new file mode 100644 index 000000000000..5479f64d5bfc --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "octep_ctrl_mbox.h" +#include "octep_config.h" +#include "octep_main.h" + +/* Timeout in msecs for message response */ +#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100 +/* Time in msecs to wait for message response */ +#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10 + +#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m) +#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8) +#define OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(m) ((m) + 16) +#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24) +#define OCTEP_CTRL_MBOX_INFO_FW_VERSION_OFFSET(m) ((m) + 136) +#define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144) + +#define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ) +#define OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) +#define OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 4) +#define OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 8) +#define OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m)) + 12) + +#define OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m) ((m) + \ + OCTEP_CTRL_MBOX_INFO_SZ + \ + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ) +#define OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(m) (OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) +#define OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 4) +#define OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 8) +#define OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO_OFFSET(m)) + 12) + +#define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \ + (sizeof(struct octep_ctrl_mbox_msg) * (i))) + +int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) +{ + return -EINVAL; +} + +int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) +{ + return -EINVAL; +} + +int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) +{ + return -EINVAL; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h new file mode 100644 index 000000000000..d5ad58c6bbaa --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ + #ifndef __OCTEP_CTRL_MBOX_H__ +#define __OCTEP_CTRL_MBOX_H__ + +/* barmem structure + * |===========================================| + * |Info (16 + 120 + 120 = 256 bytes) | + * |-------------------------------------------| + * |magic number (8 bytes) | + * |bar memory size (4 bytes) | + * |reserved (4 bytes) | + * |-------------------------------------------| + * |host version (8 bytes) | + * |host status (8 bytes) | + * |host reserved (104 bytes) | + * |-------------------------------------------| + * |fw version (8 bytes) | + * |fw status (8 bytes) | + * |fw reserved (104 bytes) | + * |===========================================| + * |Host to Fw Queue info (16 bytes) | + * |-------------------------------------------| + * |producer index (4 bytes) | + * |consumer index (4 bytes) | + * |element size (4 bytes) | + * |element count (4 bytes) | + * |===========================================| + * |Fw to Host Queue info (16 bytes) | + * |-------------------------------------------| + * |producer index (4 bytes) | + * |consumer index (4 bytes) | + * |element size (4 bytes) | + * |element count (4 bytes) | + * |===========================================| + * |Host to Fw Queue | + * |-------------------------------------------| + * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes| + * |===========================================| + * |===========================================| + * |Fw to Host Queue | + * |-------------------------------------------| + * |((elem_sz + hdr(8 bytes)) * elem_cnt) bytes| + * |===========================================| + */ + +#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull + +/* Size of mbox info in bytes */ +#define OCTEP_CTRL_MBOX_INFO_SZ 256 +/* Size of mbox host to target queue info in bytes */ +#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16 +/* Size of mbox target to host queue info in bytes */ +#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16 +/* Size of mbox queue in bytes */ +#define OCTEP_CTRL_MBOX_Q_SZ(sz, cnt) (((sz) + 8) * (cnt)) +/* Size of mbox in bytes */ +#define OCTEP_CTRL_MBOX_SZ(hsz, hcnt, fsz, fcnt) (OCTEP_CTRL_MBOX_INFO_SZ + \ + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \ + OCTEP_CTRL_MBOX_F2HQ_INFO_SZ + \ + OCTEP_CTRL_MBOX_Q_SZ(hsz, hcnt) + \ + OCTEP_CTRL_MBOX_Q_SZ(fsz, fcnt)) + +/* Valid request message */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0) +/* Valid response message */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1) +/* Valid notification, no response required */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2) + +enum octep_ctrl_mbox_status { + OCTEP_CTRL_MBOX_STATUS_INVALID = 0, + OCTEP_CTRL_MBOX_STATUS_INIT, + OCTEP_CTRL_MBOX_STATUS_READY, + OCTEP_CTRL_MBOX_STATUS_UNINIT +}; + +/* mbox message */ +union octep_ctrl_mbox_msg_hdr { + u64 word0; + struct { + /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */ + u32 flags; + /* size of message in words excluding header */ + u32 sizew; + }; +}; + +/* mbox message */ +struct octep_ctrl_mbox_msg { + /* mbox transaction header */ + union octep_ctrl_mbox_msg_hdr hdr; + /* pointer to message buffer */ + void *msg; +}; + +/* Mbox queue */ +struct octep_ctrl_mbox_q { + /* q element size, should be aligned to unsigned long */ + u16 elem_sz; + /* q element count, should be power of 2 */ + u16 elem_cnt; + /* q mask */ + u16 mask; + /* producer address in bar mem */ + void __iomem *hw_prod; + /* consumer address in bar mem */ + void __iomem *hw_cons; + /* q base address in bar mem */ + void __iomem *hw_q; +}; + +struct octep_ctrl_mbox { + /* host driver version */ + u64 version; + /* size of bar memory */ 
+ u32 barmem_sz; + /* pointer to BAR memory */ + void __iomem *barmem; + /* user context for callback, can be null */ + void *user_ctx; + /* callback handler for processing request, called from octep_ctrl_mbox_recv */ + int (*process_req)(void *user_ctx, struct octep_ctrl_mbox_msg *msg); + /* host-to-fw queue */ + struct octep_ctrl_mbox_q h2fq; + /* fw-to-host queue */ + struct octep_ctrl_mbox_q f2hq; + /* lock for h2fq */ + struct mutex h2fq_lock; + /* lock for f2hq */ + struct mutex f2hq_lock; +}; + +/* Initialize control mbox. + * + * @param mbox: non-null pointer to struct octep_ctrl_mbox. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox); + +/* Send mbox message. + * + * @param mbox: non-null pointer to struct octep_ctrl_mbox. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg); + +/* Retrieve mbox message. + * + * @param mbox: non-null pointer to struct octep_ctrl_mbox. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg); + +/* Uninitialize control mbox. + * + * @param ep: non-null pointer to struct octep_ctrl_mbox. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox); + +#endif /* __OCTEP_CTRL_MBOX_H__ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c new file mode 100644 index 000000000000..021f888d8f6d --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ +#include +#include +#include +#include + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_ctrl_net.h" + +int octep_get_link_status(struct octep_device *oct) +{ + return 0; +} + +void octep_set_link_status(struct octep_device *oct, bool up) +{ +} + +void octep_set_rx_state(struct octep_device *oct, bool up) +{ +} + +int octep_get_mac_addr(struct octep_device *oct, u8 *addr) +{ + return -1; +} + +int octep_get_link_info(struct octep_device *oct) +{ + return -1; +} + +int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info) +{ + return -1; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h new file mode 100644 index 000000000000..f23b58381322 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ +#ifndef __OCTEP_CTRL_NET_H__ +#define __OCTEP_CTRL_NET_H__ + +/* Supported commands */ +enum octep_ctrl_net_cmd { + OCTEP_CTRL_NET_CMD_GET = 0, + OCTEP_CTRL_NET_CMD_SET, +}; + +/* Supported states */ +enum octep_ctrl_net_state { + OCTEP_CTRL_NET_STATE_DOWN = 0, + OCTEP_CTRL_NET_STATE_UP, +}; + +/* Supported replies */ +enum octep_ctrl_net_reply { + OCTEP_CTRL_NET_REPLY_OK = 0, + OCTEP_CTRL_NET_REPLY_GENERIC_FAIL, + OCTEP_CTRL_NET_REPLY_INVALID_PARAM, +}; + +/* Supported host to fw commands */ +enum octep_ctrl_net_h2f_cmd { + OCTEP_CTRL_NET_H2F_CMD_INVALID = 0, + OCTEP_CTRL_NET_H2F_CMD_MTU, + OCTEP_CTRL_NET_H2F_CMD_MAC, + OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS, + OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS, + OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS, + OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS, + OCTEP_CTRL_NET_H2F_CMD_RX_STATE, + OCTEP_CTRL_NET_H2F_CMD_LINK_INFO, +}; + +/* Supported fw to host commands */ +enum octep_ctrl_net_f2h_cmd { + OCTEP_CTRL_NET_F2H_CMD_INVALID = 0, + OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS, +}; + +struct octep_ctrl_net_req_hdr { + /* sender id */ + u16 sender; + /* receiver id */ + u16 receiver; + /* octep_ctrl_net_h2t_cmd */ + u16 cmd; + /* reserved */ + u16 rsvd0; +}; + +/* get/set mtu request */ +struct octep_ctrl_net_h2f_req_cmd_mtu { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* 0-65535 */ + u16 val; +}; + +/* get/set mac request */ +struct octep_ctrl_net_h2f_req_cmd_mac { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* xx:xx:xx:xx:xx:xx */ + u8 addr[ETH_ALEN]; +}; + +/* get if_stats, xstats, q_stats request */ +struct octep_ctrl_net_h2f_req_cmd_get_stats { + /* offset into barmem where fw should copy over stats */ + u32 offset; +}; + +/* get/set link state, rx state */ +struct octep_ctrl_net_h2f_req_cmd_state { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* enum octep_ctrl_net_state */ + u16 state; +}; + +/* link info */ +struct octep_ctrl_net_link_info { + /* Bitmap of Supported link speeds/modes */ + u64 supported_modes; + /* Bitmap of Advertised link speeds/modes */ + u64 advertised_modes; + /* Autonegotation state; bit 0=disabled; bit 1=enabled */ + u8 autoneg; + /* Pause frames setting. 
bit 0=disabled; bit 1=enabled */ + u8 pause; + /* Negotiated link speed in Mbps */ + u32 speed; +}; + +/* get/set link info */ +struct octep_ctrl_net_h2f_req_cmd_link_info { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* struct octep_ctrl_net_link_info */ + struct octep_ctrl_net_link_info info; +}; + +/* Host to fw request data */ +struct octep_ctrl_net_h2f_req { + struct octep_ctrl_net_req_hdr hdr; + union { + struct octep_ctrl_net_h2f_req_cmd_mtu mtu; + struct octep_ctrl_net_h2f_req_cmd_mac mac; + struct octep_ctrl_net_h2f_req_cmd_get_stats get_stats; + struct octep_ctrl_net_h2f_req_cmd_state link; + struct octep_ctrl_net_h2f_req_cmd_state rx; + struct octep_ctrl_net_h2f_req_cmd_link_info link_info; + }; +} __packed; + +struct octep_ctrl_net_resp_hdr { + /* sender id */ + u16 sender; + /* receiver id */ + u16 receiver; + /* octep_ctrl_net_h2t_cmd */ + u16 cmd; + /* octep_ctrl_net_reply */ + u16 reply; +}; + +/* get mtu response */ +struct octep_ctrl_net_h2f_resp_cmd_mtu { + /* 0-65535 */ + u16 val; +}; + +/* get mac response */ +struct octep_ctrl_net_h2f_resp_cmd_mac { + /* xx:xx:xx:xx:xx:xx */ + u8 addr[ETH_ALEN]; +}; + +/* get link state, rx state response */ +struct octep_ctrl_net_h2f_resp_cmd_state { + /* enum octep_ctrl_net_state */ + u16 state; +}; + +/* Host to fw response data */ +struct octep_ctrl_net_h2f_resp { + struct octep_ctrl_net_resp_hdr hdr; + union { + struct octep_ctrl_net_h2f_resp_cmd_mtu mtu; + struct octep_ctrl_net_h2f_resp_cmd_mac mac; + struct octep_ctrl_net_h2f_resp_cmd_state link; + struct octep_ctrl_net_h2f_resp_cmd_state rx; + struct octep_ctrl_net_link_info link_info; + }; +} __packed; + +/* link state notofication */ +struct octep_ctrl_net_f2h_req_cmd_state { + /* enum octep_ctrl_net_state */ + u16 state; +}; + +/* Fw to host request data */ +struct octep_ctrl_net_f2h_req { + struct octep_ctrl_net_req_hdr hdr; + union { + struct octep_ctrl_net_f2h_req_cmd_state link; + }; +}; + +/* Fw to host response data */ +struct octep_ctrl_net_f2h_resp { + struct octep_ctrl_net_resp_hdr hdr; +}; + +/* Size of host to fw octep_ctrl_mbox queue element */ +union octep_ctrl_net_h2f_data_sz { + struct octep_ctrl_net_h2f_req h2f_req; + struct octep_ctrl_net_h2f_resp h2f_resp; +}; + +/* Size of fw to host octep_ctrl_mbox queue element */ +union octep_ctrl_net_f2h_data_sz { + struct octep_ctrl_net_f2h_req f2h_req; + struct octep_ctrl_net_f2h_resp f2h_resp; +}; + +/* size of host to fw data in words */ +#define OCTEP_CTRL_NET_H2F_DATA_SZW ((sizeof(union octep_ctrl_net_h2f_data_sz)) / \ + (sizeof(unsigned long))) + +/* size of fw to host data in words */ +#define OCTEP_CTRL_NET_F2H_DATA_SZW ((sizeof(union octep_ctrl_net_f2h_data_sz)) / \ + (sizeof(unsigned long))) + +/* size in words of get/set mtu request */ +#define OCTEP_CTRL_NET_H2F_MTU_REQ_SZW 2 +/* size in words of get/set mac request */ +#define OCTEP_CTRL_NET_H2F_MAC_REQ_SZW 2 +/* size in words of get stats request */ +#define OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW 2 +/* size in words of get/set state request */ +#define OCTEP_CTRL_NET_H2F_STATE_REQ_SZW 2 +/* size in words of get/set link info request */ +#define OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW 4 + +/* size in words of get mtu response */ +#define OCTEP_CTRL_NET_H2F_GET_MTU_RESP_SZW 2 +/* size in words of set mtu response */ +#define OCTEP_CTRL_NET_H2F_SET_MTU_RESP_SZW 1 +/* size in words of get mac response */ +#define OCTEP_CTRL_NET_H2F_GET_MAC_RESP_SZW 2 +/* size in words of set mac response */ +#define OCTEP_CTRL_NET_H2F_SET_MAC_RESP_SZW 1 +/* size in words of get 
state request */ +#define OCTEP_CTRL_NET_H2F_GET_STATE_RESP_SZW 2 +/* size in words of set state request */ +#define OCTEP_CTRL_NET_H2F_SET_STATE_RESP_SZW 1 +/* size in words of get link info request */ +#define OCTEP_CTRL_NET_H2F_GET_LINK_INFO_RESP_SZW 4 +/* size in words of set link info request */ +#define OCTEP_CTRL_NET_H2F_SET_LINK_INFO_RESP_SZW 1 + +/** Get link status from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * + * return value: link status 0=down, 1=up. + */ +int octep_get_link_status(struct octep_device *oct); + +/** Set link status in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param up: boolean status. + */ +void octep_set_link_status(struct octep_device *oct, bool up); + +/** Set rx state in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param up: boolean status. + */ +void octep_set_rx_state(struct octep_device *oct, bool up); + +/** Get mac address from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param addr: non-null pointer to mac address. + * + * return value: 0 on success, -errno on failure. + */ +int octep_get_mac_addr(struct octep_device *oct, u8 *addr); + +/** Set mac address in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param addr: non-null pointer to mac address. + */ +int octep_set_mac_addr(struct octep_device *oct, u8 *addr); + +/** Set mtu in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param mtu: mtu. + */ +int octep_set_mtu(struct octep_device *oct, int mtu); + +/** Get interface statistics from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * + * return value: 0 on success, -errno on failure. + */ +int octep_get_if_stats(struct octep_device *oct); + +/** Get link info from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * + * return value: 0 on success, -errno on failure. + */ +int octep_get_link_info(struct octep_device *oct); + +/** Set link info in firmware. + * + * @param oct: non-null pointer to struct octep_device. + */ +int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info); + +#endif /* __OCTEP_CTRL_NET_H__ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c new file mode 100644 index 000000000000..3b55510b2550 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_ctrl_net.h" + +static struct workqueue_struct *octep_wq; + +/* Supported Devices */ +static const struct pci_device_id octep_pci_id_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)}, + {0, }, +}; +MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl); + +MODULE_AUTHOR("Veerasenareddy Burru "); +MODULE_DESCRIPTION(OCTEP_DRV_STRING); +MODULE_LICENSE("GPL"); +MODULE_VERSION(OCTEP_DRV_VERSION_STR); + +static void octep_link_up(struct net_device *netdev) +{ + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); +} + +/** + * octep_open() - start the octeon network device. + * + * @netdev: pointer to kernel network device. 
+ * + * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues + * and interrupts.. + * + * Return: 0, on successfully setting up device and bring it up. + * -1, on any error. + */ +static int octep_open(struct net_device *netdev) +{ + struct octep_device *oct = netdev_priv(netdev); + int err, ret; + + netdev_info(netdev, "Starting netdev ...\n"); + netif_carrier_off(netdev); + + oct->hw_ops.reset_io_queues(oct); + + if (octep_setup_iqs(oct)) + goto setup_iq_err; + if (octep_setup_oqs(oct)) + goto setup_oq_err; + + err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); + if (err) + goto set_queues_err; + err = netif_set_real_num_rx_queues(netdev, oct->num_iqs); + if (err) + goto set_queues_err; + + oct->link_info.admin_up = 1; + octep_set_rx_state(oct, true); + + ret = octep_get_link_status(oct); + if (!ret) + octep_set_link_status(oct, true); + + /* Enable the input and output queues for this Octeon device */ + oct->hw_ops.enable_io_queues(oct); + + /* Enable Octeon device interrupts */ + oct->hw_ops.enable_interrupts(oct); + + octep_oq_dbell_init(oct); + + ret = octep_get_link_status(oct); + if (ret) + octep_link_up(netdev); + + return 0; + +set_queues_err: + octep_free_oqs(oct); +setup_oq_err: + octep_free_iqs(oct); +setup_iq_err: + return -1; +} + +/** + * octep_stop() - stop the octeon network device. + * + * @netdev: pointer to kernel network device. + * + * stop the device Tx/Rx operations, bring down the link and + * free up all resources allocated for Tx/Rx queues and interrupts. + */ +static int octep_stop(struct net_device *netdev) +{ + struct octep_device *oct = netdev_priv(netdev); + + netdev_info(netdev, "Stopping the device ...\n"); + + /* Stop Tx from stack */ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + octep_set_link_status(oct, false); + octep_set_rx_state(oct, false); + + oct->link_info.admin_up = 0; + oct->link_info.oper_up = 0; + + oct->hw_ops.disable_interrupts(oct); + + octep_clean_iqs(oct); + + oct->hw_ops.disable_io_queues(oct); + oct->hw_ops.reset_io_queues(oct); + octep_free_oqs(oct); + octep_free_iqs(oct); + netdev_info(netdev, "Device stopped !!\n"); + return 0; +} + +/** + * octep_start_xmit() - Enqueue packet to Octoen hardware Tx Queue. + * + * @skb: packet skbuff pointer. + * @netdev: kernel network device. + * + * Return: NETDEV_TX_BUSY, if Tx Queue is full. + * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue. + */ +static netdev_tx_t octep_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + return NETDEV_TX_OK; +} + +/** + * octep_tx_timeout_task - work queue task to Handle Tx queue timeout. + * + * @work: pointer to Tx queue timeout work_struct + * + * Stop and start the device so that it frees up all queue resources + * and restarts the queues, that potentially clears a Tx queue timeout + * condition. + **/ +static void octep_tx_timeout_task(struct work_struct *work) +{ + struct octep_device *oct = container_of(work, struct octep_device, + tx_timeout_task); + struct net_device *netdev = oct->netdev; + + rtnl_lock(); + if (netif_running(netdev)) { + octep_stop(netdev); + octep_open(netdev); + } + rtnl_unlock(); +} + +/** + * octep_tx_timeout() - Handle Tx Queue timeout. + * + * @netdev: pointer to kernel network device. + * @txqueue: Timed out Tx queue number. + * + * Schedule a work to handle Tx queue timeout. 
+ */ +static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct octep_device *oct = netdev_priv(netdev); + + queue_work(octep_wq, &oct->tx_timeout_task); +} + +static const struct net_device_ops octep_netdev_ops = { + .ndo_open = octep_open, + .ndo_stop = octep_stop, + .ndo_start_xmit = octep_start_xmit, + .ndo_tx_timeout = octep_tx_timeout, +}; + +/** + * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages. + * + * @work: pointer to ctrl mbox work_struct + * + * Poll ctrl mbox message queue and handle control messages from firmware. + **/ +static void octep_ctrl_mbox_task(struct work_struct *work) +{ + struct octep_device *oct = container_of(work, struct octep_device, + ctrl_mbox_task); + struct net_device *netdev = oct->netdev; + struct octep_ctrl_net_f2h_req req = {}; + struct octep_ctrl_mbox_msg msg; + int ret = 0; + + msg.msg = &req; + while (true) { + ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, &msg); + if (ret) + break; + + switch (req.hdr.cmd) { + case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS: + if (netif_running(netdev)) { + if (req.link.state) { + dev_info(&oct->pdev->dev, "netif_carrier_on\n"); + netif_carrier_on(netdev); + } else { + dev_info(&oct->pdev->dev, "netif_carrier_off\n"); + netif_carrier_off(netdev); + } + } + break; + default: + pr_info("Unknown mbox req : %u\n", req.hdr.cmd); + break; + } + } +} + +/** + * octep_device_setup() - Setup Octeon Device. + * + * @oct: Octeon device private data structure. + * + * Setup Octeon device hardware operations, configuration, etc ... + */ +int octep_device_setup(struct octep_device *oct) +{ + struct octep_ctrl_mbox *ctrl_mbox; + struct pci_dev *pdev = oct->pdev; + int i, ret; + + /* allocate memory for oct->conf */ + oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL); + if (!oct->conf) + return -ENOMEM; + + /* Map BAR regions */ + for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { + oct->mmio[i].hw_addr = + ioremap(pci_resource_start(oct->pdev, i * 2), + pci_resource_len(oct->pdev, i * 2)); + oct->mmio[i].mapped = 1; + } + + oct->chip_id = pdev->device; + oct->rev_id = pdev->revision; + dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device); + + switch (oct->chip_id) { + case OCTEP_PCI_DEVICE_ID_CN93_PF: + dev_info(&pdev->dev, + "Setting up OCTEON CN93XX PF PASS%d.%d\n", + OCTEP_MAJOR_REV(oct), OCTEP_MINOR_REV(oct)); + octep_device_setup_cn93_pf(oct); + break; + default: + dev_err(&pdev->dev, + "%s: unsupported device\n", __func__); + goto unsupported_dev; + } + + oct->pkind = CFG_GET_IQ_PKIND(oct->conf); + + /* Initialize control mbox */ + ctrl_mbox = &oct->ctrl_mbox; + ctrl_mbox->version = OCTEP_DRV_VERSION; + ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf); + ret = octep_ctrl_mbox_init(ctrl_mbox); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize control mbox\n"); + return -1; + } + oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz, + ctrl_mbox->h2fq.elem_cnt, + ctrl_mbox->f2hq.elem_sz, + ctrl_mbox->f2hq.elem_cnt); + + return 0; + +unsupported_dev: + return -1; +} + +/** + * octep_device_cleanup() - Cleanup Octeon Device. + * + * @oct: Octeon device private data structure. + * + * Cleanup Octeon device allocated resources. 
+ */ +static void octep_device_cleanup(struct octep_device *oct) +{ + int i; + + dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n"); + + for (i = 0; i < OCTEP_MAX_VF; i++) { + if (oct->mbox[i]) + vfree(oct->mbox[i]); + oct->mbox[i] = NULL; + } + + octep_ctrl_mbox_uninit(&oct->ctrl_mbox); + + oct->hw_ops.soft_reset(oct); + for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { + if (oct->mmio[i].mapped) + iounmap(oct->mmio[i].hw_addr); + } + + kfree(oct->conf); + oct->conf = NULL; +} + +/** + * octep_probe() - Octeon PCI device probe handler. + * + * @pdev: PCI device structure. + * @ent: entry in Octeon PCI device ID table. + * + * Initializes and enables the Octeon PCI device for network operations. + * Initializes Octeon private data structure and registers a network device. + */ +static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct octep_device *octep_dev = NULL; + struct net_device *netdev; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return err; + } + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask !!\n"); + goto err_dma_mask; + } + + err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to map PCI memory regions\n"); + goto err_pci_regions; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct octep_device), + OCTEP_MAX_QUEUES); + if (!netdev) { + dev_err(&pdev->dev, "Failed to allocate netdev\n"); + err = -ENOMEM; + goto err_alloc_netdev; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + octep_dev = netdev_priv(netdev); + octep_dev->netdev = netdev; + octep_dev->pdev = pdev; + octep_dev->dev = &pdev->dev; + pci_set_drvdata(pdev, octep_dev); + + err = octep_device_setup(octep_dev); + if (err) { + dev_err(&pdev->dev, "Device setup failed\n"); + goto err_octep_config; + } + INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); + INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); + + netdev->netdev_ops = &octep_netdev_ops; + netif_carrier_off(netdev); + + netdev->hw_features = NETIF_F_SG; + netdev->features |= netdev->hw_features; + netdev->min_mtu = OCTEP_MIN_MTU; + netdev->max_mtu = OCTEP_MAX_MTU; + netdev->mtu = OCTEP_DEFAULT_MTU; + + octep_get_mac_addr(octep_dev, octep_dev->mac_addr); + eth_hw_addr_set(netdev, octep_dev->mac_addr); + + if (register_netdev(netdev)) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto register_dev_err; + } + dev_info(&pdev->dev, "Device probe successful\n"); + return 0; + +register_dev_err: + octep_device_cleanup(octep_dev); +err_octep_config: + free_netdev(netdev); +err_alloc_netdev: + pci_disable_pcie_error_reporting(pdev); + pci_release_mem_regions(pdev); +err_pci_regions: +err_dma_mask: + pci_disable_device(pdev); + return err; +} + +/** + * octep_remove() - Remove Octeon PCI device from driver control. + * + * @pdev: PCI device structure of the Octeon device. + * + * Cleanup all resources allocated for the Octeon device. + * Unregister from network device and disable the PCI device. 
+ */ +static void octep_remove(struct pci_dev *pdev) +{ + struct octep_device *oct = pci_get_drvdata(pdev); + struct net_device *netdev; + + if (!oct) + return; + + cancel_work_sync(&oct->tx_timeout_task); + cancel_work_sync(&oct->ctrl_mbox_task); + netdev = oct->netdev; + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + octep_device_cleanup(oct); + pci_release_mem_regions(pdev); + free_netdev(netdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver octep_driver = { + .name = OCTEP_DRV_NAME, + .id_table = octep_pci_id_tbl, + .probe = octep_probe, + .remove = octep_remove, +}; + +/** + * octep_init_module() - Module initialiation. + * + * create common resource for the driver and register PCI driver. + */ +static int __init octep_init_module(void) +{ + int ret; + + pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING); + + /* work queue for all deferred tasks */ + octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME); + if (!octep_wq) { + pr_err("%s: Failed to create common workqueue\n", + OCTEP_DRV_NAME); + return -ENOMEM; + } + + ret = pci_register_driver(&octep_driver); + if (ret < 0) { + pr_err("%s: Failed to register PCI driver; err=%d\n", + OCTEP_DRV_NAME, ret); + return ret; + } + + pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME); + + return ret; +} + +/** + * octep_exit_module() - Module exit routine. + * + * unregister the driver with PCI subsystem and cleanup common resources. + */ +static void __exit octep_exit_module(void) +{ + pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME); + + pci_unregister_driver(&octep_driver); + destroy_workqueue(octep_wq); + + pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME); +} + +module_init(octep_init_module); +module_exit(octep_exit_module); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h new file mode 100644 index 000000000000..4950b4418035 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_MAIN_H_ +#define _OCTEP_MAIN_H_ + +#include "octep_tx.h" +#include "octep_rx.h" +#include "octep_ctrl_mbox.h" + +#define OCTEP_DRV_VERSION_MAJOR 1 +#define OCTEP_DRV_VERSION_MINOR 0 +#define OCTEP_DRV_VERSION_VARIANT 0 + +#define OCTEP_DRV_VERSION ((OCTEP_DRV_VERSION_MAJOR << 16) + \ + (OCTEP_DRV_VERSION_MINOR << 8) + \ + OCTEP_DRV_VERSION_VARIANT) + +#define OCTEP_DRV_VERSION_STR "1.0.0" +#define OCTEP_DRV_NAME "octeon_ep" +#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver" + +#define OCTEP_PCIID_CN93_PF 0xB200177d +#define OCTEP_PCIID_CN93_VF 0xB203177d + +#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200 +#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 + +#define OCTEP_MAX_QUEUES 63 +#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES +#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES +#define OCTEP_MAX_VF 64 + +#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ + +/* Flags to disable and enable Interrupts */ +#define OCTEP_INPUT_INTR (1) +#define OCTEP_OUTPUT_INTR (2) +#define OCTEP_MBOX_INTR (4) +#define OCTEP_ALL_INTR 0xff + +#define OCTEP_IQ_INTR_RESEND_BIT 59 +#define OCTEP_OQ_INTR_RESEND_BIT 59 + +#define OCTEP_MMIO_REGIONS 3 +/* PCI address space mapping information. + * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of + * Octeon gets mapped to different physical address spaces in + * the kernel. 
+ */ +struct octep_mmio { + /* The physical address to which the PCI address space is mapped. */ + u8 __iomem *hw_addr; + + /* Flag indicating the mapping was successful. */ + int mapped; +}; + +struct octep_pci_win_regs { + u8 __iomem *pci_win_wr_addr; + u8 __iomem *pci_win_rd_addr; + u8 __iomem *pci_win_wr_data; + u8 __iomem *pci_win_rd_data; +}; + +struct octep_hw_ops { + void (*setup_iq_regs)(struct octep_device *oct, int q); + void (*setup_oq_regs)(struct octep_device *oct, int q); + void (*setup_mbox_regs)(struct octep_device *oct, int mbox); + + irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector); + irqreturn_t (*ioq_intr_handler)(void *ioq_vector); + int (*soft_reset)(struct octep_device *oct); + void (*reinit_regs)(struct octep_device *oct); + u32 (*update_iq_read_idx)(struct octep_iq *iq); + + void (*enable_interrupts)(struct octep_device *oct); + void (*disable_interrupts)(struct octep_device *oct); + + void (*enable_io_queues)(struct octep_device *oct); + void (*disable_io_queues)(struct octep_device *oct); + void (*enable_iq)(struct octep_device *oct, int q); + void (*disable_iq)(struct octep_device *oct, int q); + void (*enable_oq)(struct octep_device *oct, int q); + void (*disable_oq)(struct octep_device *oct, int q); + void (*reset_io_queues)(struct octep_device *oct); + void (*dump_registers)(struct octep_device *oct); +}; + +/* Octeon mailbox data */ +struct octep_mbox_data { + u32 cmd; + u32 total_len; + u32 recv_len; + u32 rsvd; + u64 *data; +}; + +/* Octeon device mailbox */ +struct octep_mbox { + /* A spinlock to protect access to this q_mbox. */ + spinlock_t lock; + + u32 q_no; + u32 state; + + /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ + void *mbox_int_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(1) for VF. + */ + void *mbox_write_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(0) for VF. + */ + void *mbox_read_reg; + + struct octep_mbox_data mbox_data; +}; + +/* Tx/Rx queue vector per interrupt. */ +struct octep_ioq_vector { + char name[OCTEP_MSIX_NAME_SIZE]; + struct napi_struct napi; + struct octep_device *octep_dev; + struct octep_iq *iq; + struct octep_oq *oq; + cpumask_t affinity_mask; +}; + +/* Octeon hardware/firmware offload capability flags. */ +#define OCTEP_CAP_TX_CHECKSUM BIT(0) +#define OCTEP_CAP_RX_CHECKSUM BIT(1) +#define OCTEP_CAP_TSO BIT(2) + +/* Link modes */ +enum octep_link_mode_bit_indices { + OCTEP_LINK_MODE_10GBASE_T = 0, + OCTEP_LINK_MODE_10GBASE_R, + OCTEP_LINK_MODE_10GBASE_CR, + OCTEP_LINK_MODE_10GBASE_KR, + OCTEP_LINK_MODE_10GBASE_LR, + OCTEP_LINK_MODE_10GBASE_SR, + OCTEP_LINK_MODE_25GBASE_CR, + OCTEP_LINK_MODE_25GBASE_KR, + OCTEP_LINK_MODE_25GBASE_SR, + OCTEP_LINK_MODE_40GBASE_CR4, + OCTEP_LINK_MODE_40GBASE_KR4, + OCTEP_LINK_MODE_40GBASE_LR4, + OCTEP_LINK_MODE_40GBASE_SR4, + OCTEP_LINK_MODE_50GBASE_CR2, + OCTEP_LINK_MODE_50GBASE_KR2, + OCTEP_LINK_MODE_50GBASE_SR2, + OCTEP_LINK_MODE_50GBASE_CR, + OCTEP_LINK_MODE_50GBASE_KR, + OCTEP_LINK_MODE_50GBASE_LR, + OCTEP_LINK_MODE_50GBASE_SR, + OCTEP_LINK_MODE_100GBASE_CR4, + OCTEP_LINK_MODE_100GBASE_KR4, + OCTEP_LINK_MODE_100GBASE_LR4, + OCTEP_LINK_MODE_100GBASE_SR4, + OCTEP_LINK_MODE_NBITS +}; + +/* Hardware interface link state information. */ +struct octep_iface_link_info { + /* Bitmap of Supported link speeds/modes. */ + u64 supported_modes; + + /* Bitmap of Advertised link speeds/modes. */ + u64 advertised_modes; + + /* Negotiated link speed in Mbps. */ + u32 speed; + + /* MTU */ + u16 mtu; + + /* Autonegotation state. 
*/ +#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0) +#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1) + u8 autoneg; + + /* Pause frames setting. */ +#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0) +#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1) + u8 pause; + + /* Admin state of the link (ifconfig up/down */ + u8 admin_up; + + /* Operational state of the link: physical link is up down */ + u8 oper_up; +}; + +/* The Octeon device specific private data structure. + * Each Octeon device has this structure to represent all its components. + */ +struct octep_device { + struct octep_config *conf; + + /* Octeon Chip type. */ + u16 chip_id; + u16 rev_id; + + /* Device capabilities enabled */ + u64 caps_enabled; + /* Device capabilities supported */ + u64 caps_supported; + + /* Pointer to basic Linux device */ + struct device *dev; + /* Linux PCI device pointer */ + struct pci_dev *pdev; + /* Netdev corresponding to the Octeon device */ + struct net_device *netdev; + + /* memory mapped io range */ + struct octep_mmio mmio[OCTEP_MMIO_REGIONS]; + + /* MAC address */ + u8 mac_addr[ETH_ALEN]; + + /* Tx queues (IQ: Instruction Queue) */ + u16 num_iqs; + /* pkind value to be used in every Tx hardware descriptor */ + u8 pkind; + /* Pointers to Octeon Tx queues */ + struct octep_iq *iq[OCTEP_MAX_IQ]; + + /* Rx queues (OQ: Output Queue) */ + u16 num_oqs; + /* Pointers to Octeon Rx queues */ + struct octep_oq *oq[OCTEP_MAX_OQ]; + + /* Hardware port number of the PCIe interface */ + u16 pcie_port; + + /* PCI Window registers to access some hardware CSRs */ + struct octep_pci_win_regs pci_win_regs; + /* Hardware operations */ + struct octep_hw_ops hw_ops; + + /* IRQ info */ + u16 num_irqs; + u16 num_non_ioq_irqs; + char *non_ioq_irq_names; + struct msix_entry *msix_entries; + /* IOq information of it's corresponding MSI-X interrupt. */ + struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES]; + + /* Hardware Interface Tx statistics */ + struct octep_iface_tx_stats iface_tx_stats; + /* Hardware Interface Rx statistics */ + struct octep_iface_rx_stats iface_rx_stats; + + /* Hardware Interface Link info like supported modes, aneg support */ + struct octep_iface_link_info link_info; + + /* Mailbox to talk to VFs */ + struct octep_mbox *mbox[OCTEP_MAX_VF]; + + /* Work entry to handle Tx timeout */ + struct work_struct tx_timeout_task; + + /* control mbox over pf */ + struct octep_ctrl_mbox ctrl_mbox; + + /* offset for iface stats */ + u32 ctrl_mbox_ifstats_offset; + + /* Work entry to handle ctrl mbox interrupt */ + struct work_struct ctrl_mbox_task; + +}; + +static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct) +{ + u16 rev = (oct->rev_id & 0xC) >> 2; + + return (rev == 0) ? 1 : rev; +} + +static inline u16 OCTEP_MINOR_REV(struct octep_device *oct) +{ + return (oct->rev_id & 0x3); +} + +/* Octeon CSR read/write access APIs */ +#define octep_write_csr(octep_dev, reg_off, value) \ + writel(value, (u8 *)(octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_write_csr64(octep_dev, reg_off, val64) \ + writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_read_csr(octep_dev, reg_off) \ + readl((u8 *)(octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_read_csr64(octep_dev, reg_off) \ + readq((octep_dev)->mmio[0].hw_addr + (reg_off)) + +/* Read windowed register. + * @param oct - pointer to the Octeon device. + * @param addr - Address of the register to read. 
+ * + * This routine is called to read from the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return - 64 bit value read from the register. + */ +static inline u64 +OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr) +{ + u64 val64; + + addr |= 1ull << 53; /* read 8 bytes */ + writeq(addr, oct->pci_win_regs.pci_win_rd_addr); + val64 = readq(oct->pci_win_regs.pci_win_rd_data); + + dev_dbg(&oct->pdev->dev, + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64); + + return val64; +} + +/* Write windowed register. + * @param oct - pointer to the Octeon device. + * @param addr - Address of the register to write + * @param val - Value to write + * + * This routine is called to write to the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return Nothing. + */ +static inline void +OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val) +{ + writeq(addr, oct->pci_win_regs.pci_win_wr_addr); + writeq(val, oct->pci_win_regs.pci_win_wr_data); + + dev_dbg(&oct->pdev->dev, + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val); +} + +int octep_device_setup(struct octep_device *oct); +int octep_setup_iqs(struct octep_device *oct); +void octep_free_iqs(struct octep_device *oct); +void octep_clean_iqs(struct octep_device *oct); +int octep_setup_oqs(struct octep_device *oct); +void octep_free_oqs(struct octep_device *oct); +void octep_oq_dbell_init(struct octep_device *oct); +void octep_device_setup_cn93_pf(struct octep_device *oct); +int octep_iq_process_completions(struct octep_iq *iq, u16 budget); +int octep_oq_process_rx(struct octep_oq *oq, int budget); +void octep_set_ethtool_ops(struct net_device *netdev); + +#endif /* _OCTEP_MAIN_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h new file mode 100644 index 000000000000..cc51149790ff --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h @@ -0,0 +1,367 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ + +#ifndef _OCTEP_REGS_CN9K_PF_H_ +#define _OCTEP_REGS_CN9K_PF_H_ + +/* ############################ RST ######################### */ +#define CN93_RST_BOOT 0x000087E006001600ULL +#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL +#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL + +#define CN93_CONFIG_XPANSION_BAR 0x38 +#define CN93_CONFIG_PCIE_CAP 0x70 +#define CN93_CONFIG_PCIE_DEVCAP 0x74 +#define CN93_CONFIG_PCIE_DEVCTL 0x78 +#define CN93_CONFIG_PCIE_LINKCAP 0x7C +#define CN93_CONFIG_PCIE_LINKCTL 0x80 +#define CN93_CONFIG_PCIE_SLOTCAP 0x84 +#define CN93_CONFIG_PCIE_SLOTCTL 0x88 + +#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */ +#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10 +#define CN93_PCIE_SRIOV_FDL_MASK 0xFF + +#define CN93_CONFIG_PCIE_FLTMSK 0x720 + +/* ################# Offsets of RING, EPF, MAC ######################### */ +#define CN93_RING_OFFSET (0x1ULL << 17) +#define CN93_EPF_OFFSET (0x1ULL << 25) +#define CN93_MAC_OFFSET (0x1ULL << 4) +#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4) +#define CN93_EPVF_RING_OFFSET (0x1ULL << 4) + +/* ################# Scratch Registers ######################### */ +#define CN93_SDP_EPF_SCRATCH 0x205E0 + +/* ################# Window Registers ######################### */ +#define CN93_SDP_WIN_WR_ADDR64 0x20000 +#define CN93_SDP_WIN_RD_ADDR64 0x20010 +#define CN93_SDP_WIN_WR_DATA64 0x20020 +#define CN93_SDP_WIN_WR_MASK_REG 0x20030 +#define CN93_SDP_WIN_RD_DATA64 0x20040 + +#define CN93_SDP_MAC_NUMBER 0x2C100 + +/* ################# Global Previliged registers ######################### */ +#define CN93_SDP_EPF_RINFO 0x205F0 + +#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF) +#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF) +#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) && 0xFF) + +/* SDP Function select */ +#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8 +#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0 + +/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */ +#define CN93_SDP_R_IN_CONTROL_START 0x10000 +#define CN93_SDP_R_IN_ENABLE_START 0x10010 +#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020 +#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030 +#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040 +#define CN93_SDP_R_IN_CNTS_START 0x10050 +#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060 +#define CN93_SDP_R_IN_PKT_CNT_START 0x10080 +#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090 + +#define CN93_SDP_R_IN_CONTROL(ring) \ + (CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_ENABLE(ring) \ + (CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INSTR_BADDR(ring) \ + (CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \ + (CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INSTR_DBELL(ring) \ + (CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_CNTS(ring) \ + (CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INT_LEVELS(ring) \ + (CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_PKT_CNT(ring) \ + (CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_BYTE_CNT(ring) \ + (CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET)) + +/* Rings per Virtual Function */ +#define CN93_R_IN_CTL_RPVF_MASK (0xF) +#define CN93_R_IN_CTL_RPVF_POS (48) + +/* Number of instructions to be read in one MAC read request. 
+ * setting to Max value(4) + */ +#define CN93_R_IN_CTL_IDLE (0x1ULL << 28) +#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25) +#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24) +#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8) +#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6) +#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5) +#define CN93_R_IN_CTL_NSR (0x1ULL << 3) +#define CN93_R_IN_CTL_ESR (0x1ULL << 1) +#define CN93_R_IN_CTL_ROR (0x1ULL << 0) + +#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B) + +/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */ +#define CN93_SDP_R_OUT_CNTS_START 0x10100 +#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110 +#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120 +#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130 +#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140 +#define CN93_SDP_R_OUT_CONTROL_START 0x10150 +#define CN93_SDP_R_OUT_ENABLE_START 0x10160 +#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180 +#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190 + +#define CN93_SDP_R_OUT_CONTROL(ring) \ + (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_ENABLE(ring) \ + (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \ + (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \ + (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \ + (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_CNTS(ring) \ + (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_INT_LEVELS(ring) \ + (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_PKT_CNT(ring) \ + (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_BYTE_CNT(ring) \ + (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET)) + +/*------------------ R_OUT Masks ----------------*/ +#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63) +#define CN93_R_OUT_INT_LEVELS_TIMET (32) + +#define CN93_R_OUT_CTL_IDLE BIT_ULL(40) +#define CN93_R_OUT_CTL_ES_I BIT_ULL(34) +#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33) +#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32) +#define CN93_R_OUT_CTL_ES_D BIT_ULL(30) +#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29) +#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28) +#define CN93_R_OUT_CTL_ES_P BIT_ULL(26) +#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25) +#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24) +#define CN93_R_OUT_CTL_IMODE BIT_ULL(23) + +/* ############### Interrupt Moderation Registers ############### */ +#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280 +#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0 +#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0 + +#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380 +#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0 +#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0 + +#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \ + (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \ + (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \ + (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \ + (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \ + (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET)) + +#define 
CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \ + (CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET)) + +/* ##################### Mail Box Registers ########################## */ +/* INT register for VF. when a MBOX write from PF happed to a VF, + * corresponding bit will be set in this register as well as in + * PF_VF_INT register. + * + * This is a RO register, the int can be cleared by writing 1 to PF_VF_INT + */ +/* Basically first 3 are from PF to VF. The last one is data from VF to PF */ +#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210 +#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220 +#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230 + +#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \ + (CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \ + (CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \ + (CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET)) + +/* ##################### Interrupt Registers ########################## */ +#define CN93_SDP_R_ERR_TYPE_START 0x10400 + +#define CN93_SDP_R_ERR_TYPE(ring) \ + (CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_MBOX_ISM_START 0x10500 +#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510 +#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520 + +#define CN93_SDP_R_MBOX_ISM(ring) \ + (CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_OUT_CNTS_ISM(ring) \ + (CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_R_IN_CNTS_ISM(ring) \ + (CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET)) + +#define CN93_SDP_EPF_MBOX_RINT_START 0x20100 +#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120 +#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140 +#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160 + +#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180 +#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0 +#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0 +#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0 + +#define CN93_SDP_EPF_IRERR_RINT 0x20200 +#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210 +#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220 +#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230 + +#define CN93_SDP_EPF_VFORE_RINT_START 0x20240 +#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260 +#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280 +#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0 + +#define CN93_SDP_EPF_ORERR_RINT 0x20320 +#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330 +#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340 +#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350 + +#define CN93_SDP_EPF_OEI_RINT 0x20360 +#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370 +#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380 +#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390 + +#define CN93_SDP_EPF_DMA_RINT 0x20400 +#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410 +#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420 +#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430 + +#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440 +#define CN93_SDP_EPF_DMA_CNT_START 0x20460 +#define CN93_SDP_EPF_DMA_TIM_START 0x20480 + +#define CN93_SDP_EPF_MISC_RINT 0x204A0 +#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0 +#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0 +#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0 + +#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0 +#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500 +#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520 +#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540 + 
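+/* Naming convention for the EPF interrupt CSRs above and below: the *_RINT
+ * registers latch interrupt cause bits and are acknowledged by writing 1 to
+ * the set bits, while the *_RINT_ENA_W1S / *_RINT_ENA_W1C pairs set and
+ * clear the corresponding enable bits (write-1-to-set / write-1-to-clear).
+ * A minimal usage sketch, assuming an initialized struct octep_device *oct
+ * and the octep_read_csr64()/octep_write_csr64() accessors from
+ * octep_main.h, to unmask the OEI interrupt source and then acknowledge a
+ * pending cause:
+ *
+ *	u64 cause;
+ *
+ *	octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, ~0ULL);
+ *	cause = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ *	octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, cause);
+ */
+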
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560 +#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580 +#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0 +#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0 + +#define CN93_SDP_EPF_MBOX_RINT(index) \ + (CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \ + (CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \ + (CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \ + (CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET)) + +#define CN93_SDP_EPF_VFIRE_RINT(index) \ + (CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \ + (CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \ + (CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \ + (CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET)) + +#define CN93_SDP_EPF_VFORE_RINT(index) \ + (CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \ + (CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \ + (CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \ + (CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET)) + +#define CN93_SDP_EPF_DMA_VF_RINT(index) \ + (CN93_SDP_EPF_DMA_VF_RINT_START + ((index) + CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \ + (CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) + CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \ + (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) + CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \ + (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) + CN93_BIT_ARRAY_OFFSET)) + +#define CN93_SDP_EPF_PP_VF_RINT(index) \ + (CN93_SDP_EPF_PP_VF_RINT_START + ((index) + CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \ + (CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) + CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \ + (CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) + CN93_BIT_ARRAY_OFFSET)) +#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \ + (CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) + CN93_BIT_ARRAY_OFFSET)) + +/*------------------ Interrupt Masks ----------------*/ +#define CN93_INTR_R_SEND_ISM BIT_ULL(63) +#define CN93_INTR_R_OUT_INT BIT_ULL(62) +#define CN93_INTR_R_IN_INT BIT_ULL(61) +#define CN93_INTR_R_MBOX_INT BIT_ULL(60) +#define CN93_INTR_R_RESEND BIT_ULL(59) +#define CN93_INTR_R_CLR_TIM BIT_ULL(58) + +/* ####################### Ring Mapping Registers ################################## */ +#define CN93_SDP_EPVF_RING_START 0x26000 +#define CN93_SDP_IN_RING_TB_MAP_START 0x28000 +#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000 +#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000 + +#define CN93_SDP_EPVF_RING(ring) \ + (CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET)) +#define CN93_SDP_IN_RING_TB_MAP(ring) \ + (CN93_SDP_N_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET)) +#define CN93_SDP_IN_RATE_LIMIT(ring) \ + (CN93_SDP_IN_RATE_LIMIT_START + ((ring) * 
CN93_EPVF_RING_OFFSET)) +#define CN93_SDP_MAC_PF_RING_CTL(mac) \ + (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET)) + +#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF) +#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF) +#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F) + +/* Number of non-queue interrupts in CN93xx */ +#define CN93_NUM_NON_IOQ_INTR 16 +#endif /* _OCTEP_REGS_CN9K_PF_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c new file mode 100644 index 000000000000..4fa67f305cc8 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include +#include + +#include "octep_config.h" +#include "octep_main.h" + +/** + * octep_setup_oqs() - setup resources for all Rx queues. + * + * @oct: Octeon device private data structure. + */ +int octep_setup_oqs(struct octep_device *oct) +{ + return -1; +} + +/** + * octep_oq_dbell_init() - Initialize Rx queue doorbell. + * + * @oct: Octeon device private data structure. + * + * Write number of descriptors to Rx queue doorbell register. + */ +void octep_oq_dbell_init(struct octep_device *oct) +{ +} + +/** + * octep_free_oqs() - Free resources of all Rx queues. + * + * @oct: Octeon device private data structure. + */ +void octep_free_oqs(struct octep_device *oct) +{ +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h new file mode 100644 index 000000000000..782a24f27f3e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_RX_H_ +#define _OCTEP_RX_H_ + +/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format. + * + * The descriptor ring is made of descriptors which have 2 64-bit values: + * + * @buffer_ptr: DMA address of the skb->data + * @info_ptr: DMA address of host memory, used to update pkt count by hw. + * This is currently unused to save pci writes. + */ +struct octep_oq_desc_hw { + dma_addr_t buffer_ptr; + u64 info_ptr; +}; + +#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw)) + +#define OCTEP_CSUM_L4_VERIFIED 0x1 +#define OCTEP_CSUM_IP_VERIFIED 0x2 +#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED) + +/* Extended Response Header in packet data received from Hardware. + * Includes metadata like checksum status. + * this is valid only if hardware/firmware published support for this. + * This is at offset 0 of packet data (skb->data). + */ +struct octep_oq_resp_hw_ext { + /* Reserved. */ + u64 reserved:62; + + /* checksum verified. */ + u64 csum_verified:2; +}; + +#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext)) + +/* Length of Rx packet DMA'ed by Octeon to Host. + * this is in bigendian; so need to be converted to cpu endian. + * Octeon writes this at the beginning of Rx buffer (skb->data). + */ +struct octep_oq_resp_hw { + /* The Length of the packet. */ + __be64 length; +}; + +#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw)) + +/* Pointer to data buffer. + * Driver keeps a pointer to the data buffer that it made available to + * the Octeon device. 
Since the descriptor ring keeps physical (bus) + * addresses, this field is required for the driver to keep track of + * the virtual address pointers. The fields are operated by + * OS-dependent routines. + */ +struct octep_rx_buffer { + struct page *page; + + /* length from rx hardware descriptor after converting to cpu endian */ + u64 len; +}; + +#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer)) + +/* Output Queue statistics. Each output queue has four stats fields. */ +struct octep_oq_stats { + /* Number of packets received from the Device. */ + u64 packets; + + /* Number of bytes received from the Device. */ + u64 bytes; + + /* Number of times failed to allocate buffers. */ + u64 alloc_failures; +}; + +#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats)) + +/* Hardware interface Rx statistics */ +struct octep_iface_rx_stats { + /* Received packets */ + u64 pkts; + + /* Octets of received packets */ + u64 octets; + + /* Received PAUSE and Control packets */ + u64 pause_pkts; + + /* Received PAUSE and Control octets */ + u64 pause_octets; + + /* Filtered DMAC0 packets */ + u64 dmac0_pkts; + + /* Filtered DMAC0 octets */ + u64 dmac0_octets; + + /* Packets dropped due to RX FIFO full */ + u64 dropped_pkts_fifo_full; + + /* Octets dropped due to RX FIFO full */ + u64 dropped_octets_fifo_full; + + /* Error packets */ + u64 err_pkts; + + /* Filtered DMAC1 packets */ + u64 dmac1_pkts; + + /* Filtered DMAC1 octets */ + u64 dmac1_octets; + + /* NCSI-bound packets dropped */ + u64 ncsi_dropped_pkts; + + /* NCSI-bound octets dropped */ + u64 ncsi_dropped_octets; + + /* Multicast packets received. */ + u64 mcast_pkts; + + /* Broadcast packets received. */ + u64 bcast_pkts; + +}; + +/* The Descriptor Ring Output Queue structure. + * This structure has all the information required to implement a + * Octeon OQ. + */ +struct octep_oq { + u32 q_no; + + struct octep_device *octep_dev; + struct net_device *netdev; + struct device *dev; + + struct napi_struct *napi; + + /* The receive buffer list. This list has the virtual addresses + * of the buffers. + */ + struct octep_rx_buffer *buff_info; + + /* Pointer to the mapped packet credit register. + * Host writes number of info/buffer ptrs available to this register + */ + u8 __iomem *pkts_credit_reg; + + /* Pointer to the mapped packet sent register. + * Octeon writes the number of packets DMA'ed to host memory + * in this register. + */ + u8 __iomem *pkts_sent_reg; + + /* Statistics for this OQ. */ + struct octep_oq_stats stats; + + /* Packets pending to be processed */ + u32 pkts_pending; + u32 last_pkt_count; + + /* Index in the ring where the driver should read the next packet */ + u32 host_read_idx; + + /* Number of descriptors in this ring. */ + u32 max_count; + u32 ring_size_mask; + + /* The number of descriptors pending refill. */ + u32 refill_count; + + /* Index in the ring where the driver will refill the + * descriptor's buffer + */ + u32 host_refill_idx; + u32 refill_threshold; + + /* The size of each buffer pointed by the buffer pointer. */ + u32 buffer_size; + u32 max_single_buffer_size; + + /* The 8B aligned descriptor ring starts at this address. */ + struct octep_oq_desc_hw *desc_ring; + + /* DMA mapped address of the OQ descriptor ring. 
*/ + dma_addr_t desc_ring_dma; +}; + +#define OCTEP_OQ_SIZE (sizeof(struct octep_oq)) +#endif /* _OCTEP_RX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c new file mode 100644 index 000000000000..f0e1cc142b2e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include +#include + +#include "octep_config.h" +#include "octep_main.h" + +/** + * octep_clean_iqs() - Clean Tx queues to shutdown the device. + * + * @oct: Octeon device private data structure. + * + * Free the buffers in Tx queue descriptors pending completion and + * reset queue indices + */ +void octep_clean_iqs(struct octep_device *oct) +{ +} + +/** + * octep_setup_iqs() - setup resources for all Tx queues. + * + * @oct: Octeon device private data structure. + */ +int octep_setup_iqs(struct octep_device *oct) +{ + return -1; +} + +/** + * octep_free_iqs() - Free resources of all Tx queues. + * + * @oct: Octeon device private data structure. + */ +void octep_free_iqs(struct octep_device *oct) +{ +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h new file mode 100644 index 000000000000..2ef57980eb47 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_TX_H_ +#define _OCTEP_TX_H_ + +#define IQ_SEND_OK 0 +#define IQ_SEND_STOP 1 +#define IQ_SEND_FAILED -1 + +#define TX_BUFTYPE_NONE 0 +#define TX_BUFTYPE_NET 1 +#define TX_BUFTYPE_NET_SG 2 +#define NUM_TX_BUFTYPES 3 + +/* Hardware format for Scatter/Gather list */ +struct octep_tx_sglist_desc { + u16 len[4]; + dma_addr_t dma_ptr[4]; +}; + +/* Each Scatter/Gather entry sent to hardwar hold four pointers. + * So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1' + * is for main skb which also goes as a gather buffer to Octeon hardware. + * To allocate sufficient SGLIST entries for a packet with max fragments, + * align by adding 3 before calcuating max SGLIST entries per packet. 
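+ * For example, assuming MAX_SKB_FRAGS is 17 (the usual value with 4 KiB
+ * pages), (17 + 1 + 3) / 4 = 5 sglist descriptors per packet, enough for
+ * the linear skb data plus all 17 page fragments with two slots to spare.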
+ */ +#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4) +#define OCTEP_SGLIST_SIZE_PER_PKT \ + (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc)) + +struct octep_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct octep_tx_sglist_desc *sglist; + dma_addr_t sglist_dma; + u8 gather; +}; + +#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer)) + +/* Hardware interface Tx statistics */ +struct octep_iface_tx_stats { + /* Packets dropped due to excessive collisions */ + u64 xscol; + + /* Packets dropped due to excessive deferral */ + u64 xsdef; + + /* Packets sent that experienced multiple collisions before successful + * transmission + */ + u64 mcol; + + /* Packets sent that experienced a single collision before successful + * transmission + */ + u64 scol; + + /* Total octets sent on the interface */ + u64 octs; + + /* Total frames sent on the interface */ + u64 pkts; + + /* Packets sent with an octet count < 64 */ + u64 hist_lt64; + + /* Packets sent with an octet count == 64 */ + u64 hist_eq64; + + /* Packets sent with an octet count of 65–127 */ + u64 hist_65to127; + + /* Packets sent with an octet count of 128–255 */ + u64 hist_128to255; + + /* Packets sent with an octet count of 256–511 */ + u64 hist_256to511; + + /* Packets sent with an octet count of 512–1023 */ + u64 hist_512to1023; + + /* Packets sent with an octet count of 1024-1518 */ + u64 hist_1024to1518; + + /* Packets sent with an octet count of > 1518 */ + u64 hist_gt1518; + + /* Packets sent to a broadcast DMAC */ + u64 bcst; + + /* Packets sent to the multicast DMAC */ + u64 mcst; + + /* Packets sent that experienced a transmit underflow and were + * truncated + */ + u64 undflw; + + /* Control/PAUSE packets sent */ + u64 ctl; +}; + +/* Input Queue statistics. Each input queue has four stats fields. */ +struct octep_iq_stats { + /* Instructions posted to this queue. */ + u64 instr_posted; + + /* Instructions copied by hardware for processing. */ + u64 instr_completed; + + /* Instructions that could not be processed. */ + u64 instr_dropped; + + /* Bytes sent through this queue. */ + u64 bytes_sent; + + /* Gather entries sent through this queue. */ + u64 sgentry_sent; + + /* Number of transmit failures due to TX_BUSY */ + u64 tx_busy; + + /* Number of times the queue is restarted */ + u64 restart_cnt; +}; + +/* The instruction (input) queue. + * The input queue is used to post raw (instruction) mode data or packet + * data to Octeon device from the host. Each input queue (up to 4) for + * a Octeon device has one such structure to represent it. + */ +struct octep_iq { + u32 q_no; + + struct octep_device *octep_dev; + struct net_device *netdev; + struct device *dev; + struct netdev_queue *netdev_q; + + /* Index in input ring where driver should write the next packet */ + u16 host_write_index; + + /* Index in input ring where Octeon is expected to read next packet */ + u16 octep_read_index; + + /* This index aids in finding the window in the queue where Octeon + * has read the commands. + */ + u16 flush_index; + + /* Statistics for this input queue. */ + struct octep_iq_stats stats; + + /* This field keeps track of the instructions pending in this queue. */ + atomic_t instr_pending; + + /* Pointer to the Virtual Base addr of the input ring. */ + struct octep_tx_desc_hw *desc_ring; + + /* DMA mapped base address of the input descriptor ring. */ + dma_addr_t desc_ring_dma; + + /* Info of Tx buffers pending completion. 
*/ + struct octep_tx_buffer *buff_info; + + /* Base pointer to Scatter/Gather lists for all ring descriptors. */ + struct octep_tx_sglist_desc *sglist; + + /* DMA mapped addr of Scatter Gather Lists */ + dma_addr_t sglist_dma; + + /* Octeon doorbell register for the ring. */ + u8 __iomem *doorbell_reg; + + /* Octeon instruction count register for this ring. */ + u8 __iomem *inst_cnt_reg; + + /* interrupt level register for this ring */ + u8 __iomem *intr_lvl_reg; + + /* Maximum no. of instructions in this queue. */ + u32 max_count; + u32 ring_size_mask; + + u32 pkt_in_done; + u32 pkts_processed; + + u32 status; + + /* Number of instructions pending to be posted to Octeon. */ + u32 fill_cnt; + + /* The max. number of instructions that can be held pending by the + * driver before ringing doorbell. + */ + u32 fill_threshold; +}; + +/* Hardware Tx Instruction Header */ +struct octep_instr_hdr { + /* Data Len */ + u64 tlen:16; + + /* Reserved */ + u64 rsvd:20; + + /* PKIND for SDP */ + u64 pkind:6; + + /* Front Data size */ + u64 fsz:6; + + /* No. of entries in gather list */ + u64 gsz:14; + + /* Gather indicator 1=gather*/ + u64 gather:1; + + /* Reserved3 */ + u64 reserved3:1; +}; + +/* Hardware Tx completion response header */ +struct octep_instr_resp_hdr { + /* Request ID */ + u64 rid:16; + + /* PCIe port to use for response */ + u64 pcie_port:3; + + /* Scatter indicator 1=scatter */ + u64 scatter:1; + + /* Size of Expected result OR no. of entries in scatter list */ + u64 rlenssz:14; + + /* Desired destination port for result */ + u64 dport:6; + + /* Opcode Specific parameters */ + u64 param:8; + + /* Opcode for the return packet */ + u64 opcode:16; +}; + +/* 64-byte Tx instruction format. + * Format of instruction for a 64-byte mode input queue. + * + * only first 16-bytes (dptr and ih) are mandatory; rest are optional + * and filled by the driver based on firmware/hardware capabilities. + * These optional headers together called Front Data and its size is + * described by ih->fsz. + */ +struct octep_tx_desc_hw { + /* Pointer where the input data is available. */ + u64 dptr; + + /* Instruction Header. */ + union { + struct octep_instr_hdr ih; + u64 ih64; + }; + + /* Pointer where the response for a RAW mode packet will be written + * by Octeon. + */ + u64 rptr; + + /* Input Instruction Response Header. */ + struct octep_instr_resp_hdr irh; + + /* Additional headers available in a 64-byte instruction. */ + u64 exhdr[4]; +}; + +#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw)) +#endif /* _OCTEP_TX_H_ */ -- cgit v1.2.3 From 1f2c2d0cee023ca93299c322e3393af8be234ef8 Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 12 Apr 2022 20:34:58 -0700 Subject: octeon_ep: add hardware configuration APIs Implement hardware resource init and shutdown helper APIs. This includes hardware Tx/Rx queue init/enable/disable/reset, non queue interrupt handler that decodes non-queue interrupt type. Signed-off-by: Veerasenareddy Burru Signed-off-by: Abhijit Ayarekar Signed-off-by: Satananda Burla Signed-off-by: David S. 
Miller --- .../net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 498 ++++++++++++++++++++- .../net/ethernet/marvell/octeon_ep/octep_main.c | 2 +- .../net/ethernet/marvell/octeon_ep/octep_main.h | 12 +- 3 files changed, 505 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c index a38b52788619..1e47143c596d 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c @@ -33,19 +33,164 @@ static char *cn93_non_ioq_msix_names[] = { "epf_rsvd", }; +/* Dump useful hardware CSRs for debug purpose */ +static void cn93_dump_regs(struct octep_device *oct, int qno) +{ + struct device *dev = &oct->pdev->dev; + + dev_info(dev, "IQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_DBELL(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno))); + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_CONTROL(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno))); + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_ENABLE(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno))); + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_BADDR(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno))); + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_RSIZE(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno))); + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_CNTS(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno))); + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INT_LEVELS(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_PKT_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno))); + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_BYTE_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno))); + + dev_info(dev, "OQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_DBELL(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno))); + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_CONTROL(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno))); + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_ENABLE(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_BADDR(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno))); + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_CNTS(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno))); + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_INT_LEVELS(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_PKT_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno))); + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, 
CN93_SDP_R_OUT_BYTE_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno))); + dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_ERR_TYPE(qno), + octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno))); +} + +/* Reset Hardware Tx queue */ +static int cn93_reset_iq(struct octep_device *oct, int q_no) +{ + struct octep_config *conf = oct->conf; + u64 val = 0ULL; + + dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no); + + /* Get absolute queue number */ + q_no += conf->pf_ring_cfg.srn; + + /* Disable the Tx/Instruction Ring */ + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val); + + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ + octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val); + + val = 0xFFFFFFFF; + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val); + + return 0; +} + +/* Reset Hardware Rx queue */ +static void cn93_reset_oq(struct octep_device *oct, int q_no) +{ + u64 val = 0ULL; + + q_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + /* Disable Output (Rx) Ring */ + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val); + + /* Clear count CSRs */ + val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no)); + octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val); + + octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF); +} + /* Reset all hardware Tx/Rx queues */ static void octep_reset_io_queues_cn93_pf(struct octep_device *oct) { + struct pci_dev *pdev = oct->pdev; + int q; + + dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n"); + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + cn93_reset_iq(oct, q); + cn93_reset_oq(oct, q); + } } /* Initialize windowed addresses to access some hardware registers */ static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct) { + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; + + oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64); + oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64); + oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64); + oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64); } /* Configure Hardware mapping: inform hardware which rings belong to PF. */ static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct) { + struct octep_config *conf = oct->conf; + struct pci_dev *pdev = oct->pdev; + u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf); + int q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) { + u64 regval = 0; + + if (oct->pcie_port) + regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS; + + octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval); + + regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q)); + dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n", + CN93_SDP_EPVF_RING(pf_srn + q), regval); + } } /* Initialize configuration limits and initial active config 93xx PF. 
*/ @@ -95,27 +240,265 @@ static void octep_init_config_cn93_pf(struct octep_device *oct) /* Setup registers for a hardware Tx Queue */ static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no) { + struct octep_iq *iq = oct->iq[iq_no]; + u32 reset_instr_cnt; + u64 reg_val; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_R_IN_CTL_IDLE)) { + do { + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); + } while (!(reg_val & CN93_R_IN_CTL_IDLE)); + } + + reg_val |= CN93_R_IN_CTL_RDSIZE; + reg_val |= CN93_R_IN_CTL_IS_64B; + reg_val |= CN93_R_IN_CTL_ESR; + octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val); + + /* Write the start of the input queue's ring and its size */ + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no), + iq->desc_ring_dma); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no), + iq->max_count); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_INSTR_DBELL(iq_no); + iq->inst_cnt_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_CNTS(iq_no); + iq->intr_lvl_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_INT_LEVELS(iq_no); + + /* Store the current instruction counter (used in flush_iq calculation) */ + reset_instr_cnt = readl(iq->inst_cnt_reg); + writel(reset_instr_cnt, iq->inst_cnt_reg); + + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); } /* Setup registers for a hardware Rx Queue */ static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no) { + u64 reg_val; + u64 oq_ctl = 0ULL; + u32 time_threshold = 0; + struct octep_oq *oq = oct->oq[oq_no]; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_R_OUT_CTL_IDLE)) { + do { + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + } while (!(reg_val & CN93_R_OUT_CTL_IDLE)); + } + + reg_val &= ~(CN93_R_OUT_CTL_IMODE); + reg_val &= ~(CN93_R_OUT_CTL_ROR_P); + reg_val &= ~(CN93_R_OUT_CTL_NSR_P); + reg_val &= ~(CN93_R_OUT_CTL_ROR_I); + reg_val &= ~(CN93_R_OUT_CTL_NSR_I); + reg_val &= ~(CN93_R_OUT_CTL_ES_I); + reg_val &= ~(CN93_R_OUT_CTL_ROR_D); + reg_val &= ~(CN93_R_OUT_CTL_NSR_D); + reg_val &= ~(CN93_R_OUT_CTL_ES_D); + reg_val |= (CN93_R_OUT_CTL_ES_P); + + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no), + oq->desc_ring_dma); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no), + oq->max_count); + + oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0) + oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0) + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no); + oq->pkts_credit_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_OUT_SLIST_DBELL(oq_no); + + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); + reg_val = ((u64)time_threshold << 32) | + CFG_GET_OQ_INTR_PKT(oct->conf); + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); } /* Setup registers for a PF mailbox 
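 * (one per hardware ring, used for PF/VF signalling). Each ring exposes a
 * PF-to-VF data register, a VF-to-PF data register and a shared
 * interrupt-cause register; the addresses mapped below are the ones the
 * mailbox interrupt handler dereferences later.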
*/ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) { + struct octep_mbox *mbox = oct->mbox[q_no]; + + mbox->q_no = q_no; + + /* PF mbox interrupt reg */ + mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0); + + /* PF to VF DATA reg. PF writes into this reg */ + mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no); + + /* VF to PF DATA reg. PF reads from this reg */ + mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); +} + +/* Mailbox Interrupt handler */ +static void cn93_handle_pf_mbox_intr(struct octep_device *oct) +{ + u64 mbox_int_val = 0ULL, val = 0ULL, qno = 0ULL; + + mbox_int_val = readq(oct->mbox[0]->mbox_int_reg); + for (qno = 0; qno < OCTEP_MAX_VF; qno++) { + val = readq(oct->mbox[qno]->mbox_read_reg); + dev_dbg(&oct->pdev->dev, + "PF MBOX READ: val:%llx from VF:%llx\n", val, qno); + } + + writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg); } /* Interrupts handler for all non-queue generic interrupts. */ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) { + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; + int i = 0; + + /* Check for IRERR INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "received IRERR_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + reg_val = octep_read_csr64(oct, + CN93_SDP_R_ERR_TYPE(i)); + if (reg_val) { + dev_info(&pdev->dev, + "Received err type on IQ-%d: 0x%llx\n", + i, reg_val); + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), + reg_val); + } + } + goto irq_handled; + } + + /* Check for ORERR INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received ORERR_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val); + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i)); + if (reg_val) { + dev_info(&pdev->dev, + "Received err type on OQ-%d: 0x%llx\n", + i, reg_val); + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), + reg_val); + } + } + + goto irq_handled; + } + + /* Check for VFIRE INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received VFIRE_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for VFORE INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received VFORE_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for MBOX INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MBOX_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received MBOX_RINT intr: 0x%llx\n", reg_val); + cn93_handle_pf_mbox_intr(oct); + goto irq_handled; + } + + /* Check for OEI INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received OEI_EINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg_val); + queue_work(octep_wq, &oct->ctrl_mbox_task); + goto irq_handled; + } + + /* Check for DMA INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); + if (reg_val) { + octep_write_csr64(oct, 
CN93_SDP_EPF_DMA_RINT, reg_val); + goto irq_handled; + } + + /* Check for DMA VF INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for PPVF INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received PP_VF_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for MISC INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received MISC_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); + goto irq_handled; + } + + dev_info(&pdev->dev, "Reserved inerrupts raised; Ignore\n"); +irq_handled: return IRQ_HANDLED; } /* Tx/Rx queue interrupt handler */ static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data) { + struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data; + struct octep_oq *oq = vector->oq; + + napi_schedule_irqoff(oq->napi); return IRQ_HANDLED; } @@ -139,57 +522,170 @@ static int octep_soft_reset_cn93_pf(struct octep_device *oct) /* Re-initialize Octeon hardware registers */ static void octep_reinit_regs_cn93_pf(struct octep_device *oct) { + u32 i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_iq_regs(oct, i); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_oq_regs(oct, i); + + oct->hw_ops.enable_interrupts(oct); + oct->hw_ops.enable_io_queues(oct); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); } /* Enable all interrupts */ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) { + u64 intr_mask = 0ULL; + int srn, num_rings, i; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (i = 0; i < num_rings; i++) + intr_mask |= (0x1ULL << (srn + i)); + + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); } /* Disable all interrupts */ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) { + u64 intr_mask = 0ULL; + int srn, num_rings, i; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (i = 0; i < num_rings; i++) + intr_mask |= (0x1ULL << (srn + i)); + + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); } /* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
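 * The instruction count CSR only ever increases from the driver's point of
 * view, so the number of newly fetched descriptors is the unsigned 32-bit
 * difference between the current and the previously sampled count (which
 * also copes with counter wrap-around), and the read index then advances
 * by that amount modulo the ring size.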
*/ static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq) { - return 0; + u32 pkt_in_done = readl(iq->inst_cnt_reg); + u32 last_done, new_idx; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + new_idx = (iq->octep_read_index + last_done) % iq->max_count; + + return new_idx; } /* Enable a hardware Tx Queue */ static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no) { + u64 loop = HZ; + u64 reg_val; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); + + while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) && + loop--) { + schedule_timeout_interruptible(1); + } + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no)); + reg_val |= (0x1ULL << 62); + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); + reg_val |= 0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); } /* Enable a hardware Rx Queue */ static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no) { + u64 reg_val = 0ULL; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no)); + reg_val |= (0x1ULL << 62); + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); + + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); + reg_val |= 0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); } /* Enable all hardware Tx/Rx Queues assined to PF */ static void octep_enable_io_queues_cn93_pf(struct octep_device *oct) { + u8 q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_enable_iq_cn93_pf(oct, q); + octep_enable_oq_cn93_pf(oct, q); + } } /* Disable a hardware Tx Queue assined to PF */ static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no) { + u64 reg_val = 0ULL; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); + reg_val &= ~0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); } /* Disable a hardware Rx Queue assined to PF */ static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no) { + u64 reg_val = 0ULL; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); + reg_val &= ~0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); } /* Disable all hardware Tx/Rx Queues assined to PF */ static void octep_disable_io_queues_cn93_pf(struct octep_device *oct) { + int q = 0; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_disable_iq_cn93_pf(oct, q); + octep_disable_oq_cn93_pf(oct, q); + } } /* Dump hardware registers (including Tx/Rx queues) for debugging. 
*/ static void octep_dump_registers_cn93_pf(struct octep_device *oct) { + u8 srn, num_rings, q; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (q = srn; q < srn + num_rings; q++) + cn93_dump_regs(oct, q); } /** diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 3b55510b2550..8b3dc50d08fd 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -18,7 +18,7 @@ #include "octep_main.h" #include "octep_ctrl_net.h" -static struct workqueue_struct *octep_wq; +struct workqueue_struct *octep_wq; /* Supported Devices */ static const struct pci_device_id octep_pci_id_tbl[] = { diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index 4950b4418035..520f2c3664f9 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -109,17 +109,17 @@ struct octep_mbox { u32 state; /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ - void *mbox_int_reg; + u8 __iomem *mbox_int_reg; /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF, * SLI_PKT_PF_VF_MBOX_SIG(1) for VF. */ - void *mbox_write_reg; + u8 __iomem *mbox_write_reg; /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF, * SLI_PKT_PF_VF_MBOX_SIG(0) for VF. */ - void *mbox_read_reg; + u8 __iomem *mbox_read_reg; struct octep_mbox_data mbox_data; }; @@ -294,13 +294,13 @@ static inline u16 OCTEP_MINOR_REV(struct octep_device *oct) /* Octeon CSR read/write access APIs */ #define octep_write_csr(octep_dev, reg_off, value) \ - writel(value, (u8 *)(octep_dev)->mmio[0].hw_addr + (reg_off)) + writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off)) #define octep_write_csr64(octep_dev, reg_off, val64) \ writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off)) #define octep_read_csr(octep_dev, reg_off) \ - readl((u8 *)(octep_dev)->mmio[0].hw_addr + (reg_off)) + readl((octep_dev)->mmio[0].hw_addr + (reg_off)) #define octep_read_csr64(octep_dev, reg_off) \ readq((octep_dev)->mmio[0].hw_addr + (reg_off)) @@ -349,6 +349,8 @@ OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val) "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val); } +extern struct workqueue_struct *octep_wq; + int octep_device_setup(struct octep_device *oct); int octep_setup_iqs(struct octep_device *oct); void octep_free_iqs(struct octep_device *oct); -- cgit v1.2.3 From 4ca2fbdd0bb630948d52b9b727dde58783d21e22 Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 12 Apr 2022 20:34:59 -0700 Subject: octeon_ep: Add mailbox for control commands Add mailbox between host and NIC to send control commands from host to NIC and receive responses and notifications from NIC to host driver, like link status update. Signed-off-by: Veerasenareddy Burru Signed-off-by: Abhijit Ayarekar Signed-off-by: Satananda Burla Signed-off-by: David S. 
Miller --- .../ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 198 ++++++++++++++++++++- .../ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 6 +- .../ethernet/marvell/octeon_ep/octep_ctrl_net.c | 100 ++++++++++- 3 files changed, 294 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c index 5479f64d5bfc..8c196dadfad0 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c @@ -48,17 +48,209 @@ #define OCTEP_CTRL_MBOX_Q_OFFSET(m, i) ((m) + \ (sizeof(struct octep_ctrl_mbox_msg) * (i))) +static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 mask) +{ + return (index + 1) & mask; +} + +static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 mask) +{ + return mask - ((pi - ci) & mask); +} + +static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask) +{ + return ((pi - ci) & mask); +} + int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) { - return -EINVAL; + u64 version, magic_num, status; + + if (!mbox) + return -EINVAL; + + if (!mbox->barmem) { + pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem); + return -EINVAL; + } + + magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(mbox->barmem)); + if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) { + pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num); + return -EINVAL; + } + + version = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION_OFFSET(mbox->barmem)); + if (version != OCTEP_DRV_VERSION) { + pr_info("octep_ctrl_mbox : Firmware version mismatch %llx != %x\n", + version, OCTEP_DRV_VERSION); + return -EINVAL; + } + + status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem)); + if (status != OCTEP_CTRL_MBOX_STATUS_READY) { + pr_info("octep_ctrl_mbox : Firmware is not ready.\n"); + return -EINVAL; + } + + mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem)); + + writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(mbox->barmem)); + writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + + mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem)); + mbox->h2fq.elem_sz = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_SZ_OFFSET(mbox->barmem)); + mbox->h2fq.mask = (mbox->h2fq.elem_cnt - 1); + mutex_init(&mbox->h2fq_lock); + + mbox->f2hq.elem_cnt = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_CNT_OFFSET(mbox->barmem)); + mbox->f2hq.elem_sz = readl(OCTEP_CTRL_MBOX_F2HQ_ELEM_SZ_OFFSET(mbox->barmem)); + mbox->f2hq.mask = (mbox->f2hq.elem_cnt - 1); + mutex_init(&mbox->f2hq_lock); + + mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD_OFFSET(mbox->barmem); + mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS_OFFSET(mbox->barmem); + mbox->h2fq.hw_q = mbox->barmem + + OCTEP_CTRL_MBOX_INFO_SZ + + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + + OCTEP_CTRL_MBOX_F2HQ_INFO_SZ; + + mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD_OFFSET(mbox->barmem); + mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS_OFFSET(mbox->barmem); + mbox->f2hq.hw_q = mbox->h2fq.hw_q + + ((mbox->h2fq.elem_sz + sizeof(union octep_ctrl_mbox_msg_hdr)) * + mbox->h2fq.elem_cnt); + + /* ensure ready state is seen after everything is initialized */ + wmb(); + writeq(OCTEP_CTRL_MBOX_STATUS_READY, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + + pr_info("Octep ctrl mbox : Init successful.\n"); + + return 0; +} + +int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) +{ + unsigned 
long timeout = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS); + unsigned long period = msecs_to_jiffies(OCTEP_CTRL_MBOX_MSG_WAIT_MS); + struct octep_ctrl_mbox_q *q; + unsigned long expire; + u64 *mbuf, *word0; + u8 __iomem *qidx; + u16 pi, ci; + int i; + + if (!mbox || !msg) + return -EINVAL; + + q = &mbox->h2fq; + pi = readl(q->hw_prod); + ci = readl(q->hw_cons); + + if (!octep_ctrl_mbox_circq_space(pi, ci, q->mask)) + return -ENOMEM; + + qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, pi); + mbuf = (u64 *)msg->msg; + word0 = &msg->hdr.word0; + + mutex_lock(&mbox->h2fq_lock); + for (i = 1; i <= msg->hdr.sizew; i++) + writeq(*mbuf++, (qidx + (i * 8))); + + writeq(*word0, qidx); + + pi = octep_ctrl_mbox_circq_inc(pi, q->mask); + writel(pi, q->hw_prod); + mutex_unlock(&mbox->h2fq_lock); + + /* don't check for notification response */ + if (msg->hdr.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY) + return 0; + + expire = jiffies + timeout; + while (true) { + *word0 = readq(qidx); + if (msg->hdr.flags == OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP) + break; + schedule_timeout_interruptible(period); + if (signal_pending(current) || time_after(jiffies, expire)) { + pr_info("octep_ctrl_mbox: Timed out\n"); + return -EBUSY; + } + } + mbuf = (u64 *)msg->msg; + for (i = 1; i <= msg->hdr.sizew; i++) + *mbuf++ = readq(qidx + (i * 8)); + + return 0; } int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) { - return -EINVAL; + struct octep_ctrl_mbox_q *q; + u32 count, pi, ci; + u8 __iomem *qidx; + u64 *mbuf; + int i; + + if (!mbox || !msg) + return -EINVAL; + + q = &mbox->f2hq; + pi = readl(q->hw_prod); + ci = readl(q->hw_cons); + count = octep_ctrl_mbox_circq_depth(pi, ci, q->mask); + if (!count) + return -EAGAIN; + + qidx = OCTEP_CTRL_MBOX_Q_OFFSET(q->hw_q, ci); + mbuf = (u64 *)msg->msg; + + mutex_lock(&mbox->f2hq_lock); + + msg->hdr.word0 = readq(qidx); + for (i = 1; i <= msg->hdr.sizew; i++) + *mbuf++ = readq(qidx + (i * 8)); + + ci = octep_ctrl_mbox_circq_inc(ci, q->mask); + writel(ci, q->hw_cons); + + mutex_unlock(&mbox->f2hq_lock); + + if (msg->hdr.flags != OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ || !mbox->process_req) + return 0; + + mbox->process_req(mbox->user_ctx, msg); + mbuf = (u64 *)msg->msg; + for (i = 1; i <= msg->hdr.sizew; i++) + writeq(*mbuf++, (qidx + (i * 8))); + + writeq(msg->hdr.word0, qidx); + + return 0; } int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) { - return -EINVAL; + if (!mbox) + return -EINVAL; + + writeq(OCTEP_CTRL_MBOX_STATUS_UNINIT, + OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + /* ensure uninit state is written before uninitialization */ + wmb(); + + mutex_destroy(&mbox->h2fq_lock); + mutex_destroy(&mbox->f2hq_lock); + + writeq(OCTEP_CTRL_MBOX_STATUS_INVALID, + OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); + writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(mbox->barmem)); + + pr_info("Octep ctrl mbox : Uninit successful.\n"); + + return 0; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h index d5ad58c6bbaa..30f497f0bc26 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h @@ -107,11 +107,11 @@ struct octep_ctrl_mbox_q { /* q mask */ u16 mask; /* producer address in bar mem */ - void __iomem *hw_prod; + u8 __iomem *hw_prod; /* consumer address in bar mem */ - void __iomem *hw_cons; + u8 __iomem *hw_cons; /* q base address in bar mem */ - void __iomem *hw_q; + u8 
__iomem *hw_q; }; struct octep_ctrl_mbox { diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c index 021f888d8f6d..c3aca7b2775b 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -15,28 +15,120 @@ int octep_get_link_status(struct octep_device *oct) { - return 0; + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_net_h2f_resp *resp; + struct octep_ctrl_mbox_msg msg = {}; + int err; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; + req.link.cmd = OCTEP_CTRL_NET_CMD_GET; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; + msg.msg = &req; + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); + if (err) + return err; + + resp = (struct octep_ctrl_net_h2f_resp *)&req; + return resp->link.state; } void octep_set_link_status(struct octep_device *oct, bool up) { + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; + req.link.cmd = OCTEP_CTRL_NET_CMD_SET; + req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; + msg.msg = &req; + octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); } void octep_set_rx_state(struct octep_device *oct, bool up) { + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE; + req.link.cmd = OCTEP_CTRL_NET_CMD_SET; + req.link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : OCTEP_CTRL_NET_STATE_DOWN; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_STATE_REQ_SZW; + msg.msg = &req; + octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); } int octep_get_mac_addr(struct octep_device *oct, u8 *addr) { - return -1; + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_net_h2f_resp *resp; + struct octep_ctrl_mbox_msg msg = {}; + int err; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; + req.link.cmd = OCTEP_CTRL_NET_CMD_GET; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW; + msg.msg = &req; + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); + if (err) + return err; + + resp = (struct octep_ctrl_net_h2f_resp *)&req; + memcpy(addr, resp->mac.addr, ETH_ALEN); + + return err; } int octep_get_link_info(struct octep_device *oct) { - return -1; + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_net_h2f_resp *resp; + struct octep_ctrl_mbox_msg msg = {}; + int err; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; + req.mac.cmd = OCTEP_CTRL_NET_CMD_GET; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW; + msg.msg = &req; + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); + if (err) + return err; + + resp = (struct octep_ctrl_net_h2f_resp *)&req; + oct->link_info.supported_modes = resp->link_info.supported_modes; + oct->link_info.advertised_modes = resp->link_info.advertised_modes; + oct->link_info.autoneg = resp->link_info.autoneg; + oct->link_info.pause = resp->link_info.pause; + oct->link_info.speed = resp->link_info.speed; + + return err; } int octep_set_link_info(struct octep_device *oct, struct octep_iface_link_info *link_info) { - return -1; + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + 
req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; + req.link_info.cmd = OCTEP_CTRL_NET_CMD_SET; + req.link_info.info.advertised_modes = link_info->advertised_modes; + req.link_info.info.autoneg = link_info->autoneg; + req.link_info.info.pause = link_info->pause; + req.link_info.info.speed = link_info->speed; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_LINK_INFO_REQ_SZW; + msg.msg = &req; + + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); } -- cgit v1.2.3 From 397dfb57dcc25f393b32506eeb00600c56fe2444 Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 12 Apr 2022 20:35:00 -0700 Subject: octeon_ep: add Tx/Rx ring resource setup and cleanup Implement Tx/Rx ring resource allocation and cleanup. Signed-off-by: Veerasenareddy Burru Signed-off-by: Abhijit Ayarekar Signed-off-by: Satananda Burla Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 222 ++++++++++++++++++++++ drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 219 +++++++++++++++++++++ 2 files changed, 441 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c index 4fa67f305cc8..7086ede58475 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -7,10 +7,199 @@ #include #include +#include #include "octep_config.h" #include "octep_main.h" +static void octep_oq_reset_indices(struct octep_oq *oq) +{ + oq->host_read_idx = 0; + oq->host_refill_idx = 0; + oq->refill_count = 0; + oq->last_pkt_count = 0; + oq->pkts_pending = 0; +} + +/** + * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring. + * + * @oq: Octeon Rx queue data structure. + * + * Return: 0, if successfully filled receive buffers for all descriptors. + * -1, if failed to allocate a buffer or failed to map for DMA. + */ +static int octep_oq_fill_ring_buffers(struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 i; + + for (i = 0; i < oq->max_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "Rx buffer alloc failed\n"); + goto rx_buf_alloc_err; + } + desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer alloc: DMA mapping error!\n", + oq->q_no); + put_page(page); + goto dma_map_err; + } + oq->buff_info[i].page = page; + } + + return 0; + +dma_map_err: +rx_buf_alloc_err: + while (i) { + i--; + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + } + + return -1; +} + +/** + * octep_setup_oq() - Setup a Rx queue. + * + * @oct: Octeon device private data structure. + * @q_no: Rx queue number to be setup. + * + * Allocate resources for a Rx queue. 
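+ * The descriptor ring is placed in DMA-coherent memory and a host-side
+ * buff_info array of the same depth tracks the page backing each
+ * descriptor.  Every slot is pre-filled with a freshly allocated,
+ * DMA-mapped page before the queue registers are programmed.
+ *
+ * Return: 0 on success, -1 if any allocation or the initial buffer
+ *         fill fails (partially set up resources are released).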
+ */ +static int octep_setup_oq(struct octep_device *oct, int q_no) +{ + struct octep_oq *oq; + u32 desc_ring_size; + + oq = vzalloc(sizeof(*oq)); + if (!oq) + goto create_oq_fail; + oct->oq[q_no] = oq; + + oq->octep_dev = oct; + oq->netdev = oct->netdev; + oq->dev = &oct->pdev->dev; + oq->q_no = q_no; + oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); + oq->ring_size_mask = oq->max_count - 1; + oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); + oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE; + + /* When the hardware/firmware supports additional capabilities, + * additional header is filled-in by Octeon after length field in + * Rx packets. this header contains additional packet information. + */ + if (oct->caps_enabled) + oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE; + + oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf); + + desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE; + oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size, + &oq->desc_ring_dma, GFP_KERNEL); + + if (unlikely(!oq->desc_ring)) { + dev_err(oq->dev, + "Failed to allocate DMA memory for OQ-%d !!\n", q_no); + goto desc_dma_alloc_err; + } + + oq->buff_info = (struct octep_rx_buffer *) + vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE); + if (unlikely(!oq->buff_info)) { + dev_err(&oct->pdev->dev, + "Failed to allocate buffer info for OQ-%d\n", q_no); + goto buf_list_err; + } + + if (octep_oq_fill_ring_buffers(oq)) + goto oq_fill_buff_err; + + octep_oq_reset_indices(oq); + oct->hw_ops.setup_oq_regs(oct, q_no); + oct->num_oqs++; + + return 0; + +oq_fill_buff_err: + vfree(oq->buff_info); + oq->buff_info = NULL; +buf_list_err: + dma_free_coherent(oq->dev, desc_ring_size, + oq->desc_ring, oq->desc_ring_dma); + oq->desc_ring = NULL; +desc_dma_alloc_err: + vfree(oq); + oct->oq[q_no] = NULL; +create_oq_fail: + return -1; +} + +/** + * octep_oq_free_ring_buffers() - Free ring buffers. + * + * @oq: Octeon Rx queue data structure. + * + * Free receive buffers in unused Rx queue descriptors. + */ +static void octep_oq_free_ring_buffers(struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + int i; + + if (!oq->desc_ring || !oq->buff_info) + return; + + for (i = 0; i < oq->max_count; i++) { + if (oq->buff_info[i].page) { + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + desc_ring[i].buffer_ptr = 0; + } + } + octep_oq_reset_indices(oq); +} + +/** + * octep_free_oq() - Free Rx queue resources. + * + * @oq: Octeon Rx queue data structure. + * + * Free all resources of a Rx queue. + */ +static int octep_free_oq(struct octep_oq *oq) +{ + struct octep_device *oct = oq->octep_dev; + int q_no = oq->q_no; + + octep_oq_free_ring_buffers(oq); + + if (oq->buff_info) + vfree(oq->buff_info); + + if (oq->desc_ring) + dma_free_coherent(oq->dev, + oq->max_count * OCTEP_OQ_DESC_SIZE, + oq->desc_ring, oq->desc_ring_dma); + + vfree(oq); + oct->oq[q_no] = NULL; + oct->num_oqs--; + return 0; +} + /** * octep_setup_oqs() - setup resources for all Rx queues. 
* @@ -18,6 +207,26 @@ */ int octep_setup_oqs(struct octep_device *oct) { + int i, retval = 0; + + oct->num_oqs = 0; + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + retval = octep_setup_oq(oct, i); + if (retval) { + dev_err(&oct->pdev->dev, + "Failed to setup OQ(RxQ)-%d.\n", i); + goto oq_setup_err; + } + dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i); + } + + return 0; + +oq_setup_err: + while (i) { + i--; + octep_free_oq(oct->oq[i]); + } return -1; } @@ -30,6 +239,10 @@ int octep_setup_oqs(struct octep_device *oct) */ void octep_oq_dbell_init(struct octep_device *oct) { + int i; + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); } /** @@ -39,4 +252,13 @@ void octep_oq_dbell_init(struct octep_device *oct) */ void octep_free_oqs(struct octep_device *oct) { + int i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + if (!oct->oq[i]) + continue; + octep_free_oq(oct->oq[i]); + dev_dbg(&oct->pdev->dev, + "Successfully freed OQ(RxQ)-%d.\n", i); + } } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c index f0e1cc142b2e..185522d72475 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -7,10 +7,75 @@ #include #include +#include #include "octep_config.h" #include "octep_main.h" +/* Reset various index of Tx queue data structure. */ +static void octep_iq_reset_indices(struct octep_iq *iq) +{ + iq->fill_cnt = 0; + iq->host_write_index = 0; + iq->octep_read_index = 0; + iq->flush_index = 0; + iq->pkts_processed = 0; + iq->pkt_in_done = 0; + atomic_set(&iq->instr_pending, 0); +} + +/** + * octep_iq_free_pending() - Free Tx buffers for pending completions. + * + * @iq: Octeon Tx queue data structure. + */ +static void octep_iq_free_pending(struct octep_iq *iq) +{ + struct octep_tx_buffer *tx_buffer; + struct skb_shared_info *shinfo; + u32 fi = iq->flush_index; + struct sk_buff *skb; + u8 frags, i; + + while (fi != iq->host_write_index) { + tx_buffer = iq->buff_info + fi; + skb = tx_buffer->skb; + + fi++; + if (unlikely(fi == iq->max_count)) + fi = 0; + + if (!tx_buffer->gather) { + dma_unmap_single(iq->dev, tx_buffer->dma, + tx_buffer->skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + continue; + } + + /* Scatter/Gather */ + shinfo = skb_shinfo(skb); + frags = shinfo->nr_frags; + + dma_unmap_single(iq->dev, + tx_buffer->sglist[0].dma_ptr[0], + tx_buffer->sglist[0].len[0], + DMA_TO_DEVICE); + + i = 1; /* entry 0 is main skb, unmapped above */ + while (frags--) { + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], + tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE); + i++; + } + + dev_kfree_skb_any(skb); + } + + atomic_set(&iq->instr_pending, 0); + iq->flush_index = fi; + netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no)); +} + /** * octep_clean_iqs() - Clean Tx queues to shutdown the device. * @@ -21,6 +86,133 @@ */ void octep_clean_iqs(struct octep_device *oct) { + int i; + + for (i = 0; i < oct->num_iqs; i++) { + octep_iq_free_pending(oct->iq[i]); + octep_iq_reset_indices(oct->iq[i]); + } +} + +/** + * octep_setup_iq() - Setup a Tx queue. + * + * @oct: Octeon device private data structure. + * @q_no: Tx queue number to be setup. + * + * Allocate resources for a Tx queue. 
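+ * Three per-queue allocations are made: the hardware descriptor ring
+ * and the per-packet scatter/gather lists in DMA-coherent memory, plus
+ * a buff_info array tracking pending skbs.  Each buff_info entry is
+ * pre-pointed at its slice of the sglist area so the transmit fast
+ * path does not need to compute sglist addresses.
+ *
+ * Return: 0 on success, -1 if any allocation fails.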
+ */ +static int octep_setup_iq(struct octep_device *oct, int q_no) +{ + u32 desc_ring_size, buff_info_size, sglist_size; + struct octep_iq *iq; + int i; + + iq = vzalloc(sizeof(*iq)); + if (!iq) + goto iq_alloc_err; + oct->iq[q_no] = iq; + + iq->octep_dev = oct; + iq->netdev = oct->netdev; + iq->dev = &oct->pdev->dev; + iq->q_no = q_no; + iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); + iq->ring_size_mask = iq->max_count - 1; + iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); + iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); + + /* Allocate memory for hardware queue descriptors */ + desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); + iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, + &iq->desc_ring_dma, GFP_KERNEL); + if (unlikely(!iq->desc_ring)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d\n", q_no); + goto desc_dma_alloc_err; + } + + /* Allocate memory for hardware SGLIST descriptors */ + sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * + CFG_GET_IQ_NUM_DESC(oct->conf); + iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, + &iq->sglist_dma, GFP_KERNEL); + if (unlikely(!iq->sglist)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d SGLIST\n", + q_no); + goto sglist_alloc_err; + } + + /* allocate memory to manage Tx packets pending completion */ + buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count; + iq->buff_info = vzalloc(buff_info_size); + if (!iq->buff_info) { + dev_err(iq->dev, + "Failed to allocate buff info for IQ-%d\n", q_no); + goto buff_info_err; + } + + /* Setup sglist addresses in tx_buffer entries */ + for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { + struct octep_tx_buffer *tx_buffer; + + tx_buffer = &iq->buff_info[i]; + tx_buffer->sglist = + &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT]; + tx_buffer->sglist_dma = + iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT); + } + + octep_iq_reset_indices(iq); + oct->hw_ops.setup_iq_regs(oct, q_no); + + oct->num_iqs++; + return 0; + +buff_info_err: + dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); +sglist_alloc_err: + dma_free_coherent(iq->dev, desc_ring_size, + iq->desc_ring, iq->desc_ring_dma); +desc_dma_alloc_err: + vfree(iq); + oct->iq[q_no] = NULL; +iq_alloc_err: + return -1; +} + +/** + * octep_free_iq() - Free Tx queue resources. + * + * @iq: Octeon Tx queue data structure. + * + * Free all the resources allocated for a Tx queue. 
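+ * Releases the buff_info array, the descriptor ring and the
+ * scatter/gather list DMA memory, then frees the queue structure
+ * itself and clears its slot in the device's Tx queue table.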
+ */ +static void octep_free_iq(struct octep_iq *iq) +{ + struct octep_device *oct = iq->octep_dev; + u64 desc_ring_size, sglist_size; + int q_no = iq->q_no; + + desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); + + if (iq->buff_info) + vfree(iq->buff_info); + + if (iq->desc_ring) + dma_free_coherent(iq->dev, desc_ring_size, + iq->desc_ring, iq->desc_ring_dma); + + sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * + CFG_GET_IQ_NUM_DESC(oct->conf); + if (iq->sglist) + dma_free_coherent(iq->dev, sglist_size, + iq->sglist, iq->sglist_dma); + + vfree(iq); + oct->iq[q_no] = NULL; + oct->num_iqs--; } /** @@ -30,6 +222,25 @@ void octep_clean_iqs(struct octep_device *oct) */ int octep_setup_iqs(struct octep_device *oct) { + int i; + + oct->num_iqs = 0; + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + if (octep_setup_iq(oct, i)) { + dev_err(&oct->pdev->dev, + "Failed to setup IQ(TxQ)-%d.\n", i); + goto iq_setup_err; + } + dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i); + } + + return 0; + +iq_setup_err: + while (i) { + i--; + octep_free_iq(oct->iq[i]); + } return -1; } @@ -40,4 +251,12 @@ int octep_setup_iqs(struct octep_device *oct) */ void octep_free_iqs(struct octep_device *oct) { + int i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + octep_free_iq(oct->iq[i]); + dev_dbg(&oct->pdev->dev, + "Successfully destroyed IQ(TxQ)-%d.\n", i); + } + oct->num_iqs = 0; } -- cgit v1.2.3 From 6a610a46bad1fd9886d9bf19f4220ecbb7c1b9ef Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 12 Apr 2022 20:35:01 -0700 Subject: octeon_ep: add support for ndo ops Add support for ndo ops to set MAC address, change MTU, get stats. Add control path support to set MAC address, change MTU, get stats, set speed, get and set link mode. Signed-off-by: Veerasenareddy Burru Signed-off-by: Abhijit Ayarekar Signed-off-by: Satananda Burla Signed-off-by: David S. 
Miller --- .../ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 2 +- .../ethernet/marvell/octeon_ep/octep_ctrl_net.c | 60 +++++++++++++++++ .../net/ethernet/marvell/octeon_ep/octep_main.c | 78 ++++++++++++++++++++++ 3 files changed, 139 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h index 30f497f0bc26..2dc5753cfec6 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h @@ -120,7 +120,7 @@ struct octep_ctrl_mbox { /* size of bar memory */ u32 barmem_sz; /* pointer to BAR memory */ - void __iomem *barmem; + u8 __iomem *barmem; /* user context for callback, can be null */ void *user_ctx; /* callback handler for processing request, called from octep_ctrl_mbox_recv */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c index c3aca7b2775b..7c00c896ab98 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -87,6 +87,66 @@ int octep_get_mac_addr(struct octep_device *oct, u8 *addr) return err; } +int octep_set_mac_addr(struct octep_device *oct, u8 *addr) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; + req.mac.cmd = OCTEP_CTRL_NET_CMD_SET; + memcpy(&req.mac.addr, addr, ETH_ALEN); + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MAC_REQ_SZW; + msg.msg = &req; + + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); +} + +int octep_set_mtu(struct octep_device *oct, int mtu) +{ + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU; + req.mtu.cmd = OCTEP_CTRL_NET_CMD_SET; + req.mtu.val = mtu; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_MTU_REQ_SZW; + msg.msg = &req; + + return octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); +} + +int octep_get_if_stats(struct octep_device *oct) +{ + void __iomem *iface_rx_stats; + void __iomem *iface_tx_stats; + struct octep_ctrl_net_h2f_req req = {}; + struct octep_ctrl_mbox_msg msg = {}; + int err; + + req.hdr.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS; + req.mac.cmd = OCTEP_CTRL_NET_CMD_GET; + req.get_stats.offset = oct->ctrl_mbox_ifstats_offset; + + msg.hdr.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg.hdr.sizew = OCTEP_CTRL_NET_H2F_GET_STATS_REQ_SZW; + msg.msg = &req; + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &msg); + if (err) + return err; + + iface_rx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset; + iface_tx_stats = oct->ctrl_mbox.barmem + oct->ctrl_mbox_ifstats_offset + + sizeof(struct octep_iface_rx_stats); + memcpy_fromio(&oct->iface_rx_stats, iface_rx_stats, sizeof(struct octep_iface_rx_stats)); + memcpy_fromio(&oct->iface_tx_stats, iface_tx_stats, sizeof(struct octep_iface_tx_stats)); + + return err; +} + int octep_get_link_info(struct octep_device *oct) { struct octep_ctrl_net_h2f_req req = {}; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 8b3dc50d08fd..dac1e9464794 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -152,6 +152,43 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb, 
return NETDEV_TX_OK; } +/** + * octep_get_stats64() - Get Octeon network device statistics. + * + * @netdev: kernel network device. + * @stats: pointer to stats structure to be filled in. + */ +static void octep_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; + struct octep_device *oct = netdev_priv(netdev); + int q; + + octep_get_if_stats(oct); + tx_packets = 0; + tx_bytes = 0; + rx_packets = 0; + rx_bytes = 0; + for (q = 0; q < oct->num_oqs; q++) { + struct octep_iq *iq = oct->iq[q]; + struct octep_oq *oq = oct->oq[q]; + + tx_packets += iq->stats.instr_completed; + tx_bytes += iq->stats.bytes_sent; + rx_packets += oq->stats.packets; + rx_bytes += oq->stats.bytes; + } + stats->tx_packets = tx_packets; + stats->tx_bytes = tx_bytes; + stats->rx_packets = rx_packets; + stats->rx_bytes = rx_bytes; + stats->multicast = oct->iface_rx_stats.mcast_pkts; + stats->rx_errors = oct->iface_rx_stats.err_pkts; + stats->collisions = oct->iface_tx_stats.xscol; + stats->tx_fifo_errors = oct->iface_tx_stats.undflw; +} + /** * octep_tx_timeout_task - work queue task to Handle Tx queue timeout. * @@ -190,11 +227,52 @@ static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue) queue_work(octep_wq, &oct->tx_timeout_task); } +static int octep_set_mac(struct net_device *netdev, void *p) +{ + struct octep_device *oct = netdev_priv(netdev); + struct sockaddr *addr = (struct sockaddr *)p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = octep_set_mac_addr(oct, addr->sa_data); + if (err) + return err; + + memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, addr->sa_data); + + return 0; +} + +static int octep_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info *link_info; + int err = 0; + + link_info = &oct->link_info; + if (link_info->mtu == new_mtu) + return 0; + + err = octep_set_mtu(oct, new_mtu); + if (!err) { + oct->link_info.mtu = new_mtu; + netdev->mtu = new_mtu; + } + + return err; +} + static const struct net_device_ops octep_netdev_ops = { .ndo_open = octep_open, .ndo_stop = octep_stop, .ndo_start_xmit = octep_start_xmit, + .ndo_get_stats64 = octep_get_stats64, .ndo_tx_timeout = octep_tx_timeout, + .ndo_set_mac_address = octep_set_mac, + .ndo_change_mtu = octep_change_mtu, }; /** -- cgit v1.2.3 From 37d79d0596062057f588bdbb2ebad5455a43d353 Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 12 Apr 2022 20:35:02 -0700 Subject: octeon_ep: add Tx/Rx processing and interrupt support Add support to enable MSI-x and register interrupts. Add support to process Tx and Rx traffic. Includes processing Tx completions and Rx refill. Signed-off-by: Veerasenareddy Burru Signed-off-by: Abhijit Ayarekar Signed-off-by: Satananda Burla Signed-off-by: David S. 
Miller --- .../net/ethernet/marvell/octeon_ep/octep_main.c | 587 +++++++++++++++++++++ drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 244 +++++++++ drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 73 +++ 3 files changed, 904 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index dac1e9464794..2f6579d7c945 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -32,6 +32,431 @@ MODULE_DESCRIPTION(OCTEP_DRV_STRING); MODULE_LICENSE("GPL"); MODULE_VERSION(OCTEP_DRV_VERSION_STR); +/** + * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. + * + * @oct: Octeon device private data structure. + * + * Allocate resources to hold per Tx/Rx queue interrupt info. + * This is the information passed to interrupt handler, from which napi poll + * is scheduled and includes quick access to private data of Tx/Rx queue + * corresponding to the interrupt being handled. + * + * Return: 0, on successful allocation of resources for all queue interrupts. + * -1, if failed to allocate any resource. + */ +static int octep_alloc_ioq_vectors(struct octep_device *oct) +{ + int i; + struct octep_ioq_vector *ioq_vector; + + for (i = 0; i < oct->num_oqs; i++) { + oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); + if (!oct->ioq_vector[i]) + goto free_ioq_vector; + + ioq_vector = oct->ioq_vector[i]; + ioq_vector->iq = oct->iq[i]; + ioq_vector->oq = oct->oq[i]; + ioq_vector->octep_dev = oct; + } + + dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs); + return 0; + +free_ioq_vector: + while (i) { + i--; + vfree(oct->ioq_vector[i]); + oct->ioq_vector[i] = NULL; + } + return -1; +} + +/** + * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info. + * + * @oct: Octeon device private data structure. + */ +static void octep_free_ioq_vectors(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + if (oct->ioq_vector[i]) { + vfree(oct->ioq_vector[i]); + oct->ioq_vector[i] = NULL; + } + } + netdev_info(oct->netdev, "Freed IOQ Vectors\n"); +} + +/** + * octep_enable_msix_range() - enable MSI-x interrupts. + * + * @oct: Octeon device private data structure. + * + * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) + * for the Octeon device. + * + * Return: 0, on successfully enabling all MSI-x interrupts. + * -1, if failed to enable any MSI-x interrupt. 
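+ *
+ * One vector is requested per Tx/Rx queue pair plus the number of
+ * non-queue (generic) vectors reported by the device configuration.
+ * pci_enable_msix_range() is called with minvec equal to maxvec, so
+ * the allocation is all-or-nothing; a partial grant is treated as a
+ * failure and the vectors are released again.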
+ */ +static int octep_enable_msix_range(struct octep_device *oct) +{ + int num_msix, msix_allocated; + int i; + + /* Generic interrupts apart from input/output queues */ + num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf); + oct->msix_entries = kcalloc(num_msix, + sizeof(struct msix_entry), GFP_KERNEL); + if (!oct->msix_entries) + goto msix_alloc_err; + + for (i = 0; i < num_msix; i++) + oct->msix_entries[i].entry = i; + + msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries, + num_msix, num_msix); + if (msix_allocated != num_msix) { + dev_err(&oct->pdev->dev, + "Failed to enable %d msix irqs; got only %d\n", + num_msix, msix_allocated); + goto enable_msix_err; + } + oct->num_irqs = msix_allocated; + dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n"); + + return 0; + +enable_msix_err: + if (msix_allocated > 0) + pci_disable_msix(oct->pdev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; +msix_alloc_err: + return -1; +} + +/** + * octep_disable_msix() - disable MSI-x interrupts. + * + * @oct: Octeon device private data structure. + * + * Disable MSI-x on the Octeon device. + */ +static void octep_disable_msix(struct octep_device *oct) +{ + pci_disable_msix(oct->pdev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + dev_info(&oct->pdev->dev, "Disabled MSI-X\n"); +} + +/** + * octep_non_ioq_intr_handler() - common handler for all generic interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * this is common handler for all non-queue (generic) interrupts. + */ +static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.non_ioq_intr_handler(oct); +} + +/** + * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data contains pointers to Tx/Rx queue private data + * and correspong NAPI context. + * + * this is common handler for all non-queue (generic) interrupts. + */ +static irqreturn_t octep_ioq_intr_handler(int irq, void *data) +{ + struct octep_ioq_vector *ioq_vector = data; + struct octep_device *oct = ioq_vector->octep_dev; + + return oct->hw_ops.ioq_intr_handler(ioq_vector); +} + +/** + * octep_request_irqs() - Register interrupt handlers. + * + * @oct: Octeon device private data structure. + * + * Register handlers for all queue and non-queue interrupts. + * + * Return: 0, on successful registration of all interrupt handlers. + * -1, on any error. 
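+ *
+ * The first CFG_GET_NON_IOQ_MSIX() vectors are bound to the generic
+ * (non-queue) handler; the remaining vectors are bound one per Tx/Rx
+ * queue pair to the queue handler, and each queue vector's affinity
+ * hint is spread round-robin across the online CPUs.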
+ */ +static int octep_request_irqs(struct octep_device *oct) +{ + struct net_device *netdev = oct->netdev; + struct octep_ioq_vector *ioq_vector; + struct msix_entry *msix_entry; + char **non_ioq_msix_names; + int num_non_ioq_msix; + int ret, i; + + num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf); + non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf); + + oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix, + OCTEP_MSIX_NAME_SIZE, GFP_KERNEL); + if (!oct->non_ioq_irq_names) + goto alloc_err; + + /* First few MSI-X interrupts are non-queue interrupts */ + for (i = 0; i < num_non_ioq_msix; i++) { + char *irq_name; + + irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE]; + msix_entry = &oct->msix_entries[i]; + + snprintf(irq_name, OCTEP_MSIX_NAME_SIZE, + "%s-%s", netdev->name, non_ioq_msix_names[i]); + ret = request_irq(msix_entry->vector, + octep_non_ioq_intr_handler, 0, + irq_name, oct); + if (ret) { + netdev_err(netdev, + "request_irq failed for %s; err=%d", + irq_name, ret); + goto non_ioq_irq_err; + } + } + + /* Request IRQs for Tx/Rx queues */ + for (i = 0; i < oct->num_oqs; i++) { + ioq_vector = oct->ioq_vector[i]; + msix_entry = &oct->msix_entries[i + num_non_ioq_msix]; + + snprintf(ioq_vector->name, sizeof(ioq_vector->name), + "%s-q%d", netdev->name, i); + ret = request_irq(msix_entry->vector, + octep_ioq_intr_handler, 0, + ioq_vector->name, ioq_vector); + if (ret) { + netdev_err(netdev, + "request_irq failed for Q-%d; err=%d", + i, ret); + goto ioq_irq_err; + } + + cpumask_set_cpu(i % num_online_cpus(), + &ioq_vector->affinity_mask); + irq_set_affinity_hint(msix_entry->vector, + &ioq_vector->affinity_mask); + } + + return 0; +ioq_irq_err: + while (i > num_non_ioq_msix) { + --i; + irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); + free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]); + } +non_ioq_irq_err: + while (i) { + --i; + free_irq(oct->msix_entries[i].vector, oct); + } +alloc_err: + return -1; +} + +/** + * octep_free_irqs() - free all registered interrupts. + * + * @oct: Octeon device private data structure. + * + * Free all queue and non-queue interrupts of the Octeon device. + */ +static void octep_free_irqs(struct octep_device *oct) +{ + int i; + + /* First few MSI-X interrupts are non queue interrupts; free them */ + for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++) + free_irq(oct->msix_entries[i].vector, oct); + kfree(oct->non_ioq_irq_names); + + /* Free IRQs for Input/Output (Tx/Rx) queues */ + for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) { + irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); + free_irq(oct->msix_entries[i].vector, + oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]); + } + netdev_info(oct->netdev, "IRQs freed\n"); +} + +/** + * octep_setup_irqs() - setup interrupts for the Octeon device. + * + * @oct: Octeon device private data structure. + * + * Allocate data structures to hold per interrupt information, allocate/enable + * MSI-x interrupt and register interrupt handlers. + * + * Return: 0, on successful allocation and registration of all interrupts. + * -1, on any error. 
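+ *
+ * The three steps (per-queue vector data, MSI-X enable, IRQ
+ * registration) are performed in order and unwound in reverse if any
+ * one of them fails, so nothing is leaked on the error path.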
+ */ +static int octep_setup_irqs(struct octep_device *oct) +{ + if (octep_alloc_ioq_vectors(oct)) + goto ioq_vector_err; + + if (octep_enable_msix_range(oct)) + goto enable_msix_err; + + if (octep_request_irqs(oct)) + goto request_irq_err; + + return 0; + +request_irq_err: + octep_disable_msix(oct); +enable_msix_err: + octep_free_ioq_vectors(oct); +ioq_vector_err: + return -1; +} + +/** + * octep_clean_irqs() - free all interrupts and its resources. + * + * @oct: Octeon device private data structure. + */ +static void octep_clean_irqs(struct octep_device *oct) +{ + octep_free_irqs(oct); + octep_disable_msix(oct); + octep_free_ioq_vectors(oct); +} + +/** + * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. + * + * @iq: Octeon Tx queue data structure. + * @oq: Octeon Rx queue data structure. + */ +static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) +{ + u32 pkts_pend = oq->pkts_pending; + + netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); + if (iq->pkts_processed) { + writel(iq->pkts_processed, iq->inst_cnt_reg); + iq->pkt_in_done -= iq->pkts_processed; + iq->pkts_processed = 0; + } + if (oq->last_pkt_count - pkts_pend) { + writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg); + oq->last_pkt_count = pkts_pend; + } + + /* Flush the previous wrties before writing to RESEND bit */ + wmb(); + writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg); + writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); +} + +/** + * octep_napi_poll() - NAPI poll function for Tx/Rx. + * + * @napi: pointer to napi context. + * @budget: max number of packets to be processed in single invocation. + */ +static int octep_napi_poll(struct napi_struct *napi, int budget) +{ + struct octep_ioq_vector *ioq_vector = + container_of(napi, struct octep_ioq_vector, napi); + u32 tx_pending, rx_done; + + tx_pending = octep_iq_process_completions(ioq_vector->iq, budget); + rx_done = octep_oq_process_rx(ioq_vector->oq, budget); + + /* need more polling if tx completion processing is still pending or + * processed at least 'budget' number of rx packets. + */ + if (tx_pending || rx_done >= budget) + return budget; + + napi_complete(napi); + octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); + return rx_done; +} + +/** + * octep_napi_add() - Add NAPI poll for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_napi_add(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i); + netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, + octep_napi_poll, 64); + oct->oq[i]->napi = &oct->ioq_vector[i]->napi; + } +} + +/** + * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_napi_delete(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i); + netif_napi_del(&oct->ioq_vector[i]->napi); + oct->oq[i]->napi = NULL; + } +} + +/** + * octep_napi_enable() - enable NAPI for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_napi_enable(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i); + napi_enable(&oct->ioq_vector[i]->napi); + } +} + +/** + * octep_napi_disable() - disable NAPI for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. 
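+ *
+ * Calls napi_disable() on the NAPI context of every Tx/Rx queue
+ * vector; each call returns only once any poll in progress on that
+ * context has completed.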
+ */ +static void octep_napi_disable(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i); + napi_disable(&oct->ioq_vector[i]->napi); + } +} + static void octep_link_up(struct net_device *netdev) { netif_carrier_on(netdev); @@ -63,6 +488,8 @@ static int octep_open(struct net_device *netdev) goto setup_iq_err; if (octep_setup_oqs(oct)) goto setup_oq_err; + if (octep_setup_irqs(oct)) + goto setup_irq_err; err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); if (err) @@ -71,6 +498,9 @@ static int octep_open(struct net_device *netdev) if (err) goto set_queues_err; + octep_napi_add(oct); + octep_napi_enable(oct); + oct->link_info.admin_up = 1; octep_set_rx_state(oct, true); @@ -93,6 +523,10 @@ static int octep_open(struct net_device *netdev) return 0; set_queues_err: + octep_napi_disable(oct); + octep_napi_delete(oct); + octep_clean_irqs(oct); +setup_irq_err: octep_free_oqs(oct); setup_oq_err: octep_free_iqs(oct); @@ -126,7 +560,10 @@ static int octep_stop(struct net_device *netdev) oct->link_info.oper_up = 0; oct->hw_ops.disable_interrupts(oct); + octep_napi_disable(oct); + octep_napi_delete(oct); + octep_clean_irqs(oct); octep_clean_iqs(oct); oct->hw_ops.disable_io_queues(oct); @@ -137,6 +574,36 @@ static int octep_stop(struct net_device *netdev) return 0; } +/** + * octep_iq_full_check() - check if a Tx queue is full. + * + * @iq: Octeon Tx queue data structure. + * + * Return: 0, if the Tx queue is not full. + * 1, if the Tx queue is full. + */ +static inline int octep_iq_full_check(struct octep_iq *iq) +{ + if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >= + OCTEP_WAKE_QUEUE_THRESHOLD)) + return 0; + + /* Stop the queue if unable to send */ + netif_stop_subqueue(iq->netdev, iq->q_no); + + /* check again and restart the queue, in case NAPI has just freed + * enough Tx ring entries. + */ + if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >= + OCTEP_WAKE_QUEUE_THRESHOLD)) { + netif_start_subqueue(iq->netdev, iq->q_no); + iq->stats.restart_cnt++; + return 0; + } + + return 1; +} + /** * octep_start_xmit() - Enqueue packet to Octoen hardware Tx Queue. 
* @@ -149,6 +616,126 @@ static int octep_stop(struct net_device *netdev) static netdev_tx_t octep_start_xmit(struct sk_buff *skb, struct net_device *netdev) { + struct octep_device *oct = netdev_priv(netdev); + struct octep_tx_sglist_desc *sglist; + struct octep_tx_buffer *tx_buffer; + struct octep_tx_desc_hw *hw_desc; + struct skb_shared_info *shinfo; + struct octep_instr_hdr *ih; + struct octep_iq *iq; + skb_frag_t *frag; + u16 nr_frags, si; + u16 q_no, wi; + + q_no = skb_get_queue_mapping(skb); + if (q_no >= oct->num_iqs) { + netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); + q_no = q_no % oct->num_iqs; + } + + iq = oct->iq[q_no]; + if (octep_iq_full_check(iq)) { + iq->stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + shinfo = skb_shinfo(skb); + nr_frags = shinfo->nr_frags; + + wi = iq->host_write_index; + hw_desc = &iq->desc_ring[wi]; + hw_desc->ih64 = 0; + + tx_buffer = iq->buff_info + wi; + tx_buffer->skb = skb; + + ih = &hw_desc->ih; + ih->tlen = skb->len; + ih->pkind = oct->pkind; + + if (!nr_frags) { + tx_buffer->gather = 0; + tx_buffer->dma = dma_map_single(iq->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, tx_buffer->dma)) + goto dma_map_err; + hw_desc->dptr = tx_buffer->dma; + } else { + /* Scatter/Gather */ + dma_addr_t dma; + u16 len; + + sglist = tx_buffer->sglist; + + ih->gsz = nr_frags + 1; + ih->gather = 1; + tx_buffer->gather = 1; + + len = skb_headlen(skb); + dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, dma)) + goto dma_map_err; + + dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma, + OCTEP_SGLIST_SIZE_PER_PKT, + DMA_TO_DEVICE); + memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT); + sglist[0].len[3] = len; + sglist[0].dma_ptr[0] = dma; + + si = 1; /* entry 0 is main skb, mapped above */ + frag = &shinfo->frags[0]; + while (nr_frags--) { + len = skb_frag_size(frag); + dma = skb_frag_dma_map(iq->dev, frag, 0, + len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, dma)) + goto dma_map_sg_err; + + sglist[si >> 2].len[3 - (si & 3)] = len; + sglist[si >> 2].dma_ptr[si & 3] = dma; + + frag++; + si++; + } + dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma, + OCTEP_SGLIST_SIZE_PER_PKT, + DMA_TO_DEVICE); + + hw_desc->dptr = tx_buffer->sglist_dma; + } + + /* Flush the hw descriptor before writing to doorbell */ + wmb(); + + /* Ring Doorbell to notify the NIC there is a new packet */ + writel(1, iq->doorbell_reg); + atomic_inc(&iq->instr_pending); + wi++; + if (wi == iq->max_count) + wi = 0; + iq->host_write_index = wi; + + netdev_tx_sent_queue(iq->netdev_q, skb->len); + iq->stats.instr_posted++; + skb_tx_timestamp(skb); + return NETDEV_TX_OK; + +dma_map_sg_err: + if (si > 0) { + dma_unmap_single(iq->dev, sglist[0].dma_ptr[0], + sglist[0].len[0], DMA_TO_DEVICE); + sglist[0].len[0] = 0; + } + while (si > 1) { + dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], + sglist[si >> 2].len[si & 3], DMA_TO_DEVICE); + sglist[si >> 2].len[si & 3] = 0; + si--; + } + tx_buffer->gather = 0; +dma_map_err: + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c index 7086ede58475..945947ec7723 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -68,6 +68,50 @@ rx_buf_alloc_err: return -1; } +/** + * octep_oq_refill() - refill buffers for used Rx ring descriptors. + * + * @oct: Octeon device private data structure. 
+ * @oq: Octeon Rx queue data structure. + * + * Return: number of descriptors successfully refilled with receive buffers. + */ +static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 refill_idx, i; + + refill_idx = oq->host_refill_idx; + for (i = 0; i < oq->refill_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "refill: rx buffer alloc failed\n"); + oq->stats.alloc_failures++; + break; + } + + desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer refill: DMA mapping error!\n", + oq->q_no); + put_page(page); + oq->stats.alloc_failures++; + break; + } + oq->buff_info[refill_idx].page = page; + refill_idx++; + if (refill_idx == oq->max_count) + refill_idx = 0; + } + oq->host_refill_idx = refill_idx; + oq->refill_count -= i; + + return i; +} + /** * octep_setup_oq() - Setup a Rx queue. * @@ -262,3 +306,203 @@ void octep_free_oqs(struct octep_device *oct) "Successfully freed OQ(RxQ)-%d.\n", i); } } + +/** + * octep_oq_check_hw_for_pkts() - Check for new Rx packets. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * + * Return: packets received after previous check. + */ +static int octep_oq_check_hw_for_pkts(struct octep_device *oct, + struct octep_oq *oq) +{ + u32 pkt_count, new_pkts; + + pkt_count = readl(oq->pkts_sent_reg); + new_pkts = pkt_count - oq->last_pkt_count; + + /* Clear the hardware packets counter register if the rx queue is + * being processed continuously with-in a single interrupt and + * reached half its max value. + * this counter is not cleared every time read, to save write cycles. + */ + if (unlikely(pkt_count > 0xF0000000U)) { + writel(pkt_count, oq->pkts_sent_reg); + pkt_count = readl(oq->pkts_sent_reg); + new_pkts += pkt_count; + } + oq->last_pkt_count = pkt_count; + oq->pkts_pending += new_pkts; + return new_pkts; +} + +/** + * __octep_oq_process_rx() - Process hardware Rx queue and push to stack. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * @pkts_to_process: number of packets to be processed. + * + * Process the new packets in Rx queue. + * Packets larger than single Rx buffer arrive in consecutive descriptors. + * But, count returned by the API only accounts full packets, not fragments. + * + * Return: number of packets processed and pushed to stack. 
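+ *
+ * For each packet the head page is unmapped and handed to build_skb();
+ * the length is taken from the big-endian hardware response header and,
+ * when the packet spans several ring buffers, the remaining pages are
+ * attached as skb fragments.  Checksum status comes from the extended
+ * response header when Rx checksum offload is enabled, and the skb is
+ * delivered through napi_gro_receive().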
+ */ +static int __octep_oq_process_rx(struct octep_device *oct, + struct octep_oq *oq, u16 pkts_to_process) +{ + struct octep_oq_resp_hw_ext *resp_hw_ext = NULL; + struct octep_rx_buffer *buff_info; + struct octep_oq_resp_hw *resp_hw; + u32 pkt, rx_bytes, desc_used; + struct sk_buff *skb; + u16 data_offset; + u32 read_idx; + + read_idx = oq->host_read_idx; + rx_bytes = 0; + desc_used = 0; + for (pkt = 0; pkt < pkts_to_process; pkt++) { + buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx]; + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + resp_hw = page_address(buff_info->page); + buff_info->page = NULL; + + /* Swap the length field that is in Big-Endian to CPU */ + buff_info->len = be64_to_cpu(resp_hw->length); + if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) { + /* Extended response header is immediately after + * response header (resp_hw) + */ + resp_hw_ext = (struct octep_oq_resp_hw_ext *) + (resp_hw + 1); + buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE; + /* Packet Data is immediately after + * extended response header. + */ + data_offset = OCTEP_OQ_RESP_HW_SIZE + + OCTEP_OQ_RESP_HW_EXT_SIZE; + } else { + /* Data is immediately after + * Hardware Rx response header. + */ + data_offset = OCTEP_OQ_RESP_HW_SIZE; + } + rx_bytes += buff_info->len; + + if (buff_info->len <= oq->max_single_buffer_size) { + skb = build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + skb_put(skb, buff_info->len); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } else { + struct skb_shared_info *shinfo; + u16 data_len; + + skb = build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + /* Head fragment includes response header(s); + * subsequent fragments contains only data. + */ + skb_put(skb, oq->max_single_buffer_size); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + + shinfo = skb_shinfo(skb); + data_len = buff_info->len - oq->max_single_buffer_size; + while (data_len) { + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + buff_info = (struct octep_rx_buffer *) + &oq->buff_info[read_idx]; + if (data_len < oq->buffer_size) { + buff_info->len = data_len; + data_len = 0; + } else { + buff_info->len = oq->buffer_size; + data_len -= oq->buffer_size; + } + + skb_add_rx_frag(skb, shinfo->nr_frags, + buff_info->page, 0, + buff_info->len, + buff_info->len); + buff_info->page = NULL; + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } + } + + skb->dev = oq->netdev; + skb->protocol = eth_type_trans(skb, skb->dev); + if (resp_hw_ext && + resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + napi_gro_receive(oq->napi, skb); + } + + oq->host_read_idx = read_idx; + oq->refill_count += desc_used; + oq->stats.packets += pkt; + oq->stats.bytes += rx_bytes; + + return pkt; +} + +/** + * octep_oq_process_rx() - Process Rx queue. + * + * @oq: Octeon Rx queue data structure. + * @budget: max number of packets can be processed in one invocation. + * + * Check for newly received packets and process them. + * Keeps checking for new packets until budget is used or no new packets seen. + * + * Return: number of packets processed. 
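+ *
+ * The pending-packet count is refreshed from the hardware register only
+ * when the locally cached count is exhausted.  After processing, the
+ * ring is refilled once the number of consumed descriptors crosses the
+ * refill threshold, and the refilled credits are written back to the
+ * hardware doorbell register.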
+ */ +int octep_oq_process_rx(struct octep_oq *oq, int budget) +{ + u32 pkts_available, pkts_processed, total_pkts_processed; + struct octep_device *oct = oq->octep_dev; + + pkts_available = 0; + pkts_processed = 0; + total_pkts_processed = 0; + while (total_pkts_processed < budget) { + /* update pending count only when current one exhausted */ + if (oq->pkts_pending == 0) + octep_oq_check_hw_for_pkts(oct, oq); + pkts_available = min(budget - total_pkts_processed, + oq->pkts_pending); + if (!pkts_available) + break; + + pkts_processed = __octep_oq_process_rx(oct, oq, + pkts_available); + oq->pkts_pending -= pkts_processed; + total_pkts_processed += pkts_processed; + } + + if (oq->refill_count >= oq->refill_threshold) { + u32 desc_refilled = octep_oq_refill(oct, oq); + + /* flush pending writes before updating credits */ + wmb(); + writel(desc_refilled, oq->pkts_credit_reg); + } + + return total_pkts_processed; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c index 185522d72475..511552bc3e87 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -24,6 +24,79 @@ static void octep_iq_reset_indices(struct octep_iq *iq) atomic_set(&iq->instr_pending, 0); } +/** + * octep_iq_process_completions() - Process Tx queue completions. + * + * @iq: Octeon Tx queue data structure. + * @budget: max number of completions to be processed in one invocation. + */ +int octep_iq_process_completions(struct octep_iq *iq, u16 budget) +{ + u32 compl_pkts, compl_bytes, compl_sg; + struct octep_device *oct = iq->octep_dev; + struct octep_tx_buffer *tx_buffer; + struct skb_shared_info *shinfo; + u32 fi = iq->flush_index; + struct sk_buff *skb; + u8 frags, i; + + compl_pkts = 0; + compl_sg = 0; + compl_bytes = 0; + iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq); + + while (likely(budget && (fi != iq->octep_read_index))) { + tx_buffer = iq->buff_info + fi; + skb = tx_buffer->skb; + + fi++; + if (unlikely(fi == iq->max_count)) + fi = 0; + compl_bytes += skb->len; + compl_pkts++; + budget--; + + if (!tx_buffer->gather) { + dma_unmap_single(iq->dev, tx_buffer->dma, + tx_buffer->skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + continue; + } + + /* Scatter/Gather */ + shinfo = skb_shinfo(skb); + frags = shinfo->nr_frags; + compl_sg++; + + dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], + tx_buffer->sglist[0].len[0], DMA_TO_DEVICE); + + i = 1; /* entry 0 is main skb, unmapped above */ + while (frags--) { + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], + tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE); + i++; + } + + dev_kfree_skb_any(skb); + } + + iq->pkts_processed += compl_pkts; + atomic_sub(compl_pkts, &iq->instr_pending); + iq->stats.instr_completed += compl_pkts; + iq->stats.bytes_sent += compl_bytes; + iq->stats.sgentry_sent += compl_sg; + iq->flush_index = fi; + + netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes); + + if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) && + ((iq->max_count - atomic_read(&iq->instr_pending)) > + OCTEP_WAKE_QUEUE_THRESHOLD)) + netif_wake_subqueue(iq->netdev, iq->q_no); + return !budget; +} + /** * octep_iq_free_pending() - Free Tx buffers for pending completions. 
* -- cgit v1.2.3 From 5cc256e79bff06d299e4416d75786bdc411fa0e6 Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 12 Apr 2022 20:35:03 -0700 Subject: octeon_ep: add ethtool support for Octeon PCI Endpoint NIC Add support for the following ethtool commands: ethtool -i|--driver devname ethtool devname ethtool -s devname [speed N] [autoneg on|off] [advertise N] ethtool -S|--statistics devname Signed-off-by: Veerasenareddy Burru Signed-off-by: Abhijit Ayarekar Signed-off-by: Satananda Burla Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeon_ep/Makefile | 2 +- .../net/ethernet/marvell/octeon_ep/octep_ethtool.c | 463 +++++++++++++++++++++ .../net/ethernet/marvell/octeon_ep/octep_main.c | 1 + 3 files changed, 465 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile index 6e2db8e80b4a..2026c8118158 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/Makefile +++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_OCTEON_EP) += octeon_ep.o octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \ - octep_ctrl_mbox.o octep_ctrl_net.o + octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c new file mode 100644 index 000000000000..87ef129b269a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include +#include +#include + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_ctrl_net.h" + +static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_alloc_errors", + "tx_busy_errors", + "rx_dropped", + "tx_dropped", + "tx_hw_pkts", + "tx_hw_octs", + "tx_hw_bcast", + "tx_hw_mcast", + "tx_hw_underflow", + "tx_hw_control", + "tx_less_than_64", + "tx_equal_64", + "tx_equal_65_to_127", + "tx_equal_128_to_255", + "tx_equal_256_to_511", + "tx_equal_512_to_1023", + "tx_equal_1024_to_1518", + "tx_greater_than_1518", + "rx_hw_pkts", + "rx_hw_bytes", + "rx_hw_bcast", + "rx_hw_mcast", + "rx_pause_pkts", + "rx_pause_bytes", + "rx_dropped_pkts_fifo_full", + "rx_dropped_bytes_fifo_full", + "rx_err_pkts", +}; + +#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN) + +static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = { + "tx_packets_posted[Q-%u]", + "tx_packets_completed[Q-%u]", + "tx_bytes[Q-%u]", + "tx_busy[Q-%u]", +}; + +#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN) + +static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = { + "rx_packets[Q-%u]", + "rx_bytes[Q-%u]", + "rx_alloc_errors[Q-%u]", +}; + +#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN) + +static void octep_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct octep_device *oct = netdev_priv(netdev); + + strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info)); +} + +static void octep_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct octep_device *oct = 
netdev_priv(netdev); + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + char *strings = (char *)data; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_gstrings_global_stats[i]); + strings += ETH_GSTRING_LEN; + } + + for (i = 0; i < num_queues; i++) { + for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_gstrings_tx_q_stats[j], i); + strings += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < num_queues; i++) { + for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_gstrings_rx_q_stats[j], i); + strings += ETH_GSTRING_LEN; + } + } + break; + default: + break; + } +} + +static int octep_get_sset_count(struct net_device *netdev, int sset) +{ + struct octep_device *oct = netdev_priv(netdev); + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + switch (sset) { + case ETH_SS_STATS: + return OCTEP_GLOBAL_STATS_CNT + (num_queues * + (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT)); + break; + default: + return -EOPNOTSUPP; + } +} + +static void +octep_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_tx_stats *iface_tx_stats; + struct octep_iface_rx_stats *iface_rx_stats; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + u64 rx_alloc_errors, tx_busy_errors; + int q, i; + + rx_packets = 0; + rx_bytes = 0; + tx_packets = 0; + tx_bytes = 0; + rx_alloc_errors = 0; + tx_busy_errors = 0; + tx_packets = 0; + tx_bytes = 0; + rx_packets = 0; + rx_bytes = 0; + + octep_get_if_stats(oct); + iface_tx_stats = &oct->iface_tx_stats; + iface_rx_stats = &oct->iface_rx_stats; + + for (q = 0; q < oct->num_oqs; q++) { + struct octep_iq *iq = oct->iq[q]; + struct octep_oq *oq = oct->oq[q]; + + tx_packets += iq->stats.instr_completed; + tx_bytes += iq->stats.bytes_sent; + tx_busy_errors += iq->stats.tx_busy; + + rx_packets += oq->stats.packets; + rx_bytes += oq->stats.bytes; + rx_alloc_errors += oq->stats.alloc_failures; + } + i = 0; + data[i++] = rx_packets; + data[i++] = tx_packets; + data[i++] = rx_bytes; + data[i++] = tx_bytes; + data[i++] = rx_alloc_errors; + data[i++] = tx_busy_errors; + data[i++] = iface_rx_stats->dropped_pkts_fifo_full + + iface_rx_stats->err_pkts; + data[i++] = iface_tx_stats->xscol + + iface_tx_stats->xsdef; + data[i++] = iface_tx_stats->pkts; + data[i++] = iface_tx_stats->octs; + data[i++] = iface_tx_stats->bcst; + data[i++] = iface_tx_stats->mcst; + data[i++] = iface_tx_stats->undflw; + data[i++] = iface_tx_stats->ctl; + data[i++] = iface_tx_stats->hist_lt64; + data[i++] = iface_tx_stats->hist_eq64; + data[i++] = iface_tx_stats->hist_65to127; + data[i++] = iface_tx_stats->hist_128to255; + data[i++] = iface_tx_stats->hist_256to511; + data[i++] = iface_tx_stats->hist_512to1023; + data[i++] = iface_tx_stats->hist_1024to1518; + data[i++] = iface_tx_stats->hist_gt1518; + data[i++] = iface_rx_stats->pkts; + data[i++] = iface_rx_stats->octets; + data[i++] = iface_rx_stats->mcast_pkts; + data[i++] = iface_rx_stats->bcast_pkts; + data[i++] = iface_rx_stats->pause_pkts; + data[i++] = iface_rx_stats->pause_octets; + data[i++] = iface_rx_stats->dropped_pkts_fifo_full; + data[i++] = iface_rx_stats->dropped_octets_fifo_full; + data[i++] = iface_rx_stats->err_pkts; + + /* Per Tx Queue stats */ + for (q = 0; q < oct->num_iqs; q++) { + struct octep_iq *iq = oct->iq[q]; + + data[i++] = 
iq->stats.instr_posted; + data[i++] = iq->stats.instr_completed; + data[i++] = iq->stats.bytes_sent; + data[i++] = iq->stats.tx_busy; + } + + /* Per Rx Queue stats */ + for (q = 0; q < oct->num_oqs; q++) { + struct octep_oq *oq = oct->oq[q]; + + data[i++] = oq->stats.packets; + data[i++] = oq->stats.bytes; + data[i++] = oq->stats.alloc_failures; + } +} + +#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \ +{ \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 
100000baseLR4_ER4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \ +} + +static int octep_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info *link_info; + u32 advertised_modes, supported_modes; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + octep_get_link_info(oct); + + advertised_modes = oct->link_info.advertised_modes; + supported_modes = oct->link_info.supported_modes; + link_info = &oct->link_info; + + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported); + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising); + + if (link_info->autoneg) { + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED) + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + if (link_info->pause) { + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED) + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + } + + cmd->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + if (netif_carrier_ok(netdev)) { + cmd->base.speed = link_info->speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + return 0; +} + +static int octep_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info link_info_new; + struct octep_iface_link_info *link_info; + u64 advertised = 0; + u8 autoneg = 0; + int err; + + link_info = &oct->link_info; + memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info)); + + /* Only Full duplex is supported; + * Assume full duplex when duplex is unknown. 
+ */ + if (cmd->base.duplex != DUPLEX_FULL && + cmd->base.duplex != DUPLEX_UNKNOWN) + return -EOPNOTSUPP; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)) + return -EOPNOTSUPP; + autoneg = 1; + } + + if (!bitmap_subset(cmd->link_modes.advertising, + cmd->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseR_FEC)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseLR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseCR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseKR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseLR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseSR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseCR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseKR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseSR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseLR_ER_FR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseCR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseKR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseLR4_ER4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseSR4_Full)) + 
advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4); + + if (advertised == link_info->advertised_modes && + cmd->base.speed == link_info->speed && + cmd->base.autoneg == link_info->autoneg) + return 0; + + link_info_new.advertised_modes = advertised; + link_info_new.speed = cmd->base.speed; + link_info_new.autoneg = autoneg; + + err = octep_set_link_info(oct, &link_info_new); + if (err) + return err; + + memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info)); + return 0; +} + +static const struct ethtool_ops octep_ethtool_ops = { + .get_drvinfo = octep_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = octep_get_strings, + .get_sset_count = octep_get_sset_count, + .get_ethtool_stats = octep_get_ethtool_stats, + .get_link_ksettings = octep_get_link_ksettings, + .set_link_ksettings = octep_set_link_ksettings, +}; + +void octep_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &octep_ethtool_ops; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 2f6579d7c945..5d39c857ea41 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1059,6 +1059,7 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); netdev->netdev_ops = &octep_netdev_ops; + octep_set_ethtool_ops(netdev); netif_carrier_off(netdev); netdev->hw_features = NETIF_F_SG; -- cgit v1.2.3 From 9386ebccfc599de5578a278ffb16d90cc696969a Mon Sep 17 00:00:00 2001 From: Dylan Muller Date: Tue, 12 Apr 2022 17:26:00 +0200 Subject: nfp: update nfp_X logging definitions Previously it was not possible to determine which code path was responsible for generating a certain message after a call to the nfp_X messaging definitions for cases of duplicate strings. We therefore modify nfp_err, nfp_warn, nfp_info, nfp_dbg and nfp_printk to print the corresponding file and line number where the nfp_X definition is used. Signed-off-by: Dylan Muller Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h | 26 +++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 3d379e937184..ddb34bfb9bef 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -13,22 +13,36 @@ #include #include #include +#include #ifndef NFP_SUBSYS #define NFP_SUBSYS "nfp" #endif -#define nfp_err(cpp, fmt, args...) \ +#define string_format(x) __FILE__ ":" __stringify(__LINE__) ": " x + +#define __nfp_err(cpp, fmt, args...) \ dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) -#define nfp_warn(cpp, fmt, args...) \ +#define __nfp_warn(cpp, fmt, args...) \ dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) -#define nfp_info(cpp, fmt, args...) \ +#define __nfp_info(cpp, fmt, args...) \ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) -#define nfp_dbg(cpp, fmt, args...) \ +#define __nfp_dbg(cpp, fmt, args...) \ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define __nfp_printk(level, cpp, fmt, args...) \ + dev_printk(level, nfp_cpp_device(cpp)->parent, \ + NFP_SUBSYS ": " fmt, ## args) + +#define nfp_err(cpp, fmt, args...) 
\ + __nfp_err(cpp, string_format(fmt), ## args) +#define nfp_warn(cpp, fmt, args...) \ + __nfp_warn(cpp, string_format(fmt), ## args) +#define nfp_info(cpp, fmt, args...) \ + __nfp_info(cpp, string_format(fmt), ## args) +#define nfp_dbg(cpp, fmt, args...) \ + __nfp_dbg(cpp, string_format(fmt), ## args) #define nfp_printk(level, cpp, fmt, args...) \ - dev_printk(level, nfp_cpp_device(cpp)->parent, \ - NFP_SUBSYS ": " fmt, ## args) + __nfp_printk(level, cpp, string_format(fmt), ## args) #define PCI_64BIT_BAR_COUNT 3 -- cgit v1.2.3 From 64b97df995f0c943be469a019d6117c89e2131bc Mon Sep 17 00:00:00 2001 From: Lech Perczak Date: Wed, 13 Apr 2022 03:44:14 +0200 Subject: cdc_ether: export usbnet_cdc_zte_rx_fixup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit bfe9b9d2df66 ("cdc_ether: Improve ZTE MF823/831/910 handling") introduces a workaround for certain ZTE modems reporting invalid MAC addresses over CDC-ECM. The same issue was present on their RNDIS interface,which was fixed in commit a5a18bdf7453 ("rndis_host: Set valid random MAC on buggy devices"). However, internal modem of ZTE MF286R router, on its RNDIS interface, also exhibits a second issue fixed already in CDC-ECM, of the device not respecting configured random MAC address. In order to share the fixup for this with rndis_host driver, export the workaround function, which will be re-used in the following commit in rndis_host. Cc: Kristian Evensen Cc: Bjørn Mork Cc: Oliver Neukum Signed-off-by: Lech Perczak Signed-off-by: Paolo Abeni --- drivers/net/usb/cdc_ether.c | 3 ++- include/linux/usb/usbnet.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 9b4dfa3001d6..2de09ad5bac0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -479,7 +479,7 @@ static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf) * device MAC address has been updated). Always set MAC address to that of the * device. */ -static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02)) return 1; @@ -489,6 +489,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return 1; } +EXPORT_SYMBOL_GPL(usbnet_cdc_zte_rx_fixup); /* Ensure correct link state * diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 8336e86ce606..1b4d72d5e891 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -214,6 +214,7 @@ extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf) extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *); extern void usbnet_cdc_status(struct usbnet *, struct urb *); +extern int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb); /* CDC and RNDIS support the same host-chosen packet filters for IN transfers */ #define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ -- cgit v1.2.3 From 36e747972d8b4c09e6e3275e31a3acba46e2c4d2 Mon Sep 17 00:00:00 2001 From: Lech Perczak Date: Wed, 13 Apr 2022 03:44:15 +0200 Subject: rndis_host: enable the bogus MAC fixup for ZTE devices from cdc_ether MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Certain ZTE modems, namely: MF823. 
MF831, MF910, built-in modem from MF286R, expose both CDC-ECM and RNDIS network interfaces. They have a trait of ignoring the locally-administered MAC address configured on the interface both in CDC-ECM and RNDIS part, and this leads to dropping of incoming traffic by the host. However, the workaround was only present in CDC-ECM, and MF286R explicitly requires it in RNDIS mode. Re-use the workaround in rndis_host as well, to fix operation of MF286R module, some versions of which expose only the RNDIS interface. Do so by introducing new flag, RNDIS_DRIVER_DATA_DST_MAC_FIXUP, and testing for it in rndis_rx_fixup. This is required, as RNDIS uses frame batching, and all of the packets inside the batch need the fixup. This might introduce a performance penalty, because test is done for every returned Ethernet frame. Apply the workaround to both "flavors" of RNDIS interfaces, as older ZTE modems, like MF823 found in the wild, report the USB_CLASS_COMM class interfaces, while MF286R reports USB_CLASS_WIRELESS_CONTROLLER. Suggested-by: Bjørn Mork Cc: Kristian Evensen Cc: Oliver Neukum Signed-off-by: Lech Perczak Signed-off-by: Paolo Abeni --- drivers/net/usb/rndis_host.c | 32 ++++++++++++++++++++++++++++++++ include/linux/usb/rndis_host.h | 1 + 2 files changed, 33 insertions(+) (limited to 'drivers') diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 247f58cb0f84..7a9ece2de2c5 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -485,10 +485,14 @@ EXPORT_SYMBOL_GPL(rndis_unbind); */ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + bool dst_mac_fixup; + /* This check is no longer done by usbnet */ if (skb->len < dev->net->hard_header_len) return 0; + dst_mac_fixup = !!(dev->driver_info->data & RNDIS_DRIVER_DATA_DST_MAC_FIXUP); + /* peripheral may have batched packets to us... 
*/ while (likely(skb->len)) { struct rndis_data_hdr *hdr = (void *)skb->data; @@ -523,10 +527,17 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) break; skb_pull(skb, msg_len - sizeof *hdr); skb_trim(skb2, data_len); + + if (unlikely(dst_mac_fixup)) + usbnet_cdc_zte_rx_fixup(dev, skb2); + usbnet_skb_return(dev, skb2); } /* caller will usbnet_skb_return the remaining packet */ + if (unlikely(dst_mac_fixup)) + usbnet_cdc_zte_rx_fixup(dev, skb); + return 1; } EXPORT_SYMBOL_GPL(rndis_rx_fixup); @@ -600,6 +611,17 @@ static const struct driver_info rndis_poll_status_info = { .tx_fixup = rndis_tx_fixup, }; +static const struct driver_info zte_rndis_info = { + .description = "ZTE RNDIS device", + .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, + .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP, + .bind = rndis_bind, + .unbind = rndis_unbind, + .status = rndis_status, + .rx_fixup = rndis_rx_fixup, + .tx_fixup = rndis_tx_fixup, +}; + /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { @@ -613,6 +635,16 @@ static const struct usb_device_id products [] = { USB_VENDOR_AND_INTERFACE_INFO(0x238b, USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long)&rndis_info, +}, { + /* ZTE WWAN modules */ + USB_VENDOR_AND_INTERFACE_INFO(0x19d2, + USB_CLASS_WIRELESS_CONTROLLER, 1, 3), + .driver_info = (unsigned long)&zte_rndis_info, +}, { + /* ZTE WWAN modules, ACM flavour */ + USB_VENDOR_AND_INTERFACE_INFO(0x19d2, + USB_CLASS_COMM, 2 /* ACM */, 0x0ff), + .driver_info = (unsigned long)&zte_rndis_info, }, { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h index 809bccd08455..cc42db51bbba 100644 --- a/include/linux/usb/rndis_host.h +++ b/include/linux/usb/rndis_host.h @@ -197,6 +197,7 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */ /* Flags for driver_info::data */ #define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */ +#define RNDIS_DRIVER_DATA_DST_MAC_FIXUP 2 /* device ignores configured MAC address */ extern void rndis_status(struct usbnet *dev, struct urb *urb); extern int -- cgit v1.2.3 From 171cfae6b78c3b73d8cd3d405a63e38f15f363f2 Mon Sep 17 00:00:00 2001 From: Lech Perczak Date: Wed, 13 Apr 2022 03:44:16 +0200 Subject: rndis_host: limit scope of bogus MAC address detection to ZTE devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reporting of bogus MAC addresses and ignoring configuration of new destination address wasn't observed outside of a range of ZTE devices, among which this seems to be the common bug. Align rndis_host driver with implementation found in cdc_ether, which also limits this workaround to ZTE devices. 
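For context, the workaround these ZTE patches share rests on one property of IEEE 802 addressing: bit 0x02 in the first octet marks a locally administered MAC, and the affected modems both report such an address for themselves and ignore any address configured by the host, so the drivers randomize the bogus address at bind time and rewrite the destination MAC of received frames. The snippet below is a minimal, self-contained userspace sketch of that bit test and fixup, assuming only the standard C library; the helper names and example addresses are invented for illustration and are not the kernel interfaces used in the diffs (eth_hw_addr_random(), usbnet_cdc_zte_rx_fixup()).

/* Illustrative userspace sketch of the ZTE bogus-MAC handling; not kernel code. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

#define ETH_ALEN 6

/* Bit 0x02 in the first octet marks a locally administered address; the
 * affected modems report such a MAC, which the drivers treat as bogus.
 */
static bool mac_is_locally_administered(const uint8_t mac[ETH_ALEN])
{
	return mac[0] & 0x02;
}

/* Mirrors the Rx fixup idea: if a received frame still carries the modem's
 * locally administered destination MAC, overwrite it with the address the
 * host actually configured on the interface.
 */
static void fixup_dst_mac(uint8_t *frame, size_t len, const uint8_t host_mac[ETH_ALEN])
{
	if (len >= ETH_ALEN && (frame[0] & 0x02))
		memcpy(frame, host_mac, ETH_ALEN);
}

int main(void)
{
	const uint8_t from_device[ETH_ALEN] = { 0x02, 0x50, 0xf3, 0x00, 0x00, 0x00 };
	const uint8_t host_mac[ETH_ALEN]    = { 0x02, 0xde, 0xad, 0xbe, 0xef, 0x01 };
	uint8_t frame[ETH_ALEN]             = { 0x02, 0x50, 0xf3, 0x00, 0x00, 0x00 };

	if (mac_is_locally_administered(from_device))
		printf("device-reported MAC looks bogus: randomize it at bind time\n");

	fixup_dst_mac(frame, sizeof(frame), host_mac);
	printf("rx frame destination rewritten to %02x:%02x:%02x:...\n",
	       frame[0], frame[1], frame[2]);
	return 0;
}

This is the same test zte_rndis_bind() applies in the diff below, calling eth_hw_addr_random() only when dev->net->dev_addr[0] has the 0x02 bit set, so vendor-assigned addresses are left untouched.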
Suggested-by: Bjørn Mork Cc: Kristian Evensen Cc: Oliver Neukum Signed-off-by: Lech Perczak Signed-off-by: Paolo Abeni --- drivers/net/usb/rndis_host.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 7a9ece2de2c5..4e70dec30e5a 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -418,10 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) goto halt_fail_and_release; } - if (bp[0] & 0x02) - eth_hw_addr_random(net); - else - eth_hw_addr_set(net, bp); + eth_hw_addr_set(net, bp); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); @@ -463,6 +460,16 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf) return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS); } +static int zte_rndis_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status = rndis_bind(dev, intf); + + if (!status && (dev->net->dev_addr[0] & 0x02)) + eth_hw_addr_random(dev->net); + + return status; +} + void rndis_unbind(struct usbnet *dev, struct usb_interface *intf) { struct rndis_halt *halt; @@ -615,7 +622,7 @@ static const struct driver_info zte_rndis_info = { .description = "ZTE RNDIS device", .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, .data = RNDIS_DRIVER_DATA_DST_MAC_FIXUP, - .bind = rndis_bind, + .bind = zte_rndis_bind, .unbind = rndis_unbind, .status = rndis_status, .rx_fixup = rndis_rx_fixup, -- cgit v1.2.3 From b3fc79225f055af7ef48b47a90752c31cc062e6e Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 12 Apr 2022 18:31:59 +0200 Subject: net: mvneta: add support for page_pool_get_stats Introduce support for the page_pool stats API into mvneta driver. Report page_pool stats through ethtool. Reviewed-by: Andrew Lunn Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/Kconfig | 1 + drivers/net/ethernet/marvell/mvneta.c | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 4cb55724001b..f58a1c0144ba 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -62,6 +62,7 @@ config MVNETA select MVMDIO select PHYLINK select PAGE_POOL + select PAGE_POOL_STATS help This driver supports the network interface units in the Marvell ARMADA XP, ARMADA 370, ARMADA 38x and diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 934f6dd90992..f6a54c7f0c69 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4735,6 +4735,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) memcpy(data + i * ETH_GSTRING_LEN, mvneta_statistics[i].name, ETH_GSTRING_LEN); + + data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); + page_pool_ethtool_stats_get_strings(data); } } @@ -4847,6 +4850,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) } } +static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) +{ + struct page_pool_stats stats = {}; + int i; + + for (i = 0; i < rxq_number; i++) + page_pool_get_stats(pp->rxqs[i].page_pool, &stats); + + page_pool_ethtool_stats_get(data, &stats); +} + static void mvneta_ethtool_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -4857,12 +4871,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev, for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) *data++ = pp->ethtool_stats[i]; + + mvneta_ethtool_pp_stats(pp, data); } static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) { if (sset == ETH_SS_STATS) - return ARRAY_SIZE(mvneta_statistics); + return ARRAY_SIZE(mvneta_statistics) + + page_pool_ethtool_stats_get_count(); + return -EOPNOTSUPP; } -- cgit v1.2.3 From f623f83ae77396a5eda25451335295c0141c8c46 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 13 Apr 2022 10:44:40 +0200 Subject: geneve: avoid indirect calls in GRO path, when possible In the most common setups, the geneve tunnels use an inner ethernet encapsulation. In the GRO path, when such condition is true, we can call directly the relevant GRO helper and avoid a few indirect calls. Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 7db6c135ac6c..2495a5719e1c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -533,14 +533,16 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk, } } + skb_gro_pull(skb, gh_len); + skb_gro_postpull_rcsum(skb, gh, gh_len); type = gh->proto_type; + if (likely(type == htons(ETH_P_TEB))) + return call_gro_receive(eth_gro_receive, head, skb); ptype = gro_find_receive_by_type(type); if (!ptype) goto out; - skb_gro_pull(skb, gh_len); - skb_gro_postpull_rcsum(skb, gh, gh_len); pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); flush = 0; @@ -563,6 +565,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, gh_len = geneve_hlen(gh); type = gh->proto_type; + /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */ + if (likely(type == htons(ETH_P_TEB))) + return eth_gro_complete(skb, nhoff + gh_len); + ptype = gro_find_complete_by_type(type); if (ptype) err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); -- cgit v1.2.3 From c557a9ae4960a8aaede722833e567897f05aa9ef Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 13 Apr 2022 09:35:29 +0000 Subject: net: ethernet: ti: cpsw_new: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw_new.c | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index dfa0a9cf9d89..0f31cb4168bb 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -449,11 +449,9 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual @@ -831,11 +829,9 @@ static int cpsw_ndo_open(struct net_device *ndev) dev_info(priv->dev, "starting ndev. mode: %s\n", cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac"); - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } /* Notify the stack of the actual queue counts. 
*/ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); @@ -987,11 +983,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } vid = cpsw->slaves[slave_no].port_vlan; flags = ALE_VLAN | ALE_SECURE; @@ -1026,11 +1020,9 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } /* reset the return code as pm_runtime_get_sync() can return * non zero values as well. @@ -1920,9 +1912,8 @@ static int cpsw_probe(struct platform_device *pdev) /* Need to enable clocks with runtime PM api to access module * registers */ - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } @@ -2047,11 +2038,9 @@ static int cpsw_remove(struct platform_device *pdev) struct cpsw_common *cpsw = platform_get_drvdata(pdev); int ret; - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) return ret; - } cpsw_unregister_notifiers(cpsw); cpsw_unregister_devlink(cpsw); -- cgit v1.2.3 From 85648865bb95e3c2b10d22687fc5998cb5ae37cc Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 13 Apr 2022 09:38:01 +0000 Subject: net: stmmac: stmmac_main: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4a4b3651ab3e..580cc5d3c4fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3643,11 +3643,9 @@ static int stmmac_open(struct net_device *dev) u32 chan; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI && @@ -5886,11 +5884,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr) struct stmmac_priv *priv = netdev_priv(ndev); int ret = 0; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } ret = eth_mac_addr(ndev, addr); if (ret) @@ -6220,11 +6216,9 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi bool is_double = false; int ret; - ret = pm_runtime_get_sync(priv->device); - if (ret < 0) { - pm_runtime_put_noidle(priv->device); + ret = pm_runtime_resume_and_get(priv->device); + if (ret < 0) return ret; - } if (be16_to_cpu(proto) == ETH_P_8021AD) is_double = true; -- cgit v1.2.3 From be52d266d2930f8963e49de68c59be3ca431b98d Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 13 Apr 2022 09:38:36 +0000 Subject: net: ethernet: ti: cpsw_priv: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw_priv.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 4dbd327c6a4d..887285c57db8 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -755,11 +755,9 @@ int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) return -EINVAL; } - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); pm_runtime_put(cpsw->dev); @@ -971,11 +969,9 @@ static int cpsw_set_cbs(struct net_device *ndev, return -1; } - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } bw = qopt->enable ? 
qopt->idleslope : 0; ret = cpsw_set_fifo_rlimit(priv, fifo, bw); @@ -1009,11 +1005,9 @@ static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) if (mqprio->mode != TC_MQPRIO_MODE_DCB) return -EINVAL; - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); + ret = pm_runtime_resume_and_get(cpsw->dev); + if (ret < 0) return ret; - } if (num_tc) { for (i = 0; i < 8; i++) { -- cgit v1.2.3 From 349454526f5fe6488083c35306b04acd0d065bbb Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:25 +0300 Subject: mlxsw: core: Extend interfaces for cable info access with slot argument Extend all cable info APIs with 'slot_index' argument. For main board, slot will always be set to zero and these APIs will work as before. If reading cable information is required from cages located on line cards, slot should be set to the physical slot number, where line card is located in modular systems. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 198 ++++++++++++--------- drivers/net/ethernet/mellanox/mlxsw/core_env.h | 43 +++-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 10 +- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 4 +- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 24 +-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 21 +-- .../net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 18 +- 7 files changed, 184 insertions(+), 134 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index f1bb243dfb8c..95fbfb1ca421 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -28,7 +28,8 @@ struct mlxsw_env { struct mlxsw_env_module_info module_info[]; }; -static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module) +static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, + u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(core); int err; @@ -44,33 +45,35 @@ static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module) return err; } -static int mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 module) +static int mlxsw_env_validate_module_type(struct mlxsw_core *core, + u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(core); int err; mutex_lock(&mlxsw_env->module_info_lock); - err = __mlxsw_env_validate_module_type(core, module); + err = __mlxsw_env_validate_module_type(core, slot_index, module); mutex_unlock(&mlxsw_env->module_info_lock); return err; } static int -mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp, - bool *cmis) +mlxsw_env_validate_cable_ident(struct mlxsw_core *core, u8 slot_index, int id, + bool *qsfp, bool *cmis) { char mcia_pl[MLXSW_REG_MCIA_LEN]; char *eeprom_tmp; u8 ident; int err; - err = mlxsw_env_validate_module_type(core, id); + err = mlxsw_env_validate_module_type(core, slot_index, id); if (err) return err; - mlxsw_reg_mcia_pack(mcia_pl, 0, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, - 1, MLXSW_REG_MCIA_I2C_ADDR_LOW); + mlxsw_reg_mcia_pack(mcia_pl, slot_index, id, 0, + MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1, + MLXSW_REG_MCIA_I2C_ADDR_LOW); err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); if (err) return err; @@ -99,8 +102,8 @@ mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, bool *qsfp, } static int 
-mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, - u16 offset, u16 size, void *data, +mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, u16 offset, u16 size, void *data, bool qsfp, unsigned int *p_read_size) { char mcia_pl[MLXSW_REG_MCIA_LEN]; @@ -145,7 +148,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, } } - mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, page, offset, size, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, offset, size, i2c_addr); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl); @@ -163,8 +166,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, return 0; } -int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, - int off, int *temp) +int +mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, u8 slot_index, + int module, int off, int *temp) { unsigned int module_temp, module_crit, module_emerg; union { @@ -178,8 +182,9 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, int page; int err; - mlxsw_reg_mtmp_pack(mtmp_pl, 0, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, - false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, + MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, + false); err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl); if (err) return err; @@ -208,7 +213,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, */ /* Validate module identifier value. */ - err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis); + err = mlxsw_env_validate_cable_ident(core, slot_index, module, &qsfp, + &cmis); if (err) return err; @@ -220,12 +226,12 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM; else page = MLXSW_REG_MCIA_TH_PAGE_NUM; - mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, page, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page, MLXSW_REG_MCIA_TH_PAGE_OFF + off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_LOW); } else { - mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, MLXSW_REG_MCIA_PAGE0_LO, off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_HIGH); @@ -243,8 +249,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, } int mlxsw_env_get_module_info(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_modinfo *modinfo) + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_modinfo *modinfo) { u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE]; u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE; @@ -252,15 +258,16 @@ int mlxsw_env_get_module_info(struct net_device *netdev, unsigned int read_size; int err; - err = mlxsw_env_validate_module_type(mlxsw_core, module); + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { netdev_err(netdev, "EEPROM is not equipped on port module type"); return err; } - err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset, - module_info, false, &read_size); + err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, module, 0, + offset, module_info, false, + &read_size); if (err) return err; @@ -289,9 +296,10 @@ int mlxsw_env_get_module_info(struct net_device *netdev, break; case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: /* Verify if transceiver provides diagnostic monitoring page */ - err = mlxsw_env_query_module_eeprom(mlxsw_core, 
module, - SFP_DIAGMON, 1, &diag_mon, - false, &read_size); + err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, + module, SFP_DIAGMON, 1, + &diag_mon, false, + &read_size); if (err) return err; @@ -330,8 +338,9 @@ int mlxsw_env_get_module_info(struct net_device *netdev, EXPORT_SYMBOL(mlxsw_env_get_module_info); int mlxsw_env_get_module_eeprom(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_eeprom *ee, u8 *data) + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_eeprom *ee, + u8 *data) { int offset = ee->offset; unsigned int read_size; @@ -344,12 +353,14 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, memset(data, 0, ee->len); /* Validate module identifier value. */ - err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis); + err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module, + &qsfp, &cmis); if (err) return err; while (i < ee->len) { - err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset, + err = mlxsw_env_query_module_eeprom(mlxsw_core, slot_index, + module, offset, ee->len - i, data + i, qsfp, &read_size); if (err) { @@ -395,7 +406,8 @@ static int mlxsw_env_mcia_status_process(const char *mcia_pl, } int -mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack) { @@ -403,7 +415,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, u16 device_addr; int err; - err = mlxsw_env_validate_module_type(mlxsw_core, module); + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type"); return err; @@ -420,7 +432,7 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, size = min_t(u8, page->length - bytes_read, MLXSW_REG_MCIA_EEPROM_SIZE); - mlxsw_reg_mcia_pack(mcia_pl, 0, module, 0, page->page, + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, 0, page->page, device_addr + bytes_read, size, page->i2c_address); mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank); @@ -444,18 +456,20 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, } EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page); -static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module) +static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, 0, module); + mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module); mlxsw_reg_pmaos_rst_set(pmaos_pl, true); return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); } int mlxsw_env_reset_module(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, u8 module, u32 *flags) + struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, u32 *flags) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); u32 req = *flags; @@ -467,7 +481,7 @@ int mlxsw_env_reset_module(struct net_device *netdev, mutex_lock(&mlxsw_env->module_info_lock); - err = __mlxsw_env_validate_module_type(mlxsw_core, module); + err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { netdev_err(netdev, "Reset module is not supported on port module type\n"); goto out; @@ -486,7 +500,7 @@ int mlxsw_env_reset_module(struct net_device *netdev, goto out; } - err = 
mlxsw_env_module_reset(mlxsw_core, module); + err = mlxsw_env_module_reset(mlxsw_core, slot_index, module); if (err) { netdev_err(netdev, "Failed to reset module\n"); goto out; @@ -501,7 +515,8 @@ out: EXPORT_SYMBOL(mlxsw_env_reset_module); int -mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, struct ethtool_module_power_mode_params *params, struct netlink_ext_ack *extack) { @@ -512,7 +527,7 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, mutex_lock(&mlxsw_env->module_info_lock); - err = __mlxsw_env_validate_module_type(mlxsw_core, module); + err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, "Power mode is not supported on port module type"); goto out; @@ -520,7 +535,7 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, params->policy = mlxsw_env->module_info[module].power_mode_policy; - mlxsw_reg_mcion_pack(mcion_pl, 0, module); + mlxsw_reg_mcion_pack(mcion_pl, slot_index, module); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode"); @@ -543,12 +558,12 @@ out: EXPORT_SYMBOL(mlxsw_env_get_module_power_mode); static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core, - u8 module, bool enable) + u8 slot_index, u8 module, bool enable) { enum mlxsw_reg_pmaos_admin_status admin_status; char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, 0, module); + mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, module); admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED : MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED; mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status); @@ -558,12 +573,13 @@ static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core, } static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core, - u8 module, bool low_power) + u8 slot_index, u8 module, + bool low_power) { u16 eeprom_override_mask, eeprom_override; char pmmp_pl[MLXSW_REG_PMMP_LEN]; - mlxsw_reg_pmmp_pack(pmmp_pl, 0, module); + mlxsw_reg_pmmp_pack(pmmp_pl, slot_index, module); mlxsw_reg_pmmp_sticky_set(pmmp_pl, true); /* Mask all the bits except low power mode. 
*/ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK; @@ -576,24 +592,26 @@ static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core, } static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, - u8 module, bool low_power, + u8 slot_index, u8 module, + bool low_power, struct netlink_ext_ack *extack) { int err; - err = mlxsw_env_module_enable_set(mlxsw_core, module, false); + err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to disable module"); return err; } - err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power); + err = mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module, + low_power); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode"); goto err_module_low_power_set; } - err = mlxsw_env_module_enable_set(mlxsw_core, module, true); + err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to enable module"); goto err_module_enable_set; @@ -602,14 +620,16 @@ static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, return 0; err_module_enable_set: - mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power); + mlxsw_env_module_low_power_set(mlxsw_core, slot_index, module, + !low_power); err_module_low_power_set: - mlxsw_env_module_enable_set(mlxsw_core, module, true); + mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, true); return err; } int -mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, enum ethtool_module_power_mode_policy policy, struct netlink_ext_ack *extack) { @@ -625,7 +645,7 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, mutex_lock(&mlxsw_env->module_info_lock); - err = __mlxsw_env_validate_module_type(mlxsw_core, module); + err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, "Power mode set is not supported on port module type"); @@ -640,8 +660,8 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, goto out_set_policy; low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO; - err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power, - extack); + err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, + low_power, extack); if (err) goto out; @@ -654,14 +674,14 @@ out: EXPORT_SYMBOL(mlxsw_env_set_module_power_mode); static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, - u8 module, + u8 slot_index, u8 module, bool *p_has_temp_sensor) { char mtbr_pl[MLXSW_REG_MTBR_LEN]; u16 temp; int err; - mlxsw_reg_mtbr_pack(mtbr_pl, 0, + mlxsw_reg_mtbr_pack(mtbr_pl, slot_index, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl); if (err) @@ -682,13 +702,15 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, return 0; } -static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, - u16 sensor_index, bool enable) +static int +mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index, + u16 sensor_index, bool enable) { char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; enum mlxsw_reg_mtmp_tee tee; int err, threshold_hi; + mlxsw_reg_mtmp_slot_index_set(mtmp_pl, slot_index); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index); err = mlxsw_reg_query(mlxsw_core, 
MLXSW_REG(mtmp), mtmp_pl); if (err) @@ -696,6 +718,7 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, if (enable) { err = mlxsw_env_module_temp_thresholds_get(mlxsw_core, + slot_index, sensor_index - MLXSW_REG_MTMP_MODULE_INDEX_MIN, SFP_TEMP_HIGH_WARN, @@ -722,14 +745,15 @@ static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl); } -static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core) +static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core, + u8 slot_index) { int i, err, sensor_index; bool has_temp_sensor; for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) { - err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i, - &has_temp_sensor); + err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index, + i, &has_temp_sensor); if (err) return err; @@ -737,7 +761,8 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core) continue; sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN; - err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true); + err = mlxsw_env_temp_event_set(mlxsw_core, slot_index, + sensor_index, true); if (err) return err; } @@ -838,6 +863,7 @@ static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env) struct mlxsw_env_module_plug_unplug_event { struct mlxsw_env *mlxsw_env; + u8 slot_index; u8 module; struct work_struct work; }; @@ -858,7 +884,9 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) mlxsw_env->module_info[event->module].is_overheat = false; mutex_unlock(&mlxsw_env->module_info_lock); - err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module, + err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, + event->slot_index, + event->module, &has_temp_sensor); /* Do not disable events on modules without sensors or faulty sensors * because FW returns errors. 
@@ -870,7 +898,8 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) goto out; sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN; - mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true); + mlxsw_env_temp_event_set(mlxsw_env->core, event->slot_index, + sensor_index, true); out: kfree(event); @@ -897,6 +926,7 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl, return; event->mlxsw_env = mlxsw_env; + event->slot_index = 0; event->module = module; INIT_WORK(&event->work, mlxsw_env_pmpe_event_work); mlxsw_core_schedule_work(&event->work); @@ -924,14 +954,15 @@ mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env) } static int -mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core) +mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core, + u8 slot_index) { int i, err; for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, 0, i); + mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i); mlxsw_reg_pmaos_e_set(pmaos_pl, MLXSW_REG_PMAOS_E_GENERATE_EVENT); mlxsw_reg_pmaos_ee_set(pmaos_pl, true); @@ -943,8 +974,8 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core) } int -mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, - u64 *p_counter) +mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, u64 *p_counter) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); @@ -956,7 +987,8 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, } EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get); -void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module) +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); @@ -966,7 +998,8 @@ void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module) } EXPORT_SYMBOL(mlxsw_env_module_port_map); -void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module) +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); @@ -976,7 +1009,8 @@ void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module) } EXPORT_SYMBOL(mlxsw_env_module_port_unmap); -int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module) +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int err = 0; @@ -993,8 +1027,8 @@ int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module) /* Transition to high power mode following first port using the module * being put administratively up. 
*/ - err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false, - NULL); + err = __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, + false, NULL); if (err) goto out_unlock; @@ -1006,7 +1040,8 @@ out_unlock: } EXPORT_SYMBOL(mlxsw_env_module_port_up); -void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module) +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); @@ -1024,7 +1059,8 @@ void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module) /* Transition to low power mode following last port using the module * being put administratively down. */ - __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL); + __mlxsw_env_set_module_power_mode(mlxsw_core, slot_index, module, true, + NULL); out_unlock: mutex_unlock(&mlxsw_env->module_info_lock); @@ -1032,7 +1068,7 @@ out_unlock: EXPORT_SYMBOL(mlxsw_env_module_port_down); static int -mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core) +mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int i; @@ -1041,7 +1077,7 @@ mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core) char pmtm_pl[MLXSW_REG_PMTM_LEN]; int err; - mlxsw_reg_pmtm_pack(pmtm_pl, 0, i); + mlxsw_reg_pmtm_pack(pmtm_pl, slot_index, i); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl); if (err) return err; @@ -1091,15 +1127,15 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) if (err) goto err_module_plug_event_register; - err = mlxsw_env_module_oper_state_event_enable(mlxsw_core); + err = mlxsw_env_module_oper_state_event_enable(mlxsw_core, 0); if (err) goto err_oper_state_event_enable; - err = mlxsw_env_module_temp_event_enable(mlxsw_core); + err = mlxsw_env_module_temp_event_enable(mlxsw_core, 0); if (err) goto err_temp_event_enable; - err = mlxsw_env_module_type_set(mlxsw_core); + err = mlxsw_env_module_type_set(mlxsw_core, 0); if (err) goto err_type_set; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index ec6564e5d2ee..6b494c64a4d7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -9,47 +9,56 @@ struct ethtool_modinfo; struct ethtool_eeprom; -int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, - int off, int *temp); +int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, + u8 slot_index, int module, int off, + int *temp); int mlxsw_env_get_module_info(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_modinfo *modinfo); + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_modinfo *modinfo); int mlxsw_env_get_module_eeprom(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, int module, - struct ethtool_eeprom *ee, u8 *data); + struct mlxsw_core *mlxsw_core, u8 slot_index, + int module, struct ethtool_eeprom *ee, + u8 *data); int -mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); int mlxsw_env_reset_module(struct net_device *netdev, - struct mlxsw_core *mlxsw_core, u8 module, - u32 *flags); + struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, u32 *flags); int 
-mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, struct ethtool_module_power_mode_params *params, struct netlink_ext_ack *extack); int -mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module, +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, enum ethtool_module_power_mode_policy policy, struct netlink_ext_ack *extack); int -mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, - u64 *p_counter); +mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, u64 *p_counter); -void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module); +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); -void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module); +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); -int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module); +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); -void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module); +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module); int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env); void mlxsw_env_fini(struct mlxsw_env *env); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 2bc4c4556895..5df54a5bf292 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -311,8 +311,9 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, - SFP_TEMP_HIGH_WARN, p_temp); + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, 0, + module, SFP_TEMP_HIGH_WARN, + p_temp); if (err) { dev_err(dev, "Failed to query module temperature thresholds\n"); return err; @@ -345,8 +346,9 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, - SFP_TEMP_HIGH_ALARM, p_temp); + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, 0, + module, SFP_TEMP_HIGH_ALARM, + p_temp); if (err) { dev_err(dev, "Failed to query module temperature thresholds\n"); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index adb2820430b1..d64af27e5bac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -150,13 +150,13 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, * EEPROM if we got valid thresholds from MTMP. 
*/ if (!emerg_temp || !crit_temp) { - err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + err = mlxsw_env_module_temp_thresholds_get(core, 0, tz->module, SFP_TEMP_HIGH_WARN, &crit_temp); if (err) return err; - err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + err = mlxsw_env_module_temp_thresholds_get(core, 0, tz->module, SFP_TEMP_HIGH_ALARM, &emerg_temp); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 3bc012dafd08..eb906b73b4e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -59,7 +59,8 @@ static int mlxsw_m_port_open(struct net_device *dev) struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; - return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module); + return mlxsw_env_module_port_up(mlxsw_m->core, 0, + mlxsw_m_port->module); } static int mlxsw_m_port_stop(struct net_device *dev) @@ -67,7 +68,7 @@ static int mlxsw_m_port_stop(struct net_device *dev) struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; - mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module); + mlxsw_env_module_port_down(mlxsw_m->core, 0, mlxsw_m_port->module); return 0; } @@ -110,7 +111,7 @@ static int mlxsw_m_get_module_info(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_info(netdev, core, mlxsw_m_port->module, + return mlxsw_env_get_module_info(netdev, core, 0, mlxsw_m_port->module, modinfo); } @@ -121,8 +122,8 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module, - ee, data); + return mlxsw_env_get_module_eeprom(netdev, core, 0, + mlxsw_m_port->module, ee, data); } static int @@ -133,7 +134,8 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_eeprom_by_page(core, mlxsw_m_port->module, + return mlxsw_env_get_module_eeprom_by_page(core, 0, + mlxsw_m_port->module, page, extack); } @@ -142,7 +144,7 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags) struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module, + return mlxsw_env_reset_module(netdev, core, 0, mlxsw_m_port->module, flags); } @@ -154,7 +156,7 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module, + return mlxsw_env_get_module_power_mode(core, 0, mlxsw_m_port->module, params, extack); } @@ -166,7 +168,7 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev, struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; - return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module, + return mlxsw_env_set_module_power_mode(core, 0, mlxsw_m_port->module, params->policy, extack); } @@ -311,7 +313,7 @@ static int 
mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port, if (WARN_ON_ONCE(module >= max_ports)) return -EINVAL; - mlxsw_env_module_port_map(mlxsw_m->core, module); + mlxsw_env_module_port_map(mlxsw_m->core, 0, module); mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports; return 0; @@ -320,7 +322,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port, static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module) { mlxsw_m->module_to_port[module] = -1; - mlxsw_env_module_port_unmap(mlxsw_m->core, module); + mlxsw_env_module_port_unmap(mlxsw_m->core, 0, module); } static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 8eb05090ffec..684910ca7cf4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -539,7 +539,7 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, char pmlp_pl[MLXSW_REG_PMLP_LEN]; int i, err; - mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module); + mlxsw_env_module_port_map(mlxsw_sp->core, 0, port_mapping->module); mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); @@ -554,19 +554,19 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, return 0; err_pmlp_write: - mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module); + mlxsw_env_module_port_unmap(mlxsw_sp->core, 0, port_mapping->module); return err; } static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port, - u8 module) + u8 slot_index, u8 module) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); - mlxsw_env_module_port_unmap(mlxsw_sp->core, module); + mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module); } static int mlxsw_sp_port_open(struct net_device *dev) @@ -575,7 +575,7 @@ static int mlxsw_sp_port_open(struct net_device *dev) struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = mlxsw_env_module_port_up(mlxsw_sp->core, + err = mlxsw_env_module_port_up(mlxsw_sp->core, 0, mlxsw_sp_port->mapping.module); if (err) return err; @@ -586,7 +586,7 @@ static int mlxsw_sp_port_open(struct net_device *dev) return 0; err_port_admin_status_set: - mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_env_module_port_down(mlxsw_sp->core, 0, mlxsw_sp_port->mapping.module); return err; } @@ -598,7 +598,7 @@ static int mlxsw_sp_port_stop(struct net_device *dev) netif_stop_queue(dev); mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); - mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_env_module_port_down(mlxsw_sp->core, 0, mlxsw_sp_port->mapping.module); return 0; } @@ -1449,7 +1449,7 @@ static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_po u64 overheat_counter; int err; - err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module, + err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, 0, module, &overheat_counter); if (err) return err; @@ -1775,7 +1775,8 @@ err_port_label_info_get: mlxsw_sp_port_swid_set(mlxsw_sp, local_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: - mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 0, + port_mapping->module); return err; } @@ -1804,7 +1805,7 @@ static void 
mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) mlxsw_core_port_fini(mlxsw_sp->core, local_port); mlxsw_sp_port_swid_set(mlxsw_sp, local_port, MLXSW_PORT_SWID_DISABLED_PORT); - mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 0, module); } static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 8b5d7f83b9b0..f72c26ce0391 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -573,7 +573,7 @@ mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port u64 stats; int err; - err = mlxsw_env_module_overheat_counter_get(mlxsw_core, + err = mlxsw_env_module_overheat_counter_get(mlxsw_core, 0, port_mapping.module, &stats); if (err) @@ -1035,7 +1035,7 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - return mlxsw_env_get_module_info(netdev, mlxsw_sp->core, + return mlxsw_env_get_module_info(netdev, mlxsw_sp->core, 0, mlxsw_sp_port->mapping.module, modinfo); } @@ -1046,7 +1046,7 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, + return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 0, mlxsw_sp_port->mapping.module, ee, data); } @@ -1060,8 +1060,8 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, module, page, - extack); + return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, 0, module, + page, extack); } static int @@ -1204,7 +1204,7 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags) struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags); + return mlxsw_env_reset_module(dev, mlxsw_sp->core, 0, module, flags); } static int @@ -1216,8 +1216,8 @@ mlxsw_sp_get_module_power_mode(struct net_device *dev, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params, - extack); + return mlxsw_env_get_module_power_mode(mlxsw_sp->core, 0, module, + params, extack); } static int @@ -1229,7 +1229,7 @@ mlxsw_sp_set_module_power_mode(struct net_device *dev, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module, + return mlxsw_env_set_module_power_mode(mlxsw_sp->core, 0, module, params->policy, extack); } -- cgit v1.2.3 From e5b6a5bac8cc12790eccf69e01372e135f9e4af2 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:26 +0300 Subject: mlxsw: core: Extend port module data structures for line cards The port module core is tasked with module operations such as setting power mode policy and reset. The per-module information is currently stored in one large array suited for non-modular systems where only the main board is present (i.e., slot index 0). 
As a preparation for line cards support, allocate a per line card array according to the queried number of slots in the system. For each line card, allocate a module array according to the queried maximum number of modules per-slot. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 242 +++++++++++++++++-------- 1 file changed, 169 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 95fbfb1ca421..9adaa8978d68 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -21,20 +21,36 @@ struct mlxsw_env_module_info { enum mlxsw_reg_pmtm_module_type type; }; -struct mlxsw_env { - struct mlxsw_core *core; +struct mlxsw_env_line_card { u8 module_count; - struct mutex module_info_lock; /* Protects 'module_info'. */ struct mlxsw_env_module_info module_info[]; }; +struct mlxsw_env { + struct mlxsw_core *core; + u8 max_module_count; /* Maximum number of modules per-slot. */ + u8 num_of_slots; /* Including the main board. */ + struct mutex line_cards_lock; /* Protects line cards. */ + struct mlxsw_env_line_card *line_cards[]; +}; + +static struct +mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + return &mlxsw_env->line_cards[slot_index]->module_info[module]; +} + static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 slot_index, u8 module) { - struct mlxsw_env *mlxsw_env = mlxsw_core_env(core); + struct mlxsw_env_module_info *module_info; int err; - switch (mlxsw_env->module_info[module].type) { + module_info = mlxsw_env_module_info_get(core, slot_index, module); + switch (module_info->type) { case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR: err = -EINVAL; break; @@ -51,9 +67,9 @@ static int mlxsw_env_validate_module_type(struct mlxsw_core *core, struct mlxsw_env *mlxsw_env = mlxsw_core_env(core); int err; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); err = __mlxsw_env_validate_module_type(core, slot_index, module); - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } @@ -472,6 +488,7 @@ int mlxsw_env_reset_module(struct net_device *netdev, u8 module, u32 *flags) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; u32 req = *flags; int err; @@ -479,7 +496,7 @@ int mlxsw_env_reset_module(struct net_device *netdev, !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) return 0; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { @@ -487,13 +504,14 @@ int mlxsw_env_reset_module(struct net_device *netdev, goto out; } - if (mlxsw_env->module_info[module].num_ports_up) { + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + if (module_info->num_ports_up) { netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n"); err = -EINVAL; goto out; } - if (mlxsw_env->module_info[module].num_ports_mapped > 1 && + if (module_info->num_ports_mapped > 1 && !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) { netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n"); 
err = -EINVAL; @@ -509,7 +527,7 @@ int mlxsw_env_reset_module(struct net_device *netdev, *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)); out: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } EXPORT_SYMBOL(mlxsw_env_reset_module); @@ -521,11 +539,12 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, struct netlink_ext_ack *extack) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; char mcion_pl[MLXSW_REG_MCION_LEN]; u32 status_bits; int err; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { @@ -533,7 +552,8 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, goto out; } - params->policy = mlxsw_env->module_info[module].power_mode_policy; + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + params->policy = module_info->power_mode_policy; mlxsw_reg_mcion_pack(mcion_pl, slot_index, module); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl); @@ -552,7 +572,7 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH; out: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } EXPORT_SYMBOL(mlxsw_env_get_module_power_mode); @@ -634,6 +654,7 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, struct netlink_ext_ack *extack) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; bool low_power; int err = 0; @@ -643,7 +664,7 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, return -EOPNOTSUPP; } - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { @@ -652,11 +673,12 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, goto out; } - if (mlxsw_env->module_info[module].power_mode_policy == policy) + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + if (module_info->power_mode_policy == policy) goto out; /* If any ports are up, we are already in high power mode. 
*/ - if (mlxsw_env->module_info[module].num_ports_up) + if (module_info->num_ports_up) goto out_set_policy; low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO; @@ -666,9 +688,9 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, goto out; out_set_policy: - mlxsw_env->module_info[module].power_mode_policy = policy; + module_info->power_mode_policy = policy; out: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } EXPORT_SYMBOL(mlxsw_env_set_module_power_mode); @@ -748,10 +770,11 @@ mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core, u8 slot_index, static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core, u8 slot_index) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int i, err, sensor_index; bool has_temp_sensor; - for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) { + for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) { err = mlxsw_env_module_has_temp_sensor(mlxsw_core, slot_index, i, &has_temp_sensor); if (err) @@ -779,6 +802,7 @@ struct mlxsw_env_module_temp_warn_event { static void mlxsw_env_mtwe_event_work(struct work_struct *work) { struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env_module_info *module_info; struct mlxsw_env *mlxsw_env; int i, sensor_warning; bool is_overheat; @@ -787,7 +811,7 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work) work); mlxsw_env = event->mlxsw_env; - for (i = 0; i < mlxsw_env->module_count; i++) { + for (i = 0; i < mlxsw_env->max_module_count; i++) { /* 64-127 of sensor_index are mapped to the port modules * sequentially (module 0 is mapped to sensor_index 64, * module 1 to sensor_index 65 and so on) @@ -795,9 +819,10 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work) sensor_warning = mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl, i + MLXSW_REG_MTMP_MODULE_INDEX_MIN); - mutex_lock(&mlxsw_env->module_info_lock); - is_overheat = - mlxsw_env->module_info[i].is_overheat; + mutex_lock(&mlxsw_env->line_cards_lock); + /* MTWE only supports main board. */ + module_info = mlxsw_env_module_info_get(mlxsw_env->core, 0, i); + is_overheat = module_info->is_overheat; if ((is_overheat && sensor_warning) || (!is_overheat && !sensor_warning)) { @@ -805,21 +830,21 @@ static void mlxsw_env_mtwe_event_work(struct work_struct *work) * warning OR current state in "no warning" and MTWE * does not report warning. */ - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); continue; } else if (is_overheat && !sensor_warning) { /* MTWE reports "no warning", turn is_overheat off. */ - mlxsw_env->module_info[i].is_overheat = false; - mutex_unlock(&mlxsw_env->module_info_lock); + module_info->is_overheat = false; + mutex_unlock(&mlxsw_env->line_cards_lock); } else { /* Current state is "no warning" and MTWE reports * "warning", increase the counter and turn is_overheat * on. 
*/ - mlxsw_env->module_info[i].is_overheat = true; - mlxsw_env->module_info[i].module_overheat_counter++; - mutex_unlock(&mlxsw_env->module_info_lock); + module_info->is_overheat = true; + module_info->module_overheat_counter++; + mutex_unlock(&mlxsw_env->line_cards_lock); } } @@ -871,6 +896,7 @@ struct mlxsw_env_module_plug_unplug_event { static void mlxsw_env_pmpe_event_work(struct work_struct *work) { struct mlxsw_env_module_plug_unplug_event *event; + struct mlxsw_env_module_info *module_info; struct mlxsw_env *mlxsw_env; bool has_temp_sensor; u16 sensor_index; @@ -880,9 +906,12 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) work); mlxsw_env = event->mlxsw_env; - mutex_lock(&mlxsw_env->module_info_lock); - mlxsw_env->module_info[event->module].is_overheat = false; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_env->core, + event->slot_index, + event->module); + module_info->is_overheat = false; + mutex_unlock(&mlxsw_env->line_cards_lock); err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->slot_index, @@ -909,12 +938,14 @@ static void mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl, void *priv) { + u8 slot_index = mlxsw_reg_pmpe_slot_index_get(pmpe_pl); struct mlxsw_env_module_plug_unplug_event *event; enum mlxsw_reg_pmpe_module_status module_status; u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl); struct mlxsw_env *mlxsw_env = priv; - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + if (WARN_ON_ONCE(module >= mlxsw_env->max_module_count || + slot_index >= mlxsw_env->num_of_slots)) return; module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl); @@ -926,7 +957,7 @@ mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl, return; event->mlxsw_env = mlxsw_env; - event->slot_index = 0; + event->slot_index = slot_index; event->module = module; INIT_WORK(&event->work, mlxsw_env_pmpe_event_work); mlxsw_core_schedule_work(&event->work); @@ -957,9 +988,10 @@ static int mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core, u8 slot_index) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int i, err; - for (i = 0; i < mlxsw_core_env(mlxsw_core)->module_count; i++) { + for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; mlxsw_reg_pmaos_pack(pmaos_pl, slot_index, i); @@ -978,10 +1010,12 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 slot_ind u8 module, u64 *p_counter) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); - *p_counter = mlxsw_env->module_info[module].module_overheat_counter; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + *p_counter = module_info->module_overheat_counter; + mutex_unlock(&mlxsw_env->line_cards_lock); return 0; } @@ -991,10 +1025,12 @@ void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); - mlxsw_env->module_info[module].num_ports_mapped++; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, 
module); + module_info->num_ports_mapped++; + mutex_unlock(&mlxsw_env->line_cards_lock); } EXPORT_SYMBOL(mlxsw_env_module_port_map); @@ -1002,10 +1038,12 @@ void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); - mlxsw_env->module_info[module].num_ports_mapped--; - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + module_info->num_ports_mapped--; + mutex_unlock(&mlxsw_env->line_cards_lock); } EXPORT_SYMBOL(mlxsw_env_module_port_unmap); @@ -1013,15 +1051,17 @@ int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; int err = 0; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); - if (mlxsw_env->module_info[module].power_mode_policy != + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + if (module_info->power_mode_policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) goto out_inc; - if (mlxsw_env->module_info[module].num_ports_up != 0) + if (module_info->num_ports_up != 0) goto out_inc; /* Transition to high power mode following first port using the module @@ -1033,9 +1073,9 @@ int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index, goto out_unlock; out_inc: - mlxsw_env->module_info[module].num_ports_up++; + module_info->num_ports_up++; out_unlock: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); return err; } EXPORT_SYMBOL(mlxsw_env_module_port_up); @@ -1044,16 +1084,18 @@ void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + struct mlxsw_env_module_info *module_info; - mutex_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->line_cards_lock); - mlxsw_env->module_info[module].num_ports_up--; + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); + module_info->num_ports_up--; - if (mlxsw_env->module_info[module].power_mode_policy != + if (module_info->power_mode_policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) goto out_unlock; - if (mlxsw_env->module_info[module].num_ports_up != 0) + if (module_info->num_ports_up != 0) goto out_unlock; /* Transition to low power mode following last port using the module @@ -1063,17 +1105,57 @@ void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index, NULL); out_unlock: - mutex_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->line_cards_lock); } EXPORT_SYMBOL(mlxsw_env_module_port_down); +static int mlxsw_env_line_cards_alloc(struct mlxsw_env *env) +{ + struct mlxsw_env_module_info *module_info; + int i, j; + + for (i = 0; i < env->num_of_slots; i++) { + env->line_cards[i] = kzalloc(struct_size(env->line_cards[i], + module_info, + env->max_module_count), + GFP_KERNEL); + if (!env->line_cards[i]) + goto kzalloc_err; + + /* Firmware defaults to high power mode policy where modules + * are transitioned to high power mode following plug-in. 
+ */ + for (j = 0; j < env->max_module_count; j++) { + module_info = &env->line_cards[i]->module_info[j]; + module_info->power_mode_policy = + ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH; + } + } + + return 0; + +kzalloc_err: + for (i--; i >= 0; i--) + kfree(env->line_cards[i]); + return -ENOMEM; +} + +static void mlxsw_env_line_cards_free(struct mlxsw_env *env) +{ + int i = env->num_of_slots; + + for (i--; i >= 0; i--) + kfree(env->line_cards[i]); +} + static int mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int i; - for (i = 0; i < mlxsw_env->module_count; i++) { + for (i = 0; i < mlxsw_env->line_cards[slot_index]->module_count; i++) { + struct mlxsw_env_module_info *module_info; char pmtm_pl[MLXSW_REG_PMTM_LEN]; int err; @@ -1082,8 +1164,9 @@ mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) if (err) return err; - mlxsw_env->module_info[i].type = - mlxsw_reg_pmtm_module_type_get(pmtm_pl); + module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, + i); + module_info->type = mlxsw_reg_pmtm_module_type_get(pmtm_pl); } return 0; @@ -1091,32 +1174,38 @@ mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) { + u8 module_count, num_of_slots, max_module_count; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_env *env; - u8 module_count; - int i, err; + int err; mlxsw_reg_mgpir_pack(mgpir_pl, 0); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; - mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count, NULL); + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count, + &num_of_slots); + /* If the system is modular, get the maximum number of modules per-slot. + * Otherwise, get the maximum number of modules on the main board. + */ + max_module_count = num_of_slots ? + mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl) : + module_count; - env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL); + env = kzalloc(struct_size(env, line_cards, num_of_slots + 1), + GFP_KERNEL); if (!env) return -ENOMEM; - /* Firmware defaults to high power mode policy where modules are - * transitioned to high power mode following plug-in. - */ - for (i = 0; i < module_count; i++) - env->module_info[i].power_mode_policy = - ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH; - - mutex_init(&env->module_info_lock); env->core = mlxsw_core; - env->module_count = module_count; + env->num_of_slots = num_of_slots + 1; + env->max_module_count = max_module_count; + err = mlxsw_env_line_cards_alloc(env); + if (err) + goto err_mlxsw_env_line_cards_alloc; + + mutex_init(&env->line_cards_lock); *p_env = env; err = mlxsw_env_temp_warn_event_register(mlxsw_core); @@ -1127,6 +1216,10 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) if (err) goto err_module_plug_event_register; + /* Set 'module_count' only for main board. Actual count for line card + * is to be set after line card is activated. + */ + env->line_cards[0]->module_count = num_of_slots ? 
0 : module_count; err = mlxsw_env_module_oper_state_event_enable(mlxsw_core, 0); if (err) goto err_oper_state_event_enable; @@ -1148,7 +1241,9 @@ err_oper_state_event_enable: err_module_plug_event_register: mlxsw_env_temp_warn_event_unregister(env); err_temp_warn_event_register: - mutex_destroy(&env->module_info_lock); + mutex_destroy(&env->line_cards_lock); + mlxsw_env_line_cards_free(env); +err_mlxsw_env_line_cards_alloc: kfree(env); return err; } @@ -1159,6 +1254,7 @@ void mlxsw_env_fini(struct mlxsw_env *env) /* Make sure there is no more event work scheduled. */ mlxsw_core_flush_owq(); mlxsw_env_temp_warn_event_unregister(env); - mutex_destroy(&env->module_info_lock); + mutex_destroy(&env->line_cards_lock); + mlxsw_env_line_cards_free(env); kfree(env); } -- cgit v1.2.3 From b244143a085e1e6b9bee83bafe99b4e1fc9ee51e Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:27 +0300 Subject: mlxsw: core: Move port module events enablement to a separate function Use a separate function for enablement of port module events such plug/unplug and temperature threshold crossing. The motivation is to reuse the function for line cards. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 39 ++++++++++++++++++++------ 1 file changed, 31 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 9adaa8978d68..b3b8a9015cd6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -1148,6 +1148,28 @@ static void mlxsw_env_line_cards_free(struct mlxsw_env *env) kfree(env->line_cards[i]); } +static int +mlxsw_env_module_event_enable(struct mlxsw_env *mlxsw_env, u8 slot_index) +{ + int err; + + err = mlxsw_env_module_oper_state_event_enable(mlxsw_env->core, + slot_index); + if (err) + return err; + + err = mlxsw_env_module_temp_event_enable(mlxsw_env->core, slot_index); + if (err) + return err; + + return 0; +} + +static void +mlxsw_env_module_event_disable(struct mlxsw_env *mlxsw_env, u8 slot_index) +{ +} + static int mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) { @@ -1220,13 +1242,13 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) * is to be set after line card is activated. */ env->line_cards[0]->module_count = num_of_slots ? 0 : module_count; - err = mlxsw_env_module_oper_state_event_enable(mlxsw_core, 0); - if (err) - goto err_oper_state_event_enable; - - err = mlxsw_env_module_temp_event_enable(mlxsw_core, 0); + /* Enable events only for main board. Line card events are to be + * configured only after line card is activated. Before that, access to + * modules on line cards is not allowed. 
+ */ + err = mlxsw_env_module_event_enable(env, 0); if (err) - goto err_temp_event_enable; + goto err_mlxsw_env_module_event_enable; err = mlxsw_env_module_type_set(mlxsw_core, 0); if (err) @@ -1235,8 +1257,8 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) return 0; err_type_set: -err_temp_event_enable: -err_oper_state_event_enable: + mlxsw_env_module_event_disable(env, 0); +err_mlxsw_env_module_event_enable: mlxsw_env_module_plug_event_unregister(env); err_module_plug_event_register: mlxsw_env_temp_warn_event_unregister(env); @@ -1250,6 +1272,7 @@ err_mlxsw_env_line_cards_alloc: void mlxsw_env_fini(struct mlxsw_env *env) { + mlxsw_env_module_event_disable(env, 0); mlxsw_env_module_plug_event_unregister(env); /* Make sure there is no more event work scheduled. */ mlxsw_core_flush_owq(); -- cgit v1.2.3 From b890ad418e1feada06dddc3f00ee2c8cccf77c13 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:28 +0300 Subject: mlxsw: core_hwmon: Extend internal structures to support multi hwmon objects Currently, mlxsw supports a single hwmon device and registers it with attributes corresponding to the various objects found on the main board such as fans and gearboxes. Line cards can have the same objects, but unlike the main board they can be added and removed while the system is running. The various hwmon objects found on these line cards should be created when the line card becomes available and destroyed when the line card becomes unavailable. The above can be achieved by representing each line card as a different hwmon device and registering / unregistering it when the line card becomes available / unavailable. Prepare for multi hwmon device support by splitting 'struct mlxsw_hwmon' into 'struct mlxsw_hwmon' and 'struct mlxsw_hwmon_dev'. The first will hold information relevant to all hwmon devices, whereas the second will hold per-hwmon device information. Signed-off-by: Vadim Pasternak Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 187 ++++++++++++++--------- 1 file changed, 111 insertions(+), 76 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index 5df54a5bf292..d35aa135beed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -27,7 +27,7 @@ struct mlxsw_hwmon_attr { struct device_attribute dev_attr; - struct mlxsw_hwmon *hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev; unsigned int type_index; char name[32]; }; @@ -40,9 +40,8 @@ static int mlxsw_hwmon_get_attr_index(int index, int count) return index; } -struct mlxsw_hwmon { - struct mlxsw_core *core; - const struct mlxsw_bus_info *bus_info; +struct mlxsw_hwmon_dev { + struct mlxsw_hwmon *hwmon; struct device *hwmon_dev; struct attribute_group group; const struct attribute_group *groups[2]; @@ -53,19 +52,26 @@ struct mlxsw_hwmon { u8 module_sensor_max; }; +struct mlxsw_hwmon { + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + struct mlxsw_hwmon_dev line_cards[]; +}; + static ssize_t mlxsw_hwmon_temp_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; int temp, index; int err; index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, - mlxsw_hwmon->module_sensor_max); + mlxsw_hwmon_dev->module_sensor_max); mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { @@ -82,13 +88,14 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; int temp_max, index; int err; index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, - mlxsw_hwmon->module_sensor_max); + mlxsw_hwmon_dev->module_sensor_max); mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { @@ -105,8 +112,9 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; - char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; unsigned long val; int index; int err; @@ -118,7 +126,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, return -EINVAL; index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, - mlxsw_hwmon->module_sensor_max); + mlxsw_hwmon_dev->module_sensor_max); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -140,7 +148,8 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, { struct 
mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfsm_pl[MLXSW_REG_MFSM_LEN]; int err; @@ -159,7 +168,8 @@ static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char fore_pl[MLXSW_REG_FORE_LEN]; bool fault; int err; @@ -180,7 +190,8 @@ static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfsc_pl[MLXSW_REG_MFSC_LEN]; int err; @@ -200,7 +211,8 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfsc_pl[MLXSW_REG_MFSC_LEN]; unsigned long val; int err; @@ -226,12 +238,13 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtmp_pl[MLXSW_REG_MTMP_LEN]; u8 module; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; mlxsw_reg_mtmp_pack(mtmp_pl, 0, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, false); @@ -264,15 +277,16 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; u8 module, fault; u16 temp; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; - mlxsw_reg_mtbr_pack(mtbr_pl, 0, - MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; + mlxsw_reg_mtbr_pack(mtbr_pl, 0, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, + 1); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); if (err) { dev_err(dev, "Failed to query module temperature sensor\n"); @@ -306,11 +320,12 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + 
struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; u8 module; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, 0, module, SFP_TEMP_HIGH_WARN, p_temp); @@ -341,11 +356,12 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; u8 module; int err; - module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, 0, module, SFP_TEMP_HIGH_ALARM, p_temp); @@ -390,9 +406,9 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev, { struct mlxsw_hwmon_attr *mlxsw_hwmon_attr = container_of(attr, struct mlxsw_hwmon_attr, dev_attr); - struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_attr->hwmon; + struct mlxsw_hwmon_dev *mlxsw_hwmon_dev = mlxsw_hwmon_attr->mlxsw_hwmon_dev; int index = mlxsw_hwmon_attr->type_index - - mlxsw_hwmon->module_sensor_max + 1; + mlxsw_hwmon_dev->module_sensor_max + 1; return sprintf(buf, "gearbox %03u\n", index); } @@ -461,14 +477,15 @@ enum mlxsw_hwmon_attr_type { MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM, }; -static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, +static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev, enum mlxsw_hwmon_attr_type attr_type, - unsigned int type_index, unsigned int num) { + unsigned int type_index, unsigned int num) +{ struct mlxsw_hwmon_attr *mlxsw_hwmon_attr; unsigned int attr_index; - attr_index = mlxsw_hwmon->attrs_count; - mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index]; + attr_index = mlxsw_hwmon_dev->attrs_count; + mlxsw_hwmon_attr = &mlxsw_hwmon_dev->hwmon_attrs[attr_index]; switch (attr_type) { case MLXSW_HWMON_ATTR_TYPE_TEMP: @@ -568,16 +585,17 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, } mlxsw_hwmon_attr->type_index = type_index; - mlxsw_hwmon_attr->hwmon = mlxsw_hwmon; + mlxsw_hwmon_attr->mlxsw_hwmon_dev = mlxsw_hwmon_dev; mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name; sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr); - mlxsw_hwmon->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr; - mlxsw_hwmon->attrs_count++; + mlxsw_hwmon_dev->attrs[attr_index] = &mlxsw_hwmon_attr->dev_attr.attr; + mlxsw_hwmon_dev->attrs_count++; } -static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0}; int i; int err; @@ -587,8 +605,8 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n"); return err; } - mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); - for (i = 0; i < mlxsw_hwmon->sensor_count; i++) { + mlxsw_hwmon_dev->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); + for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) { char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i); @@ -605,18 +623,19 @@ static int 
mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) i); return err; } - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i); } return 0; } -static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mfcr_pl[MLXSW_REG_MFCR_LEN] = {0}; enum mlxsw_reg_mfcr_pwm_frequency freq; unsigned int type_index; @@ -634,10 +653,10 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) num = 0; for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) { if (tacho_active & BIT(type_index)) { - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_FAN_RPM, type_index, num); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_FAN_FAULT, type_index, num++); } @@ -645,15 +664,16 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) num = 0; for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) { if (pwm_active & BIT(type_index)) - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_PWM, type_index, num++); } return 0; } -static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; u8 module_sensor_max; int i, err; @@ -671,28 +691,28 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) * sensor_count are already utilized by the sensors connected through * mtmp register by mlxsw_hwmon_temp_init(). 
*/ - mlxsw_hwmon->module_sensor_max = mlxsw_hwmon->sensor_count + - module_sensor_max; - for (i = mlxsw_hwmon->sensor_count; - i < mlxsw_hwmon->module_sensor_max; i++) { - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_dev->module_sensor_max = mlxsw_hwmon_dev->sensor_count + + module_sensor_max; + for (i = mlxsw_hwmon_dev->sensor_count; + i < mlxsw_hwmon_dev->module_sensor_max; i++) { + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM, i, i); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM, i, i); } @@ -700,8 +720,9 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) return 0; } -static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) +static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) { + struct mlxsw_hwmon *mlxsw_hwmon = mlxsw_hwmon_dev->hwmon; enum mlxsw_reg_mgpir_device_type device_type; int index, max_index, sensor_index; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; @@ -720,10 +741,10 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) !gbox_num) return 0; - index = mlxsw_hwmon->module_sensor_max; - max_index = mlxsw_hwmon->module_sensor_max + gbox_num; + index = mlxsw_hwmon_dev->module_sensor_max; + max_index = mlxsw_hwmon_dev->module_sensor_max + gbox_num; while (index < max_index) { - sensor_index = index % mlxsw_hwmon->module_sensor_max + + sensor_index = index % mlxsw_hwmon_dev->module_sensor_max + MLXSW_REG_MTMP_GBOX_INDEX_MIN; mlxsw_reg_mtmp_pack(mtmp_pl, 0, sensor_index, true, true); err = mlxsw_reg_write(mlxsw_hwmon->core, @@ -733,15 +754,15 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) sensor_index); return err; } - mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_TEMP, - index, index); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, + MLXSW_HWMON_ATTR_TYPE_TEMP, index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, index, index); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_RST, index, index); - mlxsw_hwmon_attr_add(mlxsw_hwmon, + mlxsw_hwmon_attr_add(mlxsw_hwmon_dev, MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL, index, index); index++; @@ -754,44 +775,58 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct mlxsw_hwmon **p_hwmon) { + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_hwmon *mlxsw_hwmon; struct device *hwmon_dev; + u8 num_of_slots; int err; - mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + return err; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, + &num_of_slots); + + mlxsw_hwmon = kzalloc(struct_size(mlxsw_hwmon, line_cards, + num_of_slots + 1), GFP_KERNEL); if 
(!mlxsw_hwmon) return -ENOMEM; + mlxsw_hwmon->core = mlxsw_core; mlxsw_hwmon->bus_info = mlxsw_bus_info; + mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon; - err = mlxsw_hwmon_temp_init(mlxsw_hwmon); + err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_temp_init; - err = mlxsw_hwmon_fans_init(mlxsw_hwmon); + err = mlxsw_hwmon_fans_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_fans_init; - err = mlxsw_hwmon_module_init(mlxsw_hwmon); + err = mlxsw_hwmon_module_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_temp_module_init; - err = mlxsw_hwmon_gearbox_init(mlxsw_hwmon); + err = mlxsw_hwmon_gearbox_init(&mlxsw_hwmon->line_cards[0]); if (err) goto err_temp_gearbox_init; - mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; - mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; + mlxsw_hwmon->line_cards[0].groups[0] = &mlxsw_hwmon->line_cards[0].group; + mlxsw_hwmon->line_cards[0].group.attrs = mlxsw_hwmon->line_cards[0].attrs; hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev, - "mlxsw", mlxsw_hwmon, - mlxsw_hwmon->groups); + "mlxsw", + &mlxsw_hwmon->line_cards[0], + mlxsw_hwmon->line_cards[0].groups); if (IS_ERR(hwmon_dev)) { err = PTR_ERR(hwmon_dev); goto err_hwmon_register; } - mlxsw_hwmon->hwmon_dev = hwmon_dev; + mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev; *p_hwmon = mlxsw_hwmon; return 0; @@ -806,6 +841,6 @@ err_temp_init: void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon) { - hwmon_device_unregister(mlxsw_hwmon->hwmon_dev); + hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev); kfree(mlxsw_hwmon); } -- cgit v1.2.3 From fd27849dd6fd6913c35948653f8e167672655438 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:29 +0300 Subject: mlxsw: core_hwmon: Introduce slot parameter in hwmon interfaces Add 'slot' parameter to 'mlxsw_hwmon_dev' structure. Use this parameter in mlxsw_reg_mtmp_pack(), mlxsw_reg_mtbr_pack(), mlxsw_reg_mgpir_pack() and mlxsw_reg_mtmp_slot_index_set() routines. For main board it'll always be zero, for line cards it'll be set to the physical slot number in modular systems. Signed-off-by: Vadim Pasternak Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 30 ++++++++++++++++-------- 1 file changed, 20 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index d35aa135beed..fff6f248d6f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -50,6 +50,7 @@ struct mlxsw_hwmon_dev { unsigned int attrs_count; u8 sensor_count; u8 module_sensor_max; + u8 slot_index; }; struct mlxsw_hwmon { @@ -72,7 +73,8 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev, index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon_dev->module_sensor_max); - mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false, + false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); @@ -96,7 +98,8 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon_dev->module_sensor_max); - mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, index, false, + false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); @@ -128,6 +131,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, index = mlxsw_hwmon_get_attr_index(mlxsw_hwmon_attr->type_index, mlxsw_hwmon_dev->module_sensor_max); + mlxsw_reg_mtmp_slot_index_set(mtmp_pl, mlxsw_hwmon_dev->slot_index); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) @@ -245,7 +249,7 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; - mlxsw_reg_mtmp_pack(mtmp_pl, 0, + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, false, false); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -285,8 +289,8 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; - mlxsw_reg_mtbr_pack(mtbr_pl, 0, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, - 1); + mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index, + MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); if (err) { dev_err(dev, "Failed to query module temperature sensor\n"); @@ -326,7 +330,8 @@ static int mlxsw_hwmon_module_temp_critical_get(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; - err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, 0, + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, + mlxsw_hwmon_dev->slot_index, module, SFP_TEMP_HIGH_WARN, p_temp); if (err) { @@ -362,7 +367,8 @@ static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev, int err; module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count; - err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, 0, + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, + mlxsw_hwmon_dev->slot_index, module, SFP_TEMP_HIGH_ALARM, p_temp); if (err) { @@ -609,6 +615,8 @@ 
static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) for (i = 0; i < mlxsw_hwmon_dev->sensor_count; i++) { char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0}; + mlxsw_reg_mtmp_slot_index_set(mtmp_pl, + mlxsw_hwmon_dev->slot_index); mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -678,7 +686,7 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) u8 module_sensor_max; int i, err; - mlxsw_reg_mgpir_pack(mgpir_pl, 0); + mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; @@ -730,7 +738,7 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) u8 gbox_num; int err; - mlxsw_reg_mgpir_pack(mgpir_pl, 0); + mlxsw_reg_mgpir_pack(mgpir_pl, mlxsw_hwmon_dev->slot_index); err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; @@ -746,7 +754,8 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) while (index < max_index) { sensor_index = index % mlxsw_hwmon_dev->module_sensor_max + MLXSW_REG_MTMP_GBOX_INDEX_MIN; - mlxsw_reg_mtmp_pack(mtmp_pl, 0, sensor_index, true, true); + mlxsw_reg_mtmp_pack(mtmp_pl, mlxsw_hwmon_dev->slot_index, + sensor_index, true, true); err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); if (err) { @@ -797,6 +806,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, mlxsw_hwmon->core = mlxsw_core; mlxsw_hwmon->bus_info = mlxsw_bus_info; mlxsw_hwmon->line_cards[0].hwmon = mlxsw_hwmon; + mlxsw_hwmon->line_cards[0].slot_index = 0; err = mlxsw_hwmon_temp_init(&mlxsw_hwmon->line_cards[0]); if (err) -- cgit v1.2.3 From ef0df4fa324af23a5b55d3518f5799c87b6b665a Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:30 +0300 Subject: mlxsw: core_thermal: Extend internal structures to support multi thermal areas Introduce intermediate level for thermal zones areas. Currently all thermal zones are associated with thermal objects located within the main board. Such objects are created during driver initialization and removed during driver de-initialization. For line cards in modular system the thermal zones are to be associated with the specific line card. They should be created whenever new line card is available (inserted, validated, powered and enabled) and removed, when line card is getting unavailable. The thermal objects found on the line card #n are accessed by setting slot index to #n, while for access to objects found on the main board slot index should be set to default value zero. Each thermal area contains the set of thermal zones associated with particular slot index. Thus introduction of thermal zone areas allows to use the same APIs for the main board and line cards, by adding slot index argument. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 148 +++++++++++++-------- 1 file changed, 91 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index d64af27e5bac..6d186cc74df3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -82,6 +82,15 @@ struct mlxsw_thermal_module { struct thermal_zone_device *tzdev; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; int module; /* Module or gearbox number */ + u8 slot_index; +}; + +struct mlxsw_thermal_area { + struct mlxsw_thermal_module *tz_module_arr; + u8 tz_module_num; + struct mlxsw_thermal_module *tz_gearbox_arr; + u8 tz_gearbox_num; + u8 slot_index; }; struct mlxsw_thermal { @@ -92,12 +101,9 @@ struct mlxsw_thermal { struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1]; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; - struct mlxsw_thermal_module *tz_module_arr; - u8 tz_module_num; - struct mlxsw_thermal_module *tz_gearbox_arr; - u8 tz_gearbox_num; unsigned int tz_highest_score; struct thermal_zone_device *tz_highest_dev; + struct mlxsw_thermal_area line_cards[]; }; static inline u8 mlxsw_state_to_duty(int state) @@ -150,13 +156,15 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, * EEPROM if we got valid thresholds from MTMP. */ if (!emerg_temp || !crit_temp) { - err = mlxsw_env_module_temp_thresholds_get(core, 0, tz->module, + err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index, + tz->module, SFP_TEMP_HIGH_WARN, &crit_temp); if (err) return err; - err = mlxsw_env_module_temp_thresholds_get(core, 0, tz->module, + err = mlxsw_env_module_temp_thresholds_get(core, tz->slot_index, + tz->module, SFP_TEMP_HIGH_ALARM, &emerg_temp); if (err) @@ -423,15 +431,16 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev, static void mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core, - u16 sensor_index, int *p_temp, - int *p_crit_temp, + u8 slot_index, u16 sensor_index, + int *p_temp, int *p_crit_temp, int *p_emerg_temp) { char mtmp_pl[MLXSW_REG_MTMP_LEN]; int err; /* Read module temperature and thresholds. */ - mlxsw_reg_mtmp_pack(mtmp_pl, 0, sensor_index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, slot_index, sensor_index, + false, false); err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl); if (err) { /* Set temperature and thresholds to zero to avoid passing @@ -462,6 +471,7 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, /* Read module temperature and thresholds. 
*/ mlxsw_thermal_module_temp_and_thresholds_get(thermal->core, + tz->slot_index, sensor_index, &temp, &crit_temp, &emerg_temp); *p_temp = temp; @@ -576,7 +586,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, int err; index = MLXSW_REG_MTMP_GBOX_INDEX_MIN + tz->module; - mlxsw_reg_mtmp_pack(mtmp_pl, 0, index, false, false); + mlxsw_reg_mtmp_pack(mtmp_pl, tz->slot_index, index, false, false); err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl); if (err) @@ -704,25 +714,28 @@ static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev) static int mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal *thermal, u8 module) + struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area, u8 module) { struct mlxsw_thermal_module *module_tz; int dummy_temp, crit_temp, emerg_temp; u16 sensor_index; sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module; - module_tz = &thermal->tz_module_arr[module]; + module_tz = &area->tz_module_arr[module]; /* Skip if parent is already set (case of port split). */ if (module_tz->parent) return 0; module_tz->module = module; + module_tz->slot_index = area->slot_index; module_tz->parent = thermal; memcpy(module_tz->trips, default_thermal_trips, sizeof(thermal->trips)); /* Initialize all trip point. */ mlxsw_thermal_module_trips_reset(module_tz); /* Read module temperature and thresholds. */ - mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp, + mlxsw_thermal_module_temp_and_thresholds_get(core, area->slot_index, + sensor_index, &dummy_temp, &crit_temp, &emerg_temp); /* Update trip point according to the module data. */ return mlxsw_thermal_module_trips_update(dev, core, module_tz, @@ -740,34 +753,39 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) static int mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal *thermal) + struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { struct mlxsw_thermal_module *module_tz; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; int i, err; - mlxsw_reg_mgpir_pack(mgpir_pl, 0); + mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, - &thermal->tz_module_num, NULL); + &area->tz_module_num, NULL); - thermal->tz_module_arr = kcalloc(thermal->tz_module_num, - sizeof(*thermal->tz_module_arr), - GFP_KERNEL); - if (!thermal->tz_module_arr) + /* For modular system module counter could be zero. 
*/ + if (!area->tz_module_num) + return 0; + + area->tz_module_arr = kcalloc(area->tz_module_num, + sizeof(*area->tz_module_arr), + GFP_KERNEL); + if (!area->tz_module_arr) return -ENOMEM; - for (i = 0; i < thermal->tz_module_num; i++) { - err = mlxsw_thermal_module_init(dev, core, thermal, i); + for (i = 0; i < area->tz_module_num; i++) { + err = mlxsw_thermal_module_init(dev, core, thermal, area, i); if (err) goto err_thermal_module_init; } - for (i = 0; i < thermal->tz_module_num; i++) { - module_tz = &thermal->tz_module_arr[i]; + for (i = 0; i < area->tz_module_num; i++) { + module_tz = &area->tz_module_arr[i]; if (!module_tz->parent) continue; err = mlxsw_thermal_module_tz_init(module_tz); @@ -779,20 +797,21 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, err_thermal_module_tz_init: err_thermal_module_init: - for (i = thermal->tz_module_num - 1; i >= 0; i--) - mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); - kfree(thermal->tz_module_arr); + for (i = area->tz_module_num - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&area->tz_module_arr[i]); + kfree(area->tz_module_arr); return err; } static void -mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal) +mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { int i; - for (i = thermal->tz_module_num - 1; i >= 0; i--) - mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); - kfree(thermal->tz_module_arr); + for (i = area->tz_module_num - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&area->tz_module_arr[i]); + kfree(area->tz_module_arr); } static int @@ -828,7 +847,8 @@ mlxsw_thermal_gearbox_tz_fini(struct mlxsw_thermal_module *gearbox_tz) static int mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal *thermal) + struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { enum mlxsw_reg_mgpir_device_type device_type; struct mlxsw_thermal_module *gearbox_tz; @@ -837,7 +857,7 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, int i; int err; - mlxsw_reg_mgpir_pack(mgpir_pl, 0); + mlxsw_reg_mgpir_pack(mgpir_pl, area->slot_index); err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); if (err) return err; @@ -848,19 +868,20 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, !gbox_num) return 0; - thermal->tz_gearbox_num = gbox_num; - thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num, - sizeof(*thermal->tz_gearbox_arr), - GFP_KERNEL); - if (!thermal->tz_gearbox_arr) + area->tz_gearbox_num = gbox_num; + area->tz_gearbox_arr = kcalloc(area->tz_gearbox_num, + sizeof(*area->tz_gearbox_arr), + GFP_KERNEL); + if (!area->tz_gearbox_arr) return -ENOMEM; - for (i = 0; i < thermal->tz_gearbox_num; i++) { - gearbox_tz = &thermal->tz_gearbox_arr[i]; + for (i = 0; i < area->tz_gearbox_num; i++) { + gearbox_tz = &area->tz_gearbox_arr[i]; memcpy(gearbox_tz->trips, default_thermal_trips, sizeof(thermal->trips)); gearbox_tz->module = i; gearbox_tz->parent = thermal; + gearbox_tz->slot_index = area->slot_index; err = mlxsw_thermal_gearbox_tz_init(gearbox_tz); if (err) goto err_thermal_gearbox_tz_init; @@ -870,19 +891,20 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, err_thermal_gearbox_tz_init: for (i--; i >= 0; i--) - mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]); - kfree(thermal->tz_gearbox_arr); + mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]); + kfree(area->tz_gearbox_arr); return err; } static void 
-mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal) +mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal, + struct mlxsw_thermal_area *area) { int i; - for (i = thermal->tz_gearbox_num - 1; i >= 0; i--) - mlxsw_thermal_gearbox_tz_fini(&thermal->tz_gearbox_arr[i]); - kfree(thermal->tz_gearbox_arr); + for (i = area->tz_gearbox_num - 1; i >= 0; i--) + mlxsw_thermal_gearbox_tz_fini(&area->tz_gearbox_arr[i]); + kfree(area->tz_gearbox_arr); } int mlxsw_thermal_init(struct mlxsw_core *core, @@ -892,19 +914,29 @@ int mlxsw_thermal_init(struct mlxsw_core *core, char mfcr_pl[MLXSW_REG_MFCR_LEN] = { 0 }; enum mlxsw_reg_mfcr_pwm_frequency freq; struct device *dev = bus_info->dev; + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; struct mlxsw_thermal *thermal; + u8 pwm_active, num_of_slots; u16 tacho_active; - u8 pwm_active; int err, i; - thermal = devm_kzalloc(dev, sizeof(*thermal), - GFP_KERNEL); + mlxsw_reg_mgpir_pack(mgpir_pl, 0); + err = mlxsw_reg_query(core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + return err; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, NULL, + &num_of_slots); + + thermal = kzalloc(struct_size(thermal, line_cards, num_of_slots + 1), + GFP_KERNEL); if (!thermal) return -ENOMEM; thermal->core = core; thermal->bus_info = bus_info; memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips)); + thermal->line_cards[0].slot_index = 0; err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl); if (err) { @@ -970,11 +1002,13 @@ int mlxsw_thermal_init(struct mlxsw_core *core, goto err_thermal_zone_device_register; } - err = mlxsw_thermal_modules_init(dev, core, thermal); + err = mlxsw_thermal_modules_init(dev, core, thermal, + &thermal->line_cards[0]); if (err) goto err_thermal_modules_init; - err = mlxsw_thermal_gearboxes_init(dev, core, thermal); + err = mlxsw_thermal_gearboxes_init(dev, core, thermal, + &thermal->line_cards[0]); if (err) goto err_thermal_gearboxes_init; @@ -986,9 +1020,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core, return 0; err_thermal_zone_device_enable: - mlxsw_thermal_gearboxes_fini(thermal); + mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]); err_thermal_gearboxes_init: - mlxsw_thermal_modules_fini(thermal); + mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); err_thermal_modules_init: if (thermal->tzdev) { thermal_zone_device_unregister(thermal->tzdev); @@ -1001,7 +1035,7 @@ err_thermal_cooling_device_register: thermal_cooling_device_unregister(thermal->cdevs[i]); err_reg_write: err_reg_query: - devm_kfree(dev, thermal); + kfree(thermal); return err; } @@ -1009,8 +1043,8 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) { int i; - mlxsw_thermal_gearboxes_fini(thermal); - mlxsw_thermal_modules_fini(thermal); + mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]); + mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); if (thermal->tzdev) { thermal_zone_device_unregister(thermal->tzdev); thermal->tzdev = NULL; @@ -1023,5 +1057,5 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) } } - devm_kfree(thermal->bus_info->dev, thermal); + kfree(thermal); } -- cgit v1.2.3 From 6d94449a7d7dca9d4b9b5a2792db537b2b69b4b7 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:31 +0300 Subject: mlxsw: core_thermal: Add line card id prefix to line card thermal zone name Add prefix "lc#n" to thermal zones associated with the thermal objects found on line cards. For example thermal zone for module #9 located at line card #7 will have type: mlxsw-lc7-module9. 
And thermal zone for gearbox #3 located at line card #5 will have type: mlxsw-lc5-gearbox3. Signed-off-by: Vadim Pasternak Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 6d186cc74df3..23ff214367d3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -685,8 +685,12 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz) char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; int err; - snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d", - module_tz->module + 1); + if (module_tz->slot_index) + snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-module%d", + module_tz->slot_index, module_tz->module + 1); + else + snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d", + module_tz->module + 1); module_tz->tzdev = thermal_zone_device_register(tz_name, MLXSW_THERMAL_NUM_TRIPS, MLXSW_THERMAL_TRIP_MASK, @@ -820,8 +824,12 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz) char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; int ret; - snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d", - gearbox_tz->module + 1); + if (gearbox_tz->slot_index) + snprintf(tz_name, sizeof(tz_name), "mlxsw-lc%d-gearbox%d", + gearbox_tz->slot_index, gearbox_tz->module + 1); + else + snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d", + gearbox_tz->module + 1); gearbox_tz->tzdev = thermal_zone_device_register(tz_name, MLXSW_THERMAL_NUM_TRIPS, MLXSW_THERMAL_TRIP_MASK, -- cgit v1.2.3 From 739d56bc635e9ba642777445d8c9c37e4a35ba3c Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:32 +0300 Subject: mlxsw: core_thermal: Use exact name of cooling devices for binding Modular system supports additional cooling devices "mlxreg_fan1", "mlxreg_fan2", etcetera. Thermal zones in "mlxsw" driver should be bound to the same device as before called "mlxreg_fan". Used exact match for cooling device name to avoid binding to new additional cooling devices. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 23ff214367d3..49ea32457703 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -129,8 +129,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, /* Allow mlxsw thermal zone binding to an external cooling device */ for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) { - if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i], - strlen(cdev->type))) + if (!strcmp(cdev->type, mlxsw_thermal_external_allowed_cdev[i])) return 0; } -- cgit v1.2.3 From 03978fb88b06bdbe6c404ffb49cf8290d2d015a5 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Wed, 13 Apr 2022 18:17:33 +0300 Subject: mlxsw: core_thermal: Use common define for thermal zone name length Replace internal define 'MLXSW_THERMAL_ZONE_MAX_NAME' by common 'THERMAL_NAME_LENGTH'. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 49ea32457703..e8ce26a1d483 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -21,7 +21,6 @@ #define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */ #define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */ #define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2) -#define MLXSW_THERMAL_ZONE_MAX_NAME 16 #define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0) #define MLXSW_THERMAL_MAX_STATE 10 #define MLXSW_THERMAL_MIN_STATE 2 @@ -681,7 +680,7 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = { static int mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz) { - char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; + char tz_name[THERMAL_NAME_LENGTH]; int err; if (module_tz->slot_index) @@ -820,7 +819,7 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal, static int mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz) { - char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; + char tz_name[THERMAL_NAME_LENGTH]; int ret; if (gearbox_tz->slot_index) -- cgit v1.2.3 From bb578430d05b8b9114195ef2c25284374fdf9549 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 14 Apr 2022 09:08:34 +0100 Subject: octeon_ep: Fix spelling mistake "inerrupts" -> "interrupts" There is a spelling mistake in a dev_info message. Fix it. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c index 1e47143c596d..6ad88d0fe43f 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c @@ -487,7 +487,7 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) goto irq_handled; } - dev_info(&pdev->dev, "Reserved inerrupts raised; Ignore\n"); + dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); irq_handled: return IRQ_HANDLED; } -- cgit v1.2.3 From 81669e7c6ca44746d869925f337d9bbb0a252fc1 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Thu, 14 Apr 2022 09:08:00 +0000 Subject: net: ethernet: ti: davinci_emac: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/davinci_emac.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 4b6aed78d392..9d1e98db308b 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1422,9 +1422,8 @@ static int emac_dev_open(struct net_device *ndev) struct phy_device *phydev = NULL; struct device *phy = NULL; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_resume_and_get(&priv->pdev->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", __func__, ret); return ret; @@ -1661,9 +1660,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) u32 stats_clear_mask; int err; - err = pm_runtime_get_sync(&priv->pdev->dev); + err = pm_runtime_resume_and_get(&priv->pdev->dev); if (err < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", __func__, err); return &ndev->stats; @@ -1954,9 +1952,8 @@ static int davinci_emac_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); pm_runtime_enable(&pdev->dev); - rc = pm_runtime_get_sync(&pdev->dev); + rc = pm_runtime_resume_and_get(&pdev->dev); if (rc < 0) { - pm_runtime_put_noidle(&pdev->dev); dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", __func__, rc); goto err_napi_del; -- cgit v1.2.3 From 945e659dffad2d2e11a105c477d423cb1b5edd95 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Thu, 14 Apr 2022 18:07:09 +0530 Subject: net: emaclite: Fix coding style Make coding style changes to fix checkpatch script warnings. There is no functional change. Fixes below check and warnings- CHECK: Blank lines aren't necessary after an open brace '{' CHECK: spinlock_t definition without comment CHECK: Please don't use multiple blank lines WARNING: Prefer 'unsigned int' to bare use of 'unsigned' CHECK: braces {} should be used on all arms of this statement CHECK: Unbalanced braces around else statement CHECK: Alignment should match open parenthesis WARNING: Missing a blank line after declarations Signed-off-by: Radhey Shyam Pandey Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 35 ++++++++++++--------------- 1 file changed, 15 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 57a24f62e353..6294b714fbfa 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. +/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. * * This is a new flat driver which is based on the original emac_lite * driver from John Williams . @@ -91,8 +90,6 @@ #define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ #define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ - - #define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. 
*/ #define ALIGNMENT 4 @@ -124,7 +121,6 @@ * @last_link: last link status */ struct net_local { - struct net_device *ndev; bool tx_ping_pong; @@ -133,7 +129,7 @@ struct net_local { u32 next_rx_buf_to_use; void __iomem *base_addr; - spinlock_t reset_lock; + spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */ struct sk_buff *deferred_skb; struct phy_device *phy_dev; @@ -144,7 +140,6 @@ struct net_local { int last_link; }; - /*************************/ /* EmacLite driver calls */ /*************************/ @@ -207,7 +202,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) * address in the EmacLite device. */ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr, - unsigned length) + unsigned int length) { const u16 *from_u16_ptr; u32 align_buffer; @@ -265,7 +260,7 @@ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr, * to a 16-bit aligned buffer. */ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, - unsigned length) + unsigned int length) { u16 *to_u16_ptr, *from_u16_ptr; u32 *from_u32_ptr; @@ -330,7 +325,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { - /* Switch to next buffer if configured */ if (drvdata->tx_ping_pong != 0) drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; @@ -346,8 +340,9 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) return -1; /* Buffers were full, return failure */ - } else + } else { return -1; /* Buffer was full, return failure */ + } /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *)addr, byte_count); @@ -423,7 +418,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) * or an IP packet or an ARP packet */ if (proto_type > ETH_DATA_LEN) { - if (proto_type == ETH_P_IP) { length = ((ntohl(xemaclite_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + @@ -433,23 +427,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) length = min_t(u16, length, ETH_DATA_LEN); length += ETH_HLEN + ETH_FCS_LEN; - } else if (proto_type == ETH_P_ARP) + } else if (proto_type == ETH_P_ARP) { length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; - else + } else { /* Field contains type other than IP or ARP, use max * frame size and let user parse it */ length = ETH_FRAME_LEN + ETH_FCS_LEN; - } else + } + } else { /* Use the length in the frame, plus the header and trailer */ length = proto_type + ETH_HLEN + ETH_FCS_LEN; + } if (WARN_ON(length > maxlen)) length = maxlen; /* Read from the EmacLite device */ xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET), - data, length); + data, length); /* Acknowledge the frame */ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); @@ -671,8 +667,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) /* Check if the Transmission for the first buffer is completed */ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && - (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { - + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET); @@ -682,8 +677,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) /* Check if the Transmission for the second buffer is completed 
*/ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && - (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { - + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); @@ -840,6 +834,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) of_address_to_resource(npp, 0, &res); if (lp->ndev->mem_start != res.start) { struct phy_device *phydev; + phydev = of_phy_find_device(lp->phy_node); if (!phydev) dev_info(dev, -- cgit v1.2.3 From 7ae7d494f626a2f1a598fcf15b789a347c2d451a Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 14 Apr 2022 18:07:10 +0530 Subject: net: emaclite: Update copyright text to correct format Based on recommended guidance Copyright term should be also present in front of (c). That's why aligned driver to match this pattern. It helps automated tools with source code scanning. Signed-off-by: Michal Simek Signed-off-by: Radhey Shyam Pandey Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 6294b714fbfa..bb9c3ebde522 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -4,7 +4,7 @@ * This is a new flat driver which is based on the original emac_lite * driver from John Williams . * - * 2007 - 2013 (c) Xilinx, Inc. + * Copyright (c) 2007 - 2013 Xilinx, Inc. */ #include -- cgit v1.2.3 From 7240bf6fb216e03fb9add3a3dd23117ad589a0c7 Mon Sep 17 00:00:00 2001 From: Shravya Kumbham Date: Thu, 14 Apr 2022 18:07:11 +0530 Subject: net: emaclite: Remove custom BUFFER_ALIGN macro BUFFER_ALIGN macro is used to calculate the number of bytes required for the next alignment. Instead of this, we can directly use the skb_reserve(skb, NET_IP_ALIGN) to make the protocol header buffer aligned on at least a 4-byte boundary, where the NET_IP_ALIGN is by default defined as 2. So removing the BUFFER_ALIGN and its related defines which it can be done by the skb_reserve() itself. Signed-off-by: Shravya Kumbham Signed-off-by: Radhey Shyam Pandey Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index bb9c3ebde522..7a86ae82fcc1 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -91,10 +91,6 @@ #define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ #define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */ -#define ALIGNMENT 4 - -/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ -#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT) #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be @@ -595,11 +591,10 @@ static void xemaclite_rx_handler(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); struct sk_buff *skb; - unsigned int align; u32 len; len = ETH_FRAME_LEN + ETH_FCS_LEN; - skb = netdev_alloc_skb(dev, len + ALIGNMENT); + skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN); if (!skb) { /* Couldn't get memory. 
*/ dev->stats.rx_dropped++; @@ -607,16 +602,7 @@ static void xemaclite_rx_handler(struct net_device *dev) return; } - /* A new skb should have the data halfword aligned, but this code is - * here just in case that isn't true. Calculate how many - * bytes we should reserve to get the data to start on a word - * boundary - */ - align = BUFFER_ALIGN(skb->data); - if (align) - skb_reserve(skb, align); - - skb_reserve(skb, 2); + skb_reserve(skb, NET_IP_ALIGN); len = xemaclite_recv_data(lp, (u8 *)skb->data, len); -- cgit v1.2.3 From 0a03f3c511f57da4d7159a150f1ab4b87f62c040 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Fri, 15 Apr 2022 10:39:57 +0800 Subject: octeon_ep: fix error return code in octep_probe() If register_netdev() fails , it should return error code in octep_probe(). Fixes: 862cd659a6fb ("octeon_ep: Add driver framework and device initialization") Reported-by: Hulk Robot Signed-off-by: Yang Yingliang Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 5d39c857ea41..f2af5ebffcf1 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1071,7 +1071,8 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) octep_get_mac_addr(octep_dev, octep_dev->mac_addr); eth_hw_addr_set(netdev, octep_dev->mac_addr); - if (register_netdev(netdev)) { + err = register_netdev(netdev); + if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); goto register_dev_err; } -- cgit v1.2.3 From 1f702c1643f2f9657f9dd03085b309ab9a1de1d7 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Tue, 12 Apr 2022 10:01:21 +0800 Subject: net: hns3: add tx push support in hns3 ring param process This patch adds tx push param to hns3 ring param and adapts the set and get API of ring params. So users can set it by cmd ethtool -G and get it by cmd ethtool -g. 
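For readers who want to see the generic plumbing the hns3 patch above relies on, here is a minimal sketch of how a boolean ring parameter such as tx_push flows through the kernel ethtool ring-param API. It is a hypothetical driver, not hns3 code: the foo_* names and the FOO_STATE_TX_PUSH_ENABLE bit are invented for illustration.

	#include <linux/bitops.h>
	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	struct foo_priv {
		unsigned long state;
	};
	#define FOO_STATE_TX_PUSH_ENABLE	0

	/* "ethtool -g <dev>" lands here and reports the current state. */
	static void foo_get_ringparam(struct net_device *ndev,
				      struct ethtool_ringparam *ring,
				      struct kernel_ethtool_ringparam *kernel_ring,
				      struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(ndev);

		kernel_ring->tx_push = test_bit(FOO_STATE_TX_PUSH_ENABLE,
						&priv->state);
	}

	/* "ethtool -G <dev> tx-push on|off" lands here. */
	static int foo_set_ringparam(struct net_device *ndev,
				     struct ethtool_ringparam *ring,
				     struct kernel_ethtool_ringparam *kernel_ring,
				     struct netlink_ext_ack *extack)
	{
		struct foo_priv *priv = netdev_priv(ndev);

		if (kernel_ring->tx_push)
			set_bit(FOO_STATE_TX_PUSH_ENABLE, &priv->state);
		else
			clear_bit(FOO_STATE_TX_PUSH_ENABLE, &priv->state);
		return 0;
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH,
		.get_ringparam		= foo_get_ringparam,
		.set_ringparam		= foo_set_ringparam,
	};

A sufficiently recent ethtool binary exposes the knob as "tx-push"; older binaries simply do not offer it.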
Signed-off-by: Jie Wang Signed-off-by: Guangbin Huang Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 33 +++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f4da77452126..9f4111fd2986 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -664,6 +664,8 @@ static void hns3_get_ringparam(struct net_device *netdev, param->tx_pending = priv->ring[0].desc_num; param->rx_pending = priv->ring[rx_queue_index].desc_num; kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size; + kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, + &priv->state); } static void hns3_get_pauseparam(struct net_device *netdev, @@ -1120,6 +1122,30 @@ static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) return 0; } +static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push) + return -EOPNOTSUPP; + + if (tx_push == old_state) + return 0; + + netdev_dbg(netdev, "Changing tx push from %s to %s\n", + old_state ? "on" : "off", tx_push ? "on" : "off"); + + if (tx_push) + set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + else + clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); + + return 0; +} + static int hns3_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *param, struct kernel_ethtool_ringparam *kernel_param, @@ -1139,6 +1165,10 @@ static int hns3_set_ringparam(struct net_device *ndev, if (ret) return ret; + ret = hns3_set_tx_push(ndev, kernel_param->tx_push); + if (ret) + return ret; + /* Hardware requires that its descriptors must be multiple of eight */ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); @@ -1858,7 +1888,8 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_COALESCE_MAX_FRAMES | \ ETHTOOL_COALESCE_USE_CQE) -#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN +#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \ + ETHTOOL_RING_USE_TX_PUSH) static int hns3_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) -- cgit v1.2.3 From 0bd5ab511e30a8c462c377d0fdda3374fc6200a4 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:04 +0200 Subject: ice, xsk: Decorate ICE_XDP_REDIR with likely() ice_run_xdp_zc() suggests to compiler that XDP_REDIRECT is the most probable action returned from BPF program that AF_XDP has in its pipeline. Let's also bring this suggestion up to the callsite of ice_run_xdp_zc() so that compiler will be able to generate more optimized code which in turn will make branch predictor happy. 
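For reference, likely()/unlikely() are just branch-prediction hints built on the compiler's __builtin_expect(); the toy example below (standalone C, not driver code, with invented FOO_XDP_* bits) shows the pattern the patch lifts up to the callsite, where the predicted-taken arm becomes the straight-line fall-through path. The same reasoning applies to the ixgbe change that follows.

	/* Essentially what include/linux/compiler.h provides, minus the
	 * branch-profiling instrumentation variants.
	 */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	#define FOO_XDP_TX	0x2	/* invented action bits, for illustration */
	#define FOO_XDP_REDIR	0x4

	static int foo_classify(unsigned int xdp_res)
	{
		/* Hinting the hot case lets the compiler keep it on the
		 * fall-through path and move the rest out of line.
		 */
		if (likely(xdp_res & (FOO_XDP_TX | FOO_XDP_REDIR)))
			return 1;	/* fast path: frame already handled */
		return 0;		/* slow path: free the buffer or build an skb */
	}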
Suggested-by: Jesper Dangaard Brouer Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-4-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/ice/ice_xsk.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 866ee4df9671..e9ff05de0084 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -629,18 +629,19 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool); xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring); - if (xdp_res) { - if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) - xdp_xmit |= xdp_res; - else - xsk_buff_free(xdp); + if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) + xdp_xmit |= xdp_res; + else if (xdp_res == ICE_XDP_CONSUMED) + xsk_buff_free(xdp); + else + goto construct_skb; - total_rx_bytes += size; - total_rx_packets++; + total_rx_bytes += size; + total_rx_packets++; + + ice_bump_ntc(rx_ring); + continue; - ice_bump_ntc(rx_ring); - continue; - } construct_skb: /* XDP_PASS path */ skb = ice_construct_skb_zc(rx_ring, xdp); -- cgit v1.2.3 From d090c885860f6ef85aa080bdfc94e69c525490a2 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:05 +0200 Subject: ixgbe, xsk: Decorate IXGBE_XDP_REDIR with likely() ixgbe_run_xdp_zc() suggests to compiler that XDP_REDIRECT is the most probable action returned from BPF program that AF_XDP has in its pipeline. Let's also bring this suggestion up to the callsite of ixgbe_run_xdp_zc() so that compiler will be able to generate more optimized code which in turn will make branch predictor happy. 
Suggested-by: Jesper Dangaard Brouer Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-5-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index dd7ff66d422f..85497bf10624 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -303,21 +303,22 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); - if (xdp_res) { - if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) - xdp_xmit |= xdp_res; - else - xsk_buff_free(bi->xdp); + if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) + xdp_xmit |= xdp_res; + else if (xdp_res == IXGBE_XDP_CONSUMED) + xsk_buff_free(bi->xdp); + else + goto construct_skb; - bi->xdp = NULL; - total_rx_packets++; - total_rx_bytes += size; + bi->xdp = NULL; + total_rx_packets++; + total_rx_bytes += size; - cleaned_count++; - ixgbe_inc_ntc(rx_ring); - continue; - } + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + continue; +construct_skb: /* XDP_PASS path */ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp); if (!skb) { -- cgit v1.2.3 From 50ae066480738e0d3ce72a90c8c0f4d9de6092e0 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:06 +0200 Subject: ice, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full When XSK pool uses need_wakeup feature, correlate -ENOBUFS that was returned from xdp_do_redirect() with a XSK Rx queue being full. In such case, terminate the Rx processing that is being done on the current HW Rx ring and let the user space consume descriptors from XSK Rx queue so that there is room that driver can use later on. Introduce new internal return code ICE_XDP_EXIT that will indicate case described above. Note that it does not affect Tx processing that is bound to the same NAPI context, nor the other Rx rings. 
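The mechanism is generic enough to sketch outside any one driver: only when the pool runs in need_wakeup mode is -ENOBUFS from xdp_do_redirect() interpreted as "the XSK Rx ring is full, stop Rx processing and let user space drain it". Below is a hedged sketch with invented FOO_XDP_* verdict codes; the real ice/i40e/ixgbe code in this series differs in detail.

	#include <linux/bits.h>
	#include <linux/filter.h>
	#include <net/xdp_sock_drv.h>

	#define FOO_XDP_CONSUMED	BIT(0)
	#define FOO_XDP_REDIR		BIT(2)
	#define FOO_XDP_EXIT		BIT(3)

	/* Returns a verdict for the caller's NAPI Rx loop;
	 * FOO_XDP_EXIT means "break out of the loop now".
	 */
	static unsigned int foo_run_redirect(struct net_device *netdev,
					     struct xdp_buff *xdp,
					     struct bpf_prog *xdp_prog,
					     struct xsk_buff_pool *pool)
	{
		int err = xdp_do_redirect(netdev, xdp, xdp_prog);

		if (!err)
			return FOO_XDP_REDIR;
		/* -ENOBUFS with need_wakeup enabled means the XSK Rx ring is
		 * full; stop polling instead of silently dropping frames.
		 */
		if (xsk_uses_need_wakeup(pool) && err == -ENOBUFS)
			return FOO_XDP_EXIT;
		return FOO_XDP_CONSUMED;
	}

In the NAPI loop, a FOO_XDP_EXIT verdict then breaks out early and sets the failure flag so that need_wakeup is re-armed, which is what the ice, i40e and ixgbe hunks in this series do with their *_XDP_EXIT codes.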
Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-6-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/ice/ice_txrx.h | 1 + drivers/net/ethernet/intel/ice/ice_xsk.c | 29 +++++++++++++++++++---------- 2 files changed, 20 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index cead3eb149bd..f5a906c03669 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -133,6 +133,7 @@ static inline int ice_skb_pad(void) #define ICE_XDP_CONSUMED BIT(0) #define ICE_XDP_TX BIT(1) #define ICE_XDP_REDIR BIT(2) +#define ICE_XDP_EXIT BIT(3) #define ICE_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index e9ff05de0084..99bfa21c3938 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -540,9 +540,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return ICE_XDP_REDIR; + if (!err) + return ICE_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = ICE_XDP_EXIT; + else + result = ICE_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -553,15 +557,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (result == ICE_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = ICE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = ICE_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; - case XDP_DROP: - result = ICE_XDP_CONSUMED; break; } @@ -629,12 +634,16 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool); xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring); - if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) + if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { xdp_xmit |= xdp_res; - else if (xdp_res == ICE_XDP_CONSUMED) + } else if (xdp_res == ICE_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == ICE_XDP_CONSUMED) { xsk_buff_free(xdp); - else + } else if (xdp_res == ICE_XDP_PASS) { goto construct_skb; + } total_rx_bytes += size; total_rx_packets++; @@ -669,7 +678,7 @@ construct_skb: ice_receive_skb(rx_ring, skb, vlan_tag); } - failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring)); + failure |= !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring)); ice_finalize_xdp_rx(xdp_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); -- cgit v1.2.3 From b8aef650e54982728660919a0cf9cdacb079ef86 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:07 +0200 Subject: i40e, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full When XSK pool uses need_wakeup feature, correlate -ENOBUFS that was returned from xdp_do_redirect() with a XSK Rx queue being full. In such case, terminate the Rx processing that is being done on the current HW Rx ring and let the user space consume descriptors from XSK Rx queue so that there is room that driver can use later on. Introduce new internal return code I40E_XDP_EXIT that will indicate case described above. 
Note that it does not affect Tx processing that is bound to the same NAPI context, nor the other Rx rings. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-7-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 1 + drivers/net/ethernet/intel/i40e/i40e_xsk.c | 32 +++++++++++++++------- 2 files changed, 23 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h index 19da3b22160f..8c5118c8baaf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -20,6 +20,7 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val); #define I40E_XDP_CONSUMED BIT(0) #define I40E_XDP_TX BIT(1) #define I40E_XDP_REDIR BIT(2) +#define I40E_XDP_EXIT BIT(3) /* * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index c1d25b0b0ca2..94ea8288859a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -161,9 +161,13 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return I40E_XDP_REDIR; + if (!err) + return I40E_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = I40E_XDP_EXIT; + else + result = I40E_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -175,16 +179,17 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) if (result == I40E_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = I40E_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = I40E_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; /* handle aborts by dropping packet */ - case XDP_DROP: - result = I40E_XDP_CONSUMED; - break; } return result; } @@ -271,7 +276,8 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, unsigned int *rx_packets, unsigned int *rx_bytes, unsigned int size, - unsigned int xdp_res) + unsigned int xdp_res, + bool *failure) { struct sk_buff *skb; @@ -281,11 +287,15 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring, if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX) return; + if (xdp_res == I40E_XDP_EXIT) { + *failure = true; + return; + } + if (xdp_res == I40E_XDP_CONSUMED) { xsk_buff_free(xdp_buff); return; } - if (xdp_res == I40E_XDP_PASS) { /* NB! 
We are not checking for errors using * i40e_test_staterr with @@ -371,7 +381,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) xdp_res = i40e_run_xdp_zc(rx_ring, bi); i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets, - &rx_bytes, size, xdp_res); + &rx_bytes, size, xdp_res, &failure); + if (failure) + break; total_rx_packets += rx_packets; total_rx_bytes += rx_bytes; xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR); @@ -382,7 +394,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask; if (cleaned_count >= I40E_RX_BUFFER_WRITE) - failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count); + failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count); i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); -- cgit v1.2.3 From c7dd09fd46283029a41615b2b7034aea26b22ee0 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:08 +0200 Subject: ixgbe, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full When XSK pool uses need_wakeup feature, correlate -ENOBUFS that was returned from xdp_do_redirect() with a XSK Rx queue being full. In such case, terminate the Rx processing that is being done on the current HW Rx ring and let the user space consume descriptors from XSK Rx queue so that there is room that driver can use later on. Introduce new internal return code IXGBE_XDP_EXIT that will indicate case described above. Note that it does not affect Tx processing that is bound to the same NAPI context, nor the other Rx rings. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-8-maciej.fijalkowski@intel.com --- .../net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 27 ++++++++++++++-------- 2 files changed, 19 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index bba3feaf3318..f1f69ce67420 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -8,6 +8,7 @@ #define IXGBE_XDP_CONSUMED BIT(0) #define IXGBE_XDP_TX BIT(1) #define IXGBE_XDP_REDIR BIT(2) +#define IXGBE_XDP_EXIT BIT(3) #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 85497bf10624..bdd70b85a787 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) - goto out_failure; - return IXGBE_XDP_REDIR; + if (!err) + return IXGBE_XDP_REDIR; + if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + result = IXGBE_XDP_EXIT; + else + result = IXGBE_XDP_CONSUMED; + goto out_failure; } switch (act) { @@ -130,16 +134,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: + result = IXGBE_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); 
fallthrough; /* handle aborts by dropping packet */ - case XDP_DROP: - result = IXGBE_XDP_CONSUMED; - break; } return result; } @@ -303,12 +308,16 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); - if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) + if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) { xdp_xmit |= xdp_res; - else if (xdp_res == IXGBE_XDP_CONSUMED) + } else if (xdp_res == IXGBE_XDP_EXIT) { + failure = true; + break; + } else if (xdp_res == IXGBE_XDP_CONSUMED) { xsk_buff_free(bi->xdp); - else + } else if (xdp_res == IXGBE_XDP_PASS) { goto construct_skb; + } bi->xdp = NULL; total_rx_packets++; -- cgit v1.2.3 From ed8a6bc60f9eed76b8097456676e18d09b5dea7b Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:09 +0200 Subject: ice, xsk: Diversify return values from xsk_wakeup call paths Currently, when debugging AF_XDP workloads, one can correlate the -ENXIO return code as the case that XSK is not in the bound state. Returning same code from ndo_xsk_wakeup can be misleading and simply makes it harder to follow what is going on. Change ENXIOs in ice's ndo_xsk_wakeup() implementation to EINVALs, so that when probing it is clear that something is wrong on the driver side, not the xsk_{recv,send}msg. There is a -ENETDOWN that can happen from both kernel/driver sides though, but I don't have a correct replacement for this on one of the sides, so let's keep it that way. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-9-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/ice/ice_xsk.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 99bfa21c3938..53c299eebd99 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -934,13 +934,13 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) - return -ENXIO; + return -EINVAL; if (queue_id >= vsi->num_txq) - return -ENXIO; + return -EINVAL; if (!vsi->xdp_rings[queue_id]->xsk_pool) - return -ENXIO; + return -EINVAL; ring = vsi->xdp_rings[queue_id]; -- cgit v1.2.3 From ed7ae2d6221708971eb20a92f3775dac9cddec14 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:10 +0200 Subject: i40e, xsk: Diversify return values from xsk_wakeup call paths Currently, when debugging AF_XDP workloads, one can correlate the -ENXIO return code as the case that XSK is not in the bound state. Returning same code from ndo_xsk_wakeup can be misleading and simply makes it harder to follow what is going on. Change ENXIOs in i40e's ndo_xsk_wakeup() implementation to EINVALs, so that when probing it is clear that something is wrong on the driver side, not the xsk_{recv,send}msg. There is a -ENETDOWN that can happen from both kernel/driver sides though, but I don't have a correct replacement for this on one of the sides, so let's keep it that way. 
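The practical effect of this return-value change is only visible from user space. The following is a purely illustrative, self-contained sketch, not taken from the patch: the helper name and messages are invented, and whether the driver's return code reaches the caller as errno depends on the socket being zero-copy with need_wakeup enabled. It shows how an AF_XDP application kicking Tx via sendto() on the XSK descriptor could now tell the cases apart.

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

/* Kick the driver's Tx path on an AF_XDP socket and decode the result. */
static int kick_tx(int xsk_fd)
{
	if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) == 0 ||
	    errno == EAGAIN || errno == EBUSY || errno == ENOBUFS)
		return 0;		/* kick delivered or ring simply busy */

	switch (errno) {
	case ENXIO:			/* xsk core: socket is not bound */
		fprintf(stderr, "XSK socket not bound\n");
		break;
	case EINVAL:			/* after this series: driver-side problem
					 * (XDP off, bad queue id, no pool)
					 */
		fprintf(stderr, "driver rejected the wakeup\n");
		break;
	case ENETDOWN:			/* either side: interface is down */
		fprintf(stderr, "interface is down\n");
		break;
	default:
		fprintf(stderr, "unexpected wakeup error\n");
		break;
	}
	return -errno;
}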
Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-10-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 94ea8288859a..050280fd10c1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -606,13 +606,13 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) return -ENETDOWN; if (!i40e_enabled_xdp_vsi(vsi)) - return -ENXIO; + return -EINVAL; if (queue_id >= vsi->num_queue_pairs) - return -ENXIO; + return -EINVAL; if (!vsi->xdp_rings[queue_id]->xsk_pool) - return -ENXIO; + return -EINVAL; ring = vsi->xdp_rings[queue_id]; -- cgit v1.2.3 From 0f8bf018899e0c3137912cb6d547d0d072fc3d20 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:11 +0200 Subject: ixgbe, xsk: Diversify return values from xsk_wakeup call paths Currently, when debugging AF_XDP workloads, one can correlate the -ENXIO return code as the case that XSK is not in the bound state. Returning same code from ndo_xsk_wakeup can be misleading and simply makes it harder to follow what is going on. Change ENXIOs in ixgbe's ndo_xsk_wakeup() implementation to EINVALs, so that when probing it is clear that something is wrong on the driver side, not the xsk_{recv,send}msg. There is a -ENETDOWN that can happen from both kernel/driver sides though, but I don't have a correct replacement for this on one of the sides, so let's keep it that way. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-11-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index bdd70b85a787..68532cffd453 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -526,10 +526,10 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) return -ENETDOWN; if (!READ_ONCE(adapter->xdp_prog)) - return -ENXIO; + return -EINVAL; if (qid >= adapter->num_xdp_queues) - return -ENXIO; + return -EINVAL; ring = adapter->xdp_ring[qid]; @@ -537,7 +537,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) return -ENETDOWN; if (!ring->xsk_pool) - return -ENXIO; + return -EINVAL; if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { u64 eics = BIT_ULL(ring->q_vector->v_idx); -- cgit v1.2.3 From 7b7f2f273d8722d518758cc0770e784872cec781 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:12 +0200 Subject: mlx5, xsk: Diversify return values from xsk_wakeup call paths Currently, when debugging AF_XDP workloads, one can correlate the -ENXIO return code as the case that XSK is not in the bound state. Returning same code from ndo_xsk_wakeup can be misleading and simply makes it harder to follow what is going on. Change ENXIO in mlx5's ndo_xsk_wakeup() implementation to EINVAL, so that when probing it is clear that something is wrong on the driver side, not the xsk_{recv,send}msg. 
There is a -ENETDOWN that can happen from both kernel/driver sides though, but I don't have a correct replacement for this on one of the sides, so let's keep it that way. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-12-maciej.fijalkowski@intel.com --- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 3ec0c17db010..4902ef74fedf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -23,7 +23,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) c = priv->channels.c[ix]; if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))) - return -ENXIO; + return -EINVAL; if (!napi_if_scheduled_mark_missed(&c->napi)) { /* To avoid WQE overrun, don't post a NOP if async_icosq is not -- cgit v1.2.3 From a817ead4154d86c74bf98936f3a385e8a21974ff Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:13 +0200 Subject: stmmac, xsk: Diversify return values from xsk_wakeup call paths Currently, when debugging AF_XDP workloads, one can correlate the -ENXIO return code as the case that XSK is not in the bound state. Returning same code from ndo_xsk_wakeup can be misleading and simply makes it harder to follow what is going on. Change ENXIOs in stmmac's ndo_xsk_wakeup() implementation to EINVALs, so that when probing it is clear that something is wrong on the driver side, not the xsk_{recv,send}msg. There is a -ENETDOWN that can happen from both kernel/driver sides though, but I don't have a correct replacement for this on one of the sides, so let's keep it that way. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-13-maciej.fijalkowski@intel.com --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 4a4b3651ab3e..c9e077b2a56e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -6565,7 +6565,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) return -ENETDOWN; if (!stmmac_xdp_is_enabled(priv)) - return -ENXIO; + return -EINVAL; if (queue >= priv->plat->rx_queues_to_use || queue >= priv->plat->tx_queues_to_use) @@ -6576,7 +6576,7 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) ch = &priv->channel[queue]; if (!rx_q->xsk_pool && !tx_q->xsk_pool) - return -ENXIO; + return -EINVAL; if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { /* EQoS does not have per-DMA channel SW interrupt, -- cgit v1.2.3 From 4efad196163f4302c5cdc020899074b174eaf956 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 13 Apr 2022 17:30:14 +0200 Subject: ice, xsk: Avoid refilling single Rx descriptors Call alloc Rx routine for ZC driver only when the amount of unallocated descriptors exceeds given threshold. 
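The threshold logic itself is small; below is a self-contained userspace model of it for illustration only. The toy_* names are invented, while the driver's own ICE_DESC_UNUSED() and ICE_RING_QUARTER() helpers are visible in the hunk that follows.

#include <stdbool.h>

/* Toy model of a descriptor ring with producer/consumer indexes. */
struct toy_ring {
	unsigned int count;		/* total number of descriptors */
	unsigned int next_to_use;	/* producer index */
	unsigned int next_to_clean;	/* consumer index */
};

/* Unused descriptors, same wrap-around arithmetic as the driver macro. */
static unsigned int toy_unused(const struct toy_ring *r)
{
	return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
	       r->next_to_clean - r->next_to_use - 1;
}

/* Refill only once more than a quarter of the ring sits unused, so the
 * allocation routine always works on a reasonably sized batch instead of
 * topping up one or two descriptors at a time.
 */
static bool toy_should_refill(const struct toy_ring *r)
{
	return toy_unused(r) > r->count / 4;
}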
Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220413153015.453864-14-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/ice/ice_xsk.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 53c299eebd99..c26b408ba419 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -587,6 +587,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; bool failure = false; + int entries_to_alloc; /* ZC patch is enabled only when XDP program is set, * so here it can not be NULL @@ -678,7 +679,9 @@ construct_skb: ice_receive_skb(rx_ring, skb, vlan_tag); } - failure |= !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring)); + entries_to_alloc = ICE_DESC_UNUSED(rx_ring); + if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) + failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); ice_finalize_xdp_rx(xdp_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); -- cgit v1.2.3 From 8880fc669deda4fafde3ce90b3128f079567447f Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 13 Apr 2022 13:10:21 -0400 Subject: ibmvnic: rename local variable index to bufidx The local variable 'index' is heavily used in some functions and is confusing with the presence of other "index" fields like pool->index, ->consumer_index, etc. Rename it to bufidx to better reflect that its the index of a buffer in the pool. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Dany Madden Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ibm/ibmvnic.c | 44 +++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 77683909ca3d..7fd9dd3759df 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -369,7 +369,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, dma_addr_t dma_addr; unsigned char *dst; int shift = 0; - int index; + int bufidx; int i; if (!pool->active) @@ -385,14 +385,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, * be 0. */ for (i = ind_bufp->index; i < count; ++i) { - index = pool->free_map[pool->next_free]; + bufidx = pool->free_map[pool->next_free]; /* We maybe reusing the skb from earlier resets. Allocate * only if necessary. But since the LTB may have changed * during reset (see init_rx_pools()), update LTB below * even if reusing skb. 
*/ - skb = pool->rx_buff[index].skb; + skb = pool->rx_buff[bufidx].skb; if (!skb) { skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); @@ -407,24 +407,24 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, pool->next_free = (pool->next_free + 1) % pool->size; /* Copy the skb to the long term mapped DMA buffer */ - offset = index * pool->buff_size; + offset = bufidx * pool->buff_size; dst = pool->long_term_buff.buff + offset; memset(dst, 0, pool->buff_size); dma_addr = pool->long_term_buff.addr + offset; /* add the skb to an rx_buff in the pool */ - pool->rx_buff[index].data = dst; - pool->rx_buff[index].dma = dma_addr; - pool->rx_buff[index].skb = skb; - pool->rx_buff[index].pool_index = pool->index; - pool->rx_buff[index].size = pool->buff_size; + pool->rx_buff[bufidx].data = dst; + pool->rx_buff[bufidx].dma = dma_addr; + pool->rx_buff[bufidx].skb = skb; + pool->rx_buff[bufidx].pool_index = pool->index; + pool->rx_buff[bufidx].size = pool->buff_size; /* queue the rx_buff for the next send_subcrq_indirect */ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; memset(sub_crq, 0, sizeof(*sub_crq)); sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; sub_crq->rx_add.correlator = - cpu_to_be64((u64)&pool->rx_buff[index]); + cpu_to_be64((u64)&pool->rx_buff[bufidx]); sub_crq->rx_add.ioba = cpu_to_be32(dma_addr); sub_crq->rx_add.map_id = pool->long_term_buff.map_id; @@ -466,10 +466,10 @@ failure: sub_crq = &ind_bufp->indir_arr[i]; rx_buff = (struct ibmvnic_rx_buff *) be64_to_cpu(sub_crq->rx_add.correlator); - index = (int)(rx_buff - pool->rx_buff); - pool->free_map[pool->next_free] = index; - dev_kfree_skb_any(pool->rx_buff[index].skb); - pool->rx_buff[index].skb = NULL; + bufidx = (int)(rx_buff - pool->rx_buff); + pool->free_map[pool->next_free] = bufidx; + dev_kfree_skb_any(pool->rx_buff[bufidx].skb); + pool->rx_buff[bufidx].skb = NULL; } adapter->replenish_add_buff_failure += ind_bufp->index; atomic_add(buffers_added, &pool->available); @@ -1926,7 +1926,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int offset; int num_entries = 1; unsigned char *dst; - int index = 0; + int bufidx = 0; u8 proto = 0; /* If a reset is in progress, drop the packet since @@ -1960,9 +1960,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) else tx_pool = &adapter->tx_pool[queue_num]; - index = tx_pool->free_map[tx_pool->consumer_index]; + bufidx = tx_pool->free_map[tx_pool->consumer_index]; - if (index == IBMVNIC_INVALID_MAP) { + if (bufidx == IBMVNIC_INVALID_MAP) { dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; @@ -1973,7 +1973,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; - offset = index * tx_pool->buf_size; + offset = bufidx * tx_pool->buf_size; dst = tx_pool->long_term_buff.buff + offset; memset(dst, 0, tx_pool->buf_size); data_dma_addr = tx_pool->long_term_buff.addr + offset; @@ -2003,9 +2003,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_pool->consumer_index = (tx_pool->consumer_index + 1) % tx_pool->num_buffers; - tx_buff = &tx_pool->tx_buff[index]; + tx_buff = &tx_pool->tx_buff[bufidx]; tx_buff->skb = skb; - tx_buff->index = index; + tx_buff->index = bufidx; tx_buff->pool_index = queue_num; memset(&tx_crq, 0, sizeof(tx_crq)); @@ -2017,9 +2017,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) if (skb_is_gso(skb)) tx_crq.v1.correlator 
= - cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK); + cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); else - tx_crq.v1.correlator = cpu_to_be32(index); + tx_crq.v1.correlator = cpu_to_be32(bufidx); tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); -- cgit v1.2.3 From 2872a67c6bcfbd996fda08ea0a8119be230f428e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 13 Apr 2022 13:10:22 -0400 Subject: ibmvnic: define map_rxpool_buf_to_ltb() Define a helper to map a given rx pool buffer into its corresponding long term buffer (LTB) and offset. Currently there is just one LTB per pool so the mapping is trivial. When we add support for multiple LTBs per pool, this helper will be more useful. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Dany Madden Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ibm/ibmvnic.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 7fd9dd3759df..753afda76296 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -345,6 +345,25 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, ltb->map_id = 0; } +/** + * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB. + * @rxpool: The receive buffer pool containing buffer + * @bufidx: Index of buffer in rxpool + * @ltbp: (Output) pointer to the long term buffer containing the buffer + * @offset: (Output) offset of buffer in the LTB from @ltbp + * + * Map the given buffer identified by [rxpool, bufidx] to an LTB in the + * pool and its corresponding offset. + */ +static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool, + unsigned int bufidx, + struct ibmvnic_long_term_buff **ltbp, + unsigned int *offset) +{ + *ltbp = &rxpool->long_term_buff; + *offset = bufidx * rxpool->buff_size; +} + static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) { int i; @@ -361,6 +380,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, struct device *dev = &adapter->vdev->dev; struct ibmvnic_ind_xmit_queue *ind_bufp; struct ibmvnic_sub_crq_queue *rx_scrq; + struct ibmvnic_long_term_buff *ltb; union sub_crq *sub_crq; int buffers_added = 0; unsigned long lpar_rc; @@ -407,10 +427,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, pool->next_free = (pool->next_free + 1) % pool->size; /* Copy the skb to the long term mapped DMA buffer */ - offset = bufidx * pool->buff_size; - dst = pool->long_term_buff.buff + offset; + map_rxpool_buf_to_ltb(pool, bufidx, <b, &offset); + dst = ltb->buff + offset; memset(dst, 0, pool->buff_size); - dma_addr = pool->long_term_buff.addr + offset; + dma_addr = ltb->addr + offset; /* add the skb to an rx_buff in the pool */ pool->rx_buff[bufidx].data = dst; @@ -426,7 +446,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, sub_crq->rx_add.correlator = cpu_to_be64((u64)&pool->rx_buff[bufidx]); sub_crq->rx_add.ioba = cpu_to_be32(dma_addr); - sub_crq->rx_add.map_id = pool->long_term_buff.map_id; + sub_crq->rx_add.map_id = ltb->map_id; /* The length field of the sCRQ is defined to be 24 bits so the * buffer size needs to be left shifted by a byte before it is -- cgit v1.2.3 From 0c91bf9ceba6296892010a48c5eef0bd17ec35d5 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 13 Apr 2022 13:10:23 -0400 Subject: ibmvnic: define 
map_txpool_buf_to_ltb() Define a helper to map a given txpool buffer into its corresponding long term buffer (LTB) and offset. Currently there is just one LTB per txpool so the mapping is trivial. When we add support for multiple LTBs per txpool, this helper will be more useful. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Dany Madden Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ibm/ibmvnic.c | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 753afda76296..2e6042409585 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -364,6 +364,25 @@ static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool, *offset = bufidx * rxpool->buff_size; } +/** + * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB. + * @txpool: The transmit buffer pool containing buffer + * @bufidx: Index of buffer in txpool + * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer + * @offset: (Output) offset of buffer in the LTB from @ltbp + * + * Map the given buffer identified by [txpool, bufidx] to an LTB in the + * pool and its corresponding offset. + */ +static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool, + unsigned int bufidx, + struct ibmvnic_long_term_buff **ltbp, + unsigned int *offset) +{ + *ltbp = &txpool->long_term_buff; + *offset = bufidx * txpool->buf_size; +} + static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) { int i; @@ -1931,6 +1950,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) struct ibmvnic_ind_xmit_queue *ind_bufp; struct ibmvnic_tx_buff *tx_buff = NULL; struct ibmvnic_sub_crq_queue *tx_scrq; + struct ibmvnic_long_term_buff *ltb; struct ibmvnic_tx_pool *tx_pool; unsigned int tx_send_failed = 0; netdev_tx_t ret = NETDEV_TX_OK; @@ -1993,10 +2013,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; - offset = bufidx * tx_pool->buf_size; - dst = tx_pool->long_term_buff.buff + offset; + map_txpool_buf_to_ltb(tx_pool, bufidx, <b, &offset); + + dst = ltb->buff + offset; memset(dst, 0, tx_pool->buf_size); - data_dma_addr = tx_pool->long_term_buff.addr + offset; + data_dma_addr = ltb->addr + offset; if (skb_shinfo(skb)->nr_frags) { int cur, i; @@ -2040,7 +2061,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK); else tx_crq.v1.correlator = cpu_to_be32(bufidx); - tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); + tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id); tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); -- cgit v1.2.3 From d6b458509035db32b935f15922b530373fb6d27e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 13 Apr 2022 13:10:24 -0400 Subject: ibmvnic: convert rxpool ltb to a set of ltbs Define and use interfaces that treat the long term buffer (LTB) of an rxpool as a set of LTBs rather than a single LTB. The set only has one LTB for now. 
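Once the set can hold more than one LTB (a later patch in this series), a buffer index has to be walked through the set to find its backing chunk and offset. The sketch below is a self-contained userspace analogue of that lookup with invented names (chunk, chunk_set, buf_addr); it mirrors what map_rxpool_buf_to_ltb() grows into rather than reproducing the driver code.

#include <stddef.h>

/* One DMA-able chunk (stands in for ibmvnic_long_term_buff). */
struct chunk {
	size_t size;
	unsigned char *mem;
};

/* A pool backed by several chunks (stands in for ibmvnic_ltb_set). */
struct chunk_set {
	int num;
	struct chunk *chunks;
};

/* Map a buffer index to the chunk that holds it and return its address.
 * Each chunk holds size / buf_size whole buffers; walk the set until the
 * remaining index fits inside one chunk.
 */
static unsigned char *buf_addr(const struct chunk_set *set, size_t buf_size,
			       unsigned int bufidx)
{
	int i;

	for (i = 0; i < set->num; i++) {
		size_t nbufs = set->chunks[i].size / buf_size;

		if (bufidx < nbufs)
			return set->chunks[i].mem + (size_t)bufidx * buf_size;
		bufidx -= nbufs;
	}
	return NULL;	/* index beyond the pool */
}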
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Dany Madden Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ibm/ibmvnic.c | 44 +++++++++++++++++++++++++++++++++----- drivers/net/ethernet/ibm/ibmvnic.h | 7 +++++- 2 files changed, 45 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 2e6042409585..765a48833b3b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -345,6 +345,41 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, ltb->map_id = 0; } +static void free_ltb_set(struct ibmvnic_adapter *adapter, + struct ibmvnic_ltb_set *ltb_set) +{ + int i; + + for (i = 0; i < ltb_set->num_ltbs; i++) + free_long_term_buff(adapter, <b_set->ltbs[i]); + + kfree(ltb_set->ltbs); + ltb_set->ltbs = NULL; + ltb_set->num_ltbs = 0; +} + +static int alloc_ltb_set(struct ibmvnic_adapter *adapter, + struct ibmvnic_ltb_set *ltb_set, int num_buffs, + int buff_size) +{ + struct ibmvnic_long_term_buff *ltb; + int ltb_size; + int size; + + size = sizeof(struct ibmvnic_long_term_buff); + + ltb_set->ltbs = kmalloc(size, GFP_KERNEL); + if (!ltb_set->ltbs) + return -ENOMEM; + + ltb_set->num_ltbs = 1; + ltb = <b_set->ltbs[0]; + + ltb_size = num_buffs * buff_size; + + return alloc_long_term_buff(adapter, ltb, ltb_size); +} + /** * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB. * @rxpool: The receive buffer pool containing buffer @@ -360,7 +395,7 @@ static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool, struct ibmvnic_long_term_buff **ltbp, unsigned int *offset) { - *ltbp = &rxpool->long_term_buff; + *ltbp = &rxpool->ltb_set.ltbs[0]; *offset = bufidx * rxpool->buff_size; } @@ -618,7 +653,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) kfree(rx_pool->free_map); - free_long_term_buff(adapter, &rx_pool->long_term_buff); + free_ltb_set(adapter, &rx_pool->ltb_set); if (!rx_pool->rx_buff) continue; @@ -763,9 +798,8 @@ update_ltb: dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", i, rx_pool->size, rx_pool->buff_size); - rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * rx_pool->buff_size); - if (rc) + if (alloc_ltb_set(adapter, &rx_pool->ltb_set, + rx_pool->size, rx_pool->buff_size)) goto out; for (j = 0; j < rx_pool->size; ++j) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 8f5cefb932dd..7d430fdf47c0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -798,6 +798,11 @@ struct ibmvnic_long_term_buff { u8 map_id; }; +struct ibmvnic_ltb_set { + int num_ltbs; + struct ibmvnic_long_term_buff *ltbs; +}; + struct ibmvnic_tx_buff { struct sk_buff *skb; int index; @@ -833,7 +838,7 @@ struct ibmvnic_rx_pool { int next_free; int next_alloc; int active; - struct ibmvnic_long_term_buff long_term_buff; + struct ibmvnic_ltb_set ltb_set; } ____cacheline_aligned; struct ibmvnic_vpd { -- cgit v1.2.3 From a75de820575d54185a7569494e89f83dca49368e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 13 Apr 2022 13:10:25 -0400 Subject: ibmvnic: Allow multiple ltbs in rxpool ltb_set Allow multiple LTBs in the rxpool's ltb_set. The first n-1 LTBs will all be of the same size. The size of the last LTB in the set depends on the number of buffers and buffer (mtu) size. Having a set of LTBs per pool provides a couple of benefits. 
First, with the current value of IBMVNIC_MAX_LTB_SIZE of 16MB, with an MTU of 9000, we need a LTB (DMA buffer) of that size but the allocation can fail in low memory conditions. With a set of LTBs per pool, we can use several smaller (8MB) LTBs and hopefully have fewer allocation failures. (See also comments in ibmvnic.h on the trade-off with smaller LTBs) Second since the kernel limits the size of the DMA buffer to 16MB (based on MAX_ORDER), with a single DMA buffer per pool, the pool is also limited to 16MB. This in turn limits the number of buffers per pool to 1763 when MTU is 9000. With a set of LTBs per pool, we can have upto the max of 4096 buffers per pool even when MTU is 9000. Suggested-by: Brian King Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Dany Madden Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ibm/ibmvnic.c | 152 ++++++++++++++++++++++++++++++++----- drivers/net/ethernet/ibm/ibmvnic.h | 45 ++++++++++- 2 files changed, 177 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 765a48833b3b..16fd1f1f1228 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -345,6 +345,14 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, ltb->map_id = 0; } +/** + * free_ltb_set - free the given set of long term buffers (LTBS) + * @adapter: The ibmvnic adapter containing this ltb set + * @ltb_set: The ltb_set to be freed + * + * Free the set of LTBs in the given set. + */ + static void free_ltb_set(struct ibmvnic_adapter *adapter, struct ibmvnic_ltb_set *ltb_set) { @@ -358,26 +366,117 @@ static void free_ltb_set(struct ibmvnic_adapter *adapter, ltb_set->num_ltbs = 0; } +/** + * alloc_ltb_set() - Allocate a set of long term buffers (LTBs) + * + * @adapter: ibmvnic adapter associated to the LTB + * @ltb_set: container object for the set of LTBs + * @num_buffs: Number of buffers in the LTB + * @buff_size: Size of each buffer in the LTB + * + * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size + * each. We currently cap size each LTB to IBMVNIC_ONE_LTB_SIZE. If the + * new set of LTBs have fewer LTBs than the old set, free the excess LTBs. + * If new set needs more than in old set, allocate the remaining ones. + * Try and reuse as many LTBs as possible and avoid reallocation. + * + * Any changes to this allocation strategy must be reflected in + * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb(). 
+ */ static int alloc_ltb_set(struct ibmvnic_adapter *adapter, struct ibmvnic_ltb_set *ltb_set, int num_buffs, int buff_size) { - struct ibmvnic_long_term_buff *ltb; - int ltb_size; - int size; + struct device *dev = &adapter->vdev->dev; + struct ibmvnic_ltb_set old_set; + struct ibmvnic_ltb_set new_set; + int rem_size; + int tot_size; /* size of all ltbs */ + int ltb_size; /* size of one ltb */ + int nltbs; + int rc; + int n; + int i; - size = sizeof(struct ibmvnic_long_term_buff); + dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs, + buff_size); - ltb_set->ltbs = kmalloc(size, GFP_KERNEL); - if (!ltb_set->ltbs) - return -ENOMEM; + ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size); + tot_size = num_buffs * buff_size; + + if (ltb_size > tot_size) + ltb_size = tot_size; + + nltbs = tot_size / ltb_size; + if (tot_size % ltb_size) + nltbs++; + + old_set = *ltb_set; + + if (old_set.num_ltbs == nltbs) { + new_set = old_set; + } else { + int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff); + + new_set.ltbs = kzalloc(tmp, GFP_KERNEL); + if (!new_set.ltbs) + return -ENOMEM; + + new_set.num_ltbs = nltbs; + + /* Free any excess ltbs in old set */ + for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++) + free_long_term_buff(adapter, &old_set.ltbs[i]); + + /* Copy remaining ltbs to new set. All LTBs except the + * last one are of the same size. alloc_long_term_buff() + * will realloc if the size changes. + */ + n = min(old_set.num_ltbs, new_set.num_ltbs); + for (i = 0; i < n; i++) + new_set.ltbs[i] = old_set.ltbs[i]; - ltb_set->num_ltbs = 1; - ltb = <b_set->ltbs[0]; + /* Any additional ltbs in new set will have NULL ltbs for + * now and will be allocated in alloc_long_term_buff(). + */ + + /* We no longer need the old_set so free it. Note that we + * may have reused some ltbs from old set and freed excess + * ltbs above. So we only need to free the container now + * not the LTBs themselves. (i.e. dont free_ltb_set()!) + */ + kfree(old_set.ltbs); + old_set.ltbs = NULL; + old_set.num_ltbs = 0; + + /* Install the new set. If allocations fail below, we will + * retry later and know what size LTBs we need. + */ + *ltb_set = new_set; + } + + i = 0; + rem_size = tot_size; + while (rem_size) { + if (ltb_size > rem_size) + ltb_size = rem_size; + + rem_size -= ltb_size; + + rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size); + if (rc) + goto out; + i++; + } - ltb_size = num_buffs * buff_size; + WARN_ON(i != new_set.num_ltbs); - return alloc_long_term_buff(adapter, ltb, ltb_size); + return 0; +out: + /* We may have allocated one/more LTBs before failing and we + * want to try and reuse on next reset. So don't free ltb set. + */ + return rc; } /** @@ -388,14 +487,30 @@ static int alloc_ltb_set(struct ibmvnic_adapter *adapter, * @offset: (Output) offset of buffer in the LTB from @ltbp * * Map the given buffer identified by [rxpool, bufidx] to an LTB in the - * pool and its corresponding offset. + * pool and its corresponding offset. Assume for now that each LTB is of + * different size but could possibly be optimized based on the allocation + * strategy in alloc_ltb_set(). 
*/ static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool, unsigned int bufidx, struct ibmvnic_long_term_buff **ltbp, unsigned int *offset) { - *ltbp = &rxpool->ltb_set.ltbs[0]; + struct ibmvnic_long_term_buff *ltb; + int nbufs; /* # of buffers in one ltb */ + int i; + + WARN_ON(bufidx >= rxpool->size); + + for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) { + ltb = &rxpool->ltb_set.ltbs[i]; + nbufs = ltb->size / rxpool->buff_size; + if (bufidx < nbufs) + break; + bufidx -= nbufs; + } + + *ltbp = ltb; *offset = bufidx * rxpool->buff_size; } @@ -798,8 +913,9 @@ update_ltb: dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", i, rx_pool->size, rx_pool->buff_size); - if (alloc_ltb_set(adapter, &rx_pool->ltb_set, - rx_pool->size, rx_pool->buff_size)) + rc = alloc_ltb_set(adapter, &rx_pool->ltb_set, + rx_pool->size, rx_pool->buff_size); + if (rc) goto out; for (j = 0; j < rx_pool->size; ++j) { @@ -4106,16 +4222,16 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) adapter->desired.rx_entries = adapter->max_rx_add_entries_per_subcrq; - max_entries = IBMVNIC_MAX_LTB_SIZE / + max_entries = IBMVNIC_LTB_SET_SIZE / (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * - adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { + adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) { adapter->desired.tx_entries = max_entries; } if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * - adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { + adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) { adapter->desired.rx_entries = max_entries; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 7d430fdf47c0..178035872c32 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -36,9 +36,50 @@ #define IBMVNIC_TSO_BUFS 64 #define IBMVNIC_TSO_POOL_MASK 0x80000000 -#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE) -#define IBMVNIC_BUFFER_HLEN 500 +/* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool + * has a set of buffers. The size of each buffer is determined by the MTU. + * + * Each Rx/Tx pool is also associated with a DMA region that is shared + * with the "hardware" (VIOS) and used to send/receive packets. The DMA + * region is also referred to as a Long Term Buffer or LTB. + * + * The size of the DMA region required for an Rx/Tx pool depends on the + * number and size (MTU) of the buffers in the pool. At the max levels + * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus + * some padding. + * + * But the size of a single DMA region is limited by MAX_ORDER in the + * kernel (about 16MB currently). To support say 4K Jumbo frames, we + * use a set of LTBs (struct ltb_set) per pool. + * + * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel + * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set + * (must be <= IBMVNIC_ONE_LTB_MAX) + * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set + * + * Each VNIC can have upto 16 Rx, 16 Tx and 16 TSO pools. The TSO pools + * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB. + * + * The Rx and Tx pools can have upto 4096 buffers. The max size of these + * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN). + * So, setting the IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB. + * + * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. 
If it is large, + * the allocation of the LTB can fail when system is low in memory. If + * its too small, we would need several mappings for each of the Rx/ + * Tx/TSO pools but there is a limit of 255 mappings per vnic in the + * VNIC protocol. + * + * So setting IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set + * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO + * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160 + * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC. + */ +#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE)) +#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX) +#define IBMVNIC_LTB_SET_SIZE (38 << 20) +#define IBMVNIC_BUFFER_HLEN 500 #define IBMVNIC_RESET_DELAY 100 static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = { -- cgit v1.2.3 From 93b1ebb348a94a04ae4bfbd028f89005090cf8c7 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 13 Apr 2022 13:10:26 -0400 Subject: ibmvnic: Allow multiple ltbs in txpool ltb_set Allow multiple LTBs in the txpool's ltb_set. i.e rather than using a single large LTB, use several smaller LTBs. The first n-1 LTBs will all be of the same size. The size of the last LTB in the set depends on the number of buffers and buffer (mtu) size. This strategy hopefully allows more reuse of the initial LTBs and also reduces the chances of an allocation failure (of the large LTB) when system is low in memory. Suggested-by: Brian King Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Dany Madden Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/ibm/ibmvnic.c | 53 ++++++++++++++++++++++++-------------- drivers/net/ethernet/ibm/ibmvnic.h | 2 +- 2 files changed, 35 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 16fd1f1f1228..4840ad4ccd10 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -257,12 +257,14 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { struct device *dev = &adapter->vdev->dev; + u64 prev = 0; int rc; if (!reuse_ltb(ltb, size)) { dev_dbg(dev, "LTB size changed from 0x%llx to 0x%x, reallocating\n", ltb->size, size); + prev = ltb->size; free_long_term_buff(adapter, ltb); } @@ -283,8 +285,8 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, bitmap_set(adapter->map_ids, ltb->map_id, 1); dev_dbg(dev, - "Allocated new LTB [map %d, size 0x%llx]\n", - ltb->map_id, ltb->size); + "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n", + ltb->map_id, ltb->size, prev); } /* Ensure ltb is zeroed - specially when reusing it. 
*/ @@ -529,7 +531,21 @@ static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool, struct ibmvnic_long_term_buff **ltbp, unsigned int *offset) { - *ltbp = &txpool->long_term_buff; + struct ibmvnic_long_term_buff *ltb; + int nbufs; /* # of buffers in one ltb */ + int i; + + WARN_ON_ONCE(bufidx >= txpool->num_buffers); + + for (i = 0; i < txpool->ltb_set.num_ltbs; i++) { + ltb = &txpool->ltb_set.ltbs[i]; + nbufs = ltb->size / txpool->buf_size; + if (bufidx < nbufs) + break; + bufidx -= nbufs; + } + + *ltbp = ltb; *offset = bufidx * txpool->buf_size; } @@ -971,7 +987,7 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter, { kfree(tx_pool->tx_buff); kfree(tx_pool->free_map); - free_long_term_buff(adapter, &tx_pool->long_term_buff); + free_ltb_set(adapter, &tx_pool->ltb_set); } /** @@ -1161,17 +1177,16 @@ update_ltb: for (i = 0; i < num_pools; i++) { struct ibmvnic_tx_pool *tso_pool; struct ibmvnic_tx_pool *tx_pool; - u32 ltb_size; tx_pool = &adapter->tx_pool[i]; - ltb_size = tx_pool->num_buffers * tx_pool->buf_size; - if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, - ltb_size)) - goto out; - dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", - i, tx_pool->long_term_buff.buff, - tx_pool->num_buffers, tx_pool->buf_size); + dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n", + i, tx_pool->num_buffers, tx_pool->buf_size); + + rc = alloc_ltb_set(adapter, &tx_pool->ltb_set, + tx_pool->num_buffers, tx_pool->buf_size); + if (rc) + goto out; tx_pool->consumer_index = 0; tx_pool->producer_index = 0; @@ -1180,14 +1195,14 @@ update_ltb: tx_pool->free_map[j] = j; tso_pool = &adapter->tso_pool[i]; - ltb_size = tso_pool->num_buffers * tso_pool->buf_size; - if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, - ltb_size)) - goto out; - dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", - i, tso_pool->long_term_buff.buff, - tso_pool->num_buffers, tso_pool->buf_size); + dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n", + i, tso_pool->num_buffers, tso_pool->buf_size); + + rc = alloc_ltb_set(adapter, &tso_pool->ltb_set, + tso_pool->num_buffers, tso_pool->buf_size); + if (rc) + goto out; tso_pool->consumer_index = 0; tso_pool->producer_index = 0; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 178035872c32..90d6833fb1db 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -856,7 +856,7 @@ struct ibmvnic_tx_pool { int *free_map; int consumer_index; int producer_index; - struct ibmvnic_long_term_buff long_term_buff; + struct ibmvnic_ltb_set ltb_set; int num_buffers; int buf_size; } ____cacheline_aligned; -- cgit v1.2.3 From 31248b5a354b6871afecb259690dd615de730ef0 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 14 Apr 2022 10:52:42 +0300 Subject: octeon_ep: Remove custom driver version In review comment [1] was pointed that new code is not supposed to set driver version and should rely on kernel version instead. As an outcome of that comment all the dance around setting such driver version to FW should be removed too, because in upstream kernel whole driver will have same version so read/write from/to FW will give same result. 
[1] https://lore.kernel.org/all/YladGTmon1x3dfxI@unreal Fixes: 862cd659a6fb ("octeon_ep: Add driver framework and device initialization") Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/5d76f3116ee795071ec044eabb815d6c2bdc7dbd.1649922731.git.leonro@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 13 +------------ drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 2 -- drivers/net/ethernet/marvell/octeon_ep/octep_main.h | 9 --------- 3 files changed, 1 insertion(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c index 8c196dadfad0..39322e4dd100 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c @@ -26,9 +26,7 @@ #define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM_OFFSET(m) (m) #define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(m) ((m) + 8) -#define OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(m) ((m) + 16) #define OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(m) ((m) + 24) -#define OCTEP_CTRL_MBOX_INFO_FW_VERSION_OFFSET(m) ((m) + 136) #define OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(m) ((m) + 144) #define OCTEP_CTRL_MBOX_H2FQ_INFO_OFFSET(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ) @@ -65,7 +63,7 @@ static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 mask) int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) { - u64 version, magic_num, status; + u64 magic_num, status; if (!mbox) return -EINVAL; @@ -81,13 +79,6 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) return -EINVAL; } - version = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION_OFFSET(mbox->barmem)); - if (version != OCTEP_DRV_VERSION) { - pr_info("octep_ctrl_mbox : Firmware version mismatch %llx != %x\n", - version, OCTEP_DRV_VERSION); - return -EINVAL; - } - status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS_OFFSET(mbox->barmem)); if (status != OCTEP_CTRL_MBOX_STATUS_READY) { pr_info("octep_ctrl_mbox : Firmware is not ready.\n"); @@ -96,7 +87,6 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ_OFFSET(mbox->barmem)); - writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(mbox->barmem)); writeq(OCTEP_CTRL_MBOX_STATUS_INIT, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); mbox->h2fq.elem_cnt = readl(OCTEP_CTRL_MBOX_H2FQ_ELEM_CNT_OFFSET(mbox->barmem)); @@ -248,7 +238,6 @@ int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) writeq(OCTEP_CTRL_MBOX_STATUS_INVALID, OCTEP_CTRL_MBOX_INFO_HOST_STATUS_OFFSET(mbox->barmem)); - writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION_OFFSET(mbox->barmem)); pr_info("Octep ctrl mbox : Uninit successful.\n"); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index f2af5ebffcf1..e020c81f3455 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -30,7 +30,6 @@ MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl); MODULE_AUTHOR("Veerasenareddy Burru "); MODULE_DESCRIPTION(OCTEP_DRV_STRING); MODULE_LICENSE("GPL"); -MODULE_VERSION(OCTEP_DRV_VERSION_STR); /** * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. 
@@ -950,7 +949,6 @@ int octep_device_setup(struct octep_device *oct) /* Initialize control mbox */ ctrl_mbox = &oct->ctrl_mbox; - ctrl_mbox->version = OCTEP_DRV_VERSION; ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf); ret = octep_ctrl_mbox_init(ctrl_mbox); if (ret) { diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h index 520f2c3664f9..025626a61383 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -12,15 +12,6 @@ #include "octep_rx.h" #include "octep_ctrl_mbox.h" -#define OCTEP_DRV_VERSION_MAJOR 1 -#define OCTEP_DRV_VERSION_MINOR 0 -#define OCTEP_DRV_VERSION_VARIANT 0 - -#define OCTEP_DRV_VERSION ((OCTEP_DRV_VERSION_MAJOR << 16) + \ - (OCTEP_DRV_VERSION_MINOR << 8) + \ - OCTEP_DRV_VERSION_VARIANT) - -#define OCTEP_DRV_VERSION_STR "1.0.0" #define OCTEP_DRV_NAME "octeon_ep" #define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver" -- cgit v1.2.3 From 69fd055957a02309ffdc23d887a01988b6e5bab1 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Sat, 16 Apr 2022 01:30:12 +0200 Subject: net: dsa: qca8k: drop MTU tracking from qca8k_priv DSA set the CPU port based on the largest MTU of all the slave ports. Based on this we can drop the MTU array from qca8k_priv and set the port_change_mtu logic on DSA changing MTU of the CPU port as the switch have a global MTU settingfor each port. Signed-off-by: Ansuel Smith Reviewed-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 26 +++++++++----------------- drivers/net/dsa/qca8k.h | 1 - 2 files changed, 9 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index d3ed0a7f8077..4e27d9803a5f 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -2367,16 +2367,18 @@ static int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct qca8k_priv *priv = ds->priv; - int i, mtu = 0; - priv->port_mtu[port] = new_mtu; - - for (i = 0; i < QCA8K_NUM_PORTS; i++) - if (priv->port_mtu[i] > mtu) - mtu = priv->port_mtu[i]; + /* We have only have a general MTU setting. + * DSA always set the CPU port's MTU to the largest MTU of the slave + * ports. + * Setting MTU just for the CPU port is sufficient to correctly set a + * value for every port. + */ + if (!dsa_is_cpu_port(ds, port)) + return 0; /* Include L2 header / FCS length */ - return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN); + return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); } static int @@ -3033,16 +3035,6 @@ qca8k_setup(struct dsa_switch *ds) QCA8K_PORT_HOL_CTRL1_WRED_EN, mask); } - - /* Set initial MTU for every port. - * We have only have a general MTU setting. So track - * every port and set the max across all port. 
- * Set per port MTU to 1500 as the MTU change function - * will add the overhead and if its set to 1518 then it - * will apply the overhead again and we will end up with - * MTU of 1536 instead of 1518 - */ - priv->port_mtu[i] = ETH_DATA_LEN; } /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */ diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index f375627174c8..562d75997e55 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -398,7 +398,6 @@ struct qca8k_priv { struct device *dev; struct dsa_switch_ops ops; struct gpio_desc *reset_gpio; - unsigned int port_mtu[QCA8K_NUM_PORTS]; struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ struct qca8k_mgmt_eth_data mgmt_eth_data; struct qca8k_mib_eth_data mib_eth_data; -- cgit v1.2.3 From 2b8fd87af7f156942971789abac8ee2bb60c03bc Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Sat, 16 Apr 2022 01:30:13 +0200 Subject: net: dsa: qca8k: drop port_sts from qca8k_priv Port_sts is a thing of the past for this driver. It was something present on the initial implementation of this driver and parts of the original struct were dropped over time. Using an array of int to store if a port is enabled or not to handle PM operation seems overkill. Switch and use a simple u8 to store the port status where each bit correspond to a port. (bit is set port is enabled, bit is not set, port is disabled) Also add some comments to better describe why we need to track port status. Signed-off-by: Ansuel Smith Reviewed-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 15 +++++++++------ drivers/net/dsa/qca8k.h | 9 ++++----- 2 files changed, 13 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 4e27d9803a5f..766db0d43092 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -2346,7 +2346,7 @@ qca8k_port_enable(struct dsa_switch *ds, int port, struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; qca8k_port_set_status(priv, port, 1); - priv->port_sts[port].enabled = 1; + priv->port_enabled_map |= BIT(port); if (dsa_is_user_port(ds, port)) phy_support_asym_pause(phy); @@ -2360,7 +2360,7 @@ qca8k_port_disable(struct dsa_switch *ds, int port) struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; qca8k_port_set_status(priv, port, 0); - priv->port_sts[port].enabled = 0; + priv->port_enabled_map &= ~BIT(port); } static int @@ -3235,13 +3235,16 @@ static void qca8k_sw_shutdown(struct mdio_device *mdiodev) static void qca8k_set_pm(struct qca8k_priv *priv, int enable) { - int i; + int port; - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - if (!priv->port_sts[i].enabled) + for (port = 0; port < QCA8K_NUM_PORTS; port++) { + /* Do not enable on resume if the port was + * disabled before. + */ + if (!(priv->port_enabled_map & BIT(port))) continue; - qca8k_port_set_status(priv, i, enable); + qca8k_port_set_status(priv, port, enable); } } diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 562d75997e55..12d8d090298b 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -324,10 +324,6 @@ enum qca8k_mid_cmd { QCA8K_MIB_CAST = 3, }; -struct ar8xxx_port_status { - int enabled; -}; - struct qca8k_match_data { u8 id; bool reduced_package; @@ -388,11 +384,14 @@ struct qca8k_priv { u8 mirror_rx; u8 mirror_tx; u8 lag_hash_mode; + /* Each bit correspond to a port. This switch can support a max of 7 port. + * Bit 1: port enabled. Bit 0: port disabled. 
+ */ + u8 port_enabled_map; bool legacy_phy_port_mapping; struct qca8k_ports_config ports_config; struct regmap *regmap; struct mii_bus *bus; - struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; struct dsa_switch *ds; struct mutex reg_mutex; struct device *dev; -- cgit v1.2.3 From 8255212e4130bd2dc1463286a3dddb74797bbdc1 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Sat, 16 Apr 2022 01:30:14 +0200 Subject: net: dsa: qca8k: rework and simplify mdiobus logic In an attempt to reduce qca8k_priv space, rework and simplify mdiobus logic. We now declare a mdiobus instead of relying on DSA phy_read/write even if a mdio node is not present. This is all to make the qca8k ops static and not switch specific. With a legacy implementation where port doesn't have a phy map declared in the dts with a mdio node, we declare a 'qca8k-legacy' mdiobus. The conversion logic is used as legacy read and write ops are used instead of the internal one. Also drop the legacy_phy_port_mapping as we now declare mdiobus with ops that already address the workaround. Signed-off-by: Ansuel Smith Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 95 +++++++++++++++---------------------------------- drivers/net/dsa/qca8k.h | 1 - 2 files changed, 29 insertions(+), 67 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 766db0d43092..24d57083ee2c 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1291,83 +1291,63 @@ qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) } static int -qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) +qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data) { - struct qca8k_priv *priv = ds->priv; - int ret; - - /* Check if the legacy mapping should be used and the - * port is not correctly mapped to the right PHY in the - * devicetree - */ - if (priv->legacy_phy_port_mapping) - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - /* Use mdio Ethernet when available, fallback to legacy one on error */ - ret = qca8k_phy_eth_command(priv, false, port, regnum, 0); - if (!ret) - return ret; - - return qca8k_mdio_write(priv, port, regnum, data); + return qca8k_internal_mdio_write(slave_bus, port, regnum, data); } static int -qca8k_phy_read(struct dsa_switch *ds, int port, int regnum) +qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum) { - struct qca8k_priv *priv = ds->priv; - int ret; + port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - /* Check if the legacy mapping should be used and the - * port is not correctly mapped to the right PHY in the - * devicetree - */ - if (priv->legacy_phy_port_mapping) - port = qca8k_port_to_phy(port) % PHY_MAX_ADDR; - - /* Use mdio Ethernet when available, fallback to legacy one on error */ - ret = qca8k_phy_eth_command(priv, true, port, regnum, 0); - if (ret >= 0) - return ret; - - ret = qca8k_mdio_read(priv, port, regnum); - - if (ret < 0) - return 0xffff; - - return ret; + return qca8k_internal_mdio_read(slave_bus, port, regnum); } static int -qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio) +qca8k_mdio_register(struct qca8k_priv *priv) { struct dsa_switch *ds = priv->ds; + struct device_node *mdio; struct mii_bus *bus; bus = devm_mdiobus_alloc(ds->dev); - if (!bus) return -ENOMEM; bus->priv = (void *)priv; - bus->name = "qca8k slave mii"; - bus->read = qca8k_internal_mdio_read; - bus->write = qca8k_internal_mdio_write; - 
snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", - ds->index); - bus->parent = ds->dev; bus->phy_mask = ~ds->phys_mii_mask; - ds->slave_mii_bus = bus; - return devm_of_mdiobus_register(priv->dev, bus, mdio); + /* Check if the devicetree declare the port:phy mapping */ + mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); + if (of_device_is_available(mdio)) { + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ds->index); + bus->name = "qca8k slave mii"; + bus->read = qca8k_internal_mdio_read; + bus->write = qca8k_internal_mdio_write; + return devm_of_mdiobus_register(priv->dev, bus, mdio); + } + + /* If a mapping can't be found the legacy mapping is used, + * using the qca8k_port_to_phy function + */ + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", + ds->dst->index, ds->index); + bus->name = "qca8k-legacy slave mii"; + bus->read = qca8k_legacy_mdio_read; + bus->write = qca8k_legacy_mdio_write; + return devm_mdiobus_register(priv->dev, bus); } static int qca8k_setup_mdio_bus(struct qca8k_priv *priv) { u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg; - struct device_node *ports, *port, *mdio; + struct device_node *ports, *port; phy_interface_t mode; int err; @@ -1429,24 +1409,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv) QCA8K_MDIO_MASTER_EN); } - /* Check if the devicetree declare the port:phy mapping */ - mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); - if (of_device_is_available(mdio)) { - err = qca8k_mdio_register(priv, mdio); - if (err) - of_node_put(mdio); - - return err; - } - - /* If a mapping can't be found the legacy mapping is used, - * using the qca8k_port_to_phy function - */ - priv->legacy_phy_port_mapping = true; - priv->ops.phy_read = qca8k_phy_read; - priv->ops.phy_write = qca8k_phy_write; - - return 0; + return qca8k_mdio_register(priv); } static int diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 12d8d090298b..8bbe36f135b5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -388,7 +388,6 @@ struct qca8k_priv { * Bit 1: port enabled. Bit 0: port disabled. */ u8 port_enabled_map; - bool legacy_phy_port_mapping; struct qca8k_ports_config ports_config; struct regmap *regmap; struct mii_bus *bus; -- cgit v1.2.3 From 2349b83a2486c55b9dd225326f0172a84a43c5e4 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Sat, 16 Apr 2022 01:30:15 +0200 Subject: net: dsa: qca8k: drop dsa_switch_ops from qca8k_priv Now that dsa_switch_ops is not switch specific anymore, we can drop it from qca8k_priv and use the static ops directly for the dsa_switch pointer. Signed-off-by: Ansuel Smith Reviewed-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca8k.c | 3 +-- drivers/net/dsa/qca8k.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 24d57083ee2c..ef8d686de609 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -3157,8 +3157,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) priv->ds->dev = &mdiodev->dev; priv->ds->num_ports = QCA8K_NUM_PORTS; priv->ds->priv = priv; - priv->ops = qca8k_switch_ops; - priv->ds->ops = &priv->ops; + priv->ds->ops = &qca8k_switch_ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 8bbe36f135b5..04408e11402a 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -394,7 +394,6 @@ struct qca8k_priv { struct dsa_switch *ds; struct mutex reg_mutex; struct device *dev; - struct dsa_switch_ops ops; struct gpio_desc *reset_gpio; struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */ struct qca8k_mgmt_eth_data mgmt_eth_data; -- cgit v1.2.3 From 6cfc03b602200c5cbbd8d906fd905547814e83df Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Sat, 16 Apr 2022 01:30:16 +0200 Subject: net: dsa: qca8k: correctly handle mdio read error Restore original way to handle mdio read error by returning 0xffff. This was wrongly changed when the internal_mdio_read was introduced, now that both legacy and internal use the same function, make sure that they behave the same way. Fixes: ce062a0adbfe ("net: dsa: qca8k: fix kernel panic with legacy mdio mapping") Signed-off-by: Ansuel Smith Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index ef8d686de609..4fb1486795c4 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1287,7 +1287,12 @@ qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) if (ret >= 0) return ret; - return qca8k_mdio_read(priv, phy, regnum); + ret = qca8k_mdio_read(priv, phy, regnum); + + if (ret < 0) + return 0xffff; + + return ret; } static int -- cgit v1.2.3 From 8d1af50842bf2774f4edc57054206e909117469b Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Sat, 16 Apr 2022 01:30:17 +0200 Subject: net: dsa: qca8k: unify bus id naming with legacy and OF mdio bus Add support for multiple switch with OF mdio bus declaration. Unify the bus id naming and use the same logic for both legacy and OF mdio bus. Signed-off-by: Ansuel Smith Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca8k.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 4fb1486795c4..2727d3169c25 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1323,6 +1323,8 @@ qca8k_mdio_register(struct qca8k_priv *priv) return -ENOMEM; bus->priv = (void *)priv; + snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", + ds->dst->index, ds->index); bus->parent = ds->dev; bus->phy_mask = ~ds->phys_mii_mask; ds->slave_mii_bus = bus; @@ -1330,7 +1332,6 @@ qca8k_mdio_register(struct qca8k_priv *priv) /* Check if the devicetree declare the port:phy mapping */ mdio = of_get_child_by_name(priv->dev->of_node, "mdio"); if (of_device_is_available(mdio)) { - snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ds->index); bus->name = "qca8k slave mii"; bus->read = qca8k_internal_mdio_read; bus->write = qca8k_internal_mdio_write; @@ -1340,8 +1341,6 @@ qca8k_mdio_register(struct qca8k_priv *priv) /* If a mapping can't be found the legacy mapping is used, * using the qca8k_port_to_phy function */ - snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d", - ds->dst->index, ds->index); bus->name = "qca8k-legacy slave mii"; bus->read = qca8k_legacy_mdio_read; bus->write = qca8k_legacy_mdio_write; -- cgit v1.2.3 From bac62191a3d4dddc4dbfddb4187d4952380c29c7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:29 +0300 Subject: mlxsw: spectrum: Allow lane to start from non-zero index So far, the lane index always started from zero. That is not true for modular systems with gearbox-equipped linecards. Loose the check so the lanes can start from non-zero index. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 684910ca7cf4..120880fad7f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -486,6 +486,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, { char pmlp_pl[MLXSW_REG_PMLP_LEN]; bool separate_rxtx; + u8 first_lane; u8 module; u8 width; int err; @@ -498,6 +499,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); width = mlxsw_reg_pmlp_width_get(pmlp_pl); separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); + first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); if (width && !is_power_of_2(width)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n", @@ -518,7 +520,7 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, local_port); return -EINVAL; } - if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) { + if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n", local_port); return -EINVAL; -- cgit v1.2.3 From d3ad2d88209ff8c4ec6fbcf21be7f85104767cdd Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:30 +0300 Subject: mlxsw: spectrum: Allocate port mapping array of structs instead of pointers Instead of array of pointers to port mapping structures, allocate the array of structures directly. 
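In short, the per-port kmemdup() of a pointer array is replaced by one flat array of structures, where an entry with width == 0 simply means "no module mapped". A minimal sketch of the before/after allocation pattern (names taken from the hunks below, all other context trimmed for illustration):

	/* before: array of pointers, one kmemdup() per port that has a module */
	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping *),
					 GFP_KERNEL);
	/* ... per port ... */
	mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, sizeof(port_mapping),
					    GFP_KERNEL);

	/* after: one flat array of structures; no per-entry allocation,
	 * and the NULL checks turn into a width check
	 */
	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping),
					 GFP_KERNEL);
	/* ... per port ... */
	port_mapping = &mlxsw_sp->port_mapping[i];
	if (!port_mapping->width)
		continue;
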
Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 32 +++++++------------------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 +- 2 files changed, 9 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 120880fad7f8..55b97ccafd45 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1891,8 +1891,8 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) goto err_cpu_port_create; for (i = 1; i < max_ports; i++) { - port_mapping = mlxsw_sp->port_mapping[i]; - if (!port_mapping) + port_mapping = &mlxsw_sp->port_mapping[i]; + if (!port_mapping->width) continue; err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); if (err) @@ -1914,12 +1914,12 @@ err_cpu_port_create: static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); - struct mlxsw_sp_port_mapping port_mapping; + struct mlxsw_sp_port_mapping *port_mapping; int i; int err; mlxsw_sp->port_mapping = kcalloc(max_ports, - sizeof(struct mlxsw_sp_port_mapping *), + sizeof(struct mlxsw_sp_port_mapping), GFP_KERNEL); if (!mlxsw_sp->port_mapping) return -ENOMEM; @@ -1928,36 +1928,20 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) if (mlxsw_core_port_is_xm(mlxsw_sp->core, i)) continue; - err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); + port_mapping = &mlxsw_sp->port_mapping[i]; + err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping); if (err) goto err_port_module_info_get; - if (!port_mapping.width) - continue; - - mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping, - sizeof(port_mapping), - GFP_KERNEL); - if (!mlxsw_sp->port_mapping[i]) { - err = -ENOMEM; - goto err_port_module_info_dup; - } } return 0; err_port_module_info_get: -err_port_module_info_dup: - for (i--; i >= 1; i--) - kfree(mlxsw_sp->port_mapping[i]); kfree(mlxsw_sp->port_mapping); return err; } static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) { - int i; - - for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) - kfree(mlxsw_sp->port_mapping[i]); kfree(mlxsw_sp->port_mapping); } @@ -2007,8 +1991,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < count; i++) { u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - port_mapping = mlxsw_sp->port_mapping[local_port]; - if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) + port_mapping = &mlxsw_sp->port_mapping[local_port]; + if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port)) continue; mlxsw_sp_port_create(mlxsw_sp, local_port, false, port_mapping); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 20588e699588..68f71e77b5c7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -164,7 +164,7 @@ struct mlxsw_sp { unsigned char base_mac[ETH_ALEN]; const unsigned char *mac_mask; struct mlxsw_sp_upper *lags; - struct mlxsw_sp_port_mapping **port_mapping; + struct mlxsw_sp_port_mapping *port_mapping; struct rhashtable sample_trigger_ht; struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; -- cgit v1.2.3 From ebf0c53417319ff568d9b6238a60ba27395385f3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 
09:42:31 +0300 Subject: mlxsw: reg: Add Ports Mapping Event Configuration Register The PMECR register is used to enable/disable event triggering in case of local port mapping change. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 64 +++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index b8a236872fea..7b51a63d23c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5875,6 +5875,69 @@ static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module, mlxsw_reg_pmtdb_num_ports_set(payload, num_ports); } +/* PMECR - Ports Mapping Event Configuration Register + * -------------------------------------------------- + * The PMECR register is used to enable/disable event triggering + * in case of local port mapping change. + */ +#define MLXSW_REG_PMECR_ID 0x501B +#define MLXSW_REG_PMECR_LEN 0x20 + +MLXSW_REG_DEFINE(pmecr, MLXSW_REG_PMECR_ID, MLXSW_REG_PMECR_LEN); + +/* reg_pmecr_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32_LP(reg, pmecr, 0x00, 16, 0x00, 12); + +/* reg_pmecr_ee + * Event update enable. If this bit is set, event generation will be updated + * based on the e field. Only relevant on Set operations. + * Access: WO + */ +MLXSW_ITEM32(reg, pmecr, ee, 0x04, 30, 1); + +/* reg_pmecr_eswi + * Software ignore enable bit. If this bit is set, the value of swi is used. + * If this bit is clear, the value of swi is ignored. + * Only relevant on Set operations. + * Access: WO + */ +MLXSW_ITEM32(reg, pmecr, eswi, 0x04, 24, 1); + +/* reg_pmecr_swi + * Software ignore. If this bit is set, the device shouldn't generate events + * in case of PMLP SET operation but only upon self local port mapping change + * (if applicable according to e configuration). This is supplementary + * configuration on top of e value. + * Access: RW + */ +MLXSW_ITEM32(reg, pmecr, swi, 0x04, 8, 1); + +enum mlxsw_reg_pmecr_e { + MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT, + MLXSW_REG_PMECR_E_GENERATE_EVENT, + MLXSW_REG_PMECR_E_GENERATE_SINGLE_EVENT, +}; + +/* reg_pmecr_e + * Event generation on local port mapping change. + * Access: RW + */ +MLXSW_ITEM32(reg, pmecr, e, 0x04, 0, 2); + +static inline void mlxsw_reg_pmecr_pack(char *payload, u16 local_port, + enum mlxsw_reg_pmecr_e e) +{ + MLXSW_REG_ZERO(pmecr, payload); + mlxsw_reg_pmecr_local_port_set(payload, local_port); + mlxsw_reg_pmecr_e_set(payload, e); + mlxsw_reg_pmecr_ee_set(payload, true); + mlxsw_reg_pmecr_swi_set(payload, true); + mlxsw_reg_pmecr_eswi_set(payload, true); +} + /* PMPE - Port Module Plug/Unplug Event Register * --------------------------------------------- * This register reports any operational status change of a module. @@ -12678,6 +12741,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pmaos), MLXSW_REG(pplr), MLXSW_REG(pmtdb), + MLXSW_REG(pmecr), MLXSW_REG(pmpe), MLXSW_REG(pddr), MLXSW_REG(pmmp), -- cgit v1.2.3 From adc6462376b1a22d486ab5f6e624ba9528a380da Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:32 +0300 Subject: mlxsw: Narrow the critical section of devl_lock during ports creation/removal No need to hold the lock for alloc and freecpu. So narrow the critical section. 
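A rough sketch of the narrowed section in mlxsw_sp_ports_create() — allocation stays outside the lock, only port object creation is inside; error unwinding is simplified here and the exact hunks are in the diff below:

	static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
	{
		unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
		struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
		struct mlxsw_sp_port_mapping *port_mapping;
		int i, err;

		/* the ports array is allocated without holding the devlink lock */
		mlxsw_sp->ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_port *),
					  GFP_KERNEL);
		if (!mlxsw_sp->ports)
			return -ENOMEM;

		devl_lock(devlink);	/* critical section now starts here */
		err = mlxsw_sp_cpu_port_create(mlxsw_sp);
		if (err)
			goto err_unlock;
		for (i = 1; i < max_ports; i++) {
			port_mapping = &mlxsw_sp->port_mapping[i];
			if (!port_mapping->width)
				continue;
			err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
			if (err)
				goto err_unlock; /* real code also removes ports created so far */
		}
		devl_unlock(devlink);	/* and ends here, before returning */
		return 0;

	err_unlock:
		devl_unlock(devlink);
		kfree(mlxsw_sp->ports);
		mlxsw_sp->ports = NULL;
		return err;
	}
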
Follow-up patch is going to benefit from this by adding more code to the functions which will be out of the critical as well. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 13 +++++++------ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 14 +++++++------- 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index eb906b73b4e2..ee1cb1b81669 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -328,6 +328,7 @@ static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module) static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core); + struct devlink *devlink = priv_to_devlink(mlxsw_m->core); u8 last_module = max_ports; int i; int err; @@ -356,6 +357,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) } /* Create port objects for each valid entry */ + devl_lock(devlink); for (i = 0; i < mlxsw_m->max_ports; i++) { if (mlxsw_m->module_to_port[i] > 0 && !mlxsw_core_port_is_xm(mlxsw_m->core, i)) { @@ -366,6 +368,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) goto err_module_to_port_create; } } + devl_unlock(devlink); return 0; @@ -375,6 +378,7 @@ err_module_to_port_create: mlxsw_m_port_remove(mlxsw_m, mlxsw_m->module_to_port[i]); } + devl_unlock(devlink); i = max_ports; err_module_to_port_map: for (i--; i > 0; i--) @@ -387,8 +391,10 @@ err_module_to_port_alloc: static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m) { + struct devlink *devlink = priv_to_devlink(mlxsw_m->core); int i; + devl_lock(devlink); for (i = 0; i < mlxsw_m->max_ports; i++) { if (mlxsw_m->module_to_port[i] > 0) { mlxsw_m_port_remove(mlxsw_m, @@ -396,6 +402,7 @@ static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m) mlxsw_m_port_module_unmap(mlxsw_m, i); } } + devl_unlock(devlink); kfree(mlxsw_m->module_to_port); kfree(mlxsw_m->ports); @@ -424,7 +431,6 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core, struct netlink_ext_ack *extack) { struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core); - struct devlink *devlink = priv_to_devlink(mlxsw_core); int err; mlxsw_m->core = mlxsw_core; @@ -440,9 +446,7 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core, return err; } - devl_lock(devlink); err = mlxsw_m_ports_create(mlxsw_m); - devl_unlock(devlink); if (err) { dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n"); return err; @@ -454,11 +458,8 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core, static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core); - struct devlink *devlink = priv_to_devlink(mlxsw_core); - devl_lock(devlink); mlxsw_m_ports_remove(mlxsw_m); - devl_unlock(devlink); } static const struct mlxsw_config_profile mlxsw_m_config_profile; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 55b97ccafd45..c26c160744d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1863,12 +1863,15 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) { + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); int i; + devl_lock(devlink); for (i = 1; i < 
mlxsw_core_max_ports(mlxsw_sp->core); i++) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); mlxsw_sp_cpu_port_remove(mlxsw_sp); + devl_unlock(devlink); kfree(mlxsw_sp->ports); mlxsw_sp->ports = NULL; } @@ -1876,6 +1879,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_port_mapping *port_mapping; size_t alloc_size; int i; @@ -1886,6 +1890,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp->ports) return -ENOMEM; + devl_lock(devlink); err = mlxsw_sp_cpu_port_create(mlxsw_sp); if (err) goto err_cpu_port_create; @@ -1898,6 +1903,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) if (err) goto err_port_create; } + devl_unlock(devlink); return 0; err_port_create: @@ -1906,6 +1912,7 @@ err_port_create: mlxsw_sp_port_remove(mlxsw_sp, i); mlxsw_sp_cpu_port_remove(mlxsw_sp); err_cpu_port_create: + devl_unlock(devlink); kfree(mlxsw_sp->ports); mlxsw_sp->ports = NULL; return err; @@ -2805,7 +2812,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - struct devlink *devlink = priv_to_devlink(mlxsw_core); int err; mlxsw_sp->core = mlxsw_core; @@ -2966,9 +2972,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_sample_trigger_init; } - devl_lock(devlink); err = mlxsw_sp_ports_create(mlxsw_sp); - devl_unlock(devlink); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); goto err_ports_create; @@ -3149,12 +3153,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - struct devlink *devlink = priv_to_devlink(mlxsw_core); - devl_lock(devlink); mlxsw_sp_ports_remove(mlxsw_sp); - devl_unlock(devlink); - rhashtable_destroy(&mlxsw_sp->sample_trigger_ht); mlxsw_sp_port_module_info_fini(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); -- cgit v1.2.3 From b0ec003e9a906431073628e7ecc70a84f0e54074 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:33 +0300 Subject: mlxsw: spectrum: Introduce port mapping change event processing Register PMLPE trap and process the port mapping changes delivered by it by creating related ports. Note that this happens after provisioning. The INI of the linecard is processed and merged by FW. PMLPE is generated for each port. Process this mapping change. Layout of PMLPE is the same as layout of PMLP. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 166 +++++++++++++++++++++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 7 ++ drivers/net/ethernet/mellanox/mlxsw/trap.h | 2 + 3 files changed, 166 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c26c160744d0..c3457a216642 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -481,21 +481,16 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) } static int -mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, - struct mlxsw_sp_port_mapping *port_mapping) +mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, + u16 local_port, char *pmlp_pl, + struct mlxsw_sp_port_mapping *port_mapping) { - char pmlp_pl[MLXSW_REG_PMLP_LEN]; bool separate_rxtx; u8 first_lane; u8 module; u8 width; - int err; int i; - mlxsw_reg_pmlp_pack(pmlp_pl, local_port); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); - if (err) - return err; module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); width = mlxsw_reg_pmlp_width_get(pmlp_pl); separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); @@ -534,6 +529,21 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, return 0; } +static int +mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port, + struct mlxsw_sp_port_mapping *port_mapping) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, + pmlp_pl, port_mapping); +} + static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, const struct mlxsw_sp_port_mapping *port_mapping) @@ -1861,13 +1871,121 @@ static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port) return mlxsw_sp->ports[local_port] != NULL; } +static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp, + u16 local_port, bool enable) +{ + char pmecr_pl[MLXSW_REG_PMECR_LEN]; + + mlxsw_reg_pmecr_pack(pmecr_pl, local_port, + enable ? 
MLXSW_REG_PMECR_E_GENERATE_EVENT : + MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl); +} + +struct mlxsw_sp_port_mapping_event { + struct list_head list; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; +}; + +static void mlxsw_sp_port_mapping_events_work(struct work_struct *work) +{ + struct mlxsw_sp_port_mapping_event *event, *next_event; + struct mlxsw_sp_port_mapping_events *events; + struct mlxsw_sp_port_mapping port_mapping; + struct mlxsw_sp *mlxsw_sp; + struct devlink *devlink; + LIST_HEAD(event_queue); + u16 local_port; + int err; + + events = container_of(work, struct mlxsw_sp_port_mapping_events, work); + mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events); + devlink = priv_to_devlink(mlxsw_sp->core); + + spin_lock_bh(&events->queue_lock); + list_splice_init(&events->queue, &event_queue); + spin_unlock_bh(&events->queue_lock); + + list_for_each_entry_safe(event, next_event, &event_queue, list) { + local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl); + err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port, + event->pmlp_pl, &port_mapping); + if (err) + goto out; + + if (WARN_ON_ONCE(!port_mapping.width)) + goto out; + + devl_lock(devlink); + + if (!mlxsw_sp_port_created(mlxsw_sp, local_port)) + mlxsw_sp_port_create(mlxsw_sp, local_port, + false, &port_mapping); + else + WARN_ON_ONCE(1); + + devl_unlock(devlink); + + mlxsw_sp->port_mapping[local_port] = port_mapping; + +out: + kfree(event); + } +} + +static void +mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg, + char *pmlp_pl, void *priv) +{ + struct mlxsw_sp_port_mapping_events *events; + struct mlxsw_sp_port_mapping_event *event; + struct mlxsw_sp *mlxsw_sp = priv; + u16 local_port; + + local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl); + if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port))) + return; + + events = &mlxsw_sp->port_mapping_events; + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl)); + spin_lock(&events->queue_lock); + list_add_tail(&event->list, &events->queue); + spin_unlock(&events->queue_lock); + mlxsw_core_schedule_work(&events->work); +} + +static void +__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_port_mapping_event *event, *next_event; + struct mlxsw_sp_port_mapping_events *events; + + events = &mlxsw_sp->port_mapping_events; + + /* Caller needs to make sure that no new event is going to appear. 
*/ + cancel_work_sync(&events->work); + list_for_each_entry_safe(event, next_event, &events->queue, list) { + list_del(&event->list); + kfree(event); + } +} + static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) { + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); int i; + for (i = 1; i < max_ports; i++) + mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); + /* Make sure all scheduled events are processed */ + __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); + devl_lock(devlink); - for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) + for (i = 1; i < max_ports; i++) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); mlxsw_sp_cpu_port_remove(mlxsw_sp); @@ -1880,6 +1998,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + struct mlxsw_sp_port_mapping_events *events; struct mlxsw_sp_port_mapping *port_mapping; size_t alloc_size; int i; @@ -1890,6 +2009,17 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp->ports) return -ENOMEM; + events = &mlxsw_sp->port_mapping_events; + INIT_LIST_HEAD(&events->queue); + spin_lock_init(&events->queue_lock); + INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work); + + for (i = 1; i < max_ports; i++) { + err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true); + if (err) + goto err_event_enable; + } + devl_lock(devlink); err = mlxsw_sp_cpu_port_create(mlxsw_sp); if (err) @@ -1910,9 +2040,15 @@ err_port_create: for (i--; i >= 1; i--) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + i = max_ports; mlxsw_sp_cpu_port_remove(mlxsw_sp); err_cpu_port_create: devl_unlock(devlink); +err_event_enable: + for (i--; i >= 1; i--) + mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false); + /* Make sure all scheduled events are processed */ + __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp); kfree(mlxsw_sp->ports); mlxsw_sp->ports = NULL; return err; @@ -2074,6 +2210,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, err_port_split_create: mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); + return err; } @@ -2294,6 +2431,11 @@ static const struct mlxsw_listener mlxsw_sp1_listener[] = { MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0), }; +static const struct mlxsw_listener mlxsw_sp2_listener[] = { + /* Events */ + MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE), +}; + static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); @@ -3085,6 +3227,8 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; + mlxsw_sp->listeners = mlxsw_sp2_listener; + mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3115,6 +3259,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; + mlxsw_sp->listeners = mlxsw_sp2_listener; + mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); mlxsw_sp->lowest_shaper_bs = 
MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3145,6 +3291,8 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops; mlxsw_sp->router_ops = &mlxsw_sp2_router_ops; + mlxsw_sp->listeners = mlxsw_sp2_listener; + mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 68f71e77b5c7..928c3a63b6b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -150,6 +150,12 @@ struct mlxsw_sp_port_mapping { u8 lane; }; +struct mlxsw_sp_port_mapping_events { + struct list_head queue; + spinlock_t queue_lock; /* protects queue */ + struct work_struct work; +}; + struct mlxsw_sp_parsing { refcount_t parsing_depth_ref; u16 parsing_depth; @@ -165,6 +171,7 @@ struct mlxsw_sp { const unsigned char *mac_mask; struct mlxsw_sp_upper *lags; struct mlxsw_sp_port_mapping *port_mapping; + struct mlxsw_sp_port_mapping_events port_mapping_events; struct rhashtable sample_trigger_ht; struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 9e070ab3ed76..7405c400f09b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -133,6 +133,8 @@ enum mlxsw_event_trap_id { MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D, /* PTP Egress FIFO has a new entry */ MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E, + /* Port mapping change */ + MLXSW_TRAP_ID_PMLPE = 0x32E, }; #endif /* _MLXSW_TRAP_H */ -- cgit v1.2.3 From 505f524dc66093a5db5ef2fea8153d5b449d260d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:34 +0300 Subject: mlxsw: reg: Add Management DownStream Device Query Register The MDDQ register allows to query the DownStream device properties. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 144 ++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 7b51a63d23c1..1595b33ac519 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11492,6 +11492,149 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices, *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload); } +/* MDDQ - Management DownStream Device Query Register + * -------------------------------------------------- + * This register allows to query the DownStream device properties. The desired + * information is chosen upon the query_type field and is delivered by 32B + * of data blocks. + */ +#define MLXSW_REG_MDDQ_ID 0x9161 +#define MLXSW_REG_MDDQ_LEN 0x30 + +MLXSW_REG_DEFINE(mddq, MLXSW_REG_MDDQ_ID, MLXSW_REG_MDDQ_LEN); + +/* reg_mddq_sie + * Slot info event enable. + * When set to '1', each change in the slot_info.provisioned / sr_valid / + * active / ready will generate a DSDSC event. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1); + +enum mlxsw_reg_mddq_query_type { + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1, + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME = 3, +}; + +/* reg_mddq_query_type + * Access: Index + */ +MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8); + +/* reg_mddq_slot_index + * Slot index. 0 is reserved. + * Access: Index + */ +MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4); + +/* reg_mddq_slot_info_provisioned + * If set, the INI file is applied and the card is provisioned. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_provisioned, 0x10, 31, 1); + +/* reg_mddq_slot_info_sr_valid + * If set, Shift Register is valid (after being provisioned) and data + * can be sent from the switch ASIC to the line-card CPLD over Shift-Register. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_sr_valid, 0x10, 30, 1); + +enum mlxsw_reg_mddq_slot_info_ready { + MLXSW_REG_MDDQ_SLOT_INFO_READY_NOT_READY, + MLXSW_REG_MDDQ_SLOT_INFO_READY_READY, + MLXSW_REG_MDDQ_SLOT_INFO_READY_ERROR, +}; + +/* reg_mddq_slot_info_lc_ready + * If set, the LC is powered on, matching the INI version and a new FW + * version can be burnt (if necessary). + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_lc_ready, 0x10, 28, 2); + +/* reg_mddq_slot_info_active + * If set, the FW has completed the MDDC.device_enable command. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_active, 0x10, 27, 1); + +/* reg_mddq_slot_info_hw_revision + * Major user-configured version number of the current INI file. + * Valid only when active or ready are '1'. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_hw_revision, 0x14, 16, 16); + +/* reg_mddq_slot_info_ini_file_version + * User-configured version number of the current INI file. + * Valid only when active or lc_ready are '1'. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_ini_file_version, 0x14, 0, 16); + +/* reg_mddq_slot_info_card_type + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, slot_info_card_type, 0x18, 0, 8); + +static inline void +__mlxsw_reg_mddq_pack(char *payload, u8 slot_index, + enum mlxsw_reg_mddq_query_type query_type) +{ + MLXSW_REG_ZERO(mddq, payload); + mlxsw_reg_mddq_slot_index_set(payload, slot_index); + mlxsw_reg_mddq_query_type_set(payload, query_type); +} + +static inline void +mlxsw_reg_mddq_slot_info_pack(char *payload, u8 slot_index, bool sie) +{ + __mlxsw_reg_mddq_pack(payload, slot_index, + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO); + mlxsw_reg_mddq_sie_set(payload, sie); +} + +static inline void +mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index, + bool *p_provisioned, bool *p_sr_valid, + enum mlxsw_reg_mddq_slot_info_ready *p_lc_ready, + bool *p_active, u16 *p_hw_revision, + u16 *p_ini_file_version, + u8 *p_card_type) +{ + *p_slot_index = mlxsw_reg_mddq_slot_index_get(payload); + *p_provisioned = mlxsw_reg_mddq_slot_info_provisioned_get(payload); + *p_sr_valid = mlxsw_reg_mddq_slot_info_sr_valid_get(payload); + *p_lc_ready = mlxsw_reg_mddq_slot_info_lc_ready_get(payload); + *p_active = mlxsw_reg_mddq_slot_info_active_get(payload); + *p_hw_revision = mlxsw_reg_mddq_slot_info_hw_revision_get(payload); + *p_ini_file_version = mlxsw_reg_mddq_slot_info_ini_file_version_get(payload); + *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload); +} + +#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20 + +/* reg_mddq_slot_ascii_name + * Slot's ASCII name. 
+ * Access: RO + */ +MLXSW_ITEM_BUF(reg, mddq, slot_ascii_name, 0x10, + MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN); + +static inline void +mlxsw_reg_mddq_slot_name_pack(char *payload, u8 slot_index) +{ + __mlxsw_reg_mddq_pack(payload, slot_index, + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME); +} + +static inline void +mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name) +{ + mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name); +} + /* MFDE - Monitoring FW Debug Register * ----------------------------------- */ @@ -12811,6 +12954,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mtptpt), MLXSW_REG(mfgd), MLXSW_REG(mgpir), + MLXSW_REG(mddq), MLXSW_REG(mfde), MLXSW_REG(tngcr), MLXSW_REG(tnumt), -- cgit v1.2.3 From 5290a8ff2e11e11d4f41369ec66c3bf7fc2f4a09 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:35 +0300 Subject: mlxsw: reg: Add Management DownStream Device Control Register The MDDC register allows to control downstream devices and line cards. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 37 +++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1595b33ac519..31a91de61537 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11635,6 +11635,42 @@ mlxsw_reg_mddq_slot_name_unpack(const char *payload, char *slot_ascii_name) mlxsw_reg_mddq_slot_ascii_name_memcpy_from(payload, slot_ascii_name); } +/* MDDC - Management DownStream Device Control Register + * ---------------------------------------------------- + * This register allows to control downstream devices and line cards. + */ +#define MLXSW_REG_MDDC_ID 0x9163 +#define MLXSW_REG_MDDC_LEN 0x30 + +MLXSW_REG_DEFINE(mddc, MLXSW_REG_MDDC_ID, MLXSW_REG_MDDC_LEN); + +/* reg_mddc_slot_index + * Slot index. 0 is reserved. + * Access: Index + */ +MLXSW_ITEM32(reg, mddc, slot_index, 0x00, 0, 4); + +/* reg_mddc_rst + * Reset request. + * Access: OP + */ +MLXSW_ITEM32(reg, mddc, rst, 0x04, 29, 1); + +/* reg_mddc_device_enable + * When set, FW is the manager and allowed to program the downstream device. + * Access: RW + */ +MLXSW_ITEM32(reg, mddc, device_enable, 0x04, 28, 1); + +static inline void mlxsw_reg_mddc_pack(char *payload, u8 slot_index, bool rst, + bool device_enable) +{ + MLXSW_REG_ZERO(mddc, payload); + mlxsw_reg_mddc_slot_index_set(payload, slot_index); + mlxsw_reg_mddc_rst_set(payload, rst); + mlxsw_reg_mddc_device_enable_set(payload, device_enable); +} + /* MFDE - Monitoring FW Debug Register * ----------------------------------- */ @@ -12955,6 +12991,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mfgd), MLXSW_REG(mgpir), MLXSW_REG(mddq), + MLXSW_REG(mddc), MLXSW_REG(mfde), MLXSW_REG(tngcr), MLXSW_REG(tnumt), -- cgit v1.2.3 From 5bade5aa4afc914dac9086500e0bfabec0d3ffbe Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:36 +0300 Subject: mlxsw: reg: Add Management Binary Code Transfer Register The MBCT register allows to transfer binary INI codes from the host to the management FW by transferring it by chunks of maximum 1KB. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 122 ++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 31a91de61537..e41451028478 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11492,6 +11492,127 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices, *num_of_slots = mlxsw_reg_mgpir_num_of_slots_get(payload); } +/* MBCT - Management Binary Code Transfer Register + * ----------------------------------------------- + * This register allows to transfer binary codes from the host to + * the management FW by transferring it by chunks of maximum 1KB. + */ +#define MLXSW_REG_MBCT_ID 0x9120 +#define MLXSW_REG_MBCT_LEN 0x420 + +MLXSW_REG_DEFINE(mbct, MLXSW_REG_MBCT_ID, MLXSW_REG_MBCT_LEN); + +/* reg_mbct_slot_index + * Slot index. 0 is reserved. + * Access: Index + */ +MLXSW_ITEM32(reg, mbct, slot_index, 0x00, 0, 4); + +/* reg_mbct_data_size + * Actual data field size in bytes for the current data transfer. + * Access: WO + */ +MLXSW_ITEM32(reg, mbct, data_size, 0x04, 0, 11); + +enum mlxsw_reg_mbct_op { + MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE = 1, + MLXSW_REG_MBCT_OP_DATA_TRANSFER, /* Download */ + MLXSW_REG_MBCT_OP_ACTIVATE, + MLXSW_REG_MBCT_OP_CLEAR_ERRORS = 6, + MLXSW_REG_MBCT_OP_QUERY_STATUS, +}; + +/* reg_mbct_op + * Access: WO + */ +MLXSW_ITEM32(reg, mbct, op, 0x08, 28, 4); + +/* reg_mbct_last + * Indicates that the current data field is the last chunk of the INI. + * Access: WO + */ +MLXSW_ITEM32(reg, mbct, last, 0x08, 26, 1); + +/* reg_mbct_oee + * Opcode Event Enable. When set a BCTOE event will be sent once the opcode + * was executed and the fsm_state has changed. + * Access: WO + */ +MLXSW_ITEM32(reg, mbct, oee, 0x08, 25, 1); + +enum mlxsw_reg_mbct_status { + /* Partial data transfer completed successfully and ready for next + * data transfer. + */ + MLXSW_REG_MBCT_STATUS_PART_DATA = 2, + MLXSW_REG_MBCT_STATUS_LAST_DATA, + MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE, + /* Error - trying to erase INI while it being used. */ + MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE, + /* Last data transfer completed, applying magic pattern. */ + MLXSW_REG_MBCT_STATUS_ERASE_FAILED = 7, + MLXSW_REG_MBCT_STATUS_INI_ERROR, + MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED, + MLXSW_REG_MBCT_STATUS_ILLEGAL_OPERATION = 11, +}; + +/* reg_mbct_status + * Status. + * Access: RO + */ +MLXSW_ITEM32(reg, mbct, status, 0x0C, 24, 5); + +enum mlxsw_reg_mbct_fsm_state { + MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE = 5, + MLXSW_REG_MBCT_FSM_STATE_ERROR, +}; + +/* reg_mbct_fsm_state + * FSM state. + * Access: RO + */ +MLXSW_ITEM32(reg, mbct, fsm_state, 0x0C, 16, 4); + +#define MLXSW_REG_MBCT_DATA_LEN 1024 + +/* reg_mbct_data + * Up to 1KB of data. 
+ * Access: WO + */ +MLXSW_ITEM_BUF(reg, mbct, data, 0x20, MLXSW_REG_MBCT_DATA_LEN); + +static inline void mlxsw_reg_mbct_pack(char *payload, u8 slot_index, + enum mlxsw_reg_mbct_op op, bool oee) +{ + MLXSW_REG_ZERO(mbct, payload); + mlxsw_reg_mbct_slot_index_set(payload, slot_index); + mlxsw_reg_mbct_op_set(payload, op); + mlxsw_reg_mbct_oee_set(payload, oee); +} + +static inline void mlxsw_reg_mbct_dt_pack(char *payload, + u16 data_size, bool last, + const char *data) +{ + if (WARN_ON(data_size > MLXSW_REG_MBCT_DATA_LEN)) + return; + mlxsw_reg_mbct_data_size_set(payload, data_size); + mlxsw_reg_mbct_last_set(payload, last); + mlxsw_reg_mbct_data_memcpy_to(payload, data); +} + +static inline void +mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index, + enum mlxsw_reg_mbct_status *p_status, + enum mlxsw_reg_mbct_fsm_state *p_fsm_state) +{ + if (p_slot_index) + *p_slot_index = mlxsw_reg_mbct_slot_index_get(payload); + *p_status = mlxsw_reg_mbct_status_get(payload); + if (p_fsm_state) + *p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload); +} + /* MDDQ - Management DownStream Device Query Register * -------------------------------------------------- * This register allows to query the DownStream device properties. The desired @@ -12990,6 +13111,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mtptpt), MLXSW_REG(mfgd), MLXSW_REG(mgpir), + MLXSW_REG(mbct), MLXSW_REG(mddq), MLXSW_REG(mddc), MLXSW_REG(mfde), -- cgit v1.2.3 From b217127e5e4ee0ecfce7c5f84cfe082238123bda Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:37 +0300 Subject: mlxsw: core_linecards: Add line card objects and implement provisioning Introduce objects for line cards and an infrastructure around that. Use devlink_linecard_create/destroy() to register the line card with devlink core. Implement provisioning ops with a list of supported line cards. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- drivers/net/ethernet/mellanox/mlxsw/core.c | 19 + drivers/net/ethernet/mellanox/mlxsw/core.h | 46 + .../net/ethernet/mellanox/mlxsw/core_linecards.c | 929 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6 + drivers/net/ethernet/mellanox/mlxsw/trap.h | 4 + 6 files changed, 1006 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/core_linecards.c (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 196adeb33495..1a465fd5d8b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o core_acl_flex_keys.o \ - core_acl_flex_actions.o core_env.o + core_acl_flex_actions.o core_env.o \ + core_linecards.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index b13e0f8d232a..5e1855f752d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -82,6 +82,7 @@ struct mlxsw_core { struct mlxsw_res res; struct mlxsw_hwmon *hwmon; struct mlxsw_thermal *thermal; + struct mlxsw_linecards *linecards; struct mlxsw_core_port *ports; unsigned int max_ports; atomic_t active_ports_count; @@ -94,6 +95,17 @@ struct mlxsw_core { /* driver_priv has to be always the last item */ }; +struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->linecards; +} + +void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecards) +{ + mlxsw_core->linecards = linecards; +} + #define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40 static u64 mlxsw_ports_occ_get(void *priv) @@ -2145,6 +2157,10 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_fw_rev_validate; + err = mlxsw_linecards_init(mlxsw_core, mlxsw_bus_info); + if (err) + goto err_linecards_init; + err = mlxsw_core_health_init(mlxsw_core); if (err) goto err_health_init; @@ -2183,6 +2199,8 @@ err_thermal_init: err_hwmon_init: mlxsw_core_health_fini(mlxsw_core); err_health_init: + mlxsw_linecards_fini(mlxsw_core); +err_linecards_init: err_fw_rev_validate: if (!reload) mlxsw_core_params_unregister(mlxsw_core); @@ -2255,6 +2273,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, mlxsw_thermal_fini(mlxsw_core->thermal); mlxsw_hwmon_fini(mlxsw_core->hwmon); mlxsw_core_health_fini(mlxsw_core); + mlxsw_linecards_fini(mlxsw_core); if (!reload) mlxsw_core_params_unregister(mlxsw_core); mlxsw_emad_fini(mlxsw_core); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 16ee5e90973d..44c8a7888985 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -35,6 +35,11 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core); void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); +struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core); + +void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecard); + bool mlxsw_core_fw_rev_minor_subminor_validate(const struct 
mlxsw_fw_rev *rev, const struct mlxsw_fw_rev *req_rev); @@ -543,4 +548,45 @@ static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb) return (struct mlxsw_skb_cb *) skb->cb; } +struct mlxsw_linecards; + +enum mlxsw_linecard_status_event_type { + MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION, + MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION, +}; + +struct mlxsw_linecard { + u8 slot_index; + struct mlxsw_linecards *linecards; + struct devlink_linecard *devlink_linecard; + struct mutex lock; /* Locks accesses to the linecard structure */ + char name[MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN]; + char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */ + enum mlxsw_linecard_status_event_type status_event_type_to; + struct delayed_work status_event_to_dw; + u8 provisioned:1; + u16 hw_revision; + u16 ini_version; +}; + +struct mlxsw_linecard_types_info; + +struct mlxsw_linecards { + struct mlxsw_core *mlxsw_core; + const struct mlxsw_bus_info *bus_info; + u8 count; + struct mlxsw_linecard_types_info *types_info; + struct mlxsw_linecard linecards[]; +}; + +static inline struct mlxsw_linecard * +mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index) +{ + return &linecards->linecards[slot_index - 1]; +} + +int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *bus_info); +void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c new file mode 100644 index 000000000000..1401f6d34635 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -0,0 +1,929 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. 
All rights reserved */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" + +struct mlxsw_linecard_ini_file { + __le16 size; + union { + u8 data[0]; + struct { + __be16 hw_revision; + __be16 ini_version; + u8 __dontcare[3]; + u8 type; + u8 name[20]; + } format; + }; +}; + +struct mlxsw_linecard_types_info { + struct mlxsw_linecard_ini_file **ini_files; + unsigned int count; + size_t data_size; + char *data; +}; + +#define MLXSW_LINECARD_STATUS_EVENT_TO (10 * MSEC_PER_SEC) + +static void +mlxsw_linecard_status_event_to_schedule(struct mlxsw_linecard *linecard, + enum mlxsw_linecard_status_event_type status_event_type) +{ + cancel_delayed_work_sync(&linecard->status_event_to_dw); + linecard->status_event_type_to = status_event_type; + mlxsw_core_schedule_dw(&linecard->status_event_to_dw, + msecs_to_jiffies(MLXSW_LINECARD_STATUS_EVENT_TO)); +} + +static void +mlxsw_linecard_status_event_done(struct mlxsw_linecard *linecard, + enum mlxsw_linecard_status_event_type status_event_type) +{ + if (linecard->status_event_type_to == status_event_type) + cancel_delayed_work_sync(&linecard->status_event_to_dw); +} + +static const char * +mlxsw_linecard_types_lookup(struct mlxsw_linecards *linecards, u8 card_type) +{ + struct mlxsw_linecard_types_info *types_info; + struct mlxsw_linecard_ini_file *ini_file; + int i; + + types_info = linecards->types_info; + if (!types_info) + return NULL; + for (i = 0; i < types_info->count; i++) { + ini_file = linecards->types_info->ini_files[i]; + if (ini_file->format.type == card_type) + return ini_file->format.name; + } + return NULL; +} + +static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + int err; + + mlxsw_reg_mddq_slot_name_pack(mddq_pl, linecard->slot_index); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return ERR_PTR(err); + mlxsw_reg_mddq_slot_name_unpack(mddq_pl, linecard->name); + return linecard->name; +} + +static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) +{ + linecard->provisioned = false; + devlink_linecard_provision_fail(linecard->devlink_linecard); +} + +static int +mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, + u16 hw_revision, u16 ini_version) +{ + struct mlxsw_linecards *linecards = linecard->linecards; + const char *type; + + type = mlxsw_linecard_types_lookup(linecards, card_type); + mlxsw_linecard_status_event_done(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION); + if (!type) { + /* It is possible for a line card to be provisioned before + * driver initialization. Due to a missing INI bundle file + * or an outdated one, the queried card's type might not + * be recognized by the driver. In this case, try to query + * the card's name from the device. 
+ */ + type = mlxsw_linecard_type_name(linecard); + if (IS_ERR(type)) { + mlxsw_linecard_provision_fail(linecard); + return PTR_ERR(type); + } + } + linecard->provisioned = true; + linecard->hw_revision = hw_revision; + linecard->ini_version = ini_version; + devlink_linecard_provision_set(linecard->devlink_linecard, type); + return 0; +} + +static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard) +{ + mlxsw_linecard_status_event_done(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION); + linecard->provisioned = false; + devlink_linecard_provision_clear(linecard->devlink_linecard); +} + +static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards, + struct mlxsw_linecard *linecard, + const char *mddq_pl) +{ + enum mlxsw_reg_mddq_slot_info_ready ready; + bool provisioned, sr_valid, active; + u16 ini_version, hw_revision; + u8 slot_index, card_type; + int err = 0; + + mlxsw_reg_mddq_slot_info_unpack(mddq_pl, &slot_index, &provisioned, + &sr_valid, &ready, &active, + &hw_revision, &ini_version, + &card_type); + + if (linecard) { + if (WARN_ON(slot_index != linecard->slot_index)) + return -EINVAL; + } else { + if (WARN_ON(slot_index > linecards->count)) + return -EINVAL; + linecard = mlxsw_linecard_get(linecards, slot_index); + } + + mutex_lock(&linecard->lock); + + if (provisioned && linecard->provisioned != provisioned) { + err = mlxsw_linecard_provision_set(linecard, card_type, + hw_revision, ini_version); + if (err) + goto out; + } + + if (!provisioned && linecard->provisioned != provisioned) + mlxsw_linecard_provision_clear(linecard); + +out: + mutex_unlock(&linecard->lock); + return err; +} + +static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecards, + struct mlxsw_linecard *linecard) +{ + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + int err; + + mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, false); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return err; + + return mlxsw_linecard_status_process(linecards, linecard, mddq_pl); +} + +static const char * const mlxsw_linecard_status_event_type_name[] = { + [MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision", + [MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision", +}; + +static void mlxsw_linecard_status_event_to_work(struct work_struct *work) +{ + struct mlxsw_linecard *linecard = + container_of(work, struct mlxsw_linecard, + status_event_to_dw.work); + + mutex_lock(&linecard->lock); + dev_err(linecard->linecards->bus_info->dev, "linecard %u: Timeout reached waiting on %s status event", + linecard->slot_index, + mlxsw_linecard_status_event_type_name[linecard->status_event_type_to]); + mlxsw_linecard_provision_fail(linecard); + mutex_unlock(&linecard->lock); +} + +static int __mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard) +{ + dev_info(linecard->linecards->bus_info->dev, "linecard %u: Clearing FSM state error", + linecard->slot_index); + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_CLEAR_ERRORS, false); + return mlxsw_reg_write(linecard->linecards->mlxsw_core, + MLXSW_REG(mbct), linecard->mbct_pl); +} + +static int mlxsw_linecard_fix_fsm_state(struct mlxsw_linecard *linecard, + enum mlxsw_reg_mbct_fsm_state fsm_state) +{ + if (fsm_state != MLXSW_REG_MBCT_FSM_STATE_ERROR) + return 0; + return __mlxsw_linecard_fix_fsm_state(linecard); +} + +static int +mlxsw_linecard_query_ini_status(struct mlxsw_linecard *linecard, + enum mlxsw_reg_mbct_status 
*status, + enum mlxsw_reg_mbct_fsm_state *fsm_state, + struct netlink_ext_ack *extack) +{ + int err; + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_QUERY_STATUS, false); + err = mlxsw_reg_query(linecard->linecards->mlxsw_core, MLXSW_REG(mbct), + linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query linecard INI status"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, status, fsm_state); + return err; +} + +static int +mlxsw_linecard_ini_transfer(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + const struct mlxsw_linecard_ini_file *ini_file, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + size_t size_left; + const u8 *data; + int err; + + size_left = le16_to_cpu(ini_file->size); + data = ini_file->data; + while (size_left) { + size_t data_size = MLXSW_REG_MBCT_DATA_LEN; + bool is_last = false; + + if (size_left <= MLXSW_REG_MBCT_DATA_LEN) { + data_size = size_left; + is_last = true; + } + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_DATA_TRANSFER, false); + mlxsw_reg_mbct_dt_pack(linecard->mbct_pl, data_size, + is_last, data); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), + linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI data transfer"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, + &status, &fsm_state); + if ((!is_last && status != MLXSW_REG_MBCT_STATUS_PART_DATA) || + (is_last && status != MLXSW_REG_MBCT_STATUS_LAST_DATA)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to transfer linecard INI data"); + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + return -EINVAL; + } + size_left -= data_size; + data += data_size; + } + + return 0; +} + +static int +mlxsw_linecard_ini_erase(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + int err; + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_ERASE_INI_IMAGE, false); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), + linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI erase"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state); + switch (status) { + case MLXSW_REG_MBCT_STATUS_ERASE_COMPLETE: + break; + default: + /* Should not happen */ + fallthrough; + case MLXSW_REG_MBCT_STATUS_ERASE_FAILED: + NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI"); + goto fix_fsm_err_out; + case MLXSW_REG_MBCT_STATUS_ERROR_INI_IN_USE: + NL_SET_ERR_MSG_MOD(extack, "Failed to erase linecard INI while being used"); + goto fix_fsm_err_out; + } + return 0; + +fix_fsm_err_out: + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + return -EINVAL; +} + +static void mlxsw_linecard_bct_process(struct mlxsw_core *mlxsw_core, + const char *mbct_pl) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + struct mlxsw_linecard *linecard; + u8 slot_index; + + mlxsw_reg_mbct_unpack(mbct_pl, &slot_index, &status, &fsm_state); + if (WARN_ON(slot_index > linecards->count)) + return; + linecard = mlxsw_linecard_get(linecards, slot_index); + mutex_lock(&linecard->lock); + if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) { + 
dev_err(linecards->bus_info->dev, "linecard %u: Failed to activate INI", + linecard->slot_index); + goto fix_fsm_out; + } + mutex_unlock(&linecard->lock); + return; + +fix_fsm_out: + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + mlxsw_linecard_provision_fail(linecard); + mutex_unlock(&linecard->lock); +} + +static int +mlxsw_linecard_ini_activate(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + int err; + + mlxsw_reg_mbct_pack(linecard->mbct_pl, linecard->slot_index, + MLXSW_REG_MBCT_OP_ACTIVATE, true); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mbct), linecard->mbct_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to issue linecard INI activation"); + return err; + } + mlxsw_reg_mbct_unpack(linecard->mbct_pl, NULL, &status, &fsm_state); + if (status == MLXSW_REG_MBCT_STATUS_ACTIVATION_FAILED) { + NL_SET_ERR_MSG_MOD(extack, "Failed to activate linecard INI"); + goto fix_fsm_err_out; + } + + return 0; + +fix_fsm_err_out: + mlxsw_linecard_fix_fsm_state(linecard, fsm_state); + return -EINVAL; +} + +#define MLXSW_LINECARD_INI_WAIT_RETRIES 10 +#define MLXSW_LINECARD_INI_WAIT_MS 500 + +static int +mlxsw_linecard_ini_in_use_wait(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct netlink_ext_ack *extack) +{ + enum mlxsw_reg_mbct_fsm_state fsm_state; + enum mlxsw_reg_mbct_status status; + unsigned int ini_wait_retries = 0; + int err; + +query_ini_status: + err = mlxsw_linecard_query_ini_status(linecard, &status, + &fsm_state, extack); + if (err) + return err; + + switch (fsm_state) { + case MLXSW_REG_MBCT_FSM_STATE_INI_IN_USE: + if (ini_wait_retries++ > MLXSW_LINECARD_INI_WAIT_RETRIES) { + NL_SET_ERR_MSG_MOD(extack, "Failed to wait for linecard INI to be unused"); + return -EINVAL; + } + mdelay(MLXSW_LINECARD_INI_WAIT_MS); + goto query_ini_status; + default: + break; + } + return 0; +} + +static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard, + void *priv, const char *type, + const void *type_priv, + struct netlink_ext_ack *extack) +{ + const struct mlxsw_linecard_ini_file *ini_file = type_priv; + struct mlxsw_linecard *linecard = priv; + struct mlxsw_core *mlxsw_core; + int err; + + mutex_lock(&linecard->lock); + + mlxsw_core = linecard->linecards->mlxsw_core; + + err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + err = mlxsw_linecard_ini_transfer(mlxsw_core, linecard, + ini_file, extack); + if (err) + goto err_out; + + mlxsw_linecard_status_event_to_schedule(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION); + err = mlxsw_linecard_ini_activate(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + goto out; + +err_out: + mlxsw_linecard_provision_fail(linecard); +out: + mutex_unlock(&linecard->lock); + return err; +} + +static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard, + void *priv, + struct netlink_ext_ack *extack) +{ + struct mlxsw_linecard *linecard = priv; + struct mlxsw_core *mlxsw_core; + int err; + + mutex_lock(&linecard->lock); + + mlxsw_core = linecard->linecards->mlxsw_core; + + err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + mlxsw_linecard_status_event_to_schedule(linecard, + MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION); + err = mlxsw_linecard_ini_erase(mlxsw_core, linecard, extack); + if (err) + goto err_out; + + goto out; + +err_out: + 
mlxsw_linecard_provision_fail(linecard); +out: + mutex_unlock(&linecard->lock); + return err; +} + +static bool mlxsw_linecard_same_provision(struct devlink_linecard *devlink_linecard, + void *priv, const char *type, + const void *type_priv) +{ + const struct mlxsw_linecard_ini_file *ini_file = type_priv; + struct mlxsw_linecard *linecard = priv; + bool ret; + + mutex_lock(&linecard->lock); + ret = linecard->hw_revision == be16_to_cpu(ini_file->format.hw_revision) && + linecard->ini_version == be16_to_cpu(ini_file->format.ini_version); + mutex_unlock(&linecard->lock); + return ret; +} + +static unsigned int +mlxsw_linecard_types_count(struct devlink_linecard *devlink_linecard, + void *priv) +{ + struct mlxsw_linecard *linecard = priv; + + return linecard->linecards->types_info ? + linecard->linecards->types_info->count : 0; +} + +static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard, + void *priv, unsigned int index, + const char **type, const void **type_priv) +{ + struct mlxsw_linecard_types_info *types_info; + struct mlxsw_linecard_ini_file *ini_file; + struct mlxsw_linecard *linecard = priv; + + types_info = linecard->linecards->types_info; + if (WARN_ON_ONCE(!types_info)) + return; + ini_file = types_info->ini_files[index]; + *type = ini_file->format.name; + *type_priv = ini_file; +} + +static const struct devlink_linecard_ops mlxsw_linecard_ops = { + .provision = mlxsw_linecard_provision, + .unprovision = mlxsw_linecard_unprovision, + .same_provision = mlxsw_linecard_same_provision, + .types_count = mlxsw_linecard_types_count, + .types_get = mlxsw_linecard_types_get, +}; + +struct mlxsw_linecard_status_event { + struct mlxsw_core *mlxsw_core; + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + struct work_struct work; +}; + +static void mlxsw_linecard_status_event_work(struct work_struct *work) +{ + struct mlxsw_linecard_status_event *event; + struct mlxsw_linecards *linecards; + struct mlxsw_core *mlxsw_core; + + event = container_of(work, struct mlxsw_linecard_status_event, work); + mlxsw_core = event->mlxsw_core; + linecards = mlxsw_core_linecards(mlxsw_core); + mlxsw_linecard_status_process(linecards, NULL, event->mddq_pl); + kfree(event); +} + +static void +mlxsw_linecard_status_listener_func(const struct mlxsw_reg_info *reg, + char *mddq_pl, void *priv) +{ + struct mlxsw_linecard_status_event *event; + struct mlxsw_core *mlxsw_core = priv; + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + event->mlxsw_core = mlxsw_core; + memcpy(event->mddq_pl, mddq_pl, sizeof(event->mddq_pl)); + INIT_WORK(&event->work, mlxsw_linecard_status_event_work); + mlxsw_core_schedule_work(&event->work); +} + +struct mlxsw_linecard_bct_event { + struct mlxsw_core *mlxsw_core; + char mbct_pl[MLXSW_REG_MBCT_LEN]; + struct work_struct work; +}; + +static void mlxsw_linecard_bct_event_work(struct work_struct *work) +{ + struct mlxsw_linecard_bct_event *event; + struct mlxsw_core *mlxsw_core; + + event = container_of(work, struct mlxsw_linecard_bct_event, work); + mlxsw_core = event->mlxsw_core; + mlxsw_linecard_bct_process(mlxsw_core, event->mbct_pl); + kfree(event); +} + +static void +mlxsw_linecard_bct_listener_func(const struct mlxsw_reg_info *reg, + char *mbct_pl, void *priv) +{ + struct mlxsw_linecard_bct_event *event; + struct mlxsw_core *mlxsw_core = priv; + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + event->mlxsw_core = mlxsw_core; + memcpy(event->mbct_pl, mbct_pl, sizeof(event->mbct_pl)); + INIT_WORK(&event->work, 
mlxsw_linecard_bct_event_work); + mlxsw_core_schedule_work(&event->work); +} + +static const struct mlxsw_listener mlxsw_linecard_listener[] = { + MLXSW_CORE_EVENTL(mlxsw_linecard_status_listener_func, DSDSC), + MLXSW_CORE_EVENTL(mlxsw_linecard_bct_listener_func, BCTOE), +}; + +static int mlxsw_linecard_event_delivery_set(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + bool enable) +{ + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + + mlxsw_reg_mddq_slot_info_pack(mddq_pl, linecard->slot_index, enable); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddq), mddq_pl); +} + +static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecards, + u8 slot_index) +{ + struct devlink_linecard *devlink_linecard; + struct mlxsw_linecard *linecard; + int err; + + linecard = mlxsw_linecard_get(linecards, slot_index); + linecard->slot_index = slot_index; + linecard->linecards = linecards; + mutex_init(&linecard->lock); + + devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core), + slot_index, &mlxsw_linecard_ops, + linecard); + if (IS_ERR(devlink_linecard)) { + err = PTR_ERR(devlink_linecard); + goto err_devlink_linecard_create; + } + linecard->devlink_linecard = devlink_linecard; + INIT_DELAYED_WORK(&linecard->status_event_to_dw, + &mlxsw_linecard_status_event_to_work); + + err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true); + if (err) + goto err_event_delivery_set; + + err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards, + linecard); + if (err) + goto err_status_get_and_process; + + return 0; + +err_status_get_and_process: + mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false); +err_event_delivery_set: + devlink_linecard_destroy(linecard->devlink_linecard); +err_devlink_linecard_create: + mutex_destroy(&linecard->lock); + return err; +} + +static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecards, + u8 slot_index) +{ + struct mlxsw_linecard *linecard; + + linecard = mlxsw_linecard_get(linecards, slot_index); + mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false); + cancel_delayed_work_sync(&linecard->status_event_to_dw); + /* Make sure all scheduled events are processed */ + mlxsw_core_flush_owq(); + devlink_linecard_destroy(linecard->devlink_linecard); + mutex_destroy(&linecard->lock); +} + +/* LINECARDS INI BUNDLE FILE + * +----------------------------------+ + * | MAGIC ("NVLCINI+") | + * +----------------------------------+ +--------------------+ + * | INI 0 +---> | __le16 size | + * +----------------------------------+ | __be16 hw_revision | + * | INI 1 | | __be16 ini_version | + * +----------------------------------+ | u8 __dontcare[3] | + * | ... | | u8 type | + * +----------------------------------+ | u8 name[20] | + * | INI N | | ... 
| + * +----------------------------------+ +--------------------+ + */ + +#define MLXSW_LINECARDS_INI_BUNDLE_MAGIC "NVLCINI+" + +static int +mlxsw_linecard_types_file_validate(struct mlxsw_linecards *linecards, + struct mlxsw_linecard_types_info *types_info) +{ + size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC); + struct mlxsw_linecard_ini_file *ini_file; + size_t size = types_info->data_size; + const u8 *data = types_info->data; + unsigned int count = 0; + u16 ini_file_size; + + if (size < magic_size) { + dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file size, smaller than magic size\n"); + return -EINVAL; + } + if (memcmp(data, MLXSW_LINECARDS_INI_BUNDLE_MAGIC, magic_size)) { + dev_warn(linecards->bus_info->dev, "Invalid linecards INIs file magic pattern\n"); + return -EINVAL; + } + + data += magic_size; + size -= magic_size; + + while (size > 0) { + if (size < sizeof(*ini_file)) { + dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI which is smaller than bare minimum\n"); + return -EINVAL; + } + ini_file = (struct mlxsw_linecard_ini_file *) data; + ini_file_size = le16_to_cpu(ini_file->size); + if (ini_file_size + sizeof(__le16) > size) { + dev_warn(linecards->bus_info->dev, "Linecards INIs file appears to be truncated\n"); + return -EINVAL; + } + if (ini_file_size % 4) { + dev_warn(linecards->bus_info->dev, "Linecards INIs file contains INI with invalid size\n"); + return -EINVAL; + } + data += ini_file_size + sizeof(__le16); + size -= ini_file_size + sizeof(__le16); + count++; + } + if (!count) { + dev_warn(linecards->bus_info->dev, "Linecards INIs file does not contain any INI\n"); + return -EINVAL; + } + types_info->count = count; + return 0; +} + +static void +mlxsw_linecard_types_file_parse(struct mlxsw_linecard_types_info *types_info) +{ + size_t magic_size = strlen(MLXSW_LINECARDS_INI_BUNDLE_MAGIC); + size_t size = types_info->data_size - magic_size; + const u8 *data = types_info->data + magic_size; + struct mlxsw_linecard_ini_file *ini_file; + unsigned int count = 0; + u16 ini_file_size; + int i; + + while (size) { + ini_file = (struct mlxsw_linecard_ini_file *) data; + ini_file_size = le16_to_cpu(ini_file->size); + for (i = 0; i < ini_file_size / 4; i++) { + u32 *val = &((u32 *) ini_file->data)[i]; + + *val = swab32(*val); + } + types_info->ini_files[count] = ini_file; + data += ini_file_size + sizeof(__le16); + size -= ini_file_size + sizeof(__le16); + count++; + } +} + +#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT \ + "mellanox/lc_ini_bundle_%u_%u.bin" +#define MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN \ + (sizeof(MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT) + 4) + +static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards *linecards) +{ + const struct mlxsw_fw_rev *rev = &linecards->bus_info->fw_rev; + char filename[MLXSW_LINECARDS_INI_BUNDLE_FILENAME_LEN]; + struct mlxsw_linecard_types_info *types_info; + const struct firmware *firmware; + int err; + + err = snprintf(filename, sizeof(filename), + MLXSW_LINECARDS_INI_BUNDLE_FILENAME_FMT, + rev->minor, rev->subminor); + WARN_ON(err >= sizeof(filename)); + + err = request_firmware_direct(&firmware, filename, + linecards->bus_info->dev); + if (err) { + dev_warn(linecards->bus_info->dev, "Could not request linecards INI file \"%s\", provisioning will not be possible\n", + filename); + return 0; + } + + types_info = kzalloc(sizeof(*types_info), GFP_KERNEL); + if (!types_info) { + release_firmware(firmware); + return -ENOMEM; + } + linecards->types_info = 
types_info; + + types_info->data_size = firmware->size; + types_info->data = vmalloc(types_info->data_size); + if (!types_info->data) { + err = -ENOMEM; + release_firmware(firmware); + goto err_data_alloc; + } + memcpy(types_info->data, firmware->data, types_info->data_size); + release_firmware(firmware); + + err = mlxsw_linecard_types_file_validate(linecards, types_info); + if (err) { + err = 0; + goto err_type_file_file_validate; + } + + types_info->ini_files = kmalloc_array(types_info->count, + sizeof(struct mlxsw_linecard_ini_file), + GFP_KERNEL); + if (!types_info->ini_files) { + err = -ENOMEM; + goto err_ini_files_alloc; + } + + mlxsw_linecard_types_file_parse(types_info); + + return 0; + +err_ini_files_alloc: +err_type_file_file_validate: + vfree(types_info->data); +err_data_alloc: + kfree(types_info); + return err; +} + +static void mlxsw_linecard_types_fini(struct mlxsw_linecards *linecards) +{ + struct mlxsw_linecard_types_info *types_info = linecards->types_info; + + if (!types_info) + return; + kfree(types_info->ini_files); + vfree(types_info->data); + kfree(types_info); +} + +int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *bus_info) +{ + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; + struct mlxsw_linecards *linecards; + u8 slot_count; + int err; + int i; + + mlxsw_reg_mgpir_pack(mgpir_pl, 0); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + return err; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, + NULL, &slot_count); + if (!slot_count) + return 0; + + linecards = vzalloc(struct_size(linecards, linecards, slot_count)); + if (!linecards) + return -ENOMEM; + linecards->count = slot_count; + linecards->mlxsw_core = mlxsw_core; + linecards->bus_info = bus_info; + + err = mlxsw_linecard_types_init(mlxsw_core, linecards); + if (err) + goto err_types_init; + + err = mlxsw_core_traps_register(mlxsw_core, mlxsw_linecard_listener, + ARRAY_SIZE(mlxsw_linecard_listener), + mlxsw_core); + if (err) + goto err_traps_register; + + mlxsw_core_linecards_set(mlxsw_core, linecards); + + for (i = 0; i < linecards->count; i++) { + err = mlxsw_linecard_init(mlxsw_core, linecards, i + 1); + if (err) + goto err_linecard_init; + } + + return 0; + +err_linecard_init: + for (i--; i >= 0; i--) + mlxsw_linecard_fini(mlxsw_core, linecards, i + 1); + mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener, + ARRAY_SIZE(mlxsw_linecard_listener), + mlxsw_core); +err_traps_register: + mlxsw_linecard_types_fini(linecards); +err_types_init: + vfree(linecards); + return err; +} + +void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + int i; + + if (!linecards) + return; + for (i = 0; i < linecards->count; i++) + mlxsw_linecard_fini(mlxsw_core, linecards, i + 1); + mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener, + ARRAY_SIZE(mlxsw_linecard_listener), + mlxsw_core); + mlxsw_linecard_types_fini(linecards); + vfree(linecards); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3457a216642..b4e064bd77bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -89,6 +89,11 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { "." __stringify(MLXSW_SP_FWREV_MINOR) \ "." 
__stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2" +#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \ + "mellanox/lc_ini_bundle_" \ + __stringify(MLXSW_SP_FWREV_MINOR) "_" \ + __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin" + static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; @@ -5159,3 +5164,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table); MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); +MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 7405c400f09b..d888498aed33 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -133,6 +133,10 @@ enum mlxsw_event_trap_id { MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D, /* PTP Egress FIFO has a new entry */ MLXSW_TRAP_ID_PTP_EGR_FIFO = 0x2E, + /* Downstream Device Status Change */ + MLXSW_TRAP_ID_DSDSC = 0x321, + /* Binary Code Transfer Operation Executed Event */ + MLXSW_TRAP_ID_BCTOE = 0x322, /* Port mapping change */ MLXSW_TRAP_ID_PMLPE = 0x32E, }; -- cgit v1.2.3 From ee7a70fa671b274aa7e34c66a6e9ab55acadc59a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:38 +0300 Subject: mlxsw: core_linecards: Implement line card activation process Allow to process events generated upon line card getting "ready" and "active". When DSDSC event with "ready" bit set is delivered, that means the line card is powered up. Use MDDC register to push the line card to active state. Once FW is done with that, the DSDSC event with "active" bit set is delivered. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 4 +- .../net/ethernet/mellanox/mlxsw/core_linecards.c | 63 ++++++++++++++++++++++ 2 files changed, 66 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 44c8a7888985..7d6f8f3bcd93 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -564,7 +564,9 @@ struct mlxsw_linecard { char mbct_pl[MLXSW_REG_MBCT_LEN]; /* Too big for stack */ enum mlxsw_linecard_status_event_type status_event_type_to; struct delayed_work status_event_to_dw; - u8 provisioned:1; + u8 provisioned:1, + ready:1, + active:1; u16 hw_revision; u16 ini_version; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 1401f6d34635..49dfec14da75 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -90,6 +90,8 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard) static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) { linecard->provisioned = false; + linecard->ready = false; + linecard->active = false; devlink_linecard_provision_fail(linecard->devlink_linecard); } @@ -131,6 +133,46 @@ static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard) devlink_linecard_provision_clear(linecard->devlink_linecard); } +static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + char mddc_pl[MLXSW_REG_MDDC_LEN]; + int err; + + mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl); + if (err) + return err; + linecard->ready = true; + return 0; +} + +static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + char mddc_pl[MLXSW_REG_MDDC_LEN]; + int err; + + mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, false); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl); + if (err) + return err; + linecard->ready = false; + return 0; +} + +static void mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) +{ + linecard->active = true; + devlink_linecard_activate(linecard->devlink_linecard); +} + +static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard) +{ + linecard->active = false; + devlink_linecard_deactivate(linecard->devlink_linecard); +} + static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards, struct mlxsw_linecard *linecard, const char *mddq_pl) @@ -164,6 +206,25 @@ static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards, goto out; } + if (ready == MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && !linecard->ready) { + err = mlxsw_linecard_ready_set(linecard); + if (err) + goto out; + } + + if (active && linecard->active != active) + mlxsw_linecard_active_set(linecard); + + if (!active && linecard->active != active) + mlxsw_linecard_active_clear(linecard); + + if (ready != MLXSW_REG_MDDQ_SLOT_INFO_READY_READY && + linecard->ready) { + err = mlxsw_linecard_ready_clear(linecard); + if (err) + goto out; + } + if (!provisioned && linecard->provisioned != provisioned) mlxsw_linecard_provision_clear(linecard); @@ -676,6 +737,8 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core, 
cancel_delayed_work_sync(&linecard->status_event_to_dw); /* Make sure all scheduled events are processed */ mlxsw_core_flush_owq(); + if (linecard->active) + mlxsw_linecard_active_clear(linecard); devlink_linecard_destroy(linecard->devlink_linecard); mutex_destroy(&linecard->lock); } -- cgit v1.2.3 From 45bf3b7267e0264ff25f85ea46ba4628d39b69b4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:39 +0300 Subject: mlxsw: core: Extend driver ops by remove selected ports op In case of line card implementation, the core has to have a way to remove relevant ports manually. Extend the Spectrum driver ops by an op that implements port removal of selected ports upon request. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 9 +++++++++ drivers/net/ethernet/mellanox/mlxsw/core.h | 8 ++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 17 +++++++++++++++++ 3 files changed, 34 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 5e1855f752d0..9b5c2c60b9aa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -3143,6 +3143,15 @@ bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port) } EXPORT_SYMBOL(mlxsw_core_port_is_xm); +void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, u16 local_port), + void *priv) +{ + if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected)) + return; + mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv); +} + struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) { return mlxsw_core->env; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 7d6f8f3bcd93..865252e97e14 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -258,6 +258,10 @@ struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, u16 local_port); bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port); +void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, + u16 local_port), + void *priv); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); @@ -331,6 +335,10 @@ struct mlxsw_driver { unsigned int count, struct netlink_ext_ack *extack); int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port, struct netlink_ext_ack *extack); + void (*ports_remove_selected)(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, + u16 local_port), + void *priv); int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index b4e064bd77bc..c3b9e244e888 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1999,6 +1999,20 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) mlxsw_sp->ports = NULL; } +static void +mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core, + bool (*selector)(void *priv, u16 local_port), + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + unsigned 
int max_ports = mlxsw_core_max_ports(mlxsw_core); + int i; + + for (i = 1; i < max_ports; i++) + if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i)) + mlxsw_sp_port_remove(mlxsw_sp, i); +} + static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); @@ -3785,6 +3799,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .fini = mlxsw_sp_fini, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, + .ports_remove_selected = mlxsw_sp_ports_remove_selected, .sb_pool_get = mlxsw_sp_sb_pool_get, .sb_pool_set = mlxsw_sp_sb_pool_set, .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, @@ -3822,6 +3837,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = { .fini = mlxsw_sp_fini, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, + .ports_remove_selected = mlxsw_sp_ports_remove_selected, .sb_pool_get = mlxsw_sp_sb_pool_get, .sb_pool_set = mlxsw_sp_sb_pool_set, .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, @@ -3857,6 +3873,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = { .fini = mlxsw_sp_fini, .port_split = mlxsw_sp_port_split, .port_unsplit = mlxsw_sp_port_unsplit, + .ports_remove_selected = mlxsw_sp_ports_remove_selected, .sb_pool_get = mlxsw_sp_sb_pool_get, .sb_pool_set = mlxsw_sp_sb_pool_set, .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, -- cgit v1.2.3 From 6445eef0f600b726da19b4d5885ebb25ba47f590 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 18 Apr 2022 09:42:40 +0300 Subject: mlxsw: spectrum: Add port to linecard mapping For each port get slot_index using PMLP register. For ports residing on a linecard, identify it with the linecard by setting mapping using devlink_port_linecard_set() helper. Use linecard slot index for PMTDB register queries. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 28 ++++++++++++-- drivers/net/ethernet/mellanox/mlxsw/core.h | 6 ++- .../net/ethernet/mellanox/mlxsw/core_linecards.c | 13 +++++++ drivers/net/ethernet/mellanox/mlxsw/minimal.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/reg.h | 9 +++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 44 ++++++++++++++++------ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 37 ++++++++++-------- 8 files changed, 107 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 9b5c2c60b9aa..0e92dd91eca4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -48,6 +48,7 @@ struct mlxsw_core_port { struct devlink_port devlink_port; void *port_driver_priv; u16 local_port; + struct mlxsw_linecard *linecard; }; void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) @@ -2975,7 +2976,7 @@ EXPORT_SYMBOL(mlxsw_core_res_get); static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, enum devlink_port_flavour flavour, - u32 port_number, bool split, + u8 slot_index, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, @@ -2998,6 +2999,15 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, attrs.switch_id.id_len = switch_id_len; mlxsw_core_port->local_port = local_port; devlink_port_attrs_set(devlink_port, &attrs); + if (slot_index) { + struct mlxsw_linecard *linecard; + + linecard = mlxsw_linecard_get(mlxsw_core->linecards, + slot_index); + mlxsw_core_port->linecard = linecard; + devlink_port_linecard_set(devlink_port, + linecard->devlink_linecard); + } err = devl_port_register(devlink, devlink_port, local_port); if (err) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); @@ -3015,7 +3025,7 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port } int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, - u32 port_number, bool split, + u8 slot_index, u32 port_number, bool split, u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, @@ -3024,7 +3034,7 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, int err; err = __mlxsw_core_port_init(mlxsw_core, local_port, - DEVLINK_PORT_FLAVOUR_PHYSICAL, + DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index, port_number, split, split_port_subnumber, splittable, lanes, switch_id, switch_id_len); @@ -3055,7 +3065,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, DEVLINK_PORT_FLAVOUR_CPU, - 0, false, 0, false, 0, + 0, 0, false, 0, false, 0, switch_id, switch_id_len); if (err) return err; @@ -3131,6 +3141,16 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); +struct mlxsw_linecard * +mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core, + u16 local_port) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + + return mlxsw_core_port->linecard; +} + bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port) { const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 865252e97e14..850fff51b79f 
100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -236,7 +236,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, - u32 port_number, bool split, u32 split_port_subnumber, + u8 slot_index, u32 port_number, bool split, + u32 split_port_subnumber, bool splittable, u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len); @@ -257,6 +258,9 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, u16 local_port); +struct mlxsw_linecard * +mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core, + u16 local_port); bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port); void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core, bool (*selector)(void *priv, diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 49dfec14da75..1d50bfe67156 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -481,6 +481,15 @@ query_ini_status: return 0; } +static bool mlxsw_linecard_port_selector(void *priv, u16 local_port) +{ + struct mlxsw_linecard *linecard = priv; + struct mlxsw_core *mlxsw_core; + + mlxsw_core = linecard->linecards->mlxsw_core; + return linecard == mlxsw_core_port_linecard_get(mlxsw_core, local_port); +} + static int mlxsw_linecard_provision(struct devlink_linecard *devlink_linecard, void *priv, const char *type, const void *type_priv, @@ -531,6 +540,10 @@ static int mlxsw_linecard_unprovision(struct devlink_linecard *devlink_linecard, mlxsw_core = linecard->linecards->mlxsw_core; + mlxsw_core_ports_remove_selected(mlxsw_core, + mlxsw_linecard_port_selector, + linecard); + err = mlxsw_linecard_ini_in_use_wait(mlxsw_core, linecard, extack); if (err) goto err_out; diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index ee1cb1b81669..d9660d4cce96 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -223,7 +223,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module) struct net_device *dev; int err; - err = mlxsw_core_port_init(mlxsw_m->core, local_port, + err = mlxsw_core_port_init(mlxsw_m->core, local_port, 0, module + 1, false, 0, false, 0, mlxsw_m->base_mac, sizeof(mlxsw_m->base_mac)); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index e41451028478..23589d3b160a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4325,6 +4325,15 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8); */ MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false); +/* reg_pmlp_slot_index + * Module number. + * Slot_index + * Slot_index = 0 represent the onboard (motherboard). + * In case of non-modular system only slot_index = 0 is available. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pmlp, slot_index, 0x04, 8, 4, 0x04, 0x00, false); + /* reg_pmlp_tx_lane * Tx Lane. When rxtx field is cleared, this field is used for Rx as well. 
* Access: RW diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3b9e244e888..ac6348e2ff1f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -492,11 +492,13 @@ mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, { bool separate_rxtx; u8 first_lane; + u8 slot_index; u8 module; u8 width; int i; module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0); width = mlxsw_reg_pmlp_width_get(pmlp_pl); separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl); first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); @@ -513,6 +515,11 @@ mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, local_port); return -EINVAL; } + if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n", + local_port); + return -EINVAL; + } if (separate_rxtx && mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) { @@ -528,6 +535,7 @@ mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp, } port_mapping->module = module; + port_mapping->slot_index = slot_index; port_mapping->width = width; port_mapping->module_width = width; port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); @@ -556,11 +564,14 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, char pmlp_pl[MLXSW_REG_PMLP_LEN]; int i, err; - mlxsw_env_module_port_map(mlxsw_sp->core, 0, port_mapping->module); + mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index, + port_mapping->module); mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); for (i = 0; i < port_mapping->width; i++) { + mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i, + port_mapping->slot_index); mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ } @@ -571,7 +582,8 @@ mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port, return 0; err_pmlp_write: - mlxsw_env_module_port_unmap(mlxsw_sp->core, 0, port_mapping->module); + mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index, + port_mapping->module); return err; } @@ -592,7 +604,8 @@ static int mlxsw_sp_port_open(struct net_device *dev) struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = mlxsw_env_module_port_up(mlxsw_sp->core, 0, + err = mlxsw_env_module_port_up(mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module); if (err) return err; @@ -603,7 +616,8 @@ static int mlxsw_sp_port_open(struct net_device *dev) return 0; err_port_admin_status_set: - mlxsw_env_module_port_down(mlxsw_sp->core, 0, + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module); return err; } @@ -615,7 +629,8 @@ static int mlxsw_sp_port_stop(struct net_device *dev) netif_stop_queue(dev); mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); - mlxsw_env_module_port_down(mlxsw_sp->core, 0, + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module); return 0; } @@ -1462,12 +1477,13 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = 
mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; u64 overheat_counter; int err; - err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, 0, module, - &overheat_counter); + err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index, + module, &overheat_counter); if (err) return err; @@ -1542,7 +1558,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, } splittable = lanes > 1 && !split; - err = mlxsw_core_port_init(mlxsw_sp->core, local_port, + err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index, port_number, split, split_port_subnumber, splittable, lanes, mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac)); @@ -1792,7 +1808,8 @@ err_port_label_info_get: mlxsw_sp_port_swid_set(mlxsw_sp, local_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: - mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 0, + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, + port_mapping->slot_index, port_mapping->module); return err; } @@ -1800,6 +1817,7 @@ err_port_swid_set: static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); @@ -1822,7 +1840,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port) mlxsw_core_port_fini(mlxsw_sp->core, local_port); mlxsw_sp_port_swid_set(mlxsw_sp, local_port, MLXSW_PORT_SWID_DISABLED_PORT); - mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, 0, module); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module); } static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) @@ -2194,7 +2212,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port, return -EINVAL; } - mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, + mlxsw_sp_port->mapping.module, mlxsw_sp_port->mapping.module_width / count, count); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); @@ -2259,7 +2278,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port, count = mlxsw_sp_port->mapping.module_width / mlxsw_sp_port->mapping.width; - mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index, + mlxsw_sp_port->mapping.module, mlxsw_sp_port->mapping.module_width / count, count); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 928c3a63b6b6..2ad29ae1c640 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -145,6 +145,7 @@ struct mlxsw_sp_mall_entry; struct mlxsw_sp_port_mapping { u8 module; + u8 slot_index; u8 width; /* Number of lanes used by the port */ u8 module_width; /* Number of lanes in the module (static) */ u8 lane; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index f72c26ce0391..915dffb85a1c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -568,14 +568,14 @@ struct mlxsw_sp_port_stats { static u64 mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port 
*mlxsw_sp_port) { - struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping; struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; + u8 module = mlxsw_sp_port->mapping.module; u64 stats; int err; - err = mlxsw_env_module_overheat_counter_get(mlxsw_core, 0, - port_mapping.module, - &stats); + err = mlxsw_env_module_overheat_counter_get(mlxsw_core, slot_index, + module, &stats); if (err) return mlxsw_sp_port->module_overheat_initial_val; @@ -1035,7 +1035,8 @@ static int mlxsw_sp_get_module_info(struct net_device *netdev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - return mlxsw_env_get_module_info(netdev, mlxsw_sp->core, 0, + return mlxsw_env_get_module_info(netdev, mlxsw_sp->core, + mlxsw_sp_port->mapping.slot_index, mlxsw_sp_port->mapping.module, modinfo); } @@ -1045,10 +1046,11 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; + u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, 0, - mlxsw_sp_port->mapping.module, ee, - data); + return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, slot_index, + module, ee, data); } static int @@ -1058,10 +1060,11 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, 0, module, - page, extack); + return mlxsw_env_get_module_eeprom_by_page(mlxsw_sp->core, slot_index, + module, page, extack); } static int @@ -1202,9 +1205,11 @@ static int mlxsw_sp_reset(struct net_device *dev, u32 *flags) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_reset_module(dev, mlxsw_sp->core, 0, module, flags); + return mlxsw_env_reset_module(dev, mlxsw_sp->core, slot_index, + module, flags); } static int @@ -1214,10 +1219,11 @@ mlxsw_sp_get_module_power_mode(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_get_module_power_mode(mlxsw_sp->core, 0, module, - params, extack); + return mlxsw_env_get_module_power_mode(mlxsw_sp->core, slot_index, + module, params, extack); } static int @@ -1227,10 +1233,11 @@ mlxsw_sp_set_module_power_mode(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; u8 module = mlxsw_sp_port->mapping.module; - return mlxsw_env_set_module_power_mode(mlxsw_sp->core, 0, module, - params->policy, extack); + return mlxsw_env_set_module_power_mode(mlxsw_sp->core, slot_index, + module, params->policy, extack); } const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { -- cgit v1.2.3 From eb38c2053b67977844404cbbdee341dbf3a02d36 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Mon, 14 Mar 2022 23:09:10 +0100 
Subject: can: rx-offload: rename can_rx_offload_queue_sorted() -> can_rx_offload_queue_timestamp() This patch renames the function can_rx_offload_queue_sorted() to can_rx_offload_queue_timestamp(). This better describes what the function does, it adds a newly RX'ed skb to the sorted queue by its timestamp. Link: https://lore.kernel.org/all/20220417194327.2699059-1-mkl@pengutronix.de Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev/rx-offload.c | 6 +++--- drivers/net/can/flexcan/flexcan-core.c | 4 ++-- drivers/net/can/m_can/m_can.c | 2 +- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 6 +++--- drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 2 +- drivers/net/can/ti_hecc.c | 4 ++-- include/linux/can/rx-offload.h | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index 7f80d8e1e750..6d0dc18c03e7 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -221,7 +221,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload) } EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo); -int can_rx_offload_queue_sorted(struct can_rx_offload *offload, +int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp) { struct can_rx_offload_cb *cb; @@ -240,7 +240,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, return 0; } -EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted); +EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp); unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, unsigned int idx, u32 timestamp, @@ -256,7 +256,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, if (!skb) return 0; - err = can_rx_offload_queue_sorted(offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(offload, skb, timestamp); if (err) { stats->rx_errors++; stats->tx_fifo_errors++; diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 74d7fcbfd065..389bd9da7e9a 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -845,7 +845,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) if (tx_errors) dev->stats.tx_errors++; - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) dev->stats.rx_fifo_errors++; } @@ -892,7 +892,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) dev->stats.rx_fifo_errors++; } diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index b3b5bc1c803b..2779bba390f2 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -464,7 +464,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev, struct net_device_stats *stats = &cdev->net->stats; int err; - err = can_rx_offload_queue_sorted(&cdev->offload, skb, + err = can_rx_offload_queue_timestamp(&cdev->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index f9dd8fdba12b..230ec60003fc 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ 
b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -916,7 +916,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; @@ -1021,7 +1021,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) return 0; mcp251xfd_skb_set_timestamp(priv, skb, timestamp); - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; @@ -1094,7 +1094,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) cf->data[7] = bec.rxerr; } - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index d09f7fbf2ba7..ced8d9c81f8c 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -173,7 +173,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, } mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); - err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts); if (err) stats->rx_fifo_errors++; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index ff31b993ab17..bb3f2e3b004c 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -633,7 +633,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, cf->data[3] = CAN_ERR_PROT_LOC_ACK; timestamp = hecc_read(priv, HECC_CANLNT); - err = can_rx_offload_queue_sorted(&priv->offload, skb, + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) ndev->stats.rx_fifo_errors++; @@ -668,7 +668,7 @@ static void ti_hecc_change_state(struct net_device *ndev, } timestamp = hecc_read(priv, HECC_CANLNT); - err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) ndev->stats.rx_fifo_errors++; } diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index c11477620403..c205c51d79c9 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -42,8 +42,8 @@ int can_rx_offload_add_manual(struct net_device *dev, int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); -int can_rx_offload_queue_sorted(struct can_rx_offload *offload, - struct sk_buff *skb, u32 timestamp); +int can_rx_offload_queue_timestamp(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp); unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, unsigned int idx, u32 timestamp, unsigned int *frame_len_ptr); -- cgit v1.2.3 From 85d4eb2a3dfe939dda5304d61e406cb8e0852d60 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Fri, 18 Mar 2022 14:58:44 +0100 Subject: can: bittiming: can_calc_bittiming(): prefer small bit rate pre-scalers over larger ones The CiA (CAN in Automation) lists in their Newsletter 1/2018 in the "Recommendation for the CAN FD bit-timing" [1] article several recommendations, one of them is: | Recommendation 3: Choose BRPA 
and BRPD as low as possible [1] https://can-newsletter.org/uploads/media/raw/f6a36d1461371a2f86ef0011a513712c.pdf With the current bit timing algorithm Srinivas Neeli noticed that on the Xilinx Versal ACAP board the CAN data bit timing parameters are not calculated optimally. For most bit rates, the bit rate prescaler (BRP) is != 1, although it's possible to configure the requested with a bit rate with a prescaler of 1: | Data Bit timing parameters for xilinx_can_fd2i with 79.999999 MHz ref clock (cmd-line) using algo 'v4.8' | nominal real Bitrt nom real SampP | Bitrate TQ[ns] PrS PhS1 PhS2 SJW BRP Bitrate Error SampP SampP Error | 12000000 12 2 2 2 1 1 11428571 4.8% 75.0% 71.4% 4.8% | 10000000 25 1 1 1 1 2 9999999 0.0% 75.0% 75.0% 0.0% | 8000000 12 3 3 3 1 1 7999999 0.0% 75.0% 70.0% 6.7% | 5000000 50 1 1 1 1 4 4999999 0.0% 75.0% 75.0% 0.0% | 4000000 62 1 1 1 1 5 3999999 0.0% 75.0% 75.0% 0.0% | 2000000 125 1 1 1 1 10 1999999 0.0% 75.0% 75.0% 0.0% | 1000000 250 1 1 1 1 20 999999 0.0% 75.0% 75.0% 0.0% The bit timing parameter calculation algorithm iterates effectively from low to high BRP values. It selects a new best parameter set, if the sample point error of the current parameter set is equal or less to old best parameter set. If the given hardware constraints (clock rate and bit timing parameter constants) don't allow a sample point error of 0, the algorithm will first find a valid bit timing parameter set with a low BRP, but then will accept parameter sets with higher BRPs that have the same sample point error. This patch changes the algorithm to only accept a new parameter set, if the resulting sample point error is lower. This leads to the following data bit timing parameter for the Versal ACAP board: | Data Bit timing parameters for xilinx_can_fd2i with 79.999999 MHz ref clock (cmd-line) using algo 'can-next' | nominal real Bitrt nom real SampP | Bitrate TQ[ns] PrS PhS1 PhS2 SJW BRP Bitrate Error SampP SampP Error | 12000000 12 2 2 2 1 1 11428571 4.8% 75.0% 71.4% 4.8% | 10000000 12 2 3 2 1 1 9999999 0.0% 75.0% 75.0% 0.0% | 8000000 12 3 3 3 1 1 7999999 0.0% 75.0% 70.0% 6.7% | 5000000 12 5 6 4 1 1 4999999 0.0% 75.0% 75.0% 0.0% | 4000000 12 7 7 5 1 1 3999999 0.0% 75.0% 75.0% 0.0% | 2000000 12 14 15 10 1 1 1999999 0.0% 75.0% 75.0% 0.0% | 1000000 25 14 15 10 1 2 999999 0.0% 75.0% 75.0% 0.0% Note: Due to HW constraints a data bit rate of 1 MBit/s with BRP = 1 is not possible. 
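For illustration, a minimal self-contained sketch of the selection rule described above (hypothetical names, not part of this patch; the real loop in can_calc_bittiming() tracks more state): because candidates are visited from low to high BRP, skipping a candidate whose sample point error is merely equal to the best one found so far keeps the earlier, lower-BRP parameter set.

	struct bt_candidate {
		unsigned int brp;			/* bit rate prescaler */
		unsigned int sample_point_error;	/* distance from nominal sample point */
	};

	/* Pick the preferred candidate: the lowest sample point error wins and
	 * ties go to the earliest (lowest BRP) entry, mirroring the ">="
	 * comparison introduced by this patch.
	 */
	static unsigned int bt_pick(const struct bt_candidate *cand, unsigned int num)
	{
		unsigned int best = 0;
		unsigned int i;

		for (i = 1; i < num; i++) {
			if (cand[i].sample_point_error >= cand[best].sample_point_error)
				continue;	/* equal error: keep the lower BRP */
			best = i;
		}
		return best;
	}
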
Link: https://lore.kernel.org/all/20220318144913.873614-1-mkl@pengutronix.de Link: https://lore.kernel.org/all/20220113203004.jf2rqj2pirhgx72i@pengutronix.de Cc: Srinivas Neeli Signed-off-by: Marc Kleine-Budde --- drivers/net/can/dev/bittiming.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c index 2103bcca9012..c1e76f0a5064 100644 --- a/drivers/net/can/dev/bittiming.c +++ b/drivers/net/can/dev/bittiming.c @@ -116,7 +116,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); - if (sample_point_error > best_sample_point_error) + if (sample_point_error >= best_sample_point_error) continue; best_sample_point_error = sample_point_error; -- cgit v1.2.3 From 20c7258980e07fb2a54797baf157e55b6c77d6e9 Mon Sep 17 00:00:00 2001 From: Kris Bahnsen Date: Tue, 29 Mar 2022 13:12:29 -0700 Subject: can: Fix Links to Technologic Systems web resources Technologic Systems has rebranded as embeddedTS with the current domain eventually going offline. Update web/doc URLs to correct resource locations. Link: https://lore.kernel.org/all/20220329201229.16279-1-kris@embeddedTS.com Signed-off-by: Kris Bahnsen Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sja1000/Kconfig | 2 +- drivers/net/can/sja1000/tscan1.c | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 110071b26921..4b2f9cb17fc3 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -107,7 +107,7 @@ config CAN_TSCAN1 depends on ISA help This driver is for Technologic Systems' TSCAN-1 PC104 boards. - http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1 + https://www.embeddedts.com/products/TS-CAN1 The driver supports multiple boards and automatically configures them: PLD IO base addresses are read from jumpers JP1 and JP2, IRQ numbers are read from jumpers JP4 and JP5, diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c index 3dbba8d61afb..f3862bed3d40 100644 --- a/drivers/net/can/sja1000/tscan1.c +++ b/drivers/net/can/sja1000/tscan1.c @@ -5,10 +5,9 @@ * Copyright 2010 Andre B. Oliveira */ -/* - * References: - * - Getting started with TS-CAN1, Technologic Systems, Jun 2009 - * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf +/* References: + * - Getting started with TS-CAN1, Technologic Systems, Feb 2022 + * https://docs.embeddedts.com/TS-CAN1 */ #include -- cgit v1.2.3 From bb75e352d7ac35778fc555b02a69edcc03c36e74 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 1 Apr 2022 19:21:20 +0200 Subject: can: mscan: mpc5xxx_can: Prepare cleanup of powerpc's asm/prom.h powerpc's asm/prom.h brings some headers that it doesn't need itself. 
In order to clean it up, first add missing headers in users of asm/prom.h. Link: https://lore.kernel.org/all/878888f9057ad2f66ca0621a0007472bf57f3e3d.1648833432.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Signed-off-by: Marc Kleine-Budde --- drivers/net/can/mscan/mpc5xxx_can.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index de4ddf79ba9b..65ba6697bd7d 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #include #include #include -- cgit v1.2.3 From e6ec83790593af2d7ad4bae25831baa206780f66 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Tue, 19 Apr 2022 08:14:49 +0000 Subject: can: flexcan: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get() is more appropriate and simplifies the code. Link: https://lore.kernel.org/all/20220419081449.2574026-1-chi.minghao@zte.com.cn Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Marc Kleine-Budde --- drivers/net/can/flexcan/flexcan-core.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 389bd9da7e9a..fe9bda0f5ec4 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -723,11 +723,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev, const struct flexcan_priv *priv = netdev_priv(dev); int err; - err = pm_runtime_get_sync(priv->dev); - if (err < 0) { - pm_runtime_put_noidle(priv->dev); + err = pm_runtime_resume_and_get(priv->dev); + if (err < 0) return err; - } err = __flexcan_get_berr_counter(dev, bec); @@ -1700,11 +1698,9 @@ static int flexcan_open(struct net_device *dev) return -EINVAL; } - err = pm_runtime_get_sync(priv->dev); - if (err < 0) { - pm_runtime_put_noidle(priv->dev); + err = pm_runtime_resume_and_get(priv->dev); + if (err < 0) return err; - } err = open_candev(dev); if (err) -- cgit v1.2.3 From ae38fda02996d43d9fb09f16e81e0008704dd524 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Thu, 17 Mar 2022 21:29:07 +0100 Subject: can: xilinx_can: mark bit timing constants as const This patch marks the bit timing constants as const.
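For readers unfamiliar with the pattern, the sketch below shows what such a const-qualified bit timing constants table looks like; it is an illustration with invented field values and an invented name, not code taken from this patch. The table then lives in read-only data and cannot be modified at run time, and it can still be handed to the CAN core, which stores such tables behind a const pointer (as the drivers later in this series also do via priv->can.bittiming_const):

#include <linux/can/dev.h>

/* Hypothetical driver constants, const-qualified so they end up in
 * .rodata; the limit values are invented for this example only.
 */
static const struct can_bittiming_const example_bittiming_const = {
	.name = "example",
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* in a hypothetical probe function:
 *	priv->can.bittiming_const = &example_bittiming_const;
 */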
Fixes: c223da689324 ("can: xilinx_can: Add support for CANFD FD frames") Link: https://lore.kernel.org/all/20220317203119.792552-1-mkl@pengutronix.de Cc: Appana Durga Kedareswara rao Cc: Naga Sureshkumar Relli Signed-off-by: Marc Kleine-Budde --- drivers/net/can/xilinx_can.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index e562c5ab1149..43f0c6a064ba 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -239,7 +239,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = { }; /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */ -static struct can_bittiming_const xcan_data_bittiming_const_canfd = { +static const struct can_bittiming_const xcan_data_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 16, @@ -265,7 +265,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { }; /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */ -static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { +static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 32, -- cgit v1.2.3 From c6f2a617a0a81b28ea006f91830ed2879c95c064 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 22 Mar 2022 09:19:13 +0100 Subject: can: mcp251xfd: add support for mcp251863 The MCP251863 device is a CAN-FD controller (MCP2518FD) with an integrated transceiver (ATA6563). This patch adds support for the new device. Link: https://lore.kernel.org/all/20220419072805.2840340-3-mkl@pengutronix.de Cc: Thomas Kopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 19 +++++++++++++++++-- drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 12 +++++++----- 2 files changed, 24 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 230ec60003fc..b21252390216 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -37,6 +37,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = { .model = MCP251XFD_MODEL_MCP2518FD, }; +static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = { + .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | + MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, + .model = MCP251XFD_MODEL_MCP251863, +}; + /* Autodetect model, start with CRC enabled.
*/ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = { .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | @@ -75,6 +81,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) return "MCP2517FD"; case MCP251XFD_MODEL_MCP2518FD: return "MCP2518FD"; + case MCP251XFD_MODEL_MCP251863: + return "MCP251863"; case MCP251XFD_MODEL_MCP251XFD: return "MCP251xFD"; } @@ -1259,7 +1267,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr) * - for mcp2518fd: offset not 0 or 1 */ if (chip_tx_tail != tx_tail || - !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) { + !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) || + mcp251xfd_is_251863(priv))))) { netdev_err(priv->ndev, "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n", addr, nr, tx_ring->tail, tx_tail, chip_tx_tail, @@ -1697,7 +1706,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) else devtype_data = &mcp251xfd_devtype_data_mcp2517fd; - if (!mcp251xfd_is_251X(priv) && + if (!mcp251xfd_is_251XFD(priv) && priv->devtype_data.model != devtype_data->model) { netdev_info(ndev, "Detected %s, but firmware specifies a %s. Fixing up.\n", @@ -1929,6 +1938,9 @@ static const struct of_device_id mcp251xfd_of_match[] = { }, { .compatible = "microchip,mcp2518fd", .data = &mcp251xfd_devtype_data_mcp2518fd, + }, { + .compatible = "microchip,mcp251863", + .data = &mcp251xfd_devtype_data_mcp251863, }, { .compatible = "microchip,mcp251xfd", .data = &mcp251xfd_devtype_data_mcp251xfd, @@ -1945,6 +1957,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = { }, { .name = "mcp2518fd", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd, + }, { + .name = "mcp251863", + .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863, }, { .name = "mcp251xfd", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd, diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 9cb6b5ad8dda..1d43bccc29bf 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -586,7 +586,8 @@ struct mcp251xfd_regs_status { enum mcp251xfd_model { MCP251XFD_MODEL_MCP2517FD = 0x2517, MCP251XFD_MODEL_MCP2518FD = 0x2518, - MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */ + MCP251XFD_MODEL_MCP251863 = 0x251863, + MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */ }; struct mcp251xfd_devtype_data { @@ -659,12 +660,13 @@ struct mcp251xfd_priv { static inline bool \ mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \ { \ - return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \ + return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \ } -MCP251XFD_IS(2517); -MCP251XFD_IS(2518); -MCP251XFD_IS(251X); +MCP251XFD_IS(2517FD); +MCP251XFD_IS(2518FD); +MCP251XFD_IS(251863); +MCP251XFD_IS(251XFD); static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv) { -- cgit v1.2.3 From 2dcb8e8782d8e4c38903bf37b1a24d3ffd193da7 Mon Sep 17 00:00:00 2001 From: Martin Jerabek Date: Tue, 22 Mar 2022 00:32:30 +0100 Subject: can: ctucanfd: add support for CTU CAN FD open-source IP core - bus independent part. This driver adds support for the CTU CAN FD open-source IP core. More documentation and core sources at project page (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core). 
The core integration to Xilinx Zynq system as platform driver is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top). Implementation on Intel FPGA based PCI Express board is available from project (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd). More about CAN bus related projects used and developed at CTU FEE at https://canbus.pages.fel.cvut.cz/ . Link: https://lore.kernel.org/all/1906e4941560ae2ce4b8d181131fd4963aa31611.1647904780.git.pisa@cmp.felk.cvut.cz Signed-off-by: Martin Jerabek Signed-off-by: Ondrej Ille Signed-off-by: Pavel Pisa Signed-off-by: Marc Kleine-Budde --- drivers/net/can/Kconfig | 1 + drivers/net/can/Makefile | 1 + drivers/net/can/ctucanfd/Kconfig | 12 + drivers/net/can/ctucanfd/Makefile | 7 + drivers/net/can/ctucanfd/ctucanfd.h | 82 ++ drivers/net/can/ctucanfd/ctucanfd_base.c | 1490 ++++++++++++++++++++++++++++ drivers/net/can/ctucanfd/ctucanfd_kframe.h | 77 ++ drivers/net/can/ctucanfd/ctucanfd_kregs.h | 325 ++++++ 8 files changed, 1995 insertions(+) create mode 100644 drivers/net/can/ctucanfd/Kconfig create mode 100644 drivers/net/can/ctucanfd/Makefile create mode 100644 drivers/net/can/ctucanfd/ctucanfd.h create mode 100644 drivers/net/can/ctucanfd/ctucanfd_base.c create mode 100644 drivers/net/can/ctucanfd/ctucanfd_kframe.h create mode 100644 drivers/net/can/ctucanfd/ctucanfd_kregs.h (limited to 'drivers') diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index fff259247d52..ac760fd39282 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -170,6 +170,7 @@ config PCH_CAN source "drivers/net/can/c_can/Kconfig" source "drivers/net/can/cc770/Kconfig" +source "drivers/net/can/ctucanfd/Kconfig" source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 1e660afcb61b..0af85983634c 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -16,6 +16,7 @@ obj-y += softing/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_C_CAN) += c_can/ +obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/ obj-$(CONFIG_CAN_FLEXCAN) += flexcan/ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig new file mode 100644 index 000000000000..b5f364068f86 --- /dev/null +++ b/drivers/net/can/ctucanfd/Kconfig @@ -0,0 +1,12 @@ +config CAN_CTUCANFD + tristate "CTU CAN-FD IP core" + help + This driver adds support for the CTU CAN FD open-source IP core. + More documentation and core sources at project page + (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core). + The core integration to Xilinx Zynq system as platform driver + is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top). + Implementation on Intel FPGA-based PCI Express board is available + from project (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd) and + on Intel SoC from project (https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd). + Guidepost CTU FEE CAN bus projects page https://canbus.pages.fel.cvut.cz/ . 
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile new file mode 100644 index 000000000000..259ecb0222c2 --- /dev/null +++ b/drivers/net/can/ctucanfd/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Makefile for the CTU CAN-FD IP module drivers +# + +obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o +ctucanfd-y := ctucanfd_base.o diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h new file mode 100644 index 000000000000..0e9904f6a05d --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#ifndef __CTUCANFD__ +#define __CTUCANFD__ + +#include +#include +#include + +enum ctu_can_fd_can_registers; + +struct ctucan_priv { + struct can_priv can; /* must be first member! */ + + void __iomem *mem_base; + u32 (*read_reg)(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg); + void (*write_reg)(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val); + + unsigned int txb_head; + unsigned int txb_tail; + u32 txb_prio; + unsigned int ntxbufs; + spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */ + + struct napi_struct napi; + struct device *dev; + struct clk *can_clk; + + int irq_flags; + unsigned long drv_flags; + + u32 rxfrm_first_word; + + struct list_head peers_on_pdev; +}; + +/** + * ctucan_probe_common - Device type independent registration call + * + * This function does all the memory allocation and registration for the CAN + * device. 
+ * + * @dev: Handle to the generic device structure + * @addr: Base address of CTU CAN FD core address + * @irq: Interrupt number + * @ntxbufs: Number of implemented Tx buffers + * @can_clk_rate: Clock rate, if 0 then clock are taken from device node + * @pm_enable_call: Whether pm_runtime_enable should be called + * @set_drvdata_fnc: Function to set network driver data for physical device + * + * Return: 0 on success and failure value on error + */ +int ctucan_probe_common(struct device *dev, void __iomem *addr, + int irq, unsigned int ntxbufs, + unsigned long can_clk_rate, + int pm_enable_call, + void (*set_drvdata_fnc)(struct device *dev, + struct net_device *ndev)); + +int ctucan_suspend(struct device *dev) __maybe_unused; +int ctucan_resume(struct device *dev) __maybe_unused; + +#endif /*__CTUCANFD__*/ diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c new file mode 100644 index 000000000000..7a4550f60abb --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -0,0 +1,1490 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ctucanfd.h" +#include "ctucanfd_kregs.h" +#include "ctucanfd_kframe.h" + +#ifdef DEBUG +#define ctucan_netdev_dbg(ndev, args...) \ + netdev_dbg(ndev, args) +#else +#define ctucan_netdev_dbg(...) 
do { } while (0) +#endif + +#define CTUCANFD_ID 0xCAFD + +/* TX buffer rotation: + * - when a buffer transitions to empty state, rotate order and priorities + * - if more buffers seem to transition at the same time, rotate by the number of buffers + * - it may be assumed that buffers transition to empty state in FIFO order (because we manage + * priorities that way) + * - at frame filling, do not rotate anything, just increment buffer modulo counter + */ + +#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1 + +#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \ + [st] = #st + +enum ctucan_txtb_status { + TXT_NOT_EXIST = 0x0, + TXT_RDY = 0x1, + TXT_TRAN = 0x2, + TXT_ABTP = 0x3, + TXT_TOK = 0x4, + TXT_ERR = 0x6, + TXT_ABT = 0x7, + TXT_ETY = 0x8, +}; + +enum ctucan_txtb_command { + TXT_CMD_SET_EMPTY = 0x01, + TXT_CMD_SET_READY = 0x02, + TXT_CMD_SET_ABORT = 0x04 +}; + +static const struct can_bittiming_const ctu_can_fd_bit_timing_max = { + .name = "ctu_can_fd", + .tseg1_min = 2, + .tseg1_max = 190, + .tseg2_min = 1, + .tseg2_max = 63, + .sjw_max = 31, + .brp_min = 1, + .brp_max = 8, + .brp_inc = 1, +}; + +static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = { + .name = "ctu_can_fd", + .tseg1_min = 2, + .tseg1_max = 94, + .tseg2_min = 1, + .tseg2_max = 31, + .sjw_max = 31, + .brp_min = 1, + .brp_max = 2, + .brp_inc = 1, +}; + +static const char * const ctucan_state_strings[CAN_STATE_MAX] = { + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED), + CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING) +}; + +static void ctucan_write32_le(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val) +{ + iowrite32(val, priv->mem_base + reg); +} + +static void ctucan_write32_be(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg, u32 val) +{ + iowrite32be(val, priv->mem_base + reg); +} + +static u32 ctucan_read32_le(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg) +{ + return ioread32(priv->mem_base + reg); +} + +static u32 ctucan_read32_be(struct ctucan_priv *priv, + enum ctu_can_fd_can_registers reg) +{ + return ioread32be(priv->mem_base + reg); +} + +static inline void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, + u32 val) +{ + priv->write_reg(priv, reg, val); +} + +static inline u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) +{ + return priv->read_reg(priv, reg); +} + +static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base, + u32 offset, u32 val) +{ + priv->write_reg(priv, buf_base + offset, val); +} + +#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS))) +#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE))) + +/** + * ctucan_state_to_str() - Converts CAN controller state code to corresponding text + * @state: CAN controller state code + * + * Return: Pointer to string representation of the error state + */ +static const char *ctucan_state_to_str(enum can_state state) +{ + const char *txt = NULL; + + if (state >= 0 && state < CAN_STATE_MAX) + txt = ctucan_state_strings[state]; + return txt ? 
txt : "UNKNOWN"; +} + +/** + * ctucan_reset() - Issues software reset request to CTU CAN FD + * @ndev: Pointer to net_device structure + * + * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset + */ +static int ctucan_reset(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int i = 100; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST); + clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + + do { + u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID, + ctucan_read32(priv, CTUCANFD_DEVICE_ID)); + + if (device_id == 0xCAFD) + return 0; + if (!i--) { + netdev_warn(ndev, "device did not leave reset\n"); + return -ETIMEDOUT; + } + usleep_range(100, 200); + } while (1); +} + +/** + * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD + * @ndev: Pointer to net_device structure + * @bt: Pointer to Bit timing structure + * @nominal: True - Nominal bit timing, False - Data bit timing + * + * Return: 0 - OK, -%EPERM if controller is enabled + */ +static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int max_ph1_len = 31; + u32 btr = 0; + u32 prop_seg = bt->prop_seg; + u32 phase_seg1 = bt->phase_seg1; + + if (CTU_CAN_FD_ENABLED(priv)) { + netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n"); + return -EPERM; + } + + if (nominal) + max_ph1_len = 63; + + /* The timing calculation functions have only constraints on tseg1, which is prop_seg + + * phase1_seg combined. tseg1 is then split in half and stored into prog_seg and phase_seg1. + * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the + * values here. + */ + if (phase_seg1 > max_ph1_len) { + prop_seg += phase_seg1 - max_ph1_len; + phase_seg1 = max_ph1_len; + bt->prop_seg = prop_seg; + bt->phase_seg1 = phase_seg1; + } + + if (nominal) { + btr = FIELD_PREP(REG_BTR_PROP, prop_seg); + btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1); + btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2); + btr |= FIELD_PREP(REG_BTR_BRP, bt->brp); + btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw); + + ctucan_write32(priv, CTUCANFD_BTR, btr); + } else { + btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg); + btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1); + btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2); + btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp); + btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw); + + ctucan_write32(priv, CTUCANFD_BTR_FD, btr); + } + + return 0; +} + +/** + * ctucan_set_bittiming() - CAN set nominal bit timing routine + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM on error + */ +static int ctucan_set_bittiming(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *bt = &priv->can.bittiming; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + /* Note that bt may be modified here */ + return ctucan_set_btr(ndev, bt, true); +} + +/** + * ctucan_set_data_bittiming() - CAN set data bit timing routine + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM on error + */ +static int ctucan_set_data_bittiming(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *dbt = &priv->can.data_bittiming; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + /* Note that dbt may be modified here */ + return ctucan_set_btr(ndev, dbt, false); +} + +/** + * ctucan_set_secondary_sample_point() - Sets 
secondary sample point in CTU CAN FD + * @ndev: Pointer to net_device structure + * + * Return: 0 on success, -%EPERM if controller is enabled + */ +static int ctucan_set_secondary_sample_point(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct can_bittiming *dbt = &priv->can.data_bittiming; + int ssp_offset = 0; + u32 ssp_cfg = 0; /* No SSP by default */ + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + if (CTU_CAN_FD_ENABLED(priv)) { + netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n"); + return -EPERM; + } + + /* Use SSP for bit-rates above 1 Mbits/s */ + if (dbt->bitrate > 1000000) { + /* Calculate SSP in minimal time quanta */ + ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate; + + if (ssp_offset > 127) { + netdev_warn(ndev, "SSP offset saturated to 127\n"); + ssp_offset = 127; + } + + ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset); + ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1); + } + + ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg); + + return 0; +} + +/** + * ctucan_set_mode() - Sets CTU CAN FDs mode + * @priv: Pointer to private data + * @mode: Pointer to controller modes to be set + */ +static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode) +{ + u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE); + + mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ? + (mode_reg | REG_MODE_ILBP) : + (mode_reg & ~REG_MODE_ILBP); + + mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ? + (mode_reg | REG_MODE_BMM) : + (mode_reg & ~REG_MODE_BMM); + + mode_reg = (mode->flags & CAN_CTRLMODE_FD) ? + (mode_reg | REG_MODE_FDE) : + (mode_reg & ~REG_MODE_FDE); + + mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ? + (mode_reg | REG_MODE_ACF) : + (mode_reg & ~REG_MODE_ACF); + + mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ? + (mode_reg | REG_MODE_NISOFD) : + (mode_reg & ~REG_MODE_NISOFD); + + /* One shot mode supported indirectly via Retransmit limit */ + mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF); + mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ? + (mode_reg | REG_MODE_RTRLE) : + (mode_reg & ~REG_MODE_RTRLE); + + /* Some bits fixed: + * TSTM - Off, User shall not be able to change REC/TEC by hand during operation + */ + mode_reg &= ~REG_MODE_TSTM; + + ctucan_write32(priv, CTUCANFD_MODE, mode_reg); +} + +/** + * ctucan_chip_start() - This routine starts the driver + * @ndev: Pointer to net_device structure + * + * Routine expects that chip is in reset state. It setups initial + * Tx buffers for FIFO priorities, sets bittiming, enables interrupts, + * switches core to operational mode and changes controller + * state to %CAN_STATE_STOPPED. 
+ * + * Return: 0 on success and failure value on error + */ +static int ctucan_chip_start(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 int_ena, int_msk; + u32 mode_reg; + int err; + struct can_ctrlmode mode; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + priv->txb_prio = 0x01234567; + priv->txb_head = 0; + priv->txb_tail = 0; + ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio); + + /* Configure bit-rates and ssp */ + err = ctucan_set_bittiming(ndev); + if (err < 0) + return err; + + err = ctucan_set_data_bittiming(ndev); + if (err < 0) + return err; + + err = ctucan_set_secondary_sample_point(ndev); + if (err < 0) + return err; + + /* Configure modes */ + mode.flags = priv->can.ctrlmode; + mode.mask = 0xFFFFFFFF; + ctucan_set_mode(priv, &mode); + + /* Configure interrupts */ + int_ena = REG_INT_STAT_RBNEI | + REG_INT_STAT_TXBHCI | + REG_INT_STAT_EWLI | + REG_INT_STAT_FCSI; + + /* Bus error reporting -> Allow Error/Arb.lost interrupts */ + if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { + int_ena |= REG_INT_STAT_ALI | + REG_INT_STAT_BEI; + } + + int_msk = ~int_ena; /* Mask all disabled interrupts */ + + /* It's after reset, so there is no need to clear anything */ + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk); + ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena); + + /* Controller enters ERROR_ACTIVE on initial FCSI */ + priv->can.state = CAN_STATE_STOPPED; + + /* Enable the controller */ + mode_reg = ctucan_read32(priv, CTUCANFD_MODE); + mode_reg |= REG_MODE_ENA; + ctucan_write32(priv, CTUCANFD_MODE, mode_reg); + + return 0; +} + +/** + * ctucan_do_set_mode() - Sets mode of the driver + * @ndev: Pointer to net_device structure + * @mode: Tells the mode of the driver + * + * This check the drivers state and calls the corresponding modes to set. 
+ * + * Return: 0 on success and failure value on error + */ +static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int ret; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + switch (mode) { + case CAN_MODE_START: + ret = ctucan_reset(ndev); + if (ret < 0) + return ret; + ret = ctucan_chip_start(ndev); + if (ret < 0) { + netdev_err(ndev, "ctucan_chip_start failed!\n"); + return ret; + } + netif_wake_queue(ndev); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +/** + * ctucan_get_tx_status() - Gets status of TXT buffer + * @priv: Pointer to private data + * @buf: Buffer index (0-based) + * + * Return: Status of TXT buffer + */ +static inline enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf) +{ + u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS); + enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7; + + return status; +} + +/** + * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer + * @priv: Pointer to private data + * @buf: Buffer index (0-based) + * + * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be + * inserted to TXT Buffer + */ +static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf) +{ + enum ctucan_txtb_status buf_status; + + buf_status = ctucan_get_tx_status(priv, buf); + if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP) + return false; + + return true; +} + +/** + * ctucan_insert_frame() - Inserts frame to TXT buffer + * @priv: Pointer to private data + * @cf: Pointer to CAN frame to be inserted + * @buf: TXT Buffer index to which frame is inserted (0-based) + * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame + * + * Return: True - Frame inserted successfully + * False - Frame was not inserted due to one of: + * 1. TXT Buffer is not writable (it is in wrong state) + * 2. Invalid TXT buffer index + * 3. 
Invalid frame length + */ +static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf, + bool isfdf) +{ + u32 buf_base; + u32 ffw = 0; + u32 idw = 0; + unsigned int i; + + if (buf >= priv->ntxbufs) + return false; + + if (!ctucan_is_txt_buf_writable(priv, buf)) + return false; + + if (cf->len > CANFD_MAX_DLEN) + return false; + + /* Prepare Frame format */ + if (cf->can_id & CAN_RTR_FLAG) + ffw |= REG_FRAME_FORMAT_W_RTR; + + if (cf->can_id & CAN_EFF_FLAG) + ffw |= REG_FRAME_FORMAT_W_IDE; + + if (isfdf) { + ffw |= REG_FRAME_FORMAT_W_FDF; + if (cf->flags & CANFD_BRS) + ffw |= REG_FRAME_FORMAT_W_BRS; + } + + ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len)); + + /* Prepare identifier */ + if (cf->can_id & CAN_EFF_FLAG) + idw = cf->can_id & CAN_EFF_MASK; + else + idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK); + + /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */ + buf_base = (buf + 1) * 0x100; + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw); + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw); + + /* Write Data payload */ + if (!(cf->can_id & CAN_RTR_FLAG)) { + for (i = 0; i < cf->len; i += 4) { + u32 data = le32_to_cpu(*(__le32 *)(cf->data + i)); + + ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data); + } + } + + return true; +} + +/** + * ctucan_give_txtb_cmd() - Applies command on TXT buffer + * @priv: Pointer to private data + * @cmd: Command to give + * @buf: Buffer index (0-based) + */ +static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf) +{ + u32 tx_cmd = cmd; + + tx_cmd |= 1 << (buf + 8); + ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd); +} + +/** + * ctucan_start_xmit() - Starts the transmission + * @skb: sk_buff pointer that contains data to be Txed + * @ndev: Pointer to net_device structure + * + * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and + * populates its fields to start the transmission. + * + * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available, + * negative return values reserved for error cases + */ +static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 txtb_id; + bool ok; + unsigned long flags; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (unlikely(!CTU_CAN_FD_TXTNF(priv))) { + netif_stop_queue(ndev); + netdev_err(ndev, "BUG!, no TXB free when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + txtb_id = priv->txb_head % priv->ntxbufs; + ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id); + ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb)); + + if (!ok) { + netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! 
HW Bug?"); + kfree_skb(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + can_put_echo_skb(skb, ndev, txtb_id, 0); + + spin_lock_irqsave(&priv->tx_lock, flags); + ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id); + priv->txb_head++; + + /* Check if all TX buffers are full */ + if (!CTU_CAN_FD_TXTNF(priv)) + netif_stop_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + + return NETDEV_TX_OK; +} + +/** + * ctucan_read_rx_frame() - Reads frame from RX FIFO + * @priv: Pointer to CTU CAN FD's private data + * @cf: Pointer to CAN frame struct + * @ffw: Previously read frame format word + * + * Note: Frame format word must be read separately and provided in 'ffw'. + */ +static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw) +{ + u32 idw; + unsigned int i; + unsigned int wc; + unsigned int len; + + idw = ctucan_read32(priv, CTUCANFD_RX_DATA); + if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw)) + cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (idw >> 18) & CAN_SFF_MASK; + + /* BRS, ESI, RTR Flags */ + cf->flags = 0; + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) { + if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw)) + cf->flags |= CANFD_BRS; + if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw)) + cf->flags |= CANFD_ESI; + } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) { + cf->can_id |= CAN_RTR_FLAG; + } + + wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3; + + /* DLC */ + if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) { + len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw); + } else { + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) + len = wc << 2; + else + len = 8; + } + cf->len = len; + if (unlikely(len > wc * 4)) + len = wc * 4; + + /* Timestamp - Read and throw away */ + ctucan_read32(priv, CTUCANFD_RX_DATA); + ctucan_read32(priv, CTUCANFD_RX_DATA); + + /* Data */ + for (i = 0; i < len; i += 4) { + u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA); + *(__le32 *)(cf->data + i) = cpu_to_le32(data); + } + while (unlikely(i < wc * 4)) { + ctucan_read32(priv, CTUCANFD_RX_DATA); + i += 4; + } +} + +/** + * ctucan_rx() - Called from CAN ISR to complete the received frame processing + * @ndev: Pointer to net_device structure + * + * This function is invoked from the CAN isr(poll) to process the Rx frames. It does minimal + * processing and invokes "netif_receive_skb" to complete further processing. + * Return: 1 when frame is passed to the network layer, 0 when the first frame word is read but + * system is out of free SKBs temporally and left code to resolve SKB allocation later, + * -%EAGAIN in a case of empty Rx FIFO. 
+ */ +static int ctucan_rx(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 ffw; + + if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) { + ffw = priv->rxfrm_first_word; + clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + } else { + ffw = ctucan_read32(priv, CTUCANFD_RX_DATA); + } + + if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw)) + return -EAGAIN; + + if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) + skb = alloc_canfd_skb(ndev, &cf); + else + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); + + if (unlikely(!skb)) { + priv->rxfrm_first_word = ffw; + set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); + return 0; + } + + ctucan_read_rx_frame(priv, cf, ffw); + + stats->rx_bytes += cf->len; + stats->rx_packets++; + netif_receive_skb(skb); + + return 1; +} + +/** + * ctucan_read_fault_state() - Reads CTU CAN FDs fault confinement state. + * @priv: Pointer to private data + * + * Returns: Fault confinement state of controller + */ +static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv) +{ + u32 fs; + u32 rec_tec; + u32 ewl; + + fs = ctucan_read32(priv, CTUCANFD_EWL); + rec_tec = ctucan_read32(priv, CTUCANFD_REC); + ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs); + + if (FIELD_GET(REG_EWL_ERA, fs)) { + if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) && + ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec)) + return CAN_STATE_ERROR_ACTIVE; + else + return CAN_STATE_ERROR_WARNING; + } else if (FIELD_GET(REG_EWL_ERP, fs)) { + return CAN_STATE_ERROR_PASSIVE; + } else if (FIELD_GET(REG_EWL_BOF, fs)) { + return CAN_STATE_BUS_OFF; + } + + WARN(true, "Invalid error state"); + return CAN_STATE_ERROR_PASSIVE; +} + +/** + * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller + * @priv: Pointer to private data + * @bec: Pointer to Error counter structure + */ +static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec) +{ + u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC); + + bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs); + bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs); +} + +/** + * ctucan_err_interrupt() - Error frame ISR + * @ndev: net_device pointer + * @isr: interrupt status register value + * + * This is the CAN error interrupt and it will check the type of error and forward the error + * frame to upper layers. 
+ */ +static void ctucan_err_interrupt(struct net_device *ndev, u32 isr) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + enum can_state state; + struct can_berr_counter bec; + u32 err_capt_alc; + int dologerr = net_ratelimit(); + + ctucan_get_rec_tec(priv, &bec); + state = ctucan_read_fault_state(priv); + err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT); + + if (dologerr) + netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n", + __func__, isr, bec.rxerr, bec.txerr, + FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc), + FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc)); + + skb = alloc_can_err_skb(ndev, &cf); + + /* EWLI: error warning limit condition met + * FCSI: fault confinement state changed + * ALI: arbitration lost (just informative) + * BEI: bus error interrupt + */ + if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) { + netdev_info(ndev, "state changes from %s to %s\n", + ctucan_state_to_str(priv->can.state), + ctucan_state_to_str(state)); + + if (priv->can.state == state) + netdev_warn(ndev, + "current and previous state is the same! (missed interrupt?)\n"); + + priv->can.state = state; + switch (state) { + case CAN_STATE_BUS_OFF: + priv->can.can_stats.bus_off++; + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + break; + case CAN_STATE_ERROR_PASSIVE: + priv->can.can_stats.error_passive++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.rxerr > 127) ? + CAN_ERR_CRTL_RX_PASSIVE : + CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + break; + case CAN_STATE_ERROR_WARNING: + priv->can.can_stats.error_warning++; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } + break; + case CAN_STATE_ERROR_ACTIVE: + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + default: + netdev_warn(ndev, "unhandled error state (%d:%s)!\n", + state, ctucan_state_to_str(state)); + break; + } + } + + /* Check for Arbitration Lost interrupt */ + if (FIELD_GET(REG_INT_STAT_ALI, isr)) { + if (dologerr) + netdev_info(ndev, "arbitration lost\n"); + priv->can.can_stats.arbitration_lost++; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = CAN_ERR_LOSTARB_UNSPEC; + } + } + + /* Check for Bus Error interrupt */ + if (FIELD_GET(REG_INT_STAT_BEI, isr)) { + netdev_info(ndev, "bus error\n"); + priv->can.can_stats.bus_error++; + stats->rx_errors++; + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + cf->data[2] = CAN_ERR_PROT_UNSPEC; + cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC; + } + } + + if (skb) { + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +/** + * ctucan_rx_poll() - Poll routine for rx packets (NAPI) + * @napi: NAPI structure pointer + * @quota: Max number of rx packets to be processed. + * + * This is the poll routine for rx part. It will process the packets maximux quota value. 
+ * + * Return: Number of packets received + */ +static int ctucan_rx_poll(struct napi_struct *napi, int quota) +{ + struct net_device *ndev = napi->dev; + struct ctucan_priv *priv = netdev_priv(ndev); + int work_done = 0; + u32 status; + u32 framecnt; + int res = 1; + + framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS)); + while (framecnt && work_done < quota && res > 0) { + res = ctucan_rx(ndev); + work_done++; + framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS)); + } + + /* Check for RX FIFO Overflow */ + status = ctucan_read32(priv, CTUCANFD_STATUS); + if (FIELD_GET(REG_STATUS_DOR, status)) { + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + netdev_info(ndev, "rx_poll: rx fifo overflow\n"); + stats->rx_over_errors++; + stats->rx_errors++; + skb = alloc_can_err_skb(ndev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } + + /* Clear Data Overrun */ + ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO); + } + + if (work_done) + can_led_event(ndev, CAN_LED_EVENT_RX); + + if (!framecnt && res != 0) { + if (napi_complete_done(napi, work_done)) { + /* Clear and enable RBNEI. It is level-triggered, so + * there is no race condition. + */ + ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI); + ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI); + } + } + + return work_done; +} + +/** + * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers + * @ndev: net_device pointer + */ +static void ctucan_rotate_txb_prio(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 prio = priv->txb_prio; + + prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF); + ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio); + priv->txb_prio = prio; + ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio); +} + +/** + * ctucan_tx_interrupt() - Tx done Isr + * @ndev: net_device pointer + */ +static void ctucan_tx_interrupt(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + bool first = true; + bool some_buffers_processed; + unsigned long flags; + enum ctucan_txtb_status txtb_status; + u32 txtb_id; + + /* read tx_status + * if txb[n].finished (bit 2) + * if ok -> echo + * if error / aborted -> ?? (find how to handle oneshot mode) + * txb_tail++ + */ + do { + spin_lock_irqsave(&priv->tx_lock, flags); + + some_buffers_processed = false; + while ((int)(priv->txb_head - priv->txb_tail) > 0) { + txtb_id = priv->txb_tail % priv->ntxbufs; + txtb_status = ctucan_get_tx_status(priv, txtb_id); + + ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status); + + switch (txtb_status) { + case TXT_TOK: + ctucan_netdev_dbg(ndev, "TXT_OK\n"); + stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL); + stats->tx_packets++; + break; + case TXT_ERR: + /* This indicated that retransmit limit has been reached. Obviously + * we should not echo the frame, but also not indicate any kind of + * error. If desired, it was already reported (possible multiple + * times) on each arbitration lost. + */ + netdev_warn(ndev, "TXB in Error state\n"); + can_free_echo_skb(ndev, txtb_id, NULL); + stats->tx_dropped++; + break; + case TXT_ABT: + /* Same as for TXT_ERR, only with different cause. 
We *could* + * re-queue the frame, but multiqueue/abort is not supported yet + * anyway. + */ + netdev_warn(ndev, "TXB in Aborted state\n"); + can_free_echo_skb(ndev, txtb_id, NULL); + stats->tx_dropped++; + break; + default: + /* Bug only if the first buffer is not finished, otherwise it is + * pretty much expected. + */ + if (first) { + netdev_err(ndev, + "BUG: TXB#%u not in a finished state (0x%x)!\n", + txtb_id, txtb_status); + spin_unlock_irqrestore(&priv->tx_lock, flags); + /* do not clear nor wake */ + return; + } + goto clear; + } + priv->txb_tail++; + first = false; + some_buffers_processed = true; + /* Adjust priorities *before* marking the buffer as empty. */ + ctucan_rotate_txb_prio(ndev); + ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id); + } +clear: + spin_unlock_irqrestore(&priv->tx_lock, flags); + + /* If no buffers were processed this time, we cannot clear - that would introduce + * a race condition. + */ + if (some_buffers_processed) { + /* Clear the interrupt again. We do not want to receive again interrupt for + * the buffer already handled. If it is the last finished one then it would + * cause log of spurious interrupt. + */ + ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI); + } + } while (some_buffers_processed); + + can_led_event(ndev, CAN_LED_EVENT_TX); + + spin_lock_irqsave(&priv->tx_lock, flags); + + /* Check if at least one TX buffer is free */ + if (CTU_CAN_FD_TXTNF(priv)) + netif_wake_queue(ndev); + + spin_unlock_irqrestore(&priv->tx_lock, flags); +} + +/** + * ctucan_interrupt() - CAN Isr + * @irq: irq number + * @dev_id: device id poniter + * + * This is the CTU CAN FD ISR. It checks for the type of interrupt + * and invokes the corresponding ISR. + * + * Return: + * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise + */ +static irqreturn_t ctucan_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ctucan_priv *priv = netdev_priv(ndev); + u32 isr, icr; + u32 imask; + int irq_loops; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + for (irq_loops = 0; irq_loops < 10000; irq_loops++) { + /* Get the interrupt status */ + isr = ctucan_read32(priv, CTUCANFD_INT_STAT); + + if (!isr) + return irq_loops ? IRQ_HANDLED : IRQ_NONE; + + /* Receive Buffer Not Empty Interrupt */ + if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) { + ctucan_netdev_dbg(ndev, "RXBNEI\n"); + /* Mask RXBNEI the first, then clear interrupt and schedule NAPI. Even if + * another IRQ fires, RBNEI will always be 0 (masked). 
+ */ + icr = REG_INT_STAT_RBNEI; + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr); + ctucan_write32(priv, CTUCANFD_INT_STAT, icr); + napi_schedule(&priv->napi); + } + + /* TXT Buffer HW Command Interrupt */ + if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { + ctucan_netdev_dbg(ndev, "TXBHCI\n"); + /* Cleared inside */ + ctucan_tx_interrupt(ndev); + } + + /* Error interrupts */ + if (FIELD_GET(REG_INT_STAT_EWLI, isr) || + FIELD_GET(REG_INT_STAT_FCSI, isr) || + FIELD_GET(REG_INT_STAT_ALI, isr)) { + icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI); + + ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr); + ctucan_write32(priv, CTUCANFD_INT_STAT, icr); + ctucan_err_interrupt(ndev, isr); + } + /* Ignore RI, TI, LFI, RFI, BSI */ + } + + netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr); + + if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { + int i; + + netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n", + priv->txb_head, priv->txb_tail); + for (i = 0; i < priv->ntxbufs; i++) { + u32 status = ctucan_get_tx_status(priv, i); + + netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status); + } + } + + imask = 0xffffffff; + ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask); + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask); + + return IRQ_HANDLED; +} + +/** + * ctucan_chip_stop() - Driver stop routine + * @ndev: Pointer to net_device structure + * + * This is the drivers stop routine. It will disable the + * interrupts and disable the controller. + */ +static void ctucan_chip_stop(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + u32 mask = 0xffffffff; + u32 mode; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + /* Disable interrupts and disable CAN */ + ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask); + ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask); + mode = ctucan_read32(priv, CTUCANFD_MODE); + mode &= ~REG_MODE_ENA; + ctucan_write32(priv, CTUCANFD_MODE, mode); + + priv->can.state = CAN_STATE_STOPPED; +} + +/** + * ctucan_open() - Driver open routine + * @ndev: Pointer to net_device structure + * + * This is the driver open routine. 
+ * Return: 0 on success and failure value on error + */ +static int ctucan_open(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int ret; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ret = ctucan_reset(ndev); + if (ret < 0) + goto err_reset; + + /* Common open */ + ret = open_candev(ndev); + if (ret) { + netdev_warn(ndev, "open_candev failed!\n"); + goto err_open; + } + + ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev); + if (ret < 0) { + netdev_err(ndev, "irq allocation for CAN failed\n"); + goto err_irq; + } + + ret = ctucan_chip_start(ndev); + if (ret < 0) { + netdev_err(ndev, "ctucan_chip_start failed!\n"); + goto err_chip_start; + } + + netdev_info(ndev, "ctu_can_fd device registered\n"); + can_led_event(ndev, CAN_LED_EVENT_OPEN); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + return 0; + +err_chip_start: + free_irq(ndev->irq, ndev); +err_irq: + close_candev(ndev); +err_open: +err_reset: + pm_runtime_put(priv->dev); + + return ret; +} + +/** + * ctucan_close() - Driver close routine + * @ndev: Pointer to net_device structure + * + * Return: 0 always + */ +static int ctucan_close(struct net_device *ndev) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + netif_stop_queue(ndev); + napi_disable(&priv->napi); + ctucan_chip_stop(ndev); + free_irq(ndev->irq, ndev); + close_candev(ndev); + + can_led_event(ndev, CAN_LED_EVENT_STOP); + pm_runtime_put(priv->dev); + + return 0; +} + +/** + * ctucan_get_berr_counter() - error counter routine + * @ndev: Pointer to net_device structure + * @bec: Pointer to can_berr_counter structure + * + * This is the driver error counter routine. 
+ * Return: 0 on success and failure value on error + */ +static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) +{ + struct ctucan_priv *priv = netdev_priv(ndev); + int ret; + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ctucan_get_rec_tec(priv, bec); + pm_runtime_put(priv->dev); + + return 0; +} + +static const struct net_device_ops ctucan_netdev_ops = { + .ndo_open = ctucan_open, + .ndo_stop = ctucan_close, + .ndo_start_xmit = ctucan_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +int ctucan_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ctucan_priv *priv = netdev_priv(ndev); + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + + priv->can.state = CAN_STATE_SLEEPING; + + return 0; +} +EXPORT_SYMBOL(ctucan_suspend); + +int ctucan_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ctucan_priv *priv = netdev_priv(ndev); + + ctucan_netdev_dbg(ndev, "%s\n", __func__); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} +EXPORT_SYMBOL(ctucan_resume); + +int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs, + unsigned long can_clk_rate, int pm_enable_call, + void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev)) +{ + struct ctucan_priv *priv; + struct net_device *ndev; + int ret; + + /* Create a CAN device instance */ + ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + spin_lock_init(&priv->tx_lock); + INIT_LIST_HEAD(&priv->peers_on_pdev); + priv->ntxbufs = ntxbufs; + priv->dev = dev; + priv->can.bittiming_const = &ctu_can_fd_bit_timing_max; + priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max; + priv->can.do_set_mode = ctucan_do_set_mode; + + /* Needed for timing adjustment to be performed as soon as possible */ + priv->can.do_set_bittiming = ctucan_set_bittiming; + priv->can.do_set_data_bittiming = ctucan_set_data_bittiming; + + priv->can.do_get_berr_counter = ctucan_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK + | CAN_CTRLMODE_LISTENONLY + | CAN_CTRLMODE_FD + | CAN_CTRLMODE_PRESUME_ACK + | CAN_CTRLMODE_BERR_REPORTING + | CAN_CTRLMODE_FD_NON_ISO + | CAN_CTRLMODE_ONE_SHOT; + priv->mem_base = addr; + + /* Get IRQ for the device */ + ndev->irq = irq; + ndev->flags |= IFF_ECHO; /* We support local echo */ + + if (set_drvdata_fnc) + set_drvdata_fnc(dev, ndev); + SET_NETDEV_DEV(ndev, dev); + ndev->netdev_ops = &ctucan_netdev_ops; + + /* Getting the can_clk info */ + if (!can_clk_rate) { + priv->can_clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->can_clk)) { + dev_err(dev, "Device clock not found.\n"); + ret = PTR_ERR(priv->can_clk); + goto err_free; + } + can_clk_rate = clk_get_rate(priv->can_clk); + } + + priv->write_reg = ctucan_write32_le; + priv->read_reg = ctucan_read32_le; + + if (pm_enable_call) + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", + __func__, ret); + pm_runtime_put_noidle(priv->dev); + goto err_pmdisable; + } + + /* Check for 
big-endianity and set according IO-accessors */ + if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) { + priv->write_reg = ctucan_write32_be; + priv->read_reg = ctucan_read32_be; + if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) { + netdev_err(ndev, "CTU_CAN_FD signature not found\n"); + ret = -ENODEV; + goto err_deviceoff; + } + } + + ret = ctucan_reset(ndev); + if (ret < 0) + goto err_deviceoff; + + priv->can.clock.freq = can_clk_rate; + + netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT); + + ret = register_candev(ndev); + if (ret) { + dev_err(dev, "fail to register failed (err=%d)\n", ret); + goto err_deviceoff; + } + + devm_can_led_init(ndev); + + pm_runtime_put(dev); + + netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n", + priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs); + + return 0; + +err_deviceoff: + pm_runtime_put(priv->dev); +err_pmdisable: + if (pm_enable_call) + pm_runtime_disable(dev); +err_free: + list_del_init(&priv->peers_on_pdev); + free_candev(ndev); + return ret; +} +EXPORT_SYMBOL(ctucan_probe_common); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Jerabek "); +MODULE_AUTHOR("Pavel Pisa "); +MODULE_AUTHOR("Ondrej Ille "); +MODULE_DESCRIPTION("CTU CAN FD interface"); diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h new file mode 100644 index 000000000000..3491299eaac2 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT! 
*/ + +#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ +#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__ + +#include + +/* CAN_Frame_format memory map */ +enum ctu_can_fd_can_frame_format { + CTUCANFD_FRAME_FORMAT_W = 0x0, + CTUCANFD_IDENTIFIER_W = 0x4, + CTUCANFD_TIMESTAMP_L_W = 0x8, + CTUCANFD_TIMESTAMP_U_W = 0xc, + CTUCANFD_DATA_1_4_W = 0x10, + CTUCANFD_DATA_5_8_W = 0x14, + CTUCANFD_DATA_61_64_W = 0x4c, +}; + +/* CAN_FD_Frame_format memory region */ + +/* FRAME_FORMAT_W registers */ +#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0) +#define REG_FRAME_FORMAT_W_RTR BIT(5) +#define REG_FRAME_FORMAT_W_IDE BIT(6) +#define REG_FRAME_FORMAT_W_FDF BIT(7) +#define REG_FRAME_FORMAT_W_BRS BIT(9) +#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10) +#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11) + +/* IDENTIFIER_W registers */ +#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0) +#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18) + +/* TIMESTAMP_L_W registers */ +#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0) + +/* TIMESTAMP_U_W registers */ +#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0) + +/* DATA_1_4_W registers */ +#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0) +#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8) +#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16) +#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24) + +/* DATA_5_8_W registers */ +#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0) +#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8) +#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16) +#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24) + +/* DATA_61_64_W registers */ +#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0) +#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8) +#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16) +#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24) + +#endif diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h new file mode 100644 index 000000000000..edc1c1a24348 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h @@ -0,0 +1,325 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2021 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +/* This file is autogenerated, DO NOT EDIT! 
*/ + +#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ +#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__ + +#include + +/* CAN_Registers memory map */ +enum ctu_can_fd_can_registers { + CTUCANFD_DEVICE_ID = 0x0, + CTUCANFD_VERSION = 0x2, + CTUCANFD_MODE = 0x4, + CTUCANFD_SETTINGS = 0x6, + CTUCANFD_STATUS = 0x8, + CTUCANFD_COMMAND = 0xc, + CTUCANFD_INT_STAT = 0x10, + CTUCANFD_INT_ENA_SET = 0x14, + CTUCANFD_INT_ENA_CLR = 0x18, + CTUCANFD_INT_MASK_SET = 0x1c, + CTUCANFD_INT_MASK_CLR = 0x20, + CTUCANFD_BTR = 0x24, + CTUCANFD_BTR_FD = 0x28, + CTUCANFD_EWL = 0x2c, + CTUCANFD_ERP = 0x2d, + CTUCANFD_FAULT_STATE = 0x2e, + CTUCANFD_REC = 0x30, + CTUCANFD_TEC = 0x32, + CTUCANFD_ERR_NORM = 0x34, + CTUCANFD_ERR_FD = 0x36, + CTUCANFD_CTR_PRES = 0x38, + CTUCANFD_FILTER_A_MASK = 0x3c, + CTUCANFD_FILTER_A_VAL = 0x40, + CTUCANFD_FILTER_B_MASK = 0x44, + CTUCANFD_FILTER_B_VAL = 0x48, + CTUCANFD_FILTER_C_MASK = 0x4c, + CTUCANFD_FILTER_C_VAL = 0x50, + CTUCANFD_FILTER_RAN_LOW = 0x54, + CTUCANFD_FILTER_RAN_HIGH = 0x58, + CTUCANFD_FILTER_CONTROL = 0x5c, + CTUCANFD_FILTER_STATUS = 0x5e, + CTUCANFD_RX_MEM_INFO = 0x60, + CTUCANFD_RX_POINTERS = 0x64, + CTUCANFD_RX_STATUS = 0x68, + CTUCANFD_RX_SETTINGS = 0x6a, + CTUCANFD_RX_DATA = 0x6c, + CTUCANFD_TX_STATUS = 0x70, + CTUCANFD_TX_COMMAND = 0x74, + CTUCANFD_TX_PRIORITY = 0x78, + CTUCANFD_ERR_CAPT = 0x7c, + CTUCANFD_ALC = 0x7e, + CTUCANFD_TRV_DELAY = 0x80, + CTUCANFD_SSP_CFG = 0x82, + CTUCANFD_RX_FR_CTR = 0x84, + CTUCANFD_TX_FR_CTR = 0x88, + CTUCANFD_DEBUG_REGISTER = 0x8c, + CTUCANFD_YOLO_REG = 0x90, + CTUCANFD_TIMESTAMP_LOW = 0x94, + CTUCANFD_TIMESTAMP_HIGH = 0x98, + CTUCANFD_TXTB1_DATA_1 = 0x100, + CTUCANFD_TXTB1_DATA_2 = 0x104, + CTUCANFD_TXTB1_DATA_20 = 0x14c, + CTUCANFD_TXTB2_DATA_1 = 0x200, + CTUCANFD_TXTB2_DATA_2 = 0x204, + CTUCANFD_TXTB2_DATA_20 = 0x24c, + CTUCANFD_TXTB3_DATA_1 = 0x300, + CTUCANFD_TXTB3_DATA_2 = 0x304, + CTUCANFD_TXTB3_DATA_20 = 0x34c, + CTUCANFD_TXTB4_DATA_1 = 0x400, + CTUCANFD_TXTB4_DATA_2 = 0x404, + CTUCANFD_TXTB4_DATA_20 = 0x44c, +}; + +/* Control_registers memory region */ + +/* DEVICE_ID VERSION registers */ +#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0) +#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16) +#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24) + +/* MODE SETTINGS registers */ +#define REG_MODE_RST BIT(0) +#define REG_MODE_BMM BIT(1) +#define REG_MODE_STM BIT(2) +#define REG_MODE_AFM BIT(3) +#define REG_MODE_FDE BIT(4) +#define REG_MODE_ACF BIT(7) +#define REG_MODE_TSTM BIT(8) +#define REG_MODE_RTRLE BIT(16) +#define REG_MODE_RTRTH GENMASK(20, 17) +#define REG_MODE_ILBP BIT(21) +#define REG_MODE_ENA BIT(22) +#define REG_MODE_NISOFD BIT(23) +#define REG_MODE_PEX BIT(24) +#define REG_MODE_TBFBO BIT(25) +#define REG_MODE_FDRF BIT(26) + +/* STATUS registers */ +#define REG_STATUS_RXNE BIT(0) +#define REG_STATUS_DOR BIT(1) +#define REG_STATUS_TXNF BIT(2) +#define REG_STATUS_EFT BIT(3) +#define REG_STATUS_RXS BIT(4) +#define REG_STATUS_TXS BIT(5) +#define REG_STATUS_EWL BIT(6) +#define REG_STATUS_IDLE BIT(7) +#define REG_STATUS_PEXS BIT(8) + +/* COMMAND registers */ +#define REG_COMMAND_RRB BIT(2) +#define REG_COMMAND_CDO BIT(3) +#define REG_COMMAND_ERCRST BIT(4) +#define REG_COMMAND_RXFCRST BIT(5) +#define REG_COMMAND_TXFCRST BIT(6) +#define REG_COMMAND_CPEXS BIT(7) + +/* INT_STAT registers */ +#define REG_INT_STAT_RXI BIT(0) +#define REG_INT_STAT_TXI BIT(1) +#define REG_INT_STAT_EWLI BIT(2) +#define REG_INT_STAT_DOI BIT(3) +#define REG_INT_STAT_FCSI BIT(4) +#define REG_INT_STAT_ALI BIT(5) +#define REG_INT_STAT_BEI BIT(6) +#define REG_INT_STAT_OFI 
BIT(7) +#define REG_INT_STAT_RXFI BIT(8) +#define REG_INT_STAT_BSI BIT(9) +#define REG_INT_STAT_RBNEI BIT(10) +#define REG_INT_STAT_TXBHCI BIT(11) + +/* INT_ENA_SET registers */ +#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0) + +/* INT_ENA_CLR registers */ +#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0) + +/* INT_MASK_SET registers */ +#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0) + +/* INT_MASK_CLR registers */ +#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0) + +/* BTR registers */ +#define REG_BTR_PROP GENMASK(6, 0) +#define REG_BTR_PH1 GENMASK(12, 7) +#define REG_BTR_PH2 GENMASK(18, 13) +#define REG_BTR_BRP GENMASK(26, 19) +#define REG_BTR_SJW GENMASK(31, 27) + +/* BTR_FD registers */ +#define REG_BTR_FD_PROP_FD GENMASK(5, 0) +#define REG_BTR_FD_PH1_FD GENMASK(11, 7) +#define REG_BTR_FD_PH2_FD GENMASK(17, 13) +#define REG_BTR_FD_BRP_FD GENMASK(26, 19) +#define REG_BTR_FD_SJW_FD GENMASK(31, 27) + +/* EWL ERP FAULT_STATE registers */ +#define REG_EWL_EW_LIMIT GENMASK(7, 0) +#define REG_EWL_ERP_LIMIT GENMASK(15, 8) +#define REG_EWL_ERA BIT(16) +#define REG_EWL_ERP BIT(17) +#define REG_EWL_BOF BIT(18) + +/* REC TEC registers */ +#define REG_REC_REC_VAL GENMASK(8, 0) +#define REG_REC_TEC_VAL GENMASK(24, 16) + +/* ERR_NORM ERR_FD registers */ +#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0) +#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16) + +/* CTR_PRES registers */ +#define REG_CTR_PRES_CTPV GENMASK(8, 0) +#define REG_CTR_PRES_PTX BIT(9) +#define REG_CTR_PRES_PRX BIT(10) +#define REG_CTR_PRES_ENORM BIT(11) +#define REG_CTR_PRES_EFD BIT(12) + +/* FILTER_A_MASK registers */ +#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0) + +/* FILTER_A_VAL registers */ +#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0) + +/* FILTER_B_MASK registers */ +#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0) + +/* FILTER_B_VAL registers */ +#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0) + +/* FILTER_C_MASK registers */ +#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0) + +/* FILTER_C_VAL registers */ +#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0) + +/* FILTER_RAN_LOW registers */ +#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0) + +/* FILTER_RAN_HIGH registers */ +#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0) + +/* FILTER_CONTROL FILTER_STATUS registers */ +#define REG_FILTER_CONTROL_FANB BIT(0) +#define REG_FILTER_CONTROL_FANE BIT(1) +#define REG_FILTER_CONTROL_FAFB BIT(2) +#define REG_FILTER_CONTROL_FAFE BIT(3) +#define REG_FILTER_CONTROL_FBNB BIT(4) +#define REG_FILTER_CONTROL_FBNE BIT(5) +#define REG_FILTER_CONTROL_FBFB BIT(6) +#define REG_FILTER_CONTROL_FBFE BIT(7) +#define REG_FILTER_CONTROL_FCNB BIT(8) +#define REG_FILTER_CONTROL_FCNE BIT(9) +#define REG_FILTER_CONTROL_FCFB BIT(10) +#define REG_FILTER_CONTROL_FCFE BIT(11) +#define REG_FILTER_CONTROL_FRNB BIT(12) +#define REG_FILTER_CONTROL_FRNE BIT(13) +#define REG_FILTER_CONTROL_FRFB BIT(14) +#define REG_FILTER_CONTROL_FRFE BIT(15) +#define REG_FILTER_CONTROL_SFA BIT(16) +#define REG_FILTER_CONTROL_SFB BIT(17) +#define REG_FILTER_CONTROL_SFC BIT(18) +#define REG_FILTER_CONTROL_SFR BIT(19) + +/* RX_MEM_INFO registers */ +#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0) +#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16) + +/* RX_POINTERS registers */ +#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0) +#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16) + +/* RX_STATUS RX_SETTINGS registers */ +#define REG_RX_STATUS_RXE BIT(0) +#define REG_RX_STATUS_RXF BIT(1) +#define 
REG_RX_STATUS_RXMOF BIT(2) +#define REG_RX_STATUS_RXFRC GENMASK(14, 4) +#define REG_RX_STATUS_RTSOP BIT(16) + +/* RX_DATA registers */ +#define REG_RX_DATA_RX_DATA GENMASK(31, 0) + +/* TX_STATUS registers */ +#define REG_TX_STATUS_TX1S GENMASK(3, 0) +#define REG_TX_STATUS_TX2S GENMASK(7, 4) +#define REG_TX_STATUS_TX3S GENMASK(11, 8) +#define REG_TX_STATUS_TX4S GENMASK(15, 12) + +/* TX_COMMAND registers */ +#define REG_TX_COMMAND_TXCE BIT(0) +#define REG_TX_COMMAND_TXCR BIT(1) +#define REG_TX_COMMAND_TXCA BIT(2) +#define REG_TX_COMMAND_TXB1 BIT(8) +#define REG_TX_COMMAND_TXB2 BIT(9) +#define REG_TX_COMMAND_TXB3 BIT(10) +#define REG_TX_COMMAND_TXB4 BIT(11) + +/* TX_PRIORITY registers */ +#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0) +#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4) +#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8) +#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12) + +/* ERR_CAPT ALC registers */ +#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0) +#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5) +#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16) +#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21) + +/* TRV_DELAY SSP_CFG registers */ +#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0) +#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16) +#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24) + +/* RX_FR_CTR registers */ +#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0) + +/* TX_FR_CTR registers */ +#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0) + +/* DEBUG_REGISTER registers */ +#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0) +#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3) +#define REG_DEBUG_REGISTER_PC_ARB BIT(6) +#define REG_DEBUG_REGISTER_PC_CON BIT(7) +#define REG_DEBUG_REGISTER_PC_DAT BIT(8) +#define REG_DEBUG_REGISTER_PC_STC BIT(9) +#define REG_DEBUG_REGISTER_PC_CRC BIT(10) +#define REG_DEBUG_REGISTER_PC_CRCD BIT(11) +#define REG_DEBUG_REGISTER_PC_ACK BIT(12) +#define REG_DEBUG_REGISTER_PC_ACKD BIT(13) +#define REG_DEBUG_REGISTER_PC_EOF BIT(14) +#define REG_DEBUG_REGISTER_PC_INT BIT(15) +#define REG_DEBUG_REGISTER_PC_SUSP BIT(16) +#define REG_DEBUG_REGISTER_PC_OVR BIT(17) +#define REG_DEBUG_REGISTER_PC_SOF BIT(18) + +/* YOLO_REG registers */ +#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0) + +/* TIMESTAMP_LOW registers */ +#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0) + +/* TIMESTAMP_HIGH registers */ +#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0) + +#endif -- cgit v1.2.3 From 792a5b678e81080313beb3bee19db189aaa2cf8c Mon Sep 17 00:00:00 2001 From: Pavel Pisa Date: Tue, 22 Mar 2022 00:32:31 +0100 Subject: can: ctucanfd: CTU CAN FD open-source IP core - PCI bus support. PCI bus adaptation for CTU CAN FD open-source IP core. The project providing FPGA design for Intel EP4CGX15 based DB4CGX15 PCIe board with PiKRON.com designed transceiver riser shield is available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd . 
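For orientation, the per-core enumeration done by ctucan_pci_probe() in the diff below can be condensed to the following sketch. The 0x4000 core stride, the core count read from the CTUCAN_ID register (or fixed at two on boards without it), the fixed 100 MHz core clock, four TX buffers and the shared IRQ all come from the patch; the wrapper function, its arguments and the simplified error handling are purely illustrative, and the real probe additionally honours the pci_use_second module parameter for cores beyond the first.

  /* Illustrative sketch only, not part of the patch. */
  static int ctucan_pci_cores_sketch(struct pci_dev *pdev,
  				     void __iomem *bar0_base,
  				     void __iomem *bar1_base, bool has_id_reg)
  {
  	void __iomem *addr = bar1_base;
  	unsigned int num_cores = 2;	/* boards without the ID register */
  	unsigned int core_i;
  	int ret = 0;

  	if (has_id_reg)
  		num_cores = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID) & 0xf;

  	for (core_i = 0; core_i < num_cores; core_i++, addr += 0x4000) {
  		/* one netdev per core; all cores share the board's PCI IRQ */
  		ret = ctucan_probe_common(&pdev->dev, addr, pdev->irq,
  					  4 /* ntxbufs */, 100000000,
  					  0 /* pm_enable_call */,
  					  ctucan_pci_set_drvdata);
  		if (ret < 0)
  			break;
  	}

  	return ret;
  }
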
Link: https://lore.kernel.org/all/a81333e206a9bcf9434797f6f54d8664775542e2.1647904780.git.pisa@cmp.felk.cvut.cz Signed-off-by: Pavel Pisa Signed-off-by: Martin Jerabek Signed-off-by: Ondrej Ille Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/Kconfig | 10 ++ drivers/net/can/ctucanfd/Makefile | 2 + drivers/net/can/ctucanfd/ctucanfd_pci.c | 304 ++++++++++++++++++++++++++++++++ 3 files changed, 316 insertions(+) create mode 100644 drivers/net/can/ctucanfd/ctucanfd_pci.c (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig index b5f364068f86..d6e59522f4cf 100644 --- a/drivers/net/can/ctucanfd/Kconfig +++ b/drivers/net/can/ctucanfd/Kconfig @@ -10,3 +10,13 @@ config CAN_CTUCANFD from project (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd) and on Intel SoC from project (https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd). Guidepost CTU FEE CAN bus projects page https://canbus.pages.fel.cvut.cz/ . + +config CAN_CTUCANFD_PCI + tristate "CTU CAN-FD IP core PCI/PCIe driver" + depends on CAN_CTUCANFD + depends on PCI + help + This driver adds PCI/PCIe support for CTU CAN-FD IP core. + The project providing FPGA design for Intel EP4CGX15 based DB4CGX15 + PCIe board with PiKRON.com designed transceiver riser shield is available + at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd . diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile index 259ecb0222c2..48555c3511e7 100644 --- a/drivers/net/can/ctucanfd/Makefile +++ b/drivers/net/can/ctucanfd/Makefile @@ -5,3 +5,5 @@ obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o ctucanfd-y := ctucanfd_base.o + +obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c new file mode 100644 index 000000000000..c37a42480533 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include +#include + +#include "ctucanfd.h" + +#ifndef PCI_DEVICE_DATA +#define PCI_DEVICE_DATA(vend, dev, data) \ +.vendor = PCI_VENDOR_ID_##vend, \ +.device = PCI_DEVICE_ID_##vend##_##dev, \ +.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \ +.driver_data = (kernel_ulong_t)(data) +#endif + +#ifndef PCI_VENDOR_ID_TEDIA +#define PCI_VENDOR_ID_TEDIA 0x1760 +#endif + +#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 +#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00 +#endif + +#define CTUCAN_BAR0_CTUCAN_ID 0x0000 +#define CTUCAN_BAR0_CRA_BASE 0x4000 +#define CYCLONE_IV_CRA_A2P_IE (0x0050) + +#define CTUCAN_WITHOUT_CTUCAN_ID 0 +#define CTUCAN_WITH_CTUCAN_ID 1 + +static bool use_msi = true; +module_param(use_msi, bool, 0444); +MODULE_PARM_DESC(use_msi, "PCIe implementation use MSI interrupts. 
Default: 1 (yes)"); + +static bool pci_use_second = true; +module_param(pci_use_second, bool, 0444); +MODULE_PARM_DESC(pci_use_second, "Use the second CAN core on PCIe card. Default: 1 (yes)"); + +struct ctucan_pci_board_data { + void __iomem *bar0_base; + void __iomem *cra_base; + void __iomem *bar1_base; + struct list_head ndev_list_head; + int use_msi; +}; + +static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev) +{ + return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev); +} + +static void ctucan_pci_set_drvdata(struct device *dev, + struct net_device *ndev) +{ + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + struct ctucan_priv *priv = netdev_priv(ndev); + struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev); + + list_add(&priv->peers_on_pdev, &bdata->ndev_list_head); + priv->irq_flags = IRQF_SHARED; +} + +/** + * ctucan_pci_probe - PCI registration call + * @pdev: Handle to the pci device structure + * @ent: Pointer to the entry from ctucan_pci_tbl + * + * This function does all the memory allocation and registration for the CAN + * device. + * + * Return: 0 on success and failure value on error + */ +static int ctucan_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + unsigned long driver_data = ent->driver_data; + struct ctucan_pci_board_data *bdata; + void __iomem *addr; + void __iomem *cra_addr; + void __iomem *bar0_base; + u32 cra_a2p_ie; + u32 ctucan_id = 0; + int ret; + unsigned int ntxbufs; + unsigned int num_cores = 1; + unsigned int core_i = 0; + int irq; + int msi_ok = 0; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(dev, "pci_enable_device FAILED\n"); + goto err; + } + + ret = pci_request_regions(pdev, KBUILD_MODNAME); + if (ret) { + dev_err(dev, "pci_request_regions FAILED\n"); + goto err_disable_device; + } + + if (use_msi) { + ret = pci_enable_msi(pdev); + if (!ret) { + dev_info(dev, "MSI enabled\n"); + pci_set_master(pdev); + msi_ok = 1; + } + } + + dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n", + (long long)pci_resource_start(pdev, 0), + (long long)pci_resource_len(pdev, 0)); + + dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n", + (long long)pci_resource_start(pdev, 1), + (long long)pci_resource_len(pdev, 1)); + + addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); + if (!addr) { + dev_err(dev, "PCI BAR 1 cannot be mapped\n"); + ret = -ENOMEM; + goto err_release_regions; + } + + /* Cyclone IV PCI Express Control Registers Area */ + bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); + if (!bar0_base) { + dev_err(dev, "PCI BAR 0 cannot be mapped\n"); + ret = -EIO; + goto err_pci_iounmap_bar1; + } + + if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) { + cra_addr = bar0_base; + num_cores = 2; + } else { + cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE; + ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID); + dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id); + num_cores = ctucan_id & 0xf; + } + + irq = pdev->irq; + + ntxbufs = 4; + + bdata = kzalloc(sizeof(*bdata), GFP_KERNEL); + if (!bdata) { + ret = -ENOMEM; + goto err_pci_iounmap_bar0; + } + + INIT_LIST_HEAD(&bdata->ndev_list_head); + bdata->bar0_base = bar0_base; + bdata->cra_base = cra_addr; + bdata->bar1_base = addr; + bdata->use_msi = msi_ok; + + pci_set_drvdata(pdev, bdata); + + ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000, + 0, ctucan_pci_set_drvdata); + if (ret < 0) + goto err_free_board; + + core_i++; + + while (pci_use_second && (core_i < 
num_cores)) { + addr += 0x4000; + ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000, + 0, ctucan_pci_set_drvdata); + if (ret < 0) { + dev_info(dev, "CTU CAN FD core %d initialization failed\n", + core_i); + break; + } + core_i++; + } + + /* enable interrupt in + * Avalon-MM to PCI Express Interrupt Enable Register + */ + cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE); + dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie); + cra_a2p_ie |= 1; + iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE); + cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE); + dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie); + + return 0; + +err_free_board: + pci_set_drvdata(pdev, NULL); + kfree(bdata); +err_pci_iounmap_bar0: + pci_iounmap(pdev, cra_addr); +err_pci_iounmap_bar1: + pci_iounmap(pdev, addr); +err_release_regions: + if (msi_ok) { + pci_disable_msi(pdev); + pci_clear_master(pdev); + } + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err: + return ret; +} + +/** + * ctucan_pci_remove - Unregister the device after releasing the resources + * @pdev: Handle to the pci device structure + * + * This function frees all the resources allocated to the device. + * Return: 0 always + */ +static void ctucan_pci_remove(struct pci_dev *pdev) +{ + struct net_device *ndev; + struct ctucan_priv *priv = NULL; + struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev); + + dev_dbg(&pdev->dev, "ctucan_remove"); + + if (!bdata) { + dev_err(&pdev->dev, "%s: no list of devices\n", __func__); + return; + } + + /* disable interrupt in + * Avalon-MM to PCI Express Interrupt Enable Register + */ + if (bdata->cra_base) + iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE); + + while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv, + peers_on_pdev)) != NULL) { + ndev = priv->can.dev; + + unregister_candev(ndev); + + netif_napi_del(&priv->napi); + + list_del_init(&priv->peers_on_pdev); + free_candev(ndev); + } + + pci_iounmap(pdev, bdata->bar1_base); + + if (bdata->use_msi) { + pci_disable_msi(pdev); + pci_clear_master(pdev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + + pci_iounmap(pdev, bdata->bar0_base); + + pci_set_drvdata(pdev, NULL); + kfree(bdata); +} + +static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume); + +static const struct pci_device_id ctucan_pci_tbl[] = { + {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21, + CTUCAN_WITH_CTUCAN_ID)}, + {}, +}; + +static struct pci_driver ctucan_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = ctucan_pci_tbl, + .probe = ctucan_pci_probe, + .remove = ctucan_pci_remove, + .driver.pm = &ctucan_pci_pm_ops, +}; + +module_pci_driver(ctucan_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pavel Pisa "); +MODULE_DESCRIPTION("CTU CAN FD for PCI bus"); -- cgit v1.2.3 From e8f0c23a2415fa8f95cca61e1b4a4c90c4f5a983 Mon Sep 17 00:00:00 2001 From: Pavel Pisa Date: Tue, 22 Mar 2022 00:32:32 +0100 Subject: can: ctucanfd: CTU CAN FD open-source IP core - platform/SoC support. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Platform bus adaptation for CTU CAN FD open-source IP core. The core has been tested together with OpenCores SJA1000 modified to be CAN FD frames tolerant on MicroZed Zynq based MZ_APO education kits designed by Petr Porazil from PiKRON.com company. FPGA design https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top. 
The kit description at the Computer Architectures course pages https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start . Kit carrier board and mechanics design source files https://gitlab.com/pikron/projects/mz_apo/microzed_apo The work is documented in Martin Jeřábek's diploma theses Open-source and Open-hardware CAN FD Protocol Support https://dspace.cvut.cz/handle/10467/80366 . Link: https://lore.kernel.org/all/4d5c53499bafe7717815f948801bd5aedaa05c12.1647904780.git.pisa@cmp.felk.cvut.cz Signed-off-by: Pavel Pisa Signed-off-by: Martin Jerabek Signed-off-by: Ondrej Ille Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/Kconfig | 12 +++ drivers/net/can/ctucanfd/Makefile | 1 + drivers/net/can/ctucanfd/ctucanfd_platform.c | 132 +++++++++++++++++++++++++++ 3 files changed, 145 insertions(+) create mode 100644 drivers/net/can/ctucanfd/ctucanfd_platform.c (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig index d6e59522f4cf..48963efc7f19 100644 --- a/drivers/net/can/ctucanfd/Kconfig +++ b/drivers/net/can/ctucanfd/Kconfig @@ -20,3 +20,15 @@ config CAN_CTUCANFD_PCI The project providing FPGA design for Intel EP4CGX15 based DB4CGX15 PCIe board with PiKRON.com designed transceiver riser shield is available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd . + +config CAN_CTUCANFD_PLATFORM + tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver" + depends on CAN_CTUCANFD + depends on OF || COMPILE_TEST + help + The core has been tested together with OpenCores SJA1000 + modified to be CAN FD frames tolerant on MicroZed Zynq based + MZ_APO education kits designed by Petr Porazil from PiKRON.com + company. FPGA design https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top. + The kit description at the Computer Architectures course pages + https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start . 
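Both the PCI glue above and the platform glue below are thin wrappers around the same bus-independent probe. Its prototype is not visible in this excerpt (it lives in ctucanfd.h), so the declaration below is only an approximation reconstructed from the two call sites; the comments contrast what each adaptation passes. The set_drvdata callback is the hook that keeps ctucan_probe_common() free of any PCI or platform specifics.

  /* Approximate prototype, reconstructed from the call sites in
   * ctucanfd_pci.c and ctucanfd_platform.c; the authoritative
   * declaration is in ctucanfd.h, which is not shown here.
   */
  int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq,
  			unsigned int ntxbufs, unsigned long can_clk_rate,
  			int pm_enable_call,
  			void (*set_drvdata)(struct device *dev,
  					    struct net_device *ndev));

  /* PCI glue: fixed 100 MHz core clock, pm_enable_call = 0 (no runtime PM
   * enable in the common code; see the pm_enable_call check in its error
   * path), drvdata chained per board via ctucan_pci_set_drvdata().
   *
   * Platform glue: can_clk_rate = 0 and pm_enable_call = 1, so the common
   * code presumably obtains the clock itself and enables runtime PM on the
   * platform device (that part of ctucanfd_base.c is not in this excerpt).
   */
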
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile index 48555c3511e7..8078f1f2c30f 100644 --- a/drivers/net/can/ctucanfd/Makefile +++ b/drivers/net/can/ctucanfd/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o ctucanfd-y := ctucanfd_base.o obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o +obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c new file mode 100644 index 000000000000..5e4806068662 --- /dev/null +++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/******************************************************************************* + * + * CTU CAN FD IP Core + * + * Copyright (C) 2015-2018 Ondrej Ille FEE CTU + * Copyright (C) 2018-2021 Ondrej Ille self-funded + * Copyright (C) 2018-2019 Martin Jerabek FEE CTU + * Copyright (C) 2018-2022 Pavel Pisa FEE CTU/self-funded + * + * Project advisors: + * Jiri Novak + * Pavel Pisa + * + * Department of Measurement (http://meas.fel.cvut.cz/) + * Faculty of Electrical Engineering (http://www.fel.cvut.cz) + * Czech Technical University (http://www.cvut.cz/) + ******************************************************************************/ + +#include +#include +#include +#include +#include + +#include "ctucanfd.h" + +#define DRV_NAME "ctucanfd" + +static void ctucan_platform_set_drvdata(struct device *dev, + struct net_device *ndev) +{ + struct platform_device *pdev = container_of(dev, struct platform_device, + dev); + + platform_set_drvdata(pdev, ndev); +} + +/** + * ctucan_platform_probe - Platform registration call + * @pdev: Handle to the platform device structure + * + * This function does all the memory allocation and registration for the CAN + * device. + * + * Return: 0 on success and failure value on error + */ +static int ctucan_platform_probe(struct platform_device *pdev) +{ + struct resource *res; /* IO mem resources */ + struct device *dev = &pdev->dev; + void __iomem *addr; + int ret; + unsigned int ntxbufs; + int irq; + + /* Get the virtual base address for the device */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = devm_ioremap_resource(dev, res); + if (IS_ERR(addr)) { + dev_err(dev, "Cannot remap address.\n"); + ret = PTR_ERR(addr); + goto err; + } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "Cannot find interrupt.\n"); + ret = irq; + goto err; + } + + /* Number of tx bufs might be change in HW for future. If so, + * it will be passed as property via device tree + */ + ntxbufs = 4; + ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0, + 1, ctucan_platform_set_drvdata); + + if (ret < 0) + platform_set_drvdata(pdev, NULL); + +err: + return ret; +} + +/** + * ctucan_platform_remove - Unregister the device after releasing the resources + * @pdev: Handle to the platform device structure + * + * This function frees all the resources allocated to the device. 
+ * Return: 0 always + */ +static int ctucan_platform_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ctucan_priv *priv = netdev_priv(ndev); + + netdev_dbg(ndev, "ctucan_remove"); + + unregister_candev(ndev); + pm_runtime_disable(&pdev->dev); + netif_napi_del(&priv->napi); + free_candev(ndev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume); + +/* Match table for OF platform binding */ +static const struct of_device_id ctucan_of_match[] = { + { .compatible = "ctu,ctucanfd-2", }, + { .compatible = "ctu,ctucanfd", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, ctucan_of_match); + +static struct platform_driver ctucanfd_driver = { + .probe = ctucan_platform_probe, + .remove = ctucan_platform_remove, + .driver = { + .name = DRV_NAME, + .pm = &ctucan_platform_pm_ops, + .of_match_table = ctucan_of_match, + }, +}; + +module_platform_driver(ctucanfd_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Jerabek"); +MODULE_DESCRIPTION("CTU CAN FD for platform"); -- cgit v1.2.3 From 055eb95533273bc334794dbc598400d10800528f Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 14 Apr 2022 09:12:33 -0700 Subject: bpf: Move rcu lock management out of BPF_PROG_RUN routines Commit 7d08c2c91171 ("bpf: Refactor BPF_PROG_RUN_ARRAY family of macros into functions") switched a bunch of BPF_PROG_RUN macros to inline routines. This changed the semantic a bit. Due to arguments expansion of macros, it used to be: rcu_read_lock(); array = rcu_dereference(cgrp->bpf.effective[atype]); ... Now, with with inline routines, we have: array_rcu = rcu_dereference(cgrp->bpf.effective[atype]); /* array_rcu can be kfree'd here */ rcu_read_lock(); array = rcu_dereference(array_rcu); I'm assuming in practice rcu subsystem isn't fast enough to trigger this but let's use rcu API properly. Also, rename to lower caps to not confuse with macros. Additionally, drop and expand BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY. See [1] for more context. 
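The diffs below convert every call site to this convention. As a single consolidated illustration (bpf_prog_run_array(), bpf_prog_run and struct bpf_prog_array come from this patch; the wrapper function and its my_progs/my_ctx names are illustrative only):

  #include <linux/bpf.h>

  /* Sketch of the new calling convention: the caller owns the RCU read
   * section and passes an already dereferenced array, so the array can
   * no longer be freed between the dereference and the lock as in the
   * old inline-helper ordering described above.
   */
  static u32 run_my_prog_array(struct bpf_prog_array __rcu *my_progs,
  			       void *my_ctx)
  {
  	u32 ret;

  	rcu_read_lock();
  	ret = bpf_prog_run_array(rcu_dereference(my_progs), my_ctx,
  				 bpf_prog_run);
  	rcu_read_unlock();

  	return ret;
  }
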
[1] https://lore.kernel.org/bpf/CAKH8qBs60fOinFdxiiQikK_q0EcVxGvNTQoWvHLEUGbgcj1UYg@mail.gmail.com/T/#u v2 - keep rcu locks inside by passing cgroup_bpf Fixes: 7d08c2c91171 ("bpf: Refactor BPF_PROG_RUN_ARRAY family of macros into functions") Signed-off-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220414161233.170780-1-sdf@google.com --- drivers/media/rc/bpf-lirc.c | 8 ++- include/linux/bpf.h | 115 +++------------------------------------- kernel/bpf/cgroup.c | 124 ++++++++++++++++++++++++++++++++++++++------ kernel/trace/bpf_trace.c | 5 +- 4 files changed, 124 insertions(+), 128 deletions(-) (limited to 'drivers') diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c index 3eff08d7b8e5..fe17c7f98e81 100644 --- a/drivers/media/rc/bpf-lirc.c +++ b/drivers/media/rc/bpf-lirc.c @@ -216,8 +216,12 @@ void lirc_bpf_run(struct rc_dev *rcdev, u32 sample) raw->bpf_sample = sample; - if (raw->progs) - BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, bpf_prog_run); + if (raw->progs) { + rcu_read_lock(); + bpf_prog_run_array(rcu_dereference(raw->progs), + &raw->bpf_sample, bpf_prog_run); + rcu_read_unlock(); + } } /* diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bdb5298735ce..7bf441563ffc 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1221,7 +1221,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, /* an array of programs to be executed under rcu_lock. * * Typical usage: - * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run); + * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run); * * the structure returned by bpf_prog_array_alloc() should be populated * with program pointers and the last pointer must be NULL. 
@@ -1315,83 +1315,22 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); -static __always_inline int -BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog, - int retval, u32 *ret_flags) -{ - const struct bpf_prog_array_item *item; - const struct bpf_prog *prog; - const struct bpf_prog_array *array; - struct bpf_run_ctx *old_run_ctx; - struct bpf_cg_run_ctx run_ctx; - u32 func_ret; - - run_ctx.retval = retval; - migrate_disable(); - rcu_read_lock(); - array = rcu_dereference(array_rcu); - item = &array->items[0]; - old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); - while ((prog = READ_ONCE(item->prog))) { - run_ctx.prog_item = item; - func_ret = run_prog(prog, ctx); - if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval)) - run_ctx.retval = -EPERM; - *(ret_flags) |= (func_ret >> 1); - item++; - } - bpf_reset_run_ctx(old_run_ctx); - rcu_read_unlock(); - migrate_enable(); - return run_ctx.retval; -} - -static __always_inline int -BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog, - int retval) -{ - const struct bpf_prog_array_item *item; - const struct bpf_prog *prog; - const struct bpf_prog_array *array; - struct bpf_run_ctx *old_run_ctx; - struct bpf_cg_run_ctx run_ctx; - - run_ctx.retval = retval; - migrate_disable(); - rcu_read_lock(); - array = rcu_dereference(array_rcu); - item = &array->items[0]; - old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); - while ((prog = READ_ONCE(item->prog))) { - run_ctx.prog_item = item; - if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval)) - run_ctx.retval = -EPERM; - item++; - } - bpf_reset_run_ctx(old_run_ctx); - rcu_read_unlock(); - migrate_enable(); - return run_ctx.retval; -} - static __always_inline u32 -BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu, +bpf_prog_run_array(const struct bpf_prog_array *array, const void *ctx, bpf_prog_run_fn run_prog) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; - const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_trace_run_ctx run_ctx; u32 ret = 1; - migrate_disable(); - rcu_read_lock(); - array = rcu_dereference(array_rcu); + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held"); + if (unlikely(!array)) - goto out; + return ret; + + migrate_disable(); old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); item = &array->items[0]; while ((prog = READ_ONCE(item->prog))) { @@ -1400,50 +1339,10 @@ BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu, item++; } bpf_reset_run_ctx(old_run_ctx); -out: - rcu_read_unlock(); migrate_enable(); return ret; } -/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs - * so BPF programs can request cwr for TCP packets. - * - * Current cgroup skb programs can only return 0 or 1 (0 to drop the - * packet. This macro changes the behavior so the low order bit - * indicates whether the packet should be dropped (0) or not (1) - * and the next bit is a congestion notification bit. 
This could be - * used by TCP to call tcp_enter_cwr() - * - * Hence, new allowed return values of CGROUP EGRESS BPF programs are: - * 0: drop packet - * 1: keep packet - * 2: drop packet and cn - * 3: keep packet and cn - * - * This macro then converts it to one of the NET_XMIT or an error - * code that is then interpreted as drop packet (and no cn): - * 0: NET_XMIT_SUCCESS skb should be transmitted - * 1: NET_XMIT_DROP skb should be dropped and cn - * 2: NET_XMIT_CN skb should be transmitted and cn - * 3: -err skb should be dropped - */ -#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ - ({ \ - u32 _flags = 0; \ - bool _cn; \ - u32 _ret; \ - _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \ - _cn = _flags & BPF_RET_SET_CN; \ - if (_ret && !IS_ERR_VALUE((long)_ret)) \ - _ret = -EFAULT; \ - if (!_ret) \ - _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ - else \ - _ret = (_cn ? NET_XMIT_DROP : _ret); \ - _ret; \ - }) - #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); extern struct mutex bpf_stats_enabled_mutex; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 128028efda64..0cb6211fcb58 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -22,6 +22,72 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE); EXPORT_SYMBOL(cgroup_bpf_enabled_key); +/* __always_inline is necessary to prevent indirect call through run_prog + * function pointer. + */ +static __always_inline int +bpf_prog_run_array_cg_flags(const struct cgroup_bpf *cgrp, + enum cgroup_bpf_attach_type atype, + const void *ctx, bpf_prog_run_fn run_prog, + int retval, u32 *ret_flags) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_cg_run_ctx run_ctx; + u32 func_ret; + + run_ctx.retval = retval; + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(cgrp->effective[atype]); + item = &array->items[0]; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + while ((prog = READ_ONCE(item->prog))) { + run_ctx.prog_item = item; + func_ret = run_prog(prog, ctx); + if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval)) + run_ctx.retval = -EPERM; + *(ret_flags) |= (func_ret >> 1); + item++; + } + bpf_reset_run_ctx(old_run_ctx); + rcu_read_unlock(); + migrate_enable(); + return run_ctx.retval; +} + +static __always_inline int +bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp, + enum cgroup_bpf_attach_type atype, + const void *ctx, bpf_prog_run_fn run_prog, + int retval) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_cg_run_ctx run_ctx; + + run_ctx.retval = retval; + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(cgrp->effective[atype]); + item = &array->items[0]; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + while ((prog = READ_ONCE(item->prog))) { + run_ctx.prog_item = item; + if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval)) + run_ctx.retval = -EPERM; + item++; + } + bpf_reset_run_ctx(old_run_ctx); + rcu_read_unlock(); + migrate_enable(); + return run_ctx.retval; +} + void cgroup_bpf_offline(struct cgroup *cgrp) { cgroup_get(cgrp); @@ -1075,11 +1141,38 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, bpf_compute_and_save_data_end(skb, &saved_data_end); if (atype == CGROUP_INET_EGRESS) { - ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY( - 
cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb); + u32 flags = 0; + bool cn; + + ret = bpf_prog_run_array_cg_flags( + &cgrp->bpf, atype, + skb, __bpf_prog_run_save_cb, 0, &flags); + + /* Return values of CGROUP EGRESS BPF programs are: + * 0: drop packet + * 1: keep packet + * 2: drop packet and cn + * 3: keep packet and cn + * + * The returned value is then converted to one of the NET_XMIT + * or an error code that is then interpreted as drop packet + * (and no cn): + * 0: NET_XMIT_SUCCESS skb should be transmitted + * 1: NET_XMIT_DROP skb should be dropped and cn + * 2: NET_XMIT_CN skb should be transmitted and cn + * 3: -err skb should be dropped + */ + + cn = flags & BPF_RET_SET_CN; + if (ret && !IS_ERR_VALUE((long)ret)) + ret = -EFAULT; + if (!ret) + ret = (cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); + else + ret = (cn ? NET_XMIT_DROP : ret); } else { - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb, - __bpf_prog_run_save_cb, 0); + ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, + skb, __bpf_prog_run_save_cb, 0); if (ret && !IS_ERR_VALUE((long)ret)) ret = -EFAULT; } @@ -1109,8 +1202,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk, { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); - return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, - bpf_prog_run, 0); + return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk); @@ -1155,8 +1247,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, } cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); - return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx, - bpf_prog_run, 0, flags); + return bpf_prog_run_array_cg_flags(&cgrp->bpf, atype, + &ctx, bpf_prog_run, 0, flags); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr); @@ -1182,8 +1274,8 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, { struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); - return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops, - bpf_prog_run, 0); + return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run, + 0); } EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops); @@ -1200,8 +1292,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, rcu_read_lock(); cgrp = task_dfl_cgroup(current); - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, - bpf_prog_run, 0); + ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0); rcu_read_unlock(); return ret; @@ -1366,8 +1457,7 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, rcu_read_lock(); cgrp = task_dfl_cgroup(current); - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, - bpf_prog_run, 0); + ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0); rcu_read_unlock(); kfree(ctx.cur_val); @@ -1459,7 +1549,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, } lock_sock(sk); - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT], + ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT, &ctx, bpf_prog_run, 0); release_sock(sk); @@ -1559,7 +1649,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, } lock_sock(sk); - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT], + ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, &ctx, bpf_prog_run, retval); release_sock(sk); @@ -1608,7 +1698,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, * be called if that data shouldn't be "exported". 
*/ - ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT], + ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT, &ctx, bpf_prog_run, retval); if (ret < 0) return ret; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index b26f3da943de..f15b826f9899 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -129,7 +129,10 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) * out of events when it was updated in between this and the * rcu_dereference() which is accepted risk. */ - ret = BPF_PROG_RUN_ARRAY(call->prog_array, ctx, bpf_prog_run); + rcu_read_lock(); + ret = bpf_prog_run_array(rcu_dereference(call->prog_array), + ctx, bpf_prog_run); + rcu_read_unlock(); out: __this_cpu_dec(bpf_prog_active); -- cgit v1.2.3 From 0d14657f40830243266f972766f1e4d00436e648 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sun, 17 Apr 2022 10:12:45 +0000 Subject: net: atlantic: Implement xdp control plane aq_xdp() is a xdp setup callback function for Atlantic driver. When XDP is attached or detached, the device will be restarted because it uses different headroom, tailroom, and page order value. If XDP enabled, it switches default page order value from 0 to 2. Because the default maximum frame size is still 2K and it needs additional area for headroom and tailroom. The total size(headroom + frame size + tailroom) is 2624. So, 1472Bytes will be always wasted for every frame. But when order-2 is used, these pages can be used 6 times with flip strategy. It means only about 106Bytes per frame will be wasted. Also, It supports xdp fragment feature. MTU can be 16K if xdp prog supports xdp fragment. If not, MTU can not exceed 2K - ETH_HLEN - ETH_FCS. And a static key is added and It will be used to call the xdp_clean handler in ->poll(). data plane implementation will be contained the followed patch. Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 1 + drivers/net/ethernet/aquantia/atlantic/aq_main.c | 86 ++++++++++++++++++++++ drivers/net/ethernet/aquantia/atlantic/aq_main.h | 2 + drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 3 + drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 64 ++++++++++------ drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 13 +++- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 23 ++++-- drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 6 ++ .../ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 6 +- .../ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 10 +-- 10 files changed, 177 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 52b9833fda99..8bcda2cb3a2e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -40,6 +40,7 @@ #define AQ_CFG_RX_HDR_SIZE 256U #define AQ_CFG_RX_PAGEORDER 0U +#define AQ_CFG_XDP_PAGEORDER 2U /* LRO */ #define AQ_CFG_IS_LRO_DEF 1U diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index e65ce7199dac..e794ee25b12b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -14,17 +14,22 @@ #include "aq_ptp.h" #include "aq_filters.h" #include "aq_hw_utils.h" +#include "aq_vec.h" #include #include #include #include #include +#include MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR); MODULE_DESCRIPTION(AQ_CFG_DRV_DESC); +DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key); +EXPORT_SYMBOL(aq_xdp_locking_key); + static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME; static const struct net_device_ops aq_ndev_ops; @@ -126,9 +131,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) { + int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN; struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct bpf_prog *prog; int err; + prog = READ_ONCE(aq_nic->xdp_prog); + if (prog && !prog->aux->xdp_has_frags && + new_frame_size > AQ_CFG_RX_FRAME_MAX) { + netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n", + ndev->mtu); + return -EOPNOTSUPP; + } + err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN); if (err < 0) @@ -204,6 +219,25 @@ err_exit: return err; } +static netdev_features_t aq_ndev_fix_features(struct net_device *ndev, + netdev_features_t features) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct bpf_prog *prog; + + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + prog = READ_ONCE(aq_nic->xdp_prog); + if (prog && !prog->aux->xdp_has_frags && + aq_nic->xdp_prog && features & NETIF_F_LRO) { + netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n"); + features &= ~NETIF_F_LRO; + } + + return features; +} + static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr) { struct aq_nic_s *aq_nic = netdev_priv(ndev); @@ -410,6 +444,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type, mqprio->qopt.prio_tc_map); } +static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + bool need_update, running = netif_running(ndev); + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct bpf_prog *old_prog; + + if (prog && !prog->aux->xdp_has_frags) { + if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) { + 
NL_SET_ERR_MSG_MOD(extack, + "prog does not support XDP frags"); + return -EOPNOTSUPP; + } + + if (prog && ndev->features & NETIF_F_LRO) { + netdev_err(ndev, + "LRO is not supported with single buffer XDP, disabling\n"); + ndev->features &= ~NETIF_F_LRO; + } + } + + need_update = !!aq_nic->xdp_prog != !!prog; + if (running && need_update) + aq_ndev_close(ndev); + + old_prog = xchg(&aq_nic->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (!old_prog && prog) + static_branch_inc(&aq_xdp_locking_key); + else if (old_prog && !prog) + static_branch_dec(&aq_xdp_locking_key); + + if (running && need_update) + return aq_ndev_open(ndev); + + return 0; +} + +static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return aq_xdp_setup(dev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + static const struct net_device_ops aq_ndev_ops = { .ndo_open = aq_ndev_open, .ndo_stop = aq_ndev_close, @@ -418,10 +502,12 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_change_mtu = aq_ndev_change_mtu, .ndo_set_mac_address = aq_ndev_set_mac_address, .ndo_set_features = aq_ndev_set_features, + .ndo_fix_features = aq_ndev_fix_features, .ndo_eth_ioctl = aq_ndev_ioctl, .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, .ndo_setup_tc = aq_ndo_setup_tc, + .ndo_bpf = aq_xdp, }; static int __init aq_ndev_init_module(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h index a5a624b9ce73..99870865f66d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h @@ -12,6 +12,8 @@ #include "aq_common.h" #include "aq_nic.h" +DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key); + void aq_ndev_schedule_work(struct work_struct *work); struct net_device *aq_ndev_alloc(void); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 1a7148041e3d..47123baabd5e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -11,6 +11,8 @@ #define AQ_NIC_H #include +#include +#include #include "aq_common.h" #include "aq_rss.h" @@ -128,6 +130,7 @@ struct aq_nic_s { struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX]; struct aq_hw_s *aq_hw; + struct bpf_prog *xdp_prog; struct net_device *ndev; unsigned int aq_vecs; unsigned int packet_filter; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 77e76c9efd32..31a14a0dc25d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -7,12 +7,16 @@ /* File aq_ring.c: Definition of functions for Rx/Tx rings. 
*/ -#include "aq_ring.h" #include "aq_nic.h" #include "aq_hw.h" #include "aq_hw_utils.h" #include "aq_ptp.h" +#include "aq_vec.h" +#include "aq_main.h" +#include +#include +#include #include #include @@ -27,9 +31,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev) rxpage->page = NULL; } -static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, - struct device *dev) +static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring) { + struct device *dev = aq_nic_get_dev(rx_ring->aq_nic); + unsigned int order = rx_ring->page_order; struct page *page; int ret = -ENOMEM; dma_addr_t daddr; @@ -47,7 +52,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, rxpage->page = page; rxpage->daddr = daddr; rxpage->order = order; - rxpage->pg_off = 0; + rxpage->pg_off = rx_ring->page_offset; return 0; @@ -58,21 +63,26 @@ err_exit: return ret; } -static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, - int order) +static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf) { + unsigned int order = self->page_order; + u16 page_offset = self->page_offset; + u16 frame_max = self->frame_max; + u16 tail_size = self->tail_size; int ret; if (rxbuf->rxdata.page) { /* One means ring is the only user and can reuse */ if (page_ref_count(rxbuf->rxdata.page) > 1) { /* Try reuse buffer */ - rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX; - if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <= - (PAGE_SIZE << order)) { + rxbuf->rxdata.pg_off += frame_max + page_offset + + tail_size; + if (rxbuf->rxdata.pg_off + frame_max + tail_size <= + (PAGE_SIZE << order)) { u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_flips++; u64_stats_update_end(&self->stats.rx.syncp); + } else { /* Buffer exhausted. We have other users and * should release this page and realloc @@ -84,7 +94,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, u64_stats_update_end(&self->stats.rx.syncp); } } else { - rxbuf->rxdata.pg_off = 0; + rxbuf->rxdata.pg_off = page_offset; u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_reuses++; u64_stats_update_end(&self->stats.rx.syncp); @@ -92,8 +102,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, } if (!rxbuf->rxdata.page) { - ret = aq_get_rxpage(&rxbuf->rxdata, order, - aq_nic_get_dev(self->aq_nic)); + ret = aq_alloc_rxpages(&rxbuf->rxdata, self); if (ret) { u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.alloc_fails++; @@ -117,6 +126,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, err = -ENOMEM; goto err_exit; } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), self->size * self->dx_size, &self->dx_ring_pa, GFP_KERNEL); @@ -172,11 +182,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, self->idx = idx; self->size = aq_nic_cfg->rxds; self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; - self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + - (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 
1 : 0)) - 1; - - if (aq_nic_cfg->rxpageorder > self->page_order) - self->page_order = aq_nic_cfg->rxpageorder; + self->xdp_prog = aq_nic->xdp_prog; + self->frame_max = AQ_CFG_RX_FRAME_MAX; + + /* Only order-2 is allowed if XDP is enabled */ + if (READ_ONCE(self->xdp_prog)) { + self->page_offset = AQ_XDP_HEADROOM; + self->page_order = AQ_CFG_XDP_PAGEORDER; + self->tail_size = AQ_XDP_TAILROOM; + } else { + self->page_offset = 0; + self->page_order = fls(self->frame_max / PAGE_SIZE + + (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1; + if (aq_nic_cfg->rxpageorder > self->page_order) + self->page_order = aq_nic_cfg->rxpageorder; + self->tail_size = 0; + } self = aq_ring_alloc(self, aq_nic); if (!self) { @@ -449,7 +470,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, skb_add_rx_frag(skb, 0, buff->rxdata.page, buff->rxdata.pg_off + hdr_len, buff->len - hdr_len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff->rxdata.page); } @@ -469,7 +490,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, buff_->rxdata.page, buff_->rxdata.pg_off, buff_->len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff_->rxdata.page); buff_->is_cleaned = 1; @@ -529,7 +550,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) int aq_ring_rx_fill(struct aq_ring_s *self) { - unsigned int page_order = self->page_order; struct aq_ring_buff_s *buff = NULL; int err = 0; int i = 0; @@ -543,9 +563,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self) buff = &self->buff_ring[self->sw_tail]; buff->flags = 0U; - buff->len = AQ_CFG_RX_FRAME_MAX; + buff->len = self->frame_max; - err = aq_get_rxpages(self, buff, page_order); + err = aq_get_rxpages(self, buff); if (err) goto err_exit; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 93659e58f1ce..2dadfaff47f5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -11,6 +11,10 @@ #define AQ_RING_H #include "aq_common.h" +#include "aq_vec.h" + +#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) struct page; struct aq_nic_cfg_s; @@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s { struct { dma_addr_t pa_eop; struct sk_buff *skb; + struct xdp_frame *xdpf; }; /* TxC */ struct { @@ -132,10 +137,15 @@ struct aq_ring_s { unsigned int size; /* descriptors number */ unsigned int dx_size; /* TX or RX descriptor size, */ /* stored here for fater math */ - unsigned int page_order; + u16 page_order; + u16 page_offset; + u16 frame_max; + u16 tail_size; union aq_ring_stats_s stats; dma_addr_t dx_ring_pa; + struct bpf_prog *xdp_prog; enum atl_ring_type ring_type; + struct xdp_rxq_info xdp_rxq; }; struct aq_ring_param_s { @@ -175,6 +185,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); + int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 6ab1f3212d24..997f351bc371 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -10,11 +10,6 @@ */ #include "aq_vec.h" -#include "aq_nic.h" -#include "aq_ring.h" -#include "aq_hw.h" - -#include struct aq_vec_s { const struct 
aq_hw_ops *aq_hw_ops; @@ -153,9 +148,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic, aq_nic_set_tx_ring(aq_nic, idx_ring, ring); + if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq, + aq_nic->ndev, idx, + self->napi.napi_id) < 0) { + err = -ENOMEM; + goto err_exit; + } + if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL) < 0) { + xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq); + err = -ENOMEM; + goto err_exit; + } + ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic, idx_ring, aq_nic_cfg); if (!ring) { + xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq); err = -ENOMEM; goto err_exit; } @@ -300,8 +309,10 @@ void aq_vec_ring_free(struct aq_vec_s *self) for (i = 0U; self->tx_rings > i; ++i) { ring = self->ring[i]; aq_ring_free(&ring[AQ_VEC_TX_ID]); - if (i < self->rx_rings) + if (i < self->rx_rings) { + xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq); aq_ring_free(&ring[AQ_VEC_RX_ID]); + } } self->tx_rings = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h index 567f3d4b79a2..78fac609b71d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -13,7 +13,13 @@ #define AQ_VEC_H #include "aq_common.h" +#include "aq_nic.h" +#include "aq_ring.h" +#include "aq_hw.h" + #include +#include +#include struct aq_hw_s; struct aq_hw_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 4625ccb79499..9dfd68f0fda9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); hw_atl_rdm_rx_desc_data_buff_size_set(self, - AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->frame_max / 1024U, aq_ring->idx); hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); @@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) { buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; + ring->frame_max; buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; + buff->len : ring->frame_max; buff->next = 0U; buff->is_eop = 1U; } else { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index d875ce3ec759..878a53abec33 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx); hw_atl_rdm_rx_desc_data_buff_size_set(self, - AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->frame_max / 1024U, aq_ring->idx); hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); @@ -969,15 +969,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring) rxd_wb->status); if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; + ring->frame_max; buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; + buff->len : ring->frame_max; buff->next = 0U; buff->is_eop = 1U; } else { buff->len = - rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ? 
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len; + rxd_wb->pkt_len > ring->frame_max ? + ring->frame_max : rxd_wb->pkt_len; if (buff->is_lro) { /* LRO */ -- cgit v1.2.3 From 26efaef759a1bc43e819ee44cfd8a1fc4514e8c9 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sun, 17 Apr 2022 10:12:46 +0000 Subject: net: atlantic: Implement xdp data plane It supports XDP_PASS, XDP_DROP and multi buffer. The new function aq_nic_xmit_xdpf() is used to send packets described by an xdp_frame, and internally it calls aq_nic_map_xdp(). The AQC chip supports 32 hardware queues and 8 vectors (IRQs), so there are two options: 1. up to 8 cores with 4 tx queues per core, or 2. up to 4 cores with 8 tx queues per core. Like ixgbe, these tx queues could be used exclusively for XDP_TX/XDP_REDIRECT, in which case no tx_lock would be needed. This patchset does not use that strategy because the cost of getting the hardware tx queue index is too high, so tx_lock is used in aq_nic_xmit_xdpf(). single-core, single queue, 80% cpu utilization: 30.75% bpf_prog_xxx_xdp_prog_tx [k] bpf_prog_xxx_xdp_prog_tx 10.35% [kernel] [k] aq_hw_read_reg <---------- here 4.38% [kernel] [k] get_page_from_freelist single-core, 8 queues, 100% cpu utilization, half PPS: 45.56% [kernel] [k] aq_hw_read_reg <---------- here 17.58% bpf_prog_xxx_xdp_prog_tx [k] bpf_prog_xxx_xdp_prog_tx 4.72% [kernel] [k] hw_atl_b0_hw_ring_rx_receive The new function __aq_ring_xdp_clean() is an XDP Rx handler and it is called only when an XDP program is attached. Signed-off-by: Taehee Yoo Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_ethtool.c | 9 + drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 136 +++++++++ drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2 + drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 322 ++++++++++++++++++++- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 6 + drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 2 +- 6 files changed, 468 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a418238f6309..1daecd483b8d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -97,6 +97,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = { "%sQueue[%d] AllocFails", "%sQueue[%d] SkbAllocFails", "%sQueue[%d] Polls", + "%sQueue[%d] PageFlips", + "%sQueue[%d] PageReuses", + "%sQueue[%d] PageFrees", + "%sQueue[%d] XdpAbort", + "%sQueue[%d] XdpDrop", + "%sQueue[%d] XdpPass", + "%sQueue[%d] XdpTx", + "%sQueue[%d] XdpInvalid", + "%sQueue[%d] XdpRedirect", }; static const char * const aq_ethtool_queue_tx_stat_names[] = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 24d715c28a35..e11cc29d3264 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -569,6 +569,103 @@ err_exit: return err; } +static unsigned int aq_nic_map_xdp(struct aq_nic_s *self, + struct xdp_frame *xdpf, + struct aq_ring_s *ring) +{ + struct device *dev = aq_nic_get_dev(self); + struct aq_ring_buff_s *first = NULL; + unsigned int dx = ring->sw_tail; + struct aq_ring_buff_s *dx_buff; + struct skb_shared_info *sinfo; + unsigned int frag_count = 0U; + unsigned int nr_frags = 0U; + unsigned int ret = 0U; + u16 total_len; + + dx_buff = &ring->buff_ring[dx]; + dx_buff->flags = 0U; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + total_len = xdpf->len; + dx_buff->len = total_len; + if
(xdp_frame_has_frags(xdpf)) { + nr_frags = sinfo->nr_frags; + total_len += sinfo->xdp_frags_size; + } + dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(dev, dx_buff->pa))) + goto exit; + + first = dx_buff; + dx_buff->len_pkt = total_len; + dx_buff->is_sop = 1U; + dx_buff->is_mapped = 1U; + ++ret; + + for (; nr_frags--; ++frag_count) { + skb_frag_t *frag = &sinfo->frags[frag_count]; + unsigned int frag_len = skb_frag_size(frag); + unsigned int buff_offset = 0U; + unsigned int buff_size = 0U; + dma_addr_t frag_pa; + + while (frag_len) { + if (frag_len > AQ_CFG_TX_FRAME_MAX) + buff_size = AQ_CFG_TX_FRAME_MAX; + else + buff_size = frag_len; + + frag_pa = skb_frag_dma_map(dev, frag, buff_offset, + buff_size, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(dev, frag_pa))) + goto mapping_error; + + dx = aq_ring_next_dx(ring, dx); + dx_buff = &ring->buff_ring[dx]; + + dx_buff->flags = 0U; + dx_buff->len = buff_size; + dx_buff->pa = frag_pa; + dx_buff->is_mapped = 1U; + dx_buff->eop_index = 0xffffU; + + frag_len -= buff_size; + buff_offset += buff_size; + + ++ret; + } + } + + first->eop_index = dx; + dx_buff->is_eop = 1U; + dx_buff->skb = NULL; + dx_buff->xdpf = xdpf; + goto exit; + +mapping_error: + for (dx = ring->sw_tail; + ret > 0; + --ret, dx = aq_ring_next_dx(ring, dx)) { + dx_buff = &ring->buff_ring[dx]; + + if (!dx_buff->pa) + continue; + if (unlikely(dx_buff->is_sop)) + dma_unmap_single(dev, dx_buff->pa, dx_buff->len, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dx_buff->pa, dx_buff->len, + DMA_TO_DEVICE); + } + +exit: + return ret; +} + unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring) { @@ -697,6 +794,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, first->eop_index = dx; dx_buff->is_eop = 1U; dx_buff->skb = skb; + dx_buff->xdpf = NULL; goto exit; mapping_error: @@ -725,6 +823,44 @@ exit: return ret; } +int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring, + struct xdp_frame *xdpf) +{ + u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx); + struct net_device *ndev = aq_nic_get_ndev(aq_nic); + struct skb_shared_info *sinfo; + int cpu = smp_processor_id(); + int err = NETDEV_TX_BUSY; + struct netdev_queue *nq; + unsigned int frags = 1; + + if (xdp_frame_has_frags(xdpf)) { + sinfo = xdp_get_shared_info_from_frame(xdpf); + frags += sinfo->nr_frags; + } + + if (frags > AQ_CFG_SKB_FRAGS_MAX) + return err; + + nq = netdev_get_tx_queue(ndev, tx_ring->idx); + __netif_tx_lock(nq, cpu); + + aq_ring_update_queue_state(tx_ring); + + /* Above status update may stop the queue. Check this. 
*/ + if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index)) + goto out; + + frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring); + if (likely(frags)) + err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring, + frags); +out: + __netif_tx_unlock(nq); + + return err; +} + int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) { struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 47123baabd5e..935ba889bd9a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -180,6 +180,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self); int aq_nic_start(struct aq_nic_s *self); unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring); +int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring, + struct xdp_frame *xdpf); int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p); int aq_nic_get_regs_count(struct aq_nic_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 31a14a0dc25d..9bb9b764d31c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -20,6 +20,24 @@ #include #include +static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff, + struct xdp_buff *xdp) +{ + struct skb_shared_info *sinfo; + int i; + + if (xdp_buff_has_frags(xdp)) { + sinfo = xdp_get_shared_info_from_buff(xdp); + + for (i = 0; i < sinfo->nr_frags; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + + page_ref_inc(skb_frag_page(frag)); + } + } + page_ref_inc(buff->rxdata.page); +} + static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev) { unsigned int len = PAGE_SIZE << rxpage->order; @@ -319,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } } - if (unlikely(buff->is_eop && buff->skb)) { + if (likely(!buff->is_eop)) + goto out; + + if (buff->skb) { u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; u64_stats_update_end(&self->stats.tx.syncp); - dev_kfree_skb_any(buff->skb); - buff->skb = NULL; + } else if (buff->xdpf) { + u64_stats_update_begin(&self->stats.tx.syncp); + ++self->stats.tx.packets; + self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf); + u64_stats_update_end(&self->stats.tx.syncp); + xdp_return_frame_rx_napi(buff->xdpf); } + +out: + buff->skb = NULL; + buff->xdpf = NULL; buff->pa = 0U; buff->eop_index = 0xffffU; self->sw_head = aq_ring_next_dx(self, self->sw_head); @@ -360,11 +389,136 @@ static void aq_rx_checksum(struct aq_ring_s *self, __skb_incr_checksum_unnecessary(skb); } -#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -int aq_ring_rx_clean(struct aq_ring_s *self, - struct napi_struct *napi, - int *work_done, - int budget) +static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, + struct xdp_buff *xdp, + struct aq_ring_s *rx_ring, + struct aq_ring_buff_s *buff) +{ + int result = NETDEV_TX_BUSY; + struct aq_ring_s *tx_ring; + struct xdp_frame *xdpf; + struct bpf_prog *prog; + u32 act = XDP_ABORTED; + struct sk_buff *skb; + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp); + u64_stats_update_end(&rx_ring->stats.rx.syncp); + + prog = 
READ_ONCE(rx_ring->xdp_prog); + if (!prog) + goto pass; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + /* single buffer XDP program, but packet is multi buffer, aborted */ + if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags) + goto out_aborted; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: +pass: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev); + if (!skb) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_pass; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + return skb; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + tx_ring = aq_nic->aq_ring_tx[rx_ring->idx]; + result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf); + if (result == NETDEV_TX_BUSY) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_tx; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0) + goto out_aborted; + xdp_do_flush(); + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_redirect; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + default: + fallthrough; + case XDP_ABORTED: +out_aborted: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + trace_xdp_exception(aq_nic->ndev, prog, act); + bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act); + break; + case XDP_DROP: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_drop; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + break; + } + + return ERR_PTR(-result); +} + +static bool aq_add_rx_fragment(struct device *dev, + struct aq_ring_s *ring, + struct aq_ring_buff_s *buff, + struct xdp_buff *xdp) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + struct aq_ring_buff_s *buff_ = buff; + + memset(sinfo, 0, sizeof(*sinfo)); + do { + skb_frag_t *frag; + + if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) + return true; + + frag = &sinfo->frags[sinfo->nr_frags++]; + buff_ = &ring->buff_ring[buff_->next]; + dma_sync_single_range_for_cpu(dev, + buff_->rxdata.daddr, + buff_->rxdata.pg_off, + buff_->len, + DMA_FROM_DEVICE); + skb_frag_off_set(frag, buff_->rxdata.pg_off); + skb_frag_size_set(frag, buff_->len); + sinfo->xdp_frags_size += buff_->len; + __skb_frag_set_page(frag, buff_->rxdata.page); + + buff_->is_cleaned = 1; + + buff->is_ip_cso &= buff_->is_ip_cso; + buff->is_udp_cso &= buff_->is_udp_cso; + buff->is_tcp_cso &= buff_->is_tcp_cso; + buff->is_cso_err |= buff_->is_cso_err; + + if (page_is_pfmemalloc(buff_->rxdata.page)) + xdp_buff_set_frag_pfmemalloc(xdp); + + } while (!buff_->is_eop); + + xdp_buff_set_frags_flag(xdp); + + return false; +} + +static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, + int *work_done, int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); bool is_rsc_completed = true; @@ -531,6 +685,149 @@ err_exit: return err; } +static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, + struct napi_struct *napi, int *work_done, + int budget) +{ + int frame_sz = rx_ring->page_offset + rx_ring->frame_max + + rx_ring->tail_size; + struct aq_nic_s *aq_nic = rx_ring->aq_nic; + bool 
is_rsc_completed = true; + struct device *dev; + int err = 0; + + dev = aq_nic_get_dev(aq_nic); + for (; (rx_ring->sw_head != rx_ring->hw_head) && budget; + rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head]; + bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring); + struct aq_ring_buff_s *buff_ = NULL; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + struct xdp_buff xdp; + void *hard_start; + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + is_rsc_completed = + aq_ring_dx_in_range(rx_ring->sw_head, + next_, + rx_ring->hw_head); + + if (unlikely(!is_rsc_completed)) + break; + + buff->is_error |= buff_->is_error; + buff->is_cso_err |= buff_->is_cso_err; + } while (!buff_->is_eop); + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + if (buff->is_error || + (buff->is_lro && buff->is_cso_err)) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + + buff_->is_cleaned = true; + } while (!buff_->is_eop); + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + if (buff->is_error) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + + dma_sync_single_range_for_cpu(dev, + buff->rxdata.daddr, + buff->rxdata.pg_off, + buff->len, DMA_FROM_DEVICE); + hard_start = page_address(buff->rxdata.page) + + buff->rxdata.pg_off - rx_ring->page_offset; + + if (is_ptp_ring) + buff->len -= + aq_ptp_extract_ts(rx_ring->aq_nic, skb, + aq_buf_vaddr(&buff->rxdata), + buff->len); + + xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset, + buff->len, false); + if (!buff->is_eop) { + if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff); + if (IS_ERR(skb) || !skb) + continue; + + if (buff->is_vlan) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + buff->vlan_rx_tag); + + aq_rx_checksum(rx_ring, buff, skb); + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + /* Send all PTP traffic to 0 queue */ + skb_record_rx_queue(skb, + is_ptp_ring ? 
0 + : AQ_NIC_RING2QMAP(rx_ring->aq_nic, + rx_ring->idx)); + + napi_gro_receive(napi, skb); + } + +err_exit: + return err; +} + +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget) +{ + if (static_branch_unlikely(&aq_xdp_locking_key)) + return __aq_ring_xdp_clean(self, napi, work_done, budget); + else + return __aq_ring_rx_clean(self, napi, work_done, budget); +} + void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) { #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) @@ -620,6 +917,15 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) data[++count] = self->stats.rx.alloc_fails; data[++count] = self->stats.rx.skb_alloc_fails; data[++count] = self->stats.rx.polls; + data[++count] = self->stats.rx.pg_flips; + data[++count] = self->stats.rx.pg_reuses; + data[++count] = self->stats.rx.pg_losts; + data[++count] = self->stats.rx.xdp_aborted; + data[++count] = self->stats.rx.xdp_drop; + data[++count] = self->stats.rx.xdp_pass; + data[++count] = self->stats.rx.xdp_tx; + data[++count] = self->stats.rx.xdp_invalid; + data[++count] = self->stats.rx.xdp_redirect; } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); } else { /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 2dadfaff47f5..52f40ea3f1df 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -106,6 +106,12 @@ struct aq_ring_stats_rx_s { u64 pg_losts; u64 pg_flips; u64 pg_reuses; + u64 xdp_aborted; + u64 xdp_drop; + u64 xdp_pass; + u64 xdp_tx; + u64 xdp_invalid; + u64 xdp_redirect; }; struct aq_ring_stats_tx_s { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 997f351bc371..e839e1002ec7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -155,7 +155,7 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic, goto err_exit; } if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq, - MEM_TYPE_PAGE_ORDER0, NULL) < 0) { + MEM_TYPE_PAGE_SHARED, NULL) < 0) { xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq); err = -ENOMEM; goto err_exit; -- cgit v1.2.3 From 45638f013a63219378f7a2f9b49c5acbfe031c5a Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sun, 17 Apr 2022 10:12:47 +0000 Subject: net: atlantic: Implement .ndo_xdp_xmit handler aq_xdp_xmit() is the callback function for .ndo_xdp_xmit. It internally calls aq_nic_xmit_xdpf() to send packets. Signed-off-by: Taehee Yoo Signed-off-by: David S.
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 1 + drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 23 +++++++++++++++++++++++ drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 2 ++ 3 files changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index e794ee25b12b..88595863d8bc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -508,6 +508,7 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, .ndo_setup_tc = aq_ndo_setup_tc, .ndo_bpf = aq_xdp, + .ndo_xdp_xmit = aq_xdp_xmit, }; static int __init aq_ndev_init_module(void) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 9bb9b764d31c..ea740210803f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -389,6 +389,29 @@ static void aq_rx_checksum(struct aq_ring_s *self, __skb_incr_checksum_unnecessary(skb); } +int aq_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct aq_nic_s *aq_nic = netdev_priv(dev); + unsigned int vec, i, drop = 0; + int cpu = smp_processor_id(); + struct aq_nic_cfg_s *aq_cfg; + struct aq_ring_s *ring; + + aq_cfg = aq_nic_get_cfg(aq_nic); + vec = cpu % aq_cfg->vecs; + ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)]; + + for (i = 0; i < num_frames; i++) { + struct xdp_frame *xdpf = frames[i]; + + if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY) + drop++; + } + + return num_frames - drop; +} + static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, struct xdp_buff *xdp, struct aq_ring_s *rx_ring, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 52f40ea3f1df..0a6c34438c1d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -199,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring); void aq_ring_queue_wake(struct aq_ring_s *ring); void aq_ring_queue_stop(struct aq_ring_s *ring); bool aq_ring_tx_clean(struct aq_ring_s *self); +int aq_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags); int aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, int *work_done, -- cgit v1.2.3 From 286c61e7279768cec9c13bf8ce947de5c33e2325 Mon Sep 17 00:00:00 2001 From: Yufeng Mo Date: Tue, 19 Apr 2022 11:27:01 +0800 Subject: net: hns3: add ethtool parameter check for CQE/EQE mode For DEVICE_VERSION_V2, the hardware does not support the CQE mode. So add capability bit for coalesce CQE mode and add parameter check for it in ethtool. Signed-off-by: Yufeng Mo Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 4 ++++ .../hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 2 ++ .../hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 1 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5 +--- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 28 +++++++++++++++++++--- 5 files changed, 33 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 79c64f4e67d2..8a3a446219f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, + HNAE3_DEV_SUPPORT_CQ_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) +#define hnae3_ae_dev_cq_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index c15ca710dabb..c8b151d29f53 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { @@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static void diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 876650eddac4..7a7d4cf9bf35 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_PAUSE_B = 14, HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15, HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17, + HCLGE_COMM_CAP_CQ_B = 18, }; enum HCLGE_COMM_API_CAP_BITS { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 14dc12c2155d..7e9f9da2f392 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, priv->tqp_vector[i].rx_group.dim.mode = mode; } - /* only device version above V3(include V3), GL can switch CQ/EQ - * period mode. 
- */ - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + if (hnae3_ae_dev_cq_supported(ae_dev)) { u32 new_mode; u64 reg; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 9f4111fd2986..8663ba5d41d8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1415,11 +1415,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev, return 0; } -static int hns3_check_coalesce_para(struct net_device *netdev, - struct ethtool_coalesce *cmd) +static int +hns3_check_cqe_coalesce_param(struct net_device *netdev, + struct kernel_ethtool_coalesce *kernel_coal) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + + if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) && + !hnae3_ae_dev_cq_supported(ae_dev)) { + netdev_err(netdev, "coalesced cqe mode is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +hns3_check_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal) { int ret; + ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal); + if (ret) + return ret; + ret = hns3_check_gl_coalesce_para(netdev, cmd); if (ret) { netdev_err(netdev, @@ -1494,7 +1516,7 @@ static int hns3_set_coalesce(struct net_device *netdev, if (hns3_nic_resetting(netdev)) return -EBUSY; - ret = hns3_check_coalesce_para(netdev, cmd); + ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal); if (ret) return ret; -- cgit v1.2.3 From 07fdc163ac886613e406e8c4356279d9a824b529 Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Tue, 19 Apr 2022 11:27:02 +0800 Subject: net: hns3: refactor hns3_set_ringparam() Use struct hns3_ring_param to replace the new/old_xxx variables and add hns3_is_ringparam_changed() to check whether the ring parameters have changed, to improve code readability. Signed-off-by: Hao Chen Signed-off-by: Guangbin Huang Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 65 ++++++++++++++-------- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h | 6 ++ 2 files changed, 49 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 8663ba5d41d8..e647751e9054 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1106,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev, return 0; } +static bool +hns3_is_ringparam_changed(struct net_device *ndev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct hns3_ring_param *old_ringparam, + struct hns3_ring_param *new_ringparam) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + u16 queue_num = h->kinfo.num_tqps; + + new_ringparam->tx_desc_num = ALIGN(param->tx_pending, + HNS3_RING_BD_MULTIPLE); + new_ringparam->rx_desc_num = ALIGN(param->rx_pending, + HNS3_RING_BD_MULTIPLE); + old_ringparam->tx_desc_num = priv->ring[0].desc_num; + old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num; + old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size; + new_ringparam->rx_buf_len = kernel_param->rx_buf_len; + + if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && + old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && + old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { + netdev_info(ndev, "ringparam not changed\n"); + return false; + } + + return true; +} + static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) { struct hns3_nic_priv *priv = netdev_priv(ndev); @@ -1151,14 +1181,11 @@ static int hns3_set_ringparam(struct net_device *ndev, struct kernel_ethtool_ringparam *kernel_param, struct netlink_ext_ack *extack) { + struct hns3_ring_param old_ringparam, new_ringparam; struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_ring *tmp_rings; bool if_running = netif_running(ndev); - u32 old_tx_desc_num, new_tx_desc_num; - u32 old_rx_desc_num, new_rx_desc_num; - u16 queue_num = h->kinfo.num_tqps; - u32 old_rx_buf_len; int ret, i; ret = hns3_check_ringparam(ndev, param, kernel_param); @@ -1169,15 +1196,8 @@ static int hns3_set_ringparam(struct net_device *ndev, if (ret) return ret; - /* Hardware requires that its descriptors must be multiple of eight */ - new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); - new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); - old_tx_desc_num = priv->ring[0].desc_num; - old_rx_desc_num = priv->ring[queue_num].desc_num; - old_rx_buf_len = priv->ring[queue_num].buf_size; - if (old_tx_desc_num == new_tx_desc_num && - old_rx_desc_num == new_rx_desc_num && - kernel_param->rx_buf_len == old_rx_buf_len) + if (!hns3_is_ringparam_changed(ndev, param, kernel_param, + &old_ringparam, &new_ringparam)) return 0; tmp_rings = hns3_backup_ringparam(priv); @@ -1188,24 +1208,25 @@ static int hns3_set_ringparam(struct net_device *ndev, } netdev_info(ndev, - "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n", - old_tx_desc_num, old_rx_desc_num, - new_tx_desc_num, new_rx_desc_num, - old_rx_buf_len, kernel_param->rx_buf_len); + "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n", + old_ringparam.tx_desc_num, old_ringparam.rx_desc_num, + 
new_ringparam.tx_desc_num, new_ringparam.rx_desc_num, + old_ringparam.rx_buf_len, new_ringparam.rx_buf_len); if (if_running) ndev->netdev_ops->ndo_stop(ndev); - hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); - hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len); + hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num, + new_ringparam.rx_desc_num); + hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len); ret = hns3_init_all_ring(priv); if (ret) { netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", ret); - hns3_change_rx_buf_len(ndev, old_rx_buf_len); - hns3_change_all_ring_bd_num(priv, old_tx_desc_num, - old_rx_desc_num); + hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len); + hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num, + old_ringparam.rx_desc_num); for (i = 0; i < h->kinfo.num_tqps * 2; i++) memcpy(&priv->ring[i], &tmp_rings[i], sizeof(struct hns3_enet_ring)); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h index 822d6fcbc73b..da207d1d9aa9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping { u8 link_ext_substate; }; +struct hns3_ring_param { + u32 tx_desc_num; + u32 rx_desc_num; + u32 rx_buf_len; +}; + #endif -- cgit v1.2.3 From 6fde96df0447a29ab785de4fcb229e5543f0cbf7 Mon Sep 17 00:00:00 2001 From: Jian Shen Date: Tue, 19 Apr 2022 11:27:03 +0800 Subject: net: hns3: refine the definition for struct hclge_pf_to_vf_msg The struct hclge_pf_to_vf_msg is used for mailbox messages from PF to VF, including both responses and requests. But its definition can only describe a response, which makes the message data copy in function hclge_send_mbx_msg() unreadable. So refine it by adding a general message definition into it. Signed-off-by: Jian Shen Signed-off-by: Guangbin Huang Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 17 +++++++++++++---- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index b668df6193be..8c7fadf2b734 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg { struct hclge_pf_to_vf_msg { u16 code; - u16 vf_mbx_msg_code; - u16 vf_mbx_msg_subcode; - u16 resp_status; - u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + union { + /* used for mbx response */ + struct { + u16 vf_mbx_msg_code; + u16 vf_mbx_msg_subcode; + u16 resp_status; + u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + }; + /* used for general mbx */ + struct { + u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE]; + }; + }; }; struct hclge_mbx_vf_to_pf_cmd { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 6799d16de34b..76d0f17d6be3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -102,7 +102,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, resp_pf_to_vf->msg_len = msg_len; resp_pf_to_vf->msg.code = mbx_opcode; - memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len); + memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len); trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); -- cgit v1.2.3 From bcc7a98f0d3cbb18ca2502c278812867f81add48 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Tue, 19 Apr 2022 11:27:04 +0800 Subject: net: hns3: add failure logs in hclge_set_vport_mtu Currently, there is a low probability that PF MTU configuration fails, but the information in the logs is insufficient for locating the problem when the VF MTU value is illegally modified. So record the VF index and VF MTU value in the failure scenario. Signed-off-by: Jie Wang Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8cebb180c812..a5dd2c8c244a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) /* PF's mps must be greater then VF's mps */ for (i = 1; i < hdev->num_alloc_vport; i++) if (max_frm_size < hdev->vport[i].mps) { + dev_err(&hdev->pdev->dev, + "failed to set pf mtu for less than vport %d, mps = %u.\n", + i, hdev->vport[i].mps); mutex_unlock(&hdev->vport_lock); return -EINVAL; } -- cgit v1.2.3 From 2373b35c24ffdf680efad11377ceb994149b1f6f Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Tue, 19 Apr 2022 11:27:05 +0800 Subject: net: hns3: add log for setting tx spare buf size Since the active tx spare buffer size may be changed according to the page size, add a log message to report it. Signed-off-by: Hao Chen Signed-off-by: Guangbin Huang Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index e647751e9054..73498e125492 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1898,6 +1898,8 @@ static int hns3_set_tunable(struct net_device *netdev, case ETHTOOL_TX_COPYBREAK_BUF_SIZE: old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; new_tx_spare_buf_size = *(u32 *)data; + netdev_info(netdev, "request to set tx spare buf size from %u to %u\n", + old_tx_spare_buf_size, new_tx_spare_buf_size); ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); if (ret || (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { @@ -1915,6 +1917,10 @@ static int hns3_set_tunable(struct net_device *netdev, return ret; } + + netdev_info(netdev, "the actvie tx spare buf size is %u, due to page order\n", + priv->ring->tx_spare->len); + break; default: ret = -EOPNOTSUPP; -- cgit v1.2.3 From 2e0f538870119dfdacf3caf11af2d925157eb84f Mon Sep 17 00:00:00 2001 From: Peng Li Date: Tue, 19 Apr 2022 11:27:06 +0800 Subject: net: hns3: update the comment of function hclgevf_get_mbx_resp The parameters of function hclgevf_get_mbx_resp have been changed but the comments were not updated. This patch updates them. Signed-off-by: Peng Li Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index d5e0a3f762f7..4761dceccea5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) /* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox * message to PF. * @hdev: pointer to struct hclgevf_dev - * @resp_msg: pointer to store the original message type and response status - * @len: the resp_msg data array length. + * @code0: the message opcode VF send to PF. + * @code1: the message sub-opcode VF send to PF. + * @resp_data: pointer to store response data from PF to VF. + * @resp_len: the length of resp_data from PF to VF. */ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, u8 *resp_data, u16 resp_len) -- cgit v1.2.3 From 9c657cbc2c158a32e18f541df309772ca390fc0c Mon Sep 17 00:00:00 2001 From: Peng Li Date: Tue, 19 Apr 2022 11:27:07 +0800 Subject: net: hns3: fix the wrong words in comments This patch fixes wrong words in comments. Signed-off-by: Peng Li Signed-off-by: Guangbin Huang Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 42a9e73d8588..6efd768cc07c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, * @num: number of extended command structures * * This function handles all the PF RAS errors in the - * hw register/s using command. + * hw registers using command. */ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, struct hclge_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 342d7cdf6285..528b5a17adb0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* ensure vf tbl list as empty before init*/ + /* ensure vf tbl list as empty before init */ ret = hclgevf_clear_vport_list(hdev); if (ret) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 4761dceccea5..c8055d69255c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with mbx_resp.mbx_mutex held - * to prtect the received_response from race condition + * to protect the received_response from race condition */ hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; -- cgit v1.2.3 From 350cb44092461d719fd1a9662b2f7fb7263891d5 Mon Sep 17 00:00:00 2001 From: Peng Li Date: Tue, 19 Apr 2022 11:27:08 +0800 Subject: net: hns3: replace magic value by HCLGE_RING_REG_OFFSET Magic values are not recommended. Signed-off-by: Peng Li Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 528b5a17adb0..e13d71abd9f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, for (i = 0; i < reg_um; i++) *reg++ = hclgevf_read_dev(&hdev->hw, ring_reg_addr_list[i] + - 0x200 * j); + HCLGEVF_TQP_REG_SIZE * j); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; } -- cgit v1.2.3 From 29c17cb67271709755973a6136e10bb08ae34c16 Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Tue, 19 Apr 2022 11:27:09 +0800 Subject: net: hns3: remove unnecessary line wrap for hns3_set_tunable Remove unnecessary line wrap for hns3_set_tunable to improve function readability. 
Signed-off-by: Hao Chen Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 73498e125492..bb001f597857 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1202,8 +1202,7 @@ static int hns3_set_ringparam(struct net_device *ndev, tmp_rings = hns3_backup_ringparam(priv); if (!tmp_rings) { - netdev_err(ndev, - "backup ring param failed by allocating memory fail\n"); + netdev_err(ndev, "backup ring param failed by allocating memory fail\n"); return -ENOMEM; } @@ -1905,13 +1904,11 @@ static int hns3_set_tunable(struct net_device *netdev, (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { int ret1; - netdev_warn(netdev, - "change tx spare buf size fail, revert to old value\n"); + netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n"); ret1 = hns3_set_tx_spare_buf_size(netdev, old_tx_spare_buf_size); if (ret1) { - netdev_err(netdev, - "revert to old tx spare buf size fail\n"); + netdev_err(netdev, "revert to old tx spare buf size fail\n"); return ret1; } -- cgit v1.2.3 From fcd30c96af957f9c0eeccb95689548dfce52e149 Mon Sep 17 00:00:00 2001 From: Luiz Angelo Daros de Luca Date: Mon, 18 Apr 2022 20:35:58 -0300 Subject: net: dsa: realtek: remove realtek,rtl8367s string MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no need to add new compatible strings for each new supported chip version. The compatible string is used only to select the subdriver (rtl8365mb.c or rtl8366rb.c). Once in the subdriver, it will detect the chip model by itself, ignoring which compatible string was used. Link: https://lore.kernel.org/netdev/20220414014055.m4wbmr7tdz6hsa3m@bang-olufsen.dk/ Signed-off-by: Luiz Angelo Daros de Luca Reviewed-by: Alvin Šipraga Reviewed-by: Florian Fainelli Reviewed-by: Andrew Lunn Acked-by: Arınç ÜNAL Signed-off-by: David S. 
Miller --- drivers/net/dsa/realtek/realtek-mdio.c | 1 - drivers/net/dsa/realtek/realtek-smi.c | 4 ---- 2 files changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c index 31e1f100e48e..c58f49d558d2 100644 --- a/drivers/net/dsa/realtek/realtek-mdio.c +++ b/drivers/net/dsa/realtek/realtek-mdio.c @@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = { #endif #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB) { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, }, - { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, }, #endif { /* sentinel */ }, }; diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c index 6cec559c90ce..45992f79ec8d 100644 --- a/drivers/net/dsa/realtek/realtek-smi.c +++ b/drivers/net/dsa/realtek/realtek-smi.c @@ -551,10 +551,6 @@ static const struct of_device_id realtek_smi_of_match[] = { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, }, - { - .compatible = "realtek,rtl8367s", - .data = &rtl8365mb_variant, - }, #endif { /* sentinel */ }, }; -- cgit v1.2.3 From e63dd41235072575a190ef3f3e6e618a6dd0ac64 Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Tue, 19 Apr 2022 09:37:31 +0800 Subject: ar5523: Use kzalloc instead of kmalloc/memset Use kzalloc rather than duplicating its implementation, which makes code simple and easy to understand. Signed-off-by: Haowen Bai Signed-off-by: David S. Miller --- drivers/net/wireless/ath/ar5523/ar5523.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 9cabd342d156..7527a382327f 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1500,7 +1500,7 @@ static int ar5523_load_firmware(struct usb_device *dev) return -ENOENT; } - txblock = kmalloc(sizeof(*txblock), GFP_KERNEL); + txblock = kzalloc(sizeof(*txblock), GFP_KERNEL); if (!txblock) goto out; @@ -1512,7 +1512,6 @@ static int ar5523_load_firmware(struct usb_device *dev) if (!fwbuf) goto out_free_rxblock; - memset(txblock, 0, sizeof(struct ar5523_fwblock)); txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK); txblock->total = cpu_to_be32(fw->size); -- cgit v1.2.3 From de28976d2650d3cd962035a9fac5eef3c5b1b379 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 19 Apr 2022 17:54:26 +0300 Subject: mlxsw: core_linecards: Introduce ops for linecards status change tracking Introduce an infrastructure allowing users to register a set of operations which are to be called whenever a line card gets active/inactive. Signed-off-by: Jiri Pirko Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 17 +++ .../net/ethernet/mellanox/mlxsw/core_linecards.c | 137 +++++++++++++++++++++ 2 files changed, 154 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 850fff51b79f..c2a891287047 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -590,6 +590,8 @@ struct mlxsw_linecards { const struct mlxsw_bus_info *bus_info; u8 count; struct mlxsw_linecard_types_info *types_info; + struct list_head event_ops_list; + struct mutex event_ops_list_lock; /* Locks accesses to event ops list */ struct mlxsw_linecard linecards[]; }; @@ -603,4 +605,19 @@ int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *bus_info); void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core); +typedef void mlxsw_linecards_event_op_t(struct mlxsw_core *mlxsw_core, + u8 slot_index, void *priv); + +struct mlxsw_linecards_event_ops { + mlxsw_linecards_event_op_t *got_active; + mlxsw_linecards_event_op_t *got_inactive; +}; + +int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv); +void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 1d50bfe67156..90e487cc2e2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -95,6 +95,137 @@ static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) devlink_linecard_provision_fail(linecard->devlink_linecard); } +struct mlxsw_linecards_event_ops_item { + struct list_head list; + const struct mlxsw_linecards_event_ops *event_ops; + void *priv; +}; + +static void +mlxsw_linecard_event_op_call(struct mlxsw_linecard *linecard, + mlxsw_linecards_event_op_t *op, void *priv) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + + if (!op) + return; + op(mlxsw_core, linecard->slot_index, priv); +} + +static void +mlxsw_linecard_active_ops_call(struct mlxsw_linecard *linecard) +{ + struct mlxsw_linecards *linecards = linecard->linecards; + struct mlxsw_linecards_event_ops_item *item; + + mutex_lock(&linecards->event_ops_list_lock); + list_for_each_entry(item, &linecards->event_ops_list, list) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_active, + item->priv); + mutex_unlock(&linecards->event_ops_list_lock); +} + +static void +mlxsw_linecard_inactive_ops_call(struct mlxsw_linecard *linecard) +{ + struct mlxsw_linecards *linecards = linecard->linecards; + struct mlxsw_linecards_event_ops_item *item; + + mutex_lock(&linecards->event_ops_list_lock); + list_for_each_entry(item, &linecards->event_ops_list, list) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_inactive, + item->priv); + mutex_unlock(&linecards->event_ops_list_lock); +} + +static void +mlxsw_linecards_event_ops_register_call(struct mlxsw_linecards *linecards, + const struct mlxsw_linecards_event_ops_item *item) +{ + struct mlxsw_linecard *linecard; + int i; + + for (i = 0; i < linecards->count; i++) { + linecard = mlxsw_linecard_get(linecards, i + 1); + mutex_lock(&linecard->lock); + if (linecard->active) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_active, + item->priv); + 
mutex_unlock(&linecard->lock); + } +} + +static void +mlxsw_linecards_event_ops_unregister_call(struct mlxsw_linecards *linecards, + const struct mlxsw_linecards_event_ops_item *item) +{ + struct mlxsw_linecard *linecard; + int i; + + for (i = 0; i < linecards->count; i++) { + linecard = mlxsw_linecard_get(linecards, i + 1); + mutex_lock(&linecard->lock); + if (linecard->active) + mlxsw_linecard_event_op_call(linecard, + item->event_ops->got_inactive, + item->priv); + mutex_unlock(&linecard->lock); + } +} + +int mlxsw_linecards_event_ops_register(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + struct mlxsw_linecards_event_ops_item *item; + + if (!linecards) + return 0; + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) + return -ENOMEM; + item->event_ops = ops; + item->priv = priv; + + mutex_lock(&linecards->event_ops_list_lock); + list_add_tail(&item->list, &linecards->event_ops_list); + mutex_unlock(&linecards->event_ops_list_lock); + mlxsw_linecards_event_ops_register_call(linecards, item); + return 0; +} +EXPORT_SYMBOL(mlxsw_linecards_event_ops_register); + +void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecards_event_ops *ops, + void *priv) +{ + struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core); + struct mlxsw_linecards_event_ops_item *item, *tmp; + bool found = false; + + if (!linecards) + return; + mutex_lock(&linecards->event_ops_list_lock); + list_for_each_entry_safe(item, tmp, &linecards->event_ops_list, list) { + if (item->event_ops == ops && item->priv == priv) { + list_del(&item->list); + found = true; + break; + } + } + mutex_unlock(&linecards->event_ops_list_lock); + + if (!found) + return; + mlxsw_linecards_event_ops_unregister_call(linecards, item); + kfree(item); +} +EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister); + static int mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, u16 hw_revision, u16 ini_version) @@ -163,12 +294,14 @@ static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard) static void mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) { + mlxsw_linecard_active_ops_call(linecard); linecard->active = true; devlink_linecard_activate(linecard->devlink_linecard); } static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard) { + mlxsw_linecard_inactive_ops_call(linecard); linecard->active = false; devlink_linecard_deactivate(linecard->devlink_linecard); } @@ -954,6 +1087,8 @@ int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core, linecards->count = slot_count; linecards->mlxsw_core = mlxsw_core; linecards->bus_info = bus_info; + INIT_LIST_HEAD(&linecards->event_ops_list); + mutex_init(&linecards->event_ops_list_lock); err = mlxsw_linecard_types_init(mlxsw_core, linecards); if (err) @@ -1001,5 +1136,7 @@ void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core) ARRAY_SIZE(mlxsw_linecard_listener), mlxsw_core); mlxsw_linecard_types_fini(linecards); + mutex_destroy(&linecards->event_ops_list_lock); + WARN_ON(!list_empty(&linecards->event_ops_list)); vfree(linecards); } -- cgit v1.2.3 From 7b261af9f641595f2fc2c8fae6cdcffc740b9b9e Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 19 Apr 2022 17:54:27 +0300 Subject: mlxsw: core: Add bus argument to environment init API Pass bus argument to mlxsw_env_init(). 
The purpose is to get access to device handle, which is to be provided to error message in case of line card activation failure. Signed-off-by: Vadim Pasternak Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 6 +++++- drivers/net/ethernet/mellanox/mlxsw/core_env.h | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 0e92dd91eca4..fc52832241b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2175,7 +2175,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_thermal_init; - err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env); + err = mlxsw_env_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->env); if (err) goto err_env_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index b3b8a9015cd6..abb54177485c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -28,6 +28,7 @@ struct mlxsw_env_line_card { struct mlxsw_env { struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; u8 max_module_count; /* Maximum number of modules per-slot. */ u8 num_of_slots; /* Including the main board. */ struct mutex line_cards_lock; /* Protects line cards. */ @@ -1194,7 +1195,9 @@ mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) return 0; } -int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) +int mlxsw_env_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *bus_info, + struct mlxsw_env **p_env) { u8 module_count, num_of_slots, max_module_count; char mgpir_pl[MLXSW_REG_MGPIR_LEN]; @@ -1221,6 +1224,7 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) return -ENOMEM; env->core = mlxsw_core; + env->bus_info = bus_info; env->num_of_slots = num_of_slots + 1; env->max_module_count = max_module_count; err = mlxsw_env_line_cards_alloc(env); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index 6b494c64a4d7..a197e3ae069c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -60,7 +60,9 @@ int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 slot_index, void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module); -int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env); +int mlxsw_env_init(struct mlxsw_core *core, + const struct mlxsw_bus_info *bus_info, + struct mlxsw_env **p_env); void mlxsw_env_fini(struct mlxsw_env *env); #endif -- cgit v1.2.3 From a11e1ec141ea14b9e44240b275378a315ee44457 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 19 Apr 2022 17:54:28 +0300 Subject: mlxsw: core_env: Split module power mode setting to a separate function Move the code that applies the module power mode to the device to a separate function. This function will be invoked by the next patch to set the power mode on transceiver modules found on a line card when the line card becomes active. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 41 +++++++++++++++++--------- 1 file changed, 27 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index abb54177485c..a9b133d6c2fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -648,25 +648,16 @@ err_module_low_power_set: return err; } -int -mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, - u8 module, - enum ethtool_module_power_mode_policy policy, - struct netlink_ext_ack *extack) +static int +mlxsw_env_set_module_power_mode_apply(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack) { - struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); struct mlxsw_env_module_info *module_info; bool low_power; int err = 0; - if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH && - policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy"); - return -EOPNOTSUPP; - } - - mutex_lock(&mlxsw_env->line_cards_lock); - err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, @@ -691,7 +682,29 @@ mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, out_set_policy: module_info->power_mode_policy = policy; out: + return err; +} + +int +mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, + u8 module, + enum ethtool_module_power_mode_policy policy, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + int err; + + if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH && + policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy"); + return -EOPNOTSUPP; + } + + mutex_lock(&mlxsw_env->line_cards_lock); + err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, slot_index, + module, policy, extack); mutex_unlock(&mlxsw_env->line_cards_lock); + return err; } EXPORT_SYMBOL(mlxsw_env_set_module_power_mode); -- cgit v1.2.3 From 06a0fc43bb10e48355574b8d28ab5ab1a3c86c61 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 19 Apr 2022 17:54:29 +0300 Subject: mlxsw: core_env: Add interfaces for line card initialization and de-initialization Netdevs for ports found on line cards are registered upon provisioning. However, user space is not allowed to access the transceiver modules found on a line card until the line card becomes active. Therefore, register event operations with the line card core to get notifications whenever a line card becomes active or inactive. When user space tries to dump the EEPROM of a transceiver module or reset it and the corresponding line card is inactive, emit an error message: ethtool -m enp1s0nl7p9 netlink error: mlxsw_core: Cannot read EEPROM of module on an inactive line card netlink error: Input/output error When user space tries to set the power mode policy of such a transceiver, cache the configuration and apply it when the line card becomes active. This is consistent with other port configuration (e.g., MTU setting) that user space is able to perform while the line card is provisioned, but inactive. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_env.c | 166 ++++++++++++++++++++++++- 1 file changed, 165 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index a9b133d6c2fc..34bec9cd572c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -23,6 +23,7 @@ struct mlxsw_env_module_info { struct mlxsw_env_line_card { u8 module_count; + bool active; struct mlxsw_env_module_info module_info[]; }; @@ -35,6 +36,24 @@ struct mlxsw_env { struct mlxsw_env_line_card *line_cards[]; }; +static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env, + u8 slot_index) +{ + return mlxsw_env->line_cards[slot_index]->active; +} + +static bool mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env, + u8 slot_index) +{ + bool active; + + mutex_lock(&mlxsw_env->line_cards_lock); + active = __mlxsw_env_linecard_is_active(mlxsw_env, slot_index); + mutex_unlock(&mlxsw_env->line_cards_lock); + + return active; +} + static struct mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module) @@ -47,9 +66,13 @@ mlxsw_env_module_info *mlxsw_env_module_info_get(struct mlxsw_core *mlxsw_core, static int __mlxsw_env_validate_module_type(struct mlxsw_core *core, u8 slot_index, u8 module) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(core); struct mlxsw_env_module_info *module_info; int err; + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + return 0; + module_info = mlxsw_env_module_info_get(core, slot_index, module); switch (module_info->type) { case MLXSW_REG_PMTM_MODULE_TYPE_TWISTED_PAIR: @@ -269,12 +292,18 @@ int mlxsw_env_get_module_info(struct net_device *netdev, struct mlxsw_core *mlxsw_core, u8 slot_index, int module, struct ethtool_modinfo *modinfo) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE]; u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE; u8 module_rev_id, module_id, diag_mon; unsigned int read_size; int err; + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n"); + return -EIO; + } + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { netdev_err(netdev, @@ -359,6 +388,7 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, int module, struct ethtool_eeprom *ee, u8 *data) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int offset = ee->offset; unsigned int read_size; bool qsfp, cmis; @@ -368,6 +398,11 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, if (!ee->len) return -EINVAL; + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + netdev_err(netdev, "Cannot read EEPROM of module on an inactive line card\n"); + return -EIO; + } + memset(data, 0, ee->len); /* Validate module identifier value. 
*/ err = mlxsw_env_validate_cable_ident(mlxsw_core, slot_index, module, @@ -428,10 +463,17 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); u32 bytes_read = 0; u16 device_addr; int err; + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot read EEPROM of module on an inactive line card"); + return -EIO; + } + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); if (err) { NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type"); @@ -497,6 +539,11 @@ int mlxsw_env_reset_module(struct net_device *netdev, !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) return 0; + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + netdev_err(netdev, "Cannot reset module on an inactive line card\n"); + return -EIO; + } + mutex_lock(&mlxsw_env->line_cards_lock); err = __mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); @@ -543,7 +590,7 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, struct mlxsw_env_module_info *module_info; char mcion_pl[MLXSW_REG_MCION_LEN]; u32 status_bits; - int err; + int err = 0; mutex_lock(&mlxsw_env->line_cards_lock); @@ -556,6 +603,10 @@ mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 slot_index, module_info = mlxsw_env_module_info_get(mlxsw_core, slot_index, module); params->policy = module_info->power_mode_policy; + /* Avoid accessing an inactive line card, as it will result in an error. */ + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + goto out; + mlxsw_reg_mcion_pack(mcion_pl, slot_index, module); err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl); if (err) { @@ -617,8 +668,16 @@ static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, bool low_power, struct netlink_ext_ack *extack) { + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); int err; + /* Avoid accessing an inactive line card, as it will result in an error. + * Cached configuration will be applied by mlxsw_env_got_active() when + * line card becomes active. 
+ */ + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + return 0; + err = mlxsw_env_module_enable_set(mlxsw_core, slot_index, module, false); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to disable module"); @@ -1208,6 +1267,98 @@ mlxsw_env_module_type_set(struct mlxsw_core *mlxsw_core, u8 slot_index) return 0; } +static void +mlxsw_env_linecard_modules_power_mode_apply(struct mlxsw_core *mlxsw_core, + struct mlxsw_env *env, + u8 slot_index) +{ + int i; + + for (i = 0; i < env->line_cards[slot_index]->module_count; i++) { + enum ethtool_module_power_mode_policy policy; + struct mlxsw_env_module_info *module_info; + struct netlink_ext_ack extack; + int err; + + module_info = &env->line_cards[slot_index]->module_info[i]; + policy = module_info->power_mode_policy; + err = mlxsw_env_set_module_power_mode_apply(mlxsw_core, + slot_index, i, + policy, &extack); + if (err) + dev_err(env->bus_info->dev, "%s\n", extack._msg); + } +} + +static void +mlxsw_env_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv) +{ + struct mlxsw_env *mlxsw_env = priv; + char mgpir_pl[MLXSW_REG_MGPIR_LEN]; + int err; + + mutex_lock(&mlxsw_env->line_cards_lock); + if (__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + goto out_unlock; + + mlxsw_reg_mgpir_pack(mgpir_pl, slot_index); + err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mgpir), mgpir_pl); + if (err) + goto out_unlock; + + mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, + &mlxsw_env->line_cards[slot_index]->module_count, + NULL); + + err = mlxsw_env_module_event_enable(mlxsw_env, slot_index); + if (err) { + dev_err(mlxsw_env->bus_info->dev, "Failed to enable port module events for line card in slot %d\n", + slot_index); + goto err_mlxsw_env_module_event_enable; + } + err = mlxsw_env_module_type_set(mlxsw_env->core, slot_index); + if (err) { + dev_err(mlxsw_env->bus_info->dev, "Failed to set modules' type for line card in slot %d\n", + slot_index); + goto err_type_set; + } + + mlxsw_env->line_cards[slot_index]->active = true; + /* Apply power mode policy. 
*/ + mlxsw_env_linecard_modules_power_mode_apply(mlxsw_core, mlxsw_env, + slot_index); + mutex_unlock(&mlxsw_env->line_cards_lock); + + return; + +err_type_set: + mlxsw_env_module_event_disable(mlxsw_env, slot_index); +err_mlxsw_env_module_event_enable: +out_unlock: + mutex_unlock(&mlxsw_env->line_cards_lock); +} + +static void +mlxsw_env_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_env *mlxsw_env = priv; + + mutex_lock(&mlxsw_env->line_cards_lock); + if (!__mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) + goto out_unlock; + mlxsw_env->line_cards[slot_index]->active = false; + mlxsw_env_module_event_disable(mlxsw_env, slot_index); + mlxsw_env->line_cards[slot_index]->module_count = 0; +out_unlock: + mutex_unlock(&mlxsw_env->line_cards_lock); +} + +static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = { + .got_active = mlxsw_env_got_active, + .got_inactive = mlxsw_env_got_inactive, +}; + int mlxsw_env_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *bus_info, struct mlxsw_env **p_env) @@ -1247,6 +1398,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, mutex_init(&env->line_cards_lock); *p_env = env; + err = mlxsw_linecards_event_ops_register(env->core, + &mlxsw_env_event_ops, env); + if (err) + goto err_linecards_event_ops_register; + err = mlxsw_env_temp_warn_event_register(mlxsw_core); if (err) goto err_temp_warn_event_register; @@ -1271,6 +1427,8 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, if (err) goto err_type_set; + env->line_cards[0]->active = true; + return 0; err_type_set: @@ -1280,6 +1438,9 @@ err_mlxsw_env_module_event_enable: err_module_plug_event_register: mlxsw_env_temp_warn_event_unregister(env); err_temp_warn_event_register: + mlxsw_linecards_event_ops_unregister(env->core, + &mlxsw_env_event_ops, env); +err_linecards_event_ops_register: mutex_destroy(&env->line_cards_lock); mlxsw_env_line_cards_free(env); err_mlxsw_env_line_cards_alloc: @@ -1289,11 +1450,14 @@ err_mlxsw_env_line_cards_alloc: void mlxsw_env_fini(struct mlxsw_env *env) { + env->line_cards[0]->active = false; mlxsw_env_module_event_disable(env, 0); mlxsw_env_module_plug_event_unregister(env); /* Make sure there is no more event work scheduled. */ mlxsw_core_flush_owq(); mlxsw_env_temp_warn_event_unregister(env); + mlxsw_linecards_event_ops_unregister(env->core, + &mlxsw_env_event_ops, env); mutex_destroy(&env->line_cards_lock); mlxsw_env_line_cards_free(env); kfree(env); -- cgit v1.2.3 From f11a323da46cff1ce3e5bb87c36cc8cd21f9a793 Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 19 Apr 2022 17:54:30 +0300 Subject: mlxsw: core_thermal: Add interfaces for line card initialization and de-initialization Add callback functions for line card thermal area initialization and de-initialization. Each line card is associated with the relevant thermal area, which may contain thermal zones for cages and gearboxes found on this line card. The line card thermal initialization / de-initialization APIs are to be called when line card is set to active / inactive state by got_active() / got_inactive() callbacks from line card state machine. For example thermal zone for module #9 located at line card #7 will have type: mlxsw-lc7-module9. And thermal zone for gearbox #2 located at line card #5 will have type: mlxsw-lc5-gearbox2. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 74 ++++++++++++++++++++++ 1 file changed, 74 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index e8ce26a1d483..3548fe1df7c8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -90,6 +90,7 @@ struct mlxsw_thermal_area { struct mlxsw_thermal_module *tz_gearbox_arr; u8 tz_gearbox_num; u8 slot_index; + bool active; }; struct mlxsw_thermal { @@ -913,6 +914,64 @@ mlxsw_thermal_gearboxes_fini(struct mlxsw_thermal *thermal, kfree(area->tz_gearbox_arr); } +static void +mlxsw_thermal_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_thermal *thermal = priv; + struct mlxsw_thermal_area *linecard; + int err; + + linecard = &thermal->line_cards[slot_index]; + + if (linecard->active) + return; + + linecard->slot_index = slot_index; + err = mlxsw_thermal_modules_init(thermal->bus_info->dev, thermal->core, + thermal, linecard); + if (err) { + dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card modules in slot %d\n", + slot_index); + return; + } + + err = mlxsw_thermal_gearboxes_init(thermal->bus_info->dev, + thermal->core, thermal, linecard); + if (err) { + dev_err(thermal->bus_info->dev, "Failed to configure thermal objects for line card gearboxes in slot %d\n", + slot_index); + goto err_thermal_linecard_gearboxes_init; + } + + linecard->active = true; + + return; + +err_thermal_linecard_gearboxes_init: + mlxsw_thermal_modules_fini(thermal, linecard); +} + +static void +mlxsw_thermal_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_thermal *thermal = priv; + struct mlxsw_thermal_area *linecard; + + linecard = &thermal->line_cards[slot_index]; + if (!linecard->active) + return; + linecard->active = false; + mlxsw_thermal_gearboxes_fini(thermal, linecard); + mlxsw_thermal_modules_fini(thermal, linecard); +} + +static struct mlxsw_linecards_event_ops mlxsw_thermal_event_ops = { + .got_active = mlxsw_thermal_got_active, + .got_inactive = mlxsw_thermal_got_inactive, +}; + int mlxsw_thermal_init(struct mlxsw_core *core, const struct mlxsw_bus_info *bus_info, struct mlxsw_thermal **p_thermal) @@ -1018,14 +1077,25 @@ int mlxsw_thermal_init(struct mlxsw_core *core, if (err) goto err_thermal_gearboxes_init; + err = mlxsw_linecards_event_ops_register(core, + &mlxsw_thermal_event_ops, + thermal); + if (err) + goto err_linecards_event_ops_register; + err = thermal_zone_device_enable(thermal->tzdev); if (err) goto err_thermal_zone_device_enable; + thermal->line_cards[0].active = true; *p_thermal = thermal; return 0; err_thermal_zone_device_enable: + mlxsw_linecards_event_ops_unregister(thermal->core, + &mlxsw_thermal_event_ops, + thermal); +err_linecards_event_ops_register: mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]); err_thermal_gearboxes_init: mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); @@ -1049,6 +1119,10 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) { int i; + thermal->line_cards[0].active = false; + mlxsw_linecards_event_ops_unregister(thermal->core, + &mlxsw_thermal_event_ops, + thermal); mlxsw_thermal_gearboxes_fini(thermal, &thermal->line_cards[0]); mlxsw_thermal_modules_fini(thermal, &thermal->line_cards[0]); if (thermal->tzdev) { -- cgit v1.2.3 From 99a03b3193f628a525ee0191e60418af6960004b Mon Sep 17 00:00:00 
2001 From: Vadim Pasternak Date: Tue, 19 Apr 2022 17:54:31 +0300 Subject: mlxsw: core_hwmon: Add interfaces for line card initialization and de-initialization Add callback functions for line card 'hwmon' initialization and de-initialization. Each line card is associated with the relevant 'hwmon' device, which may contain thermal attributes for the cages and gearboxes found on this line card. The line card 'hwmon' initialization / de-initialization APIs are to be called when line card is set to active / inactive state by got_active() / got_inactive() callbacks from line card state machine. For example cage temperature for module #9 located at line card #7 will be exposed by utility 'sensors' like: linecard#07 front panel 009: +32.0C (crit = +70.0C, emerg = +80.0C) And temperature for gearbox #3 located at line card #5 will be exposed like: linecard#05 gearbox 003: +41.0C (highest = +41.0C) Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 84 ++++++++++++++++++++++++ 1 file changed, 84 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index fff6f248d6f7..70735068cf29 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -19,6 +19,7 @@ #define MLXSW_HWMON_ATTR_PER_SENSOR 3 #define MLXSW_HWMON_ATTR_PER_MODULE 7 #define MLXSW_HWMON_ATTR_PER_GEARBOX 4 +#define MLXSW_HWMON_DEV_NAME_LEN_MAX 16 #define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \ MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \ @@ -41,6 +42,7 @@ static int mlxsw_hwmon_get_attr_index(int index, int count) } struct mlxsw_hwmon_dev { + char name[MLXSW_HWMON_DEV_NAME_LEN_MAX]; struct mlxsw_hwmon *hwmon; struct device *hwmon_dev; struct attribute_group group; @@ -51,6 +53,7 @@ struct mlxsw_hwmon_dev { u8 sensor_count; u8 module_sensor_max; u8 slot_index; + bool active; }; struct mlxsw_hwmon { @@ -780,6 +783,75 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon_dev *mlxsw_hwmon_dev) return 0; } +static void +mlxsw_hwmon_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_hwmon *hwmon = priv; + struct mlxsw_hwmon_dev *linecard; + struct device *dev; + int err; + + dev = hwmon->bus_info->dev; + linecard = &hwmon->line_cards[slot_index]; + if (linecard->active) + return; + /* For the main board, module sensor indexes start from 1, sensor index + * 0 is used for the ASIC. Use the same numbering for line cards. 
+ */ + linecard->sensor_count = 1; + linecard->slot_index = slot_index; + linecard->hwmon = hwmon; + err = mlxsw_hwmon_module_init(linecard); + if (err) { + dev_err(dev, "Failed to configure hwmon objects for line card modules in slot %d\n", + slot_index); + return; + } + + err = mlxsw_hwmon_gearbox_init(linecard); + if (err) { + dev_err(dev, "Failed to configure hwmon objects for line card gearboxes in slot %d\n", + slot_index); + return; + } + + linecard->groups[0] = &linecard->group; + linecard->group.attrs = linecard->attrs; + sprintf(linecard->name, "%s#%02u", "linecard", slot_index); + linecard->hwmon_dev = + hwmon_device_register_with_groups(dev, linecard->name, + linecard, linecard->groups); + if (IS_ERR(linecard->hwmon_dev)) { + dev_err(dev, "Failed to register hwmon objects for line card in slot %d\n", + slot_index); + return; + } + + linecard->active = true; +} + +static void +mlxsw_hwmon_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, + void *priv) +{ + struct mlxsw_hwmon *hwmon = priv; + struct mlxsw_hwmon_dev *linecard; + + linecard = &hwmon->line_cards[slot_index]; + if (!linecard->active) + return; + linecard->active = false; + hwmon_device_unregister(linecard->hwmon_dev); + /* Reset attributes counter */ + linecard->attrs_count = 0; +} + +static struct mlxsw_linecards_event_ops mlxsw_hwmon_event_ops = { + .got_active = mlxsw_hwmon_got_active, + .got_inactive = mlxsw_hwmon_got_inactive, +}; + int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct mlxsw_hwmon **p_hwmon) @@ -836,10 +908,19 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, goto err_hwmon_register; } + err = mlxsw_linecards_event_ops_register(mlxsw_hwmon->core, + &mlxsw_hwmon_event_ops, + mlxsw_hwmon); + if (err) + goto err_linecards_event_ops_register; + mlxsw_hwmon->line_cards[0].hwmon_dev = hwmon_dev; + mlxsw_hwmon->line_cards[0].active = true; *p_hwmon = mlxsw_hwmon; return 0; +err_linecards_event_ops_register: + hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev); err_hwmon_register: err_temp_gearbox_init: err_temp_module_init: @@ -851,6 +932,9 @@ err_temp_init: void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon) { + mlxsw_hwmon->line_cards[0].active = false; + mlxsw_linecards_event_ops_unregister(mlxsw_hwmon->core, + &mlxsw_hwmon_event_ops, mlxsw_hwmon); hwmon_device_unregister(mlxsw_hwmon->line_cards[0].hwmon_dev); kfree(mlxsw_hwmon); } -- cgit v1.2.3 From 4facbe3d4426720b65354be1d0f5608c65eebc20 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 18 Apr 2022 06:29:21 +0000 Subject: drivers: net: davinci_mdio: using pm_runtime_resume_and_get instead of pm_runtime_get_sync Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Link: https://lore.kernel.org/r/20220418062921.2557884-1-chi.minghao@zte.com.cn Signed-off-by: Paolo Abeni --- drivers/net/ethernet/ti/davinci_mdio.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index fce2626e34fa..ea3772618043 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -134,11 +134,9 @@ static int davinci_mdio_reset(struct mii_bus *bus) u32 phy_mask, ver; int ret; - ret = pm_runtime_get_sync(data->dev); - if (ret < 0) { - pm_runtime_put_noidle(data->dev); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) return ret; - } /* wait 
for scan logic to settle */ msleep(PHY_MAX_ADDR * data->access_time); @@ -232,11 +230,9 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - ret = pm_runtime_get_sync(data->dev); - if (ret < 0) { - pm_runtime_put_noidle(data->dev); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) return ret; - } reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | (phy_id << 16)); @@ -276,11 +272,9 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - ret = pm_runtime_get_sync(data->dev); - if (ret < 0) { - pm_runtime_put_noidle(data->dev); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) return ret; - } reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | (phy_id << 16) | (phy_data & USERACCESS_DATA)); -- cgit v1.2.3 From 9c8774e629a1950c24b44e3c8fb93d76fb644b49 Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Mon, 18 Apr 2022 18:22:13 +0800 Subject: net: eql: Use kzalloc instead of kmalloc/memset Use kzalloc rather than duplicating its implementation, which makes code simple and easy to understand. Signed-off-by: Haowen Bai Link: https://lore.kernel.org/r/1650277333-31090-1-git-send-email-baihaowen@meizu.com Signed-off-by: Paolo Abeni --- drivers/net/eql.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 1111d1f33865..557ca8ff9dec 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user * if ((master_dev->flags & IFF_UP) == IFF_UP) { /* slave is not a master & not already a slave: */ if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) { - slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL); + slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL); equalizer_t *eql = netdev_priv(master_dev); int ret; if (!s) return -ENOMEM; - memset(s, 0, sizeof(*s)); s->dev = slave_dev; s->priority = srq.priority; s->priority_bps = srq.priority; -- cgit v1.2.3 From e130e8d5434b8732926c3eb71f678dda0d77da61 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 21 Apr 2022 15:21:25 +0200 Subject: ixgbe, xsk: Get rid of redundant 'fallthrough' Intel drivers translate actions returned from XDP programs to their own return codes that have the following mapping: XDP_REDIRECT -> IXGBE_XDP_{REDIR,CONSUMED} XDP_TX -> IXGBE_XDP_{TX,CONSUMED} XDP_DROP -> IXGBE_XDP_CONSUMED XDP_ABORTED -> IXGBE_XDP_CONSUMED XDP_PASS -> IXGBE_XDP_PASS Commit c7dd09fd4628 ("ixgbe, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full") introduced new translation XDP_REDIRECT -> IXGBE_XDP_EXIT which is set when XSK RQ gets full and to indicate that driver should stop further Rx processing. This happens for unsuccessful xdp_do_redirect() so it is valuable to call trace_xdp_exception() for this case. In order to avoid IXGBE_XDP_EXIT -> IXGBE_XDP_CONSUMED overwrite, XDP_DROP case was moved above which in turn made the 'fallthrough' that is in XDP_ABORTED useless as it became the last label in the switch statement. Simply drop this leftover. 
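For illustration, a minimal sketch of the resulting switch shape (assuming the surrounding 'act', 'result', 'rx_ring' and 'xdp_prog' variables; this is a simplified sketch, not the exact driver code):

	switch (act) {
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	case XDP_ABORTED:
		result = IXGBE_XDP_CONSUMED;
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* Last label of the switch: there is nothing left to fall
		 * through to, so a trailing fallthrough; here would annotate
		 * a transition that cannot happen.
		 */
	}

The same reasoning applies to the i40e counterpart below.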
Fixes: c7dd09fd4628 ("ixgbe, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full") Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220421132126.471515-2-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 68532cffd453..1703c640a434 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -144,7 +144,6 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, result = IXGBE_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ } return result; } -- cgit v1.2.3 From 9d87e41a6d644d4cee66a72fd1656fac5b7850d1 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 21 Apr 2022 15:21:26 +0200 Subject: i40e, xsk: Get rid of redundant 'fallthrough' Intel drivers translate actions returned from XDP programs to their own return codes that have the following mapping: XDP_REDIRECT -> I40E_XDP_{REDIR,CONSUMED} XDP_TX -> I40E_XDP_{TX,CONSUMED} XDP_DROP -> I40E_XDP_CONSUMED XDP_ABORTED -> I40E_XDP_CONSUMED XDP_PASS -> I40E_XDP_PASS Commit b8aef650e549 ("i40e, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full") introduced new translation XDP_REDIRECT -> I40E_XDP_EXIT which is set when XSK RQ gets full and to indicate that driver should stop further Rx processing. This happens for unsuccessful xdp_do_redirect() so it is valuable to call trace_xdp_exception() for this case. In order to avoid I40E_XDP_EXIT -> I40E_XDP_CONSUMED overwrite, XDP_DROP case was moved above which in turn made the 'fallthrough' that is in XDP_ABORTED useless as it became the last label in the switch statement. Simply drop this leftover. Fixes: b8aef650e549 ("i40e, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full") Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220421132126.471515-3-maciej.fijalkowski@intel.com --- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 050280fd10c1..af3e7e6afc85 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -189,7 +189,6 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) result = I40E_XDP_CONSUMED; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ } return result; } -- cgit v1.2.3 From f1ed409fb1eef7695c1ac84b109acf4b01a61399 Mon Sep 17 00:00:00 2001 From: Yunbo Yu Date: Mon, 18 Apr 2022 22:18:12 +0800 Subject: net: cdc-ncm: Move spin_lock_bh() to spin_lock() It is unnecessary to call spin_lock_bh() because this code already runs in a tasklet. Signed-off-by: Yunbo Yu Acked-by: Oliver Neukum Signed-off-by: David S.
Miller --- drivers/net/usb/cdc_ncm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 15f91d691bba..cdca00c0dc1f 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1492,19 +1492,19 @@ static void cdc_ncm_txpath_bh(struct tasklet_struct *t) struct cdc_ncm_ctx *ctx = from_tasklet(ctx, t, bh); struct usbnet *dev = ctx->dev; - spin_lock_bh(&ctx->mtx); + spin_lock(&ctx->mtx); if (ctx->tx_timer_pending != 0) { ctx->tx_timer_pending--; cdc_ncm_tx_timeout_start(ctx); - spin_unlock_bh(&ctx->mtx); + spin_unlock(&ctx->mtx); } else if (dev->net != NULL) { ctx->tx_reason_timeout++; /* count reason for transmitting */ - spin_unlock_bh(&ctx->mtx); + spin_unlock(&ctx->mtx); netif_tx_lock_bh(dev->net); usbnet_start_xmit(NULL, dev->net); netif_tx_unlock_bh(dev->net); } else { - spin_unlock_bh(&ctx->mtx); + spin_unlock(&ctx->mtx); } } -- cgit v1.2.3 From 1c604f91b7735a8d55ba56c8580b4b9276d6d7ab Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 18 Apr 2022 15:37:59 +0100 Subject: myri10ge: remove redundant assignment to variable status Variable status is being assigned a value that is never read, it is being re-assigned again later on. The assignment is redundant and can be removed. Cleans up clang scan build warning: drivers/net/ethernet/myricom/myri10ge/myri10ge.c:582:7: warning: Although the value stored to 'status' is used in the enclosing expression, the value is never actually read from 'status' [deadcode.DeadStores] Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 21d2645885ce..fe5e77330f5f 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -579,7 +579,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) int status; unsigned i; - if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { + if (request_firmware(&fw, mgp->fw_name, dev) < 0) { dev_err(dev, "Unable to load %s firmware image via hotplug\n", mgp->fw_name); status = -EINVAL; -- cgit v1.2.3 From e350dbac3c09a3318f871e0fab255e234408bde4 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Tue, 19 Apr 2022 11:03:52 +0000 Subject: net: ethernet: ti: am65-cpsw-ethtool: use pm_runtime_resume_and_get Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 72acdf802258..abc1e4276cf0 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -380,11 +380,9 @@ static int am65_cpsw_ethtool_op_begin(struct net_device *ndev) struct am65_cpsw_common *common = am65_ndev_to_common(ndev); int ret; - ret = pm_runtime_get_sync(common->dev); - if (ret < 0) { + ret = pm_runtime_resume_and_get(common->dev); + if (ret < 0) dev_err(common->dev, "ethtool begin failed %d\n", ret); - pm_runtime_put_noidle(common->dev); - } return ret; } -- cgit v1.2.3 From 59359597b010ced20e6e14c8660834c05c2a96b7 Mon Sep 17 00:00:00 2001 From: Baowen Zheng Date: Tue, 19 Apr 2022 14:44:43 +0200 Subject: nfp: support 802.1ad VLAN assingment to VF The NFP driver already supports assignment of 802.1Q VLANs to VFs e.g. # ip link set $DEV vf $VF_NUM vlan $VLAN_ID [proto 802.1Q] This patch enhances the NFP driver to also allow assingment of 802.1ad VLANs to VFs. e.g. # ip link set $DEV vf $VF_NUM vlan $VLAN_ID proto 802.1ad Signed-off-by: Bin Chen Signed-off-by: Baowen Zheng Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 43 ++++++++++++++++------ drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h | 3 ++ 2 files changed, 35 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 3fdaaf8ed2ba..4627715a5e32 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -95,15 +95,17 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct nfp_app *app = nfp_app_from_netdev(netdev); + u16 update = NFP_NET_VF_CFG_MB_UPD_VLAN; + bool is_proto_sup = true; unsigned int vf_offset; - u16 vlan_tci; + u32 vlan_tag; int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan"); if (err) return err; - if (vlan_proto != htons(ETH_P_8021Q)) + if (!eth_type_vlan(vlan_proto)) return -EOPNOTSUPP; if (vlan > 4095 || qos > 7) { @@ -112,14 +114,32 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, return -EINVAL; } + /* Check if fw supports or not */ + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"); + if (err) + is_proto_sup = false; + + if (vlan_proto != htons(ETH_P_8021Q)) { + if (!is_proto_sup) + return -EOPNOTSUPP; + update |= NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO; + } + /* Write VLAN tag to VF entry in VF config symbol */ - vlan_tci = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) | + vlan_tag = FIELD_PREP(NFP_NET_VF_CFG_VLAN_VID, vlan) | FIELD_PREP(NFP_NET_VF_CFG_VLAN_QOS, qos); + + /* vlan_tag of 0 means that the configuration should be cleared and in + * such circumstances setting the TPID has no meaning when + * configuring firmware. 
+ */ + if (vlan_tag && is_proto_sup) + vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto)); + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; - writew(vlan_tci, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); + writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); - return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_VLAN, - "vlan"); + return nfp_net_sriov_update(app, vf, update, "vlan"); } int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) @@ -209,7 +229,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, { struct nfp_app *app = nfp_app_from_netdev(netdev); unsigned int vf_offset; - u16 vlan_tci; + u32 vlan_tag; u32 mac_hi; u16 mac_lo; u8 flags; @@ -225,7 +245,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); flags = readb(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_CTRL); - vlan_tci = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); + vlan_tag = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); memset(ivi, 0, sizeof(*ivi)); ivi->vf = vf; @@ -233,9 +253,10 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, put_unaligned_be32(mac_hi, &ivi->mac[0]); put_unaligned_be16(mac_lo, &ivi->mac[4]); - ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tci); - ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci); - + ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag); + ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag); + if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto")) + ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag)); ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index 786be58a907e..7b72cc083476 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -19,6 +19,7 @@ #define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2) #define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3) #define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) +#define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) @@ -26,6 +27,7 @@ #define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2) #define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3) #define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) +#define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -43,6 +45,7 @@ #define NFP_NET_VF_CFG_LS_MODE_ENABLE 1 #define NFP_NET_VF_CFG_LS_MODE_DISABLE 2 #define NFP_NET_VF_CFG_VLAN 0x8 +#define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000 #define NFP_NET_VF_CFG_VLAN_QOS 0xe000 #define NFP_NET_VF_CFG_VLAN_VID 0x0fff -- cgit v1.2.3 From 0844d36f771d3fe3c418df7569396a9a58feab85 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Tue, 19 Apr 2022 10:06:25 -0400 Subject: USB2NET : SR9800 : change SR9800_BULKIN_SIZE from global to static Smatch reports this issue sr9800.h:166:53: warning: symbol 'SR9800_BULKIN_SIZE' was not declared. Should it be static? Global variables should not be defined in header files. This only works because sr9800.h in only included by sr9800.c Change the storage-class specifier to static. 
And since it does not change add type qualifier const. Signed-off-by: Tom Rix Signed-off-by: David S. Miller --- drivers/net/usb/sr9800.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h index 18f670251275..952e6f7c0321 100644 --- a/drivers/net/usb/sr9800.h +++ b/drivers/net/usb/sr9800.h @@ -163,7 +163,7 @@ #define SR9800_MAX_BULKIN_24K 6 #define SR9800_MAX_BULKIN_32K 7 -struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = { +static const struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = { /* 2k */ {2048, 0x8000, 0x8001}, /* 4k */ -- cgit v1.2.3 From 81ee0eb6c0fe34490ed92667538197d9295e899e Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 20 Apr 2022 10:58:51 +0900 Subject: ipv6: Use ipv6_only_sock() helper in condition. This patch replaces some sk_ipv6only tests with ipv6_only_sock(). Signed-off-by: Kuniyuki Iwashima Reviewed-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 2 +- drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 2 +- drivers/net/ethernet/netronome/nfp/crypto/tls.c | 2 +- net/core/filter.c | 2 +- net/ipv6/af_inet6.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 38e152548126..c9e75a9de282 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5226,7 +5226,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow) switch (sk->sk_family) { #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - if (sk->sk_ipv6only || + if (ipv6_only_sock(sk) || ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) { flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; flow->addrs.v6addrs.src = inet6_sk(sk)->saddr; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 59683f79959c..60b648b46f75 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -483,7 +483,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, tx_info->ip_family = AF_INET; #if IS_ENABLED(CONFIG_IPV6) } else { - if (!sk->sk_ipv6only && + if (!ipv6_only_sock(sk) && ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { memcpy(daaddr, &sk->sk_daddr, 4); tx_info->ip_family = AF_INET; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index 4c4ee524176c..3ae6067c7e6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -102,7 +102,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv, break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - if (!sk->sk_ipv6only && + if (!ipv6_only_sock(sk) && ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { accel_fs_tcp_set_ipv4_flow(spec, sk); ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP]; diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c index 84d66d138c3d..78368e71ce83 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c @@ -289,7 +289,7 @@ nfp_net_tls_add(struct 
net_device *netdev, struct sock *sk, switch (sk->sk_family) { #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: - if (sk->sk_ipv6only || + if (ipv6_only_sock(sk) || ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) { req_sz = sizeof(struct nfp_crypto_req_add_v6); ipv6 = true; diff --git a/net/core/filter.c b/net/core/filter.c index 143f442a9505..2ed81c48c282 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7099,7 +7099,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, */ switch (((struct iphdr *)iph)->version) { case 4: - if (sk->sk_family == AF_INET6 && sk->sk_ipv6only) + if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk)) return -EINVAL; mss = tcp_v4_get_syncookie(sk, iph, th, &cookie); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 6595a78672c8..70564ddccc46 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -318,7 +318,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, /* Binding to v4-mapped address on a v6-only socket * makes no sense */ - if (sk->sk_ipv6only) { + if (ipv6_only_sock(sk)) { err = -EINVAL; goto out; } -- cgit v1.2.3 From 29e96fe9e0ec0f0fe1dd306a4ccb7b8983eae67a Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Wed, 20 Apr 2022 16:33:10 +0530 Subject: net: macb: In ZynqMP initialization make SGMII phy configuration optional In the macb binding documentation "phys" is an optional property. Make implementation in line with it. This change allows the traditional flow in which first stage bootloader does PS-GT configuration to work along with newer use cases in which PS-GT configuration is managed by the phy-zynqmp driver. It fixes below macb probe failure when macb DT node doesn't have SGMII phys handle. "macb ff0b0000.ethernet: error -ENODEV: failed to get PS-GTR PHY" Signed-off-by: Radhey Shyam Pandey Reviewed-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index a5140d4d3baf..6434e74c04f1 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4588,7 +4588,7 @@ static int zynqmp_init(struct platform_device *pdev) if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { /* Ensure PS-GTR PHY device used in SGMII mode is ready */ - bp->sgmii_phy = devm_phy_get(&pdev->dev, "sgmii-phy"); + bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); if (IS_ERR(bp->sgmii_phy)) { ret = PTR_ERR(bp->sgmii_phy); -- cgit v1.2.3 From 5e7260712b9a76de19c32f15d667f8f93e573978 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Thu, 21 Apr 2022 14:47:26 +0200 Subject: qed: Remove IP services API. qed_nvmetcp_ip_services.c and its corresponding header file were introduced in commit 806ee7f81a2b ("qed: Add IP services APIs support") but there's still no users for any of the functions they declare. Since these files are effectively unused, let's just drop them. Found by code inspection. Compile-tested only. 
Signed-off-by: Guillaume Nault Link: https://lore.kernel.org/r/351ac8c847980e22850eb390553f8cc0e1ccd0ce.1650545051.git.gnault@redhat.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/qlogic/qed/Makefile | 3 +- .../ethernet/qlogic/qed/qed_nvmetcp_ip_services.c | 238 --------------------- include/linux/qed/qed_nvmetcp_ip_services_if.h | 29 --- 3 files changed, 1 insertion(+), 269 deletions(-) delete mode 100644 drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c delete mode 100644 include/linux/qed/qed_nvmetcp_ip_services_if.h (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 0d9c2fe0245d..3d2098f21bb7 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -30,8 +30,7 @@ qed-$(CONFIG_QED_OOO) += qed_ooo.o qed-$(CONFIG_QED_NVMETCP) += \ qed_nvmetcp.o \ - qed_nvmetcp_fw_funcs.o \ - qed_nvmetcp_ip_services.o + qed_nvmetcp_fw_funcs.o qed-$(CONFIG_QED_RDMA) += \ qed_iwarp.o \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c deleted file mode 100644 index 7e286cddbedb..000000000000 --- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c +++ /dev/null @@ -1,238 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) -/* - * Copyright 2021 Marvell. All rights reserved. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#define QED_IP_RESOL_TIMEOUT 4 - -int qed_route_ipv4(struct sockaddr_storage *local_addr, - struct sockaddr_storage *remote_addr, - struct sockaddr *hardware_address, - struct net_device **ndev) -{ - struct neighbour *neigh = NULL; - __be32 *loc_ip, *rem_ip; - struct rtable *rt; - int rc = -ENXIO; - int retry; - - loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr; - rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr; - *ndev = NULL; - rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/); - if (IS_ERR(rt)) { - pr_err("lookup route failed\n"); - rc = PTR_ERR(rt); - goto return_err; - } - - neigh = dst_neigh_lookup(&rt->dst, rem_ip); - if (!neigh) { - rc = -ENOMEM; - ip_rt_put(rt); - goto return_err; - } - - *ndev = rt->dst.dev; - ip_rt_put(rt); - - /* If not resolved, kick-off state machine towards resolution */ - if (!(neigh->nud_state & NUD_VALID)) - neigh_event_send(neigh, NULL); - - /* query neighbor until resolved or timeout */ - retry = QED_IP_RESOL_TIMEOUT; - while (!(neigh->nud_state & NUD_VALID) && retry > 0) { - msleep(1000); - retry--; - } - - if (neigh->nud_state & NUD_VALID) { - /* copy resolved MAC address */ - neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev); - hardware_address->sa_family = (*ndev)->type; - rc = 0; - } - - neigh_release(neigh); - if (!(*loc_ip)) { - *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE); - local_addr->ss_family = AF_INET; - } - -return_err: - - return rc; -} -EXPORT_SYMBOL(qed_route_ipv4); - -int qed_route_ipv6(struct sockaddr_storage *local_addr, - struct sockaddr_storage *remote_addr, - struct sockaddr *hardware_address, - struct net_device **ndev) -{ - struct neighbour *neigh = NULL; - struct dst_entry *dst; - struct flowi6 fl6; - int rc = -ENXIO; - int retry; - - memset(&fl6, 0, sizeof(fl6)); - fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr; - fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr; - dst = ip6_route_output(&init_net, NULL, 
&fl6); - if (!dst || dst->error) { - if (dst) { - dst_release(dst); - pr_err("lookup route failed %d\n", dst->error); - } - - goto out; - } - - neigh = dst_neigh_lookup(dst, &fl6.daddr); - if (neigh) { - *ndev = ip6_dst_idev(dst)->dev; - - /* If not resolved, kick-off state machine towards resolution */ - if (!(neigh->nud_state & NUD_VALID)) - neigh_event_send(neigh, NULL); - - /* query neighbor until resolved or timeout */ - retry = QED_IP_RESOL_TIMEOUT; - while (!(neigh->nud_state & NUD_VALID) && retry > 0) { - msleep(1000); - retry--; - } - - if (neigh->nud_state & NUD_VALID) { - neigh_ha_snapshot((u8 *)hardware_address->sa_data, - neigh, *ndev); - hardware_address->sa_family = (*ndev)->type; - rc = 0; - } - - neigh_release(neigh); - - if (ipv6_addr_any(&fl6.saddr)) { - if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev, - &fl6.daddr, 0, &fl6.saddr)) { - pr_err("Unable to find source IP address\n"); - goto out; - } - - local_addr->ss_family = AF_INET6; - ((struct sockaddr_in6 *)local_addr)->sin6_addr = - fl6.saddr; - } - } - - dst_release(dst); - -out: - - return rc; -} -EXPORT_SYMBOL(qed_route_ipv6); - -void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id) -{ - if (is_vlan_dev(*ndev)) { - *vlan_id = vlan_dev_vlan_id(*ndev); - *ndev = vlan_dev_real_dev(*ndev); - } -} -EXPORT_SYMBOL(qed_vlan_get_ndev); - -struct pci_dev *qed_validate_ndev(struct net_device *ndev) -{ - struct net_device *upper; - struct pci_dev *pdev; - - for_each_pci_dev(pdev) { - if (pdev->driver && - !strcmp(pdev->driver->name, "qede")) { - upper = pci_get_drvdata(pdev); - if (upper->ifindex == ndev->ifindex) - return pdev; - } - } - - return NULL; -} -EXPORT_SYMBOL(qed_validate_ndev); - -__be16 qed_get_in_port(struct sockaddr_storage *sa) -{ - return sa->ss_family == AF_INET - ? ((struct sockaddr_in *)sa)->sin_port - : ((struct sockaddr_in6 *)sa)->sin6_port; -} -EXPORT_SYMBOL(qed_get_in_port); - -int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr, - struct socket **sock, u16 *port) -{ - struct sockaddr_storage sa; - int rc = 0; - - rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, - sock); - if (rc) { - pr_warn("failed to create socket: %d\n", rc); - goto err; - } - - (*sock)->sk->sk_allocation = GFP_KERNEL; - sk_set_memalloc((*sock)->sk); - - rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr, - sizeof(local_ip_addr)); - - if (rc) { - pr_warn("failed to bind socket: %d\n", rc); - goto err_sock; - } - - rc = kernel_getsockname(*sock, (struct sockaddr *)&sa); - if (rc < 0) { - pr_warn("getsockname() failed: %d\n", rc); - goto err_sock; - } - - *port = ntohs(qed_get_in_port(&sa)); - - return 0; - -err_sock: - sock_release(*sock); - sock = NULL; -err: - - return rc; -} -EXPORT_SYMBOL(qed_fetch_tcp_port); - -void qed_return_tcp_port(struct socket *sock) -{ - if (sock && sock->sk) { - tcp_set_state(sock->sk, TCP_CLOSE); - sock_release(sock); - } -} -EXPORT_SYMBOL(qed_return_tcp_port); diff --git a/include/linux/qed/qed_nvmetcp_ip_services_if.h b/include/linux/qed/qed_nvmetcp_ip_services_if.h deleted file mode 100644 index 3604aee53796..000000000000 --- a/include/linux/qed/qed_nvmetcp_ip_services_if.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ -/* - * Copyright 2021 Marvell. All rights reserved. 
- */ - -#ifndef _QED_IP_SERVICES_IF_H -#define _QED_IP_SERVICES_IF_H - -#include -#include -#include -#include - -int qed_route_ipv4(struct sockaddr_storage *local_addr, - struct sockaddr_storage *remote_addr, - struct sockaddr *hardware_address, - struct net_device **ndev); -int qed_route_ipv6(struct sockaddr_storage *local_addr, - struct sockaddr_storage *remote_addr, - struct sockaddr *hardware_address, - struct net_device **ndev); -void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id); -struct pci_dev *qed_validate_ndev(struct net_device *ndev); -void qed_return_tcp_port(struct socket *sock); -int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr, - struct socket **sock, u16 *port); -__be16 qed_get_in_port(struct sockaddr_storage *sa); - -#endif /* _QED_IP_SERVICES_IF_H */ -- cgit v1.2.3 From 869376d0859acf40c83b2c3942ad9d5a8b5d31e4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 20 Apr 2022 17:20:07 +0300 Subject: mlxsw: core_linecards: Fix size of array element during ini_files allocation types_info->ini_files is an array of pointers to struct mlxsw_linecard_ini_file. Fix the kmalloc_array() argument to be of a size of a pointer. Addresses-Coverity: ("Incorrect expression (SIZEOF_MISMATCH)") Fixes: b217127e5e4e ("mlxsw: core_linecards: Add line card objects and implement provisioning") Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Link: https://lore.kernel.org/r/20220420142007.3041173-1-idosch@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/core_linecards.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 90e487cc2e2a..5c9869dcf674 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -1032,7 +1032,7 @@ static int mlxsw_linecard_types_init(struct mlxsw_core *mlxsw_core, } types_info->ini_files = kmalloc_array(types_info->count, - sizeof(struct mlxsw_linecard_ini_file), + sizeof(struct mlxsw_linecard_ini_file *), GFP_KERNEL); if (!types_info->ini_files) { err = -ENOMEM; -- cgit v1.2.3 From b649695248b15bfd921abda7c9096e1a93f8d67d Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Wed, 20 Apr 2022 20:50:15 +0530 Subject: net: phy: LAN87xx: add ethtool SQI support This patch add the support for measuring Signal Quality Index for LAN87xx and LAN937x T1 Phy. It uses the SQI Method 5 for obtaining the values. 
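For context, a rough sketch of how these readings are assumed to reach userspace through the generic phylib/ethtool netlink plumbing (not something this patch adds): the ethtool link-state code queries the PHY driver's get_sqi()/get_sqi_max() callbacks and reports the pair as the ETHTOOL_A_LINKSTATE_SQI / ETHTOOL_A_LINKSTATE_SQI_MAX attributes, roughly:

	/* Simplified sketch of the assumed consumer side; the real code
	 * also takes the phydev lock and does more error handling.
	 */
	static int linkstate_get_sqi_sketch(struct phy_device *phydev)
	{
		if (!phydev->drv || !phydev->drv->get_sqi)
			return -EOPNOTSUPP;

		return phydev->drv->get_sqi(phydev); /* 0 .. get_sqi_max() */
	}

So the driver callbacks below only need to return a value in the [0, get_sqi_max()] range (0..7 here, per LAN87XX_MAX_SQI) or a negative errno.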
Signed-off-by: Arun Ramadoss Reviewed-by: Andrew Lunn Signed-off-by: Jakub Kicinski --- drivers/net/phy/microchip_t1.c | 48 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index c2c0e361fd3d..796fbcb7dafe 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -68,7 +68,12 @@ #define T1_POST_LCK_MUFACT_CFG_REG 0x1C #define T1_TX_RX_FIFO_CFG_REG 0x02 #define T1_TX_LPF_FIR_CFG_REG 0x55 +#define T1_COEF_CLK_PWR_DN_CFG 0x04 +#define T1_COEF_RW_CTL_CFG 0x0D #define T1_SQI_CONFIG_REG 0x2E +#define T1_SQI_CONFIG2_REG 0x4A +#define T1_DCQ_SQI_REG 0xC3 +#define T1_DCQ_SQI_MSK GENMASK(3, 1) #define T1_MDIO_CONTROL2_REG 0x10 #define T1_INTERRUPT_SOURCE_REG 0x18 #define T1_INTERRUPT2_SOURCE_REG 0x08 @@ -82,6 +87,9 @@ #define T1_MODE_STAT_REG 0x11 #define T1_LINK_UP_MSK BIT(0) +/* SQI defines */ +#define LAN87XX_MAX_SQI 0x07 + #define DRIVER_AUTHOR "Nisar Sayed " #define DRIVER_DESC "Microchip LAN87XX/LAN937x T1 PHY driver" @@ -346,9 +354,20 @@ static int lan87xx_phy_init(struct phy_device *phydev) T1_TX_LPF_FIR_CFG_REG, 0x1011, 0 }, { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, T1_TX_LPF_FIR_CFG_REG, 0x1000, 0 }, + /* Setup SQI measurement */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_COEF_CLK_PWR_DN_CFG, 0x16d6, 0 }, /* SQI enable */ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, T1_SQI_CONFIG_REG, 0x9572, 0 }, + /* SQI select mode 5 */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_SQI_CONFIG2_REG, 0x0001, 0 }, + /* Throws the first SQI reading */ + { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, + T1_COEF_RW_CTL_CFG, 0x0301, 0 }, + { PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP, + T1_DCQ_SQI_REG, 0, 0 }, /* Flag LPS and WUR as idle errors */ { PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI, T1_MDIO_CONTROL2_REG, 0x0014, 0 }, @@ -724,6 +743,31 @@ static int lan87xx_config_aneg(struct phy_device *phydev) return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl); } +static int lan87xx_get_sqi(struct phy_device *phydev) +{ + u8 sqi_value = 0; + int rc; + + rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, + PHYACC_ATTR_BANK_DSP, T1_COEF_RW_CTL_CFG, 0x0301); + if (rc < 0) + return rc; + + rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, + PHYACC_ATTR_BANK_DSP, T1_DCQ_SQI_REG, 0x0); + if (rc < 0) + return rc; + + sqi_value = FIELD_GET(T1_DCQ_SQI_MSK, rc); + + return sqi_value; +} + +static int lan87xx_get_sqi_max(struct phy_device *phydev) +{ + return LAN87XX_MAX_SQI; +} + static struct phy_driver microchip_t1_phy_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX), @@ -737,6 +781,8 @@ static struct phy_driver microchip_t1_phy_driver[] = { .resume = genphy_resume, .config_aneg = lan87xx_config_aneg, .read_status = lan87xx_read_status, + .get_sqi = lan87xx_get_sqi, + .get_sqi_max = lan87xx_get_sqi_max, .cable_test_start = lan87xx_cable_test_start, .cable_test_get_status = lan87xx_cable_test_get_status, }, @@ -750,6 +796,8 @@ static struct phy_driver microchip_t1_phy_driver[] = { .resume = genphy_resume, .config_aneg = lan87xx_config_aneg, .read_status = lan87xx_read_status, + .get_sqi = lan87xx_get_sqi, + .get_sqi_max = lan87xx_get_sqi_max, .cable_test_start = lan87xx_cable_test_start, .cable_test_get_status = lan87xx_cable_test_get_status, } -- cgit v1.2.3 From f28c47bb9fd3286a99194876a6e528be87484c75 Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Thu, 21 Apr 2022 10:48:03 +0800 Subject: tsnep: Remove useless null check 
before call of_node_put() No need to add null check before call of_node_put(), since the implementation of of_node_put() has done it. Signed-off-by: Haowen Bai Link: https://lore.kernel.org/r/1650509283-26168-1-git-send-email-baihaowen@meizu.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/engleder/tsnep_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 904f3304727e..49c93aa38862 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -1091,8 +1091,7 @@ static int tsnep_mdio_init(struct tsnep_adapter *adapter) retval = of_mdiobus_register(adapter->mdiobus, np); out: - if (np) - of_node_put(np); + of_node_put(np); return retval; } -- cgit v1.2.3 From 31693d02b06ed2f0ac668cede16cf3258e86204e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 21 Apr 2022 09:55:46 +0100 Subject: net: hns3: Fix spelling mistake "actvie" -> "active" There is a spelling mistake in a netdev_info message. Fix it. Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20220421085546.321792-1-colin.i.king@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index bb001f597857..1db8a86f046d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1915,7 +1915,7 @@ static int hns3_set_tunable(struct net_device *netdev, return ret; } - netdev_info(netdev, "the actvie tx spare buf size is %u, due to page order\n", + netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n", priv->ring->tx_spare->len); break; -- cgit v1.2.3 From ab589ac24ee1c00be2ae47aba7ef868394b7fa9c Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Tue, 12 Apr 2022 09:17:42 +0000 Subject: wlcore: main: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220412091742.2533527-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/main.c | 225 ++++++++++++---------------------- 1 file changed, 75 insertions(+), 150 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 8440e0942674..6959efa4bfa9 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -141,11 +141,9 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) if (!wl->conf.rx_streaming.interval) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl1271_set_rx_streaming(wl, wlvif, true); if (ret < 0) @@ -174,11 +172,9 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work) if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl1271_set_rx_streaming(wl, wlvif, false); if (ret) @@ -223,11 +219,9 @@ static void 
wlcore_rc_update_work(struct work_struct *work) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } if (ieee80211_vif_is_mesh(vif)) { ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap, @@ -537,11 +531,9 @@ static int wlcore_irq_locked(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } while (!done && loopcount--) { smp_mb__after_atomic(); @@ -838,11 +830,9 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl) * Do not send a stop fwlog command if the fw is hanged or if * dbgpins are used (due to some fw bug). */ - error = pm_runtime_get_sync(wl->dev); - if (error < 0) { - pm_runtime_put_noidle(wl->dev); + error = pm_runtime_resume_and_get(wl->dev); + if (error < 0) return; - } if (!wl->watchdog_recovery && wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS) wl12xx_cmd_stop_fwlog(wl); @@ -937,11 +927,9 @@ static void wl1271_recovery_work(struct work_struct *work) if (wl->state == WLCORE_STATE_OFF || wl->plt) goto out_unlock; - error = pm_runtime_get_sync(wl->dev); - if (error < 0) { + error = pm_runtime_resume_and_get(wl->dev); + if (error < 0) wl1271_warning("Enable for recovery failed"); - pm_runtime_put_noidle(wl->dev); - } wlcore_disable_interrupts_nosync(wl); if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { @@ -1741,9 +1729,8 @@ static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = pm_runtime_get_sync(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); if (ret < 0) { - pm_runtime_put_noidle(wl->dev); mutex_unlock(&wl->mutex); return ret; } @@ -1855,11 +1842,9 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) @@ -2060,11 +2045,9 @@ static void wlcore_channel_switch_work(struct work_struct *work) vif = wl12xx_wlvif_to_vif(wlvif); ieee80211_chswitch_done(vif, false); - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_cmd_stop_channel_switch(wl, wlvif); @@ -2131,11 +2114,9 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work) if (!time_after(time_spare, wlvif->pending_auth_reply_time)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } /* cancel the ROC if active */ wlcore_update_inconn_sta(wl, wlvif, NULL, false); @@ -2591,11 +2572,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, * Call runtime PM only after possible wl12xx_init_fw() above * is done. Otherwise we do not have interrupts enabled. 
*/ - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out_unlock; - } if (wl12xx_need_fw_change(wl, vif_count, true)) { wl12xx_force_active_psm(wl); @@ -2691,11 +2670,9 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { /* disable active roles */ - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto deinit; - } if (wlvif->bss_type == BSS_TYPE_STA_BSS || wlvif->bss_type == BSS_TYPE_IBSS) { @@ -3129,11 +3106,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } /* configure each interface */ wl12xx_for_each_wlvif(wl, wlvif) { @@ -3213,11 +3188,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif(wl, wlvif) { if (wlcore_is_p2p_mgmt(wlvif)) @@ -3470,11 +3443,9 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, goto out_wake_queues; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out_wake_queues; - } ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); @@ -3622,11 +3593,9 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, goto out_unlock; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out_unlock; - } wlvif->default_key = key_idx; @@ -3659,11 +3628,9 @@ void wlcore_regdomain_config(struct wl1271 *wl) if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wlcore_cmd_regdomain_config_locked(wl); if (ret < 0) { @@ -3706,11 +3673,9 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } /* fail if there is any role in ROC */ if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { @@ -3749,11 +3714,9 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, if (wl->scan.state == WL1271_SCAN_STATE_IDLE) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } if (wl->scan.state != WL1271_SCAN_STATE_DONE) { ret = wl->ops->scan_stop(wl, wlvif); @@ -3800,11 +3763,9 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl->ops->sched_scan_start(wl, wlvif, req, ies); if (ret < 0) @@ -3834,11 +3795,9 @@ static int 
wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl->ops->sched_scan_stop(wl, wlvif); @@ -3862,11 +3821,9 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl1271_acx_frag_threshold(wl, value); if (ret < 0) @@ -3894,11 +3851,9 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif(wl, wlvif) { ret = wl1271_acx_rts_threshold(wl, wlvif, value); @@ -4653,11 +4608,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } if ((changed & BSS_CHANGED_TXPOWER) && bss_conf->txpower != wlvif->power_level) { @@ -4714,11 +4667,9 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif(wl, wlvif) { struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); @@ -4771,11 +4722,9 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wlvif->band = ctx->def.chan->band; wlvif->channel = channel; @@ -4823,11 +4772,9 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } if (wlvif->radar_enabled) { wl1271_debug(DEBUG_MAC80211, "Stop radar detection"); @@ -4893,11 +4840,9 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, mutex_lock(&wl->mutex); - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } for (i = 0; i < n_vifs; i++) { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif); @@ -4939,11 +4884,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } /* * the txop is confed in units of 32us by the mac80211, @@ -4987,11 +4930,9 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto 
out; - } ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime); if (ret < 0) @@ -5306,11 +5247,9 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); @@ -5364,11 +5303,9 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ba_bitmap = &wl->links[hlid].ba_bitmap; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d", tid, action); @@ -5476,11 +5413,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, if (wlvif->bss_type == BSS_TYPE_STA_BSS && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl1271_set_band_rate(wl, wlvif); wlvif->basic_rate = @@ -5518,11 +5453,9 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } /* TODO: change mac80211 to pass vif as param */ @@ -5612,11 +5545,9 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl->ops->channel_switch(wl, wlvif, &ch_switch); if (ret) @@ -5667,11 +5598,9 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl12xx_start_dev(wl, wlvif, chan->band, channel); if (ret < 0) @@ -5724,11 +5653,9 @@ static int wlcore_roc_completed(struct wl1271 *wl) goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = __wlcore_roc_completed(wl); @@ -5810,11 +5737,9 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out_sleep; - } ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm); if (ret < 0) -- cgit v1.2.3 From da8e909c99e4525da5ce56814fca5e9cc4a20a63 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 13 Apr 2022 09:34:31 +0000 Subject: wlcore: sysfs: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413093431.2538254-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/sysfs.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index 35b535c125b6..f0c7e09b314d 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ 
b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -56,11 +56,9 @@ static ssize_t bt_coex_state_store(struct device *dev, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl1271_acx_sg_enable(wl, wl->sg_enabled); pm_runtime_mark_last_busy(wl->dev); -- cgit v1.2.3 From 3447eebe6084f3d0919e72eafa1b7d19e3f87580 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 13 Apr 2022 09:35:02 +0000 Subject: wlcore: testmode: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413093502.2538316-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/testmode.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index 3a17b9a8207e..3f338b8096c7 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -83,11 +83,9 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl1271_cmd_test(wl, buf, buf_len, answer); if (ret < 0) { @@ -158,11 +156,9 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { -- cgit v1.2.3 From d8e11976d8e897ffedca5d1f3b9032156274eceb Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 13 Apr 2022 09:39:39 +0000 Subject: wlcore: vendor_cmd: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413093939.2538825-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/vendor_cmd.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index e1bd344c4ebc..e4269e2b0098 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -53,11 +53,9 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wlcore_smart_config_start(wl, nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID])); @@ -88,11 +86,9 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wlcore_smart_config_stop(wl); @@ -135,11 +131,9 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, goto out; } - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); 
+ ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wlcore_smart_config_set_group_key(wl, nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]), -- cgit v1.2.3 From 00bfc8964f4349dacc6799fe712de186e9bd8bb8 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Tue, 19 Apr 2022 11:04:45 +0000 Subject: wlcore: sdio: using pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220419110445.2574424-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/sdio.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 72fc41ac83c0..7b4e8cc36b49 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -132,9 +132,8 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; - ret = pm_runtime_get_sync(&card->dev); + ret = pm_runtime_resume_and_get(&card->dev); if (ret < 0) { - pm_runtime_put_noidle(&card->dev); dev_err(glue->dev, "%s: failed to get_sync(%d)\n", __func__, ret); -- cgit v1.2.3 From e05c7ddfeb23182421972b9074fb8f5aa356cfee Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 20 Apr 2022 09:01:41 +0000 Subject: wlcore: cmd: using pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220420090141.2588553-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/cmd.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 09eb1116be0e..df6029ef6304 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -178,11 +178,9 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto free_vector; - } do { if (time_after(jiffies, timeout_time)) { -- cgit v1.2.3 From e2e23a791745a194aa8e6fb4983c2e7b408ab6e1 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Tue, 12 Apr 2022 16:15:50 +0300 Subject: ath11k: add support for extended wmi service bit When the WMI service bits are reported from firmware they are divided into multiple segments, with 128 bits in each segment. The first segment is processed by ath11k_wmi_service_bitmap_copy(), the second segment is processed by ath11k_service_available_event() with WMI_TAG_SERVICE_AVAILABLE_EVENT. When the service bit exceed 256 bits, then firmware reports it by tag WMI_TAG_ARRAY_UINT32 in WMI_SERVICE_AVAILABLE_EVENTID. Currently ath11k does not process the third segment. Upcoming features need to know if firmware support is available for the features, so add processing of the third segment. 
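As a worked example of the segment layout (illustration only; it assumes
WMI_AVAIL_SERVICE_BITS_IN_SIZE32 is 32, as the existing handling of the first
two segments implies), the third segment carries service IDs 256..383 as four
u32 words tagged WMI_TAG_ARRAY_UINT32, and a given service maps onto a
word/bit pair like this:

	/* e.g. WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326 (added later in this series) */
	u32 svc  = 326;
	u32 off  = svc - WMI_MAX_EXT_SERVICE;	/* 326 - 256 = 70 */
	u32 word = off / 32;			/* wmi_ext2_service_bitmap[2] */
	u32 bit  = off % 32;			/* BIT(6), same as svc % 32 here */

	/* the parser below sets bit 326 of ab->wmi_ab.svc_map when firmware
	 * reports wmi_ext2_service_bitmap[2] & BIT(6)
	 */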
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Signed-off-by: Wen Gong Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401120948.1312956-2-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/wmi.c | 81 +++++++++++++++++++++-------------- drivers/net/wireless/ath/ath11k/wmi.h | 9 +++- 2 files changed, 56 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 3af24b18204e..a5876e0378fe 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -7297,47 +7297,64 @@ static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, rcu_read_unlock(); } -static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) +static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) { - const void **tb; const struct wmi_service_available_event *ev; - int ret; + u32 *wmi_ext2_service_bitmap; int i, j; - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); - if (IS_ERR(tb)) { - ret = PTR_ERR(tb); - ath11k_warn(ab, "failed to parse tlv: %d\n", ret); - return; - } + switch (tag) { + case WMI_TAG_SERVICE_AVAILABLE_EVENT: + ev = (struct wmi_service_available_event *)ptr; + for (i = 0, j = WMI_MAX_SERVICE; + i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; + i++) { + do { + if (ev->wmi_service_segment_bitmap[i] & + BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) + set_bit(j, ab->wmi_ab.svc_map); + } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); + } - ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT]; - if (!ev) { - ath11k_warn(ab, "failed to fetch svc available ev"); - kfree(tb); - return; - } + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", + ev->wmi_service_segment_bitmap[0], + ev->wmi_service_segment_bitmap[1], + ev->wmi_service_segment_bitmap[2], + ev->wmi_service_segment_bitmap[3]); + break; + case WMI_TAG_ARRAY_UINT32: + wmi_ext2_service_bitmap = (u32 *)ptr; + for (i = 0, j = WMI_MAX_EXT_SERVICE; + i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE; + i++) { + do { + if (wmi_ext2_service_bitmap[i] & + BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) + set_bit(j, ab->wmi_ab.svc_map); + } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); + } - /* TODO: Use wmi_service_segment_offset information to get the service - * especially when more services are advertised in multiple sevice - * available events. 
- */ - for (i = 0, j = WMI_MAX_SERVICE; - i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; - i++) { - do { - if (ev->wmi_service_segment_bitmap[i] & - BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) - set_bit(j, ab->wmi_ab.svc_map); - } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", + wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1], + wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]); + break; } + return 0; +} - ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x", - ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1], - ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]); +static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + int ret; - kfree(tb); + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_services_parser, + NULL); + if (ret) + ath11k_warn(ab, "failed to parse services available tlv %d\n", ret); } static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index c9ffefea549c..4ff7e71409ce 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -1992,6 +1992,7 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_ACK_TIMEOUT = 126, WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127, + /* The first 128 bits */ WMI_MAX_SERVICE = 128, WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128, @@ -2084,7 +2085,11 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_EXT2_MSG = 220, WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249, - WMI_MAX_EXT_SERVICE + /* The second 128 bits */ + WMI_MAX_EXT_SERVICE = 256, + + /* The third 128 bits */ + WMI_MAX_EXT2_SERVICE = 384 }; enum { @@ -5370,7 +5375,7 @@ struct ath11k_wmi_base { struct completion service_ready; struct completion unified_ready; - DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE); + DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE); wait_queue_head_t tx_credits_wq; const struct wmi_peer_flags_map *peer_flags; u32 num_mem_chunks; -- cgit v1.2.3 From 652f69ed9c1b4f8ef4ea9d6738f1e6263a5ff26d Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Tue, 12 Apr 2022 16:15:50 +0300 Subject: ath11k: Add support for SAR Add ath11k_mac_op_set_bios_sar_specs() to ath11k_ops, this function is called when user space application calls NL80211_CMD_SET_SAR_SPECS. ath11k also registers SAR type and frequency ranges to wiphy so user space can query SAR capabilities. This feature is currently enabled for WCN6855. 
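For reference, a minimal sketch (hypothetical values, not the exact driver
flow) of how one user-space sub-spec lands in the 22-entry BIOS SAR table
built below; the table is split in halves, slots 0..10 for chain 0 and 11..21
for chain 1, and both chains get the same limit:

	/* sub-spec from NL80211_CMD_SET_SAR_SPECS, hypothetical values:
	 *   freq_range_index = 3   -> 5725-5810 MHz in the WCN6855 ranges
	 *   power            = 40  -> the requested limit
	 */
	sar_tbl[3]                             = 40;	/* chain 0, slot 3 */
	sar_tbl[3 + (BIOS_SAR_TABLE_LEN >> 1)] = 40;	/* chain 1, slot 14 */

	/* the filled table is then pushed to firmware with
	 * WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID, after the geo table has been set
	 */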
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-02431-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220401120948.1312956-3-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 6 +++ drivers/net/wireless/ath/ath11k/hw.c | 20 ++++++++++ drivers/net/wireless/ath/ath11k/hw.h | 2 + drivers/net/wireless/ath/ath11k/mac.c | 67 ++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.c | 70 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 27 +++++++++++++ 6 files changed, 192 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 1537ec0ae2e7..f011db4bcd2b 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -101,6 +101,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, + .bios_sar_capa = NULL, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -167,6 +168,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, + .bios_sar_capa = NULL, }, { .name = "qca6390 hw2.0", @@ -232,6 +234,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, + .bios_sar_capa = NULL, }, { .name = "qcn9074 hw1.0", @@ -297,6 +300,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, + .bios_sar_capa = NULL, }, { .name = "wcn6855 hw2.0", @@ -362,6 +366,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, + .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, }, { .name = "wcn6855 hw2.1", @@ -426,6 +431,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, + .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, }, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index d1b0e76d9ec2..46449a18b5cb 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -2154,3 +2154,23 @@ const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = { const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = { .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM, }; + +static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = { + {.start_freq = 2402, .end_freq = 2482 }, /* 2G ch1~ch13 */ + {.start_freq = 5150, .end_freq = 5250 }, /* 5G UNII-1 ch32~ch48 */ + {.start_freq = 5250, .end_freq = 5725 }, /* 5G UNII-2 ch50~ch144 */ + {.start_freq = 5725, .end_freq = 5810 }, /* 5G UNII-3 ch149~ch161 */ + {.start_freq = 5815, .end_freq = 5895 }, /* 5G UNII-4 ch163~ch177 */ + {.start_freq = 5925, .end_freq = 6165 }, /* 6G UNII-5 Ch1, Ch2 ~ Ch41 */ + {.start_freq = 6165, .end_freq = 6425 }, /* 6G UNII-5 ch45~ch93 */ + {.start_freq = 6425, .end_freq = 6525 }, /* 6G UNII-6 ch97~ch113 */ + {.start_freq = 6525, .end_freq = 6705 }, /* 6G UNII-7 ch117~ch149 */ + {.start_freq = 6705, .end_freq = 6875 }, /* 6G UNII-7 ch153~ch185 */ + {.start_freq = 6875, .end_freq = 7125 }, /* 6G UNII-8 ch189~ch233 */ +}; + +const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855 = { + .type = NL80211_SAR_TYPE_POWER, + 
.num_freq_ranges = (ARRAY_SIZE(ath11k_hw_sar_freq_ranges_wcn6855)), + .freq_ranges = ath11k_hw_sar_freq_ranges_wcn6855, +}; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 08f958c03ec4..b7ece3d5678c 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -195,6 +195,7 @@ struct ath11k_hw_params { bool current_cc_support; bool dbr_debug_support; bool global_reset; + const struct cfg80211_sar_capa *bios_sar_capa; }; struct ath11k_hw_ops { @@ -380,4 +381,5 @@ static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type) return "unknown"; } +extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855; #endif diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 175a4ae752f3..12fd30742664 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8273,6 +8273,68 @@ static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw, mutex_unlock(&ar->conf_mutex); } +static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar) +{ + struct ath11k *ar = hw->priv; + const struct cfg80211_sar_sub_specs *sspec = sar->sub_specs; + int ret, index; + u8 *sar_tbl; + u32 i; + + mutex_lock(&ar->conf_mutex); + + if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) || + !ar->ab->hw_params.bios_sar_capa) { + ret = -EOPNOTSUPP; + goto exit; + } + + if (!sar || sar->type != NL80211_SAR_TYPE_POWER || + sar->num_sub_specs == 0) { + ret = -EINVAL; + goto exit; + } + + ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret); + goto exit; + } + + sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL); + if (!sar_tbl) { + ret = -ENOMEM; + goto exit; + } + + for (i = 0; i < sar->num_sub_specs; i++) { + if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) { + ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n", + sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1); + continue; + } + + /* chain0 and chain1 share same power setting */ + sar_tbl[sspec->freq_range_index] = sspec->power; + index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1); + sar_tbl[index] = sspec->power; + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n", + sspec->freq_range_index, sar_tbl[sspec->freq_range_index]); + sspec++; + } + + ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl); + if (ret) + ath11k_warn(ar->ab, "failed to set sar power: %d", ret); + + kfree(sar_tbl); +exit: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + static const struct ieee80211_ops ath11k_ops = { .tx = ath11k_mac_op_tx, .start = ath11k_mac_op_start, @@ -8324,6 +8386,7 @@ static const struct ieee80211_ops ath11k_ops = { .ipv6_addr_change = ath11k_mac_op_ipv6_changed, #endif + .set_sar_specs = ath11k_mac_op_set_bios_sar_specs, }; static void ath11k_mac_update_ch_list(struct ath11k *ar, @@ -8746,6 +8809,10 @@ static int __ath11k_mac_register(struct ath11k *ar) ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); } + if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) && + ab->hw_params.bios_sar_capa) + ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa; + ret = ieee80211_register_hw(ar->hw); if (ret) { ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index a5876e0378fe..dcf31bdad47e 100644 --- 
a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -8883,3 +8883,73 @@ int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar, arvif->vdev_id); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); } + +int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val) +{ struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_set_sar_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *buf_ptr; + u32 len, sar_len_aligned, rsvd_len_aligned; + + sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32)); + rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32)); + len = sizeof(*cmd) + + TLV_HDR_SIZE + sar_len_aligned + + TLV_HDR_SIZE + rsvd_len_aligned; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->pdev_id = ar->pdev->pdev_id; + cmd->sar_len = BIOS_SAR_TABLE_LEN; + cmd->rsvd_len = BIOS_SAR_RSVD1_LEN; + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, sar_len_aligned); + buf_ptr += TLV_HDR_SIZE; + memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN); + + buf_ptr += sar_len_aligned; + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned); + + return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID); +} + +int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_set_geo_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *buf_ptr; + u32 len, rsvd_len_aligned; + + rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->pdev_id = ar->pdev->pdev_id; + cmd->rsvd_len = BIOS_SAR_RSVD2_LEN; + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned); + + return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 4ff7e71409ce..7600e9a52da8 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -285,6 +285,11 @@ enum wmi_tlv_cmd_id { WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID, WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID, WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID, + WMI_PDEV_GET_TPC_STATS_CMDID, + WMI_PDEV_ENABLE_DURATION_BASED_TX_MODE_SELECTION_CMDID, + WMI_PDEV_GET_DPD_STATUS_CMDID, + WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID, + WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID, WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV), WMI_VDEV_DELETE_CMDID, WMI_VDEV_START_REQUEST_CMDID, @@ -1859,6 +1864,8 @@ enum wmi_tlv_tag { WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD, WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD, WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD, + 
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8, + WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD, WMI_TAG_MAX }; @@ -2087,6 +2094,7 @@ enum wmi_tlv_service { /* The second 128 bits */ WMI_MAX_EXT_SERVICE = 256, + WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326, /* The third 128 bits */ WMI_MAX_EXT2_SERVICE = 384 @@ -5882,6 +5890,23 @@ struct wmi_gtk_rekey_offload_cmd { u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES]; } __packed; +#define BIOS_SAR_TABLE_LEN (22) +#define BIOS_SAR_RSVD1_LEN (6) +#define BIOS_SAR_RSVD2_LEN (18) + +struct wmi_pdev_set_sar_table_cmd { + u32 tlv_header; + u32 pdev_id; + u32 sar_len; + u32 rsvd_len; +} __packed; + +struct wmi_pdev_set_geo_table_cmd { + u32 tlv_header; + u32 pdev_id; + u32 rsvd_len; +} __packed; + int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id); struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); @@ -6060,5 +6085,7 @@ int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar, struct ath11k_vif *arvif, bool enable); int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar, struct ath11k_vif *arvif); +int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val); +int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar); #endif -- cgit v1.2.3 From 605194411d7379645ed44e447c4b804a0b476109 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Tue, 12 Apr 2022 16:15:53 +0300 Subject: ath11k: fix missing unlock on error in ath11k_wow_op_resume() Add the missing unlock before return from function ath11k_wow_op_resume() in the error handling case. Fixes: 90bf5c8d0f7e ("ath11k: purge rx pktlog when entering WoW") Reported-by: Hulk Robot Signed-off-by: Yang Yingliang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220408030912.3087293-1-yangyingliang@huawei.com --- drivers/net/wireless/ath/ath11k/wow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 6c2611f93739..9d088cebef03 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -758,7 +758,7 @@ int ath11k_wow_op_resume(struct ieee80211_hw *hw) ret = ath11k_dp_rx_pktlog_start(ar->ab); if (ret) { ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret); - return ret; + goto exit; } ret = ath11k_wow_wakeup(ar->ab); -- cgit v1.2.3 From 67888630addeaa2ae1c04d8d020daccbd26d05d9 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 11 Apr 2022 10:08:43 +0800 Subject: ath11k: Fix build warning without CONFIG_IPV6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/net/wireless/ath/ath11k/mac.c:8175:13: error: ‘ath11k_mac_op_ipv6_changed’ defined but not used [-Werror=unused-function] static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw, ^~~~~~~~~~~~~~~~~~~~~~~~~~ Wrap it with #ifdef block to fix this. 
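A minimal sketch of the guard pattern used by the fix (the helper name here is
hypothetical; the real hunk below wraps ath11k's NS-offload helpers the same
way). IS_ENABLED(CONFIG_IPV6) evaluates to 1 for both CONFIG_IPV6=y and =m,
which a plain #ifdef CONFIG_IPV6 would not cover in the modular case:

	#if IS_ENABLED(CONFIG_IPV6)
	static void example_ipv6_only_helper(struct ath11k *ar)
	{
		/* code referencing IPv6-only symbols only compiles here */
	}
	#endif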
Fixes: c3c36bfe998b ("ath11k: support ARP and NS offload") Signed-off-by: YueHaibing Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220411020843.10284-1-yuehaibing@huawei.com --- drivers/net/wireless/ath/ath11k/mac.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 12fd30742664..c987f365c63a 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8145,6 +8145,7 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, } } +#if IS_ENABLED(CONFIG_IPV6) static void ath11k_generate_ns_mc_addr(struct ath11k *ar, struct ath11k_arp_ns_offload *offload) { @@ -8239,6 +8240,7 @@ generate: /* generate ns multicast address */ ath11k_generate_ns_mc_addr(ar, offload); } +#endif static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, -- cgit v1.2.3 From 45286070e9e782f2f53b5272a227fc879fbda2c8 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 11 Apr 2022 01:36:02 +0000 Subject: wil6210: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get() is more appropriate for simplifing code. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220411013602.2517086-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ath/wil6210/pm.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index ed4df561e5c5..f521af575e9b 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -445,10 +445,9 @@ int wil_pm_runtime_get(struct wil6210_priv *wil) int rc; struct device *dev = wil_to_dev(wil); - rc = pm_runtime_get_sync(dev); + rc = pm_runtime_resume_and_get(dev); if (rc < 0) { - wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc); - pm_runtime_put_noidle(dev); + wil_err(wil, "pm_runtime_resume_and_get() failed, rc = %d\n", rc); return rc; } -- cgit v1.2.3 From 2dc509305cf956381532792cb8dceef2b1504765 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 9 Apr 2022 09:12:25 +0300 Subject: ath9k_htc: fix potential out of bounds access with invalid rxstatus->rs_keyix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "rxstatus->rs_keyix" eventually gets passed to test_bit() so we need to ensure that it is within the bitmap. 
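Put differently, the accepted ranges for the key index end up as follows
(sketch only; the smatch report and the actual hunk follow below):

	/* rs_keyix handling after the fix:
	 *   0 .. ATH_KEYMAX - 1     valid hardware key index, safe for test_bit()
	 *   ATH9K_RXKEYIX_INVALID   "no key" sentinel, still accepted
	 *   anything else           outside the keymap bitmap -> frame dropped
	 */
	if (rxstatus->rs_keyix >= ATH_KEYMAX &&
	    rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID)
		goto rx_next;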
drivers/net/wireless/ath/ath9k/common.c:46 ath9k_cmn_rx_accept() error: passing untrusted data 'rx_stats->rs_keyix' to 'test_bit()' Fixes: 4ed1a8d4a257 ("ath9k_htc: use ath9k_cmn_rx_accept") Signed-off-by: Dan Carpenter Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220409061225.GA5447@kili --- drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 6a850a0bfa8a..a23eaca0326d 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -1016,6 +1016,14 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, goto rx_next; } + if (rxstatus->rs_keyix >= ATH_KEYMAX && + rxstatus->rs_keyix != ATH9K_RXKEYIX_INVALID) { + ath_dbg(common, ANY, + "Invalid keyix, dropping (keyix: %d)\n", + rxstatus->rs_keyix); + goto rx_next; + } + /* Get the RX status information */ memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); -- cgit v1.2.3 From e999a5da28a0e0f7de242d841ef7d5e48f4646ae Mon Sep 17 00:00:00 2001 From: Thibaut VARÈNE Date: Sun, 17 Apr 2022 16:51:45 +0200 Subject: ath9k: fix QCA9561 PA bias level MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes an invalid TX PA DC bias level on QCA9561, which results in a very low output power and very low throughput as devices are further away from the AP (compared to other 2.4GHz APs). This patch was suggested by Felix Fietkau, who noted[1]: "The value written to that register is wrong, because while the mask definition AR_CH0_TOP2_XPABIASLVL uses a different value for 9561, the shift definition AR_CH0_TOP2_XPABIASLVL_S is hardcoded to 12, which is wrong for 9561." In real life testing, without this patch the 2.4GHz throughput on Yuncore XD3200 is around 10Mbps sitting next to the AP, and closer to practical maximum with the patch applied. [1] https://lore.kernel.org/all/91c58969-c60e-2f41-00ac-737786d435ae@nbd.name Signed-off-by: Thibaut VARÈNE Acked-by: Felix Fietkau Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220417145145.1847-1-hacks+kernel@slashdirt.org --- drivers/net/wireless/ath/ath9k/ar9003_phy.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index a171dbb29fbb..ad949eb02f3d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -720,7 +720,7 @@ #define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) #define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000) -#define AR_CH0_TOP2_XPABIASLVL_S 12 +#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12) #define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 
0x16298 : \ -- cgit v1.2.3 From 8e95061b5b9c519891dbe2e519dc3fc1d5f6f4a4 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 13 Apr 2022 09:33:56 +0000 Subject: wl18xx: debugfs: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get is more appropriate for simplifing code Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220413093356.2538192-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wl18xx/debugfs.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 2f921a44f1e2..80fbf740fe6d 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -264,11 +264,9 @@ static ssize_t radar_detection_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl18xx_cmd_radar_detection_debug(wl, channel); if (ret < 0) @@ -306,11 +304,9 @@ static ssize_t dynamic_fw_traces_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wl18xx_acx_dynamic_fw_traces(wl); if (ret < 0) @@ -368,11 +364,9 @@ static ssize_t radar_debug_mode_write(struct file *file, if (unlikely(wl->state != WLCORE_STATE_ON)) goto out; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } wl12xx_for_each_wlvif_ap(wl, wlvif) { wlcore_cmd_generic_cfg(wl, wlvif, -- cgit v1.2.3 From eefad995c24295138669e5522f9b04c31fef0755 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:15 +0800 Subject: rtw89: 8852c: add BB and RF parameters tables These parameters are used to initialize BB and RF hardware when we are going to bring up interface and start to transmit and receive. 
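The tables below are flat {address, value} pairs (struct rtw89_reg2_def). For
orientation, a minimal sketch of how such a table is walked at PHY init, with
a placeholder write helper standing in for the driver's real loader (the real
loader also has to handle the leading 0xF0FF0000-style entries, which appear
to select per-cut/RFE variants rather than being plain register writes):

	/* hypothetical sketch, not the driver's actual config path */
	static void apply_reg2_table(struct rtw89_dev *rtwdev,
				     const struct rtw89_reg2_def *regs,
				     u32 n_regs)
	{
		u32 i;

		for (i = 0; i < n_regs; i++)
			bb_write32(rtwdev, regs[i].addr, regs[i].data);
	}

	/* e.g. apply_reg2_table(rtwdev, rtw89_8852c_phy_bb_regs,
	 *			 ARRAY_SIZE(rtw89_8852c_phy_bb_regs));
	 */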
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 1 + drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 6 + .../net/wireless/realtek/rtw89/rtw8852c_table.c | 13697 +++++++++++++++++++ .../net/wireless/realtek/rtw89/rtw8852c_table.h | 16 + 5 files changed, 13721 insertions(+) create mode 100644 drivers/net/wireless/realtek/rtw89/rtw8852c_table.c create mode 100644 drivers/net/wireless/realtek/rtw89/rtw8852c_table.h (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 090d41656901..e8e31492b363 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2374,6 +2374,7 @@ struct rtw89_chip_info { const struct rtw89_pwr_cfg * const *pwr_on_seq; const struct rtw89_pwr_cfg * const *pwr_off_seq; const struct rtw89_phy_table *bb_table; + const struct rtw89_phy_table *bb_gain_table; const struct rtw89_phy_table *rf_table[RF_PATH_MAX]; const struct rtw89_phy_table *nctl_table; const struct rtw89_txpwr_table *byr_table; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 975c50495304..2c5bd381ebf5 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2101,6 +2101,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .pwr_on_seq = pwr_on_seq_8852a, .pwr_off_seq = pwr_off_seq_8852a, .bb_table = &rtw89_8852a_phy_bb_table, + .bb_gain_table = NULL, .rf_table = {&rtw89_8852a_phy_radioa_table, &rtw89_8852a_phy_radiob_table,}, .nctl_table = &rtw89_8852a_phy_nctl_table, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index d56d65661ce0..0855c41e32af 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -9,6 +9,7 @@ #include "phy.h" #include "reg.h" #include "rtw8852c.h" +#include "rtw8852c_table.h" static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = { [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size19, @@ -658,6 +659,11 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .rf_base_addr = {0xe000, 0xf000}, .pwr_on_seq = NULL, .pwr_off_seq = NULL, + .bb_table = &rtw89_8852c_phy_bb_table, + .bb_gain_table = &rtw89_8852c_phy_bb_gain_table, + .rf_table = {&rtw89_8852c_phy_radiob_table, + &rtw89_8852c_phy_radioa_table,}, + .nctl_table = &rtw89_8852c_phy_nctl_table, .dig_table = NULL, .hw_sec_hdr = true, .sec_ctrl_efuse_size = 4, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c new file mode 100644 index 000000000000..b0431874d7c4 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c @@ -0,0 +1,13697 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#include "phy.h" +#include "reg.h" +#include "rtw8852c_table.h" + +static const struct rtw89_reg2_def rtw89_8852c_phy_bb_regs[] = { + {0xF0FF0000, 0x00000000}, + {0xF03300FF, 0x00000001}, + {0xF03400FF, 0x00000002}, + {0x70C, 0x00000020}, + {0x704, 0x601E0100}, + {0x4000, 0x00000000}, + {0x4004, 0xCA014000}, + {0x4008, 0xC751D4F0}, + {0x400C, 0x44511475}, + {0x4010, 0x00000000}, + {0x4014, 0x00000000}, + {0x44AC, 0x01F60380}, + {0x4018, 0x4F4C4B4B}, + {0x401C, 0x494A4E52}, 
+ {0x4020, 0x4D504E4B}, + {0x4024, 0x4F4C4949}, + {0x4028, 0x49484C50}, + {0x402C, 0x4C50504C}, + {0x4030, 0x54544D4A}, + {0x4034, 0x504B5654}, + {0x4038, 0x6A6C605A}, + {0x403C, 0x48484848}, + {0x4040, 0x48483D47}, + {0x4044, 0x3D474848}, + {0x4048, 0x51484848}, + {0x404C, 0x4A4A404F}, + {0x4050, 0x514F4C4A}, + {0x4054, 0x524E4A4A}, + {0x4058, 0x4A4A5154}, + {0x405C, 0x53555554}, + {0x4060, 0x45454545}, + {0x4064, 0x45454144}, + {0x4068, 0x40434445}, + {0x406C, 0x44454545}, + {0x4070, 0x44444043}, + {0x4074, 0x42434444}, + {0x4078, 0x46454444}, + {0x407C, 0x44444843}, + {0x4080, 0x4B4E4A47}, + {0x4084, 0x514D4A49}, + {0x4088, 0x4A495454}, + {0x408C, 0x5454514D}, + {0x4090, 0x524E4B4A}, + {0x4094, 0x4C4B5455}, + {0x4098, 0x55565550}, + {0x409C, 0x5959504D}, + {0x40A0, 0x544E5D5A}, + {0x40A4, 0x7975665F}, + {0x40A8, 0x48484848}, + {0x40AC, 0x48483D47}, + {0x40B0, 0x3D474848}, + {0x40B4, 0x48484848}, + {0x40B8, 0x48483E48}, + {0x40BC, 0x3E4A4A49}, + {0x40C0, 0x514E4948}, + {0x40C4, 0x4A49404F}, + {0x40C8, 0x42525555}, + {0x40CC, 0x47474747}, + {0x40D0, 0x47474747}, + {0x40D4, 0x47474747}, + {0x40D8, 0x48484848}, + {0x40DC, 0x48474848}, + {0x40E0, 0x4A484848}, + {0x40E4, 0x49484847}, + {0x40E8, 0x4847524D}, + {0x40EC, 0x55544F4B}, + {0x40F0, 0x00000000}, + {0x4604, 0x4C4C4D4E}, + {0x4608, 0x3D3D6A56}, + {0x460C, 0x53515140}, + {0x4610, 0x42404041}, + {0x4614, 0x54544B48}, + {0x4618, 0x795D5554}, + {0x461C, 0x3E3E3D3D}, + {0x4620, 0x47474240}, + {0x4624, 0x55524A48}, + {0x4ED4, 0x00000000}, + {0x40F4, 0x00000006}, + {0x4628, 0x00000000}, + {0x4E9C, 0x26663333}, + {0x4EA0, 0x6EDA4148}, + {0x4EA4, 0x599A0000}, + {0x4EA8, 0x40000000}, + {0x4ED0, 0x00000001}, + {0x40F8, 0x00000000}, + {0x40FC, 0x8C30C30C}, + {0x4100, 0x4C30C30C}, + {0x4104, 0x0C30C30C}, + {0x4108, 0x0C30C30C}, + {0x410C, 0x0C30C30C}, + {0x4110, 0x0C30C30C}, + {0x4114, 0x28A28A28}, + {0x4118, 0x28A28A28}, + {0x411C, 0x28A28A28}, + {0x4120, 0x28A28A28}, + {0x4124, 0x28A28A28}, + {0x4128, 0x28A28A28}, + {0x412C, 0x06666666}, + {0x4130, 0x33333333}, + {0x4134, 0x33333333}, + {0x4138, 0x33333333}, + {0x413C, 0x00000031}, + {0x462C, 0x0C30C30C}, + {0x4630, 0x0C30C30C}, + {0x4634, 0x28A28A28}, + {0x4638, 0x28A28A28}, + {0x463C, 0x33333333}, + {0x4640, 0x00000033}, + {0x4140, 0x5100600A}, + {0x4144, 0x18363113}, + {0x4148, 0x1D976DDC}, + {0x414C, 0x1C072DD7}, + {0x4150, 0x1127CDF4}, + {0x4154, 0x1E37BDF1}, + {0x4158, 0x1FB7F1D6}, + {0x415C, 0x1EA7DDF9}, + {0x4160, 0x1FE445DD}, + {0x4164, 0x1F97F1FE}, + {0x4168, 0x1FF781ED}, + {0x416C, 0x1FA7F5FE}, + {0x4170, 0x1E07B913}, + {0x4174, 0x1FD7FDFF}, + {0x4178, 0x1E17B9FA}, + {0x417C, 0x19A66914}, + {0x4180, 0x10F65598}, + {0x4184, 0x14A5A111}, + {0x4188, 0x1D3765DB}, + {0x418C, 0x17C685CA}, + {0x4190, 0x1107C5F3}, + {0x4194, 0x1B5785EB}, + {0x4198, 0x1F97ED8F}, + {0x419C, 0x1BC7A5F3}, + {0x41A0, 0x1FE43595}, + {0x41A4, 0x1EB7D9FC}, + {0x41A8, 0x1FE65DBE}, + {0x41AC, 0x1EC7D9FC}, + {0x41B0, 0x1976FCFF}, + {0x41B4, 0x1F77F5FF}, + {0x41B8, 0x1976FDEC}, + {0x41BC, 0x198664EF}, + {0x41C0, 0x11062D93}, + {0x41C4, 0x10C4E910}, + {0x41C8, 0x1CA759DB}, + {0x41CC, 0x1335A9B5}, + {0x41D0, 0x1097B9F3}, + {0x41D4, 0x17B72DE1}, + {0x41D8, 0x1F67ED42}, + {0x41DC, 0x18074DE9}, + {0x41E0, 0x1FD40547}, + {0x41E4, 0x1D57ADF9}, + {0x41E8, 0x1FE52182}, + {0x41EC, 0x1D67B1F9}, + {0x41F0, 0x14860CE1}, + {0x41F4, 0x1EC7E9FE}, + {0x41F8, 0x14860DD6}, + {0x41FC, 0x195664C7}, + {0x4200, 0x0005E58A}, + {0x4204, 0x00000000}, + {0x4208, 0x00000000}, + {0x420C, 0x7A000000}, + {0x4210, 0x0F9F3D7A}, + {0x4214, 0x0040817C}, + 
{0x4218, 0x00E10204}, + {0x421C, 0x257D94CD}, + {0x4220, 0x0802DB6D}, + {0x4224, 0x00000200}, + {0x4228, 0x04688000}, + {0x4644, 0x00000000}, + {0x4648, 0x00000000}, + {0x464C, 0x00000000}, + {0x4650, 0x00000020}, + {0x4ECC, 0x00000001}, + {0x422C, 0x0060B002}, + {0x4230, 0x9A8249A8}, + {0x4234, 0x26A1469E}, + {0x4238, 0x2099A824}, + {0x423C, 0x2359461C}, + {0x4240, 0x1631A675}, + {0x4244, 0x2C6B1D63}, + {0x4248, 0x0000000E}, + {0x424C, 0x00000001}, + {0x4250, 0x00000001}, + {0x4254, 0x00000000}, + {0x4258, 0x00000000}, + {0x425C, 0x00000000}, + {0x4260, 0x01E0000C}, + {0x4654, 0x00000000}, + {0x4658, 0x00000000}, + {0x465C, 0x0000001E}, + {0x4E74, 0x00000000}, + {0x4264, 0x00000000}, + {0x4268, 0x00000000}, + {0x426C, 0x0418317C}, + {0x46C0, 0x00000001}, + {0x4270, 0x00D6135C}, + {0x46C4, 0x00000033}, + {0x4274, 0x00000000}, + {0x4278, 0x00000000}, + {0x427C, 0x00000000}, + {0x4280, 0x00000000}, + {0x4284, 0x00000000}, + {0x4288, 0x00000000}, + {0x46D8, 0x00000000}, + {0x46DC, 0x00000000}, + {0x46E0, 0x00000000}, + {0x46E4, 0x00000000}, + {0x46E8, 0x00000000}, + {0x428C, 0x00000000}, + {0x4290, 0x00000000}, + {0x4294, 0x00000000}, + {0x4298, 0x84026000}, + {0x429C, 0x0051AC20}, + {0x46EC, 0x1020C040}, + {0x46F0, 0xB8BEBEB8}, + {0x46F4, 0x021102BE}, + {0x46F8, 0x14221142}, + {0x46FC, 0x18C4098C}, + {0x4700, 0x00021084}, + {0x42A0, 0x02024008}, + {0x42A4, 0x00000000}, + {0x42A8, 0x00000000}, + {0x42AC, 0x22CE803C}, + {0x42B0, 0x32000000}, + {0x42B4, 0x996FD67D}, + {0x42B8, 0xBD67D67D}, + {0x42BC, 0x7D67D65B}, + {0x42C0, 0x28029F59}, + {0x42C4, 0x00280280}, + {0x4704, 0x00000000}, + {0x42C8, 0x00000000}, + {0x42CC, 0x00000000}, + {0x42D0, 0x00000003}, + {0x4708, 0x00280000}, + {0x42D4, 0x00000001}, + {0x42D8, 0x61861800}, + {0x42DC, 0x830C30C3}, + {0x42E0, 0xC30C30C3}, + {0x42E4, 0x830C30C3}, + {0x42E8, 0x451450C3}, + {0x42EC, 0x05145145}, + {0x42F0, 0x05145145}, + {0x42F4, 0x05145145}, + {0x42F8, 0x03207145}, + {0x42FC, 0x041C32C6}, + {0x4300, 0x031C5247}, + {0x4304, 0x030C5143}, + {0x4308, 0x030C30C3}, + {0x430C, 0x0F3CF3C3}, + {0x4310, 0x0F3CF3CF}, + {0x4314, 0x0F3CF3CF}, + {0x4318, 0x0F3CF3CF}, + {0x431C, 0x0F3CF3CF}, + {0x4320, 0x030C10C3}, + {0x4324, 0x051430C3}, + {0x4328, 0x051490CB}, + {0x432C, 0x030C70D1}, + {0x4330, 0x050C50C7}, + {0x4334, 0x051492CB}, + {0x4338, 0x05145145}, + {0x433C, 0x05145145}, + {0x4340, 0x05145145}, + {0x4344, 0x05145145}, + {0x4348, 0x090CD243}, + {0x434C, 0x0918A1C5}, + {0x4350, 0x071C3143}, + {0x4354, 0x071431C3}, + {0x4358, 0x0F3CF1C5}, + {0x435C, 0x0F3CF3CF}, + {0x4360, 0x0F3CF3CF}, + {0x4364, 0x0F3CF3CF}, + {0x4368, 0x0F3CF3CF}, + {0x436C, 0x090C91CF}, + {0x4370, 0x11243143}, + {0x4374, 0x9777A777}, + {0x4378, 0xBB7BAC95}, + {0x437C, 0xB667B889}, + {0x4380, 0x7B9B8899}, + {0x4384, 0x7A5567C8}, + {0x4388, 0x2278CCCC}, + {0x438C, 0x7C222222}, + {0x4390, 0x0000049B}, + {0x470C, 0x00000888}, + {0x4EB4, 0x00000002}, + {0x4394, 0x001CCCCC}, + {0x4710, 0xCCCCCAAC}, + {0x4714, 0x0000AACC}, + {0x4398, 0x00000000}, + {0x439C, 0x00000008}, + {0x49A4, 0x00000000}, + {0x43A0, 0x00000000}, + {0x43A4, 0x00000000}, + {0x43A8, 0x00000000}, + {0x43AC, 0x10000000}, + {0x43B0, 0x00401001}, + {0x43B4, 0x00061003}, + {0x4718, 0x00003000}, + {0x43B8, 0x000024D8}, + {0x43BC, 0x00000000}, + {0x43C0, 0x10000020}, + {0x43C4, 0x20000200}, + {0x43C8, 0x00000000}, + {0x43CC, 0x04000000}, + {0x43D0, 0x44000100}, + {0x43D4, 0x60804060}, + {0x43D8, 0x44204210}, + {0x43DC, 0x82108082}, + {0x43E0, 0x82108402}, + {0x43E4, 0xC8082108}, + {0x43E8, 0xC8202084}, + {0x43EC, 0x44208208}, + 
{0x43F0, 0x84108204}, + {0x43F4, 0xD0108104}, + {0x43F8, 0xF8210108}, + {0x43FC, 0x6431E930}, + {0x4400, 0x02109468}, + {0x4404, 0x10C61C22}, + {0x4408, 0x02109469}, + {0x440C, 0x10C61C22}, + {0x4410, 0x00041049}, + {0x471C, 0x0B02C080}, + {0x4414, 0x00000000}, + {0x4418, 0x00000000}, + {0x441C, 0x80000000}, + {0x4420, 0xB0200000}, + {0x4424, 0x00001FF0}, + {0x4780, 0xEC000000}, + {0x4784, 0x8C400020}, + {0x4964, 0x51089104}, + {0x4968, 0x88448844}, + {0x496C, 0x07000044}, + {0x4E4C, 0x00000000}, + {0x4428, 0x00000000}, + {0x442C, 0x00000000}, + {0x4430, 0x00000000}, + {0x4434, 0x00000000}, + {0x4438, 0x590642D0}, + {0x443C, 0x398668A0}, + {0x4440, 0x6C100808}, + {0x4444, 0x4A145344}, + {0x4448, 0x0C5B008F}, + {0x444C, 0x6E30498A}, + {0x4450, 0x656E371B}, + {0x4454, 0x00000F53}, + {0x49A8, 0x68120000}, + {0x49AC, 0xDA0681E0}, + {0x49BC, 0x14060180}, + {0x49D8, 0x600603FF}, + {0x49DC, 0x3C502000}, + {0x49E0, 0x2C580050}, + {0x49E4, 0x45B055EF}, + {0x49E8, 0x00000290}, + {0x4A0C, 0x00000001}, + {0x4A28, 0x0DAC1B58}, + {0x4A2C, 0x0000001E}, + {0x4E50, 0x16878003}, + {0x4E54, 0x0F00F078}, + {0x4E58, 0x03C1E0B4}, + {0x4E5C, 0x78584830}, + {0x4E60, 0x88C0140C}, + {0x4E64, 0x90302C24}, + {0x4E68, 0x0F84A00A}, + {0x4E6C, 0x00000011}, + {0x4E78, 0x00003039}, + {0x4E7C, 0x0000D431}, + {0x4E80, 0x00008235}, + {0x4E84, 0x00000000}, + {0x4E88, 0x000056CE}, + {0x4E8C, 0x00002B67}, + {0x4E90, 0x00000237}, + {0x4EB8, 0x00004624}, + {0x4A30, 0x00000000}, + {0x4458, 0x00000000}, + {0x445C, 0x4801442E}, + {0x4460, 0x0051A0B8}, + {0x4A34, 0x0000011F}, + {0x4EBC, 0x00000000}, + {0x4A38, 0x0000011F}, + {0x4EC0, 0x00000000}, + {0x4464, 0x00000000}, + {0x4468, 0x00000000}, + {0x446C, 0x00000000}, + {0x4470, 0x00000000}, + {0x4474, 0x00000000}, + {0x4478, 0x00000000}, + {0x447C, 0x00000000}, + {0x4480, 0x2A0AA040}, + {0x4484, 0x0A886926}, + {0x4488, 0x00000004}, + {0x4A3C, 0x00002B1C}, + {0x448C, 0x00000000}, + {0x4490, 0x88000000}, + {0x4494, 0x10000000}, + {0x4498, 0xE0000000}, + {0x4A08, 0x00000FE6}, + {0x4A40, 0x00000000}, + {0x4A44, 0x00000000}, + {0x4A48, 0x00000000}, + {0x4A4C, 0x00000000}, + {0x4A50, 0x00000000}, + {0x4A54, 0x00000000}, + {0x449C, 0x00000019}, + {0x44A0, 0x02B2E394}, + {0x44A4, 0x00000400}, + {0x4A58, 0x14285208}, + {0x4A84, 0x02850A14}, + {0x4A88, 0x048D0A14}, + {0x4A8C, 0x01123401}, + {0x4A90, 0x34011234}, + {0x4A94, 0x23450112}, + {0x4A98, 0x45123451}, + {0x4AAC, 0x12345123}, + {0x4AB0, 0x00000000}, + {0x44A8, 0x00000001}, + {0x44B0, 0x00000000}, + {0x44B4, 0x00000000}, + {0x44B8, 0x00000000}, + {0x44BC, 0x00000000}, + {0x44C0, 0x00000000}, + {0x44C4, 0x00000000}, + {0x44C8, 0x00000000}, + {0x44CC, 0x00000000}, + {0x44D0, 0x00000000}, + {0x44D4, 0x00000000}, + {0x44D8, 0x00000000}, + {0x44DC, 0x00000000}, + {0x44E0, 0x00000000}, + {0x44E4, 0x00000000}, + {0x44E8, 0x00000000}, + {0x44EC, 0x00000000}, + {0x44F0, 0x00000000}, + {0x44F4, 0x00000000}, + {0x44F8, 0x00000000}, + {0x44FC, 0x00000000}, + {0x4500, 0x00000000}, + {0x4504, 0x00000000}, + {0x4508, 0x00000000}, + {0x450C, 0x00000000}, + {0x4510, 0x00000000}, + {0x4514, 0x00000000}, + {0x4518, 0x00000000}, + {0x451C, 0x00000000}, + {0x4520, 0x00000000}, + {0x4524, 0x00000000}, + {0x4528, 0x00000000}, + {0x452C, 0x00000000}, + {0x4530, 0x4ED80C81}, + {0x4534, 0x00001808}, + {0x4538, 0x000000FF}, + {0x453C, 0x00000000}, + {0x4540, 0x00000000}, + {0x4544, 0x00000000}, + {0x4548, 0x00000000}, + {0x454C, 0x00000000}, + {0x4550, 0x00000000}, + {0x4554, 0x00000000}, + {0x4558, 0x00000000}, + {0x455C, 0x00000000}, + {0x4560, 0x40600033}, + 
{0x4564, 0x40000000}, + {0x4568, 0x00000000}, + {0x456C, 0x20000000}, + {0x4570, 0x04AAA407}, + {0x4574, 0x0001A2B4}, + {0x4578, 0x0002024B}, + {0x457C, 0x00200000}, + {0x4580, 0x00001B40}, + {0x4584, 0x00000000}, + {0x4588, 0x000000C8}, + {0x458C, 0x30000000}, + {0x4590, 0x00000000}, + {0x4594, 0x00000000}, + {0x4598, 0x00000001}, + {0x459C, 0x0003FE00}, + {0x45A0, 0x00000000}, + {0x45A4, 0x00000000}, + {0x45A8, 0xC00002C0}, + {0x45AC, 0x78028000}, + {0x45B0, 0x80000048}, + {0x45B4, 0x00098800}, + {0x45B8, 0x00200002}, + {0x4AB4, 0x00000000}, + {0x4AB8, 0x00000000}, + {0x4ABC, 0x00000000}, + {0x4AC0, 0x00000000}, + {0x4AC4, 0x00000000}, + {0x4AC8, 0x00000000}, + {0x4AF4, 0x00000000}, + {0x4AF8, 0x00000000}, + {0x4AFC, 0x00000000}, + {0x4B00, 0x00000000}, + {0x4B04, 0x00000000}, + {0x4B08, 0x00000000}, + {0x4B0C, 0x00000000}, + {0x4B10, 0x00000000}, + {0x4B14, 0x00000000}, + {0x4B18, 0xB0000000}, + {0x4B1C, 0x00000000}, + {0x4B20, 0x00000000}, + {0x4B24, 0x00000000}, + {0x4B28, 0x00000000}, + {0x4B2C, 0x00000000}, + {0x4B30, 0x00000000}, + {0x4B34, 0x00000000}, + {0x4B38, 0x00000000}, + {0x4B3C, 0x00000000}, + {0x4B40, 0x00000000}, + {0x45BC, 0x06748790}, + {0x45C0, 0x80000000}, + {0x45C4, 0x00000000}, + {0x45C8, 0x00000000}, + {0x45CC, 0x00558670}, + {0x45D0, 0x002883F0}, + {0x45D4, 0x00090120}, + {0x45D8, 0x00000000}, + {0x4B44, 0x00000100}, + {0x4B48, 0xA6DBC4B1}, + {0x4B4C, 0x64F624C3}, + {0x4B50, 0x00D4EF15}, + {0x49B0, 0x11110F0A}, + {0x49B4, 0x00000003}, + {0x49B8, 0x0000000A}, + {0x4B54, 0xBE9007FF}, + {0x4B58, 0x00000001}, + {0x49C0, 0x00000007}, + {0x49C4, 0x000003D9}, + {0x4A10, 0x00000001}, + {0x49C8, 0x002B1CB0}, + {0x4A00, 0xC0000000}, + {0x4A04, 0x00001000}, + {0x4B5C, 0x00000005}, + {0x4A18, 0x00000007}, + {0x4B60, 0x00000024}, + {0x49CC, 0x00000001}, + {0x49D0, 0x00000010}, + {0x49D4, 0x00000001}, + {0x4B64, 0x927FBFBF}, + {0x4B68, 0x1D07BDD0}, + {0x4B6C, 0x318A4DEF}, + {0x4B70, 0x158C5318}, + {0x4B74, 0x18C5318C}, + {0x4B78, 0x4E7394EC}, + {0x4B7C, 0xD9081CE5}, + {0x4B80, 0x00000001}, + {0x49EC, 0x00000001}, + {0x4B84, 0x00000000}, + {0x4B88, 0x00000000}, + {0x4B8C, 0x00000000}, + {0x4B90, 0x00000000}, + {0x4B94, 0x00000000}, + {0x4B98, 0x00000000}, + {0x4B9C, 0x00000000}, + {0x4BA0, 0x00000000}, + {0x4BA4, 0x00EA99A2}, + {0x49F8, 0x0000C4C3}, + {0x4A1C, 0x00020800}, + {0x4A20, 0x0002CC00}, + {0x4BA8, 0x002B6456}, + {0x45E0, 0x00000000}, + {0x45E4, 0x00000000}, + {0x45E8, 0x00E2E1E1}, + {0x45EC, 0xCBCBB6B6}, + {0x45F0, 0x59100FCA}, + {0x4BAC, 0x12CAB6DE}, + {0x4BB0, 0x00001110}, + {0x45F4, 0x08882550}, + {0x45F8, 0x08CC2660}, + {0x45FC, 0x09102660}, + {0x4600, 0x00000154}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x45DC, 0xE1CB38E8}, + {0x4660, 0x4A2E1800}, + {0x4664, 0x6750E462}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x45DC, 0xD1B942F4}, + {0x4660, 0x41250EF4}, + {0x4664, 0x6750E458}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x45DC, 0xE1CB38E8}, + {0x4660, 0x4A2E1800}, + {0x4664, 0x6750E462}, + {0xA0000000, 0x00000000}, + {0x45DC, 0xE1CB38E8}, + {0x4660, 0x4A2E1800}, + {0x4664, 0x6750E462}, + {0xB0000000, 0x00000000}, + {0x4668, 0x0E0CFB0A}, + {0x466C, 0x30100F06}, + {0x4670, 0x34333333}, + {0x4674, 0x34343434}, + {0x4678, 0xC39D38E8}, + {0x467C, 0x482800E3}, + {0x4680, 0x5836E46A}, + {0x4684, 0xFBEBDA00}, + {0x4688, 0x1A10FF04}, + {0x468C, 0x282A3000}, + {0x4690, 0x2A29292A}, + {0x4694, 0x04FA2A2A}, + {0x4698, 0xEE0F04D1}, + {0x469C, 0x89291436}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A0, 
0x0701E79E}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A0, 0x0701E79E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A0, 0x0701E79E}, + {0xA0000000, 0x00000000}, + {0x46A0, 0x0701E79E}, + {0xB0000000, 0x00000000}, + {0x46A4, 0x08D07CFF}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A8, 0x2212FF14}, + {0x46AC, 0x60423537}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A8, 0x4D1E7F14}, + {0x46AC, 0x60B37C4E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46A8, 0x2212FF14}, + {0x46AC, 0x60423537}, + {0xA0000000, 0x00000000}, + {0x46A8, 0x2212FF14}, + {0x46AC, 0x60423537}, + {0xB0000000, 0x00000000}, + {0x46B0, 0x63666666}, + {0x46B4, 0x35374425}, + {0x46B8, 0x25883043}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x3FFFFD63}, + {0x4724, 0xB58D11FF}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x27795843}, + {0x4724, 0xB58D11F5}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x27795303}, + {0x4724, 0xB58D11F5}, + {0xA0000000, 0x00000000}, + {0x46BC, 0x5107C252}, + {0x4720, 0x3FFFFD63}, + {0x4724, 0xB58D11FF}, + {0xB0000000, 0x00000000}, + {0x4728, 0x07FFFFFF}, + {0x472C, 0x0E7893B6}, + {0x4730, 0xE0399201}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4734, 0x00000020}, + {0x4738, 0x8325C500}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4734, 0x003D4C20}, + {0x4738, 0x8F25C500}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4734, 0x003D5420}, + {0x4738, 0x8725C500}, + {0xA0000000, 0x00000000}, + {0x4734, 0x00000020}, + {0x4738, 0x8325C500}, + {0xB0000000, 0x00000000}, + {0x473C, 0x00000B7F}, + {0x4ACC, 0x000F7D00}, + {0x4AD0, 0x00000000}, + {0x4AD4, 0x00000040}, + {0x4AE4, 0x5379E99E}, + {0x4AE8, 0x00000744}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4BB4, 0xFBD5B89F}, + {0x4BB8, 0x99563918}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4BB4, 0x05EBC8AF}, + {0x4BB8, 0x99543D24}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4BB4, 0xFBD5B89F}, + {0x4BB8, 0x99563918}, + {0xA0000000, 0x00000000}, + {0x4BB4, 0xFBD5B89F}, + {0x4BB8, 0x99563918}, + {0xB0000000, 0x00000000}, + {0x4BBC, 0x12EED5B8}, + {0x4BC0, 0x80C4542F}, + {0x4BC4, 0x005A007F}, + {0x4BC8, 0x40000000}, + {0x4BCC, 0x40000000}, + {0x4BD0, 0x00000000}, + {0x4BD4, 0x40000000}, + {0x4BD8, 0xC0000000}, + {0x4BDC, 0x40000000}, + {0x4BE0, 0x80000000}, + {0x4BE4, 0xBAAC8000}, + {0x4BE8, 0x638A88C5}, + {0x4BEC, 0x00900000}, + {0x4EAC, 0x00000000}, + {0x4BF0, 0x00000000}, + {0x4BF4, 0x00000000}, + {0x4BF8, 0x00000219}, + {0x4EC4, 0x00000001}, + {0x4EE8, 0x00002020}, + {0x4BFC, 0x00000000}, + {0x4C00, 0x00000010}, + {0x4C04, 0x00000001}, + {0x4C08, 0x00000001}, + {0x4C0C, 0x00000000}, + {0x4C10, 0x00000000}, + {0x4C14, 0x00000151}, + {0x4C18, 0x00000000}, + {0x4C1C, 0x00000000}, + {0x4C20, 0x00000151}, + {0x4C24, 0x00000498}, + {0x4C28, 0x00000498}, + {0x4C2C, 0x00000498}, + {0x4C30, 0x00000498}, + {0x4C34, 0x00000498}, + {0x4C38, 0x00000498}, + {0x4C3C, 0x00000498}, + {0x4C40, 0x00000498}, + {0x4C44, 0x00000000}, + {0x4C48, 0x00000000}, + {0x4C4C, 0x00001146}, + {0x4C50, 0x00000000}, + {0x4C54, 0x00000000}, + {0x4C58, 0x00001146}, + {0x4C5C, 0x00000000}, + {0x4C60, 0x00000000}, + {0x4C64, 0xE2E1E1DE}, + {0x4C68, 0xB6B600B6}, + {0x4C6C, 0xCACBCBCA}, + {0x4C70, 0x8091010F}, + {0x4C74, 0x00000B11}, + {0x46C8, 0x08882550}, + {0x46CC, 0x08CC2660}, + 
{0x46D0, 0x09102660}, + {0x46D4, 0x00000154}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4740, 0xE4CD38E8}, + {0x4744, 0x4C321B04}, + {0x4748, 0x6750E466}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4740, 0xC5AD42F4}, + {0x4744, 0x412504E8}, + {0x4748, 0x6850E459}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4740, 0xE4CD38E8}, + {0x4744, 0x4C321B04}, + {0x4748, 0x6750E466}, + {0xA0000000, 0x00000000}, + {0x4740, 0xE4CD38E8}, + {0x4744, 0x4C321B04}, + {0x4748, 0x6750E466}, + {0xB0000000, 0x00000000}, + {0x474C, 0x0E0CFB0A}, + {0x4750, 0x30100F06}, + {0x4754, 0x34333333}, + {0x4758, 0x34343434}, + {0x475C, 0xC49E38E8}, + {0x4760, 0x482800E2}, + {0x4764, 0x5636E466}, + {0x4768, 0xFBEBDA00}, + {0x476C, 0x1A10FF04}, + {0x4770, 0x282A3000}, + {0x4774, 0x2A29292A}, + {0x4778, 0x04FA2A2A}, + {0x477C, 0xEE0F04D1}, + {0x49F0, 0x89291436}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0xA0000000, 0x00000000}, + {0x49F4, 0x0701E79E}, + {0xB0000000, 0x00000000}, + {0x49FC, 0x08D07CFF}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A5C, 0x2212FF14}, + {0x4A60, 0x60423537}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A5C, 0x4D1E7F14}, + {0x4A60, 0x60B37C4E}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A5C, 0x2212FF14}, + {0x4A60, 0x60423537}, + {0xA0000000, 0x00000000}, + {0x4A5C, 0x2212FF14}, + {0x4A60, 0x60423537}, + {0xB0000000, 0x00000000}, + {0x4A64, 0x63666666}, + {0x4A68, 0x35374425}, + {0x4A6C, 0x25883043}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x3FFFFD63}, + {0x4A78, 0xB58D11FF}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x27795843}, + {0x4A78, 0xB58D11F5}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x27795303}, + {0x4A78, 0xB58D11F5}, + {0xA0000000, 0x00000000}, + {0x4A70, 0x5107C252}, + {0x4A74, 0x3FFFFD63}, + {0x4A78, 0xB58D11FF}, + {0xB0000000, 0x00000000}, + {0x4A7C, 0x07FFFFFF}, + {0x4A80, 0x0E7893B6}, + {0x4A9C, 0xE0399201}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4AA0, 0x00000020}, + {0x4AA4, 0x8325C500}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4AA0, 0x003D4C20}, + {0x4AA4, 0x8F25C500}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4AA0, 0x003D5420}, + {0x4AA4, 0x8725C500}, + {0xA0000000, 0x00000000}, + {0x4AA0, 0x00000020}, + {0x4AA4, 0x8325C500}, + {0xB0000000, 0x00000000}, + {0x4AA8, 0x00000B7F}, + {0x4AD8, 0x000F7D00}, + {0x4ADC, 0x00000000}, + {0x4AE0, 0x00000040}, + {0x4AEC, 0x5379E99E}, + {0x4AF0, 0x00000744}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x4C78, 0xFBD5B89F}, + {0x4C7C, 0x99563918}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4C78, 0x07ECC9B0}, + {0x4C7C, 0x995B4126}, + {0x903400ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x4C78, 0xFBD5B89F}, + {0x4C7C, 0x99563918}, + {0xA0000000, 0x00000000}, + {0x4C78, 0xFBD5B89F}, + {0x4C7C, 0x99563918}, + {0xB0000000, 0x00000000}, + {0x4C80, 0x12EED5B8}, + {0x4C84, 0x80C4542F}, + {0x4C88, 0x005A007F}, + {0x4C8C, 0x40000000}, + {0x4C90, 0x40000000}, + {0x4C94, 0x00000000}, + {0x4C98, 0x40000000}, + {0x4C9C, 0xC0000000}, + {0x4CA0, 0x40000000}, + {0x4CA4, 0x80000000}, + {0x4CA8, 0xBAAC8000}, + {0x4CAC, 0x638A88C5}, + {0x4CB0, 
0x00900000}, + {0x4EB0, 0x00000000}, + {0x4CB4, 0x00000000}, + {0x4CB8, 0x00000000}, + {0x4CBC, 0x00000219}, + {0x4EC8, 0x00000001}, + {0x4EEC, 0x00002020}, + {0x4CC0, 0x00000000}, + {0x4CC4, 0x00000010}, + {0x4CC8, 0x00000001}, + {0x4CCC, 0x00000001}, + {0x4CD0, 0x00000000}, + {0x4CD4, 0x00000000}, + {0x4CD8, 0x00000151}, + {0x4CDC, 0x00000000}, + {0x4CE0, 0x00000000}, + {0x4CE4, 0x00000151}, + {0x4CE8, 0x00000498}, + {0x4CEC, 0x00000498}, + {0x4CF0, 0x00000498}, + {0x4CF4, 0x00000498}, + {0x4CF8, 0x00000498}, + {0x4CFC, 0x00000498}, + {0x4D00, 0x00000498}, + {0x4D04, 0x00000498}, + {0x4D08, 0x00000000}, + {0x4D0C, 0x00000000}, + {0x4D10, 0x00001146}, + {0x4D14, 0x00000000}, + {0x4D18, 0x00000000}, + {0x4D1C, 0x00001146}, + {0x4788, 0x00000000}, + {0x478C, 0xA32103FE}, + {0x4790, 0xB20A7B28}, + {0x4794, 0xC6A7B14F}, + {0x4798, 0x000000D3}, + {0x4D20, 0x00000000}, + {0x4D24, 0x0C442416}, + {0x4D28, 0x00000000}, + {0x479C, 0x009B902A}, + {0x47A0, 0x009B902A}, + {0x47A4, 0x98682C18}, + {0x47A8, 0x6318C4C1}, + {0x47AC, 0x6248C631}, + {0x47B0, 0x922A8253}, + {0x47B4, 0x00000005}, + {0x4D2C, 0x0008C0C1}, + {0x47B8, 0x00001759}, + {0x47BC, 0x4B702400}, + {0x47C0, 0x831508BA}, + {0x4A14, 0x000000E9}, + {0x4D30, 0x00000001}, + {0x4E94, 0x000000FC}, + {0x47C4, 0x9ABBCACB}, + {0x47C8, 0x56767578}, + {0x47CC, 0xBBCCBBB3}, + {0x47D0, 0x57889989}, + {0x47D4, 0x00000F45}, + {0x4D34, 0x7BB167AB}, + {0x4D38, 0xBBBBBB05}, + {0x4D3C, 0x777777BB}, + {0x4D40, 0x00015277}, + {0x47D8, 0x27039CE9}, + {0x47DC, 0x41414432}, + {0x47E0, 0x36058342}, + {0x47E4, 0x00000006}, + {0x4D44, 0x00000687}, + {0x47E8, 0x00000001}, + {0x47EC, 0x00000001}, + {0x47F0, 0xC7013016}, + {0x47F4, 0x84413016}, + {0x47F8, 0x84413016}, + {0x47FC, 0x8C413016}, + {0x4800, 0x8C40B028}, + {0x4804, 0x3140B028}, + {0x4808, 0x2940B028}, + {0x480C, 0x8440B028}, + {0x4810, 0x6318C610}, + {0x4814, 0x45334753}, + {0x4818, 0x236A6A88}, + {0x4D48, 0x8C413016}, + {0x4D4C, 0xA140B028}, + {0x4D50, 0x00150A31}, + {0x481C, 0x576DF814}, + {0x4820, 0xA08877AC}, + {0x4824, 0x0000007A}, + {0x4D54, 0x00001184}, + {0x4828, 0xBCEB4A14}, + {0x482C, 0x000A3A4A}, + {0x4830, 0xBCEB4A14}, + {0x4834, 0x000A3A4A}, + {0x4D58, 0x2F63DD3A}, + {0x4838, 0xBCBDBD85}, + {0x483C, 0x0CABB99A}, + {0x4D5C, 0x000000BC}, + {0x4840, 0x38384242}, + {0x4844, 0x0086102E}, + {0x4848, 0xCA24C82A}, + {0x4D60, 0x00000000}, + {0x4D64, 0x0000F49D}, + {0x4ED8, 0x00000001}, + {0x4D68, 0x000001C4}, + {0x4D6C, 0x00000000}, + {0x4D70, 0x38384242}, + {0x4D74, 0x030E902E}, + {0x4D78, 0x994C1502}, + {0x4D7C, 0x00017912}, + {0x4EDC, 0x00000001}, + {0x484C, 0x00008A62}, + {0x4D80, 0x00000002}, + {0x4850, 0x00000008}, + {0x4854, 0x009B902A}, + {0x4858, 0x009B902A}, + {0x485C, 0x98682C18}, + {0x4860, 0x6318C4C1}, + {0x4864, 0x6248C631}, + {0x4868, 0x922A8253}, + {0x486C, 0x00000005}, + {0x4D84, 0x0008C0C1}, + {0x4870, 0x00001759}, + {0x4874, 0x4B702400}, + {0x4878, 0x831508BA}, + {0x4A24, 0x000000E9}, + {0x4D88, 0x00000001}, + {0x4E98, 0x000000FC}, + {0x487C, 0x9898A8BB}, + {0x4880, 0x54535368}, + {0x4884, 0x999999B3}, + {0x4888, 0x35555589}, + {0x488C, 0x00000745}, + {0x4D8C, 0x6AB14487}, + {0x4D90, 0xBBBBBB04}, + {0x4D94, 0x777777BB}, + {0x4D98, 0x00015277}, + {0x4890, 0x27039CE9}, + {0x4894, 0x41414432}, + {0x4898, 0x36058342}, + {0x489C, 0x00000006}, + {0x4D9C, 0x00000687}, + {0x48A0, 0x00000001}, + {0x48A4, 0x00000001}, + {0x48A8, 0xC7013016}, + {0x48AC, 0x84413016}, + {0x48B0, 0x84413016}, + {0x48B4, 0x8C413016}, + {0x48B8, 0x8C40B028}, + {0x48BC, 0x3140B028}, + {0x48C0, 0x2940B028}, + {0x48C4, 
0x8440B028}, + {0x48C8, 0x6318C610}, + {0x48CC, 0x45334753}, + {0x48D0, 0x236A6A88}, + {0x4DA0, 0x8C413016}, + {0x4DA4, 0xA140B028}, + {0x4DA8, 0x00150A31}, + {0x48D4, 0x576DF814}, + {0x48D8, 0xA08877AC}, + {0x48DC, 0x0000007A}, + {0x4DAC, 0x00001184}, + {0x48E0, 0xBCEB4A14}, + {0x48E4, 0x000A3A4A}, + {0x48E8, 0xBCEB4A14}, + {0x48EC, 0x000A3A4A}, + {0x4DB0, 0x2F63DD3A}, + {0x48F0, 0x9A8A8A85}, + {0x48F4, 0x0C9BB99A}, + {0x4DB4, 0x0000009A}, + {0x48F8, 0x38384242}, + {0x48FC, 0x0086102E}, + {0x4900, 0xCA24C82A}, + {0x4DB8, 0x00000000}, + {0x4DBC, 0x0000F49D}, + {0x4EE0, 0x00000001}, + {0x4DC0, 0x000001C4}, + {0x4DC4, 0x00000000}, + {0x4DC8, 0x38384242}, + {0x4DCC, 0x030E902E}, + {0x4DD0, 0x994C1502}, + {0x4DD4, 0x00017912}, + {0x4EE4, 0x00000001}, + {0x4904, 0x00008A62}, + {0x4DD8, 0x00000002}, + {0x4908, 0x00000008}, + {0x490C, 0x80040000}, + {0x4910, 0x80040000}, + {0x4914, 0xFE800000}, + {0x4918, 0x834C0000}, + {0x491C, 0x00000000}, + {0x4920, 0x00000000}, + {0x4924, 0x000003FF}, + {0x4928, 0x00000000}, + {0x492C, 0x00000000}, + {0x4930, 0x00000000}, + {0x4934, 0x40000000}, + {0x4938, 0x00000000}, + {0x493C, 0x00000000}, + {0x4940, 0x00000000}, + {0x4944, 0x00000000}, + {0x4948, 0x04065800}, + {0x494C, 0x02010080}, + {0x4950, 0x0E1E3E05}, + {0x4954, 0x0A163068}, + {0x4958, 0x00206040}, + {0x495C, 0x02020202}, + {0x4960, 0x00002020}, + {0x4DDC, 0x18002000}, + {0x4DE0, 0x00004001}, + {0x4DE4, 0x00040004}, + {0x4DE8, 0x00400040}, + {0x4DEC, 0x04000400}, + {0x4DF0, 0x08080618}, + {0x4DF4, 0x08081616}, + {0x4DF8, 0x08080808}, + {0x4DFC, 0x18180808}, + {0x4E00, 0x01020100}, + {0x4E04, 0x05020502}, + {0x4E08, 0x00020E0F}, + {0x4E0C, 0x00000000}, + {0x4E10, 0x16080806}, + {0x4E14, 0x08080816}, + {0x4E18, 0x08080808}, + {0x4E1C, 0x00181808}, + {0x4E20, 0x02010201}, + {0x4E24, 0x0F050205}, + {0x4E28, 0x0000020E}, + {0x4E2C, 0x00000000}, + {0x4E70, 0x00000001}, + {0x4970, 0x00000000}, + {0x4974, 0xC00CD62D}, + {0x4978, 0x00000103}, + {0x4E30, 0x02E416A8}, + {0x497C, 0x00000000}, + {0x4980, 0x00000000}, + {0x4984, 0x00000000}, + {0x4988, 0x00000000}, + {0x498C, 0x00000000}, + {0x4E34, 0x00FC0000}, + {0x4E38, 0x0000F800}, + {0x4E3C, 0x00000001}, + {0x4990, 0x00000000}, + {0x4994, 0x00000000}, + {0x4998, 0x00000000}, + {0x499C, 0x00000000}, + {0x49A0, 0x00000000}, + {0x4E40, 0x00FC0000}, + {0x4E44, 0x0000F800}, + {0x4E48, 0x00000001}, + {0xC54, 0x10014368}, + {0xC58, 0x61000000}, + {0xC5C, 0x805580F0}, + {0xC64, 0x0010A030}, + {0x189C, 0x000003FF}, + {0xC6C, 0x00060020}, + {0xC3C, 0x2840E1BF}, + {0xC40, 0x00000000}, + {0xC44, 0x00000007}, + {0xC48, 0x410E4000}, + {0xC54, 0x1EE1436A}, + {0xC58, 0x61000000}, + {0x730, 0x00000002}, + {0xC60, 0x017FFFF2}, + {0xC64, 0x0010A170}, + {0xC64, 0x0010A170}, + {0xC68, 0x000000FF}, + {0xC64, 0x0010A130}, + {0xC54, 0x1AE1436A}, + {0xC6C, 0x00060020}, + {0xC58, 0x41000000}, + {0x708, 0x00000000}, + {0xC6C, 0x00061020}, + {0x884, 0x0043F01D}, + {0x704, 0x601E0100}, + {0x710, 0xEF810000}, + {0xC54, 0x1AE1436A}, + {0xC58, 0x41000000}, + {0xC68, 0x10000050}, + {0xC6C, 0x20061020}, + {0x704, 0x601E0100}, + {0xC74, 0x00000000}, + {0x90C, 0x00300000}, + {0xC70, 0x071BFC00}, + {0xC74, 0x3FFFFFFF}, + {0xC78, 0x3FFFFFFF}, + {0xC7C, 0x0000BFFF}, + {0xD40, 0xF64FA0F7}, + {0xD44, 0x0400463F}, + {0xD48, 0x0003FFFF}, + {0xD4C, 0x00000000}, + {0xD50, 0xF64FA0F7}, + {0xD54, 0x04100437}, + {0xD58, 0x0000FF7F}, + {0xD5C, 0x00000000}, + {0xD60, 0x00000000}, + {0xD64, 0x00000000}, + {0xD70, 0x00000015}, + {0xD90, 0x000003FF}, + {0xD94, 0x00000000}, + {0xD98, 0x0000003F}, + {0xD9C, 
0x00000000}, + {0xDA0, 0x000003FE}, + {0xDA4, 0x00000000}, + {0xDA8, 0x0000003F}, + {0xDAC, 0x00000000}, + {0xD00, 0x77777777}, + {0xD04, 0xBBBBBBBB}, + {0xD08, 0xBBBBBBBB}, + {0xD0C, 0x00000070}, + {0xD10, 0x20110900}, + {0xD10, 0x20110FFF}, + {0xD78, 0x00000001}, + {0xD7C, 0x001C040A}, + {0xD84, 0x00006007}, + {0xD84, 0x00006607}, + {0xD10, 0x28110FFF}, + {0xD18, 0x50209900}, + {0xD80, 0x00804100}, + {0xD80, 0x00804200}, + {0x718, 0x1333233F}, + {0x604, 0x041E1E1E}, + {0x714, 0x00010000}, + {0x586C, 0x000000F0}, + {0x586C, 0x000000E0}, + {0x586C, 0x000000D0}, + {0x586C, 0x000000C0}, + {0x586C, 0x000000B0}, + {0x586C, 0x000000A0}, + {0x586C, 0x00000090}, + {0x586C, 0x00000080}, + {0x586C, 0x00000070}, + {0x586C, 0x00000060}, + {0x586C, 0x00000050}, + {0x586C, 0x00000040}, + {0x586C, 0x00000030}, + {0x586C, 0x00000020}, + {0x586C, 0x00000010}, + {0x586C, 0x00000000}, + {0x786C, 0x000000F0}, + {0x786C, 0x000000E0}, + {0x786C, 0x000000D0}, + {0x786C, 0x000000C0}, + {0x786C, 0x000000B0}, + {0x786C, 0x000000A0}, + {0x786C, 0x00000090}, + {0x786C, 0x00000080}, + {0x786C, 0x00000070}, + {0x786C, 0x00000060}, + {0x786C, 0x00000050}, + {0x786C, 0x00000040}, + {0x786C, 0x00000030}, + {0x786C, 0x00000020}, + {0x786C, 0x00000010}, + {0x786C, 0x00000000}, + {0x304, 0x0CE31333}, + {0x300, 0xF30CE31C}, + {0x304, 0x13EF1F19}, + {0x308, 0x0C13E3F3}, + {0x30C, 0x130C0C0C}, + {0x310, 0x80496000}, + {0x314, 0x0041E000}, + {0x318, 0x20022042}, + {0x31C, 0x20448009}, + {0x320, 0x00490040}, + {0x324, 0xE0000070}, + {0x328, 0xE000E000}, + {0x32C, 0x0041E000}, + {0x35C, 0x000004C4}, + {0xC0D4, 0xA7C41460}, + {0xC0D8, 0xC6BA7F67}, + {0xC0DC, 0x30C52868}, + {0xC0E0, 0x75008128}, + {0xC0E4, 0x0000272B}, + {0xC1D4, 0xA7C41460}, + {0xC1D8, 0xC6BA7F67}, + {0xC1DC, 0x30C52868}, + {0xC1E0, 0x75008128}, + {0xC1E4, 0x0000272B}, + {0xC0EC, 0x00030003}, + {0xC1EC, 0x00030003}, + {0xC004, 0x03020000}, + {0xC024, 0x03020000}, + {0xC104, 0x03020000}, + {0xC124, 0x03020000}, + {0xC0E8, 0x000A0C81}, + {0xC0F0, 0x00000024}, + {0xC1E8, 0x000A0C81}, + {0xC1F0, 0x00000024}, + {0x334, 0xFFFFFFFF}, + {0x33C, 0x55000000}, + {0x340, 0x00005555}, + {0x724, 0x00111201}, + {0x5868, 0xA9550000}, + {0x5870, 0x33221100}, + {0x5874, 0x77665544}, + {0x5878, 0xBBAA9988}, + {0x587C, 0xFFEEDDCC}, + {0x5880, 0x76543210}, + {0x5884, 0xFEDCBA98}, + {0x5888, 0x00000000}, + {0x588C, 0x00000000}, + {0x5894, 0x00000008}, + {0x7868, 0xA9550000}, + {0x7870, 0x33221100}, + {0x7874, 0x77665544}, + {0x7878, 0xBBAA9988}, + {0x787C, 0xFFEEDDCC}, + {0x7880, 0x76543210}, + {0x7884, 0xFEDCBA98}, + {0x7888, 0x00000000}, + {0x788C, 0x00000000}, + {0x7894, 0x00000008}, + {0x650, 0x00200888}, + {0x710, 0xF3810000}, + {0x020, 0x0000F381}, + {0x024, 0x0000F381}, + {0xC0A8, 0x00000080}, + {0xC0AC, 0x00000100}, + {0xC0B8, 0x00020000}, + {0xC1A8, 0x00000080}, + {0xC1AC, 0x00000100}, + {0xC1B8, 0x00020000}, + {0x1038, 0x00003100}, + {0x1038, 0x00003100}, + {0x3038, 0x00003100}, + {0x3038, 0x00003100}, + {0xC14, 0xA5000000}, + {0x908, 0x00000001}, + {0xC54, 0x1EE14368}, + {0xC88, 0xC2AC8000}, + {0xC8C, 0x02F2FC08}, + {0xC70, 0x071BFC00}, + {0x980, 0x10002251}, + {0x988, 0x3C3C4107}, + {0x904, 0x00000005}, + {0x994, 0x00000010}, + {0x000, 0x0580801F}, + {0x240C, 0x00000000}, + {0x010, 0x000C01FF}, + {0x010, 0x001C01FF}, + {0x2424, 0x00000008}, + {0x620, 0x00141A30}, + {0x660, 0x00000004}, + {0x2620, 0x00141A30}, + {0x2660, 0x00000000}, + {0x640, 0x180A141E}, + {0x640, 0x1814141E}, + {0x640, 0x1814141E}, + {0x640, 0x14141414}, + {0x644, 0x3C14283C}, + {0x644, 0x3C29283C}, + 
{0x644, 0x3C29203C}, + {0x644, 0x3C29201A}, + {0x2640, 0x180A141E}, + {0x2640, 0x1814141E}, + {0x2640, 0x1814141E}, + {0x2640, 0x14141414}, + {0x2644, 0x3C14283C}, + {0x2644, 0x3C29283C}, + {0x2644, 0x3C29203C}, + {0x2644, 0x3C29201A}, + {0x620, 0x00141A40}, + {0x64C, 0x1D0A141E}, + {0x64C, 0x1D1D141E}, + {0x64C, 0x1D1D1D1E}, + {0x2620, 0x00141A40}, + {0x264C, 0x1D0A141E}, + {0x264C, 0x1D1D141E}, + {0x264C, 0x1D1D1D1E}, + {0x2300, 0x03020100}, + {0x2304, 0x07060504}, + {0x2308, 0x0B0A0908}, + {0x230C, 0x0F0E0D0C}, + {0x2310, 0x13121110}, + {0x2314, 0x17161514}, + {0x2318, 0x00000018}, + {0x231C, 0x00C00000}, + {0x2320, 0x00000000}, + {0x2324, 0x0005298F}, + {0x2328, 0x0015296E}, + {0x232C, 0x0D3B5200}, + {0x2330, 0x00000000}, + {0x2334, 0x00000000}, + {0x2338, 0x00000000}, + {0x233C, 0x00000402}, + {0x2340, 0x00020080}, + {0x2344, 0x03C00000}, + {0x2348, 0x0001FFFF}, + {0x234C, 0x00C80064}, + {0x2350, 0x0190012C}, + {0x2354, 0x000032FE}, + {0x2358, 0xF0203C28}, + {0x235C, 0xF027C000}, + {0x2360, 0x01210C00}, + {0x2320, 0x00000001}, + {0x2300, 0x0C811B40}, + {0x2304, 0xF3FC4ED8}, + {0x2308, 0x08FF808F}, + {0x230C, 0xFCBC80C8}, + {0x2310, 0xBC80536C}, + {0x2314, 0x0363A0F3}, + {0x2318, 0x000000BB}, + {0x724, 0x00111200}, + {0x704, 0x601E0D00}, + {0xC78, 0xBFFFFFFF}, + {0x704, 0x601E0D02}, + {0x704, 0x601E0D02}, + {0x5864, 0x080801FF}, + {0x7864, 0x080801FF}, + {0xC60, 0x017FFFF3}, + {0xC6C, 0x20061021}, + {0x58AC, 0x08000000}, + {0x78AC, 0x08000000}, + {0x8088, 0x007F0000}, + {0x81A4, 0x003F3A00}, + {0x81B4, 0x0100007F}, + {0x81C0, 0x0060010B}, + {0x81A0, 0x00000010}, + {0x8138, 0x40000002}, + {0x82A4, 0x003F3A00}, + {0x82B4, 0x0100007F}, + {0x82C0, 0x0060010B}, + {0x82A0, 0x00000010}, + {0x81A0, 0x00000010}, + {0x8238, 0x40000002}, + {0x8088, 0x00000000}, + {0x8020, 0x00000000}, + {0x8120, 0x00000000}, + {0x8220, 0x00000000}, + {0x8124, 0x00000F0F}, + {0x8224, 0x00000F0F}, + {0x5864, 0x180801FF}, + {0x7864, 0x180801FF}, + {0xC60, 0x017FFFF3}, + {0xC70, 0x071BFE00}, + {0xC70, 0x071BFE60}, + {0xC6C, 0x20061021}, + {0x58AC, 0x08000000}, + {0x78AC, 0x08000000}, + {0x8120, 0x10000000}, + {0x8120, 0x10030000}, + {0x8124, 0x00000F0F}, + {0x8124, 0x00000F0F}, + {0x8224, 0x00000F0F}, + {0x8224, 0x00000F0F}, + {0x8220, 0x10000000}, + {0x8220, 0x10030000}, + {0x704, 0x601E0D00}, + {0x5864, 0x100801FF}, + {0x7864, 0x100801FF}, + {0x5864, 0x180801FF}, + {0x7864, 0x180801FF}, + {0xC60, 0x017FFFF3}, + {0x58D4, 0x7401FE00}, + {0x78D4, 0x7401FE00}, + {0x58F0, 0x400401FF}, + {0x78F0, 0x400401FF}, + {0x58F0, 0x400401FF}, + {0x78F0, 0x400401FF}, + {0x704, 0x601E0D02}, + {0xC7C, 0x0020BFFF}, + {0x58C0, 0x00FE0000}, + {0x58FC, 0x00000000}, + {0x566C, 0x00010005}, + {0x566C, 0x00011005}, + {0x700, 0x00000030}, + {0x9D0, 0x00001001}, + {0x704, 0x601E0D02}, + {0x704, 0x601E0D00}, + {0x704, 0x601C0502}, + {0x000, 0x0580801F}, + {0x980, 0x10002250}, + {0x010, 0x001C01FF}, + {0xC3C, 0x2840E1BF}, + {0x12A8, 0x33337824}, + {0x32A8, 0x33337824}, + {0x620, 0x00141A40}, + {0x2320, 0x00000000}, + {0x664, 0x0000000C}, + {0xC0F8, 0x00000001}, + {0xC1F8, 0x00000001}, + {0x2D7C, 0x739C040A}, + {0x1010, 0x00000000}, + {0x3010, 0x00000000}, + {0x2C14, 0x80000005}, + {0x5818, 0x082C1800}, + {0x7818, 0x082C1800}, + {0x624, 0x0101030A}, + {0x028, 0x0000F381}, + {0x02C, 0x0000F381}, + {0x720, 0x20000000}, + {0x1200, 0x00010142}, + {0x12A0, 0x24903056}, + {0x12AC, 0x12333121}, + {0x12B8, 0x30020000}, + {0x2000, 0x18BBBF84}, + {0x2C14, 0x85000005}, + {0x3200, 0x00010142}, + {0x32A0, 0x24903056}, + {0x32AC, 0x12333121}, + {0x32B8, 
0x30020000}, + {0x5800, 0x03FF807F}, + {0x5804, 0x04237040}, + {0x5808, 0x04237040}, + {0x7800, 0x03FF807F}, + {0x7804, 0x04237040}, + {0x7808, 0x04237040}, + {0x010, 0x001C61FF}, + {0x56C8, 0x0E800400}, + {0x76C8, 0x0E800400}, + {0x984, 0x000000E0}, + {0x2008, 0x000FFFFF}, + {0x58B0, 0x00000800}, + {0x5A00, 0x00000000}, + {0x5A04, 0x00000000}, + {0x5A08, 0x00000000}, + {0x5A0C, 0x00000000}, + {0x5A10, 0x00000000}, + {0x5A14, 0x00000000}, + {0x5A18, 0x00000000}, + {0x5A1C, 0x00000000}, + {0x5A20, 0x00000000}, + {0x5A24, 0x00050000}, + {0x5A28, 0x00000000}, + {0x5A2C, 0x00000000}, + {0x5A30, 0x00000000}, + {0x5A34, 0x00000000}, + {0x5A38, 0x00000000}, + {0x5A3C, 0x00000000}, + {0x5A40, 0x00000000}, + {0x5A44, 0x00000005}, + {0x5A48, 0x00000000}, + {0x5A4C, 0x00000000}, + {0x5A50, 0x00000000}, + {0x5A54, 0x00000000}, + {0x5A58, 0x00000000}, + {0x5A5C, 0x00000000}, + {0x5A60, 0x00050000}, + {0x5A64, 0x00000000}, + {0x5A68, 0x00000000}, + {0x5A6C, 0x00000000}, + {0x5A70, 0x00000000}, + {0x5A74, 0x00000000}, + {0x5A78, 0x00000000}, + {0x5A7C, 0x00000000}, + {0x5A80, 0x00000000}, + {0x5A84, 0x00000000}, + {0x5A88, 0x00000000}, + {0x5A8C, 0x00000000}, + {0x5A90, 0x00000000}, + {0x5A94, 0x00000000}, + {0x5A98, 0x00000000}, + {0x5A9C, 0x00000000}, + {0x5AA0, 0x00000000}, + {0x5AA4, 0x00000000}, + {0x5AA8, 0x00000000}, + {0x5AAC, 0x00000000}, + {0x5AB0, 0x00050005}, + {0x5AB4, 0x00050005}, + {0x5AB8, 0x00050005}, + {0x5ABC, 0x00050005}, + {0x5AC0, 0x00000005}, + {0x78B0, 0x00000800}, + {0x7A00, 0x00000000}, + {0x7A04, 0x00000000}, + {0x7A08, 0x00000000}, + {0x7A0C, 0x00000000}, + {0x7A10, 0x00000000}, + {0x7A14, 0x00000000}, + {0x7A18, 0x00000000}, + {0x7A1C, 0x00000000}, + {0x7A20, 0x00000000}, + {0x7A24, 0x00050000}, + {0x7A28, 0x00000000}, + {0x7A2C, 0x00000000}, + {0x7A30, 0x00000000}, + {0x7A34, 0x00000000}, + {0x7A38, 0x00000000}, + {0x7A3C, 0x00000000}, + {0x7A40, 0x00000000}, + {0x7A44, 0x00000005}, + {0x7A48, 0x00000000}, + {0x7A4C, 0x00000000}, + {0x7A50, 0x00000000}, + {0x7A54, 0x00000000}, + {0x7A58, 0x00000000}, + {0x7A5C, 0x00000000}, + {0x7A60, 0x00050000}, + {0x7A64, 0x00000000}, + {0x7A68, 0x00000000}, + {0x7A6C, 0x00000000}, + {0x7A70, 0x00000000}, + {0x7A74, 0x00000000}, + {0x7A78, 0x00000000}, + {0x7A7C, 0x00000000}, + {0x7A80, 0x00000000}, + {0x7A84, 0x00000000}, + {0x7A88, 0x00000000}, + {0x7A8C, 0x00000000}, + {0x7A90, 0x00000000}, + {0x7A94, 0x00000000}, + {0x7A98, 0x00000000}, + {0x7A9C, 0x00000000}, + {0x7AA0, 0x00000000}, + {0x7AA4, 0x00000000}, + {0x7AA8, 0x00000000}, + {0x7AAC, 0x00000000}, + {0x7AB0, 0x00050005}, + {0x7AB4, 0x00050005}, + {0x7AB8, 0x00050005}, + {0x7ABC, 0x00050005}, + {0x7AC0, 0x00000005}, + {0x0F0, 0x00010000}, + {0x0F4, 0x00000018}, + {0x0F8, 0x20220120}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = { + {0xF0FF0000, 0x00000000}, + {0xF03300FF, 0x00000001}, + {0x000, 0x01E3C39F}, + {0x001, 0x00694727}, + {0x002, 0x00005536}, + {0x100, 0x02E3C39F}, + {0x101, 0x0069472A}, + {0x102, 0x00005536}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10000, 0x1A02E1C9}, + {0x10001, 0x00644A30}, + {0x10002, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x10000, 0x0EF4D1B9}, + {0x10001, 0x00584125}, + {0x10002, 0x00006750}, + {0xA0000000, 0x00000000}, + {0x10000, 0x1A02E1C9}, + {0x10001, 0x00644A30}, + {0x10002, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10100, 0x1901E1C8}, + {0x10101, 0x0061482D}, + {0x10102, 0x00006750}, + {0x903300ff, 
0x00000000}, {0x40000000, 0x00000000}, + {0x10100, 0x04E8C5AD}, + {0x10101, 0x00594125}, + {0x10102, 0x00006850}, + {0xA0000000, 0x00000000}, + {0x10100, 0x1901E1C8}, + {0x10101, 0x0061482D}, + {0x10102, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x20000, 0x1601E2CA}, + {0x20001, 0x005D452A}, + {0x20002, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x20000, 0x0EF4D3BB}, + {0x20001, 0x00563F25}, + {0x20002, 0x00006850}, + {0xA0000000, 0x00000000}, + {0x20000, 0x1601E2CA}, + {0x20001, 0x005D452A}, + {0x20002, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x20100, 0x1901E1C8}, + {0x20101, 0x0061482D}, + {0x20102, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x20100, 0x0BF1CFB7}, + {0x20101, 0x00574025}, + {0x20102, 0x00006750}, + {0xA0000000, 0x00000000}, + {0x20100, 0x1901E1C8}, + {0x20101, 0x0061482D}, + {0x20102, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x30000, 0x1700E1CA}, + {0x30001, 0x005E472B}, + {0x30002, 0x00006750}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x30000, 0x05EFCEB7}, + {0x30001, 0x004B351A}, + {0x30002, 0x00006850}, + {0xA0000000, 0x00000000}, + {0x30000, 0x1700E1CA}, + {0x30001, 0x005E472B}, + {0x30002, 0x00006750}, + {0xB0000000, 0x00000000}, + {0x80ff0000, 0x00000000}, {0x40000000, 0x00000000}, + {0x30100, 0x14FEE0C9}, + {0x30101, 0x00594428}, + {0x30102, 0x00006650}, + {0x903300ff, 0x00000000}, {0x40000000, 0x00000000}, + {0x30100, 0x0CF2D1B9}, + {0x30101, 0x00563F24}, + {0x30102, 0x00006750}, + {0xA0000000, 0x00000000}, + {0x30100, 0x14FEE0C9}, + {0x30101, 0x00594428}, + {0x30102, 0x00006650}, + {0xB0000000, 0x00000000}, + {0x40000, 0x13FCDDC8}, + {0x40001, 0x005D4328}, + {0x40002, 0x00006850}, + {0x40100, 0x14FEE3CF}, + {0x40101, 0x00583E24}, + {0x40102, 0x00006850}, + {0x50000, 0x0DF4D6C6}, + {0x50001, 0x00604227}, + {0x50002, 0x00006850}, + {0x50100, 0x1903E7D5}, + {0x50101, 0x0061462B}, + {0x50102, 0x00006850}, + {0x60000, 0x0FF5D7C6}, + {0x60001, 0x005D4429}, + {0x60002, 0x00006850}, + {0x60100, 0x12FADECF}, + {0x60101, 0x005B4126}, + {0x60102, 0x00006850}, + {0x70000, 0x09F1D2C3}, + {0x70001, 0x00554026}, + {0x70002, 0x00006750}, + {0x70100, 0x0CF5DACC}, + {0x70101, 0x00563E25}, + {0x70102, 0x00006750}, + {0x2000000, 0x02E4C4A0}, + {0x2000001, 0x006A4828}, + {0x2000100, 0x02E4C5A1}, + {0x2000101, 0x00664629}, + {0x2010000, 0x05EBC8AF}, + {0x2010001, 0x00543D24}, + {0x2010100, 0x07ECC9B0}, + {0x2010101, 0x005B4126}, + {0x2020000, 0x05EDCCB2}, + {0x2020001, 0x004D361C}, + {0x2020100, 0x06ECCBB2}, + {0x2020101, 0x00553D22}, + {0x2030000, 0x02ECCCB3}, + {0x2030001, 0x00483118}, + {0x2030100, 0x04ECCCB2}, + {0x2030101, 0x004F381C}, + {0x3000000, 0x00000000}, + {0x3000001, 0x00000000}, + {0x3000002, 0x00000000}, + {0x3000003, 0x00000000}, + {0x3000100, 0x00000000}, + {0x3000101, 0x00000000}, + {0x3000102, 0x00000000}, + {0x3000103, 0x00000000}, + {0x3010000, 0x0E0CFB0A}, + {0x3010001, 0x00100F06}, + {0x3010002, 0x34333333}, + {0x3010003, 0x3434343C}, + {0x3010100, 0x0E0CFB0A}, + {0x3010101, 0x00100F06}, + {0x3010102, 0x34333333}, + {0x3010103, 0x3434343C}, + {0x3020000, 0x0E0CFB0A}, + {0x3020001, 0x00100F06}, + {0x3020002, 0x34333333}, + {0x3020003, 0x3434343C}, + {0x3020100, 0x0E0CFB0A}, + {0x3020101, 0x00100F06}, + {0x3020102, 0x34333333}, + {0x3020103, 0x3434343C}, + {0x3030000, 0x0E0CFB0A}, + {0x3030001, 0x00100F06}, + {0x3030002, 
0x34333333}, + {0x3030003, 0x3434343C}, + {0x3030100, 0x0E0CFB0A}, + {0x3030101, 0x00100F06}, + {0x3030102, 0x34333333}, + {0x3030103, 0x3434343C}, + {0x3040000, 0x0E0CFB0A}, + {0x3040001, 0x00100F06}, + {0x3040002, 0x343B3333}, + {0x3040003, 0x34343C3C}, + {0x3040100, 0x0E0CFB0A}, + {0x3040101, 0x00100F06}, + {0x3040102, 0x343B3333}, + {0x3040103, 0x34343C3C}, + {0x3050000, 0x0E0CFB0A}, + {0x3050001, 0x00100F06}, + {0x3050002, 0x343B3333}, + {0x3050003, 0x34343C3C}, + {0x3050100, 0x0E0CFB0A}, + {0x3050101, 0x00100F06}, + {0x3050102, 0x343B3333}, + {0x3050103, 0x34343C3C}, + {0x3060000, 0x0E0CFB0A}, + {0x3060001, 0x00100F06}, + {0x3060002, 0x3C3B3333}, + {0x3060003, 0x34343C3C}, + {0x3060100, 0x0E0CFB0A}, + {0x3060101, 0x00100F06}, + {0x3060102, 0x3C3B3333}, + {0x3060103, 0x34343C3C}, + {0x3070000, 0x0E0CFB0A}, + {0x3070001, 0x00100F06}, + {0x3070002, 0x3C3B3333}, + {0x3070003, 0x34343C3C}, + {0x3070100, 0x0E0CFB0A}, + {0x3070101, 0x00100F06}, + {0x3070102, 0x3C3B3333}, + {0x3070103, 0x34343C3C}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = { + {0xF0010000, 0x00000000}, + {0xF0020000, 0x00000001}, + {0xF0320000, 0x00000002}, + {0xF0330000, 0x00000003}, + {0xF0340000, 0x00000004}, + {0xF0350000, 0x00000005}, + {0xF0360000, 0x00000006}, + {0xF0010001, 0x00000007}, + {0xF0020001, 0x00000008}, + {0xF0320001, 0x00000009}, + {0xF0330001, 0x0000000A}, + {0xF0340001, 0x0000000B}, + {0xF0350001, 0x0000000C}, + {0xF0360001, 0x0000000D}, + {0xF03F0001, 0x0000000E}, + {0xF0400001, 0x0000000F}, + {0x005, 0x00000000}, + {0x10005, 0x00000000}, + {0x000, 0x00030001}, + {0x10000, 0x00030000}, + {0x018, 0x00011124}, + {0x10018, 0x00011124}, + {0x0EF, 0x00080000}, + {0x033, 0x00000001}, + {0x03E, 0x00000620}, + {0x03F, 0x0000020C}, + {0x0EF, 0x00000000}, + {0x05F, 0x00000032}, + {0x097, 0x00043200}, + {0x0A6, 0x00066DB7}, + {0x0EF, 0x00004000}, + {0x033, 0x00000005}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x00000003}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x00000002}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x033, 0x0000000D}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x0000000B}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x0000000A}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x0EF, 0x00000000}, + {0x000, 0x00033C01}, + {0x10000, 0x00033C00}, + {0x01A, 0x00040004}, + {0x0FE, 0x00000000}, + {0x096, 0x00015200}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + 
{0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0xA0000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0xB0000000, 0x00000000}, + {0x057, 0x0000D589}, + {0x05A, 0x0007FFFF}, + {0x043, 0x00005000}, + {0x0B5, 0x00001720}, + {0x0ED, 0x00000080}, + {0x033, 0x00000000}, + {0x03E, 0x00013FAB}, + {0x03F, 0x000FD800}, + {0x033, 0x00000010}, + {0x03E, 0x00013FAB}, + {0x03F, 0x000FD800}, + {0x033, 0x00000020}, + {0x03E, 0x00013FAB}, + {0x03F, 0x000FD800}, + {0x0ED, 0x00000000}, + {0x0ED, 0x00000200}, + {0x033, 0x00000000}, + {0x03F, 0x000000FA}, + {0x033, 0x00000001}, + {0x03F, 0x000000F2}, + {0x033, 0x00000002}, + {0x03F, 0x000000EA}, + {0x033, 0x00000003}, + {0x03F, 0x000000E2}, + {0x033, 0x00000004}, + {0x03F, 0x000000DA}, + {0x033, 0x00000005}, + {0x03F, 0x000000D2}, + {0x033, 0x00000006}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000CC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000CA}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000007}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90350001, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000C4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000C2}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000008}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000009}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90360000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000000A8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000098}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000D}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x03F, 0x00000098}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000098}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000088}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000000F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90350001, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000088}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000010}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000BC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000011}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000B4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000B0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000012}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90360000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000AC}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A8}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000013}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000000A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000000A0}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000014}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000098}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000098}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x00000090}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000090}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000094}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000090}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000088}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000008C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000088}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000017}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000080}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + 
{0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000084}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000080}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000018}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000038}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000003C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000038}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000019}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000030}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000034}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000030}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001A}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90360000, 
0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000028}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000002C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000028}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000020}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000024}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000020}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001C}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000018}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000001C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000018}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001D}, + {0x80010000, 0x00000000}, 
{0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000010}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000014}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000010}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001E}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000008}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000000C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000008}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000001F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000004}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000004}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000000}, + {0xB0000000, 0x00000000}, + {0x0ED, 0x00000000}, + {0x0B9, 0x00020440}, + {0x018, 0x00001001}, + {0x10018, 0x00001001}, + {0x002, 0x0000000D}, + {0x10002, 0x0000000D}, + {0x0EE, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x0EE, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0xA0000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0xB0000000, 0x00000000}, + {0x0EB, 0x00000000}, + {0x030, 0x000109B0}, + {0x030, 0x000189B0}, + {0x0EB, 0x00000000}, + {0x0EE, 0x00000010}, + {0x033, 0x00000006}, + {0x03F, 0x00000003}, + {0x033, 0x00000007}, + {0x03F, 0x00000003}, + {0x033, 0x00000008}, + {0x03F, 0x00000001}, + {0x0EE, 0x00000000}, + {0x0EF, 0x00001000}, + {0x033, 0x00000000}, + {0x03F, 0x00000015}, + {0x033, 0x00000001}, + {0x03F, 0x00000017}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00060001}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00000001}, + {0x033, 0x00000025}, + {0x03F, 0x00000002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00060001}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00000001}, + {0x033, 0x0000002D}, + {0x03F, 0x00000002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00060001}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00000001}, + {0x033, 0x00000035}, + {0x03F, 0x00000002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, 
+ {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00060001}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00000001}, + {0x033, 0x00000065}, + {0x03F, 0x00000002}, + {0x033, 0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00060001}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00000001}, + {0x033, 0x0000006D}, + {0x03F, 0x00000002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + {0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00060001}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00000001}, + {0x033, 0x00000075}, + {0x03F, 0x00000002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00060001}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00000001}, + {0x033, 0x0000007D}, + {0x03F, 0x00000002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00060001}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00000001}, + {0x033, 0x000000A5}, + {0x03F, 0x00000002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00060001}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00000001}, + {0x033, 0x000000AD}, + {0x03F, 0x00000002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00060001}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, + {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00000001}, + {0x033, 0x000000B5}, + {0x03F, 0x00000002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00060001}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00000001}, + {0x033, 0x000000E5}, + {0x03F, 0x00000002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00060001}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00000001}, + {0x033, 0x000000ED}, + {0x03F, 0x00000002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00060001}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 
0x00000001}, + {0x033, 0x000000F5}, + {0x03F, 0x00000002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00060001}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, + {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00000001}, + {0x033, 0x000000FD}, + {0x03F, 0x00000002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00060001}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 0x00000001}, + {0x033, 0x00000125}, + {0x03F, 0x00000002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00060001}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00000001}, + {0x033, 0x0000012D}, + {0x03F, 0x00000002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00060001}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00000001}, + {0x033, 0x00000135}, + {0x03F, 0x00000002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00060001}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00000001}, + {0x033, 0x00000165}, + {0x03F, 0x00000002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00060001}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00000001}, + {0x033, 0x0000016D}, + {0x03F, 0x00000002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00060001}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00000001}, + {0x033, 0x00000175}, + {0x03F, 0x00000002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00060001}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00000001}, + {0x033, 0x0000017D}, + {0x03F, 0x00000002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00060001}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00000001}, + {0x033, 0x000001A5}, + {0x03F, 0x00000002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00060001}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, 
+ {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00000001}, + {0x033, 0x000001AD}, + {0x03F, 0x00000002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 0x00060001}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00000001}, + {0x033, 0x000001B5}, + {0x03F, 0x00000002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00060001}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, + {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00000001}, + {0x033, 0x000001E5}, + {0x03F, 0x00000002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00060001}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00000001}, + {0x033, 0x000001ED}, + {0x03F, 0x00000002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00060001}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00000001}, + {0x033, 0x000001F5}, + {0x03F, 0x00000002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00060001}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00000001}, + {0x033, 0x000001FD}, + {0x03F, 0x00000002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000100}, + {0x033, 0x00000001}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000002}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000003}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000004}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000005}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000006}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000007}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000008}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000009}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000B}, + {0x03F, 0x0000AFFF}, + {0x033, 0x0000000C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000010}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000011}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000012}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000013}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000014}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0000E3FF}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000017}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000018}, + {0x03F, 0x0000FBFF}, + {0x033, 0x00000019}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000020}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000021}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000022}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000023}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000024}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000025}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000026}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000027}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000028}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000029}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000030}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000031}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000032}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000033}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000034}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000035}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000036}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000037}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000038}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000039}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000003A}, + {0x03F, 0x0000EFFF}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000040}, + {0x033, 0x00000000}, + {0x03F, 0x00004344}, + {0x033, 0x00000001}, + {0x03F, 0x00004344}, + {0x033, 0x00000002}, + {0x03F, 0x00004344}, + {0x033, 0x00000003}, + {0x03F, 0x00004344}, + {0x033, 0x00000004}, + {0x03F, 0x00004344}, + {0x033, 0x00000005}, + {0x03F, 0x00004344}, + {0x033, 0x00000006}, + {0x03F, 0x00004324}, + {0x033, 0x00000007}, + {0x03F, 0x00004344}, + {0x033, 0x00000008}, + {0x03F, 0x00004344}, + {0x033, 0x00000009}, + {0x03F, 0x00004344}, + {0x033, 0x0000000A}, + {0x03F, 0x00004344}, + {0x033, 0x0000000B}, + {0x03F, 0x00004344}, + {0x033, 0x00000010}, + {0x03F, 0x00004344}, + {0x033, 0x00000011}, + {0x03F, 0x00004344}, + {0x033, 0x00000012}, + {0x03F, 0x00004344}, + {0x033, 0x00000013}, + {0x03F, 0x00004344}, + {0x033, 0x00000014}, + {0x03F, 0x00004344}, + {0x033, 0x00000015}, + {0x03F, 0x00004344}, + {0x033, 0x00000016}, + {0x03F, 0x00004344}, + {0x033, 0x00000017}, + {0x03F, 0x00004344}, + {0x033, 0x00000018}, + 
{0x03F, 0x00004344}, + {0x033, 0x00000019}, + {0x03F, 0x00004344}, + {0x033, 0x0000001A}, + {0x03F, 0x00004344}, + {0x033, 0x0000001B}, + {0x03F, 0x00004344}, + {0x033, 0x0000001C}, + {0x03F, 0x00004344}, + {0x033, 0x0000001D}, + {0x03F, 0x00004344}, + {0x033, 0x0000001E}, + {0x03F, 0x00004344}, + {0x033, 0x0000001F}, + {0x03F, 0x00004344}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000020}, + {0x033, 0x00000010}, + {0x03F, 0x00000200}, + {0x033, 0x00000011}, + {0x03F, 0x00000200}, + {0x033, 0x00000012}, + {0x03F, 0x00000200}, + {0x033, 0x00000013}, + {0x03F, 0x00000200}, + {0x033, 0x00000020}, + {0x03F, 0x00000200}, + {0x033, 0x00000021}, + {0x03F, 0x00000200}, + {0x033, 0x00000022}, + {0x03F, 0x00000200}, + {0x033, 0x00000023}, + {0x03F, 0x00000200}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000010}, + {0x030, 0x000084DC}, + {0x030, 0x000103C9}, + {0x030, 0x00018399}, + {0x030, 0x00020287}, + {0x030, 0x00028277}, + {0x030, 0x00030165}, + {0x030, 0x00038144}, + {0x030, 0x00040044}, + {0x030, 0x00048022}, + {0x030, 0x00050011}, + {0x030, 0x00058000}, + {0x030, 0x00060000}, + {0x030, 0x00068000}, + {0x030, 0x00070000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000080}, + {0x033, 0x00000004}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000005}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000006}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x00000007}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x00000008}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000009}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x0000000A}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x0000000B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x0000000C}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x0000000D}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x0000000E}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x0000000F}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x00000010}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000011}, + {0x03E, 0x0000001B}, + {0x03F, 0x00023C58}, + {0x033, 0x00000012}, + {0x03E, 0x00000014}, + {0x03F, 0x00021C58}, + {0x033, 0x00000013}, + {0x03E, 0x00000014}, + {0x03F, 0x00022B58}, + {0x033, 0x00000014}, + {0x03E, 0x00000013}, + {0x03F, 0x00023C58}, + {0x033, 0x00000015}, + {0x03E, 0x0000001B}, + {0x03F, 0x00025A58}, + {0x033, 0x00000016}, + {0x03E, 0x0000001C}, + {0x03F, 0x00021C58}, + {0x033, 0x00000017}, + {0x03E, 0x00000014}, + {0x03F, 0x00022A58}, + {0x033, 0x00000018}, + {0x03E, 0x00000013}, + {0x03F, 0x00025A58}, + {0x033, 0x00000019}, + {0x03E, 0x0000001B}, + {0x03F, 0x00025A58}, + {0x033, 0x0000001A}, + {0x03E, 0x00000014}, + {0x03F, 0x00022A58}, + {0x033, 0x0000001B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022A58}, + {0x033, 0x0000001C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000001D}, + {0x03E, 0x0000001B}, + {0x03F, 0x00025A58}, + {0x033, 0x0000001E}, + {0x03E, 0x00000013}, + {0x03F, 0x00021E58}, + {0x033, 0x0000001F}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000020}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000021}, + {0x03E, 0x0000001C}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000022}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000023}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000024}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000025}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000026}, + {0x03E, 
0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000027}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000028}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000029}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000002A}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000002B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x0000002C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000002D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000002E}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000002F}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000030}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000031}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000032}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000033}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000034}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000035}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000036}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x00000037}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x00000038}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x00000039}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000003A}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000003B}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x033, 0x0000003C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000003D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002CD58}, + {0x033, 0x0000003E}, + {0x03E, 0x00000014}, + {0x03F, 0x00021E58}, + {0x033, 0x0000003F}, + {0x03E, 0x00000014}, + {0x03F, 0x00022D58}, + {0x0EF, 0x00000000}, + {0x0EE, 0x00000800}, + {0x033, 0x00000000}, + {0x03F, 0x00000031}, + {0x033, 0x00000001}, + {0x03F, 0x00000023}, + {0x033, 0x00000002}, + {0x03F, 0x00000015}, + {0x033, 0x00000003}, + {0x03F, 0x00000007}, + {0x0EE, 0x00000000}, + {0x0EC, 0x00000400}, + {0x033, 0x00000003}, + {0x03F, 0x00000030}, + {0x033, 0x00000004}, + {0x03F, 0x00000021}, + {0x0EC, 0x00000000}, + {0x0DE, 0x00000000}, + {0x0EF, 0x00000000}, + {0x033, 0x00000000}, + {0x008, 0x00060280}, + {0x009, 0x00030400}, + {0x0EF, 0x00000000}, + {0x0A7, 0x00080308}, + {0x066, 0x00006000}, + {0x0EF, 0x00000400}, + {0x030, 0x000001FF}, + {0x030, 0x000081FF}, + {0x030, 0x000101FF}, + {0x030, 0x000181FF}, + {0x030, 0x000201FF}, + {0x030, 0x000281FF}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x0003017F}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x030, 0x000300FF}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x030, 0x000300FF}, + {0xA0000000, 0x00000000}, + {0x030, 0x0003017F}, + {0xB0000000, 0x00000000}, + {0x030, 0x000380FB}, + {0x0EF, 0x00000000}, + {0x06E, 0x00077A18}, + {0x06D, 0x00000C31}, + {0x06A, 0x000E0F8A}, + {0x06B, 0x000018A0}, + {0x06F, 0x000F81FC}, + {0x05E, 0x0000001F}, + {0x0EF, 0x00000200}, + {0x030, 0x0003D407}, + {0x030, 0x00035A87}, + {0x030, 0x0002CF07}, + {0x030, 0x00024F07}, + {0x030, 0x0001CF07}, + {0x030, 0x00014F07}, + {0x030, 0x0000CF07}, + {0x030, 0x00004F07}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00080000}, + {0x030, 0x00008038}, + {0x030, 0x00010038}, + {0x030, 0x00018038}, + {0x030, 0x00020038}, + {0x030, 0x00028038}, + {0x030, 0x00030038}, + {0x030, 0x0003803C}, + {0x030, 0x0004003C}, + {0x030, 0x0004803C}, + {0x030, 0x0005003C}, + {0x030, 0x0005803C}, + {0x030, 0x0006003C}, + {0x030, 0x0006803C}, + {0x030, 0x0007003C}, + {0x0EB, 0x00000000}, + {0x094, 0x000000FC}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0xA0000000, 0x00000000}, + {0x095, 0x00000000}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00001000}, + {0x033, 0x00000020}, + {0x03F, 0x00000052}, + {0x033, 0x00000024}, + {0x03F, 0x0000005A}, + {0x033, 0x00000028}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000030}, + {0x03F, 0x000001A4}, + {0x033, 0x00000034}, + {0x03F, 0x000001E7}, + {0x033, 0x00000038}, + {0x03F, 0x000002E7}, + {0x033, 0x0000003C}, + {0x03F, 0x000003E7}, + {0x033, 0x00000021}, + {0x03F, 0x00000052}, + {0x033, 0x00000025}, + {0x03F, 0x0000005A}, + {0x033, 0x00000029}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000031}, + {0x03F, 0x000001A4}, + {0x033, 0x00000035}, + {0x03F, 0x000001E6}, + {0x033, 0x00000039}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000022}, + {0x03F, 0x00000052}, + {0x033, 0x00000026}, + {0x03F, 0x0000005A}, + {0x033, 0x0000002A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000032}, + {0x03F, 0x000001A4}, + {0x033, 0x00000036}, + {0x03F, 0x000001E6}, + {0x033, 0x0000003A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000060}, + {0x03F, 0x00000052}, + {0x033, 
0x00000064}, + {0x03F, 0x0000005A}, + {0x033, 0x00000068}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000070}, + {0x03F, 0x000001A4}, + {0x033, 0x00000074}, + {0x03F, 0x000001E6}, + {0x033, 0x00000078}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007C}, + {0x03F, 0x000003E6}, + {0x033, 0x00000061}, + {0x03F, 0x00000052}, + {0x033, 0x00000065}, + {0x03F, 0x0000005A}, + {0x033, 0x00000069}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000071}, + {0x03F, 0x000001A4}, + {0x033, 0x00000075}, + {0x03F, 0x000001E6}, + {0x033, 0x00000079}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000062}, + {0x03F, 0x00000052}, + {0x033, 0x00000066}, + {0x03F, 0x0000005A}, + {0x033, 0x0000006A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000072}, + {0x03F, 0x000001A4}, + {0x033, 0x00000076}, + {0x03F, 0x000001E6}, + {0x033, 0x0000007A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000052}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0000015A}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + 
{0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000077}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007B}, + {0x03F, 0x000002E7}, + {0x033, 0x0000007F}, + {0x03F, 0x000003E7}, + {0x0EE, 0x00000000}, + {0x100EE, 0x00004000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + 
{0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + 
{0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + 
{0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + 
{0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + 
{0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + 
{0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + 
{0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + 
{0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + 
{0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + 
{0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + 
{0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + 
{0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + 
{0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + 
{0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + 
{0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + 
{0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + 
{0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xA0000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + 
{0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xB0000000, 0x00000000}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000000FC}, + {0x10030, 0x000004F9}, + {0x10030, 0x000008F6}, + {0x10030, 0x00000CF3}, + {0x10030, 0x000010F0}, + {0x10030, 0x000014ED}, + {0x10030, 0x000018AC}, + {0x10030, 0x00001CA9}, + {0x10030, 0x00002069}, + {0x10030, 0x00002466}, + {0x10030, 0x00002829}, + {0x10030, 0x00002C26}, + {0x10030, 0x00003023}, + {0x10030, 0x00003420}, + {0x10030, 0x0000381D}, + {0x10030, 0x00003C1A}, + {0x10030, 0x00004017}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000780F4}, + {0x10030, 0x000784F1}, + {0x10030, 0x000788EE}, + {0x10030, 0x00078CEB}, + {0x10030, 0x000790E8}, + {0x10030, 0x000794E5}, + {0x10030, 0x000798E2}, + {0x10030, 0x00079CDF}, + {0x10030, 0x0007A0DC}, + {0x10030, 0x0007A4D9}, + {0x10030, 0x0007A8D6}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B0D0}, + {0x10030, 0x0007B4CD}, + {0x10030, 0x0007B8CA}, + {0x10030, 0x0007BC07}, + {0x10030, 0x0007C004}, + {0x100EE, 0x00000000}, + {0x0EF, 0x00002000}, + {0x033, 0x00000008}, + {0x03F, 0x00000004}, + {0x033, 0x00000009}, + {0x03F, 0x00000003}, + {0x033, 0x0000000A}, + {0x03F, 0x00000003}, + {0x033, 0x0000000B}, + {0x03F, 0x00000002}, + {0x033, 0x0000000C}, + {0x03F, 0x00000002}, + {0x033, 0x0000000D}, + {0x03F, 0x00000002}, + {0x033, 0x0000000E}, + {0x03F, 0x00000002}, + {0x033, 0x0000000F}, + {0x03F, 0x00000002}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00040000}, + {0x030, 0x000109B7}, + {0x0EB, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00050002}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00008001}, + {0x033, 0x00000025}, + {0x03F, 0x00008002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00050002}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00008001}, + {0x033, 0x0000002D}, + {0x03F, 0x00008002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00050002}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00008001}, + {0x033, 0x00000035}, + {0x03F, 0x00008002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, + {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00050002}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00008001}, + {0x033, 0x00000065}, + {0x03F, 0x00008002}, + {0x033, 0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00050002}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00008001}, + {0x033, 0x0000006D}, + {0x03F, 0x00008002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + 
{0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00050002}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00008001}, + {0x033, 0x00000075}, + {0x03F, 0x00008002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00050002}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00008001}, + {0x033, 0x0000007D}, + {0x03F, 0x00008002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00050002}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00008001}, + {0x033, 0x000000A5}, + {0x03F, 0x00008002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00050002}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00008001}, + {0x033, 0x000000AD}, + {0x03F, 0x00008002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00050002}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, + {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00008001}, + {0x033, 0x000000B5}, + {0x03F, 0x00008002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00050002}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00008001}, + {0x033, 0x000000E5}, + {0x03F, 0x00008002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00050002}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00008001}, + {0x033, 0x000000ED}, + {0x03F, 0x00008002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00050002}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 0x00008001}, + {0x033, 0x000000F5}, + {0x03F, 0x00008002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00050002}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, + {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00008001}, + {0x033, 0x000000FD}, + {0x03F, 0x00008002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00050002}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 
0x00008001}, + {0x033, 0x00000125}, + {0x03F, 0x00008002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00050002}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00008001}, + {0x033, 0x0000012D}, + {0x03F, 0x00008002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00050002}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00008001}, + {0x033, 0x00000135}, + {0x03F, 0x00008002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00050002}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00008001}, + {0x033, 0x00000165}, + {0x03F, 0x00008002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00050002}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00008001}, + {0x033, 0x0000016D}, + {0x03F, 0x00008002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00050002}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00008001}, + {0x033, 0x00000175}, + {0x03F, 0x00008002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00050002}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00008001}, + {0x033, 0x0000017D}, + {0x03F, 0x00008002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00050002}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00008001}, + {0x033, 0x000001A5}, + {0x03F, 0x00008002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00050002}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, + {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00008001}, + {0x033, 0x000001AD}, + {0x03F, 0x00008002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 0x00050002}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00008001}, + {0x033, 0x000001B5}, + {0x03F, 0x00008002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00050002}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, 
+ {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00008001}, + {0x033, 0x000001E5}, + {0x03F, 0x00008002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00050002}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00008001}, + {0x033, 0x000001ED}, + {0x03F, 0x00008002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00050002}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00008001}, + {0x033, 0x000001F5}, + {0x03F, 0x00008002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00050002}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00008001}, + {0x033, 0x000001FD}, + {0x03F, 0x00008002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x005, 0x00000001}, + {0x10005, 0x00000001}, + {0x100EE, 0x00000400}, + {0x10030, 0x00000000}, + {0x10030, 0x00001000}, + {0x10030, 0x00002000}, + {0x10030, 0x00003000}, + {0x10030, 0x00004000}, + {0x10030, 0x00005000}, + {0x10030, 0x00006003}, + {0x10030, 0x00007003}, + {0x10030, 0x00008000}, + {0x10030, 0x00009000}, + {0x10030, 0x0000A000}, + {0x10030, 0x0000B000}, + {0x10030, 0x0000C000}, + {0x10030, 0x0000D000}, + {0x10030, 0x0000E003}, + {0x10030, 0x0000F003}, + {0x10030, 0x00010000}, + {0x10030, 0x00011000}, + {0x10030, 0x00012000}, + {0x10030, 0x00013000}, + {0x10030, 0x00014000}, + {0x10030, 0x00015000}, + {0x10030, 0x00016003}, + {0x10030, 0x00017003}, + {0x10030, 0x00018000}, + {0x10030, 0x00019000}, + {0x10030, 0x0001A000}, + {0x10030, 0x0001B000}, + {0x10030, 0x0001C000}, + {0x10030, 0x0001D000}, + {0x10030, 0x0001E003}, + {0x10030, 0x0001F003}, + {0x10030, 0x00020000}, + {0x10030, 0x00021000}, + {0x10030, 0x00022000}, + {0x10030, 0x00023000}, + {0x10030, 0x00024000}, + {0x10030, 0x00025000}, + {0x10030, 0x00026003}, + {0x10030, 0x00027003}, + {0x10030, 0x00028000}, + {0x10030, 0x00029000}, + {0x10030, 0x0002A000}, + {0x10030, 0x0002B000}, + {0x10030, 0x0002C000}, + {0x10030, 0x0002D000}, + {0x10030, 0x0002E003}, + {0x10030, 0x0002F003}, + {0x10030, 0x00030000}, + {0x10030, 0x00031000}, + {0x10030, 0x00032000}, + {0x10030, 0x00033000}, + {0x10030, 0x00034000}, + {0x10030, 0x00035000}, + {0x10030, 0x00036003}, + {0x10030, 0x00037003}, + {0x10030, 0x00038000}, + {0x10030, 0x00039000}, + {0x10030, 0x0003A000}, + {0x10030, 0x0003B000}, + {0x10030, 0x0003C000}, + {0x10030, 0x0003D000}, + {0x10030, 0x0003E003}, + {0x10030, 0x0003F003}, + {0x10030, 0x00060000}, + {0x10030, 0x00061000}, + {0x10030, 0x00062000}, + {0x10030, 0x00063000}, + {0x10030, 0x00064000}, + {0x10030, 0x00065000}, + {0x10030, 0x00066000}, + {0x10030, 0x00067003}, + {0x10030, 0x00068000}, + {0x10030, 0x00069000}, + {0x10030, 0x0006A000}, + {0x10030, 0x0006B000}, + {0x10030, 0x0006C000}, + {0x10030, 0x0006D000}, + {0x10030, 0x0006E000}, + {0x10030, 0x0006F003}, + {0x10030, 0x00070000}, + {0x10030, 0x00071000}, + {0x10030, 0x00072000}, + 
{0x10030, 0x00073000}, + {0x10030, 0x00074000}, + {0x10030, 0x00075000}, + {0x10030, 0x00076000}, + {0x10030, 0x00077003}, + {0x10030, 0x00078000}, + {0x10030, 0x00079000}, + {0x10030, 0x0007A000}, + {0x10030, 0x0007B000}, + {0x10030, 0x0007C000}, + {0x10030, 0x0007D000}, + {0x10030, 0x0007E000}, + {0x10030, 0x0007F003}, + {0x100EE, 0x00000000}, + {0x0FE, 0x00000031}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = { + {0xF0010000, 0x00000000}, + {0xF0020000, 0x00000001}, + {0xF0320000, 0x00000002}, + {0xF0330000, 0x00000003}, + {0xF0340000, 0x00000004}, + {0xF0350000, 0x00000005}, + {0xF0360000, 0x00000006}, + {0xF0010001, 0x00000007}, + {0xF0020001, 0x00000008}, + {0xF0320001, 0x00000009}, + {0xF0330001, 0x0000000A}, + {0xF0340001, 0x0000000B}, + {0xF0350001, 0x0000000C}, + {0xF0360001, 0x0000000D}, + {0xF03F0001, 0x0000000E}, + {0xF0400001, 0x0000000F}, + {0x005, 0x00000000}, + {0x10005, 0x00000000}, + {0x0B9, 0x00020440}, + {0x000, 0x00030001}, + {0x10000, 0x00030000}, + {0x018, 0x00011124}, + {0x10018, 0x00011124}, + {0x05F, 0x00000032}, + {0x097, 0x00043200}, + {0x0A6, 0x00066DB7}, + {0x0EF, 0x00004000}, + {0x033, 0x00000005}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x00000003}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x00000002}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x033, 0x0000000D}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x0000000B}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x0000000A}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x033, 0x00000015}, + {0x03E, 0x00000000}, + {0x03F, 0x00010500}, + {0x033, 0x00000013}, + {0x03E, 0x00000000}, + {0x03F, 0x00028B00}, + {0x033, 0x00000012}, + {0x03E, 0x00000000}, + {0x03F, 0x0009AB00}, + {0x0EF, 0x00000000}, + {0x000, 0x00033C01}, + {0x10000, 0x00033C00}, + {0x01A, 0x00040004}, + {0x0FE, 0x00000000}, + {0x096, 0x00015200}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0004D000}, + {0x0DA, 0x000D4009}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x067, 0x0000D300}, + {0x0DA, 0x000D4000}, + {0xA0000000, 0x00000000}, + {0x067, 
0x0004D000}, + {0x0DA, 0x000D4009}, + {0xB0000000, 0x00000000}, + {0x057, 0x0000D589}, + {0x05A, 0x0007FFFF}, + {0x043, 0x00005000}, + {0x018, 0x00001001}, + {0x10018, 0x00001001}, + {0x002, 0x0000000D}, + {0x10002, 0x0000000D}, + {0x0EE, 0x00000000}, + {0x033, 0x0000000B}, + {0x03F, 0x0000000B}, + {0x033, 0x0000000C}, + {0x03F, 0x00000012}, + {0x033, 0x0000000D}, + {0x03F, 0x00000019}, + {0x0EE, 0x00000000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x08F, 0x000D1352}, + {0xA0000000, 0x00000000}, + {0x08F, 0x000D1752}, + {0xB0000000, 0x00000000}, + {0x0EB, 0x00000000}, + {0x030, 0x000109B0}, + {0x030, 0x000189B0}, + {0x0EB, 0x00000000}, + {0x0EE, 0x00000010}, + {0x033, 0x00000006}, + {0x03F, 0x00000003}, + {0x033, 0x00000007}, + {0x03F, 0x00000003}, + {0x033, 0x00000008}, + {0x03F, 0x00000001}, + {0x0EE, 0x00000000}, + {0x0EF, 0x00001000}, + {0x033, 0x00000000}, + {0x03F, 0x00000015}, + {0x033, 0x00000001}, + {0x03F, 0x00000017}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00060001}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00000001}, + {0x033, 0x00000025}, + {0x03F, 0x00000002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00060001}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00000001}, + {0x033, 0x0000002D}, + {0x03F, 0x00000002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00060001}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00000001}, + {0x033, 0x00000035}, + {0x03F, 0x00000002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, + {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00060001}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00000001}, + {0x033, 0x00000065}, + {0x03F, 0x00000002}, + {0x033, 
0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00060001}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00000001}, + {0x033, 0x0000006D}, + {0x03F, 0x00000002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + {0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00060001}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00000001}, + {0x033, 0x00000075}, + {0x03F, 0x00000002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00060001}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00000001}, + {0x033, 0x0000007D}, + {0x03F, 0x00000002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00060001}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00000001}, + {0x033, 0x000000A5}, + {0x03F, 0x00000002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00060001}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00000001}, + {0x033, 0x000000AD}, + {0x03F, 0x00000002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00060001}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, + {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00000001}, + {0x033, 0x000000B5}, + {0x03F, 0x00000002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00060001}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00000001}, + {0x033, 0x000000E5}, + {0x03F, 0x00000002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00060001}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00000001}, + {0x033, 0x000000ED}, + {0x03F, 0x00000002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00060001}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 0x00000001}, + {0x033, 0x000000F5}, + {0x03F, 0x00000002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00060001}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, 
+ {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00000001}, + {0x033, 0x000000FD}, + {0x03F, 0x00000002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00060001}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 0x00000001}, + {0x033, 0x00000125}, + {0x03F, 0x00000002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00060001}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00000001}, + {0x033, 0x0000012D}, + {0x03F, 0x00000002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00060001}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00000001}, + {0x033, 0x00000135}, + {0x03F, 0x00000002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00060001}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00000001}, + {0x033, 0x00000165}, + {0x03F, 0x00000002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00060001}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00000001}, + {0x033, 0x0000016D}, + {0x03F, 0x00000002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00060001}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00000001}, + {0x033, 0x00000175}, + {0x03F, 0x00000002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00060001}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00000001}, + {0x033, 0x0000017D}, + {0x03F, 0x00000002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00060001}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00000001}, + {0x033, 0x000001A5}, + {0x03F, 0x00000002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00060001}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, + {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00000001}, + {0x033, 0x000001AD}, + {0x03F, 0x00000002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 
0x00060001}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00000001}, + {0x033, 0x000001B5}, + {0x03F, 0x00000002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00060001}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, + {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00000001}, + {0x033, 0x000001E5}, + {0x03F, 0x00000002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00060001}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00000001}, + {0x033, 0x000001ED}, + {0x03F, 0x00000002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00060001}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00000001}, + {0x033, 0x000001F5}, + {0x03F, 0x00000002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00060001}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00000001}, + {0x033, 0x000001FD}, + {0x03F, 0x00000002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000100}, + {0x033, 0x00000001}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000002}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000003}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000004}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000005}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000006}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000007}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000008}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000009}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000B}, + {0x03F, 0x0000AFFF}, + {0x033, 0x0000000C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000000F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000010}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000011}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000012}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000013}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000014}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000015}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x0000EFFF}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000EFFF}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000E3FF}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000016}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000017}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000018}, + {0x03F, 0x0000FBFF}, + {0x033, 0x00000019}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000001F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000020}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000021}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000022}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000023}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000024}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000025}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000026}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000027}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000028}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000029}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002A}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002B}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002C}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002D}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002E}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000002F}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000030}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000031}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000032}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000033}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000034}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000035}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000036}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000037}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000038}, + {0x03F, 0x0000EFFF}, + {0x033, 0x00000039}, + {0x03F, 0x0000EFFF}, + {0x033, 0x0000003A}, + {0x03F, 0x0000EFFF}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000040}, + {0x033, 0x00000000}, + {0x03F, 0x00004344}, + {0x033, 0x00000001}, + {0x03F, 0x00004344}, + {0x033, 0x00000002}, + {0x03F, 0x00004344}, + {0x033, 0x00000003}, + {0x03F, 0x00004344}, + {0x033, 0x00000004}, + {0x03F, 0x00004344}, + {0x033, 0x00000005}, + {0x03F, 0x00004344}, + {0x033, 0x00000006}, + {0x03F, 0x00004324}, + {0x033, 0x00000007}, + {0x03F, 0x00004344}, + {0x033, 0x00000008}, + {0x03F, 0x00004344}, + {0x033, 0x00000009}, + {0x03F, 0x00004344}, + {0x033, 0x0000000A}, + {0x03F, 0x00004344}, + {0x033, 0x0000000B}, + {0x03F, 0x00004344}, + {0x033, 0x00000010}, + {0x03F, 0x00004344}, + {0x033, 0x00000011}, + {0x03F, 0x00004344}, + {0x033, 0x00000012}, + {0x03F, 0x00004344}, + {0x033, 0x00000013}, + {0x03F, 0x00004344}, + {0x033, 0x00000014}, + {0x03F, 0x00004344}, + {0x033, 0x00000015}, + {0x03F, 0x00004344}, + {0x033, 0x00000016}, + {0x03F, 0x00004344}, + {0x033, 0x00000017}, + {0x03F, 0x00004344}, + {0x033, 0x00000018}, + {0x03F, 0x00004344}, + {0x033, 0x00000019}, + {0x03F, 0x00004344}, + {0x033, 0x0000001A}, + {0x03F, 0x00004344}, + {0x033, 0x0000001B}, + {0x03F, 0x00004344}, + {0x033, 0x0000001C}, + {0x03F, 0x00004344}, + {0x033, 0x0000001D}, + {0x03F, 0x00004344}, + {0x033, 0x0000001E}, + {0x03F, 0x00004344}, + {0x033, 
0x0000001F}, + {0x03F, 0x00004344}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000020}, + {0x033, 0x00000010}, + {0x03F, 0x00000200}, + {0x033, 0x00000011}, + {0x03F, 0x00000200}, + {0x033, 0x00000012}, + {0x03F, 0x00000200}, + {0x033, 0x00000013}, + {0x03F, 0x00000200}, + {0x033, 0x00000020}, + {0x03F, 0x00000200}, + {0x033, 0x00000021}, + {0x03F, 0x00000200}, + {0x033, 0x00000022}, + {0x03F, 0x00000200}, + {0x033, 0x00000023}, + {0x03F, 0x00000200}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000010}, + {0x030, 0x000084DC}, + {0x030, 0x000103C9}, + {0x030, 0x00018399}, + {0x030, 0x00020287}, + {0x030, 0x00028277}, + {0x030, 0x00030165}, + {0x030, 0x00038144}, + {0x030, 0x00040044}, + {0x030, 0x00048022}, + {0x030, 0x00050011}, + {0x030, 0x00058000}, + {0x030, 0x00060000}, + {0x030, 0x00068000}, + {0x030, 0x00070000}, + {0x0EF, 0x00000000}, + {0x0EF, 0x00000080}, + {0x033, 0x00000004}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000005}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000006}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x00000007}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000008}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000009}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x0000000A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x0000000B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000000C}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x0000000D}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x0000000E}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x0000000F}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000010}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000011}, + {0x03E, 0x0000001B}, + {0x03F, 0x00022A58}, + {0x033, 0x00000012}, + {0x03E, 0x00000014}, + {0x03F, 0x00023958}, + {0x033, 0x00000013}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000014}, + {0x03E, 0x00000013}, + {0x03F, 0x00022A58}, + {0x033, 0x00000015}, + {0x03E, 0x0000001B}, + {0x03F, 0x00029858}, + {0x033, 0x00000016}, + {0x03E, 0x0000001C}, + {0x03F, 0x00023958}, + {0x033, 0x00000017}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000018}, + {0x03E, 0x00000013}, + {0x03F, 0x00029858}, + {0x033, 0x00000019}, + {0x03E, 0x0000001B}, + {0x03F, 0x00029858}, + {0x033, 0x0000001A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000001B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000001C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000001D}, + {0x03E, 0x0000001B}, + {0x03F, 0x00029858}, + {0x033, 0x0000001E}, + {0x03E, 0x00000013}, + {0x03F, 0x00023A58}, + {0x033, 0x0000001F}, + {0x03E, 0x00000013}, + {0x03F, 0x00023A58}, + {0x033, 0x00000020}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000021}, + {0x03E, 0x0000001C}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000022}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000023}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000024}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000025}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000026}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000027}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000028}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000029}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000002A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, 
+ {0x033, 0x0000002B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000002C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000002D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000002E}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000002F}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000030}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000031}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000032}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000033}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000034}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000035}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000036}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000037}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x00000038}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x00000039}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000003A}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000003B}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000003C}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000003D}, + {0x03E, 0x00000014}, + {0x03F, 0x0002AC58}, + {0x033, 0x0000003E}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x033, 0x0000003F}, + {0x03E, 0x00000014}, + {0x03F, 0x00023A58}, + {0x0EF, 0x00000000}, + {0x0EE, 0x00000800}, + {0x033, 0x00000000}, + {0x03F, 0x00000031}, + {0x033, 0x00000001}, + {0x03F, 0x00000023}, + {0x033, 0x00000002}, + {0x03F, 0x00000015}, + {0x033, 0x00000003}, + {0x03F, 0x00000007}, + {0x0EE, 0x00000000}, + {0x0EC, 0x00000400}, + {0x033, 0x00000003}, + {0x03F, 0x00000030}, + {0x033, 0x00000004}, + {0x03F, 0x00000021}, + {0x0EC, 0x00000000}, + {0x0DE, 0x00000000}, + {0x0EF, 0x00000000}, + {0x033, 0x00000000}, + {0x008, 0x00060280}, + {0x009, 0x00030400}, + {0x0EF, 0x00000000}, + {0x0A7, 0x00080308}, + {0x066, 0x00006000}, + {0x0EF, 0x00000400}, + {0x030, 0x000001FF}, + {0x030, 0x000081FF}, + {0x030, 0x000101FF}, + {0x030, 0x000181FF}, + {0x030, 0x000201FF}, + {0x030, 0x000281FF}, + {0x030, 0x0003017F}, + {0x030, 0x000380FB}, + {0x0EF, 0x00000000}, + {0x06E, 0x00077A18}, + {0x06D, 0x00000C31}, + {0x06A, 0x000E0F8A}, + {0x06B, 0x000018A0}, + {0x06F, 0x000F81FC}, + {0x05E, 0x0000001F}, + {0x0EF, 0x00000200}, + {0x030, 0x0003D407}, + {0x030, 0x00035A87}, + {0x030, 0x0002CF07}, + {0x030, 0x00024F07}, + {0x030, 0x0001CF07}, + {0x030, 0x00014F07}, + {0x030, 0x0000CF07}, + {0x030, 0x00004F07}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00080000}, + {0x030, 0x00008038}, + {0x030, 0x00010038}, + {0x030, 0x00018038}, + {0x030, 0x00020038}, + {0x030, 0x00028038}, + {0x030, 0x00030038}, + {0x030, 0x0003803C}, + {0x030, 0x0004003C}, + {0x030, 0x0004803C}, + {0x030, 0x0005003C}, + {0x030, 0x0005803C}, + {0x030, 0x0006003C}, + {0x030, 0x0006803C}, + {0x030, 0x0007003C}, + {0x0EB, 0x00000000}, + {0x094, 0x000000FC}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000000}, + 
{0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x095, 0x00000008}, + {0xA0000000, 0x00000000}, + {0x095, 0x00000000}, + {0xB0000000, 0x00000000}, + {0x0EE, 0x00001000}, + {0x033, 0x00000020}, + {0x03F, 0x00000052}, + {0x033, 0x00000024}, + {0x03F, 0x0000005A}, + {0x033, 0x00000028}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000030}, + {0x03F, 0x000001A4}, + {0x033, 0x00000034}, + {0x03F, 0x000001E7}, + {0x033, 0x00000038}, + {0x03F, 0x000002E7}, + {0x033, 0x0000003C}, + {0x03F, 0x000003E7}, + {0x033, 0x00000021}, + {0x03F, 0x00000052}, + {0x033, 0x00000025}, + {0x03F, 0x0000005A}, + {0x033, 0x00000029}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000031}, + {0x03F, 0x000001A4}, + {0x033, 0x00000035}, + {0x03F, 0x000001E6}, + {0x033, 0x00000039}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000022}, + {0x03F, 0x00000052}, + {0x033, 0x00000026}, + {0x03F, 0x0000005A}, + {0x033, 0x0000002A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000002E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000032}, + {0x03F, 0x000001A4}, + {0x033, 0x00000036}, + {0x03F, 0x000001E6}, + {0x033, 0x0000003A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000003E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000060}, + {0x03F, 0x00000052}, + {0x033, 0x00000064}, + {0x03F, 0x0000005A}, + {0x033, 0x00000068}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006C}, + {0x03F, 0x0000019C}, + {0x033, 0x00000070}, + {0x03F, 0x000001A4}, + {0x033, 0x00000074}, + {0x03F, 0x000001E6}, + {0x033, 0x00000078}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007C}, + {0x03F, 0x000003E6}, + {0x033, 0x00000061}, + {0x03F, 0x00000052}, + {0x033, 0x00000065}, + {0x03F, 0x0000005A}, + {0x033, 0x00000069}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006D}, + {0x03F, 0x0000019C}, + {0x033, 0x00000071}, + {0x03F, 0x000001A4}, + {0x033, 0x00000075}, + {0x03F, 0x000001E6}, + {0x033, 0x00000079}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007D}, + {0x03F, 0x000003E6}, + {0x033, 0x00000062}, + {0x03F, 0x00000052}, + {0x033, 0x00000066}, + {0x03F, 0x0000005A}, + {0x033, 0x0000006A}, + {0x03F, 0x0000009C}, + {0x033, 0x0000006E}, + {0x03F, 0x0000019C}, + {0x033, 0x00000072}, + {0x03F, 0x000001A4}, + {0x033, 0x00000076}, + {0x03F, 0x000001E6}, + {0x033, 0x0000007A}, + {0x03F, 0x000002E6}, + {0x033, 0x0000007E}, + {0x03F, 0x000003E6}, + {0x033, 0x00000063}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000052}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 
0x00000052}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x00000152}, + {0xA0000000, 0x00000000}, + {0x03F, 0x00000052}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000067}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000015A}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000005A}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006B}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000009C}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000006F}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + 
{0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xA0000000, 0x00000000}, + {0x03F, 0x0000019C}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000073}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001A4}, + {0xB0000000, 0x00000000}, + {0x033, 0x00000077}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90350001, 0x00000000}, {0x40000000, 
0x00000000}, + {0x03F, 0x000002E6}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x03F, 0x000002E6}, + {0xA0000000, 0x00000000}, + {0x03F, 0x000001E6}, + {0xB0000000, 0x00000000}, + {0x033, 0x0000007B}, + {0x03F, 0x000002E7}, + {0x033, 0x0000007F}, + {0x03F, 0x000003E7}, + {0x0EE, 0x00000000}, + {0x100EE, 0x00004000}, + {0x80010000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 
0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90020000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 
0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90320000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 
0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90330000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 
0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 
0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 
0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360000, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 
0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90010001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 
0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90020001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 
0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90320001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 
0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781EF}, + {0x10030, 0x000785E9}, + {0x10030, 0x000789E3}, + {0x10030, 0x00078DA3}, + {0x10030, 0x00079161}, + {0x10030, 0x0007955B}, + {0x10030, 0x00079921}, + {0x10030, 0x00079D1B}, + {0x10030, 0x0007A0E1}, + {0x10030, 0x0007A4DB}, + {0x10030, 0x0007A8A1}, + {0x10030, 0x0007AC9B}, + {0x10030, 0x0007B061}, + {0x10030, 0x0007B45B}, + {0x10030, 0x0007B821}, + {0x10030, 0x0007BC1B}, + {0x10030, 0x0007C015}, + {0x10030, 0x0007C40F}, + {0x90330001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 
0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90340001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 
0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90350001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 
0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90360001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 
0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x903f0001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 
0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x90400001, 0x00000000}, {0x40000000, 0x00000000}, + {0x10030, 0x000201DF}, + {0x10030, 0x000205D9}, + {0x10030, 0x000209D3}, + {0x10030, 0x00020D99}, + {0x10030, 0x00021193}, + {0x10030, 0x0002155F}, + {0x10030, 0x00021959}, + {0x10030, 0x00021D21}, + {0x10030, 0x00022119}, + {0x10030, 0x000224DF}, + {0x10030, 0x000228D9}, + {0x10030, 0x00022C9F}, + {0x10030, 0x00023099}, + {0x10030, 0x0002345F}, + {0x10030, 0x00023859}, + {0x10030, 0x00023C1F}, + {0x10030, 0x00024019}, + {0x10030, 0x00024413}, + {0x10030, 0x000281CD}, + {0x10030, 0x000285DB}, + {0x10030, 0x000289D5}, + {0x10030, 0x00028D9B}, + {0x10030, 0x0002918D}, + {0x10030, 0x00029555}, + {0x10030, 0x00029957}, + {0x10030, 0x00029D1F}, + {0x10030, 0x0002A119}, + {0x10030, 0x0002A4DF}, + {0x10030, 0x0002A8D9}, + {0x10030, 0x0002AC9F}, + {0x10030, 0x0002B099}, + {0x10030, 0x0002B45F}, + {0x10030, 0x0002B859}, + {0x10030, 0x0002BC1F}, + {0x10030, 0x0002C019}, + {0x10030, 0x0002C413}, + {0x10030, 0x000301D9}, + {0x10030, 0x000305DB}, + {0x10030, 0x000309D5}, + {0x10030, 0x00030D9B}, + {0x10030, 0x00031195}, + {0x10030, 0x0003155D}, + {0x10030, 0x00031955}, + {0x10030, 0x00031D1D}, + {0x10030, 0x00032119}, + {0x10030, 0x000324DF}, + {0x10030, 0x000328D9}, + {0x10030, 0x00032C9F}, + {0x10030, 0x00033099}, + {0x10030, 0x0003345F}, + {0x10030, 0x00033859}, + {0x10030, 0x00033C1F}, + {0x10030, 0x00034019}, + {0x10030, 0x00034413}, + {0x10030, 0x000601E1}, + {0x10030, 0x000605DB}, + {0x10030, 0x000609D5}, + {0x10030, 0x00060D9B}, + {0x10030, 0x00061195}, + {0x10030, 0x0006155B}, + {0x10030, 
0x00061957}, + {0x10030, 0x00061D1F}, + {0x10030, 0x00062119}, + {0x10030, 0x000624DF}, + {0x10030, 0x000628D9}, + {0x10030, 0x00062C9F}, + {0x10030, 0x00063099}, + {0x10030, 0x0006345F}, + {0x10030, 0x00063859}, + {0x10030, 0x00063C1F}, + {0x10030, 0x00064019}, + {0x10030, 0x00064413}, + {0x10030, 0x000681E1}, + {0x10030, 0x000685DB}, + {0x10030, 0x000689D5}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955B}, + {0x10030, 0x00069957}, + {0x10030, 0x00069D1F}, + {0x10030, 0x0006A119}, + {0x10030, 0x0006A4DF}, + {0x10030, 0x0006A8D9}, + {0x10030, 0x0006AC9F}, + {0x10030, 0x0006B099}, + {0x10030, 0x0006B45F}, + {0x10030, 0x0006B859}, + {0x10030, 0x0006BC1F}, + {0x10030, 0x0006C019}, + {0x10030, 0x0006C413}, + {0x10030, 0x000701E1}, + {0x10030, 0x000705DB}, + {0x10030, 0x000709D5}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071957}, + {0x10030, 0x00071D1F}, + {0x10030, 0x00072119}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072C9F}, + {0x10030, 0x00073099}, + {0x10030, 0x0007345F}, + {0x10030, 0x00073859}, + {0x10030, 0x00073C1F}, + {0x10030, 0x00074019}, + {0x10030, 0x00074413}, + {0x10030, 0x000781DF}, + {0x10030, 0x000785D9}, + {0x10030, 0x000789D3}, + {0x10030, 0x00078D99}, + {0x10030, 0x00079193}, + {0x10030, 0x0007955F}, + {0x10030, 0x00079959}, + {0x10030, 0x00079D21}, + {0x10030, 0x0007A115}, + {0x10030, 0x0007A4DF}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007AC9F}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B45F}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC1F}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0x10030, 0x00000000}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xA0000000, 0x00000000}, + {0x10030, 0x000001EF}, + {0x10030, 0x000005E9}, + {0x10030, 0x000009E3}, + {0x10030, 0x00000DDD}, + {0x10030, 0x000011D7}, + {0x10030, 0x0000159F}, + {0x10030, 0x00001999}, + {0x10030, 0x00001D5F}, + {0x10030, 0x00002159}, + {0x10030, 0x0000251F}, + {0x10030, 0x00002919}, + {0x10030, 0x00002CDF}, + {0x10030, 0x000030D9}, + {0x10030, 0x0000349F}, + {0x10030, 0x00003899}, + {0x10030, 0x00003C5F}, + {0x10030, 0x00004059}, + {0x10030, 0x00004453}, + {0x10030, 0x000201ED}, + {0x10030, 0x000205AD}, + {0x10030, 0x000209A7}, + {0x10030, 0x00020DA1}, + {0x10030, 0x0002119B}, + {0x10030, 0x00021561}, + {0x10030, 0x0002195B}, + {0x10030, 0x00021D27}, + {0x10030, 0x00022121}, + {0x10030, 0x000224E9}, + {0x10030, 0x000228E3}, + {0x10030, 0x00022CA9}, + {0x10030, 0x000230A3}, + {0x10030, 0x00023469}, + {0x10030, 0x00023863}, + {0x10030, 0x00023C29}, + {0x10030, 0x00024023}, + {0x10030, 0x0002441D}, + {0x10030, 0x000281EF}, + {0x10030, 0x000285AF}, + {0x10030, 0x000289A9}, + {0x10030, 0x00028DA3}, + {0x10030, 0x0002919D}, + {0x10030, 0x00029563}, + {0x10030, 0x0002995D}, + {0x10030, 0x00029D25}, + {0x10030, 0x0002A11F}, + {0x10030, 0x0002A4E7}, + {0x10030, 0x0002A8E1}, + {0x10030, 0x0002ACA7}, + {0x10030, 0x0002B0A1}, + {0x10030, 0x0002B467}, + {0x10030, 0x0002B861}, + {0x10030, 0x0002BC27}, + {0x10030, 0x0002C021}, + {0x10030, 0x0002C41B}, + {0x10030, 0x000301EF}, + {0x10030, 0x000305AF}, + {0x10030, 0x000309A9}, + {0x10030, 
0x00030DA3}, + {0x10030, 0x0003119D}, + {0x10030, 0x00031563}, + {0x10030, 0x0003195D}, + {0x10030, 0x00031D25}, + {0x10030, 0x0003211F}, + {0x10030, 0x000324E7}, + {0x10030, 0x000328E1}, + {0x10030, 0x00032CA7}, + {0x10030, 0x000330A1}, + {0x10030, 0x00033467}, + {0x10030, 0x00033861}, + {0x10030, 0x00033C27}, + {0x10030, 0x00034021}, + {0x10030, 0x0003441B}, + {0x10030, 0x000601EB}, + {0x10030, 0x000605AB}, + {0x10030, 0x000609A5}, + {0x10030, 0x00060D9F}, + {0x10030, 0x00061199}, + {0x10030, 0x00061593}, + {0x10030, 0x00061959}, + {0x10030, 0x00061D53}, + {0x10030, 0x0006211B}, + {0x10030, 0x00062515}, + {0x10030, 0x000628DD}, + {0x10030, 0x00062CD7}, + {0x10030, 0x0006309D}, + {0x10030, 0x00063497}, + {0x10030, 0x0006385D}, + {0x10030, 0x00063C57}, + {0x10030, 0x0006401D}, + {0x10030, 0x00064417}, + {0x10030, 0x000681E7}, + {0x10030, 0x000685A7}, + {0x10030, 0x000689A1}, + {0x10030, 0x00068D9B}, + {0x10030, 0x00069195}, + {0x10030, 0x0006955F}, + {0x10030, 0x00069959}, + {0x10030, 0x00069D21}, + {0x10030, 0x0006A11B}, + {0x10030, 0x0006A4E3}, + {0x10030, 0x0006A8DD}, + {0x10030, 0x0006ACA5}, + {0x10030, 0x0006B09F}, + {0x10030, 0x0006B465}, + {0x10030, 0x0006B85F}, + {0x10030, 0x0006BC25}, + {0x10030, 0x0006C01F}, + {0x10030, 0x0006C419}, + {0x10030, 0x000701E7}, + {0x10030, 0x000705A7}, + {0x10030, 0x000709A1}, + {0x10030, 0x00070D9B}, + {0x10030, 0x00071195}, + {0x10030, 0x0007155B}, + {0x10030, 0x00071955}, + {0x10030, 0x00071D1D}, + {0x10030, 0x00072117}, + {0x10030, 0x000724DF}, + {0x10030, 0x000728D9}, + {0x10030, 0x00072CA1}, + {0x10030, 0x0007309B}, + {0x10030, 0x00073461}, + {0x10030, 0x0007385B}, + {0x10030, 0x00073C21}, + {0x10030, 0x0007401B}, + {0x10030, 0x0007441B}, + {0x10030, 0x000781E9}, + {0x10030, 0x000785A9}, + {0x10030, 0x000789A3}, + {0x10030, 0x00078D9D}, + {0x10030, 0x00079197}, + {0x10030, 0x00079591}, + {0x10030, 0x00079957}, + {0x10030, 0x00079D51}, + {0x10030, 0x0007A119}, + {0x10030, 0x0007A513}, + {0x10030, 0x0007A8D9}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B099}, + {0x10030, 0x0007B493}, + {0x10030, 0x0007B859}, + {0x10030, 0x0007BC53}, + {0x10030, 0x0007C019}, + {0x10030, 0x0007C413}, + {0xB0000000, 0x00000000}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000000FC}, + {0x10030, 0x000004F9}, + {0x10030, 0x000008F6}, + {0x10030, 0x00000CF3}, + {0x10030, 0x000010F0}, + {0x10030, 0x000014ED}, + {0x10030, 0x000018AC}, + {0x10030, 0x00001CA9}, + {0x10030, 0x00002069}, + {0x10030, 0x00002466}, + {0x10030, 0x00002829}, + {0x10030, 0x00002C26}, + {0x10030, 0x00003023}, + {0x10030, 0x00003420}, + {0x10030, 0x0000381D}, + {0x10030, 0x00003C1A}, + {0x10030, 0x00004017}, + {0x100EE, 0x00000000}, + {0x100EE, 0x00002000}, + {0x10030, 0x000780F4}, + {0x10030, 0x000784F1}, + {0x10030, 0x000788EE}, + {0x10030, 0x00078CEB}, + {0x10030, 0x000790E8}, + {0x10030, 0x000794E5}, + {0x10030, 0x000798E2}, + {0x10030, 0x00079CDF}, + {0x10030, 0x0007A0DC}, + {0x10030, 0x0007A4D9}, + {0x10030, 0x0007A8D6}, + {0x10030, 0x0007ACD3}, + {0x10030, 0x0007B0D0}, + {0x10030, 0x0007B4CD}, + {0x10030, 0x0007B8CA}, + {0x10030, 0x0007BC07}, + {0x10030, 0x0007C004}, + {0x100EE, 0x00000000}, + {0x0EF, 0x00002000}, + {0x033, 0x00000008}, + {0x03F, 0x00000004}, + {0x033, 0x00000009}, + {0x03F, 0x00000003}, + {0x033, 0x0000000A}, + {0x03F, 0x00000003}, + {0x033, 0x0000000B}, + {0x03F, 0x00000002}, + {0x033, 0x0000000C}, + {0x03F, 0x00000002}, + {0x033, 0x0000000D}, + {0x03F, 0x00000002}, + {0x033, 0x0000000E}, + {0x03F, 0x00000002}, + {0x033, 0x0000000F}, + {0x03F, 
0x00000002}, + {0x0EF, 0x00000000}, + {0x0EB, 0x00040000}, + {0x030, 0x000109B7}, + {0x0EB, 0x00000000}, + {0x0EF, 0x00008000}, + {0x033, 0x00000020}, + {0x03F, 0x00050002}, + {0x033, 0x00000021}, + {0x03F, 0x00060032}, + {0x033, 0x00000022}, + {0x03F, 0x00050042}, + {0x033, 0x00000023}, + {0x03F, 0x00040042}, + {0x033, 0x00000024}, + {0x03F, 0x00008001}, + {0x033, 0x00000025}, + {0x03F, 0x00008002}, + {0x033, 0x00000026}, + {0x03F, 0x00000003}, + {0x033, 0x00000027}, + {0x03F, 0x00000003}, + {0x033, 0x00000028}, + {0x03F, 0x00050002}, + {0x033, 0x00000029}, + {0x03F, 0x00060032}, + {0x033, 0x0000002A}, + {0x03F, 0x00050042}, + {0x033, 0x0000002B}, + {0x03F, 0x00040042}, + {0x033, 0x0000002C}, + {0x03F, 0x00008001}, + {0x033, 0x0000002D}, + {0x03F, 0x00008002}, + {0x033, 0x0000002E}, + {0x03F, 0x00000003}, + {0x033, 0x0000002F}, + {0x03F, 0x00000003}, + {0x033, 0x00000030}, + {0x03F, 0x00050002}, + {0x033, 0x00000031}, + {0x03F, 0x00060032}, + {0x033, 0x00000032}, + {0x03F, 0x00050042}, + {0x033, 0x00000033}, + {0x03F, 0x00040042}, + {0x033, 0x00000034}, + {0x03F, 0x00008001}, + {0x033, 0x00000035}, + {0x03F, 0x00008002}, + {0x033, 0x00000036}, + {0x03F, 0x00000003}, + {0x033, 0x00000037}, + {0x03F, 0x00000003}, + {0x033, 0x00000060}, + {0x03F, 0x00050002}, + {0x033, 0x00000061}, + {0x03F, 0x00060032}, + {0x033, 0x00000062}, + {0x03F, 0x00050042}, + {0x033, 0x00000063}, + {0x03F, 0x00040042}, + {0x033, 0x00000064}, + {0x03F, 0x00008001}, + {0x033, 0x00000065}, + {0x03F, 0x00008002}, + {0x033, 0x00000066}, + {0x03F, 0x00000003}, + {0x033, 0x00000067}, + {0x03F, 0x00000003}, + {0x033, 0x00000068}, + {0x03F, 0x00050002}, + {0x033, 0x00000069}, + {0x03F, 0x00060032}, + {0x033, 0x0000006A}, + {0x03F, 0x00050042}, + {0x033, 0x0000006B}, + {0x03F, 0x00040042}, + {0x033, 0x0000006C}, + {0x03F, 0x00008001}, + {0x033, 0x0000006D}, + {0x03F, 0x00008002}, + {0x033, 0x0000006E}, + {0x03F, 0x00000003}, + {0x033, 0x0000006F}, + {0x03F, 0x00000003}, + {0x033, 0x00000070}, + {0x03F, 0x00050002}, + {0x033, 0x00000071}, + {0x03F, 0x00060032}, + {0x033, 0x00000072}, + {0x03F, 0x00050042}, + {0x033, 0x00000073}, + {0x03F, 0x00040042}, + {0x033, 0x00000074}, + {0x03F, 0x00008001}, + {0x033, 0x00000075}, + {0x03F, 0x00008002}, + {0x033, 0x00000076}, + {0x03F, 0x00000003}, + {0x033, 0x00000077}, + {0x03F, 0x00000003}, + {0x033, 0x00000078}, + {0x03F, 0x00050002}, + {0x033, 0x00000079}, + {0x03F, 0x00060032}, + {0x033, 0x0000007A}, + {0x03F, 0x00050042}, + {0x033, 0x0000007B}, + {0x03F, 0x00040042}, + {0x033, 0x0000007C}, + {0x03F, 0x00008001}, + {0x033, 0x0000007D}, + {0x03F, 0x00008002}, + {0x033, 0x0000007E}, + {0x03F, 0x00000003}, + {0x033, 0x0000007F}, + {0x03F, 0x00000003}, + {0x033, 0x000000A0}, + {0x03F, 0x00050002}, + {0x033, 0x000000A1}, + {0x03F, 0x00060032}, + {0x033, 0x000000A2}, + {0x03F, 0x00050042}, + {0x033, 0x000000A3}, + {0x03F, 0x00040042}, + {0x033, 0x000000A4}, + {0x03F, 0x00008001}, + {0x033, 0x000000A5}, + {0x03F, 0x00008002}, + {0x033, 0x000000A6}, + {0x03F, 0x00000003}, + {0x033, 0x000000A7}, + {0x03F, 0x00000003}, + {0x033, 0x000000A8}, + {0x03F, 0x00050002}, + {0x033, 0x000000A9}, + {0x03F, 0x00060032}, + {0x033, 0x000000AA}, + {0x03F, 0x00050042}, + {0x033, 0x000000AB}, + {0x03F, 0x00040042}, + {0x033, 0x000000AC}, + {0x03F, 0x00008001}, + {0x033, 0x000000AD}, + {0x03F, 0x00008002}, + {0x033, 0x000000AE}, + {0x03F, 0x00000003}, + {0x033, 0x000000AF}, + {0x03F, 0x00000003}, + {0x033, 0x000000B0}, + {0x03F, 0x00050002}, + {0x033, 0x000000B1}, + {0x03F, 0x00060032}, + {0x033, 0x000000B2}, 
+ {0x03F, 0x00050042}, + {0x033, 0x000000B3}, + {0x03F, 0x00040042}, + {0x033, 0x000000B4}, + {0x03F, 0x00008001}, + {0x033, 0x000000B5}, + {0x03F, 0x00008002}, + {0x033, 0x000000B6}, + {0x03F, 0x00000003}, + {0x033, 0x000000B7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E0}, + {0x03F, 0x00050002}, + {0x033, 0x000000E1}, + {0x03F, 0x00060032}, + {0x033, 0x000000E2}, + {0x03F, 0x00050042}, + {0x033, 0x000000E3}, + {0x03F, 0x00040042}, + {0x033, 0x000000E4}, + {0x03F, 0x00008001}, + {0x033, 0x000000E5}, + {0x03F, 0x00008002}, + {0x033, 0x000000E6}, + {0x03F, 0x00000003}, + {0x033, 0x000000E7}, + {0x03F, 0x00000003}, + {0x033, 0x000000E8}, + {0x03F, 0x00050002}, + {0x033, 0x000000E9}, + {0x03F, 0x00060032}, + {0x033, 0x000000EA}, + {0x03F, 0x00050042}, + {0x033, 0x000000EB}, + {0x03F, 0x00040042}, + {0x033, 0x000000EC}, + {0x03F, 0x00008001}, + {0x033, 0x000000ED}, + {0x03F, 0x00008002}, + {0x033, 0x000000EE}, + {0x03F, 0x00000003}, + {0x033, 0x000000EF}, + {0x03F, 0x00000003}, + {0x033, 0x000000F0}, + {0x03F, 0x00050002}, + {0x033, 0x000000F1}, + {0x03F, 0x00060032}, + {0x033, 0x000000F2}, + {0x03F, 0x00050042}, + {0x033, 0x000000F3}, + {0x03F, 0x00040042}, + {0x033, 0x000000F4}, + {0x03F, 0x00008001}, + {0x033, 0x000000F5}, + {0x03F, 0x00008002}, + {0x033, 0x000000F6}, + {0x03F, 0x00000003}, + {0x033, 0x000000F7}, + {0x03F, 0x00000003}, + {0x033, 0x000000F8}, + {0x03F, 0x00050002}, + {0x033, 0x000000F9}, + {0x03F, 0x00060032}, + {0x033, 0x000000FA}, + {0x03F, 0x00050042}, + {0x033, 0x000000FB}, + {0x03F, 0x00040042}, + {0x033, 0x000000FC}, + {0x03F, 0x00008001}, + {0x033, 0x000000FD}, + {0x03F, 0x00008002}, + {0x033, 0x000000FE}, + {0x03F, 0x00000003}, + {0x033, 0x000000FF}, + {0x03F, 0x00000003}, + {0x033, 0x00000120}, + {0x03F, 0x00050002}, + {0x033, 0x00000121}, + {0x03F, 0x00060032}, + {0x033, 0x00000122}, + {0x03F, 0x00050042}, + {0x033, 0x00000123}, + {0x03F, 0x00040042}, + {0x033, 0x00000124}, + {0x03F, 0x00008001}, + {0x033, 0x00000125}, + {0x03F, 0x00008002}, + {0x033, 0x00000126}, + {0x03F, 0x00000003}, + {0x033, 0x00000127}, + {0x03F, 0x00000003}, + {0x033, 0x00000128}, + {0x03F, 0x00050002}, + {0x033, 0x00000129}, + {0x03F, 0x00060032}, + {0x033, 0x0000012A}, + {0x03F, 0x00050042}, + {0x033, 0x0000012B}, + {0x03F, 0x00040042}, + {0x033, 0x0000012C}, + {0x03F, 0x00008001}, + {0x033, 0x0000012D}, + {0x03F, 0x00008002}, + {0x033, 0x0000012E}, + {0x03F, 0x00000003}, + {0x033, 0x0000012F}, + {0x03F, 0x00000003}, + {0x033, 0x00000130}, + {0x03F, 0x00050002}, + {0x033, 0x00000131}, + {0x03F, 0x00060032}, + {0x033, 0x00000132}, + {0x03F, 0x00050042}, + {0x033, 0x00000133}, + {0x03F, 0x00040042}, + {0x033, 0x00000134}, + {0x03F, 0x00008001}, + {0x033, 0x00000135}, + {0x03F, 0x00008002}, + {0x033, 0x00000136}, + {0x03F, 0x00000003}, + {0x033, 0x00000137}, + {0x03F, 0x00000003}, + {0x033, 0x00000160}, + {0x03F, 0x00050002}, + {0x033, 0x00000161}, + {0x03F, 0x00060032}, + {0x033, 0x00000162}, + {0x03F, 0x00050042}, + {0x033, 0x00000163}, + {0x03F, 0x00040042}, + {0x033, 0x00000164}, + {0x03F, 0x00008001}, + {0x033, 0x00000165}, + {0x03F, 0x00008002}, + {0x033, 0x00000166}, + {0x03F, 0x00000003}, + {0x033, 0x00000167}, + {0x03F, 0x00000003}, + {0x033, 0x00000168}, + {0x03F, 0x00050002}, + {0x033, 0x00000169}, + {0x03F, 0x00060032}, + {0x033, 0x0000016A}, + {0x03F, 0x00050042}, + {0x033, 0x0000016B}, + {0x03F, 0x00040042}, + {0x033, 0x0000016C}, + {0x03F, 0x00008001}, + {0x033, 0x0000016D}, + {0x03F, 0x00008002}, + {0x033, 0x0000016E}, + {0x03F, 0x00000003}, + {0x033, 0x0000016F}, + {0x03F, 
0x00000003}, + {0x033, 0x00000170}, + {0x03F, 0x00050002}, + {0x033, 0x00000171}, + {0x03F, 0x00060032}, + {0x033, 0x00000172}, + {0x03F, 0x00050042}, + {0x033, 0x00000173}, + {0x03F, 0x00040042}, + {0x033, 0x00000174}, + {0x03F, 0x00008001}, + {0x033, 0x00000175}, + {0x03F, 0x00008002}, + {0x033, 0x00000176}, + {0x03F, 0x00000003}, + {0x033, 0x00000177}, + {0x03F, 0x00000003}, + {0x033, 0x00000178}, + {0x03F, 0x00050002}, + {0x033, 0x00000179}, + {0x03F, 0x00060032}, + {0x033, 0x0000017A}, + {0x03F, 0x00050042}, + {0x033, 0x0000017B}, + {0x03F, 0x00040042}, + {0x033, 0x0000017C}, + {0x03F, 0x00008001}, + {0x033, 0x0000017D}, + {0x03F, 0x00008002}, + {0x033, 0x0000017E}, + {0x03F, 0x00000003}, + {0x033, 0x0000017F}, + {0x03F, 0x00000003}, + {0x033, 0x000001A0}, + {0x03F, 0x00050002}, + {0x033, 0x000001A1}, + {0x03F, 0x00060032}, + {0x033, 0x000001A2}, + {0x03F, 0x00050042}, + {0x033, 0x000001A3}, + {0x03F, 0x00040042}, + {0x033, 0x000001A4}, + {0x03F, 0x00008001}, + {0x033, 0x000001A5}, + {0x03F, 0x00008002}, + {0x033, 0x000001A6}, + {0x03F, 0x00000003}, + {0x033, 0x000001A7}, + {0x03F, 0x00000003}, + {0x033, 0x000001A8}, + {0x03F, 0x00050002}, + {0x033, 0x000001A9}, + {0x03F, 0x00060032}, + {0x033, 0x000001AA}, + {0x03F, 0x00050042}, + {0x033, 0x000001AB}, + {0x03F, 0x00040042}, + {0x033, 0x000001AC}, + {0x03F, 0x00008001}, + {0x033, 0x000001AD}, + {0x03F, 0x00008002}, + {0x033, 0x000001AE}, + {0x03F, 0x00000003}, + {0x033, 0x000001AF}, + {0x03F, 0x00000003}, + {0x033, 0x000001B0}, + {0x03F, 0x00050002}, + {0x033, 0x000001B1}, + {0x03F, 0x00060032}, + {0x033, 0x000001B2}, + {0x03F, 0x00050042}, + {0x033, 0x000001B3}, + {0x03F, 0x00040042}, + {0x033, 0x000001B4}, + {0x03F, 0x00008001}, + {0x033, 0x000001B5}, + {0x03F, 0x00008002}, + {0x033, 0x000001B6}, + {0x03F, 0x00000003}, + {0x033, 0x000001B7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E0}, + {0x03F, 0x00050002}, + {0x033, 0x000001E1}, + {0x03F, 0x00060032}, + {0x033, 0x000001E2}, + {0x03F, 0x00050042}, + {0x033, 0x000001E3}, + {0x03F, 0x00040042}, + {0x033, 0x000001E4}, + {0x03F, 0x00008001}, + {0x033, 0x000001E5}, + {0x03F, 0x00008002}, + {0x033, 0x000001E6}, + {0x03F, 0x00000003}, + {0x033, 0x000001E7}, + {0x03F, 0x00000003}, + {0x033, 0x000001E8}, + {0x03F, 0x00050002}, + {0x033, 0x000001E9}, + {0x03F, 0x00060032}, + {0x033, 0x000001EA}, + {0x03F, 0x00050042}, + {0x033, 0x000001EB}, + {0x03F, 0x00040042}, + {0x033, 0x000001EC}, + {0x03F, 0x00008001}, + {0x033, 0x000001ED}, + {0x03F, 0x00008002}, + {0x033, 0x000001EE}, + {0x03F, 0x00000003}, + {0x033, 0x000001EF}, + {0x03F, 0x00000003}, + {0x033, 0x000001F0}, + {0x03F, 0x00050002}, + {0x033, 0x000001F1}, + {0x03F, 0x00060032}, + {0x033, 0x000001F2}, + {0x03F, 0x00050042}, + {0x033, 0x000001F3}, + {0x03F, 0x00040042}, + {0x033, 0x000001F4}, + {0x03F, 0x00008001}, + {0x033, 0x000001F5}, + {0x03F, 0x00008002}, + {0x033, 0x000001F6}, + {0x03F, 0x00000003}, + {0x033, 0x000001F7}, + {0x03F, 0x00000003}, + {0x033, 0x000001F8}, + {0x03F, 0x00050002}, + {0x033, 0x000001F9}, + {0x03F, 0x00060032}, + {0x033, 0x000001FA}, + {0x03F, 0x00050042}, + {0x033, 0x000001FB}, + {0x03F, 0x00040042}, + {0x033, 0x000001FC}, + {0x03F, 0x00008001}, + {0x033, 0x000001FD}, + {0x03F, 0x00008002}, + {0x033, 0x000001FE}, + {0x03F, 0x00000003}, + {0x033, 0x000001FF}, + {0x03F, 0x00000003}, + {0x0EF, 0x00000000}, + {0x005, 0x00000001}, + {0x10005, 0x00000001}, + {0x100EE, 0x00000400}, + {0x10030, 0x00000000}, + {0x10030, 0x00001000}, + {0x10030, 0x00002000}, + {0x10030, 0x00003000}, + {0x10030, 0x00004000}, + 
{0x10030, 0x00005000}, + {0x10030, 0x00006003}, + {0x10030, 0x00007003}, + {0x10030, 0x00008000}, + {0x10030, 0x00009000}, + {0x10030, 0x0000A000}, + {0x10030, 0x0000B000}, + {0x10030, 0x0000C000}, + {0x10030, 0x0000D000}, + {0x10030, 0x0000E003}, + {0x10030, 0x0000F003}, + {0x10030, 0x00010000}, + {0x10030, 0x00011000}, + {0x10030, 0x00012000}, + {0x10030, 0x00013000}, + {0x10030, 0x00014000}, + {0x10030, 0x00015000}, + {0x10030, 0x00016003}, + {0x10030, 0x00017003}, + {0x10030, 0x00018000}, + {0x10030, 0x00019000}, + {0x10030, 0x0001A000}, + {0x10030, 0x0001B000}, + {0x10030, 0x0001C000}, + {0x10030, 0x0001D000}, + {0x10030, 0x0001E003}, + {0x10030, 0x0001F003}, + {0x10030, 0x00020000}, + {0x10030, 0x00021000}, + {0x10030, 0x00022000}, + {0x10030, 0x00023000}, + {0x10030, 0x00024000}, + {0x10030, 0x00025000}, + {0x10030, 0x00026003}, + {0x10030, 0x00027003}, + {0x10030, 0x00028000}, + {0x10030, 0x00029000}, + {0x10030, 0x0002A000}, + {0x10030, 0x0002B000}, + {0x10030, 0x0002C000}, + {0x10030, 0x0002D000}, + {0x10030, 0x0002E003}, + {0x10030, 0x0002F003}, + {0x10030, 0x00030000}, + {0x10030, 0x00031000}, + {0x10030, 0x00032000}, + {0x10030, 0x00033000}, + {0x10030, 0x00034000}, + {0x10030, 0x00035000}, + {0x10030, 0x00036003}, + {0x10030, 0x00037003}, + {0x10030, 0x00038000}, + {0x10030, 0x00039000}, + {0x10030, 0x0003A000}, + {0x10030, 0x0003B000}, + {0x10030, 0x0003C000}, + {0x10030, 0x0003D000}, + {0x10030, 0x0003E003}, + {0x10030, 0x0003F003}, + {0x10030, 0x00060000}, + {0x10030, 0x00061000}, + {0x10030, 0x00062000}, + {0x10030, 0x00063000}, + {0x10030, 0x00064000}, + {0x10030, 0x00065000}, + {0x10030, 0x00066000}, + {0x10030, 0x00067003}, + {0x10030, 0x00068000}, + {0x10030, 0x00069000}, + {0x10030, 0x0006A000}, + {0x10030, 0x0006B000}, + {0x10030, 0x0006C000}, + {0x10030, 0x0006D000}, + {0x10030, 0x0006E000}, + {0x10030, 0x0006F003}, + {0x10030, 0x00070000}, + {0x10030, 0x00071000}, + {0x10030, 0x00072000}, + {0x10030, 0x00073000}, + {0x10030, 0x00074000}, + {0x10030, 0x00075000}, + {0x10030, 0x00076000}, + {0x10030, 0x00077003}, + {0x10030, 0x00078000}, + {0x10030, 0x00079000}, + {0x10030, 0x0007A000}, + {0x10030, 0x0007B000}, + {0x10030, 0x0007C000}, + {0x10030, 0x0007D000}, + {0x10030, 0x0007E000}, + {0x10030, 0x0007F003}, + {0x0ED, 0x00000010}, + {0x033, 0x00000001}, + {0x03F, 0x0000000A}, + {0x033, 0x00000002}, + {0x03F, 0x0000000A}, + {0x033, 0x00000003}, + {0x03F, 0x0000000A}, + {0x033, 0x00000005}, + {0x03F, 0x0000000A}, + {0x033, 0x00000006}, + {0x03F, 0x0000000A}, + {0x033, 0x00000007}, + {0x03F, 0x0000000A}, + {0x0ED, 0x00000000}, + {0x100EE, 0x00000000}, + {0x0FE, 0x00000031}, +}; + +static const struct rtw89_reg2_def rtw89_8852c_phy_nctl_regs[] = { + {0x8008, 0x00000000}, + {0x8000, 0x00000008}, + {0x8004, 0xf0862966}, + {0x800c, 0x78000000}, + {0x8010, 0x88015000}, + {0x8014, 0x80010100}, + {0x8018, 0x10010100}, + {0x801c, 0xa210bc00}, + {0x8020, 0x000403e0}, + {0x8024, 0x00072160}, + {0x8028, 0x00180e00}, + {0x8030, 0x400000c0}, + {0x8034, 0x11000830}, + {0x8038, 0x00000009}, + {0x803c, 0x00000008}, + {0x8040, 0x00000046}, + {0x8044, 0x0010001f}, + {0x8048, 0xf0000003}, + {0x804c, 0x62ac6162}, + {0x8050, 0xf2acf162}, + {0x8054, 0x62ac6162}, + {0x8058, 0xf2acf162}, + {0x805c, 0x150c0b02}, + {0x8060, 0x150c0b02}, + {0x8064, 0x2aa00047}, + {0x8074, 0x80000000}, + {0x807c, 0x000000ee}, + {0x8088, 0x80000000}, + {0x808c, 0x00000000}, + {0x80b0, 0x00000000}, + {0x80d0, 0x00000000}, + {0x80ec, 0x00000002}, + {0x8098, 0x0000ff00}, + {0x8070, 0x00e80000}, + {0x80b0, 
0xffe00fff}, + {0x809c, 0x0000001f}, + {0x80b8, 0x00001000}, + {0x80bc, 0x0005001d}, + {0x810c, 0x33112211}, + {0x8110, 0x33112211}, + {0x8114, 0x00000000}, + {0x8120, 0x10010000}, + {0x8124, 0x00000000}, + {0x8128, 0x00000200}, + {0x812c, 0x0000c000}, + {0x8138, 0x40000000}, + {0x813c, 0x40000000}, + {0x8140, 0x00000000}, + {0x8144, 0x0b040b03}, + {0x8148, 0x0a040b04}, + {0x814c, 0x0a040b04}, + {0x8150, 0xe4e40000}, + {0x8158, 0xffffffff}, + {0x815c, 0xffffffff}, + {0x8160, 0xffffffff}, + {0x8164, 0xffffffff}, + {0x8168, 0xffffffff}, + {0x816c, 0x1fffffff}, + {0x81cc, 0x00000000}, + {0x81dc, 0x00000002}, + {0x81e0, 0x00000000}, + {0x81e4, 0x00000001}, + {0x81a0, 0x00000000}, + {0x81ac, 0x3fc20400}, + {0x81b0, 0x3f914100}, + {0x81bc, 0x0000005b}, + {0x81c0, 0x0000005b}, + {0x81b4, 0x01e0f078}, + {0x81b8, 0x01e0f078}, + {0x81f0, 0x0000f078}, + {0x820c, 0x33112211}, + {0x8210, 0x33112211}, + {0x8214, 0x00000000}, + {0x8220, 0x10010000}, + {0x8224, 0x00000000}, + {0x8228, 0x00000200}, + {0x822c, 0x0000d000}, + {0x8238, 0x40000000}, + {0x823c, 0x40000000}, + {0x8240, 0x00000000}, + {0x8244, 0x0b040b03}, + {0x8248, 0x0a040b04}, + {0x824c, 0x0a040b04}, + {0x8250, 0xe4e40000}, + {0x8258, 0xffffffff}, + {0x825c, 0xffffffff}, + {0x8260, 0xffffffff}, + {0x8264, 0xffffffff}, + {0x8268, 0xffffffff}, + {0x826c, 0x1fffffff}, + {0x82cc, 0x00000000}, + {0x82dc, 0x00000002}, + {0x82e0, 0x00100000}, + {0x82e4, 0x00000001}, + {0x82a0, 0x00000000}, + {0x82ac, 0x3fc20400}, + {0x82b0, 0x3f914100}, + {0x82bc, 0x0000005b}, + {0x82c0, 0x0000005b}, + {0x82b4, 0x01e0f078}, + {0x82b8, 0x01e0f078}, + {0x82f0, 0x0000f078}, + {0x81d8, 0x00000001}, + {0x82d8, 0x00000001}, + {0x9500, 0x00000000}, + {0x9504, 0x00000000}, + {0x9508, 0x00000000}, + {0x950c, 0x00000000}, + {0x9510, 0x00000000}, + {0x9514, 0x00000000}, + {0x9518, 0x00000000}, + {0x951c, 0x00000000}, + {0x9520, 0x00000000}, + {0x9524, 0x00000000}, + {0x9528, 0x00000000}, + {0x952c, 0x00000000}, + {0x9530, 0x00000000}, + {0x9534, 0x00000000}, + {0x9538, 0x00000000}, + {0x953c, 0x00000000}, + {0x9540, 0x04000000}, + {0x9544, 0x00000000}, + {0x9548, 0x00000000}, + {0x954c, 0x00000000}, + {0x9550, 0x00000000}, + {0x9554, 0x00000000}, + {0x9558, 0x00000000}, + {0x955c, 0x00000000}, + {0x9560, 0x00000000}, + {0x9564, 0x00000000}, + {0x9568, 0x00000000}, + {0x956c, 0x00000000}, + {0x9570, 0x00000000}, + {0x9574, 0x00000000}, + {0x9578, 0x00000000}, + {0x957c, 0x00000000}, + {0x9580, 0x00000000}, + {0x9584, 0x04000000}, + {0x9588, 0x00000000}, + {0x958c, 0x00000000}, + {0x9590, 0x00000000}, + {0x9594, 0x00000000}, + {0x9598, 0x00000000}, + {0x959c, 0x00000000}, + {0x95a0, 0x00000000}, + {0x95a4, 0x00000000}, + {0x95a8, 0x00000000}, + {0x95ac, 0x00000000}, + {0x95b0, 0x00000000}, + {0x95b4, 0x00000000}, + {0x95b8, 0x00000000}, + {0x95bc, 0x00000000}, + {0x95c0, 0x00000000}, + {0x95c4, 0x00000000}, + {0x95c8, 0x04000000}, + {0x95cc, 0x00000000}, + {0x95d0, 0x00000000}, + {0x95d4, 0x00000000}, + {0x95d8, 0x00000000}, + {0x95dc, 0x00000000}, + {0x95e0, 0x00000000}, + {0x95e4, 0x00000000}, + {0x95e8, 0x00000000}, + {0x95ec, 0x00000000}, + {0x95f0, 0x00000000}, + {0x95f4, 0x00000000}, + {0x95f8, 0x00000000}, + {0x95fc, 0x00000000}, + {0x9600, 0x00000000}, + {0x9604, 0x00000000}, + {0x9608, 0x00000000}, + {0x960c, 0x04000000}, + {0x9610, 0x00000000}, + {0x9614, 0x00000000}, + {0x9618, 0x00000000}, + {0x961c, 0x00000000}, + {0x9620, 0x00000000}, + {0x9624, 0x00000000}, + {0x9628, 0x00000000}, + {0x962c, 0x00000000}, + {0x9630, 0x00000000}, + {0x9634, 0x00000000}, + {0x9638, 
0x00000000}, + {0x963c, 0x00000000}, + {0x9640, 0x00000000}, + {0x9644, 0x00000000}, + {0x9648, 0x00000000}, + {0x964c, 0x00000000}, + {0x9650, 0x04000000}, + {0x9654, 0x00000000}, + {0x9658, 0x00000000}, + {0x965c, 0x00000000}, + {0x9660, 0x00000000}, + {0x9664, 0x00000000}, + {0x9668, 0x00000000}, + {0x966c, 0x00000000}, + {0x9670, 0x00000000}, + {0x9674, 0x00000000}, + {0x9678, 0x00000000}, + {0x967c, 0x00000000}, + {0x9680, 0x00000000}, + {0x9684, 0x00000000}, + {0x9688, 0x00000000}, + {0x968c, 0x00000000}, + {0x9690, 0x00000000}, + {0x9694, 0x04000000}, + {0x9698, 0x00000000}, + {0x969c, 0x00000000}, + {0x96a0, 0x00000000}, + {0x96a4, 0x00000000}, + {0x96a8, 0x00000000}, + {0x96ac, 0x00000000}, + {0x96b0, 0x00000000}, + {0x96b4, 0x00000000}, + {0x96b8, 0x00000000}, + {0x96bc, 0x00000000}, + {0x96c0, 0x00000000}, + {0x96c4, 0x00000000}, + {0x96c8, 0x00000000}, + {0x96cc, 0x00000000}, + {0x96d0, 0x00000000}, + {0x96d4, 0x00000000}, + {0x96d8, 0x04000000}, + {0x96dc, 0x00000000}, + {0x96e0, 0x00000000}, + {0x96e4, 0x00000000}, + {0x96e8, 0x00000000}, + {0x96ec, 0x00000000}, + {0x96f0, 0x00000000}, + {0x96f4, 0x00000000}, + {0x96f8, 0x00000000}, + {0x96fc, 0x00000000}, + {0x9700, 0x00000000}, + {0x9704, 0x00000000}, + {0x9708, 0x00000000}, + {0x970c, 0x00000000}, + {0x9710, 0x00000000}, + {0x9714, 0x00000000}, + {0x9718, 0x00000000}, + {0x971c, 0x04000000}, + {0x9720, 0x00000000}, + {0x9724, 0x00000000}, + {0x9728, 0x00000000}, + {0x972c, 0x00000000}, + {0x9730, 0x00000000}, + {0x9734, 0x00000000}, + {0x9738, 0x00000000}, + {0x973c, 0x00000000}, + {0x9740, 0x00000000}, + {0x9744, 0x00000000}, + {0x9748, 0x00000000}, + {0x974c, 0x00000000}, + {0x9750, 0x00000000}, + {0x9754, 0x00000000}, + {0x9758, 0x00000000}, + {0x975c, 0x00000000}, + {0x9760, 0x04000000}, + {0x9764, 0x00000000}, + {0x9768, 0x00000000}, + {0x976c, 0x00000000}, + {0x9770, 0x00000000}, + {0x9774, 0x00000000}, + {0x9778, 0x00000000}, + {0x977c, 0x00000000}, + {0x9780, 0x00000000}, + {0x9784, 0x00000000}, + {0x9788, 0x00000000}, + {0x978c, 0x00000000}, + {0x9790, 0x00000000}, + {0x9794, 0x00000000}, + {0x9798, 0x00000000}, + {0x979c, 0x00000000}, + {0x97a0, 0x00000000}, + {0x97a4, 0x04000000}, + {0x97a8, 0x00000000}, + {0x97ac, 0x00000000}, + {0x97b0, 0x00000000}, + {0x97b4, 0x00000000}, + {0x97b8, 0x00000000}, + {0x97bc, 0x00000000}, + {0x97c0, 0x00000000}, + {0x97c4, 0x00000000}, + {0x97c8, 0x00000000}, + {0x97cc, 0x00000000}, + {0x97d0, 0x00000000}, + {0x97d4, 0x00000000}, + {0x97d8, 0x00000000}, + {0x97dc, 0x00000000}, + {0x97e0, 0x00000000}, + {0x97e4, 0x00000000}, + {0x97e8, 0x04000000}, + {0x97ec, 0x00000000}, + {0x97f0, 0x00000000}, + {0x97f4, 0x00000000}, + {0x97f8, 0x00000000}, + {0x97fc, 0x00000000}, + {0x9800, 0x00000000}, + {0x9804, 0x00000000}, + {0x9808, 0x00000000}, + {0x980c, 0x00000000}, + {0x9810, 0x00000000}, + {0x9814, 0x00000000}, + {0x9818, 0x00000000}, + {0x981c, 0x00000000}, + {0x9820, 0x00000000}, + {0x9824, 0x00000000}, + {0x9828, 0x00000000}, + {0x982c, 0x04000000}, + {0x9830, 0x00000000}, + {0x9834, 0x00000000}, + {0x9838, 0x00000000}, + {0x983c, 0x00000000}, + {0x9840, 0x00000000}, + {0x9844, 0x00000000}, + {0x9848, 0x00000000}, + {0x984c, 0x00000000}, + {0x9850, 0x00000000}, + {0x9854, 0x00000000}, + {0x9858, 0x00000000}, + {0x985c, 0x00000000}, + {0x9860, 0x00000000}, + {0x9864, 0x00000000}, + {0x9868, 0x00000000}, + {0x986c, 0x00000000}, + {0x9870, 0x04000000}, + {0x9874, 0x00000000}, + {0x9878, 0x00000000}, + {0x987c, 0x00000000}, + {0x9880, 0x00000000}, + {0x9884, 0x00000000}, + {0x9888, 
0x00000000}, + {0x988c, 0x00000000}, + {0x9890, 0x00000000}, + {0x9894, 0x00000000}, + {0x9898, 0x00000000}, + {0x989c, 0x00000000}, + {0x98a0, 0x00000000}, + {0x98a4, 0x00000000}, + {0x98a8, 0x00000000}, + {0x98ac, 0x00000000}, + {0x98b0, 0x00000000}, + {0x98b4, 0x04000000}, + {0x98b8, 0x00000000}, + {0x98bc, 0x00000000}, + {0x98c0, 0x00000000}, + {0x98c4, 0x00000000}, + {0x98c8, 0x00000000}, + {0x98cc, 0x00000000}, + {0x98d0, 0x00000000}, + {0x98d4, 0x00000000}, + {0x98d8, 0x00000000}, + {0x98dc, 0x00000000}, + {0x98e0, 0x00000000}, + {0x98e4, 0x00000000}, + {0x98e8, 0x00000000}, + {0x98ec, 0x00000000}, + {0x98f0, 0x00000000}, + {0x98f4, 0x00000000}, + {0x98f8, 0x04000000}, + {0x98fc, 0x00000000}, + {0x9900, 0x00000000}, + {0x9904, 0x00000000}, + {0x9908, 0x00000000}, + {0x990c, 0x00000000}, + {0x9910, 0x00000000}, + {0x9914, 0x00000000}, + {0x9918, 0x00000000}, + {0x991c, 0x00000000}, + {0x9920, 0x00000000}, + {0x9924, 0x00000000}, + {0x9928, 0x00000000}, + {0x992c, 0x00000000}, + {0x9930, 0x00000000}, + {0x9934, 0x00000000}, + {0x9938, 0x00000000}, + {0x993c, 0x04000000}, + {0x9940, 0x00000000}, + {0x9944, 0x00000000}, + {0x9948, 0x00000000}, + {0x994c, 0x00000000}, + {0x9950, 0x00000000}, + {0x9954, 0x00000000}, + {0x9958, 0x00000000}, + {0x995c, 0x00000000}, + {0x9960, 0x00000000}, + {0x9964, 0x00000000}, + {0x9968, 0x00000000}, + {0x996c, 0x00000000}, + {0x9970, 0x00000000}, + {0x9974, 0x00000000}, + {0x9978, 0x00000000}, + {0x997c, 0x00000000}, + {0x9980, 0x04000000}, + {0x9984, 0x00000000}, + {0x9988, 0x00000000}, + {0x998c, 0x00000000}, + {0x9990, 0x00000000}, + {0x9994, 0x00000000}, + {0x9998, 0x00000000}, + {0x999c, 0x00000000}, + {0x99a0, 0x00000000}, + {0x99a4, 0x00000000}, + {0x99a8, 0x00000000}, + {0x99ac, 0x00000000}, + {0x99b0, 0x00000000}, + {0x99b4, 0x00000000}, + {0x99b8, 0x00000000}, + {0x99bc, 0x00000000}, + {0x99c0, 0x00000000}, + {0x99c4, 0x04000000}, + {0x99c8, 0x00000000}, + {0x99cc, 0x00000000}, + {0x99d0, 0x00000000}, + {0x99d4, 0x00000000}, + {0x99d8, 0x00000000}, + {0x99dc, 0x00000000}, + {0x99e0, 0x00000000}, + {0x99e4, 0x00000000}, + {0x99e8, 0x00000000}, + {0x99ec, 0x00000000}, + {0x99f0, 0x00000000}, + {0x99f4, 0x00000000}, + {0x99f8, 0x00000000}, + {0x99fc, 0x00000000}, + {0x9a00, 0x00000000}, + {0x9a04, 0x00000000}, + {0x9a08, 0x04000000}, + {0x9a0c, 0x00000000}, + {0x9a10, 0x00000000}, + {0x9a14, 0x00000000}, + {0x9a18, 0x00000000}, + {0x9a1c, 0x00000000}, + {0x9a20, 0x00000000}, + {0x9a24, 0x00000000}, + {0x9a28, 0x00000000}, + {0x9a2c, 0x00000000}, + {0x9a30, 0x00000000}, + {0x9a34, 0x00000000}, + {0x9a38, 0x00000000}, + {0x9a3c, 0x00000000}, + {0x9a40, 0x00000000}, + {0x9a44, 0x00000000}, + {0x9a48, 0x00000000}, + {0x9a4c, 0x04000000}, + {0x9a50, 0x00000000}, + {0x9a54, 0x00000000}, + {0x9a58, 0x00000000}, + {0x9a5c, 0x00000000}, + {0x9a60, 0x00000000}, + {0x9a64, 0x00000000}, + {0x9a68, 0x00000000}, + {0x9a6c, 0x00000000}, + {0x9a70, 0x00000000}, + {0x9a74, 0x00000000}, + {0x9a78, 0x00000000}, + {0x9a7c, 0x00000000}, + {0x9a80, 0x00000000}, + {0x9a84, 0x00000000}, + {0x9a88, 0x00000000}, + {0x9a8c, 0x00000000}, + {0x9a90, 0x04000000}, + {0x9a94, 0x00000000}, + {0x9a98, 0x00000000}, + {0x9a9c, 0x00000000}, + {0x9aa0, 0x00000000}, + {0x9aa4, 0x00000000}, + {0x9aa8, 0x00000000}, + {0x9aac, 0x00000000}, + {0x9ab0, 0x00000000}, + {0x9ab4, 0x00000000}, + {0x9ab8, 0x00000000}, + {0x9abc, 0x00000000}, + {0x9ac0, 0x00000000}, + {0x9ac4, 0x00000000}, + {0x9ac8, 0x00000000}, + {0x9acc, 0x00000000}, + {0x9ad0, 0x00000000}, + {0x9ad4, 0x04000000}, + {0x9ad8, 
0x00000000}, + {0x9adc, 0x00000000}, + {0x9ae0, 0x00000000}, + {0x9ae4, 0x00000000}, + {0x9ae8, 0x00000000}, + {0x9aec, 0x00000000}, + {0x9af0, 0x00000000}, + {0x9af4, 0x00000000}, + {0x9af8, 0x00000000}, + {0x9afc, 0x00000000}, + {0x9b00, 0x00000000}, + {0x9b04, 0x00000000}, + {0x9b08, 0x00000000}, + {0x9b0c, 0x00000000}, + {0x9b10, 0x00000000}, + {0x9b14, 0x00000000}, + {0x9b18, 0x04000000}, + {0x9b1c, 0x00000000}, + {0x9b20, 0x00000000}, + {0x9b24, 0x00000000}, + {0x9b28, 0x00000000}, + {0x9b2c, 0x00000000}, + {0x9b30, 0x00000000}, + {0x9b34, 0x00000000}, + {0x9b38, 0x00000000}, + {0x9b3c, 0x00000000}, + {0x9b40, 0x00000000}, + {0x9b44, 0x00000000}, + {0x9b48, 0x00000000}, + {0x9b4c, 0x00000000}, + {0x9b50, 0x00000000}, + {0x9b54, 0x00000000}, + {0x9b58, 0x00000000}, + {0x9b5c, 0x04000000}, + {0x9d00, 0x00000000}, + {0x9d04, 0x00000000}, + {0x9d08, 0x00000000}, + {0x9d0c, 0x00000000}, + {0x9d10, 0x00000000}, + {0x9d14, 0x00000000}, + {0x9d18, 0x00000000}, + {0x9d1c, 0x00000000}, + {0x9d20, 0x00000000}, + {0x9d24, 0x00000000}, + {0x9d28, 0x00000000}, + {0x9d2c, 0x00000000}, + {0x9d30, 0x00000000}, + {0x9d34, 0x00000000}, + {0x9d38, 0x00000000}, + {0x9d3c, 0x00000000}, + {0x9d40, 0x04000000}, + {0x9d44, 0x00000000}, + {0x9d48, 0x00000000}, + {0x9d4c, 0x00000000}, + {0x9d50, 0x00000000}, + {0x9d54, 0x00000000}, + {0x9d58, 0x00000000}, + {0x9d5c, 0x00000000}, + {0x9d60, 0x00000000}, + {0x9d64, 0x00000000}, + {0x9d68, 0x00000000}, + {0x9d6c, 0x00000000}, + {0x9d70, 0x00000000}, + {0x9d74, 0x00000000}, + {0x9d78, 0x00000000}, + {0x9d7c, 0x00000000}, + {0x9d80, 0x00000000}, + {0x9d84, 0x04000000}, + {0x9d88, 0x00000000}, + {0x9d8c, 0x00000000}, + {0x9d90, 0x00000000}, + {0x9d94, 0x00000000}, + {0x9d98, 0x00000000}, + {0x9d9c, 0x00000000}, + {0x9da0, 0x00000000}, + {0x9da4, 0x00000000}, + {0x9da8, 0x00000000}, + {0x9dac, 0x00000000}, + {0x9db0, 0x00000000}, + {0x9db4, 0x00000000}, + {0x9db8, 0x00000000}, + {0x9dbc, 0x00000000}, + {0x9dc0, 0x00000000}, + {0x9dc4, 0x00000000}, + {0x9dc8, 0x04000000}, + {0x9dcc, 0x00000000}, + {0x9dd0, 0x00000000}, + {0x9dd4, 0x00000000}, + {0x9dd8, 0x00000000}, + {0x9ddc, 0x00000000}, + {0x9de0, 0x00000000}, + {0x9de4, 0x00000000}, + {0x9de8, 0x00000000}, + {0x9dec, 0x00000000}, + {0x9df0, 0x00000000}, + {0x9df4, 0x00000000}, + {0x9df8, 0x00000000}, + {0x9dfc, 0x00000000}, + {0x9e00, 0x00000000}, + {0x9e04, 0x00000000}, + {0x9e08, 0x00000000}, + {0x9e0c, 0x04000000}, + {0x9e10, 0x00000000}, + {0x9e14, 0x00000000}, + {0x9e18, 0x00000000}, + {0x9e1c, 0x00000000}, + {0x9e20, 0x00000000}, + {0x9e24, 0x00000000}, + {0x9e28, 0x00000000}, + {0x9e2c, 0x00000000}, + {0x9e30, 0x00000000}, + {0x9e34, 0x00000000}, + {0x9e38, 0x00000000}, + {0x9e3c, 0x00000000}, + {0x9e40, 0x00000000}, + {0x9e44, 0x00000000}, + {0x9e48, 0x00000000}, + {0x9e4c, 0x00000000}, + {0x9e50, 0x04000000}, + {0x9e54, 0x00000000}, + {0x9e58, 0x00000000}, + {0x9e5c, 0x00000000}, + {0x9e60, 0x00000000}, + {0x9e64, 0x00000000}, + {0x9e68, 0x00000000}, + {0x9e6c, 0x00000000}, + {0x9e70, 0x00000000}, + {0x9e74, 0x00000000}, + {0x9e78, 0x00000000}, + {0x9e7c, 0x00000000}, + {0x9e80, 0x00000000}, + {0x9e84, 0x00000000}, + {0x9e88, 0x00000000}, + {0x9e8c, 0x00000000}, + {0x9e90, 0x00000000}, + {0x9e94, 0x04000000}, + {0x9e98, 0x00000000}, + {0x9e9c, 0x00000000}, + {0x9ea0, 0x00000000}, + {0x9ea4, 0x00000000}, + {0x9ea8, 0x00000000}, + {0x9eac, 0x00000000}, + {0x9eb0, 0x00000000}, + {0x9eb4, 0x00000000}, + {0x9eb8, 0x00000000}, + {0x9ebc, 0x00000000}, + {0x9ec0, 0x00000000}, + {0x9ec4, 0x00000000}, + {0x9ec8, 
0x00000000}, + {0x9ecc, 0x00000000}, + {0x9ed0, 0x00000000}, + {0x9ed4, 0x00000000}, + {0x9ed8, 0x04000000}, + {0x9edc, 0x00000000}, + {0x9ee0, 0x00000000}, + {0x9ee4, 0x00000000}, + {0x9ee8, 0x00000000}, + {0x9eec, 0x00000000}, + {0x9ef0, 0x00000000}, + {0x9ef4, 0x00000000}, + {0x9ef8, 0x00000000}, + {0x9efc, 0x00000000}, + {0x9f00, 0x00000000}, + {0x9f04, 0x00000000}, + {0x9f08, 0x00000000}, + {0x9f0c, 0x00000000}, + {0x9f10, 0x00000000}, + {0x9f14, 0x00000000}, + {0x9f18, 0x00000000}, + {0x9f1c, 0x04000000}, + {0x9f20, 0x00000000}, + {0x9f24, 0x00000000}, + {0x9f28, 0x00000000}, + {0x9f2c, 0x00000000}, + {0x9f30, 0x00000000}, + {0x9f34, 0x00000000}, + {0x9f38, 0x00000000}, + {0x9f3c, 0x00000000}, + {0x9f40, 0x00000000}, + {0x9f44, 0x00000000}, + {0x9f48, 0x00000000}, + {0x9f4c, 0x00000000}, + {0x9f50, 0x00000000}, + {0x9f54, 0x00000000}, + {0x9f58, 0x00000000}, + {0x9f5c, 0x00000000}, + {0x9f60, 0x04000000}, + {0x9f64, 0x00000000}, + {0x9f68, 0x00000000}, + {0x9f6c, 0x00000000}, + {0x9f70, 0x00000000}, + {0x9f74, 0x00000000}, + {0x9f78, 0x00000000}, + {0x9f7c, 0x00000000}, + {0x9f80, 0x00000000}, + {0x9f84, 0x00000000}, + {0x9f88, 0x00000000}, + {0x9f8c, 0x00000000}, + {0x9f90, 0x00000000}, + {0x9f94, 0x00000000}, + {0x9f98, 0x00000000}, + {0x9f9c, 0x00000000}, + {0x9fa0, 0x00000000}, + {0x9fa4, 0x04000000}, + {0x9fa8, 0x00000000}, + {0x9fac, 0x00000000}, + {0x9fb0, 0x00000000}, + {0x9fb4, 0x00000000}, + {0x9fb8, 0x00000000}, + {0x9fbc, 0x00000000}, + {0x9fc0, 0x00000000}, + {0x9fc4, 0x00000000}, + {0x9fc8, 0x00000000}, + {0x9fcc, 0x00000000}, + {0x9fd0, 0x00000000}, + {0x9fd4, 0x00000000}, + {0x9fd8, 0x00000000}, + {0x9fdc, 0x00000000}, + {0x9fe0, 0x00000000}, + {0x9fe4, 0x00000000}, + {0x9fe8, 0x04000000}, + {0x9fec, 0x00000000}, + {0x9ff0, 0x00000000}, + {0x9ff4, 0x00000000}, + {0x9ff8, 0x00000000}, + {0x9ffc, 0x00000000}, + {0xa000, 0x00000000}, + {0xa004, 0x00000000}, + {0xa008, 0x00000000}, + {0xa00c, 0x00000000}, + {0xa010, 0x00000000}, + {0xa014, 0x00000000}, + {0xa018, 0x00000000}, + {0xa01c, 0x00000000}, + {0xa020, 0x00000000}, + {0xa024, 0x00000000}, + {0xa028, 0x00000000}, + {0xa02c, 0x04000000}, + {0xa030, 0x00000000}, + {0xa034, 0x00000000}, + {0xa038, 0x00000000}, + {0xa03c, 0x00000000}, + {0xa040, 0x00000000}, + {0xa044, 0x00000000}, + {0xa048, 0x00000000}, + {0xa04c, 0x00000000}, + {0xa050, 0x00000000}, + {0xa054, 0x00000000}, + {0xa058, 0x00000000}, + {0xa05c, 0x00000000}, + {0xa060, 0x00000000}, + {0xa064, 0x00000000}, + {0xa068, 0x00000000}, + {0xa06c, 0x00000000}, + {0xa070, 0x04000000}, + {0xa074, 0x00000000}, + {0xa078, 0x00000000}, + {0xa07c, 0x00000000}, + {0xa080, 0x00000000}, + {0xa084, 0x00000000}, + {0xa088, 0x00000000}, + {0xa08c, 0x00000000}, + {0xa090, 0x00000000}, + {0xa094, 0x00000000}, + {0xa098, 0x00000000}, + {0xa09c, 0x00000000}, + {0xa0a0, 0x00000000}, + {0xa0a4, 0x00000000}, + {0xa0a8, 0x00000000}, + {0xa0ac, 0x00000000}, + {0xa0b0, 0x00000000}, + {0xa0b4, 0x04000000}, + {0xa0b8, 0x00000000}, + {0xa0bc, 0x00000000}, + {0xa0c0, 0x00000000}, + {0xa0c4, 0x00000000}, + {0xa0c8, 0x00000000}, + {0xa0cc, 0x00000000}, + {0xa0d0, 0x00000000}, + {0xa0d4, 0x00000000}, + {0xa0d8, 0x00000000}, + {0xa0dc, 0x00000000}, + {0xa0e0, 0x00000000}, + {0xa0e4, 0x00000000}, + {0xa0e8, 0x00000000}, + {0xa0ec, 0x00000000}, + {0xa0f0, 0x00000000}, + {0xa0f4, 0x00000000}, + {0xa0f8, 0x04000000}, + {0xa0fc, 0x00000000}, + {0xa100, 0x00000000}, + {0xa104, 0x00000000}, + {0xa108, 0x00000000}, + {0xa10c, 0x00000000}, + {0xa110, 0x00000000}, + {0xa114, 0x00000000}, + {0xa118, 
0x00000000}, + {0xa11c, 0x00000000}, + {0xa120, 0x00000000}, + {0xa124, 0x00000000}, + {0xa128, 0x00000000}, + {0xa12c, 0x00000000}, + {0xa130, 0x00000000}, + {0xa134, 0x00000000}, + {0xa138, 0x00000000}, + {0xa13c, 0x04000000}, + {0xa140, 0x00000000}, + {0xa144, 0x00000000}, + {0xa148, 0x00000000}, + {0xa14c, 0x00000000}, + {0xa150, 0x00000000}, + {0xa154, 0x00000000}, + {0xa158, 0x00000000}, + {0xa15c, 0x00000000}, + {0xa160, 0x00000000}, + {0xa164, 0x00000000}, + {0xa168, 0x00000000}, + {0xa16c, 0x00000000}, + {0xa170, 0x00000000}, + {0xa174, 0x00000000}, + {0xa178, 0x00000000}, + {0xa17c, 0x00000000}, + {0xa180, 0x04000000}, + {0xa184, 0x00000000}, + {0xa188, 0x00000000}, + {0xa18c, 0x00000000}, + {0xa190, 0x00000000}, + {0xa194, 0x00000000}, + {0xa198, 0x00000000}, + {0xa19c, 0x00000000}, + {0xa1a0, 0x00000000}, + {0xa1a4, 0x00000000}, + {0xa1a8, 0x00000000}, + {0xa1ac, 0x00000000}, + {0xa1b0, 0x00000000}, + {0xa1b4, 0x00000000}, + {0xa1b8, 0x00000000}, + {0xa1bc, 0x00000000}, + {0xa1c0, 0x00000000}, + {0xa1c4, 0x04000000}, + {0xa1c8, 0x00000000}, + {0xa1cc, 0x00000000}, + {0xa1d0, 0x00000000}, + {0xa1d4, 0x00000000}, + {0xa1d8, 0x00000000}, + {0xa1dc, 0x00000000}, + {0xa1e0, 0x00000000}, + {0xa1e4, 0x00000000}, + {0xa1e8, 0x00000000}, + {0xa1ec, 0x00000000}, + {0xa1f0, 0x00000000}, + {0xa1f4, 0x00000000}, + {0xa1f8, 0x00000000}, + {0xa1fc, 0x00000000}, + {0xa200, 0x00000000}, + {0xa204, 0x00000000}, + {0xa208, 0x04000000}, + {0xa20c, 0x00000000}, + {0xa210, 0x00000000}, + {0xa214, 0x00000000}, + {0xa218, 0x00000000}, + {0xa21c, 0x00000000}, + {0xa220, 0x00000000}, + {0xa224, 0x00000000}, + {0xa228, 0x00000000}, + {0xa22c, 0x00000000}, + {0xa230, 0x00000000}, + {0xa234, 0x00000000}, + {0xa238, 0x00000000}, + {0xa23c, 0x00000000}, + {0xa240, 0x00000000}, + {0xa244, 0x00000000}, + {0xa248, 0x00000000}, + {0xa24c, 0x04000000}, + {0xa250, 0x00000000}, + {0xa254, 0x00000000}, + {0xa258, 0x00000000}, + {0xa25c, 0x00000000}, + {0xa260, 0x00000000}, + {0xa264, 0x00000000}, + {0xa268, 0x00000000}, + {0xa26c, 0x00000000}, + {0xa270, 0x00000000}, + {0xa274, 0x00000000}, + {0xa278, 0x00000000}, + {0xa27c, 0x00000000}, + {0xa280, 0x00000000}, + {0xa284, 0x00000000}, + {0xa288, 0x00000000}, + {0xa28c, 0x00000000}, + {0xa290, 0x04000000}, + {0xa294, 0x00000000}, + {0xa298, 0x00000000}, + {0xa29c, 0x00000000}, + {0xa2a0, 0x00000000}, + {0xa2a4, 0x00000000}, + {0xa2a8, 0x00000000}, + {0xa2ac, 0x00000000}, + {0xa2b0, 0x00000000}, + {0xa2b4, 0x00000000}, + {0xa2b8, 0x00000000}, + {0xa2bc, 0x00000000}, + {0xa2c0, 0x00000000}, + {0xa2c4, 0x00000000}, + {0xa2c8, 0x00000000}, + {0xa2cc, 0x00000000}, + {0xa2d0, 0x00000000}, + {0xa2d4, 0x04000000}, + {0xa2d8, 0x00000000}, + {0xa2dc, 0x00000000}, + {0xa2e0, 0x00000000}, + {0xa2e4, 0x00000000}, + {0xa2e8, 0x00000000}, + {0xa2ec, 0x00000000}, + {0xa2f0, 0x00000000}, + {0xa2f4, 0x00000000}, + {0xa2f8, 0x00000000}, + {0xa2fc, 0x00000000}, + {0xa300, 0x00000000}, + {0xa304, 0x00000000}, + {0xa308, 0x00000000}, + {0xa30c, 0x00000000}, + {0xa310, 0x00000000}, + {0xa314, 0x00000000}, + {0xa318, 0x04000000}, + {0xa31c, 0x00000000}, + {0xa320, 0x00000000}, + {0xa324, 0x00000000}, + {0xa328, 0x00000000}, + {0xa32c, 0x00000000}, + {0xa330, 0x00000000}, + {0xa334, 0x00000000}, + {0xa338, 0x00000000}, + {0xa33c, 0x00000000}, + {0xa340, 0x00000000}, + {0xa344, 0x00000000}, + {0xa348, 0x00000000}, + {0xa34c, 0x00000000}, + {0xa350, 0x00000000}, + {0xa354, 0x00000000}, + {0xa358, 0x00000000}, + {0xa35c, 0x04000000}, + {0x81d8, 0x00000000}, + {0x82d8, 0x00000000}, + {0xb104, 
0x2b251f19}, + {0xb108, 0x433d3731}, + {0xb10c, 0x5b554f49}, + {0xb110, 0x736d6761}, + {0xb114, 0x7f7f7f79}, + {0xb118, 0x120f7f7f}, + {0xb11c, 0x1e1b1815}, + {0xb120, 0x2a272421}, + {0xb124, 0x3633302d}, + {0xb128, 0x3f3f3c39}, + {0xb12c, 0x3f3f3f3f}, + {0x8088, 0x00000110}, + {0x8000, 0x00000008}, + {0x8080, 0x00000005}, + {0x8500, 0x80000008}, + {0x8504, 0x43000004}, + {0x8508, 0x4b044a00}, + {0x850c, 0x40098604}, + {0x8510, 0x0004e024}, + {0x8514, 0x87044b05}, + {0x8518, 0xe024400b}, + {0x851c, 0x4b000004}, + {0x8520, 0x21e07410}, + {0x8524, 0x16580000}, + {0x8528, 0x00047430}, + {0x852c, 0x00074380}, + {0x8530, 0x00044c00}, + {0x8534, 0x00074300}, + {0x8538, 0x00045603}, + {0x853c, 0x42fe5700}, + {0x8540, 0x42004000}, + {0x8544, 0x30005055}, + {0x8548, 0xa512b41c}, + {0x854c, 0xf02fe66f}, + {0x8550, 0xf22ff12f}, + {0x8554, 0xf42ff32f}, + {0x8558, 0xf62ff52f}, + {0x855c, 0xf82ff72f}, + {0x8560, 0xfa2ff92f}, + {0x8564, 0xfc2ffb2f}, + {0x8568, 0xfe2ffd2f}, + {0x856c, 0xe66fff2f}, + {0x8570, 0xf12ef02e}, + {0x8574, 0xf32ef22e}, + {0x8578, 0xf52ef42e}, + {0x857c, 0xff2ef62e}, + {0x8580, 0xa511000b}, + {0x8584, 0xf12cf02c}, + {0x8588, 0xf32cf22c}, + {0x858c, 0xf52cf42c}, + {0x8590, 0xf72cf62c}, + {0x8594, 0xf92cf82c}, + {0x8598, 0xfb2cfa2c}, + {0x859c, 0xfd2cfc2c}, + {0x85a0, 0xff2cfe2c}, + {0x85a4, 0xf12cf02c}, + {0x85a8, 0x0001f22c}, + {0x85ac, 0x30b330b3}, + {0x85b0, 0x310c3125}, + {0x85b4, 0x31253161}, + {0x85b8, 0x3081316f}, + {0x85bc, 0x317f3172}, + {0x85c0, 0x3192318c}, + {0x85c4, 0x32b832a6}, + {0x85c8, 0x31fd32c2}, + {0x85cc, 0x330732cc}, + {0x85d0, 0x33193343}, + {0x85d4, 0x331d3312}, + {0x85d8, 0x31663316}, + {0x85dc, 0x3365335b}, + {0x85e0, 0x3379336f}, + {0x85e4, 0x338d3383}, + {0x85e8, 0x33a13397}, + {0x85ec, 0x33b833ab}, + {0x85f0, 0x33d733c9}, + {0x85f4, 0x342333db}, + {0x85f8, 0x343c343b}, + {0x85fc, 0x3471346f}, + {0x8600, 0xe493347c}, + {0x8604, 0x20887410}, + {0x8608, 0x140f0200}, + {0x860c, 0x02002098}, + {0x8610, 0x20a8140f}, + {0x8614, 0x140f0200}, + {0x8618, 0xe4df7430}, + {0x861c, 0x74105b10}, + {0x8620, 0x000120a0}, + {0x8624, 0x140f140f}, + {0x8628, 0x56e15507}, + {0x862c, 0xe4c95c06}, + {0x8630, 0x20a87410}, + {0x8634, 0x140f0201}, + {0x8638, 0xe4c95517}, + {0x863c, 0x20a87410}, + {0x8640, 0x140f0200}, + {0x8644, 0x56c15517}, + {0x8648, 0xe4c95c02}, + {0x864c, 0x20a07410}, + {0x8650, 0x140f0000}, + {0x8654, 0x55071407}, + {0x8658, 0xe47ee4c9}, + {0x865c, 0x4686750a}, + {0x8660, 0xe159e4d3}, + {0x8664, 0xe4930001}, + {0x8668, 0x20a87410}, + {0x866c, 0x140f0200}, + {0x8670, 0x02002098}, + {0x8674, 0x2088140f}, + {0x8678, 0x140f0200}, + {0x867c, 0xe4df7430}, + {0x8680, 0x74105b10}, + {0x8684, 0x020120a8}, + {0x8688, 0x2080140f}, + {0x868c, 0x140f0000}, + {0x8690, 0x56615507}, + {0x8694, 0xe4c95c06}, + {0x8698, 0x20887410}, + {0x869c, 0x140f0200}, + {0x86a0, 0xe4c95517}, + {0x86a4, 0x20a87410}, + {0x86a8, 0x140f0200}, + {0x86ac, 0x56415517}, + {0x86b0, 0xe4c95c02}, + {0x86b4, 0x20807410}, + {0x86b8, 0x140f0000}, + {0x86bc, 0x55071407}, + {0x86c0, 0xe47ee4c9}, + {0x86c4, 0x468e7508}, + {0x86c8, 0xe159e4d3}, + {0x86cc, 0x5b10f025}, + {0x86d0, 0x20a87410}, + {0x86d4, 0x140f0201}, + {0x86d8, 0x00002090}, + {0x86dc, 0x5507140f}, + {0x86e0, 0x5c065661}, + {0x86e4, 0x7410e4c9}, + {0x86e8, 0x02002098}, + {0x86ec, 0x5517140f}, + {0x86f0, 0x7410e4c9}, + {0x86f4, 0x020020a8}, + {0x86f8, 0x5517140f}, + {0x86fc, 0x5c025641}, + {0x8700, 0x7410e4c9}, + {0x8704, 0x00002090}, + {0x8708, 0x5507140f}, + {0x870c, 0x7509e4c9}, + {0x8710, 0xe4d34696}, + {0x8714, 0x0001e159}, + {0x8718, 
0x74105b10}, + {0x871c, 0x000020a0}, + {0x8720, 0x5507140f}, + {0x8724, 0xe4c95601}, + {0x8728, 0x20a87410}, + {0x872c, 0x140f0200}, + {0x8730, 0xe4c95517}, + {0x8734, 0x750ae47e}, + {0x8738, 0xe4d34686}, + {0x873c, 0x5500e159}, + {0x8740, 0x5501e4c5}, + {0x8744, 0xe4930001}, + {0x8748, 0x5b10e4df}, + {0x874c, 0x20807410}, + {0x8750, 0x140f0000}, + {0x8754, 0x02002098}, + {0x8758, 0xf205140f}, + {0x875c, 0x20a8f504}, + {0x8760, 0x140f0200}, + {0x8764, 0x56015507}, + {0x8768, 0x7410e4c9}, + {0x876c, 0x02002088}, + {0x8770, 0x5517140f}, + {0x8774, 0xe47ee4c9}, + {0x8778, 0x468e7508}, + {0x877c, 0xe159e4d3}, + {0x8780, 0x7410f512}, + {0x8784, 0x00002090}, + {0x8788, 0x5507140f}, + {0x878c, 0x7410e4c9}, + {0x8790, 0x02002098}, + {0x8794, 0x5517140f}, + {0x8798, 0x7509e4c9}, + {0x879c, 0xe4d34696}, + {0x87a0, 0x0001e159}, + {0x87a4, 0x46965b90}, + {0x87a8, 0xe4c55500}, + {0x87ac, 0x5b105501}, + {0x87b0, 0x79000001}, + {0x87b4, 0x57107420}, + {0x87b8, 0x140f5700}, + {0x87bc, 0x74309700}, + {0x87c0, 0xe4930001}, + {0x87c4, 0x0bbde4df}, + {0x87c8, 0x0001e662}, + {0x87cc, 0x5720e493}, + {0x87d0, 0x540054fd}, + {0x87d4, 0x70005700}, + {0x87d8, 0x70c0e4dd}, + {0x87dc, 0xe4a90001}, + {0x87e0, 0x0001e512}, + {0x87e4, 0x31abe493}, + {0x87e8, 0xe6620023}, + {0x87ec, 0x54ed0002}, + {0x87f0, 0x00230baa}, + {0x87f4, 0x0002e662}, + {0x87f8, 0xe486e52e}, + {0x87fc, 0xe4930001}, + {0x8800, 0x002231a1}, + {0x8804, 0x0002e662}, + {0x8808, 0x0baa54ec}, + {0x880c, 0xe6620022}, + {0x8810, 0xe52e0002}, + {0x8814, 0x0001e486}, + {0x8818, 0x0baae493}, + {0x881c, 0xe52e3194}, + {0x8820, 0x0001e486}, + {0x8824, 0x0babe493}, + {0x8828, 0x6d0f6c67}, + {0x882c, 0xe662e4df}, + {0x8830, 0x6c8bfb04}, + {0x8834, 0xe662e4df}, + {0x8838, 0x6c95fa04}, + {0x883c, 0xe662e4df}, + {0x8840, 0x0bacfb06}, + {0x8844, 0x6d0f6cb3}, + {0x8848, 0xe662e4df}, + {0x884c, 0xf904fa05}, + {0x8850, 0xe4df6ccb}, + {0x8854, 0xfb06e662}, + {0x8858, 0x6cdb0bad}, + {0x885c, 0xe4df6d0f}, + {0x8860, 0x6cf5e662}, + {0x8864, 0xe4df6d0f}, + {0x8868, 0x6c0be662}, + {0x886c, 0xe4df6d00}, + {0x8870, 0xfb04e662}, + {0x8874, 0xe4df6c25}, + {0x8878, 0xf8b7e662}, + {0x887c, 0xf904fa05}, + {0x8880, 0xe4df6c35}, + {0x8884, 0xfb04e662}, + {0x8888, 0xe4df6c4d}, + {0x888c, 0xf9bae662}, + {0x8890, 0x6c6bfa04}, + {0x8894, 0xe662e4df}, + {0x8898, 0x6c75fb04}, + {0x889c, 0xe662e4df}, + {0x88a0, 0xe4df6c99}, + {0x88a4, 0xfabce662}, + {0x88a8, 0x57200ba8}, + {0x88ac, 0x540054f0}, + {0x88b0, 0x7c355700}, + {0x88b4, 0x70007d00}, + {0x88b8, 0x6d0e6cc5}, + {0x88bc, 0xe662e4dd}, + {0x88c0, 0xe4dd6cf5}, + {0x88c4, 0x6c29e662}, + {0x88c8, 0xe4dd6d0f}, + {0x88cc, 0x0bb3e662}, + {0x88d0, 0x54ed5720}, + {0x88d4, 0x57005400}, + {0x88d8, 0x7d0f7ccb}, + {0x88dc, 0x6d006cd7}, + {0x88e0, 0xe662e4dd}, + {0x88e4, 0x6d016c0b}, + {0x88e8, 0xe662e4dd}, + {0x88ec, 0xe4dd6c3b}, + {0x88f0, 0x70c0e662}, + {0x88f4, 0xe486e52e}, + {0x88f8, 0xe4a90001}, + {0x88fc, 0x63424380}, + {0x8900, 0x43006887}, + {0x8904, 0x74100ba6}, + {0x8908, 0x000121e8}, + {0x890c, 0x6ec71658}, + {0x8910, 0xe5126f0e}, + {0x8914, 0x7410e667}, + {0x8918, 0x000321e8}, + {0x891c, 0x6eeb1658}, + {0x8920, 0xe667e512}, + {0x8924, 0x21e87410}, + {0x8928, 0x16580005}, + {0x892c, 0x6f0f6e13}, + {0x8930, 0xe667e512}, + {0x8934, 0x21e87410}, + {0x8938, 0x16580007}, + {0x893c, 0xe5126e3b}, + {0x8940, 0x7410e667}, + {0x8944, 0x000921e8}, + {0x8948, 0x6e671658}, + {0x894c, 0xe5126f0f}, + {0x8950, 0x7410e667}, + {0x8954, 0x000b21e8}, + {0x8958, 0x6e8b1658}, + {0x895c, 0xe667e512}, + {0x8960, 0x21e87410}, + {0x8964, 0x1658000d}, + {0x8968, 
0x6f0f6eb3}, + {0x896c, 0xe667e512}, + {0x8970, 0xfe08ff09}, + {0x8974, 0x21e87410}, + {0x8978, 0x1658000e}, + {0x897c, 0xe5126ec7}, + {0x8980, 0x7410e667}, + {0x8984, 0x000f21e8}, + {0x8988, 0x6edb1658}, + {0x898c, 0xe5126f0f}, + {0x8990, 0x7410e667}, + {0x8994, 0x001021e8}, + {0x8998, 0x6eef1658}, + {0x899c, 0xe667e512}, + {0x89a0, 0xfe02ff03}, + {0x89a4, 0x7410e667}, + {0x89a8, 0x001321e8}, + {0x89ac, 0x6e111658}, + {0x89b0, 0xe5126f00}, + {0x89b4, 0xff03e667}, + {0x89b8, 0xe667fe02}, + {0x89bc, 0x21e87410}, + {0x89c0, 0x16580014}, + {0x89c4, 0xe5126e25}, + {0x89c8, 0xfc48e667}, + {0x89cc, 0xfe08ff09}, + {0x89d0, 0x21e87410}, + {0x89d4, 0x16580015}, + {0x89d8, 0xe5126e39}, + {0x89dc, 0x7410e667}, + {0x89e0, 0x001621e8}, + {0x89e4, 0x6e4d1658}, + {0x89e8, 0xe667e512}, + {0x89ec, 0x7410fd49}, + {0x89f0, 0x001821e8}, + {0x89f4, 0x6e751658}, + {0x89f8, 0xe667e512}, + {0x89fc, 0x21e87410}, + {0x8a00, 0x1658001a}, + {0x8a04, 0xe5126e99}, + {0x8a08, 0xfe44e667}, + {0x8a0c, 0x21e87410}, + {0x8a10, 0x1658001c}, + {0x8a14, 0xe5126ec5}, + {0x8a18, 0x7410e667}, + {0x8a1c, 0x001e21e8}, + {0x8a20, 0x6eed1658}, + {0x8a24, 0xe667e512}, + {0x8a28, 0x21e87410}, + {0x8a2c, 0x16580020}, + {0x8a30, 0x6f016e15}, + {0x8a34, 0xe667e512}, + {0x8a38, 0x21e87410}, + {0x8a3c, 0x16580022}, + {0x8a40, 0xe5126e39}, + {0x8a44, 0xe52ee667}, + {0x8a48, 0x0001e49c}, + {0x8a4c, 0x4380e4a9}, + {0x8a50, 0x68806340}, + {0x8a54, 0x0bac4300}, + {0x8a58, 0x00223241}, + {0x8a5c, 0x0002e667}, + {0x8a60, 0x0baa54ec}, + {0x8a64, 0xe6670022}, + {0x8a68, 0xe52e0002}, + {0x8a6c, 0x0001e49c}, + {0x8a70, 0x4380e4a9}, + {0x8a74, 0x68816340}, + {0x8a78, 0x0baa4300}, + {0x8a7c, 0xe52e3230}, + {0x8a80, 0x0001e49c}, + {0x8a84, 0x4380e4a9}, + {0x8a88, 0x68826341}, + {0x8a8c, 0x0baa4300}, + {0x8a90, 0xe52e3221}, + {0x8a94, 0x0001e49c}, + {0x8a98, 0x42fc0004}, + {0x8a9c, 0x60010007}, + {0x8aa0, 0x42000004}, + {0x8aa4, 0x62200007}, + {0x8aa8, 0x00046200}, + {0x8aac, 0x5b405501}, + {0x8ab0, 0x00076605}, + {0x8ab4, 0x63006200}, + {0x8ab8, 0x0004e54f}, + {0x8abc, 0x0a010900}, + {0x8ac0, 0x0d000b40}, + {0x8ac4, 0x00320e01}, + {0x8ac8, 0x95090004}, + {0x8acc, 0x790442fb}, + {0x8ad0, 0x43804200}, + {0x8ad4, 0x4d010007}, + {0x8ad8, 0x43000004}, + {0x8adc, 0x05620007}, + {0x8ae0, 0x961d05a3}, + {0x8ae4, 0x0004e54f}, + {0x8ae8, 0x0007e4c5}, + {0x8aec, 0x07a306a2}, + {0x8af0, 0x0004e54f}, + {0x8af4, 0xe53fe4c5}, + {0x8af8, 0xe5470002}, + {0x8afc, 0x00074380}, + {0x8b00, 0x00044d00}, + {0x8b04, 0x42fe4300}, + {0x8b08, 0x42007900}, + {0x8b0c, 0x00040001}, + {0x8b10, 0x000742fc}, + {0x8b14, 0x00046003}, + {0x8b18, 0x32d24200}, + {0x8b1c, 0x06a20007}, + {0x8b20, 0x32fc07a3}, + {0x8b24, 0xe32ee320}, + {0x8b28, 0x0001e333}, + {0x8b2c, 0xe333e320}, + {0x8b30, 0xe3270001}, + {0x8b34, 0xe333e32e}, + {0x8b38, 0xe3270001}, + {0x8b3c, 0x0001e333}, + {0x8b40, 0x42fc0004}, + {0x8b44, 0x60030007}, + {0x8b48, 0x42000004}, + {0x8b4c, 0x00040001}, + {0x8b50, 0x000742fc}, + {0x8b54, 0x00046001}, + {0x8b58, 0x00014200}, + {0x8b5c, 0x62200007}, + {0x8b60, 0xe5476200}, + {0x8b64, 0x00070001}, + {0x8b68, 0x00046300}, + {0x8b6c, 0x0a000900}, + {0x8b70, 0x00320e01}, + {0x8b74, 0x06a20007}, + {0x8b78, 0xe559e54f}, + {0x8b7c, 0x42fe0002}, + {0x8b80, 0x42007900}, + {0x8b84, 0x00050001}, + {0x8b88, 0x00077700}, + {0x8b8c, 0x00045200}, + {0x8b90, 0x000742fe}, + {0x8b94, 0x00046000}, + {0x8b98, 0x43804200}, + {0x8b9c, 0x61006000}, + {0x8ba0, 0x63106201}, + {0x8ba4, 0x00056804}, + {0x8ba8, 0x55004100}, + {0x8bac, 0x5c020007}, + {0x8bb0, 0x43000004}, + {0x8bb4, 0x00050001}, + {0x8bb8, 
0xe3c96c06}, + {0x8bbc, 0xe3b8e3db}, + {0x8bc0, 0xe423e567}, + {0x8bc4, 0xe43ce56f}, + {0x8bc8, 0xe3b80001}, + {0x8bcc, 0x6c060005}, + {0x8bd0, 0xe5e6e3c9}, + {0x8bd4, 0xe423e567}, + {0x8bd8, 0xe43ce56f}, + {0x8bdc, 0x00050001}, + {0x8be0, 0xe3c96c00}, + {0x8be4, 0xe3b8e3db}, + {0x8be8, 0xe423e582}, + {0x8bec, 0xe43ce58a}, + {0x8bf0, 0xe3b80001}, + {0x8bf4, 0x6c000005}, + {0x8bf8, 0xe5e6e3c9}, + {0x8bfc, 0xe423e582}, + {0x8c00, 0xe43ce58a}, + {0x8c04, 0x00050001}, + {0x8c08, 0xe3c96c04}, + {0x8c0c, 0xe3b8e3db}, + {0x8c10, 0xe423e59d}, + {0x8c14, 0xe43ce5a5}, + {0x8c18, 0xe3b80001}, + {0x8c1c, 0x6c040005}, + {0x8c20, 0xe5e6e3c9}, + {0x8c24, 0xe423e59d}, + {0x8c28, 0xe43ce5a5}, + {0x8c2c, 0x00050001}, + {0x8c30, 0xe3c96c02}, + {0x8c34, 0xe3b8e3db}, + {0x8c38, 0xe423e5b8}, + {0x8c3c, 0xe43ce5c0}, + {0x8c40, 0xe3b80001}, + {0x8c44, 0x6c020005}, + {0x8c48, 0xe5e6e3c9}, + {0x8c4c, 0xe423e5b8}, + {0x8c50, 0xe43ce5c0}, + {0x8c54, 0x00040001}, + {0x8c58, 0x60084380}, + {0x8c5c, 0x6200610a}, + {0x8c60, 0x000663ce}, + {0x8c64, 0x7f006080}, + {0x8c68, 0x43000004}, + {0x8c6c, 0x0001e618}, + {0x8c70, 0x55000007}, + {0x8c74, 0x74200004}, + {0x8c78, 0x77117901}, + {0x8c7c, 0x57005710}, + {0x8c80, 0x7430140f}, + {0x8c84, 0x43800004}, + {0x8c88, 0x72000007}, + {0x8c8c, 0x43000004}, + {0x8c90, 0x00040001}, + {0x8c94, 0x00057420}, + {0x8c98, 0x7e067700}, + {0x8c9c, 0x73807388}, + {0x8ca0, 0x140f8f00}, + {0x8ca4, 0x74300004}, + {0x8ca8, 0x73000005}, + {0x8cac, 0xe5d30001}, + {0x8cb0, 0x73000005}, + {0x8cb4, 0x00040001}, + {0x8cb8, 0xb1034380}, + {0x8cbc, 0x7cdb0006}, + {0x8cc0, 0x00079103}, + {0x8cc4, 0x000440db}, + {0x8cc8, 0xe5d34300}, + {0x8ccc, 0x73800005}, + {0x8cd0, 0x5d010006}, + {0x8cd4, 0x62006002}, + {0x8cd8, 0x0005e5f7}, + {0x8cdc, 0x00077300}, + {0x8ce0, 0x75787608}, + {0x8ce4, 0x43800004}, + {0x8ce8, 0x5e010007}, + {0x8cec, 0x140a5e00}, + {0x8cf0, 0x63800006}, + {0x8cf4, 0x00077f00}, + {0x8cf8, 0x4e204c3f}, + {0x8cfc, 0x73047280}, + {0x8d00, 0x140a7300}, + {0x8d04, 0x00044d20}, + {0x8d08, 0x00064300}, + {0x8d0c, 0x00077402}, + {0x8d10, 0x40004001}, + {0x8d14, 0x0006ab00}, + {0x8d18, 0x00077404}, + {0x8d1c, 0x40004001}, + {0x8d20, 0x140aab00}, + {0x8d24, 0x43800004}, + {0x8d28, 0x52800007}, + {0x8d2c, 0x140a5200}, + {0x8d30, 0x4d004c00}, + {0x8d34, 0x00064e00}, + {0x8d38, 0x63006080}, + {0x8d3c, 0x43000004}, + {0x8d40, 0x76000007}, + {0x8d44, 0x00040001}, + {0x8d48, 0xb1034380}, + {0x8d4c, 0x7cdb0006}, + {0x8d50, 0x00079103}, + {0x8d54, 0x000440db}, + {0x8d58, 0xe5d34300}, + {0x8d5c, 0xe5f77e03}, + {0x8d60, 0x43800004}, + {0x8d64, 0x0006b103}, + {0x8d68, 0x91037c5b}, + {0x8d6c, 0x405b0007}, + {0x8d70, 0x43000004}, + {0x8d74, 0x00010001}, + {0x8d78, 0x43800004}, + {0x8d7c, 0x4e200007}, + {0x8d80, 0x63800006}, + {0x8d84, 0x5f807cdb}, + {0x8d88, 0x43000004}, + {0x8d8c, 0x76080007}, + {0x8d90, 0x00057560}, + {0x8d94, 0x00047380}, + {0x8d98, 0x0005420e}, + {0x8d9c, 0x92006c01}, + {0x8da0, 0x6c001432}, + {0x8da4, 0x42000004}, + {0x8da8, 0x43800004}, + {0x8dac, 0x5f000006}, + {0x8db0, 0x73010007}, + {0x8db4, 0x00047300}, + {0x8db8, 0x0007420f}, + {0x8dbc, 0x52005280}, + {0x8dc0, 0x0004140a}, + {0x8dc4, 0x00064200}, + {0x8dc8, 0x7c5b6300}, + {0x8dcc, 0x4e000007}, + {0x8dd0, 0x43000004}, + {0x8dd4, 0x73000005}, + {0x8dd8, 0x76000007}, + {0x8ddc, 0xe5fb0001}, + {0x8de0, 0x00040001}, + {0x8de4, 0x60004380}, + {0x8de8, 0x62016100}, + {0x8dec, 0x00066310}, + {0x8df0, 0x00046000}, + {0x8df4, 0x00014300}, + {0x8df8, 0x0001e618}, + {0x8dfc, 0x4e004f02}, + {0x8e00, 0x52015302}, + {0x8e04, 0x140f0001}, + {0x8e08, 
0x00019700}, + {0x8e0c, 0x8a084380}, + {0x8e10, 0x7800aa09}, + {0x8e14, 0x7a007900}, + {0x8e18, 0x43007b40}, + {0x8e1c, 0x65010001}, + {0x8e20, 0x67013489}, + {0x8e24, 0x43803489}, + {0x8e28, 0xaa058a04}, + {0x8e2c, 0x00014300}, + {0x8e30, 0x34966500}, + {0x8e34, 0x34966700}, + {0x8e38, 0x8a084380}, + {0x8e3c, 0x7c00aa09}, + {0x8e40, 0x7e007d00}, + {0x8e44, 0x43007f40}, + {0x8e48, 0x64010001}, + {0x8e4c, 0x6601349f}, + {0x8e50, 0x4380349f}, + {0x8e54, 0xaa058a04}, + {0x8e58, 0x00014300}, + {0x8e5c, 0x34ac6400}, + {0x8e60, 0x34ac6600}, + {0x8e64, 0x7b484380}, + {0x8e68, 0x79007a90}, + {0x8e6c, 0x43007802}, + {0x8e70, 0x34c95503}, + {0x8e74, 0x7b384380}, + {0x8e78, 0x43007a80}, + {0x8e7c, 0x34c95513}, + {0x8e80, 0x7b404380}, + {0x8e84, 0x43007a00}, + {0x8e88, 0x74015523}, + {0x8e8c, 0x8e007400}, + {0x8e90, 0x00070001}, + {0x8e94, 0x00045230}, + {0x8e98, 0x74307431}, + {0x8e9c, 0x00078e00}, + {0x8ea0, 0x00045220}, + {0x8ea4, 0x57020001}, + {0x8ea8, 0x8e005700}, + {0x8eac, 0x42ef0001}, + {0x8eb0, 0x56005610}, + {0x8eb4, 0x8c004200}, + {0x8eb8, 0x5b500001}, + {0x8ebc, 0x5b2034e0}, + {0x8ec0, 0x4e004f78}, + {0x8ec4, 0x52015388}, + {0x8ec8, 0x4e004f78}, + {0x8ecc, 0x52015388}, + {0x8ed0, 0x5480e4f2}, + {0x8ed4, 0x54815400}, + {0x8ed8, 0x54825400}, + {0x8edc, 0xe4fd5400}, + {0x8ee0, 0x3010bf1d}, + {0x8ee4, 0xe4bae4b2}, + {0x8ee8, 0xe4d3e4c0}, + {0x8eec, 0x5523e65b}, + {0x8ef0, 0x5525e4c9}, + {0x8ef4, 0xe65be4d3}, + {0x8ef8, 0x54bf0001}, + {0x8efc, 0x54a354c0}, + {0x8f00, 0x54a454c1}, + {0x8f04, 0xbf074c18}, + {0x8f08, 0x54a454c2}, + {0x8f0c, 0x54c1bf04}, + {0x8f10, 0xbf0154a3}, + {0x8f14, 0x54dfe66c}, + {0x8f18, 0x54bf0001}, + {0x8f1c, 0x050a54e5}, + {0x8f20, 0x000154df}, + {0x8f24, 0x7b801657}, + {0x8f28, 0x43807430}, + {0x8f2c, 0x7e007f40}, + {0x8f30, 0x7c027d00}, + {0x8f34, 0x5b404300}, + {0x8f38, 0x5c015501}, + {0x8f3c, 0x5480e4d7}, + {0x8f40, 0x54815400}, + {0x8f44, 0x54825400}, + {0x8f48, 0x7b005400}, + {0x8f4c, 0xe4fd7410}, + {0x8f50, 0x3010bfe5}, + {0x8f54, 0x56005610}, + {0x8f58, 0x00018c00}, + {0x8f5c, 0x57005704}, + {0x8f60, 0x57088e00}, + {0x8f64, 0x8e005700}, + {0x8f68, 0x57805781}, + {0x8f6c, 0x43808e00}, + {0x8f70, 0x5c010007}, + {0x8f74, 0x14035c00}, + {0x8f78, 0x43000004}, + {0x8f7c, 0x427f0001}, + {0x8f80, 0x62800007}, + {0x8f84, 0x92006200}, + {0x8f88, 0x42000004}, + {0x8f8c, 0x427f0001}, + {0x8f90, 0x63940007}, + {0x8f94, 0x92006314}, + {0x8f98, 0x42000004}, + {0x8f9c, 0x00040001}, + {0x8fa0, 0x790142fe}, + {0x8fa4, 0x74204200}, + {0x8fa8, 0x5710140f}, + {0x8fac, 0x141f5700}, + {0x8fb0, 0x00040001}, + {0x8fb4, 0x790142fe}, + {0x8fb8, 0x74204200}, + {0x8fbc, 0x42bf140f}, + {0x8fc0, 0x62400007}, + {0x8fc4, 0x141f6200}, + {0x8fc8, 0x42000004}, + {0x8fcc, 0x00060001}, + {0x8fd0, 0x60035d06}, + {0x8fd4, 0x62016104}, + {0x8fd8, 0x73100005}, + {0x8fdc, 0x00040001}, + {0x8fe0, 0x00074380}, + {0x8fe4, 0x5e005e01}, + {0x8fe8, 0xb103140a}, + {0x8fec, 0x7f070006}, + {0x8ff0, 0x00079103}, + {0x8ff4, 0x00064307}, + {0x8ff8, 0x5d025c00}, + {0x8ffc, 0x00045e03}, + {0x9000, 0x00014300}, + {0x9004, 0x5d040006}, + {0x9008, 0x61046000}, + {0x900c, 0x00056201}, + {0x9010, 0x00017310}, + {0x9014, 0x43800004}, + {0x9018, 0x5e010007}, + {0x901c, 0x140a5e00}, + {0x9020, 0x0006b103}, + {0x9024, 0x91037fc6}, + {0x9028, 0x43c60007}, + {0x902c, 0x5c000006}, + {0x9030, 0x5e035d02}, + {0x9034, 0x43000004}, + {0x9038, 0x00060001}, + {0x903c, 0x60005d04}, + {0x9040, 0x62016104}, + {0x9044, 0x73100005}, + {0x9048, 0x00040001}, + {0x904c, 0x00074380}, + {0x9050, 0x5e005e01}, + {0x9054, 0xb103140a}, + {0x9058, 
0x7fc60006}, + {0x905c, 0x00079103}, + {0x9060, 0x000643c6}, + {0x9064, 0x5d025c00}, + {0x9068, 0x00045e03}, + {0x906c, 0x00014300}, + {0x9070, 0x5d000006}, + {0x9074, 0x61006002}, + {0x9078, 0x00056201}, + {0x907c, 0x00017300}, + {0x9080, 0x43800004}, + {0x9084, 0x5e010007}, + {0x9088, 0x140a5e00}, + {0x908c, 0x0006b103}, + {0x9090, 0x91037fc0}, + {0x9094, 0x43c00007}, + {0x9098, 0x5c000006}, + {0x909c, 0x5e035d02}, + {0x90a0, 0x43000004}, + {0x90a4, 0x00050001}, + {0x90a8, 0x00047e02}, + {0x90ac, 0x000542f7}, + {0x90b0, 0x00046c08}, + {0x90b4, 0x00054270}, + {0x90b8, 0x73807381}, + {0x90bc, 0x00049300}, + {0x90c0, 0x000542f7}, + {0x90c4, 0x00046c00}, + {0x90c8, 0x00014200}, + {0x90cc, 0x43800004}, + {0x90d0, 0x73040007}, + {0x90d4, 0x14057300}, + {0x90d8, 0x00047240}, + {0x90dc, 0x00064300}, + {0x90e0, 0x00077404}, + {0x90e4, 0x40004001}, + {0x90e8, 0x140fab00}, + {0x90ec, 0xe64f0001}, + {0x90f0, 0xe656e5fb}, + {0x90f4, 0x00040001}, + {0x90f8, 0x00047410}, + {0x90fc, 0x42f04380}, + {0x9100, 0x62080007}, + {0x9104, 0x24206301}, + {0x9108, 0x14c80000}, + {0x910c, 0x00002428}, + {0x9110, 0x1a4215f4}, + {0x9114, 0x6300000b}, + {0x9118, 0x42000004}, + {0x911c, 0x74304300}, + {0x9120, 0x4380140f}, + {0x9124, 0x73080007}, + {0x9128, 0x00047300}, + {0x912c, 0x00014300}, + {0x9130, 0x4bf00007}, + {0x9134, 0x490b4a8f}, + {0x9138, 0x4a8e48f1}, + {0x913c, 0x48a5490a}, + {0x9140, 0x49094a8d}, + {0x9144, 0x4a8c487d}, + {0x9148, 0x48754908}, + {0x914c, 0x49074a8b}, + {0x9150, 0x4a8a4889}, + {0x9154, 0x48b74906}, + {0x9158, 0x49054a89}, + {0x915c, 0x4a8848fc}, + {0x9160, 0x48564905}, + {0x9164, 0x49044a87}, + {0x9168, 0x4a8648c1}, + {0x916c, 0x483d4904}, + {0x9170, 0x49034a85}, + {0x9174, 0x4a8448c7}, + {0x9178, 0x485e4903}, + {0x917c, 0x49024a83}, + {0x9180, 0x4a8248ac}, + {0x9184, 0x48624902}, + {0x9188, 0x49024a81}, + {0x918c, 0x4a804820}, + {0x9190, 0x48004900}, + {0x9194, 0x49014a90}, + {0x9198, 0x4a10481f}, + {0x919c, 0x00060001}, + {0x91a0, 0x5f005f80}, + {0x91a4, 0x00059900}, + {0x91a8, 0x00017300}, + {0x91ac, 0x63800006}, + {0x91b0, 0x98006300}, + {0x91b4, 0x549f0001}, + {0x91b8, 0x5c015400}, + {0x91bc, 0x540054df}, + {0x91c0, 0x00015c02}, + {0x91c4, 0x07145c01}, + {0x91c8, 0x5c025400}, + {0x91cc, 0x5c020001}, + {0x91d0, 0x54000714}, + {0x91d4, 0x00015c01}, + {0x91d8, 0x4c184c98}, + {0x91dc, 0x00080001}, + {0x91e0, 0x5c020004}, + {0x91e4, 0x09017430}, + {0x91e8, 0x0ba60c01}, + {0x91ec, 0x77800005}, + {0x91f0, 0x52200007}, + {0x91f4, 0x43800004}, + {0x91f8, 0x610a6008}, + {0x91fc, 0x63c26200}, + {0x9200, 0x5c000007}, + {0x9204, 0x43000004}, + {0x9208, 0x00000001}, + {0x8080, 0x00000004}, + {0x8080, 0x00000000}, + {0x8088, 0x00000000}, +}; + +const struct rtw89_phy_table rtw89_8852c_phy_bb_table = { + .regs = rtw89_8852c_phy_bb_regs, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_regs), + .rf_path = 0, /* don't care */ +}; + +const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table = { + .regs = rtw89_8852c_phy_bb_reg_gain, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_reg_gain), + .rf_path = 0, /* don't care */ +}; + +const struct rtw89_phy_table rtw89_8852c_phy_radioa_table = { + .regs = rtw89_8852c_phy_radioa_regs, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radioa_regs), + .rf_path = RF_PATH_A, + .config = rtw89_phy_config_rf_reg_v1, +}; + +const struct rtw89_phy_table rtw89_8852c_phy_radiob_table = { + .regs = rtw89_8852c_phy_radiob_regs, + .n_regs = ARRAY_SIZE(rtw89_8852c_phy_radiob_regs), + .rf_path = RF_PATH_B, + .config = rtw89_phy_config_rf_reg_v1, +}; + +const struct rtw89_phy_table 
rtw89_8852c_phy_nctl_table = {
+ .regs = rtw89_8852c_phy_nctl_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852c_phy_nctl_regs),
+ .rf_path = 0, /* don't care */
+};
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h
new file mode 100644
index 000000000000..807021ab9f1d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852C_TABLE_H__
+#define __RTW89_8852C_TABLE_H__
+
+#include "core.h"
+
+extern const struct rtw89_phy_table rtw89_8852c_phy_bb_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table;
+extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table;
+
+#endif
-- cgit v1.2.3

From 342475ac510ac0231fb38d00befdc86228453825 Mon Sep 17 00:00:00 2001
From: Ping-Ke Shih
Date: Thu, 14 Apr 2022 14:20:16 +0800
Subject: rtw89: 8852c: add TX power by rate and limit tables

TX power depends on the rate, but must also follow the regulations of a
specific country. Once asked to set a channel, we configure the registers
according to these TX power tables.

Signed-off-by: Ping-Ke Shih
Signed-off-by: Kalle Valo
Link: https://lore.kernel.org/r/20220414062027.62638-3-pkshih@realtek.com
---
 drivers/net/wireless/realtek/rtw89/core.h | 1 +
 drivers/net/wireless/realtek/rtw89/rtw8852c.c | 9 +
 .../net/wireless/realtek/rtw89/rtw8852c_table.c | 5627 ++++++++++++++++++++
 .../net/wireless/realtek/rtw89/rtw8852c_table.h | 18 +
 4 files changed, 5655 insertions(+)
(limited to 'drivers')

diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index e8e31492b363..1dd558d89567 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -401,6 +401,7 @@ enum rtw89_rate_section {
 RTW89_RS_OFFSET,
 RTW89_RS_MAX,
 RTW89_RS_LMT_NUM = RTW89_RS_MCS + 1,
+ RTW89_RS_TX_SHAPE_NUM = RTW89_RS_OFDM + 1,
 };
 enum rtw89_rate_max {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 0855c41e32af..3f727dd42064 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -664,6 +664,15 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
 .rf_table = {&rtw89_8852c_phy_radiob_table, &rtw89_8852c_phy_radioa_table,},
 .nctl_table = &rtw89_8852c_phy_nctl_table,
+ .byr_table = &rtw89_8852c_byr_table,
+ .txpwr_lmt_2g = &rtw89_8852c_txpwr_lmt_2g,
+ .txpwr_lmt_5g = &rtw89_8852c_txpwr_lmt_5g,
+ .txpwr_lmt_6g = &rtw89_8852c_txpwr_lmt_6g,
+ .txpwr_lmt_ru_2g = &rtw89_8852c_txpwr_lmt_ru_2g,
+ .txpwr_lmt_ru_5g = &rtw89_8852c_txpwr_lmt_ru_5g,
+ .txpwr_lmt_ru_6g = &rtw89_8852c_txpwr_lmt_ru_6g,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
 .dig_table = NULL,
 .hw_sec_hdr = true,
 .sec_ctrl_efuse_size = 4,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
index b0431874d7c4..bd0c4ff5ca4d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -13664,6 +13664,5627 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_nctl_regs[] = {
 {0x8088, 0x00000000},
 };
+static const struct rtw89_txpwr_byrate_cfg rtw89_8852c_txpwr_byrate[] = {
+ { 0, 0, 0, 0, 4, 0x50505050, },
+ {
0, 0, 1, 0, 4, 0x50505050, }, + { 0, 0, 1, 4, 4, 0x484c5050, }, + { 0, 0, 2, 0, 4, 0x50505050, }, + { 0, 0, 2, 4, 4, 0x44484c50, }, + { 0, 0, 2, 8, 4, 0x34383c40, }, + { 0, 0, 3, 0, 4, 0x50505050, }, + { 0, 1, 2, 0, 4, 0x50505050, }, + { 0, 1, 2, 4, 4, 0x44484c50, }, + { 0, 1, 2, 8, 4, 0x34383c40, }, + { 0, 1, 3, 0, 4, 0x50505050, }, + { 0, 0, 4, 1, 4, 0x00000000, }, + { 0, 0, 4, 0, 1, 0x00000000, }, + { 1, 0, 1, 0, 4, 0x5054585c, }, + { 1, 0, 1, 4, 4, 0x4044484c, }, + { 1, 0, 2, 0, 4, 0x4c505458, }, + { 1, 0, 2, 4, 4, 0x3c404448, }, + { 1, 0, 2, 8, 4, 0x2c303438, }, + { 1, 0, 3, 0, 4, 0x3c40484c, }, + { 1, 1, 2, 0, 4, 0x4c505458, }, + { 1, 1, 2, 4, 4, 0x3c404448, }, + { 1, 1, 2, 8, 4, 0x2c303438, }, + { 1, 1, 3, 0, 4, 0x3c40484c, }, + { 1, 0, 4, 0, 4, 0x00000000, }, + { 2, 0, 1, 0, 4, 0x5054585c, }, + { 2, 0, 1, 4, 4, 0x4044484c, }, + { 2, 0, 2, 0, 4, 0x4c505458, }, + { 2, 0, 2, 4, 4, 0x3c404448, }, + { 2, 0, 2, 8, 4, 0x2c303438, }, + { 2, 0, 3, 0, 4, 0x3c40484c, }, + { 2, 1, 2, 0, 4, 0x4c505458, }, + { 2, 1, 2, 4, 4, 0x3c404448, }, + { 2, 1, 2, 8, 4, 0x2c303438, }, + { 2, 1, 3, 0, 4, 0x3c40484c, }, + { 2, 0, 4, 0, 4, 0x00000000, }, +}; + +const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM] = { + [0][0][RTW89_ACMA] = 0, + [0][0][RTW89_ETSI] = 0, + [0][0][RTW89_FCC] = 1, + [0][0][RTW89_IC] = 1, + [0][0][RTW89_MKK] = 0, + [0][1][RTW89_ACMA] = 0, + [0][1][RTW89_ETSI] = 0, + [0][1][RTW89_FCC] = 3, + [0][1][RTW89_IC] = 3, + [0][1][RTW89_MKK] = 0, + [1][1][RTW89_ACMA] = 0, + [1][1][RTW89_ETSI] = 0, + [1][1][RTW89_FCC] = 3, + [1][1][RTW89_IC] = 3, + [1][1][RTW89_MKK] = 0, + [2][1][RTW89_FCC] = 1, +}; + +const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { + [0][0][0][0][RTW89_WW][0] = 60, + [0][0][0][0][RTW89_WW][1] = 60, + [0][0][0][0][RTW89_WW][2] = 60, + [0][0][0][0][RTW89_WW][3] = 60, + [0][0][0][0][RTW89_WW][4] = 60, + [0][0][0][0][RTW89_WW][5] = 60, + [0][0][0][0][RTW89_WW][6] = 60, + [0][0][0][0][RTW89_WW][7] = 60, + [0][0][0][0][RTW89_WW][8] = 60, + [0][0][0][0][RTW89_WW][9] = 60, + [0][0][0][0][RTW89_WW][10] = 60, + [0][0][0][0][RTW89_WW][11] = 60, + [0][0][0][0][RTW89_WW][12] = 58, + [0][0][0][0][RTW89_WW][13] = 74, + [0][1][0][0][RTW89_WW][0] = 48, + [0][1][0][0][RTW89_WW][1] = 48, + [0][1][0][0][RTW89_WW][2] = 48, + [0][1][0][0][RTW89_WW][3] = 48, + [0][1][0][0][RTW89_WW][4] = 48, + [0][1][0][0][RTW89_WW][5] = 48, + [0][1][0][0][RTW89_WW][6] = 48, + [0][1][0][0][RTW89_WW][7] = 48, + [0][1][0][0][RTW89_WW][8] = 48, + [0][1][0][0][RTW89_WW][9] = 48, + [0][1][0][0][RTW89_WW][10] = 48, + [0][1][0][0][RTW89_WW][11] = 48, + [0][1][0][0][RTW89_WW][12] = 44, + [0][1][0][0][RTW89_WW][13] = 62, + [1][0][0][0][RTW89_WW][0] = 0, + [1][0][0][0][RTW89_WW][1] = 0, + [1][0][0][0][RTW89_WW][2] = 52, + [1][0][0][0][RTW89_WW][3] = 52, + [1][0][0][0][RTW89_WW][4] = 52, + [1][0][0][0][RTW89_WW][5] = 60, + [1][0][0][0][RTW89_WW][6] = 52, + [1][0][0][0][RTW89_WW][7] = 52, + [1][0][0][0][RTW89_WW][8] = 52, + [1][0][0][0][RTW89_WW][9] = 44, + [1][0][0][0][RTW89_WW][10] = 32, + [1][0][0][0][RTW89_WW][11] = 0, + [1][0][0][0][RTW89_WW][12] = 0, + [1][0][0][0][RTW89_WW][13] = 0, + [1][1][0][0][RTW89_WW][0] = 0, + [1][1][0][0][RTW89_WW][1] = 0, + [1][1][0][0][RTW89_WW][2] = 48, + [1][1][0][0][RTW89_WW][3] = 48, + [1][1][0][0][RTW89_WW][4] = 48, + [1][1][0][0][RTW89_WW][5] = 48, + [1][1][0][0][RTW89_WW][6] = 36, + [1][1][0][0][RTW89_WW][7] = 36, + [1][1][0][0][RTW89_WW][8] = 36, + 
[1][1][0][0][RTW89_WW][9] = 32, + [1][1][0][0][RTW89_WW][10] = 32, + [1][1][0][0][RTW89_WW][11] = 0, + [1][1][0][0][RTW89_WW][12] = 0, + [1][1][0][0][RTW89_WW][13] = 0, + [0][0][1][0][RTW89_WW][0] = 60, + [0][0][1][0][RTW89_WW][1] = 60, + [0][0][1][0][RTW89_WW][2] = 60, + [0][0][1][0][RTW89_WW][3] = 60, + [0][0][1][0][RTW89_WW][4] = 60, + [0][0][1][0][RTW89_WW][5] = 60, + [0][0][1][0][RTW89_WW][6] = 60, + [0][0][1][0][RTW89_WW][7] = 60, + [0][0][1][0][RTW89_WW][8] = 60, + [0][0][1][0][RTW89_WW][9] = 60, + [0][0][1][0][RTW89_WW][10] = 60, + [0][0][1][0][RTW89_WW][11] = 56, + [0][0][1][0][RTW89_WW][12] = 52, + [0][0][1][0][RTW89_WW][13] = 0, + [0][1][1][0][RTW89_WW][0] = 48, + [0][1][1][0][RTW89_WW][1] = 48, + [0][1][1][0][RTW89_WW][2] = 48, + [0][1][1][0][RTW89_WW][3] = 48, + [0][1][1][0][RTW89_WW][4] = 48, + [0][1][1][0][RTW89_WW][5] = 48, + [0][1][1][0][RTW89_WW][6] = 48, + [0][1][1][0][RTW89_WW][7] = 48, + [0][1][1][0][RTW89_WW][8] = 48, + [0][1][1][0][RTW89_WW][9] = 48, + [0][1][1][0][RTW89_WW][10] = 48, + [0][1][1][0][RTW89_WW][11] = 48, + [0][1][1][0][RTW89_WW][12] = 44, + [0][1][1][0][RTW89_WW][13] = 0, + [0][0][2][0][RTW89_WW][0] = 60, + [0][0][2][0][RTW89_WW][1] = 60, + [0][0][2][0][RTW89_WW][2] = 60, + [0][0][2][0][RTW89_WW][3] = 60, + [0][0][2][0][RTW89_WW][4] = 60, + [0][0][2][0][RTW89_WW][5] = 60, + [0][0][2][0][RTW89_WW][6] = 60, + [0][0][2][0][RTW89_WW][7] = 60, + [0][0][2][0][RTW89_WW][8] = 60, + [0][0][2][0][RTW89_WW][9] = 60, + [0][0][2][0][RTW89_WW][10] = 60, + [0][0][2][0][RTW89_WW][11] = 56, + [0][0][2][0][RTW89_WW][12] = 52, + [0][0][2][0][RTW89_WW][13] = 0, + [0][1][2][0][RTW89_WW][0] = 48, + [0][1][2][0][RTW89_WW][1] = 48, + [0][1][2][0][RTW89_WW][2] = 48, + [0][1][2][0][RTW89_WW][3] = 48, + [0][1][2][0][RTW89_WW][4] = 48, + [0][1][2][0][RTW89_WW][5] = 48, + [0][1][2][0][RTW89_WW][6] = 48, + [0][1][2][0][RTW89_WW][7] = 48, + [0][1][2][0][RTW89_WW][8] = 48, + [0][1][2][0][RTW89_WW][9] = 48, + [0][1][2][0][RTW89_WW][10] = 48, + [0][1][2][0][RTW89_WW][11] = 48, + [0][1][2][0][RTW89_WW][12] = 44, + [0][1][2][0][RTW89_WW][13] = 0, + [0][1][2][1][RTW89_WW][0] = 36, + [0][1][2][1][RTW89_WW][1] = 36, + [0][1][2][1][RTW89_WW][2] = 36, + [0][1][2][1][RTW89_WW][3] = 36, + [0][1][2][1][RTW89_WW][4] = 36, + [0][1][2][1][RTW89_WW][5] = 36, + [0][1][2][1][RTW89_WW][6] = 36, + [0][1][2][1][RTW89_WW][7] = 36, + [0][1][2][1][RTW89_WW][8] = 36, + [0][1][2][1][RTW89_WW][9] = 36, + [0][1][2][1][RTW89_WW][10] = 36, + [0][1][2][1][RTW89_WW][11] = 36, + [0][1][2][1][RTW89_WW][12] = 36, + [0][1][2][1][RTW89_WW][13] = 0, + [1][0][2][0][RTW89_WW][0] = 0, + [1][0][2][0][RTW89_WW][1] = 0, + [1][0][2][0][RTW89_WW][2] = 60, + [1][0][2][0][RTW89_WW][3] = 60, + [1][0][2][0][RTW89_WW][4] = 60, + [1][0][2][0][RTW89_WW][5] = 60, + [1][0][2][0][RTW89_WW][6] = 60, + [1][0][2][0][RTW89_WW][7] = 60, + [1][0][2][0][RTW89_WW][8] = 60, + [1][0][2][0][RTW89_WW][9] = 60, + [1][0][2][0][RTW89_WW][10] = 60, + [1][0][2][0][RTW89_WW][11] = 0, + [1][0][2][0][RTW89_WW][12] = 0, + [1][0][2][0][RTW89_WW][13] = 0, + [1][1][2][0][RTW89_WW][0] = 0, + [1][1][2][0][RTW89_WW][1] = 0, + [1][1][2][0][RTW89_WW][2] = 48, + [1][1][2][0][RTW89_WW][3] = 48, + [1][1][2][0][RTW89_WW][4] = 48, + [1][1][2][0][RTW89_WW][5] = 48, + [1][1][2][0][RTW89_WW][6] = 48, + [1][1][2][0][RTW89_WW][7] = 48, + [1][1][2][0][RTW89_WW][8] = 48, + [1][1][2][0][RTW89_WW][9] = 44, + [1][1][2][0][RTW89_WW][10] = 40, + [1][1][2][0][RTW89_WW][11] = 0, + [1][1][2][0][RTW89_WW][12] = 0, + [1][1][2][0][RTW89_WW][13] = 0, + [1][1][2][1][RTW89_WW][0] = 0, + 
[1][1][2][1][RTW89_WW][1] = 0, + [1][1][2][1][RTW89_WW][2] = 36, + [1][1][2][1][RTW89_WW][3] = 36, + [1][1][2][1][RTW89_WW][4] = 36, + [1][1][2][1][RTW89_WW][5] = 36, + [1][1][2][1][RTW89_WW][6] = 36, + [1][1][2][1][RTW89_WW][7] = 36, + [1][1][2][1][RTW89_WW][8] = 36, + [1][1][2][1][RTW89_WW][9] = 36, + [1][1][2][1][RTW89_WW][10] = 36, + [1][1][2][1][RTW89_WW][11] = 0, + [1][1][2][1][RTW89_WW][12] = 0, + [1][1][2][1][RTW89_WW][13] = 0, + [0][0][0][0][RTW89_FCC][0] = 80, + [0][0][0][0][RTW89_ETSI][0] = 60, + [0][0][0][0][RTW89_MKK][0] = 72, + [0][0][0][0][RTW89_IC][0] = 80, + [0][0][0][0][RTW89_ACMA][0] = 60, + [0][0][0][0][RTW89_FCC][1] = 80, + [0][0][0][0][RTW89_ETSI][1] = 60, + [0][0][0][0][RTW89_MKK][1] = 72, + [0][0][0][0][RTW89_IC][1] = 80, + [0][0][0][0][RTW89_ACMA][1] = 60, + [0][0][0][0][RTW89_FCC][2] = 80, + [0][0][0][0][RTW89_ETSI][2] = 60, + [0][0][0][0][RTW89_MKK][2] = 72, + [0][0][0][0][RTW89_IC][2] = 80, + [0][0][0][0][RTW89_ACMA][2] = 60, + [0][0][0][0][RTW89_FCC][3] = 80, + [0][0][0][0][RTW89_ETSI][3] = 60, + [0][0][0][0][RTW89_MKK][3] = 72, + [0][0][0][0][RTW89_IC][3] = 80, + [0][0][0][0][RTW89_ACMA][3] = 60, + [0][0][0][0][RTW89_FCC][4] = 80, + [0][0][0][0][RTW89_ETSI][4] = 60, + [0][0][0][0][RTW89_MKK][4] = 72, + [0][0][0][0][RTW89_IC][4] = 80, + [0][0][0][0][RTW89_ACMA][4] = 60, + [0][0][0][0][RTW89_FCC][5] = 80, + [0][0][0][0][RTW89_ETSI][5] = 60, + [0][0][0][0][RTW89_MKK][5] = 72, + [0][0][0][0][RTW89_IC][5] = 80, + [0][0][0][0][RTW89_ACMA][5] = 60, + [0][0][0][0][RTW89_FCC][6] = 80, + [0][0][0][0][RTW89_ETSI][6] = 60, + [0][0][0][0][RTW89_MKK][6] = 72, + [0][0][0][0][RTW89_IC][6] = 80, + [0][0][0][0][RTW89_ACMA][6] = 60, + [0][0][0][0][RTW89_FCC][7] = 80, + [0][0][0][0][RTW89_ETSI][7] = 60, + [0][0][0][0][RTW89_MKK][7] = 72, + [0][0][0][0][RTW89_IC][7] = 80, + [0][0][0][0][RTW89_ACMA][7] = 60, + [0][0][0][0][RTW89_FCC][8] = 80, + [0][0][0][0][RTW89_ETSI][8] = 60, + [0][0][0][0][RTW89_MKK][8] = 72, + [0][0][0][0][RTW89_IC][8] = 80, + [0][0][0][0][RTW89_ACMA][8] = 60, + [0][0][0][0][RTW89_FCC][9] = 80, + [0][0][0][0][RTW89_ETSI][9] = 60, + [0][0][0][0][RTW89_MKK][9] = 72, + [0][0][0][0][RTW89_IC][9] = 80, + [0][0][0][0][RTW89_ACMA][9] = 60, + [0][0][0][0][RTW89_FCC][10] = 80, + [0][0][0][0][RTW89_ETSI][10] = 60, + [0][0][0][0][RTW89_MKK][10] = 72, + [0][0][0][0][RTW89_IC][10] = 80, + [0][0][0][0][RTW89_ACMA][10] = 60, + [0][0][0][0][RTW89_FCC][11] = 72, + [0][0][0][0][RTW89_ETSI][11] = 60, + [0][0][0][0][RTW89_MKK][11] = 72, + [0][0][0][0][RTW89_IC][11] = 72, + [0][0][0][0][RTW89_ACMA][11] = 60, + [0][0][0][0][RTW89_FCC][12] = 58, + [0][0][0][0][RTW89_ETSI][12] = 60, + [0][0][0][0][RTW89_MKK][12] = 72, + [0][0][0][0][RTW89_IC][12] = 58, + [0][0][0][0][RTW89_ACMA][12] = 60, + [0][0][0][0][RTW89_FCC][13] = 127, + [0][0][0][0][RTW89_ETSI][13] = 127, + [0][0][0][0][RTW89_MKK][13] = 74, + [0][0][0][0][RTW89_IC][13] = 127, + [0][0][0][0][RTW89_ACMA][13] = 127, + [0][1][0][0][RTW89_FCC][0] = 76, + [0][1][0][0][RTW89_ETSI][0] = 48, + [0][1][0][0][RTW89_MKK][0] = 60, + [0][1][0][0][RTW89_IC][0] = 76, + [0][1][0][0][RTW89_ACMA][0] = 48, + [0][1][0][0][RTW89_FCC][1] = 76, + [0][1][0][0][RTW89_ETSI][1] = 48, + [0][1][0][0][RTW89_MKK][1] = 60, + [0][1][0][0][RTW89_IC][1] = 76, + [0][1][0][0][RTW89_ACMA][1] = 48, + [0][1][0][0][RTW89_FCC][2] = 76, + [0][1][0][0][RTW89_ETSI][2] = 48, + [0][1][0][0][RTW89_MKK][2] = 60, + [0][1][0][0][RTW89_IC][2] = 76, + [0][1][0][0][RTW89_ACMA][2] = 48, + [0][1][0][0][RTW89_FCC][3] = 76, + [0][1][0][0][RTW89_ETSI][3] = 48, + 
[0][1][0][0][RTW89_MKK][3] = 60, + [0][1][0][0][RTW89_IC][3] = 76, + [0][1][0][0][RTW89_ACMA][3] = 48, + [0][1][0][0][RTW89_FCC][4] = 76, + [0][1][0][0][RTW89_ETSI][4] = 48, + [0][1][0][0][RTW89_MKK][4] = 60, + [0][1][0][0][RTW89_IC][4] = 76, + [0][1][0][0][RTW89_ACMA][4] = 48, + [0][1][0][0][RTW89_FCC][5] = 76, + [0][1][0][0][RTW89_ETSI][5] = 48, + [0][1][0][0][RTW89_MKK][5] = 60, + [0][1][0][0][RTW89_IC][5] = 76, + [0][1][0][0][RTW89_ACMA][5] = 48, + [0][1][0][0][RTW89_FCC][6] = 76, + [0][1][0][0][RTW89_ETSI][6] = 48, + [0][1][0][0][RTW89_MKK][6] = 60, + [0][1][0][0][RTW89_IC][6] = 76, + [0][1][0][0][RTW89_ACMA][6] = 48, + [0][1][0][0][RTW89_FCC][7] = 76, + [0][1][0][0][RTW89_ETSI][7] = 48, + [0][1][0][0][RTW89_MKK][7] = 60, + [0][1][0][0][RTW89_IC][7] = 76, + [0][1][0][0][RTW89_ACMA][7] = 48, + [0][1][0][0][RTW89_FCC][8] = 76, + [0][1][0][0][RTW89_ETSI][8] = 48, + [0][1][0][0][RTW89_MKK][8] = 60, + [0][1][0][0][RTW89_IC][8] = 76, + [0][1][0][0][RTW89_ACMA][8] = 48, + [0][1][0][0][RTW89_FCC][9] = 76, + [0][1][0][0][RTW89_ETSI][9] = 48, + [0][1][0][0][RTW89_MKK][9] = 60, + [0][1][0][0][RTW89_IC][9] = 76, + [0][1][0][0][RTW89_ACMA][9] = 48, + [0][1][0][0][RTW89_FCC][10] = 76, + [0][1][0][0][RTW89_ETSI][10] = 48, + [0][1][0][0][RTW89_MKK][10] = 60, + [0][1][0][0][RTW89_IC][10] = 76, + [0][1][0][0][RTW89_ACMA][10] = 48, + [0][1][0][0][RTW89_FCC][11] = 56, + [0][1][0][0][RTW89_ETSI][11] = 48, + [0][1][0][0][RTW89_MKK][11] = 60, + [0][1][0][0][RTW89_IC][11] = 56, + [0][1][0][0][RTW89_ACMA][11] = 48, + [0][1][0][0][RTW89_FCC][12] = 44, + [0][1][0][0][RTW89_ETSI][12] = 48, + [0][1][0][0][RTW89_MKK][12] = 60, + [0][1][0][0][RTW89_IC][12] = 44, + [0][1][0][0][RTW89_ACMA][12] = 48, + [0][1][0][0][RTW89_FCC][13] = 127, + [0][1][0][0][RTW89_ETSI][13] = 127, + [0][1][0][0][RTW89_MKK][13] = 62, + [0][1][0][0][RTW89_IC][13] = 127, + [0][1][0][0][RTW89_ACMA][13] = 127, + [1][0][0][0][RTW89_FCC][0] = 127, + [1][0][0][0][RTW89_ETSI][0] = 127, + [1][0][0][0][RTW89_MKK][0] = 127, + [1][0][0][0][RTW89_IC][0] = 127, + [1][0][0][0][RTW89_ACMA][0] = 127, + [1][0][0][0][RTW89_FCC][1] = 127, + [1][0][0][0][RTW89_ETSI][1] = 127, + [1][0][0][0][RTW89_MKK][1] = 127, + [1][0][0][0][RTW89_IC][1] = 127, + [1][0][0][0][RTW89_ACMA][1] = 127, + [1][0][0][0][RTW89_FCC][2] = 52, + [1][0][0][0][RTW89_ETSI][2] = 60, + [1][0][0][0][RTW89_MKK][2] = 72, + [1][0][0][0][RTW89_IC][2] = 52, + [1][0][0][0][RTW89_ACMA][2] = 60, + [1][0][0][0][RTW89_FCC][3] = 52, + [1][0][0][0][RTW89_ETSI][3] = 60, + [1][0][0][0][RTW89_MKK][3] = 72, + [1][0][0][0][RTW89_IC][3] = 52, + [1][0][0][0][RTW89_ACMA][3] = 60, + [1][0][0][0][RTW89_FCC][4] = 52, + [1][0][0][0][RTW89_ETSI][4] = 60, + [1][0][0][0][RTW89_MKK][4] = 72, + [1][0][0][0][RTW89_IC][4] = 52, + [1][0][0][0][RTW89_ACMA][4] = 60, + [1][0][0][0][RTW89_FCC][5] = 68, + [1][0][0][0][RTW89_ETSI][5] = 60, + [1][0][0][0][RTW89_MKK][5] = 72, + [1][0][0][0][RTW89_IC][5] = 68, + [1][0][0][0][RTW89_ACMA][5] = 60, + [1][0][0][0][RTW89_FCC][6] = 52, + [1][0][0][0][RTW89_ETSI][6] = 60, + [1][0][0][0][RTW89_MKK][6] = 72, + [1][0][0][0][RTW89_IC][6] = 52, + [1][0][0][0][RTW89_ACMA][6] = 60, + [1][0][0][0][RTW89_FCC][7] = 52, + [1][0][0][0][RTW89_ETSI][7] = 60, + [1][0][0][0][RTW89_MKK][7] = 72, + [1][0][0][0][RTW89_IC][7] = 52, + [1][0][0][0][RTW89_ACMA][7] = 60, + [1][0][0][0][RTW89_FCC][8] = 52, + [1][0][0][0][RTW89_ETSI][8] = 60, + [1][0][0][0][RTW89_MKK][8] = 72, + [1][0][0][0][RTW89_IC][8] = 52, + [1][0][0][0][RTW89_ACMA][8] = 60, + [1][0][0][0][RTW89_FCC][9] = 44, + [1][0][0][0][RTW89_ETSI][9] = 60, + 
[1][0][0][0][RTW89_MKK][9] = 72, + [1][0][0][0][RTW89_IC][9] = 44, + [1][0][0][0][RTW89_ACMA][9] = 60, + [1][0][0][0][RTW89_FCC][10] = 32, + [1][0][0][0][RTW89_ETSI][10] = 60, + [1][0][0][0][RTW89_MKK][10] = 70, + [1][0][0][0][RTW89_IC][10] = 32, + [1][0][0][0][RTW89_ACMA][10] = 60, + [1][0][0][0][RTW89_FCC][11] = 127, + [1][0][0][0][RTW89_ETSI][11] = 127, + [1][0][0][0][RTW89_MKK][11] = 127, + [1][0][0][0][RTW89_IC][11] = 127, + [1][0][0][0][RTW89_ACMA][11] = 127, + [1][0][0][0][RTW89_FCC][12] = 127, + [1][0][0][0][RTW89_ETSI][12] = 127, + [1][0][0][0][RTW89_MKK][12] = 127, + [1][0][0][0][RTW89_IC][12] = 127, + [1][0][0][0][RTW89_ACMA][12] = 127, + [1][0][0][0][RTW89_FCC][13] = 127, + [1][0][0][0][RTW89_ETSI][13] = 127, + [1][0][0][0][RTW89_MKK][13] = 127, + [1][0][0][0][RTW89_IC][13] = 127, + [1][0][0][0][RTW89_ACMA][13] = 127, + [1][1][0][0][RTW89_FCC][0] = 127, + [1][1][0][0][RTW89_ETSI][0] = 127, + [1][1][0][0][RTW89_MKK][0] = 127, + [1][1][0][0][RTW89_IC][0] = 127, + [1][1][0][0][RTW89_ACMA][0] = 127, + [1][1][0][0][RTW89_FCC][1] = 127, + [1][1][0][0][RTW89_ETSI][1] = 127, + [1][1][0][0][RTW89_MKK][1] = 127, + [1][1][0][0][RTW89_IC][1] = 127, + [1][1][0][0][RTW89_ACMA][1] = 127, + [1][1][0][0][RTW89_FCC][2] = 48, + [1][1][0][0][RTW89_ETSI][2] = 48, + [1][1][0][0][RTW89_MKK][2] = 60, + [1][1][0][0][RTW89_IC][2] = 48, + [1][1][0][0][RTW89_ACMA][2] = 48, + [1][1][0][0][RTW89_FCC][3] = 48, + [1][1][0][0][RTW89_ETSI][3] = 48, + [1][1][0][0][RTW89_MKK][3] = 60, + [1][1][0][0][RTW89_IC][3] = 48, + [1][1][0][0][RTW89_ACMA][3] = 48, + [1][1][0][0][RTW89_FCC][4] = 48, + [1][1][0][0][RTW89_ETSI][4] = 48, + [1][1][0][0][RTW89_MKK][4] = 60, + [1][1][0][0][RTW89_IC][4] = 48, + [1][1][0][0][RTW89_ACMA][4] = 48, + [1][1][0][0][RTW89_FCC][5] = 64, + [1][1][0][0][RTW89_ETSI][5] = 48, + [1][1][0][0][RTW89_MKK][5] = 60, + [1][1][0][0][RTW89_IC][5] = 64, + [1][1][0][0][RTW89_ACMA][5] = 48, + [1][1][0][0][RTW89_FCC][6] = 36, + [1][1][0][0][RTW89_ETSI][6] = 48, + [1][1][0][0][RTW89_MKK][6] = 60, + [1][1][0][0][RTW89_IC][6] = 36, + [1][1][0][0][RTW89_ACMA][6] = 48, + [1][1][0][0][RTW89_FCC][7] = 36, + [1][1][0][0][RTW89_ETSI][7] = 48, + [1][1][0][0][RTW89_MKK][7] = 60, + [1][1][0][0][RTW89_IC][7] = 36, + [1][1][0][0][RTW89_ACMA][7] = 48, + [1][1][0][0][RTW89_FCC][8] = 36, + [1][1][0][0][RTW89_ETSI][8] = 48, + [1][1][0][0][RTW89_MKK][8] = 60, + [1][1][0][0][RTW89_IC][8] = 36, + [1][1][0][0][RTW89_ACMA][8] = 48, + [1][1][0][0][RTW89_FCC][9] = 32, + [1][1][0][0][RTW89_ETSI][9] = 48, + [1][1][0][0][RTW89_MKK][9] = 60, + [1][1][0][0][RTW89_IC][9] = 32, + [1][1][0][0][RTW89_ACMA][9] = 48, + [1][1][0][0][RTW89_FCC][10] = 32, + [1][1][0][0][RTW89_ETSI][10] = 48, + [1][1][0][0][RTW89_MKK][10] = 58, + [1][1][0][0][RTW89_IC][10] = 32, + [1][1][0][0][RTW89_ACMA][10] = 48, + [1][1][0][0][RTW89_FCC][11] = 127, + [1][1][0][0][RTW89_ETSI][11] = 127, + [1][1][0][0][RTW89_MKK][11] = 127, + [1][1][0][0][RTW89_IC][11] = 127, + [1][1][0][0][RTW89_ACMA][11] = 127, + [1][1][0][0][RTW89_FCC][12] = 127, + [1][1][0][0][RTW89_ETSI][12] = 127, + [1][1][0][0][RTW89_MKK][12] = 127, + [1][1][0][0][RTW89_IC][12] = 127, + [1][1][0][0][RTW89_ACMA][12] = 127, + [1][1][0][0][RTW89_FCC][13] = 127, + [1][1][0][0][RTW89_ETSI][13] = 127, + [1][1][0][0][RTW89_MKK][13] = 127, + [1][1][0][0][RTW89_IC][13] = 127, + [1][1][0][0][RTW89_ACMA][13] = 127, + [0][0][1][0][RTW89_FCC][0] = 78, + [0][0][1][0][RTW89_ETSI][0] = 60, + [0][0][1][0][RTW89_MKK][0] = 76, + [0][0][1][0][RTW89_IC][0] = 78, + [0][0][1][0][RTW89_ACMA][0] = 60, + 
[0][0][1][0][RTW89_FCC][1] = 78, + [0][0][1][0][RTW89_ETSI][1] = 60, + [0][0][1][0][RTW89_MKK][1] = 76, + [0][0][1][0][RTW89_IC][1] = 78, + [0][0][1][0][RTW89_ACMA][1] = 60, + [0][0][1][0][RTW89_FCC][2] = 80, + [0][0][1][0][RTW89_ETSI][2] = 60, + [0][0][1][0][RTW89_MKK][2] = 76, + [0][0][1][0][RTW89_IC][2] = 80, + [0][0][1][0][RTW89_ACMA][2] = 60, + [0][0][1][0][RTW89_FCC][3] = 80, + [0][0][1][0][RTW89_ETSI][3] = 60, + [0][0][1][0][RTW89_MKK][3] = 76, + [0][0][1][0][RTW89_IC][3] = 80, + [0][0][1][0][RTW89_ACMA][3] = 60, + [0][0][1][0][RTW89_FCC][4] = 80, + [0][0][1][0][RTW89_ETSI][4] = 60, + [0][0][1][0][RTW89_MKK][4] = 76, + [0][0][1][0][RTW89_IC][4] = 80, + [0][0][1][0][RTW89_ACMA][4] = 60, + [0][0][1][0][RTW89_FCC][5] = 80, + [0][0][1][0][RTW89_ETSI][5] = 60, + [0][0][1][0][RTW89_MKK][5] = 76, + [0][0][1][0][RTW89_IC][5] = 80, + [0][0][1][0][RTW89_ACMA][5] = 60, + [0][0][1][0][RTW89_FCC][6] = 80, + [0][0][1][0][RTW89_ETSI][6] = 60, + [0][0][1][0][RTW89_MKK][6] = 76, + [0][0][1][0][RTW89_IC][6] = 80, + [0][0][1][0][RTW89_ACMA][6] = 60, + [0][0][1][0][RTW89_FCC][7] = 80, + [0][0][1][0][RTW89_ETSI][7] = 60, + [0][0][1][0][RTW89_MKK][7] = 76, + [0][0][1][0][RTW89_IC][7] = 80, + [0][0][1][0][RTW89_ACMA][7] = 60, + [0][0][1][0][RTW89_FCC][8] = 80, + [0][0][1][0][RTW89_ETSI][8] = 60, + [0][0][1][0][RTW89_MKK][8] = 76, + [0][0][1][0][RTW89_IC][8] = 80, + [0][0][1][0][RTW89_ACMA][8] = 60, + [0][0][1][0][RTW89_FCC][9] = 76, + [0][0][1][0][RTW89_ETSI][9] = 60, + [0][0][1][0][RTW89_MKK][9] = 76, + [0][0][1][0][RTW89_IC][9] = 76, + [0][0][1][0][RTW89_ACMA][9] = 60, + [0][0][1][0][RTW89_FCC][10] = 76, + [0][0][1][0][RTW89_ETSI][10] = 60, + [0][0][1][0][RTW89_MKK][10] = 76, + [0][0][1][0][RTW89_IC][10] = 76, + [0][0][1][0][RTW89_ACMA][10] = 60, + [0][0][1][0][RTW89_FCC][11] = 56, + [0][0][1][0][RTW89_ETSI][11] = 60, + [0][0][1][0][RTW89_MKK][11] = 76, + [0][0][1][0][RTW89_IC][11] = 56, + [0][0][1][0][RTW89_ACMA][11] = 60, + [0][0][1][0][RTW89_FCC][12] = 52, + [0][0][1][0][RTW89_ETSI][12] = 60, + [0][0][1][0][RTW89_MKK][12] = 76, + [0][0][1][0][RTW89_IC][12] = 52, + [0][0][1][0][RTW89_ACMA][12] = 60, + [0][0][1][0][RTW89_FCC][13] = 127, + [0][0][1][0][RTW89_ETSI][13] = 127, + [0][0][1][0][RTW89_MKK][13] = 127, + [0][0][1][0][RTW89_IC][13] = 127, + [0][0][1][0][RTW89_ACMA][13] = 127, + [0][1][1][0][RTW89_FCC][0] = 64, + [0][1][1][0][RTW89_ETSI][0] = 48, + [0][1][1][0][RTW89_MKK][0] = 68, + [0][1][1][0][RTW89_IC][0] = 64, + [0][1][1][0][RTW89_ACMA][0] = 48, + [0][1][1][0][RTW89_FCC][1] = 64, + [0][1][1][0][RTW89_ETSI][1] = 48, + [0][1][1][0][RTW89_MKK][1] = 68, + [0][1][1][0][RTW89_IC][1] = 64, + [0][1][1][0][RTW89_ACMA][1] = 48, + [0][1][1][0][RTW89_FCC][2] = 68, + [0][1][1][0][RTW89_ETSI][2] = 48, + [0][1][1][0][RTW89_MKK][2] = 68, + [0][1][1][0][RTW89_IC][2] = 68, + [0][1][1][0][RTW89_ACMA][2] = 48, + [0][1][1][0][RTW89_FCC][3] = 72, + [0][1][1][0][RTW89_ETSI][3] = 48, + [0][1][1][0][RTW89_MKK][3] = 68, + [0][1][1][0][RTW89_IC][3] = 72, + [0][1][1][0][RTW89_ACMA][3] = 48, + [0][1][1][0][RTW89_FCC][4] = 80, + [0][1][1][0][RTW89_ETSI][4] = 48, + [0][1][1][0][RTW89_MKK][4] = 68, + [0][1][1][0][RTW89_IC][4] = 80, + [0][1][1][0][RTW89_ACMA][4] = 48, + [0][1][1][0][RTW89_FCC][5] = 80, + [0][1][1][0][RTW89_ETSI][5] = 48, + [0][1][1][0][RTW89_MKK][5] = 68, + [0][1][1][0][RTW89_IC][5] = 80, + [0][1][1][0][RTW89_ACMA][5] = 48, + [0][1][1][0][RTW89_FCC][6] = 80, + [0][1][1][0][RTW89_ETSI][6] = 48, + [0][1][1][0][RTW89_MKK][6] = 68, + [0][1][1][0][RTW89_IC][6] = 80, + [0][1][1][0][RTW89_ACMA][6] = 48, + 
[0][1][1][0][RTW89_FCC][7] = 72, + [0][1][1][0][RTW89_ETSI][7] = 48, + [0][1][1][0][RTW89_MKK][7] = 68, + [0][1][1][0][RTW89_IC][7] = 72, + [0][1][1][0][RTW89_ACMA][7] = 48, + [0][1][1][0][RTW89_FCC][8] = 68, + [0][1][1][0][RTW89_ETSI][8] = 48, + [0][1][1][0][RTW89_MKK][8] = 68, + [0][1][1][0][RTW89_IC][8] = 68, + [0][1][1][0][RTW89_ACMA][8] = 48, + [0][1][1][0][RTW89_FCC][9] = 64, + [0][1][1][0][RTW89_ETSI][9] = 48, + [0][1][1][0][RTW89_MKK][9] = 68, + [0][1][1][0][RTW89_IC][9] = 64, + [0][1][1][0][RTW89_ACMA][9] = 48, + [0][1][1][0][RTW89_FCC][10] = 64, + [0][1][1][0][RTW89_ETSI][10] = 48, + [0][1][1][0][RTW89_MKK][10] = 68, + [0][1][1][0][RTW89_IC][10] = 64, + [0][1][1][0][RTW89_ACMA][10] = 48, + [0][1][1][0][RTW89_FCC][11] = 48, + [0][1][1][0][RTW89_ETSI][11] = 48, + [0][1][1][0][RTW89_MKK][11] = 68, + [0][1][1][0][RTW89_IC][11] = 48, + [0][1][1][0][RTW89_ACMA][11] = 48, + [0][1][1][0][RTW89_FCC][12] = 44, + [0][1][1][0][RTW89_ETSI][12] = 48, + [0][1][1][0][RTW89_MKK][12] = 68, + [0][1][1][0][RTW89_IC][12] = 44, + [0][1][1][0][RTW89_ACMA][12] = 48, + [0][1][1][0][RTW89_FCC][13] = 127, + [0][1][1][0][RTW89_ETSI][13] = 127, + [0][1][1][0][RTW89_MKK][13] = 127, + [0][1][1][0][RTW89_IC][13] = 127, + [0][1][1][0][RTW89_ACMA][13] = 127, + [0][0][2][0][RTW89_FCC][0] = 78, + [0][0][2][0][RTW89_ETSI][0] = 60, + [0][0][2][0][RTW89_MKK][0] = 76, + [0][0][2][0][RTW89_IC][0] = 78, + [0][0][2][0][RTW89_ACMA][0] = 60, + [0][0][2][0][RTW89_FCC][1] = 78, + [0][0][2][0][RTW89_ETSI][1] = 60, + [0][0][2][0][RTW89_MKK][1] = 76, + [0][0][2][0][RTW89_IC][1] = 78, + [0][0][2][0][RTW89_ACMA][1] = 60, + [0][0][2][0][RTW89_FCC][2] = 80, + [0][0][2][0][RTW89_ETSI][2] = 60, + [0][0][2][0][RTW89_MKK][2] = 76, + [0][0][2][0][RTW89_IC][2] = 80, + [0][0][2][0][RTW89_ACMA][2] = 60, + [0][0][2][0][RTW89_FCC][3] = 80, + [0][0][2][0][RTW89_ETSI][3] = 60, + [0][0][2][0][RTW89_MKK][3] = 76, + [0][0][2][0][RTW89_IC][3] = 80, + [0][0][2][0][RTW89_ACMA][3] = 60, + [0][0][2][0][RTW89_FCC][4] = 80, + [0][0][2][0][RTW89_ETSI][4] = 60, + [0][0][2][0][RTW89_MKK][4] = 76, + [0][0][2][0][RTW89_IC][4] = 80, + [0][0][2][0][RTW89_ACMA][4] = 60, + [0][0][2][0][RTW89_FCC][5] = 80, + [0][0][2][0][RTW89_ETSI][5] = 60, + [0][0][2][0][RTW89_MKK][5] = 76, + [0][0][2][0][RTW89_IC][5] = 80, + [0][0][2][0][RTW89_ACMA][5] = 60, + [0][0][2][0][RTW89_FCC][6] = 80, + [0][0][2][0][RTW89_ETSI][6] = 60, + [0][0][2][0][RTW89_MKK][6] = 76, + [0][0][2][0][RTW89_IC][6] = 80, + [0][0][2][0][RTW89_ACMA][6] = 60, + [0][0][2][0][RTW89_FCC][7] = 80, + [0][0][2][0][RTW89_ETSI][7] = 60, + [0][0][2][0][RTW89_MKK][7] = 76, + [0][0][2][0][RTW89_IC][7] = 80, + [0][0][2][0][RTW89_ACMA][7] = 60, + [0][0][2][0][RTW89_FCC][8] = 78, + [0][0][2][0][RTW89_ETSI][8] = 60, + [0][0][2][0][RTW89_MKK][8] = 76, + [0][0][2][0][RTW89_IC][8] = 78, + [0][0][2][0][RTW89_ACMA][8] = 60, + [0][0][2][0][RTW89_FCC][9] = 74, + [0][0][2][0][RTW89_ETSI][9] = 60, + [0][0][2][0][RTW89_MKK][9] = 76, + [0][0][2][0][RTW89_IC][9] = 74, + [0][0][2][0][RTW89_ACMA][9] = 60, + [0][0][2][0][RTW89_FCC][10] = 74, + [0][0][2][0][RTW89_ETSI][10] = 60, + [0][0][2][0][RTW89_MKK][10] = 76, + [0][0][2][0][RTW89_IC][10] = 74, + [0][0][2][0][RTW89_ACMA][10] = 60, + [0][0][2][0][RTW89_FCC][11] = 56, + [0][0][2][0][RTW89_ETSI][11] = 60, + [0][0][2][0][RTW89_MKK][11] = 76, + [0][0][2][0][RTW89_IC][11] = 56, + [0][0][2][0][RTW89_ACMA][11] = 60, + [0][0][2][0][RTW89_FCC][12] = 52, + [0][0][2][0][RTW89_ETSI][12] = 60, + [0][0][2][0][RTW89_MKK][12] = 76, + [0][0][2][0][RTW89_IC][12] = 52, + [0][0][2][0][RTW89_ACMA][12] = 
60, + [0][0][2][0][RTW89_FCC][13] = 127, + [0][0][2][0][RTW89_ETSI][13] = 127, + [0][0][2][0][RTW89_MKK][13] = 127, + [0][0][2][0][RTW89_IC][13] = 127, + [0][0][2][0][RTW89_ACMA][13] = 127, + [0][1][2][0][RTW89_FCC][0] = 60, + [0][1][2][0][RTW89_ETSI][0] = 48, + [0][1][2][0][RTW89_MKK][0] = 70, + [0][1][2][0][RTW89_IC][0] = 60, + [0][1][2][0][RTW89_ACMA][0] = 48, + [0][1][2][0][RTW89_FCC][1] = 60, + [0][1][2][0][RTW89_ETSI][1] = 48, + [0][1][2][0][RTW89_MKK][1] = 70, + [0][1][2][0][RTW89_IC][1] = 60, + [0][1][2][0][RTW89_ACMA][1] = 48, + [0][1][2][0][RTW89_FCC][2] = 64, + [0][1][2][0][RTW89_ETSI][2] = 48, + [0][1][2][0][RTW89_MKK][2] = 70, + [0][1][2][0][RTW89_IC][2] = 64, + [0][1][2][0][RTW89_ACMA][2] = 48, + [0][1][2][0][RTW89_FCC][3] = 68, + [0][1][2][0][RTW89_ETSI][3] = 48, + [0][1][2][0][RTW89_MKK][3] = 70, + [0][1][2][0][RTW89_IC][3] = 68, + [0][1][2][0][RTW89_ACMA][3] = 48, + [0][1][2][0][RTW89_FCC][4] = 74, + [0][1][2][0][RTW89_ETSI][4] = 48, + [0][1][2][0][RTW89_MKK][4] = 70, + [0][1][2][0][RTW89_IC][4] = 74, + [0][1][2][0][RTW89_ACMA][4] = 48, + [0][1][2][0][RTW89_FCC][5] = 80, + [0][1][2][0][RTW89_ETSI][5] = 48, + [0][1][2][0][RTW89_MKK][5] = 70, + [0][1][2][0][RTW89_IC][5] = 80, + [0][1][2][0][RTW89_ACMA][5] = 48, + [0][1][2][0][RTW89_FCC][6] = 76, + [0][1][2][0][RTW89_ETSI][6] = 48, + [0][1][2][0][RTW89_MKK][6] = 70, + [0][1][2][0][RTW89_IC][6] = 76, + [0][1][2][0][RTW89_ACMA][6] = 48, + [0][1][2][0][RTW89_FCC][7] = 68, + [0][1][2][0][RTW89_ETSI][7] = 48, + [0][1][2][0][RTW89_MKK][7] = 70, + [0][1][2][0][RTW89_IC][7] = 68, + [0][1][2][0][RTW89_ACMA][7] = 48, + [0][1][2][0][RTW89_FCC][8] = 64, + [0][1][2][0][RTW89_ETSI][8] = 48, + [0][1][2][0][RTW89_MKK][8] = 70, + [0][1][2][0][RTW89_IC][8] = 64, + [0][1][2][0][RTW89_ACMA][8] = 48, + [0][1][2][0][RTW89_FCC][9] = 60, + [0][1][2][0][RTW89_ETSI][9] = 48, + [0][1][2][0][RTW89_MKK][9] = 70, + [0][1][2][0][RTW89_IC][9] = 60, + [0][1][2][0][RTW89_ACMA][9] = 48, + [0][1][2][0][RTW89_FCC][10] = 60, + [0][1][2][0][RTW89_ETSI][10] = 48, + [0][1][2][0][RTW89_MKK][10] = 70, + [0][1][2][0][RTW89_IC][10] = 60, + [0][1][2][0][RTW89_ACMA][10] = 48, + [0][1][2][0][RTW89_FCC][11] = 48, + [0][1][2][0][RTW89_ETSI][11] = 48, + [0][1][2][0][RTW89_MKK][11] = 70, + [0][1][2][0][RTW89_IC][11] = 48, + [0][1][2][0][RTW89_ACMA][11] = 48, + [0][1][2][0][RTW89_FCC][12] = 44, + [0][1][2][0][RTW89_ETSI][12] = 48, + [0][1][2][0][RTW89_MKK][12] = 70, + [0][1][2][0][RTW89_IC][12] = 44, + [0][1][2][0][RTW89_ACMA][12] = 48, + [0][1][2][0][RTW89_FCC][13] = 127, + [0][1][2][0][RTW89_ETSI][13] = 127, + [0][1][2][0][RTW89_MKK][13] = 127, + [0][1][2][0][RTW89_IC][13] = 127, + [0][1][2][0][RTW89_ACMA][13] = 127, + [0][1][2][1][RTW89_FCC][0] = 60, + [0][1][2][1][RTW89_ETSI][0] = 38, + [0][1][2][1][RTW89_MKK][0] = 58, + [0][1][2][1][RTW89_IC][0] = 60, + [0][1][2][1][RTW89_ACMA][0] = 36, + [0][1][2][1][RTW89_FCC][1] = 60, + [0][1][2][1][RTW89_ETSI][1] = 38, + [0][1][2][1][RTW89_MKK][1] = 58, + [0][1][2][1][RTW89_IC][1] = 60, + [0][1][2][1][RTW89_ACMA][1] = 36, + [0][1][2][1][RTW89_FCC][2] = 64, + [0][1][2][1][RTW89_ETSI][2] = 38, + [0][1][2][1][RTW89_MKK][2] = 58, + [0][1][2][1][RTW89_IC][2] = 64, + [0][1][2][1][RTW89_ACMA][2] = 36, + [0][1][2][1][RTW89_FCC][3] = 68, + [0][1][2][1][RTW89_ETSI][3] = 38, + [0][1][2][1][RTW89_MKK][3] = 58, + [0][1][2][1][RTW89_IC][3] = 68, + [0][1][2][1][RTW89_ACMA][3] = 36, + [0][1][2][1][RTW89_FCC][4] = 74, + [0][1][2][1][RTW89_ETSI][4] = 38, + [0][1][2][1][RTW89_MKK][4] = 58, + [0][1][2][1][RTW89_IC][4] = 74, + [0][1][2][1][RTW89_ACMA][4] = 
36, + [0][1][2][1][RTW89_FCC][5] = 80, + [0][1][2][1][RTW89_ETSI][5] = 38, + [0][1][2][1][RTW89_MKK][5] = 58, + [0][1][2][1][RTW89_IC][5] = 80, + [0][1][2][1][RTW89_ACMA][5] = 36, + [0][1][2][1][RTW89_FCC][6] = 76, + [0][1][2][1][RTW89_ETSI][6] = 38, + [0][1][2][1][RTW89_MKK][6] = 58, + [0][1][2][1][RTW89_IC][6] = 76, + [0][1][2][1][RTW89_ACMA][6] = 36, + [0][1][2][1][RTW89_FCC][7] = 68, + [0][1][2][1][RTW89_ETSI][7] = 38, + [0][1][2][1][RTW89_MKK][7] = 58, + [0][1][2][1][RTW89_IC][7] = 68, + [0][1][2][1][RTW89_ACMA][7] = 36, + [0][1][2][1][RTW89_FCC][8] = 64, + [0][1][2][1][RTW89_ETSI][8] = 38, + [0][1][2][1][RTW89_MKK][8] = 58, + [0][1][2][1][RTW89_IC][8] = 64, + [0][1][2][1][RTW89_ACMA][8] = 36, + [0][1][2][1][RTW89_FCC][9] = 60, + [0][1][2][1][RTW89_ETSI][9] = 38, + [0][1][2][1][RTW89_MKK][9] = 58, + [0][1][2][1][RTW89_IC][9] = 60, + [0][1][2][1][RTW89_ACMA][9] = 36, + [0][1][2][1][RTW89_FCC][10] = 60, + [0][1][2][1][RTW89_ETSI][10] = 38, + [0][1][2][1][RTW89_MKK][10] = 58, + [0][1][2][1][RTW89_IC][10] = 60, + [0][1][2][1][RTW89_ACMA][10] = 36, + [0][1][2][1][RTW89_FCC][11] = 48, + [0][1][2][1][RTW89_ETSI][11] = 38, + [0][1][2][1][RTW89_MKK][11] = 58, + [0][1][2][1][RTW89_IC][11] = 48, + [0][1][2][1][RTW89_ACMA][11] = 36, + [0][1][2][1][RTW89_FCC][12] = 44, + [0][1][2][1][RTW89_ETSI][12] = 38, + [0][1][2][1][RTW89_MKK][12] = 58, + [0][1][2][1][RTW89_IC][12] = 44, + [0][1][2][1][RTW89_ACMA][12] = 36, + [0][1][2][1][RTW89_FCC][13] = 127, + [0][1][2][1][RTW89_ETSI][13] = 127, + [0][1][2][1][RTW89_MKK][13] = 127, + [0][1][2][1][RTW89_IC][13] = 127, + [0][1][2][1][RTW89_ACMA][13] = 127, + [1][0][2][0][RTW89_FCC][0] = 127, + [1][0][2][0][RTW89_ETSI][0] = 127, + [1][0][2][0][RTW89_MKK][0] = 127, + [1][0][2][0][RTW89_IC][0] = 127, + [1][0][2][0][RTW89_ACMA][0] = 127, + [1][0][2][0][RTW89_FCC][1] = 127, + [1][0][2][0][RTW89_ETSI][1] = 127, + [1][0][2][0][RTW89_MKK][1] = 127, + [1][0][2][0][RTW89_IC][1] = 127, + [1][0][2][0][RTW89_ACMA][1] = 127, + [1][0][2][0][RTW89_FCC][2] = 72, + [1][0][2][0][RTW89_ETSI][2] = 60, + [1][0][2][0][RTW89_MKK][2] = 72, + [1][0][2][0][RTW89_IC][2] = 72, + [1][0][2][0][RTW89_ACMA][2] = 60, + [1][0][2][0][RTW89_FCC][3] = 72, + [1][0][2][0][RTW89_ETSI][3] = 60, + [1][0][2][0][RTW89_MKK][3] = 72, + [1][0][2][0][RTW89_IC][3] = 72, + [1][0][2][0][RTW89_ACMA][3] = 60, + [1][0][2][0][RTW89_FCC][4] = 74, + [1][0][2][0][RTW89_ETSI][4] = 60, + [1][0][2][0][RTW89_MKK][4] = 72, + [1][0][2][0][RTW89_IC][4] = 74, + [1][0][2][0][RTW89_ACMA][4] = 60, + [1][0][2][0][RTW89_FCC][5] = 74, + [1][0][2][0][RTW89_ETSI][5] = 60, + [1][0][2][0][RTW89_MKK][5] = 72, + [1][0][2][0][RTW89_IC][5] = 74, + [1][0][2][0][RTW89_ACMA][5] = 60, + [1][0][2][0][RTW89_FCC][6] = 74, + [1][0][2][0][RTW89_ETSI][6] = 60, + [1][0][2][0][RTW89_MKK][6] = 72, + [1][0][2][0][RTW89_IC][6] = 74, + [1][0][2][0][RTW89_ACMA][6] = 60, + [1][0][2][0][RTW89_FCC][7] = 70, + [1][0][2][0][RTW89_ETSI][7] = 60, + [1][0][2][0][RTW89_MKK][7] = 72, + [1][0][2][0][RTW89_IC][7] = 70, + [1][0][2][0][RTW89_ACMA][7] = 60, + [1][0][2][0][RTW89_FCC][8] = 70, + [1][0][2][0][RTW89_ETSI][8] = 60, + [1][0][2][0][RTW89_MKK][8] = 72, + [1][0][2][0][RTW89_IC][8] = 70, + [1][0][2][0][RTW89_ACMA][8] = 60, + [1][0][2][0][RTW89_FCC][9] = 70, + [1][0][2][0][RTW89_ETSI][9] = 60, + [1][0][2][0][RTW89_MKK][9] = 72, + [1][0][2][0][RTW89_IC][9] = 70, + [1][0][2][0][RTW89_ACMA][9] = 60, + [1][0][2][0][RTW89_FCC][10] = 68, + [1][0][2][0][RTW89_ETSI][10] = 60, + [1][0][2][0][RTW89_MKK][10] = 72, + [1][0][2][0][RTW89_IC][10] = 68, + 
[1][0][2][0][RTW89_ACMA][10] = 60, + [1][0][2][0][RTW89_FCC][11] = 127, + [1][0][2][0][RTW89_ETSI][11] = 127, + [1][0][2][0][RTW89_MKK][11] = 127, + [1][0][2][0][RTW89_IC][11] = 127, + [1][0][2][0][RTW89_ACMA][11] = 127, + [1][0][2][0][RTW89_FCC][12] = 127, + [1][0][2][0][RTW89_ETSI][12] = 127, + [1][0][2][0][RTW89_MKK][12] = 127, + [1][0][2][0][RTW89_IC][12] = 127, + [1][0][2][0][RTW89_ACMA][12] = 127, + [1][0][2][0][RTW89_FCC][13] = 127, + [1][0][2][0][RTW89_ETSI][13] = 127, + [1][0][2][0][RTW89_MKK][13] = 127, + [1][0][2][0][RTW89_IC][13] = 127, + [1][0][2][0][RTW89_ACMA][13] = 127, + [1][1][2][0][RTW89_FCC][0] = 127, + [1][1][2][0][RTW89_ETSI][0] = 127, + [1][1][2][0][RTW89_MKK][0] = 127, + [1][1][2][0][RTW89_IC][0] = 127, + [1][1][2][0][RTW89_ACMA][0] = 127, + [1][1][2][0][RTW89_FCC][1] = 127, + [1][1][2][0][RTW89_ETSI][1] = 127, + [1][1][2][0][RTW89_MKK][1] = 127, + [1][1][2][0][RTW89_IC][1] = 127, + [1][1][2][0][RTW89_ACMA][1] = 127, + [1][1][2][0][RTW89_FCC][2] = 56, + [1][1][2][0][RTW89_ETSI][2] = 48, + [1][1][2][0][RTW89_MKK][2] = 70, + [1][1][2][0][RTW89_IC][2] = 56, + [1][1][2][0][RTW89_ACMA][2] = 48, + [1][1][2][0][RTW89_FCC][3] = 56, + [1][1][2][0][RTW89_ETSI][3] = 48, + [1][1][2][0][RTW89_MKK][3] = 70, + [1][1][2][0][RTW89_IC][3] = 56, + [1][1][2][0][RTW89_ACMA][3] = 48, + [1][1][2][0][RTW89_FCC][4] = 60, + [1][1][2][0][RTW89_ETSI][4] = 48, + [1][1][2][0][RTW89_MKK][4] = 70, + [1][1][2][0][RTW89_IC][4] = 60, + [1][1][2][0][RTW89_ACMA][4] = 48, + [1][1][2][0][RTW89_FCC][5] = 68, + [1][1][2][0][RTW89_ETSI][5] = 48, + [1][1][2][0][RTW89_MKK][5] = 70, + [1][1][2][0][RTW89_IC][5] = 68, + [1][1][2][0][RTW89_ACMA][5] = 48, + [1][1][2][0][RTW89_FCC][6] = 60, + [1][1][2][0][RTW89_ETSI][6] = 48, + [1][1][2][0][RTW89_MKK][6] = 70, + [1][1][2][0][RTW89_IC][6] = 60, + [1][1][2][0][RTW89_ACMA][6] = 48, + [1][1][2][0][RTW89_FCC][7] = 56, + [1][1][2][0][RTW89_ETSI][7] = 48, + [1][1][2][0][RTW89_MKK][7] = 70, + [1][1][2][0][RTW89_IC][7] = 56, + [1][1][2][0][RTW89_ACMA][7] = 48, + [1][1][2][0][RTW89_FCC][8] = 56, + [1][1][2][0][RTW89_ETSI][8] = 48, + [1][1][2][0][RTW89_MKK][8] = 70, + [1][1][2][0][RTW89_IC][8] = 56, + [1][1][2][0][RTW89_ACMA][8] = 48, + [1][1][2][0][RTW89_FCC][9] = 44, + [1][1][2][0][RTW89_ETSI][9] = 48, + [1][1][2][0][RTW89_MKK][9] = 70, + [1][1][2][0][RTW89_IC][9] = 44, + [1][1][2][0][RTW89_ACMA][9] = 48, + [1][1][2][0][RTW89_FCC][10] = 40, + [1][1][2][0][RTW89_ETSI][10] = 48, + [1][1][2][0][RTW89_MKK][10] = 70, + [1][1][2][0][RTW89_IC][10] = 40, + [1][1][2][0][RTW89_ACMA][10] = 48, + [1][1][2][0][RTW89_FCC][11] = 127, + [1][1][2][0][RTW89_ETSI][11] = 127, + [1][1][2][0][RTW89_MKK][11] = 127, + [1][1][2][0][RTW89_IC][11] = 127, + [1][1][2][0][RTW89_ACMA][11] = 127, + [1][1][2][0][RTW89_FCC][12] = 127, + [1][1][2][0][RTW89_ETSI][12] = 127, + [1][1][2][0][RTW89_MKK][12] = 127, + [1][1][2][0][RTW89_IC][12] = 127, + [1][1][2][0][RTW89_ACMA][12] = 127, + [1][1][2][0][RTW89_FCC][13] = 127, + [1][1][2][0][RTW89_ETSI][13] = 127, + [1][1][2][0][RTW89_MKK][13] = 127, + [1][1][2][0][RTW89_IC][13] = 127, + [1][1][2][0][RTW89_ACMA][13] = 127, + [1][1][2][1][RTW89_FCC][0] = 127, + [1][1][2][1][RTW89_ETSI][0] = 127, + [1][1][2][1][RTW89_MKK][0] = 127, + [1][1][2][1][RTW89_IC][0] = 127, + [1][1][2][1][RTW89_ACMA][0] = 127, + [1][1][2][1][RTW89_FCC][1] = 127, + [1][1][2][1][RTW89_ETSI][1] = 127, + [1][1][2][1][RTW89_MKK][1] = 127, + [1][1][2][1][RTW89_IC][1] = 127, + [1][1][2][1][RTW89_ACMA][1] = 127, + [1][1][2][1][RTW89_FCC][2] = 56, + [1][1][2][1][RTW89_ETSI][2] = 38, + 
[1][1][2][1][RTW89_MKK][2] = 58, + [1][1][2][1][RTW89_IC][2] = 56, + [1][1][2][1][RTW89_ACMA][2] = 36, + [1][1][2][1][RTW89_FCC][3] = 56, + [1][1][2][1][RTW89_ETSI][3] = 38, + [1][1][2][1][RTW89_MKK][3] = 58, + [1][1][2][1][RTW89_IC][3] = 56, + [1][1][2][1][RTW89_ACMA][3] = 36, + [1][1][2][1][RTW89_FCC][4] = 60, + [1][1][2][1][RTW89_ETSI][4] = 38, + [1][1][2][1][RTW89_MKK][4] = 58, + [1][1][2][1][RTW89_IC][4] = 60, + [1][1][2][1][RTW89_ACMA][4] = 36, + [1][1][2][1][RTW89_FCC][5] = 68, + [1][1][2][1][RTW89_ETSI][5] = 38, + [1][1][2][1][RTW89_MKK][5] = 58, + [1][1][2][1][RTW89_IC][5] = 68, + [1][1][2][1][RTW89_ACMA][5] = 36, + [1][1][2][1][RTW89_FCC][6] = 60, + [1][1][2][1][RTW89_ETSI][6] = 38, + [1][1][2][1][RTW89_MKK][6] = 58, + [1][1][2][1][RTW89_IC][6] = 60, + [1][1][2][1][RTW89_ACMA][6] = 36, + [1][1][2][1][RTW89_FCC][7] = 56, + [1][1][2][1][RTW89_ETSI][7] = 38, + [1][1][2][1][RTW89_MKK][7] = 58, + [1][1][2][1][RTW89_IC][7] = 56, + [1][1][2][1][RTW89_ACMA][7] = 36, + [1][1][2][1][RTW89_FCC][8] = 56, + [1][1][2][1][RTW89_ETSI][8] = 38, + [1][1][2][1][RTW89_MKK][8] = 58, + [1][1][2][1][RTW89_IC][8] = 56, + [1][1][2][1][RTW89_ACMA][8] = 36, + [1][1][2][1][RTW89_FCC][9] = 44, + [1][1][2][1][RTW89_ETSI][9] = 38, + [1][1][2][1][RTW89_MKK][9] = 58, + [1][1][2][1][RTW89_IC][9] = 44, + [1][1][2][1][RTW89_ACMA][9] = 36, + [1][1][2][1][RTW89_FCC][10] = 40, + [1][1][2][1][RTW89_ETSI][10] = 38, + [1][1][2][1][RTW89_MKK][10] = 58, + [1][1][2][1][RTW89_IC][10] = 40, + [1][1][2][1][RTW89_ACMA][10] = 36, + [1][1][2][1][RTW89_FCC][11] = 127, + [1][1][2][1][RTW89_ETSI][11] = 127, + [1][1][2][1][RTW89_MKK][11] = 127, + [1][1][2][1][RTW89_IC][11] = 127, + [1][1][2][1][RTW89_ACMA][11] = 127, + [1][1][2][1][RTW89_FCC][12] = 127, + [1][1][2][1][RTW89_ETSI][12] = 127, + [1][1][2][1][RTW89_MKK][12] = 127, + [1][1][2][1][RTW89_IC][12] = 127, + [1][1][2][1][RTW89_ACMA][12] = 127, + [1][1][2][1][RTW89_FCC][13] = 127, + [1][1][2][1][RTW89_ETSI][13] = 127, + [1][1][2][1][RTW89_MKK][13] = 127, + [1][1][2][1][RTW89_IC][13] = 127, + [1][1][2][1][RTW89_ACMA][13] = 127, +}; + +const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = { + [0][0][1][0][RTW89_WW][0] = 60, + [0][0][1][0][RTW89_WW][2] = 60, + [0][0][1][0][RTW89_WW][4] = 60, + [0][0][1][0][RTW89_WW][6] = 60, + [0][0][1][0][RTW89_WW][8] = 60, + [0][0][1][0][RTW89_WW][10] = 60, + [0][0][1][0][RTW89_WW][12] = 60, + [0][0][1][0][RTW89_WW][14] = 60, + [0][0][1][0][RTW89_WW][15] = 60, + [0][0][1][0][RTW89_WW][17] = 60, + [0][0][1][0][RTW89_WW][19] = 60, + [0][0][1][0][RTW89_WW][21] = 60, + [0][0][1][0][RTW89_WW][23] = 60, + [0][0][1][0][RTW89_WW][25] = 60, + [0][0][1][0][RTW89_WW][27] = 60, + [0][0][1][0][RTW89_WW][29] = 60, + [0][0][1][0][RTW89_WW][31] = 60, + [0][0][1][0][RTW89_WW][33] = 60, + [0][0][1][0][RTW89_WW][35] = 60, + [0][0][1][0][RTW89_WW][37] = 78, + [0][0][1][0][RTW89_WW][38] = 30, + [0][0][1][0][RTW89_WW][40] = 30, + [0][0][1][0][RTW89_WW][42] = 30, + [0][0][1][0][RTW89_WW][44] = 30, + [0][0][1][0][RTW89_WW][46] = 30, + [0][0][1][0][RTW89_WW][48] = 80, + [0][0][1][0][RTW89_WW][50] = 80, + [0][0][1][0][RTW89_WW][52] = 80, + [0][1][1][0][RTW89_WW][0] = 42, + [0][1][1][0][RTW89_WW][2] = 42, + [0][1][1][0][RTW89_WW][4] = 42, + [0][1][1][0][RTW89_WW][6] = 42, + [0][1][1][0][RTW89_WW][8] = 48, + [0][1][1][0][RTW89_WW][10] = 48, + [0][1][1][0][RTW89_WW][12] = 48, + [0][1][1][0][RTW89_WW][14] = 48, + [0][1][1][0][RTW89_WW][15] = 48, + [0][1][1][0][RTW89_WW][17] = 48, + 
[0][1][1][0][RTW89_WW][19] = 48, + [0][1][1][0][RTW89_WW][21] = 48, + [0][1][1][0][RTW89_WW][23] = 48, + [0][1][1][0][RTW89_WW][25] = 48, + [0][1][1][0][RTW89_WW][27] = 48, + [0][1][1][0][RTW89_WW][29] = 48, + [0][1][1][0][RTW89_WW][31] = 48, + [0][1][1][0][RTW89_WW][33] = 48, + [0][1][1][0][RTW89_WW][35] = 48, + [0][1][1][0][RTW89_WW][37] = 70, + [0][1][1][0][RTW89_WW][38] = 18, + [0][1][1][0][RTW89_WW][40] = 16, + [0][1][1][0][RTW89_WW][42] = 18, + [0][1][1][0][RTW89_WW][44] = 16, + [0][1][1][0][RTW89_WW][46] = 18, + [0][1][1][0][RTW89_WW][48] = 58, + [0][1][1][0][RTW89_WW][50] = 58, + [0][1][1][0][RTW89_WW][52] = 58, + [0][0][2][0][RTW89_WW][0] = 62, + [0][0][2][0][RTW89_WW][2] = 62, + [0][0][2][0][RTW89_WW][4] = 62, + [0][0][2][0][RTW89_WW][6] = 62, + [0][0][2][0][RTW89_WW][8] = 62, + [0][0][2][0][RTW89_WW][10] = 62, + [0][0][2][0][RTW89_WW][12] = 62, + [0][0][2][0][RTW89_WW][14] = 62, + [0][0][2][0][RTW89_WW][15] = 62, + [0][0][2][0][RTW89_WW][17] = 62, + [0][0][2][0][RTW89_WW][19] = 62, + [0][0][2][0][RTW89_WW][21] = 62, + [0][0][2][0][RTW89_WW][23] = 62, + [0][0][2][0][RTW89_WW][25] = 62, + [0][0][2][0][RTW89_WW][27] = 62, + [0][0][2][0][RTW89_WW][29] = 62, + [0][0][2][0][RTW89_WW][31] = 62, + [0][0][2][0][RTW89_WW][33] = 62, + [0][0][2][0][RTW89_WW][35] = 62, + [0][0][2][0][RTW89_WW][37] = 78, + [0][0][2][0][RTW89_WW][38] = 30, + [0][0][2][0][RTW89_WW][40] = 30, + [0][0][2][0][RTW89_WW][42] = 30, + [0][0][2][0][RTW89_WW][44] = 30, + [0][0][2][0][RTW89_WW][46] = 30, + [0][0][2][0][RTW89_WW][48] = 80, + [0][0][2][0][RTW89_WW][50] = 80, + [0][0][2][0][RTW89_WW][52] = 80, + [0][1][2][0][RTW89_WW][0] = 44, + [0][1][2][0][RTW89_WW][2] = 44, + [0][1][2][0][RTW89_WW][4] = 44, + [0][1][2][0][RTW89_WW][6] = 44, + [0][1][2][0][RTW89_WW][8] = 50, + [0][1][2][0][RTW89_WW][10] = 50, + [0][1][2][0][RTW89_WW][12] = 50, + [0][1][2][0][RTW89_WW][14] = 50, + [0][1][2][0][RTW89_WW][15] = 50, + [0][1][2][0][RTW89_WW][17] = 50, + [0][1][2][0][RTW89_WW][19] = 50, + [0][1][2][0][RTW89_WW][21] = 50, + [0][1][2][0][RTW89_WW][23] = 50, + [0][1][2][0][RTW89_WW][25] = 50, + [0][1][2][0][RTW89_WW][27] = 50, + [0][1][2][0][RTW89_WW][29] = 50, + [0][1][2][0][RTW89_WW][31] = 50, + [0][1][2][0][RTW89_WW][33] = 50, + [0][1][2][0][RTW89_WW][35] = 50, + [0][1][2][0][RTW89_WW][37] = 72, + [0][1][2][0][RTW89_WW][38] = 18, + [0][1][2][0][RTW89_WW][40] = 18, + [0][1][2][0][RTW89_WW][42] = 18, + [0][1][2][0][RTW89_WW][44] = 18, + [0][1][2][0][RTW89_WW][46] = 18, + [0][1][2][0][RTW89_WW][48] = 60, + [0][1][2][0][RTW89_WW][50] = 60, + [0][1][2][0][RTW89_WW][52] = 60, + [0][1][2][1][RTW89_WW][0] = 38, + [0][1][2][1][RTW89_WW][2] = 38, + [0][1][2][1][RTW89_WW][4] = 38, + [0][1][2][1][RTW89_WW][6] = 38, + [0][1][2][1][RTW89_WW][8] = 38, + [0][1][2][1][RTW89_WW][10] = 38, + [0][1][2][1][RTW89_WW][12] = 38, + [0][1][2][1][RTW89_WW][14] = 38, + [0][1][2][1][RTW89_WW][15] = 38, + [0][1][2][1][RTW89_WW][17] = 38, + [0][1][2][1][RTW89_WW][19] = 38, + [0][1][2][1][RTW89_WW][21] = 38, + [0][1][2][1][RTW89_WW][23] = 38, + [0][1][2][1][RTW89_WW][25] = 42, + [0][1][2][1][RTW89_WW][27] = 42, + [0][1][2][1][RTW89_WW][29] = 42, + [0][1][2][1][RTW89_WW][31] = 38, + [0][1][2][1][RTW89_WW][33] = 38, + [0][1][2][1][RTW89_WW][35] = 38, + [0][1][2][1][RTW89_WW][37] = 70, + [0][1][2][1][RTW89_WW][38] = 8, + [0][1][2][1][RTW89_WW][40] = 8, + [0][1][2][1][RTW89_WW][42] = 8, + [0][1][2][1][RTW89_WW][44] = 8, + [0][1][2][1][RTW89_WW][46] = 8, + [0][1][2][1][RTW89_WW][48] = 60, + [0][1][2][1][RTW89_WW][50] = 60, + [0][1][2][1][RTW89_WW][52] = 60, + 
[1][0][2][0][RTW89_WW][1] = 66, + [1][0][2][0][RTW89_WW][5] = 66, + [1][0][2][0][RTW89_WW][9] = 66, + [1][0][2][0][RTW89_WW][13] = 66, + [1][0][2][0][RTW89_WW][16] = 66, + [1][0][2][0][RTW89_WW][20] = 66, + [1][0][2][0][RTW89_WW][24] = 66, + [1][0][2][0][RTW89_WW][28] = 66, + [1][0][2][0][RTW89_WW][32] = 66, + [1][0][2][0][RTW89_WW][36] = 76, + [1][0][2][0][RTW89_WW][39] = 30, + [1][0][2][0][RTW89_WW][43] = 30, + [1][0][2][0][RTW89_WW][47] = 80, + [1][0][2][0][RTW89_WW][51] = 72, + [1][1][2][0][RTW89_WW][1] = 54, + [1][1][2][0][RTW89_WW][5] = 54, + [1][1][2][0][RTW89_WW][9] = 54, + [1][1][2][0][RTW89_WW][13] = 54, + [1][1][2][0][RTW89_WW][16] = 54, + [1][1][2][0][RTW89_WW][20] = 54, + [1][1][2][0][RTW89_WW][24] = 54, + [1][1][2][0][RTW89_WW][28] = 54, + [1][1][2][0][RTW89_WW][32] = 54, + [1][1][2][0][RTW89_WW][36] = 72, + [1][1][2][0][RTW89_WW][39] = 18, + [1][1][2][0][RTW89_WW][43] = 18, + [1][1][2][0][RTW89_WW][47] = 70, + [1][1][2][0][RTW89_WW][51] = 68, + [1][1][2][1][RTW89_WW][1] = 42, + [1][1][2][1][RTW89_WW][5] = 42, + [1][1][2][1][RTW89_WW][9] = 42, + [1][1][2][1][RTW89_WW][13] = 42, + [1][1][2][1][RTW89_WW][16] = 42, + [1][1][2][1][RTW89_WW][20] = 42, + [1][1][2][1][RTW89_WW][24] = 42, + [1][1][2][1][RTW89_WW][28] = 42, + [1][1][2][1][RTW89_WW][32] = 42, + [1][1][2][1][RTW89_WW][36] = 70, + [1][1][2][1][RTW89_WW][39] = 8, + [1][1][2][1][RTW89_WW][43] = 8, + [1][1][2][1][RTW89_WW][47] = 70, + [1][1][2][1][RTW89_WW][51] = 68, + [2][0][2][0][RTW89_WW][3] = 64, + [2][0][2][0][RTW89_WW][11] = 66, + [2][0][2][0][RTW89_WW][18] = 64, + [2][0][2][0][RTW89_WW][26] = 66, + [2][0][2][0][RTW89_WW][34] = 72, + [2][0][2][0][RTW89_WW][41] = 30, + [2][0][2][0][RTW89_WW][49] = 66, + [2][1][2][0][RTW89_WW][3] = 54, + [2][1][2][0][RTW89_WW][11] = 54, + [2][1][2][0][RTW89_WW][18] = 54, + [2][1][2][0][RTW89_WW][26] = 54, + [2][1][2][0][RTW89_WW][34] = 72, + [2][1][2][0][RTW89_WW][41] = 18, + [2][1][2][0][RTW89_WW][49] = 60, + [2][1][2][1][RTW89_WW][3] = 42, + [2][1][2][1][RTW89_WW][11] = 42, + [2][1][2][1][RTW89_WW][18] = 42, + [2][1][2][1][RTW89_WW][26] = 44, + [2][1][2][1][RTW89_WW][34] = 70, + [2][1][2][1][RTW89_WW][41] = 8, + [2][1][2][1][RTW89_WW][49] = 60, + [3][0][2][0][RTW89_WW][7] = 56, + [3][0][2][0][RTW89_WW][22] = 56, + [3][0][2][0][RTW89_WW][45] = 56, + [3][1][2][0][RTW89_WW][7] = 44, + [3][1][2][0][RTW89_WW][22] = 44, + [3][1][2][0][RTW89_WW][45] = 44, + [3][1][2][1][RTW89_WW][7] = 32, + [3][1][2][1][RTW89_WW][22] = 32, + [3][1][2][1][RTW89_WW][45] = 32, + [0][0][1][0][RTW89_FCC][0] = 80, + [0][0][1][0][RTW89_ETSI][0] = 60, + [0][0][1][0][RTW89_MKK][0] = 62, + [0][0][1][0][RTW89_IC][0] = 62, + [0][0][1][0][RTW89_ACMA][0] = 60, + [0][0][1][0][RTW89_FCC][2] = 80, + [0][0][1][0][RTW89_ETSI][2] = 60, + [0][0][1][0][RTW89_MKK][2] = 62, + [0][0][1][0][RTW89_IC][2] = 62, + [0][0][1][0][RTW89_ACMA][2] = 60, + [0][0][1][0][RTW89_FCC][4] = 80, + [0][0][1][0][RTW89_ETSI][4] = 60, + [0][0][1][0][RTW89_MKK][4] = 62, + [0][0][1][0][RTW89_IC][4] = 62, + [0][0][1][0][RTW89_ACMA][4] = 60, + [0][0][1][0][RTW89_FCC][6] = 80, + [0][0][1][0][RTW89_ETSI][6] = 60, + [0][0][1][0][RTW89_MKK][6] = 62, + [0][0][1][0][RTW89_IC][6] = 62, + [0][0][1][0][RTW89_ACMA][6] = 60, + [0][0][1][0][RTW89_FCC][8] = 80, + [0][0][1][0][RTW89_ETSI][8] = 60, + [0][0][1][0][RTW89_MKK][8] = 64, + [0][0][1][0][RTW89_IC][8] = 66, + [0][0][1][0][RTW89_ACMA][8] = 60, + [0][0][1][0][RTW89_FCC][10] = 80, + [0][0][1][0][RTW89_ETSI][10] = 60, + [0][0][1][0][RTW89_MKK][10] = 64, + [0][0][1][0][RTW89_IC][10] = 66, + [0][0][1][0][RTW89_ACMA][10] 
= 60, + [0][0][1][0][RTW89_FCC][12] = 80, + [0][0][1][0][RTW89_ETSI][12] = 60, + [0][0][1][0][RTW89_MKK][12] = 64, + [0][0][1][0][RTW89_IC][12] = 66, + [0][0][1][0][RTW89_ACMA][12] = 60, + [0][0][1][0][RTW89_FCC][14] = 80, + [0][0][1][0][RTW89_ETSI][14] = 60, + [0][0][1][0][RTW89_MKK][14] = 62, + [0][0][1][0][RTW89_IC][14] = 66, + [0][0][1][0][RTW89_ACMA][14] = 60, + [0][0][1][0][RTW89_FCC][15] = 78, + [0][0][1][0][RTW89_ETSI][15] = 60, + [0][0][1][0][RTW89_MKK][15] = 78, + [0][0][1][0][RTW89_IC][15] = 78, + [0][0][1][0][RTW89_ACMA][15] = 60, + [0][0][1][0][RTW89_FCC][17] = 80, + [0][0][1][0][RTW89_ETSI][17] = 60, + [0][0][1][0][RTW89_MKK][17] = 78, + [0][0][1][0][RTW89_IC][17] = 80, + [0][0][1][0][RTW89_ACMA][17] = 60, + [0][0][1][0][RTW89_FCC][19] = 80, + [0][0][1][0][RTW89_ETSI][19] = 60, + [0][0][1][0][RTW89_MKK][19] = 78, + [0][0][1][0][RTW89_IC][19] = 80, + [0][0][1][0][RTW89_ACMA][19] = 60, + [0][0][1][0][RTW89_FCC][21] = 80, + [0][0][1][0][RTW89_ETSI][21] = 60, + [0][0][1][0][RTW89_MKK][21] = 78, + [0][0][1][0][RTW89_IC][21] = 80, + [0][0][1][0][RTW89_ACMA][21] = 60, + [0][0][1][0][RTW89_FCC][23] = 80, + [0][0][1][0][RTW89_ETSI][23] = 60, + [0][0][1][0][RTW89_MKK][23] = 78, + [0][0][1][0][RTW89_IC][23] = 80, + [0][0][1][0][RTW89_ACMA][23] = 60, + [0][0][1][0][RTW89_FCC][25] = 80, + [0][0][1][0][RTW89_ETSI][25] = 60, + [0][0][1][0][RTW89_MKK][25] = 78, + [0][0][1][0][RTW89_IC][25] = 127, + [0][0][1][0][RTW89_ACMA][25] = 127, + [0][0][1][0][RTW89_FCC][27] = 80, + [0][0][1][0][RTW89_ETSI][27] = 60, + [0][0][1][0][RTW89_MKK][27] = 78, + [0][0][1][0][RTW89_IC][27] = 127, + [0][0][1][0][RTW89_ACMA][27] = 127, + [0][0][1][0][RTW89_FCC][29] = 80, + [0][0][1][0][RTW89_ETSI][29] = 60, + [0][0][1][0][RTW89_MKK][29] = 78, + [0][0][1][0][RTW89_IC][29] = 127, + [0][0][1][0][RTW89_ACMA][29] = 127, + [0][0][1][0][RTW89_FCC][31] = 80, + [0][0][1][0][RTW89_ETSI][31] = 60, + [0][0][1][0][RTW89_MKK][31] = 78, + [0][0][1][0][RTW89_IC][31] = 80, + [0][0][1][0][RTW89_ACMA][31] = 60, + [0][0][1][0][RTW89_FCC][33] = 80, + [0][0][1][0][RTW89_ETSI][33] = 60, + [0][0][1][0][RTW89_MKK][33] = 78, + [0][0][1][0][RTW89_IC][33] = 80, + [0][0][1][0][RTW89_ACMA][33] = 60, + [0][0][1][0][RTW89_FCC][35] = 72, + [0][0][1][0][RTW89_ETSI][35] = 60, + [0][0][1][0][RTW89_MKK][35] = 78, + [0][0][1][0][RTW89_IC][35] = 72, + [0][0][1][0][RTW89_ACMA][35] = 60, + [0][0][1][0][RTW89_FCC][37] = 80, + [0][0][1][0][RTW89_ETSI][37] = 127, + [0][0][1][0][RTW89_MKK][37] = 78, + [0][0][1][0][RTW89_IC][37] = 80, + [0][0][1][0][RTW89_ACMA][37] = 78, + [0][0][1][0][RTW89_FCC][38] = 80, + [0][0][1][0][RTW89_ETSI][38] = 30, + [0][0][1][0][RTW89_MKK][38] = 127, + [0][0][1][0][RTW89_IC][38] = 80, + [0][0][1][0][RTW89_ACMA][38] = 78, + [0][0][1][0][RTW89_FCC][40] = 80, + [0][0][1][0][RTW89_ETSI][40] = 30, + [0][0][1][0][RTW89_MKK][40] = 127, + [0][0][1][0][RTW89_IC][40] = 80, + [0][0][1][0][RTW89_ACMA][40] = 78, + [0][0][1][0][RTW89_FCC][42] = 80, + [0][0][1][0][RTW89_ETSI][42] = 30, + [0][0][1][0][RTW89_MKK][42] = 127, + [0][0][1][0][RTW89_IC][42] = 80, + [0][0][1][0][RTW89_ACMA][42] = 78, + [0][0][1][0][RTW89_FCC][44] = 80, + [0][0][1][0][RTW89_ETSI][44] = 30, + [0][0][1][0][RTW89_MKK][44] = 127, + [0][0][1][0][RTW89_IC][44] = 80, + [0][0][1][0][RTW89_ACMA][44] = 78, + [0][0][1][0][RTW89_FCC][46] = 80, + [0][0][1][0][RTW89_ETSI][46] = 30, + [0][0][1][0][RTW89_MKK][46] = 127, + [0][0][1][0][RTW89_IC][46] = 80, + [0][0][1][0][RTW89_ACMA][46] = 78, + [0][0][1][0][RTW89_FCC][48] = 80, + [0][0][1][0][RTW89_ETSI][48] = 127, + 
[0][0][1][0][RTW89_MKK][48] = 127, + [0][0][1][0][RTW89_IC][48] = 127, + [0][0][1][0][RTW89_ACMA][48] = 127, + [0][0][1][0][RTW89_FCC][50] = 80, + [0][0][1][0][RTW89_ETSI][50] = 127, + [0][0][1][0][RTW89_MKK][50] = 127, + [0][0][1][0][RTW89_IC][50] = 127, + [0][0][1][0][RTW89_ACMA][50] = 127, + [0][0][1][0][RTW89_FCC][52] = 80, + [0][0][1][0][RTW89_ETSI][52] = 127, + [0][0][1][0][RTW89_MKK][52] = 127, + [0][0][1][0][RTW89_IC][52] = 127, + [0][0][1][0][RTW89_ACMA][52] = 127, + [0][1][1][0][RTW89_FCC][0] = 70, + [0][1][1][0][RTW89_ETSI][0] = 48, + [0][1][1][0][RTW89_MKK][0] = 50, + [0][1][1][0][RTW89_IC][0] = 42, + [0][1][1][0][RTW89_ACMA][0] = 48, + [0][1][1][0][RTW89_FCC][2] = 70, + [0][1][1][0][RTW89_ETSI][2] = 48, + [0][1][1][0][RTW89_MKK][2] = 50, + [0][1][1][0][RTW89_IC][2] = 42, + [0][1][1][0][RTW89_ACMA][2] = 48, + [0][1][1][0][RTW89_FCC][4] = 70, + [0][1][1][0][RTW89_ETSI][4] = 48, + [0][1][1][0][RTW89_MKK][4] = 50, + [0][1][1][0][RTW89_IC][4] = 42, + [0][1][1][0][RTW89_ACMA][4] = 48, + [0][1][1][0][RTW89_FCC][6] = 70, + [0][1][1][0][RTW89_ETSI][6] = 48, + [0][1][1][0][RTW89_MKK][6] = 50, + [0][1][1][0][RTW89_IC][6] = 42, + [0][1][1][0][RTW89_ACMA][6] = 48, + [0][1][1][0][RTW89_FCC][8] = 70, + [0][1][1][0][RTW89_ETSI][8] = 48, + [0][1][1][0][RTW89_MKK][8] = 50, + [0][1][1][0][RTW89_IC][8] = 54, + [0][1][1][0][RTW89_ACMA][8] = 48, + [0][1][1][0][RTW89_FCC][10] = 70, + [0][1][1][0][RTW89_ETSI][10] = 48, + [0][1][1][0][RTW89_MKK][10] = 50, + [0][1][1][0][RTW89_IC][10] = 54, + [0][1][1][0][RTW89_ACMA][10] = 48, + [0][1][1][0][RTW89_FCC][12] = 70, + [0][1][1][0][RTW89_ETSI][12] = 48, + [0][1][1][0][RTW89_MKK][12] = 50, + [0][1][1][0][RTW89_IC][12] = 54, + [0][1][1][0][RTW89_ACMA][12] = 48, + [0][1][1][0][RTW89_FCC][14] = 70, + [0][1][1][0][RTW89_ETSI][14] = 48, + [0][1][1][0][RTW89_MKK][14] = 50, + [0][1][1][0][RTW89_IC][14] = 54, + [0][1][1][0][RTW89_ACMA][14] = 48, + [0][1][1][0][RTW89_FCC][15] = 68, + [0][1][1][0][RTW89_ETSI][15] = 48, + [0][1][1][0][RTW89_MKK][15] = 70, + [0][1][1][0][RTW89_IC][15] = 68, + [0][1][1][0][RTW89_ACMA][15] = 48, + [0][1][1][0][RTW89_FCC][17] = 70, + [0][1][1][0][RTW89_ETSI][17] = 48, + [0][1][1][0][RTW89_MKK][17] = 72, + [0][1][1][0][RTW89_IC][17] = 70, + [0][1][1][0][RTW89_ACMA][17] = 48, + [0][1][1][0][RTW89_FCC][19] = 70, + [0][1][1][0][RTW89_ETSI][19] = 48, + [0][1][1][0][RTW89_MKK][19] = 72, + [0][1][1][0][RTW89_IC][19] = 70, + [0][1][1][0][RTW89_ACMA][19] = 48, + [0][1][1][0][RTW89_FCC][21] = 70, + [0][1][1][0][RTW89_ETSI][21] = 48, + [0][1][1][0][RTW89_MKK][21] = 72, + [0][1][1][0][RTW89_IC][21] = 70, + [0][1][1][0][RTW89_ACMA][21] = 48, + [0][1][1][0][RTW89_FCC][23] = 70, + [0][1][1][0][RTW89_ETSI][23] = 48, + [0][1][1][0][RTW89_MKK][23] = 72, + [0][1][1][0][RTW89_IC][23] = 70, + [0][1][1][0][RTW89_ACMA][23] = 48, + [0][1][1][0][RTW89_FCC][25] = 70, + [0][1][1][0][RTW89_ETSI][25] = 48, + [0][1][1][0][RTW89_MKK][25] = 70, + [0][1][1][0][RTW89_IC][25] = 127, + [0][1][1][0][RTW89_ACMA][25] = 127, + [0][1][1][0][RTW89_FCC][27] = 70, + [0][1][1][0][RTW89_ETSI][27] = 48, + [0][1][1][0][RTW89_MKK][27] = 72, + [0][1][1][0][RTW89_IC][27] = 127, + [0][1][1][0][RTW89_ACMA][27] = 127, + [0][1][1][0][RTW89_FCC][29] = 70, + [0][1][1][0][RTW89_ETSI][29] = 48, + [0][1][1][0][RTW89_MKK][29] = 72, + [0][1][1][0][RTW89_IC][29] = 127, + [0][1][1][0][RTW89_ACMA][29] = 127, + [0][1][1][0][RTW89_FCC][31] = 70, + [0][1][1][0][RTW89_ETSI][31] = 48, + [0][1][1][0][RTW89_MKK][31] = 72, + [0][1][1][0][RTW89_IC][31] = 70, + [0][1][1][0][RTW89_ACMA][31] = 48, + 
[0][1][1][0][RTW89_FCC][33] = 70, + [0][1][1][0][RTW89_ETSI][33] = 48, + [0][1][1][0][RTW89_MKK][33] = 72, + [0][1][1][0][RTW89_IC][33] = 70, + [0][1][1][0][RTW89_ACMA][33] = 48, + [0][1][1][0][RTW89_FCC][35] = 68, + [0][1][1][0][RTW89_ETSI][35] = 48, + [0][1][1][0][RTW89_MKK][35] = 72, + [0][1][1][0][RTW89_IC][35] = 68, + [0][1][1][0][RTW89_ACMA][35] = 48, + [0][1][1][0][RTW89_FCC][37] = 70, + [0][1][1][0][RTW89_ETSI][37] = 127, + [0][1][1][0][RTW89_MKK][37] = 72, + [0][1][1][0][RTW89_IC][37] = 70, + [0][1][1][0][RTW89_ACMA][37] = 72, + [0][1][1][0][RTW89_FCC][38] = 80, + [0][1][1][0][RTW89_ETSI][38] = 18, + [0][1][1][0][RTW89_MKK][38] = 127, + [0][1][1][0][RTW89_IC][38] = 80, + [0][1][1][0][RTW89_ACMA][38] = 74, + [0][1][1][0][RTW89_FCC][40] = 80, + [0][1][1][0][RTW89_ETSI][40] = 18, + [0][1][1][0][RTW89_MKK][40] = 127, + [0][1][1][0][RTW89_IC][40] = 80, + [0][1][1][0][RTW89_ACMA][40] = 16, + [0][1][1][0][RTW89_FCC][42] = 80, + [0][1][1][0][RTW89_ETSI][42] = 18, + [0][1][1][0][RTW89_MKK][42] = 127, + [0][1][1][0][RTW89_IC][42] = 80, + [0][1][1][0][RTW89_ACMA][42] = 78, + [0][1][1][0][RTW89_FCC][44] = 80, + [0][1][1][0][RTW89_ETSI][44] = 18, + [0][1][1][0][RTW89_MKK][44] = 127, + [0][1][1][0][RTW89_IC][44] = 80, + [0][1][1][0][RTW89_ACMA][44] = 16, + [0][1][1][0][RTW89_FCC][46] = 80, + [0][1][1][0][RTW89_ETSI][46] = 18, + [0][1][1][0][RTW89_MKK][46] = 127, + [0][1][1][0][RTW89_IC][46] = 80, + [0][1][1][0][RTW89_ACMA][46] = 78, + [0][1][1][0][RTW89_FCC][48] = 58, + [0][1][1][0][RTW89_ETSI][48] = 127, + [0][1][1][0][RTW89_MKK][48] = 127, + [0][1][1][0][RTW89_IC][48] = 127, + [0][1][1][0][RTW89_ACMA][48] = 127, + [0][1][1][0][RTW89_FCC][50] = 58, + [0][1][1][0][RTW89_ETSI][50] = 127, + [0][1][1][0][RTW89_MKK][50] = 127, + [0][1][1][0][RTW89_IC][50] = 127, + [0][1][1][0][RTW89_ACMA][50] = 127, + [0][1][1][0][RTW89_FCC][52] = 58, + [0][1][1][0][RTW89_ETSI][52] = 127, + [0][1][1][0][RTW89_MKK][52] = 127, + [0][1][1][0][RTW89_IC][52] = 127, + [0][1][1][0][RTW89_ACMA][52] = 127, + [0][0][2][0][RTW89_FCC][0] = 80, + [0][0][2][0][RTW89_ETSI][0] = 62, + [0][0][2][0][RTW89_MKK][0] = 64, + [0][0][2][0][RTW89_IC][0] = 66, + [0][0][2][0][RTW89_ACMA][0] = 62, + [0][0][2][0][RTW89_FCC][2] = 80, + [0][0][2][0][RTW89_ETSI][2] = 62, + [0][0][2][0][RTW89_MKK][2] = 64, + [0][0][2][0][RTW89_IC][2] = 66, + [0][0][2][0][RTW89_ACMA][2] = 62, + [0][0][2][0][RTW89_FCC][4] = 80, + [0][0][2][0][RTW89_ETSI][4] = 62, + [0][0][2][0][RTW89_MKK][4] = 64, + [0][0][2][0][RTW89_IC][4] = 66, + [0][0][2][0][RTW89_ACMA][4] = 62, + [0][0][2][0][RTW89_FCC][6] = 80, + [0][0][2][0][RTW89_ETSI][6] = 62, + [0][0][2][0][RTW89_MKK][6] = 64, + [0][0][2][0][RTW89_IC][6] = 66, + [0][0][2][0][RTW89_ACMA][6] = 62, + [0][0][2][0][RTW89_FCC][8] = 80, + [0][0][2][0][RTW89_ETSI][8] = 62, + [0][0][2][0][RTW89_MKK][8] = 64, + [0][0][2][0][RTW89_IC][8] = 66, + [0][0][2][0][RTW89_ACMA][8] = 62, + [0][0][2][0][RTW89_FCC][10] = 80, + [0][0][2][0][RTW89_ETSI][10] = 62, + [0][0][2][0][RTW89_MKK][10] = 64, + [0][0][2][0][RTW89_IC][10] = 66, + [0][0][2][0][RTW89_ACMA][10] = 62, + [0][0][2][0][RTW89_FCC][12] = 80, + [0][0][2][0][RTW89_ETSI][12] = 62, + [0][0][2][0][RTW89_MKK][12] = 64, + [0][0][2][0][RTW89_IC][12] = 66, + [0][0][2][0][RTW89_ACMA][12] = 62, + [0][0][2][0][RTW89_FCC][14] = 80, + [0][0][2][0][RTW89_ETSI][14] = 62, + [0][0][2][0][RTW89_MKK][14] = 64, + [0][0][2][0][RTW89_IC][14] = 66, + [0][0][2][0][RTW89_ACMA][14] = 62, + [0][0][2][0][RTW89_FCC][15] = 76, + [0][0][2][0][RTW89_ETSI][15] = 62, + [0][0][2][0][RTW89_MKK][15] = 78, + 
[0][0][2][0][RTW89_IC][15] = 76, + [0][0][2][0][RTW89_ACMA][15] = 62, + [0][0][2][0][RTW89_FCC][17] = 80, + [0][0][2][0][RTW89_ETSI][17] = 62, + [0][0][2][0][RTW89_MKK][17] = 78, + [0][0][2][0][RTW89_IC][17] = 80, + [0][0][2][0][RTW89_ACMA][17] = 62, + [0][0][2][0][RTW89_FCC][19] = 80, + [0][0][2][0][RTW89_ETSI][19] = 62, + [0][0][2][0][RTW89_MKK][19] = 78, + [0][0][2][0][RTW89_IC][19] = 80, + [0][0][2][0][RTW89_ACMA][19] = 62, + [0][0][2][0][RTW89_FCC][21] = 80, + [0][0][2][0][RTW89_ETSI][21] = 62, + [0][0][2][0][RTW89_MKK][21] = 78, + [0][0][2][0][RTW89_IC][21] = 80, + [0][0][2][0][RTW89_ACMA][21] = 62, + [0][0][2][0][RTW89_FCC][23] = 80, + [0][0][2][0][RTW89_ETSI][23] = 62, + [0][0][2][0][RTW89_MKK][23] = 78, + [0][0][2][0][RTW89_IC][23] = 80, + [0][0][2][0][RTW89_ACMA][23] = 62, + [0][0][2][0][RTW89_FCC][25] = 80, + [0][0][2][0][RTW89_ETSI][25] = 62, + [0][0][2][0][RTW89_MKK][25] = 78, + [0][0][2][0][RTW89_IC][25] = 127, + [0][0][2][0][RTW89_ACMA][25] = 127, + [0][0][2][0][RTW89_FCC][27] = 80, + [0][0][2][0][RTW89_ETSI][27] = 62, + [0][0][2][0][RTW89_MKK][27] = 78, + [0][0][2][0][RTW89_IC][27] = 127, + [0][0][2][0][RTW89_ACMA][27] = 127, + [0][0][2][0][RTW89_FCC][29] = 80, + [0][0][2][0][RTW89_ETSI][29] = 62, + [0][0][2][0][RTW89_MKK][29] = 78, + [0][0][2][0][RTW89_IC][29] = 127, + [0][0][2][0][RTW89_ACMA][29] = 127, + [0][0][2][0][RTW89_FCC][31] = 80, + [0][0][2][0][RTW89_ETSI][31] = 62, + [0][0][2][0][RTW89_MKK][31] = 78, + [0][0][2][0][RTW89_IC][31] = 80, + [0][0][2][0][RTW89_ACMA][31] = 62, + [0][0][2][0][RTW89_FCC][33] = 80, + [0][0][2][0][RTW89_ETSI][33] = 62, + [0][0][2][0][RTW89_MKK][33] = 78, + [0][0][2][0][RTW89_IC][33] = 80, + [0][0][2][0][RTW89_ACMA][33] = 62, + [0][0][2][0][RTW89_FCC][35] = 72, + [0][0][2][0][RTW89_ETSI][35] = 62, + [0][0][2][0][RTW89_MKK][35] = 78, + [0][0][2][0][RTW89_IC][35] = 72, + [0][0][2][0][RTW89_ACMA][35] = 62, + [0][0][2][0][RTW89_FCC][37] = 80, + [0][0][2][0][RTW89_ETSI][37] = 127, + [0][0][2][0][RTW89_MKK][37] = 78, + [0][0][2][0][RTW89_IC][37] = 80, + [0][0][2][0][RTW89_ACMA][37] = 78, + [0][0][2][0][RTW89_FCC][38] = 80, + [0][0][2][0][RTW89_ETSI][38] = 30, + [0][0][2][0][RTW89_MKK][38] = 127, + [0][0][2][0][RTW89_IC][38] = 80, + [0][0][2][0][RTW89_ACMA][38] = 78, + [0][0][2][0][RTW89_FCC][40] = 80, + [0][0][2][0][RTW89_ETSI][40] = 30, + [0][0][2][0][RTW89_MKK][40] = 127, + [0][0][2][0][RTW89_IC][40] = 80, + [0][0][2][0][RTW89_ACMA][40] = 78, + [0][0][2][0][RTW89_FCC][42] = 80, + [0][0][2][0][RTW89_ETSI][42] = 30, + [0][0][2][0][RTW89_MKK][42] = 127, + [0][0][2][0][RTW89_IC][42] = 80, + [0][0][2][0][RTW89_ACMA][42] = 78, + [0][0][2][0][RTW89_FCC][44] = 80, + [0][0][2][0][RTW89_ETSI][44] = 30, + [0][0][2][0][RTW89_MKK][44] = 127, + [0][0][2][0][RTW89_IC][44] = 80, + [0][0][2][0][RTW89_ACMA][44] = 78, + [0][0][2][0][RTW89_FCC][46] = 80, + [0][0][2][0][RTW89_ETSI][46] = 30, + [0][0][2][0][RTW89_MKK][46] = 127, + [0][0][2][0][RTW89_IC][46] = 80, + [0][0][2][0][RTW89_ACMA][46] = 78, + [0][0][2][0][RTW89_FCC][48] = 80, + [0][0][2][0][RTW89_ETSI][48] = 127, + [0][0][2][0][RTW89_MKK][48] = 127, + [0][0][2][0][RTW89_IC][48] = 127, + [0][0][2][0][RTW89_ACMA][48] = 127, + [0][0][2][0][RTW89_FCC][50] = 80, + [0][0][2][0][RTW89_ETSI][50] = 127, + [0][0][2][0][RTW89_MKK][50] = 127, + [0][0][2][0][RTW89_IC][50] = 127, + [0][0][2][0][RTW89_ACMA][50] = 127, + [0][0][2][0][RTW89_FCC][52] = 80, + [0][0][2][0][RTW89_ETSI][52] = 127, + [0][0][2][0][RTW89_MKK][52] = 127, + [0][0][2][0][RTW89_IC][52] = 127, + [0][0][2][0][RTW89_ACMA][52] = 127, + 
[0][1][2][0][RTW89_FCC][0] = 72, + [0][1][2][0][RTW89_ETSI][0] = 50, + [0][1][2][0][RTW89_MKK][0] = 52, + [0][1][2][0][RTW89_IC][0] = 44, + [0][1][2][0][RTW89_ACMA][0] = 50, + [0][1][2][0][RTW89_FCC][2] = 72, + [0][1][2][0][RTW89_ETSI][2] = 50, + [0][1][2][0][RTW89_MKK][2] = 52, + [0][1][2][0][RTW89_IC][2] = 44, + [0][1][2][0][RTW89_ACMA][2] = 50, + [0][1][2][0][RTW89_FCC][4] = 72, + [0][1][2][0][RTW89_ETSI][4] = 50, + [0][1][2][0][RTW89_MKK][4] = 52, + [0][1][2][0][RTW89_IC][4] = 44, + [0][1][2][0][RTW89_ACMA][4] = 50, + [0][1][2][0][RTW89_FCC][6] = 72, + [0][1][2][0][RTW89_ETSI][6] = 50, + [0][1][2][0][RTW89_MKK][6] = 52, + [0][1][2][0][RTW89_IC][6] = 44, + [0][1][2][0][RTW89_ACMA][6] = 50, + [0][1][2][0][RTW89_FCC][8] = 72, + [0][1][2][0][RTW89_ETSI][8] = 50, + [0][1][2][0][RTW89_MKK][8] = 52, + [0][1][2][0][RTW89_IC][8] = 54, + [0][1][2][0][RTW89_ACMA][8] = 50, + [0][1][2][0][RTW89_FCC][10] = 72, + [0][1][2][0][RTW89_ETSI][10] = 50, + [0][1][2][0][RTW89_MKK][10] = 52, + [0][1][2][0][RTW89_IC][10] = 54, + [0][1][2][0][RTW89_ACMA][10] = 50, + [0][1][2][0][RTW89_FCC][12] = 72, + [0][1][2][0][RTW89_ETSI][12] = 50, + [0][1][2][0][RTW89_MKK][12] = 52, + [0][1][2][0][RTW89_IC][12] = 54, + [0][1][2][0][RTW89_ACMA][12] = 50, + [0][1][2][0][RTW89_FCC][14] = 72, + [0][1][2][0][RTW89_ETSI][14] = 50, + [0][1][2][0][RTW89_MKK][14] = 52, + [0][1][2][0][RTW89_IC][14] = 54, + [0][1][2][0][RTW89_ACMA][14] = 50, + [0][1][2][0][RTW89_FCC][15] = 70, + [0][1][2][0][RTW89_ETSI][15] = 50, + [0][1][2][0][RTW89_MKK][15] = 72, + [0][1][2][0][RTW89_IC][15] = 70, + [0][1][2][0][RTW89_ACMA][15] = 50, + [0][1][2][0][RTW89_FCC][17] = 72, + [0][1][2][0][RTW89_ETSI][17] = 50, + [0][1][2][0][RTW89_MKK][17] = 72, + [0][1][2][0][RTW89_IC][17] = 72, + [0][1][2][0][RTW89_ACMA][17] = 50, + [0][1][2][0][RTW89_FCC][19] = 72, + [0][1][2][0][RTW89_ETSI][19] = 50, + [0][1][2][0][RTW89_MKK][19] = 72, + [0][1][2][0][RTW89_IC][19] = 72, + [0][1][2][0][RTW89_ACMA][19] = 50, + [0][1][2][0][RTW89_FCC][21] = 72, + [0][1][2][0][RTW89_ETSI][21] = 50, + [0][1][2][0][RTW89_MKK][21] = 72, + [0][1][2][0][RTW89_IC][21] = 72, + [0][1][2][0][RTW89_ACMA][21] = 50, + [0][1][2][0][RTW89_FCC][23] = 72, + [0][1][2][0][RTW89_ETSI][23] = 50, + [0][1][2][0][RTW89_MKK][23] = 72, + [0][1][2][0][RTW89_IC][23] = 72, + [0][1][2][0][RTW89_ACMA][23] = 50, + [0][1][2][0][RTW89_FCC][25] = 72, + [0][1][2][0][RTW89_ETSI][25] = 50, + [0][1][2][0][RTW89_MKK][25] = 72, + [0][1][2][0][RTW89_IC][25] = 127, + [0][1][2][0][RTW89_ACMA][25] = 127, + [0][1][2][0][RTW89_FCC][27] = 72, + [0][1][2][0][RTW89_ETSI][27] = 50, + [0][1][2][0][RTW89_MKK][27] = 72, + [0][1][2][0][RTW89_IC][27] = 127, + [0][1][2][0][RTW89_ACMA][27] = 127, + [0][1][2][0][RTW89_FCC][29] = 72, + [0][1][2][0][RTW89_ETSI][29] = 50, + [0][1][2][0][RTW89_MKK][29] = 72, + [0][1][2][0][RTW89_IC][29] = 127, + [0][1][2][0][RTW89_ACMA][29] = 127, + [0][1][2][0][RTW89_FCC][31] = 72, + [0][1][2][0][RTW89_ETSI][31] = 50, + [0][1][2][0][RTW89_MKK][31] = 72, + [0][1][2][0][RTW89_IC][31] = 72, + [0][1][2][0][RTW89_ACMA][31] = 50, + [0][1][2][0][RTW89_FCC][33] = 72, + [0][1][2][0][RTW89_ETSI][33] = 50, + [0][1][2][0][RTW89_MKK][33] = 72, + [0][1][2][0][RTW89_IC][33] = 72, + [0][1][2][0][RTW89_ACMA][33] = 50, + [0][1][2][0][RTW89_FCC][35] = 68, + [0][1][2][0][RTW89_ETSI][35] = 50, + [0][1][2][0][RTW89_MKK][35] = 72, + [0][1][2][0][RTW89_IC][35] = 68, + [0][1][2][0][RTW89_ACMA][35] = 50, + [0][1][2][0][RTW89_FCC][37] = 72, + [0][1][2][0][RTW89_ETSI][37] = 127, + [0][1][2][0][RTW89_MKK][37] = 72, + 
[0][1][2][0][RTW89_IC][37] = 72, + [0][1][2][0][RTW89_ACMA][37] = 72, + [0][1][2][0][RTW89_FCC][38] = 80, + [0][1][2][0][RTW89_ETSI][38] = 18, + [0][1][2][0][RTW89_MKK][38] = 127, + [0][1][2][0][RTW89_IC][38] = 80, + [0][1][2][0][RTW89_ACMA][38] = 76, + [0][1][2][0][RTW89_FCC][40] = 80, + [0][1][2][0][RTW89_ETSI][40] = 18, + [0][1][2][0][RTW89_MKK][40] = 127, + [0][1][2][0][RTW89_IC][40] = 80, + [0][1][2][0][RTW89_ACMA][40] = 76, + [0][1][2][0][RTW89_FCC][42] = 80, + [0][1][2][0][RTW89_ETSI][42] = 18, + [0][1][2][0][RTW89_MKK][42] = 127, + [0][1][2][0][RTW89_IC][42] = 80, + [0][1][2][0][RTW89_ACMA][42] = 78, + [0][1][2][0][RTW89_FCC][44] = 80, + [0][1][2][0][RTW89_ETSI][44] = 18, + [0][1][2][0][RTW89_MKK][44] = 127, + [0][1][2][0][RTW89_IC][44] = 80, + [0][1][2][0][RTW89_ACMA][44] = 78, + [0][1][2][0][RTW89_FCC][46] = 80, + [0][1][2][0][RTW89_ETSI][46] = 18, + [0][1][2][0][RTW89_MKK][46] = 127, + [0][1][2][0][RTW89_IC][46] = 80, + [0][1][2][0][RTW89_ACMA][46] = 78, + [0][1][2][0][RTW89_FCC][48] = 60, + [0][1][2][0][RTW89_ETSI][48] = 127, + [0][1][2][0][RTW89_MKK][48] = 127, + [0][1][2][0][RTW89_IC][48] = 127, + [0][1][2][0][RTW89_ACMA][48] = 127, + [0][1][2][0][RTW89_FCC][50] = 60, + [0][1][2][0][RTW89_ETSI][50] = 127, + [0][1][2][0][RTW89_MKK][50] = 127, + [0][1][2][0][RTW89_IC][50] = 127, + [0][1][2][0][RTW89_ACMA][50] = 127, + [0][1][2][0][RTW89_FCC][52] = 60, + [0][1][2][0][RTW89_ETSI][52] = 127, + [0][1][2][0][RTW89_MKK][52] = 127, + [0][1][2][0][RTW89_IC][52] = 127, + [0][1][2][0][RTW89_ACMA][52] = 127, + [0][1][2][1][RTW89_FCC][0] = 70, + [0][1][2][1][RTW89_ETSI][0] = 42, + [0][1][2][1][RTW89_MKK][0] = 52, + [0][1][2][1][RTW89_IC][0] = 42, + [0][1][2][1][RTW89_ACMA][0] = 38, + [0][1][2][1][RTW89_FCC][2] = 70, + [0][1][2][1][RTW89_ETSI][2] = 42, + [0][1][2][1][RTW89_MKK][2] = 52, + [0][1][2][1][RTW89_IC][2] = 42, + [0][1][2][1][RTW89_ACMA][2] = 38, + [0][1][2][1][RTW89_FCC][4] = 70, + [0][1][2][1][RTW89_ETSI][4] = 42, + [0][1][2][1][RTW89_MKK][4] = 52, + [0][1][2][1][RTW89_IC][4] = 42, + [0][1][2][1][RTW89_ACMA][4] = 38, + [0][1][2][1][RTW89_FCC][6] = 70, + [0][1][2][1][RTW89_ETSI][6] = 42, + [0][1][2][1][RTW89_MKK][6] = 52, + [0][1][2][1][RTW89_IC][6] = 42, + [0][1][2][1][RTW89_ACMA][6] = 38, + [0][1][2][1][RTW89_FCC][8] = 70, + [0][1][2][1][RTW89_ETSI][8] = 42, + [0][1][2][1][RTW89_MKK][8] = 52, + [0][1][2][1][RTW89_IC][8] = 42, + [0][1][2][1][RTW89_ACMA][8] = 38, + [0][1][2][1][RTW89_FCC][10] = 70, + [0][1][2][1][RTW89_ETSI][10] = 42, + [0][1][2][1][RTW89_MKK][10] = 52, + [0][1][2][1][RTW89_IC][10] = 42, + [0][1][2][1][RTW89_ACMA][10] = 38, + [0][1][2][1][RTW89_FCC][12] = 70, + [0][1][2][1][RTW89_ETSI][12] = 42, + [0][1][2][1][RTW89_MKK][12] = 52, + [0][1][2][1][RTW89_IC][12] = 42, + [0][1][2][1][RTW89_ACMA][12] = 38, + [0][1][2][1][RTW89_FCC][14] = 70, + [0][1][2][1][RTW89_ETSI][14] = 42, + [0][1][2][1][RTW89_MKK][14] = 52, + [0][1][2][1][RTW89_IC][14] = 42, + [0][1][2][1][RTW89_ACMA][14] = 38, + [0][1][2][1][RTW89_FCC][15] = 70, + [0][1][2][1][RTW89_ETSI][15] = 42, + [0][1][2][1][RTW89_MKK][15] = 72, + [0][1][2][1][RTW89_IC][15] = 70, + [0][1][2][1][RTW89_ACMA][15] = 38, + [0][1][2][1][RTW89_FCC][17] = 70, + [0][1][2][1][RTW89_ETSI][17] = 42, + [0][1][2][1][RTW89_MKK][17] = 72, + [0][1][2][1][RTW89_IC][17] = 70, + [0][1][2][1][RTW89_ACMA][17] = 38, + [0][1][2][1][RTW89_FCC][19] = 70, + [0][1][2][1][RTW89_ETSI][19] = 42, + [0][1][2][1][RTW89_MKK][19] = 72, + [0][1][2][1][RTW89_IC][19] = 70, + [0][1][2][1][RTW89_ACMA][19] = 38, + [0][1][2][1][RTW89_FCC][21] = 70, + 
[0][1][2][1][RTW89_ETSI][21] = 42, + [0][1][2][1][RTW89_MKK][21] = 72, + [0][1][2][1][RTW89_IC][21] = 70, + [0][1][2][1][RTW89_ACMA][21] = 38, + [0][1][2][1][RTW89_FCC][23] = 70, + [0][1][2][1][RTW89_ETSI][23] = 42, + [0][1][2][1][RTW89_MKK][23] = 72, + [0][1][2][1][RTW89_IC][23] = 70, + [0][1][2][1][RTW89_ACMA][23] = 38, + [0][1][2][1][RTW89_FCC][25] = 68, + [0][1][2][1][RTW89_ETSI][25] = 42, + [0][1][2][1][RTW89_MKK][25] = 72, + [0][1][2][1][RTW89_IC][25] = 127, + [0][1][2][1][RTW89_ACMA][25] = 127, + [0][1][2][1][RTW89_FCC][27] = 68, + [0][1][2][1][RTW89_ETSI][27] = 42, + [0][1][2][1][RTW89_MKK][27] = 72, + [0][1][2][1][RTW89_IC][27] = 127, + [0][1][2][1][RTW89_ACMA][27] = 127, + [0][1][2][1][RTW89_FCC][29] = 68, + [0][1][2][1][RTW89_ETSI][29] = 42, + [0][1][2][1][RTW89_MKK][29] = 72, + [0][1][2][1][RTW89_IC][29] = 127, + [0][1][2][1][RTW89_ACMA][29] = 127, + [0][1][2][1][RTW89_FCC][31] = 68, + [0][1][2][1][RTW89_ETSI][31] = 42, + [0][1][2][1][RTW89_MKK][31] = 72, + [0][1][2][1][RTW89_IC][31] = 68, + [0][1][2][1][RTW89_ACMA][31] = 38, + [0][1][2][1][RTW89_FCC][33] = 68, + [0][1][2][1][RTW89_ETSI][33] = 42, + [0][1][2][1][RTW89_MKK][33] = 72, + [0][1][2][1][RTW89_IC][33] = 68, + [0][1][2][1][RTW89_ACMA][33] = 38, + [0][1][2][1][RTW89_FCC][35] = 68, + [0][1][2][1][RTW89_ETSI][35] = 42, + [0][1][2][1][RTW89_MKK][35] = 72, + [0][1][2][1][RTW89_IC][35] = 68, + [0][1][2][1][RTW89_ACMA][35] = 38, + [0][1][2][1][RTW89_FCC][37] = 70, + [0][1][2][1][RTW89_ETSI][37] = 127, + [0][1][2][1][RTW89_MKK][37] = 72, + [0][1][2][1][RTW89_IC][37] = 70, + [0][1][2][1][RTW89_ACMA][37] = 72, + [0][1][2][1][RTW89_FCC][38] = 80, + [0][1][2][1][RTW89_ETSI][38] = 8, + [0][1][2][1][RTW89_MKK][38] = 127, + [0][1][2][1][RTW89_IC][38] = 80, + [0][1][2][1][RTW89_ACMA][38] = 76, + [0][1][2][1][RTW89_FCC][40] = 80, + [0][1][2][1][RTW89_ETSI][40] = 8, + [0][1][2][1][RTW89_MKK][40] = 127, + [0][1][2][1][RTW89_IC][40] = 80, + [0][1][2][1][RTW89_ACMA][40] = 76, + [0][1][2][1][RTW89_FCC][42] = 80, + [0][1][2][1][RTW89_ETSI][42] = 8, + [0][1][2][1][RTW89_MKK][42] = 127, + [0][1][2][1][RTW89_IC][42] = 80, + [0][1][2][1][RTW89_ACMA][42] = 78, + [0][1][2][1][RTW89_FCC][44] = 80, + [0][1][2][1][RTW89_ETSI][44] = 8, + [0][1][2][1][RTW89_MKK][44] = 127, + [0][1][2][1][RTW89_IC][44] = 80, + [0][1][2][1][RTW89_ACMA][44] = 78, + [0][1][2][1][RTW89_FCC][46] = 80, + [0][1][2][1][RTW89_ETSI][46] = 8, + [0][1][2][1][RTW89_MKK][46] = 127, + [0][1][2][1][RTW89_IC][46] = 80, + [0][1][2][1][RTW89_ACMA][46] = 78, + [0][1][2][1][RTW89_FCC][48] = 60, + [0][1][2][1][RTW89_ETSI][48] = 127, + [0][1][2][1][RTW89_MKK][48] = 127, + [0][1][2][1][RTW89_IC][48] = 127, + [0][1][2][1][RTW89_ACMA][48] = 127, + [0][1][2][1][RTW89_FCC][50] = 60, + [0][1][2][1][RTW89_ETSI][50] = 127, + [0][1][2][1][RTW89_MKK][50] = 127, + [0][1][2][1][RTW89_IC][50] = 127, + [0][1][2][1][RTW89_ACMA][50] = 127, + [0][1][2][1][RTW89_FCC][52] = 60, + [0][1][2][1][RTW89_ETSI][52] = 127, + [0][1][2][1][RTW89_MKK][52] = 127, + [0][1][2][1][RTW89_IC][52] = 127, + [0][1][2][1][RTW89_ACMA][52] = 127, + [1][0][2][0][RTW89_FCC][1] = 68, + [1][0][2][0][RTW89_ETSI][1] = 66, + [1][0][2][0][RTW89_MKK][1] = 72, + [1][0][2][0][RTW89_IC][1] = 72, + [1][0][2][0][RTW89_ACMA][1] = 72, + [1][0][2][0][RTW89_FCC][5] = 80, + [1][0][2][0][RTW89_ETSI][5] = 66, + [1][0][2][0][RTW89_MKK][5] = 72, + [1][0][2][0][RTW89_IC][5] = 72, + [1][0][2][0][RTW89_ACMA][5] = 72, + [1][0][2][0][RTW89_FCC][9] = 80, + [1][0][2][0][RTW89_ETSI][9] = 66, + [1][0][2][0][RTW89_MKK][9] = 72, + [1][0][2][0][RTW89_IC][9] = 72, + 
[1][0][2][0][RTW89_ACMA][9] = 72, + [1][0][2][0][RTW89_FCC][13] = 68, + [1][0][2][0][RTW89_ETSI][13] = 66, + [1][0][2][0][RTW89_MKK][13] = 72, + [1][0][2][0][RTW89_IC][13] = 72, + [1][0][2][0][RTW89_ACMA][13] = 72, + [1][0][2][0][RTW89_FCC][16] = 66, + [1][0][2][0][RTW89_ETSI][16] = 66, + [1][0][2][0][RTW89_MKK][16] = 76, + [1][0][2][0][RTW89_IC][16] = 72, + [1][0][2][0][RTW89_ACMA][16] = 72, + [1][0][2][0][RTW89_FCC][20] = 80, + [1][0][2][0][RTW89_ETSI][20] = 66, + [1][0][2][0][RTW89_MKK][20] = 76, + [1][0][2][0][RTW89_IC][20] = 80, + [1][0][2][0][RTW89_ACMA][20] = 72, + [1][0][2][0][RTW89_FCC][24] = 80, + [1][0][2][0][RTW89_ETSI][24] = 66, + [1][0][2][0][RTW89_MKK][24] = 76, + [1][0][2][0][RTW89_IC][24] = 127, + [1][0][2][0][RTW89_ACMA][24] = 127, + [1][0][2][0][RTW89_FCC][28] = 80, + [1][0][2][0][RTW89_ETSI][28] = 66, + [1][0][2][0][RTW89_MKK][28] = 76, + [1][0][2][0][RTW89_IC][28] = 127, + [1][0][2][0][RTW89_ACMA][28] = 127, + [1][0][2][0][RTW89_FCC][32] = 78, + [1][0][2][0][RTW89_ETSI][32] = 66, + [1][0][2][0][RTW89_MKK][32] = 76, + [1][0][2][0][RTW89_IC][32] = 78, + [1][0][2][0][RTW89_ACMA][32] = 66, + [1][0][2][0][RTW89_FCC][36] = 80, + [1][0][2][0][RTW89_ETSI][36] = 127, + [1][0][2][0][RTW89_MKK][36] = 76, + [1][0][2][0][RTW89_IC][36] = 80, + [1][0][2][0][RTW89_ACMA][36] = 76, + [1][0][2][0][RTW89_FCC][39] = 80, + [1][0][2][0][RTW89_ETSI][39] = 30, + [1][0][2][0][RTW89_MKK][39] = 127, + [1][0][2][0][RTW89_IC][39] = 80, + [1][0][2][0][RTW89_ACMA][39] = 76, + [1][0][2][0][RTW89_FCC][43] = 80, + [1][0][2][0][RTW89_ETSI][43] = 30, + [1][0][2][0][RTW89_MKK][43] = 127, + [1][0][2][0][RTW89_IC][43] = 80, + [1][0][2][0][RTW89_ACMA][43] = 76, + [1][0][2][0][RTW89_FCC][47] = 80, + [1][0][2][0][RTW89_ETSI][47] = 127, + [1][0][2][0][RTW89_MKK][47] = 127, + [1][0][2][0][RTW89_IC][47] = 127, + [1][0][2][0][RTW89_ACMA][47] = 127, + [1][0][2][0][RTW89_FCC][51] = 72, + [1][0][2][0][RTW89_ETSI][51] = 127, + [1][0][2][0][RTW89_MKK][51] = 127, + [1][0][2][0][RTW89_IC][51] = 127, + [1][0][2][0][RTW89_ACMA][51] = 127, + [1][1][2][0][RTW89_FCC][1] = 64, + [1][1][2][0][RTW89_ETSI][1] = 54, + [1][1][2][0][RTW89_MKK][1] = 60, + [1][1][2][0][RTW89_IC][1] = 60, + [1][1][2][0][RTW89_ACMA][1] = 60, + [1][1][2][0][RTW89_FCC][5] = 78, + [1][1][2][0][RTW89_ETSI][5] = 54, + [1][1][2][0][RTW89_MKK][5] = 60, + [1][1][2][0][RTW89_IC][5] = 60, + [1][1][2][0][RTW89_ACMA][5] = 60, + [1][1][2][0][RTW89_FCC][9] = 78, + [1][1][2][0][RTW89_ETSI][9] = 54, + [1][1][2][0][RTW89_MKK][9] = 60, + [1][1][2][0][RTW89_IC][9] = 60, + [1][1][2][0][RTW89_ACMA][9] = 60, + [1][1][2][0][RTW89_FCC][13] = 64, + [1][1][2][0][RTW89_ETSI][13] = 54, + [1][1][2][0][RTW89_MKK][13] = 60, + [1][1][2][0][RTW89_IC][13] = 60, + [1][1][2][0][RTW89_ACMA][13] = 60, + [1][1][2][0][RTW89_FCC][16] = 58, + [1][1][2][0][RTW89_ETSI][16] = 54, + [1][1][2][0][RTW89_MKK][16] = 72, + [1][1][2][0][RTW89_IC][16] = 58, + [1][1][2][0][RTW89_ACMA][16] = 60, + [1][1][2][0][RTW89_FCC][20] = 78, + [1][1][2][0][RTW89_ETSI][20] = 54, + [1][1][2][0][RTW89_MKK][20] = 72, + [1][1][2][0][RTW89_IC][20] = 78, + [1][1][2][0][RTW89_ACMA][20] = 60, + [1][1][2][0][RTW89_FCC][24] = 78, + [1][1][2][0][RTW89_ETSI][24] = 54, + [1][1][2][0][RTW89_MKK][24] = 72, + [1][1][2][0][RTW89_IC][24] = 127, + [1][1][2][0][RTW89_ACMA][24] = 127, + [1][1][2][0][RTW89_FCC][28] = 78, + [1][1][2][0][RTW89_ETSI][28] = 54, + [1][1][2][0][RTW89_MKK][28] = 72, + [1][1][2][0][RTW89_IC][28] = 127, + [1][1][2][0][RTW89_ACMA][28] = 127, + [1][1][2][0][RTW89_FCC][32] = 70, + [1][1][2][0][RTW89_ETSI][32] = 54, + 
[1][1][2][0][RTW89_MKK][32] = 72, + [1][1][2][0][RTW89_IC][32] = 70, + [1][1][2][0][RTW89_ACMA][32] = 54, + [1][1][2][0][RTW89_FCC][36] = 78, + [1][1][2][0][RTW89_ETSI][36] = 127, + [1][1][2][0][RTW89_MKK][36] = 72, + [1][1][2][0][RTW89_IC][36] = 78, + [1][1][2][0][RTW89_ACMA][36] = 76, + [1][1][2][0][RTW89_FCC][39] = 80, + [1][1][2][0][RTW89_ETSI][39] = 18, + [1][1][2][0][RTW89_MKK][39] = 127, + [1][1][2][0][RTW89_IC][39] = 80, + [1][1][2][0][RTW89_ACMA][39] = 74, + [1][1][2][0][RTW89_FCC][43] = 80, + [1][1][2][0][RTW89_ETSI][43] = 18, + [1][1][2][0][RTW89_MKK][43] = 127, + [1][1][2][0][RTW89_IC][43] = 80, + [1][1][2][0][RTW89_ACMA][43] = 76, + [1][1][2][0][RTW89_FCC][47] = 70, + [1][1][2][0][RTW89_ETSI][47] = 127, + [1][1][2][0][RTW89_MKK][47] = 127, + [1][1][2][0][RTW89_IC][47] = 127, + [1][1][2][0][RTW89_ACMA][47] = 127, + [1][1][2][0][RTW89_FCC][51] = 68, + [1][1][2][0][RTW89_ETSI][51] = 127, + [1][1][2][0][RTW89_MKK][51] = 127, + [1][1][2][0][RTW89_IC][51] = 127, + [1][1][2][0][RTW89_ACMA][51] = 127, + [1][1][2][1][RTW89_FCC][1] = 64, + [1][1][2][1][RTW89_ETSI][1] = 42, + [1][1][2][1][RTW89_MKK][1] = 60, + [1][1][2][1][RTW89_IC][1] = 48, + [1][1][2][1][RTW89_ACMA][1] = 48, + [1][1][2][1][RTW89_FCC][5] = 70, + [1][1][2][1][RTW89_ETSI][5] = 42, + [1][1][2][1][RTW89_MKK][5] = 60, + [1][1][2][1][RTW89_IC][5] = 48, + [1][1][2][1][RTW89_ACMA][5] = 48, + [1][1][2][1][RTW89_FCC][9] = 70, + [1][1][2][1][RTW89_ETSI][9] = 42, + [1][1][2][1][RTW89_MKK][9] = 60, + [1][1][2][1][RTW89_IC][9] = 48, + [1][1][2][1][RTW89_ACMA][9] = 48, + [1][1][2][1][RTW89_FCC][13] = 64, + [1][1][2][1][RTW89_ETSI][13] = 42, + [1][1][2][1][RTW89_MKK][13] = 60, + [1][1][2][1][RTW89_IC][13] = 48, + [1][1][2][1][RTW89_ACMA][13] = 48, + [1][1][2][1][RTW89_FCC][16] = 58, + [1][1][2][1][RTW89_ETSI][16] = 42, + [1][1][2][1][RTW89_MKK][16] = 72, + [1][1][2][1][RTW89_IC][16] = 58, + [1][1][2][1][RTW89_ACMA][16] = 48, + [1][1][2][1][RTW89_FCC][20] = 70, + [1][1][2][1][RTW89_ETSI][20] = 42, + [1][1][2][1][RTW89_MKK][20] = 72, + [1][1][2][1][RTW89_IC][20] = 70, + [1][1][2][1][RTW89_ACMA][20] = 48, + [1][1][2][1][RTW89_FCC][24] = 70, + [1][1][2][1][RTW89_ETSI][24] = 42, + [1][1][2][1][RTW89_MKK][24] = 72, + [1][1][2][1][RTW89_IC][24] = 127, + [1][1][2][1][RTW89_ACMA][24] = 127, + [1][1][2][1][RTW89_FCC][28] = 70, + [1][1][2][1][RTW89_ETSI][28] = 42, + [1][1][2][1][RTW89_MKK][28] = 72, + [1][1][2][1][RTW89_IC][28] = 127, + [1][1][2][1][RTW89_ACMA][28] = 127, + [1][1][2][1][RTW89_FCC][32] = 70, + [1][1][2][1][RTW89_ETSI][32] = 42, + [1][1][2][1][RTW89_MKK][32] = 72, + [1][1][2][1][RTW89_IC][32] = 70, + [1][1][2][1][RTW89_ACMA][32] = 42, + [1][1][2][1][RTW89_FCC][36] = 70, + [1][1][2][1][RTW89_ETSI][36] = 127, + [1][1][2][1][RTW89_MKK][36] = 72, + [1][1][2][1][RTW89_IC][36] = 70, + [1][1][2][1][RTW89_ACMA][36] = 72, + [1][1][2][1][RTW89_FCC][39] = 80, + [1][1][2][1][RTW89_ETSI][39] = 8, + [1][1][2][1][RTW89_MKK][39] = 127, + [1][1][2][1][RTW89_IC][39] = 80, + [1][1][2][1][RTW89_ACMA][39] = 74, + [1][1][2][1][RTW89_FCC][43] = 80, + [1][1][2][1][RTW89_ETSI][43] = 8, + [1][1][2][1][RTW89_MKK][43] = 127, + [1][1][2][1][RTW89_IC][43] = 80, + [1][1][2][1][RTW89_ACMA][43] = 76, + [1][1][2][1][RTW89_FCC][47] = 70, + [1][1][2][1][RTW89_ETSI][47] = 127, + [1][1][2][1][RTW89_MKK][47] = 127, + [1][1][2][1][RTW89_IC][47] = 127, + [1][1][2][1][RTW89_ACMA][47] = 127, + [1][1][2][1][RTW89_FCC][51] = 68, + [1][1][2][1][RTW89_ETSI][51] = 127, + [1][1][2][1][RTW89_MKK][51] = 127, + [1][1][2][1][RTW89_IC][51] = 127, + [1][1][2][1][RTW89_ACMA][51] = 127, 
+ [2][0][2][0][RTW89_FCC][3] = 66, + [2][0][2][0][RTW89_ETSI][3] = 66, + [2][0][2][0][RTW89_MKK][3] = 66, + [2][0][2][0][RTW89_IC][3] = 64, + [2][0][2][0][RTW89_ACMA][3] = 66, + [2][0][2][0][RTW89_FCC][11] = 68, + [2][0][2][0][RTW89_ETSI][11] = 66, + [2][0][2][0][RTW89_MKK][11] = 66, + [2][0][2][0][RTW89_IC][11] = 66, + [2][0][2][0][RTW89_ACMA][11] = 66, + [2][0][2][0][RTW89_FCC][18] = 64, + [2][0][2][0][RTW89_ETSI][18] = 66, + [2][0][2][0][RTW89_MKK][18] = 72, + [2][0][2][0][RTW89_IC][18] = 64, + [2][0][2][0][RTW89_ACMA][18] = 66, + [2][0][2][0][RTW89_FCC][26] = 76, + [2][0][2][0][RTW89_ETSI][26] = 66, + [2][0][2][0][RTW89_MKK][26] = 72, + [2][0][2][0][RTW89_IC][26] = 127, + [2][0][2][0][RTW89_ACMA][26] = 127, + [2][0][2][0][RTW89_FCC][34] = 76, + [2][0][2][0][RTW89_ETSI][34] = 127, + [2][0][2][0][RTW89_MKK][34] = 72, + [2][0][2][0][RTW89_IC][34] = 76, + [2][0][2][0][RTW89_ACMA][34] = 72, + [2][0][2][0][RTW89_FCC][41] = 76, + [2][0][2][0][RTW89_ETSI][41] = 30, + [2][0][2][0][RTW89_MKK][41] = 127, + [2][0][2][0][RTW89_IC][41] = 76, + [2][0][2][0][RTW89_ACMA][41] = 72, + [2][0][2][0][RTW89_FCC][49] = 66, + [2][0][2][0][RTW89_ETSI][49] = 127, + [2][0][2][0][RTW89_MKK][49] = 127, + [2][0][2][0][RTW89_IC][49] = 127, + [2][0][2][0][RTW89_ACMA][49] = 127, + [2][1][2][0][RTW89_FCC][3] = 58, + [2][1][2][0][RTW89_ETSI][3] = 54, + [2][1][2][0][RTW89_MKK][3] = 54, + [2][1][2][0][RTW89_IC][3] = 54, + [2][1][2][0][RTW89_ACMA][3] = 54, + [2][1][2][0][RTW89_FCC][11] = 64, + [2][1][2][0][RTW89_ETSI][11] = 54, + [2][1][2][0][RTW89_MKK][11] = 54, + [2][1][2][0][RTW89_IC][11] = 54, + [2][1][2][0][RTW89_ACMA][11] = 54, + [2][1][2][0][RTW89_FCC][18] = 58, + [2][1][2][0][RTW89_ETSI][18] = 54, + [2][1][2][0][RTW89_MKK][18] = 72, + [2][1][2][0][RTW89_IC][18] = 58, + [2][1][2][0][RTW89_ACMA][18] = 54, + [2][1][2][0][RTW89_FCC][26] = 72, + [2][1][2][0][RTW89_ETSI][26] = 54, + [2][1][2][0][RTW89_MKK][26] = 72, + [2][1][2][0][RTW89_IC][26] = 127, + [2][1][2][0][RTW89_ACMA][26] = 127, + [2][1][2][0][RTW89_FCC][34] = 76, + [2][1][2][0][RTW89_ETSI][34] = 127, + [2][1][2][0][RTW89_MKK][34] = 72, + [2][1][2][0][RTW89_IC][34] = 76, + [2][1][2][0][RTW89_ACMA][34] = 72, + [2][1][2][0][RTW89_FCC][41] = 76, + [2][1][2][0][RTW89_ETSI][41] = 18, + [2][1][2][0][RTW89_MKK][41] = 127, + [2][1][2][0][RTW89_IC][41] = 76, + [2][1][2][0][RTW89_ACMA][41] = 72, + [2][1][2][0][RTW89_FCC][49] = 60, + [2][1][2][0][RTW89_ETSI][49] = 127, + [2][1][2][0][RTW89_MKK][49] = 127, + [2][1][2][0][RTW89_IC][49] = 127, + [2][1][2][0][RTW89_ACMA][49] = 127, + [2][1][2][1][RTW89_FCC][3] = 58, + [2][1][2][1][RTW89_ETSI][3] = 42, + [2][1][2][1][RTW89_MKK][3] = 54, + [2][1][2][1][RTW89_IC][3] = 42, + [2][1][2][1][RTW89_ACMA][3] = 42, + [2][1][2][1][RTW89_FCC][11] = 64, + [2][1][2][1][RTW89_ETSI][11] = 42, + [2][1][2][1][RTW89_MKK][11] = 54, + [2][1][2][1][RTW89_IC][11] = 42, + [2][1][2][1][RTW89_ACMA][11] = 42, + [2][1][2][1][RTW89_FCC][18] = 58, + [2][1][2][1][RTW89_ETSI][18] = 42, + [2][1][2][1][RTW89_MKK][18] = 72, + [2][1][2][1][RTW89_IC][18] = 58, + [2][1][2][1][RTW89_ACMA][18] = 42, + [2][1][2][1][RTW89_FCC][26] = 70, + [2][1][2][1][RTW89_ETSI][26] = 44, + [2][1][2][1][RTW89_MKK][26] = 72, + [2][1][2][1][RTW89_IC][26] = 127, + [2][1][2][1][RTW89_ACMA][26] = 127, + [2][1][2][1][RTW89_FCC][34] = 70, + [2][1][2][1][RTW89_ETSI][34] = 127, + [2][1][2][1][RTW89_MKK][34] = 72, + [2][1][2][1][RTW89_IC][34] = 70, + [2][1][2][1][RTW89_ACMA][34] = 72, + [2][1][2][1][RTW89_FCC][41] = 76, + [2][1][2][1][RTW89_ETSI][41] = 8, + [2][1][2][1][RTW89_MKK][41] = 127, + 
[2][1][2][1][RTW89_IC][41] = 76, + [2][1][2][1][RTW89_ACMA][41] = 72, + [2][1][2][1][RTW89_FCC][49] = 60, + [2][1][2][1][RTW89_ETSI][49] = 127, + [2][1][2][1][RTW89_MKK][49] = 127, + [2][1][2][1][RTW89_IC][49] = 127, + [2][1][2][1][RTW89_ACMA][49] = 127, + [3][0][2][0][RTW89_FCC][7] = 56, + [3][0][2][0][RTW89_ETSI][7] = 56, + [3][0][2][0][RTW89_MKK][7] = 56, + [3][0][2][0][RTW89_IC][7] = 56, + [3][0][2][0][RTW89_ACMA][7] = 56, + [3][0][2][0][RTW89_FCC][22] = 56, + [3][0][2][0][RTW89_ETSI][22] = 56, + [3][0][2][0][RTW89_MKK][22] = 56, + [3][0][2][0][RTW89_IC][22] = 56, + [3][0][2][0][RTW89_ACMA][22] = 56, + [3][0][2][0][RTW89_FCC][45] = 56, + [3][0][2][0][RTW89_ETSI][45] = 127, + [3][0][2][0][RTW89_MKK][45] = 127, + [3][0][2][0][RTW89_IC][45] = 127, + [3][0][2][0][RTW89_ACMA][45] = 127, + [3][1][2][0][RTW89_FCC][7] = 44, + [3][1][2][0][RTW89_ETSI][7] = 44, + [3][1][2][0][RTW89_MKK][7] = 44, + [3][1][2][0][RTW89_IC][7] = 44, + [3][1][2][0][RTW89_ACMA][7] = 44, + [3][1][2][0][RTW89_FCC][22] = 44, + [3][1][2][0][RTW89_ETSI][22] = 44, + [3][1][2][0][RTW89_MKK][22] = 44, + [3][1][2][0][RTW89_IC][22] = 44, + [3][1][2][0][RTW89_ACMA][22] = 44, + [3][1][2][0][RTW89_FCC][45] = 44, + [3][1][2][0][RTW89_ETSI][45] = 127, + [3][1][2][0][RTW89_MKK][45] = 127, + [3][1][2][0][RTW89_IC][45] = 127, + [3][1][2][0][RTW89_ACMA][45] = 127, + [3][1][2][1][RTW89_FCC][7] = 32, + [3][1][2][1][RTW89_ETSI][7] = 32, + [3][1][2][1][RTW89_MKK][7] = 32, + [3][1][2][1][RTW89_IC][7] = 32, + [3][1][2][1][RTW89_ACMA][7] = 32, + [3][1][2][1][RTW89_FCC][22] = 32, + [3][1][2][1][RTW89_ETSI][22] = 32, + [3][1][2][1][RTW89_MKK][22] = 32, + [3][1][2][1][RTW89_IC][22] = 32, + [3][1][2][1][RTW89_ACMA][22] = 32, + [3][1][2][1][RTW89_FCC][45] = 32, + [3][1][2][1][RTW89_ETSI][45] = 127, + [3][1][2][1][RTW89_MKK][45] = 127, + [3][1][2][1][RTW89_IC][45] = 127, + [3][1][2][1][RTW89_ACMA][45] = 127, +}; + +const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = { + [0][0][1][0][RTW89_WW][0] = 72, + [0][0][1][0][RTW89_WW][2] = 72, + [0][0][1][0][RTW89_WW][4] = 72, + [0][0][1][0][RTW89_WW][6] = 72, + [0][0][1][0][RTW89_WW][8] = 72, + [0][0][1][0][RTW89_WW][10] = 72, + [0][0][1][0][RTW89_WW][12] = 72, + [0][0][1][0][RTW89_WW][14] = 72, + [0][0][1][0][RTW89_WW][15] = 72, + [0][0][1][0][RTW89_WW][17] = 72, + [0][0][1][0][RTW89_WW][19] = 72, + [0][0][1][0][RTW89_WW][21] = 72, + [0][0][1][0][RTW89_WW][23] = 72, + [0][0][1][0][RTW89_WW][25] = 72, + [0][0][1][0][RTW89_WW][27] = 72, + [0][0][1][0][RTW89_WW][29] = 72, + [0][0][1][0][RTW89_WW][30] = 72, + [0][0][1][0][RTW89_WW][32] = 72, + [0][0][1][0][RTW89_WW][34] = 72, + [0][0][1][0][RTW89_WW][36] = 72, + [0][0][1][0][RTW89_WW][38] = 72, + [0][0][1][0][RTW89_WW][40] = 72, + [0][0][1][0][RTW89_WW][42] = 72, + [0][0][1][0][RTW89_WW][44] = 72, + [0][0][1][0][RTW89_WW][45] = 72, + [0][0][1][0][RTW89_WW][47] = 72, + [0][0][1][0][RTW89_WW][49] = 72, + [0][0][1][0][RTW89_WW][51] = 72, + [0][0][1][0][RTW89_WW][53] = 72, + [0][0][1][0][RTW89_WW][55] = 72, + [0][0][1][0][RTW89_WW][57] = 72, + [0][0][1][0][RTW89_WW][59] = 72, + [0][0][1][0][RTW89_WW][60] = 72, + [0][0][1][0][RTW89_WW][62] = 72, + [0][0][1][0][RTW89_WW][64] = 72, + [0][0][1][0][RTW89_WW][66] = 72, + [0][0][1][0][RTW89_WW][68] = 72, + [0][0][1][0][RTW89_WW][70] = 72, + [0][0][1][0][RTW89_WW][72] = 72, + [0][0][1][0][RTW89_WW][74] = 72, + [0][0][1][0][RTW89_WW][75] = 72, + [0][0][1][0][RTW89_WW][77] = 72, + [0][0][1][0][RTW89_WW][79] = 72, + 
[0][0][1][0][RTW89_WW][81] = 72, + [0][0][1][0][RTW89_WW][83] = 72, + [0][0][1][0][RTW89_WW][85] = 72, + [0][0][1][0][RTW89_WW][87] = 72, + [0][0][1][0][RTW89_WW][89] = 72, + [0][0][1][0][RTW89_WW][90] = 72, + [0][0][1][0][RTW89_WW][92] = 72, + [0][0][1][0][RTW89_WW][94] = 72, + [0][0][1][0][RTW89_WW][96] = 72, + [0][0][1][0][RTW89_WW][98] = 72, + [0][0][1][0][RTW89_WW][100] = 72, + [0][0][1][0][RTW89_WW][102] = 72, + [0][0][1][0][RTW89_WW][104] = 72, + [0][0][1][0][RTW89_WW][105] = 72, + [0][0][1][0][RTW89_WW][107] = 72, + [0][0][1][0][RTW89_WW][109] = 72, + [0][0][1][0][RTW89_WW][111] = 0, + [0][0][1][0][RTW89_WW][113] = 0, + [0][0][1][0][RTW89_WW][115] = 0, + [0][0][1][0][RTW89_WW][117] = 0, + [0][0][1][0][RTW89_WW][119] = 0, + [0][1][1][0][RTW89_WW][0] = 60, + [0][1][1][0][RTW89_WW][2] = 60, + [0][1][1][0][RTW89_WW][4] = 60, + [0][1][1][0][RTW89_WW][6] = 60, + [0][1][1][0][RTW89_WW][8] = 60, + [0][1][1][0][RTW89_WW][10] = 60, + [0][1][1][0][RTW89_WW][12] = 60, + [0][1][1][0][RTW89_WW][14] = 60, + [0][1][1][0][RTW89_WW][15] = 60, + [0][1][1][0][RTW89_WW][17] = 60, + [0][1][1][0][RTW89_WW][19] = 60, + [0][1][1][0][RTW89_WW][21] = 60, + [0][1][1][0][RTW89_WW][23] = 60, + [0][1][1][0][RTW89_WW][25] = 60, + [0][1][1][0][RTW89_WW][27] = 60, + [0][1][1][0][RTW89_WW][29] = 60, + [0][1][1][0][RTW89_WW][30] = 60, + [0][1][1][0][RTW89_WW][32] = 60, + [0][1][1][0][RTW89_WW][34] = 60, + [0][1][1][0][RTW89_WW][36] = 60, + [0][1][1][0][RTW89_WW][38] = 60, + [0][1][1][0][RTW89_WW][40] = 60, + [0][1][1][0][RTW89_WW][42] = 60, + [0][1][1][0][RTW89_WW][44] = 60, + [0][1][1][0][RTW89_WW][45] = 60, + [0][1][1][0][RTW89_WW][47] = 60, + [0][1][1][0][RTW89_WW][49] = 60, + [0][1][1][0][RTW89_WW][51] = 60, + [0][1][1][0][RTW89_WW][53] = 60, + [0][1][1][0][RTW89_WW][55] = 60, + [0][1][1][0][RTW89_WW][57] = 60, + [0][1][1][0][RTW89_WW][59] = 60, + [0][1][1][0][RTW89_WW][60] = 60, + [0][1][1][0][RTW89_WW][62] = 60, + [0][1][1][0][RTW89_WW][64] = 60, + [0][1][1][0][RTW89_WW][66] = 60, + [0][1][1][0][RTW89_WW][68] = 60, + [0][1][1][0][RTW89_WW][70] = 60, + [0][1][1][0][RTW89_WW][72] = 60, + [0][1][1][0][RTW89_WW][74] = 60, + [0][1][1][0][RTW89_WW][75] = 60, + [0][1][1][0][RTW89_WW][77] = 60, + [0][1][1][0][RTW89_WW][79] = 60, + [0][1][1][0][RTW89_WW][81] = 60, + [0][1][1][0][RTW89_WW][83] = 60, + [0][1][1][0][RTW89_WW][85] = 60, + [0][1][1][0][RTW89_WW][87] = 60, + [0][1][1][0][RTW89_WW][89] = 60, + [0][1][1][0][RTW89_WW][90] = 60, + [0][1][1][0][RTW89_WW][92] = 60, + [0][1][1][0][RTW89_WW][94] = 60, + [0][1][1][0][RTW89_WW][96] = 60, + [0][1][1][0][RTW89_WW][98] = 60, + [0][1][1][0][RTW89_WW][100] = 60, + [0][1][1][0][RTW89_WW][102] = 60, + [0][1][1][0][RTW89_WW][104] = 60, + [0][1][1][0][RTW89_WW][105] = 60, + [0][1][1][0][RTW89_WW][107] = 60, + [0][1][1][0][RTW89_WW][109] = 60, + [0][1][1][0][RTW89_WW][111] = 0, + [0][1][1][0][RTW89_WW][113] = 0, + [0][1][1][0][RTW89_WW][115] = 0, + [0][1][1][0][RTW89_WW][117] = 0, + [0][1][1][0][RTW89_WW][119] = 0, + [0][0][2][0][RTW89_WW][0] = 72, + [0][0][2][0][RTW89_WW][2] = 72, + [0][0][2][0][RTW89_WW][4] = 72, + [0][0][2][0][RTW89_WW][6] = 72, + [0][0][2][0][RTW89_WW][8] = 72, + [0][0][2][0][RTW89_WW][10] = 72, + [0][0][2][0][RTW89_WW][12] = 72, + [0][0][2][0][RTW89_WW][14] = 72, + [0][0][2][0][RTW89_WW][15] = 72, + [0][0][2][0][RTW89_WW][17] = 72, + [0][0][2][0][RTW89_WW][19] = 72, + [0][0][2][0][RTW89_WW][21] = 72, + [0][0][2][0][RTW89_WW][23] = 72, + [0][0][2][0][RTW89_WW][25] = 72, + [0][0][2][0][RTW89_WW][27] = 72, + [0][0][2][0][RTW89_WW][29] = 72, + 
[0][0][2][0][RTW89_WW][30] = 72, + [0][0][2][0][RTW89_WW][32] = 72, + [0][0][2][0][RTW89_WW][34] = 72, + [0][0][2][0][RTW89_WW][36] = 72, + [0][0][2][0][RTW89_WW][38] = 72, + [0][0][2][0][RTW89_WW][40] = 72, + [0][0][2][0][RTW89_WW][42] = 72, + [0][0][2][0][RTW89_WW][44] = 72, + [0][0][2][0][RTW89_WW][45] = 72, + [0][0][2][0][RTW89_WW][47] = 72, + [0][0][2][0][RTW89_WW][49] = 72, + [0][0][2][0][RTW89_WW][51] = 72, + [0][0][2][0][RTW89_WW][53] = 72, + [0][0][2][0][RTW89_WW][55] = 72, + [0][0][2][0][RTW89_WW][57] = 72, + [0][0][2][0][RTW89_WW][59] = 72, + [0][0][2][0][RTW89_WW][60] = 72, + [0][0][2][0][RTW89_WW][62] = 72, + [0][0][2][0][RTW89_WW][64] = 72, + [0][0][2][0][RTW89_WW][66] = 72, + [0][0][2][0][RTW89_WW][68] = 72, + [0][0][2][0][RTW89_WW][70] = 72, + [0][0][2][0][RTW89_WW][72] = 72, + [0][0][2][0][RTW89_WW][74] = 72, + [0][0][2][0][RTW89_WW][75] = 72, + [0][0][2][0][RTW89_WW][77] = 72, + [0][0][2][0][RTW89_WW][79] = 72, + [0][0][2][0][RTW89_WW][81] = 72, + [0][0][2][0][RTW89_WW][83] = 72, + [0][0][2][0][RTW89_WW][85] = 72, + [0][0][2][0][RTW89_WW][87] = 72, + [0][0][2][0][RTW89_WW][89] = 72, + [0][0][2][0][RTW89_WW][90] = 72, + [0][0][2][0][RTW89_WW][92] = 72, + [0][0][2][0][RTW89_WW][94] = 72, + [0][0][2][0][RTW89_WW][96] = 72, + [0][0][2][0][RTW89_WW][98] = 72, + [0][0][2][0][RTW89_WW][100] = 72, + [0][0][2][0][RTW89_WW][102] = 72, + [0][0][2][0][RTW89_WW][104] = 72, + [0][0][2][0][RTW89_WW][105] = 72, + [0][0][2][0][RTW89_WW][107] = 72, + [0][0][2][0][RTW89_WW][109] = 72, + [0][0][2][0][RTW89_WW][111] = 0, + [0][0][2][0][RTW89_WW][113] = 0, + [0][0][2][0][RTW89_WW][115] = 0, + [0][0][2][0][RTW89_WW][117] = 0, + [0][0][2][0][RTW89_WW][119] = 0, + [0][1][2][0][RTW89_WW][0] = 60, + [0][1][2][0][RTW89_WW][2] = 60, + [0][1][2][0][RTW89_WW][4] = 60, + [0][1][2][0][RTW89_WW][6] = 60, + [0][1][2][0][RTW89_WW][8] = 60, + [0][1][2][0][RTW89_WW][10] = 60, + [0][1][2][0][RTW89_WW][12] = 60, + [0][1][2][0][RTW89_WW][14] = 60, + [0][1][2][0][RTW89_WW][15] = 60, + [0][1][2][0][RTW89_WW][17] = 60, + [0][1][2][0][RTW89_WW][19] = 60, + [0][1][2][0][RTW89_WW][21] = 60, + [0][1][2][0][RTW89_WW][23] = 60, + [0][1][2][0][RTW89_WW][25] = 60, + [0][1][2][0][RTW89_WW][27] = 60, + [0][1][2][0][RTW89_WW][29] = 60, + [0][1][2][0][RTW89_WW][30] = 60, + [0][1][2][0][RTW89_WW][32] = 60, + [0][1][2][0][RTW89_WW][34] = 60, + [0][1][2][0][RTW89_WW][36] = 60, + [0][1][2][0][RTW89_WW][38] = 60, + [0][1][2][0][RTW89_WW][40] = 60, + [0][1][2][0][RTW89_WW][42] = 60, + [0][1][2][0][RTW89_WW][44] = 60, + [0][1][2][0][RTW89_WW][45] = 60, + [0][1][2][0][RTW89_WW][47] = 60, + [0][1][2][0][RTW89_WW][49] = 60, + [0][1][2][0][RTW89_WW][51] = 60, + [0][1][2][0][RTW89_WW][53] = 60, + [0][1][2][0][RTW89_WW][55] = 60, + [0][1][2][0][RTW89_WW][57] = 60, + [0][1][2][0][RTW89_WW][59] = 60, + [0][1][2][0][RTW89_WW][60] = 60, + [0][1][2][0][RTW89_WW][62] = 60, + [0][1][2][0][RTW89_WW][64] = 60, + [0][1][2][0][RTW89_WW][66] = 60, + [0][1][2][0][RTW89_WW][68] = 60, + [0][1][2][0][RTW89_WW][70] = 60, + [0][1][2][0][RTW89_WW][72] = 60, + [0][1][2][0][RTW89_WW][74] = 60, + [0][1][2][0][RTW89_WW][75] = 60, + [0][1][2][0][RTW89_WW][77] = 60, + [0][1][2][0][RTW89_WW][79] = 60, + [0][1][2][0][RTW89_WW][81] = 60, + [0][1][2][0][RTW89_WW][83] = 60, + [0][1][2][0][RTW89_WW][85] = 60, + [0][1][2][0][RTW89_WW][87] = 60, + [0][1][2][0][RTW89_WW][89] = 60, + [0][1][2][0][RTW89_WW][90] = 60, + [0][1][2][0][RTW89_WW][92] = 60, + [0][1][2][0][RTW89_WW][94] = 60, + [0][1][2][0][RTW89_WW][96] = 60, + [0][1][2][0][RTW89_WW][98] = 60, + 
[0][1][2][0][RTW89_WW][100] = 60, + [0][1][2][0][RTW89_WW][102] = 60, + [0][1][2][0][RTW89_WW][104] = 60, + [0][1][2][0][RTW89_WW][105] = 60, + [0][1][2][0][RTW89_WW][107] = 60, + [0][1][2][0][RTW89_WW][109] = 60, + [0][1][2][0][RTW89_WW][111] = 0, + [0][1][2][0][RTW89_WW][113] = 0, + [0][1][2][0][RTW89_WW][115] = 0, + [0][1][2][0][RTW89_WW][117] = 0, + [0][1][2][0][RTW89_WW][119] = 0, + [0][1][2][1][RTW89_WW][0] = 48, + [0][1][2][1][RTW89_WW][2] = 48, + [0][1][2][1][RTW89_WW][4] = 48, + [0][1][2][1][RTW89_WW][6] = 48, + [0][1][2][1][RTW89_WW][8] = 48, + [0][1][2][1][RTW89_WW][10] = 48, + [0][1][2][1][RTW89_WW][12] = 48, + [0][1][2][1][RTW89_WW][14] = 48, + [0][1][2][1][RTW89_WW][15] = 48, + [0][1][2][1][RTW89_WW][17] = 48, + [0][1][2][1][RTW89_WW][19] = 48, + [0][1][2][1][RTW89_WW][21] = 48, + [0][1][2][1][RTW89_WW][23] = 48, + [0][1][2][1][RTW89_WW][25] = 48, + [0][1][2][1][RTW89_WW][27] = 48, + [0][1][2][1][RTW89_WW][29] = 48, + [0][1][2][1][RTW89_WW][30] = 48, + [0][1][2][1][RTW89_WW][32] = 48, + [0][1][2][1][RTW89_WW][34] = 48, + [0][1][2][1][RTW89_WW][36] = 48, + [0][1][2][1][RTW89_WW][38] = 48, + [0][1][2][1][RTW89_WW][40] = 48, + [0][1][2][1][RTW89_WW][42] = 48, + [0][1][2][1][RTW89_WW][44] = 48, + [0][1][2][1][RTW89_WW][45] = 48, + [0][1][2][1][RTW89_WW][47] = 48, + [0][1][2][1][RTW89_WW][49] = 48, + [0][1][2][1][RTW89_WW][51] = 48, + [0][1][2][1][RTW89_WW][53] = 48, + [0][1][2][1][RTW89_WW][55] = 48, + [0][1][2][1][RTW89_WW][57] = 48, + [0][1][2][1][RTW89_WW][59] = 48, + [0][1][2][1][RTW89_WW][60] = 48, + [0][1][2][1][RTW89_WW][62] = 48, + [0][1][2][1][RTW89_WW][64] = 48, + [0][1][2][1][RTW89_WW][66] = 48, + [0][1][2][1][RTW89_WW][68] = 48, + [0][1][2][1][RTW89_WW][70] = 48, + [0][1][2][1][RTW89_WW][72] = 48, + [0][1][2][1][RTW89_WW][74] = 48, + [0][1][2][1][RTW89_WW][75] = 48, + [0][1][2][1][RTW89_WW][77] = 48, + [0][1][2][1][RTW89_WW][79] = 48, + [0][1][2][1][RTW89_WW][81] = 48, + [0][1][2][1][RTW89_WW][83] = 48, + [0][1][2][1][RTW89_WW][85] = 48, + [0][1][2][1][RTW89_WW][87] = 48, + [0][1][2][1][RTW89_WW][89] = 48, + [0][1][2][1][RTW89_WW][90] = 48, + [0][1][2][1][RTW89_WW][92] = 48, + [0][1][2][1][RTW89_WW][94] = 48, + [0][1][2][1][RTW89_WW][96] = 48, + [0][1][2][1][RTW89_WW][98] = 48, + [0][1][2][1][RTW89_WW][100] = 48, + [0][1][2][1][RTW89_WW][102] = 48, + [0][1][2][1][RTW89_WW][104] = 48, + [0][1][2][1][RTW89_WW][105] = 48, + [0][1][2][1][RTW89_WW][107] = 48, + [0][1][2][1][RTW89_WW][109] = 48, + [0][1][2][1][RTW89_WW][111] = 0, + [0][1][2][1][RTW89_WW][113] = 0, + [0][1][2][1][RTW89_WW][115] = 0, + [0][1][2][1][RTW89_WW][117] = 0, + [0][1][2][1][RTW89_WW][119] = 0, + [1][0][2][0][RTW89_WW][1] = 72, + [1][0][2][0][RTW89_WW][5] = 72, + [1][0][2][0][RTW89_WW][9] = 72, + [1][0][2][0][RTW89_WW][13] = 72, + [1][0][2][0][RTW89_WW][16] = 72, + [1][0][2][0][RTW89_WW][20] = 72, + [1][0][2][0][RTW89_WW][24] = 72, + [1][0][2][0][RTW89_WW][28] = 72, + [1][0][2][0][RTW89_WW][31] = 72, + [1][0][2][0][RTW89_WW][35] = 72, + [1][0][2][0][RTW89_WW][39] = 72, + [1][0][2][0][RTW89_WW][43] = 72, + [1][0][2][0][RTW89_WW][46] = 72, + [1][0][2][0][RTW89_WW][50] = 72, + [1][0][2][0][RTW89_WW][54] = 72, + [1][0][2][0][RTW89_WW][58] = 72, + [1][0][2][0][RTW89_WW][61] = 72, + [1][0][2][0][RTW89_WW][65] = 72, + [1][0][2][0][RTW89_WW][69] = 72, + [1][0][2][0][RTW89_WW][73] = 72, + [1][0][2][0][RTW89_WW][76] = 72, + [1][0][2][0][RTW89_WW][80] = 72, + [1][0][2][0][RTW89_WW][84] = 72, + [1][0][2][0][RTW89_WW][88] = 72, + [1][0][2][0][RTW89_WW][91] = 72, + [1][0][2][0][RTW89_WW][95] = 72, + 
[1][0][2][0][RTW89_WW][99] = 72, + [1][0][2][0][RTW89_WW][103] = 72, + [1][0][2][0][RTW89_WW][106] = 72, + [1][0][2][0][RTW89_WW][110] = 0, + [1][0][2][0][RTW89_WW][114] = 0, + [1][0][2][0][RTW89_WW][118] = 0, + [1][1][2][0][RTW89_WW][1] = 60, + [1][1][2][0][RTW89_WW][5] = 60, + [1][1][2][0][RTW89_WW][9] = 60, + [1][1][2][0][RTW89_WW][13] = 60, + [1][1][2][0][RTW89_WW][16] = 60, + [1][1][2][0][RTW89_WW][20] = 60, + [1][1][2][0][RTW89_WW][24] = 60, + [1][1][2][0][RTW89_WW][28] = 60, + [1][1][2][0][RTW89_WW][31] = 60, + [1][1][2][0][RTW89_WW][35] = 60, + [1][1][2][0][RTW89_WW][39] = 60, + [1][1][2][0][RTW89_WW][43] = 60, + [1][1][2][0][RTW89_WW][46] = 60, + [1][1][2][0][RTW89_WW][50] = 60, + [1][1][2][0][RTW89_WW][54] = 60, + [1][1][2][0][RTW89_WW][58] = 60, + [1][1][2][0][RTW89_WW][61] = 60, + [1][1][2][0][RTW89_WW][65] = 60, + [1][1][2][0][RTW89_WW][69] = 60, + [1][1][2][0][RTW89_WW][73] = 60, + [1][1][2][0][RTW89_WW][76] = 60, + [1][1][2][0][RTW89_WW][80] = 60, + [1][1][2][0][RTW89_WW][84] = 60, + [1][1][2][0][RTW89_WW][88] = 60, + [1][1][2][0][RTW89_WW][91] = 60, + [1][1][2][0][RTW89_WW][95] = 60, + [1][1][2][0][RTW89_WW][99] = 60, + [1][1][2][0][RTW89_WW][103] = 60, + [1][1][2][0][RTW89_WW][106] = 60, + [1][1][2][0][RTW89_WW][110] = 0, + [1][1][2][0][RTW89_WW][114] = 0, + [1][1][2][0][RTW89_WW][118] = 0, + [1][1][2][1][RTW89_WW][1] = 48, + [1][1][2][1][RTW89_WW][5] = 48, + [1][1][2][1][RTW89_WW][9] = 48, + [1][1][2][1][RTW89_WW][13] = 48, + [1][1][2][1][RTW89_WW][16] = 48, + [1][1][2][1][RTW89_WW][20] = 48, + [1][1][2][1][RTW89_WW][24] = 48, + [1][1][2][1][RTW89_WW][28] = 48, + [1][1][2][1][RTW89_WW][31] = 48, + [1][1][2][1][RTW89_WW][35] = 48, + [1][1][2][1][RTW89_WW][39] = 48, + [1][1][2][1][RTW89_WW][43] = 48, + [1][1][2][1][RTW89_WW][46] = 48, + [1][1][2][1][RTW89_WW][50] = 48, + [1][1][2][1][RTW89_WW][54] = 48, + [1][1][2][1][RTW89_WW][58] = 48, + [1][1][2][1][RTW89_WW][61] = 48, + [1][1][2][1][RTW89_WW][65] = 48, + [1][1][2][1][RTW89_WW][69] = 48, + [1][1][2][1][RTW89_WW][73] = 48, + [1][1][2][1][RTW89_WW][76] = 48, + [1][1][2][1][RTW89_WW][80] = 48, + [1][1][2][1][RTW89_WW][84] = 48, + [1][1][2][1][RTW89_WW][88] = 48, + [1][1][2][1][RTW89_WW][91] = 48, + [1][1][2][1][RTW89_WW][95] = 48, + [1][1][2][1][RTW89_WW][99] = 48, + [1][1][2][1][RTW89_WW][103] = 48, + [1][1][2][1][RTW89_WW][106] = 48, + [1][1][2][1][RTW89_WW][110] = 0, + [1][1][2][1][RTW89_WW][114] = 0, + [1][1][2][1][RTW89_WW][118] = 0, + [2][0][2][0][RTW89_WW][3] = 64, + [2][0][2][0][RTW89_WW][11] = 64, + [2][0][2][0][RTW89_WW][18] = 64, + [2][0][2][0][RTW89_WW][26] = 64, + [2][0][2][0][RTW89_WW][33] = 64, + [2][0][2][0][RTW89_WW][41] = 64, + [2][0][2][0][RTW89_WW][48] = 64, + [2][0][2][0][RTW89_WW][56] = 64, + [2][0][2][0][RTW89_WW][63] = 64, + [2][0][2][0][RTW89_WW][71] = 64, + [2][0][2][0][RTW89_WW][78] = 64, + [2][0][2][0][RTW89_WW][86] = 64, + [2][0][2][0][RTW89_WW][93] = 64, + [2][0][2][0][RTW89_WW][101] = 64, + [2][0][2][0][RTW89_WW][108] = 0, + [2][0][2][0][RTW89_WW][116] = 0, + [2][1][2][0][RTW89_WW][3] = 52, + [2][1][2][0][RTW89_WW][11] = 52, + [2][1][2][0][RTW89_WW][18] = 52, + [2][1][2][0][RTW89_WW][26] = 52, + [2][1][2][0][RTW89_WW][33] = 52, + [2][1][2][0][RTW89_WW][41] = 52, + [2][1][2][0][RTW89_WW][48] = 52, + [2][1][2][0][RTW89_WW][56] = 52, + [2][1][2][0][RTW89_WW][63] = 52, + [2][1][2][0][RTW89_WW][71] = 52, + [2][1][2][0][RTW89_WW][78] = 52, + [2][1][2][0][RTW89_WW][86] = 52, + [2][1][2][0][RTW89_WW][93] = 52, + [2][1][2][0][RTW89_WW][101] = 52, + [2][1][2][0][RTW89_WW][108] = 0, + 
[2][1][2][0][RTW89_WW][116] = 0, + [2][1][2][1][RTW89_WW][3] = 40, + [2][1][2][1][RTW89_WW][11] = 40, + [2][1][2][1][RTW89_WW][18] = 40, + [2][1][2][1][RTW89_WW][26] = 40, + [2][1][2][1][RTW89_WW][33] = 40, + [2][1][2][1][RTW89_WW][41] = 40, + [2][1][2][1][RTW89_WW][48] = 40, + [2][1][2][1][RTW89_WW][56] = 40, + [2][1][2][1][RTW89_WW][63] = 40, + [2][1][2][1][RTW89_WW][71] = 40, + [2][1][2][1][RTW89_WW][78] = 40, + [2][1][2][1][RTW89_WW][86] = 40, + [2][1][2][1][RTW89_WW][93] = 40, + [2][1][2][1][RTW89_WW][101] = 40, + [2][1][2][1][RTW89_WW][108] = 0, + [2][1][2][1][RTW89_WW][116] = 0, + [3][0][2][0][RTW89_WW][7] = 56, + [3][0][2][0][RTW89_WW][22] = 56, + [3][0][2][0][RTW89_WW][37] = 56, + [3][0][2][0][RTW89_WW][52] = 56, + [3][0][2][0][RTW89_WW][67] = 56, + [3][0][2][0][RTW89_WW][82] = 56, + [3][0][2][0][RTW89_WW][97] = 56, + [3][0][2][0][RTW89_WW][112] = 0, + [3][1][2][0][RTW89_WW][7] = 44, + [3][1][2][0][RTW89_WW][22] = 44, + [3][1][2][0][RTW89_WW][37] = 44, + [3][1][2][0][RTW89_WW][52] = 44, + [3][1][2][0][RTW89_WW][67] = 44, + [3][1][2][0][RTW89_WW][82] = 44, + [3][1][2][0][RTW89_WW][97] = 44, + [3][1][2][0][RTW89_WW][112] = 0, + [3][1][2][1][RTW89_WW][7] = 32, + [3][1][2][1][RTW89_WW][22] = 32, + [3][1][2][1][RTW89_WW][37] = 32, + [3][1][2][1][RTW89_WW][52] = 32, + [3][1][2][1][RTW89_WW][67] = 32, + [3][1][2][1][RTW89_WW][82] = 32, + [3][1][2][1][RTW89_WW][97] = 32, + [3][1][2][1][RTW89_WW][112] = 0, + [0][0][1][0][RTW89_FCC][0] = 72, + [0][0][1][0][RTW89_FCC][2] = 72, + [0][0][1][0][RTW89_FCC][4] = 72, + [0][0][1][0][RTW89_FCC][6] = 72, + [0][0][1][0][RTW89_FCC][8] = 72, + [0][0][1][0][RTW89_FCC][10] = 72, + [0][0][1][0][RTW89_FCC][12] = 72, + [0][0][1][0][RTW89_FCC][14] = 72, + [0][0][1][0][RTW89_FCC][15] = 72, + [0][0][1][0][RTW89_FCC][17] = 72, + [0][0][1][0][RTW89_FCC][19] = 72, + [0][0][1][0][RTW89_FCC][21] = 72, + [0][0][1][0][RTW89_FCC][23] = 72, + [0][0][1][0][RTW89_FCC][25] = 72, + [0][0][1][0][RTW89_FCC][27] = 72, + [0][0][1][0][RTW89_FCC][29] = 72, + [0][0][1][0][RTW89_FCC][30] = 72, + [0][0][1][0][RTW89_FCC][32] = 72, + [0][0][1][0][RTW89_FCC][34] = 72, + [0][0][1][0][RTW89_FCC][36] = 72, + [0][0][1][0][RTW89_FCC][38] = 72, + [0][0][1][0][RTW89_FCC][40] = 72, + [0][0][1][0][RTW89_FCC][42] = 72, + [0][0][1][0][RTW89_FCC][44] = 72, + [0][0][1][0][RTW89_FCC][45] = 72, + [0][0][1][0][RTW89_FCC][47] = 72, + [0][0][1][0][RTW89_FCC][49] = 72, + [0][0][1][0][RTW89_FCC][51] = 72, + [0][0][1][0][RTW89_FCC][53] = 72, + [0][0][1][0][RTW89_FCC][55] = 72, + [0][0][1][0][RTW89_FCC][57] = 72, + [0][0][1][0][RTW89_FCC][59] = 72, + [0][0][1][0][RTW89_FCC][60] = 72, + [0][0][1][0][RTW89_FCC][62] = 72, + [0][0][1][0][RTW89_FCC][64] = 72, + [0][0][1][0][RTW89_FCC][66] = 72, + [0][0][1][0][RTW89_FCC][68] = 72, + [0][0][1][0][RTW89_FCC][70] = 72, + [0][0][1][0][RTW89_FCC][72] = 72, + [0][0][1][0][RTW89_FCC][74] = 72, + [0][0][1][0][RTW89_FCC][75] = 72, + [0][0][1][0][RTW89_FCC][77] = 72, + [0][0][1][0][RTW89_FCC][79] = 72, + [0][0][1][0][RTW89_FCC][81] = 72, + [0][0][1][0][RTW89_FCC][83] = 72, + [0][0][1][0][RTW89_FCC][85] = 72, + [0][0][1][0][RTW89_FCC][87] = 72, + [0][0][1][0][RTW89_FCC][89] = 72, + [0][0][1][0][RTW89_FCC][90] = 72, + [0][0][1][0][RTW89_FCC][92] = 72, + [0][0][1][0][RTW89_FCC][94] = 72, + [0][0][1][0][RTW89_FCC][96] = 72, + [0][0][1][0][RTW89_FCC][98] = 72, + [0][0][1][0][RTW89_FCC][100] = 72, + [0][0][1][0][RTW89_FCC][102] = 72, + [0][0][1][0][RTW89_FCC][104] = 72, + [0][0][1][0][RTW89_FCC][105] = 72, + [0][0][1][0][RTW89_FCC][107] = 72, + [0][0][1][0][RTW89_FCC][109] = 72, 
+ [0][0][1][0][RTW89_FCC][111] = 127, + [0][0][1][0][RTW89_FCC][113] = 127, + [0][0][1][0][RTW89_FCC][115] = 127, + [0][0][1][0][RTW89_FCC][117] = 127, + [0][0][1][0][RTW89_FCC][119] = 127, + [0][1][1][0][RTW89_FCC][0] = 60, + [0][1][1][0][RTW89_FCC][2] = 60, + [0][1][1][0][RTW89_FCC][4] = 60, + [0][1][1][0][RTW89_FCC][6] = 60, + [0][1][1][0][RTW89_FCC][8] = 60, + [0][1][1][0][RTW89_FCC][10] = 60, + [0][1][1][0][RTW89_FCC][12] = 60, + [0][1][1][0][RTW89_FCC][14] = 60, + [0][1][1][0][RTW89_FCC][15] = 60, + [0][1][1][0][RTW89_FCC][17] = 60, + [0][1][1][0][RTW89_FCC][19] = 60, + [0][1][1][0][RTW89_FCC][21] = 60, + [0][1][1][0][RTW89_FCC][23] = 60, + [0][1][1][0][RTW89_FCC][25] = 60, + [0][1][1][0][RTW89_FCC][27] = 60, + [0][1][1][0][RTW89_FCC][29] = 60, + [0][1][1][0][RTW89_FCC][30] = 60, + [0][1][1][0][RTW89_FCC][32] = 60, + [0][1][1][0][RTW89_FCC][34] = 60, + [0][1][1][0][RTW89_FCC][36] = 60, + [0][1][1][0][RTW89_FCC][38] = 60, + [0][1][1][0][RTW89_FCC][40] = 60, + [0][1][1][0][RTW89_FCC][42] = 60, + [0][1][1][0][RTW89_FCC][44] = 60, + [0][1][1][0][RTW89_FCC][45] = 60, + [0][1][1][0][RTW89_FCC][47] = 60, + [0][1][1][0][RTW89_FCC][49] = 60, + [0][1][1][0][RTW89_FCC][51] = 60, + [0][1][1][0][RTW89_FCC][53] = 60, + [0][1][1][0][RTW89_FCC][55] = 60, + [0][1][1][0][RTW89_FCC][57] = 60, + [0][1][1][0][RTW89_FCC][59] = 60, + [0][1][1][0][RTW89_FCC][60] = 60, + [0][1][1][0][RTW89_FCC][62] = 60, + [0][1][1][0][RTW89_FCC][64] = 60, + [0][1][1][0][RTW89_FCC][66] = 60, + [0][1][1][0][RTW89_FCC][68] = 60, + [0][1][1][0][RTW89_FCC][70] = 60, + [0][1][1][0][RTW89_FCC][72] = 60, + [0][1][1][0][RTW89_FCC][74] = 60, + [0][1][1][0][RTW89_FCC][75] = 60, + [0][1][1][0][RTW89_FCC][77] = 60, + [0][1][1][0][RTW89_FCC][79] = 60, + [0][1][1][0][RTW89_FCC][81] = 60, + [0][1][1][0][RTW89_FCC][83] = 60, + [0][1][1][0][RTW89_FCC][85] = 60, + [0][1][1][0][RTW89_FCC][87] = 60, + [0][1][1][0][RTW89_FCC][89] = 60, + [0][1][1][0][RTW89_FCC][90] = 60, + [0][1][1][0][RTW89_FCC][92] = 60, + [0][1][1][0][RTW89_FCC][94] = 60, + [0][1][1][0][RTW89_FCC][96] = 60, + [0][1][1][0][RTW89_FCC][98] = 60, + [0][1][1][0][RTW89_FCC][100] = 60, + [0][1][1][0][RTW89_FCC][102] = 60, + [0][1][1][0][RTW89_FCC][104] = 60, + [0][1][1][0][RTW89_FCC][105] = 60, + [0][1][1][0][RTW89_FCC][107] = 60, + [0][1][1][0][RTW89_FCC][109] = 60, + [0][1][1][0][RTW89_FCC][111] = 127, + [0][1][1][0][RTW89_FCC][113] = 127, + [0][1][1][0][RTW89_FCC][115] = 127, + [0][1][1][0][RTW89_FCC][117] = 127, + [0][1][1][0][RTW89_FCC][119] = 127, + [0][0][2][0][RTW89_FCC][0] = 72, + [0][0][2][0][RTW89_FCC][2] = 72, + [0][0][2][0][RTW89_FCC][4] = 72, + [0][0][2][0][RTW89_FCC][6] = 72, + [0][0][2][0][RTW89_FCC][8] = 72, + [0][0][2][0][RTW89_FCC][10] = 72, + [0][0][2][0][RTW89_FCC][12] = 72, + [0][0][2][0][RTW89_FCC][14] = 72, + [0][0][2][0][RTW89_FCC][15] = 72, + [0][0][2][0][RTW89_FCC][17] = 72, + [0][0][2][0][RTW89_FCC][19] = 72, + [0][0][2][0][RTW89_FCC][21] = 72, + [0][0][2][0][RTW89_FCC][23] = 72, + [0][0][2][0][RTW89_FCC][25] = 72, + [0][0][2][0][RTW89_FCC][27] = 72, + [0][0][2][0][RTW89_FCC][29] = 72, + [0][0][2][0][RTW89_FCC][30] = 72, + [0][0][2][0][RTW89_FCC][32] = 72, + [0][0][2][0][RTW89_FCC][34] = 72, + [0][0][2][0][RTW89_FCC][36] = 72, + [0][0][2][0][RTW89_FCC][38] = 72, + [0][0][2][0][RTW89_FCC][40] = 72, + [0][0][2][0][RTW89_FCC][42] = 72, + [0][0][2][0][RTW89_FCC][44] = 72, + [0][0][2][0][RTW89_FCC][45] = 72, + [0][0][2][0][RTW89_FCC][47] = 72, + [0][0][2][0][RTW89_FCC][49] = 72, + [0][0][2][0][RTW89_FCC][51] = 72, + [0][0][2][0][RTW89_FCC][53] = 72, + 
[0][0][2][0][RTW89_FCC][55] = 72, + [0][0][2][0][RTW89_FCC][57] = 72, + [0][0][2][0][RTW89_FCC][59] = 72, + [0][0][2][0][RTW89_FCC][60] = 72, + [0][0][2][0][RTW89_FCC][62] = 72, + [0][0][2][0][RTW89_FCC][64] = 72, + [0][0][2][0][RTW89_FCC][66] = 72, + [0][0][2][0][RTW89_FCC][68] = 72, + [0][0][2][0][RTW89_FCC][70] = 72, + [0][0][2][0][RTW89_FCC][72] = 72, + [0][0][2][0][RTW89_FCC][74] = 72, + [0][0][2][0][RTW89_FCC][75] = 72, + [0][0][2][0][RTW89_FCC][77] = 72, + [0][0][2][0][RTW89_FCC][79] = 72, + [0][0][2][0][RTW89_FCC][81] = 72, + [0][0][2][0][RTW89_FCC][83] = 72, + [0][0][2][0][RTW89_FCC][85] = 72, + [0][0][2][0][RTW89_FCC][87] = 72, + [0][0][2][0][RTW89_FCC][89] = 72, + [0][0][2][0][RTW89_FCC][90] = 72, + [0][0][2][0][RTW89_FCC][92] = 72, + [0][0][2][0][RTW89_FCC][94] = 72, + [0][0][2][0][RTW89_FCC][96] = 72, + [0][0][2][0][RTW89_FCC][98] = 72, + [0][0][2][0][RTW89_FCC][100] = 72, + [0][0][2][0][RTW89_FCC][102] = 72, + [0][0][2][0][RTW89_FCC][104] = 72, + [0][0][2][0][RTW89_FCC][105] = 72, + [0][0][2][0][RTW89_FCC][107] = 72, + [0][0][2][0][RTW89_FCC][109] = 72, + [0][0][2][0][RTW89_FCC][111] = 127, + [0][0][2][0][RTW89_FCC][113] = 127, + [0][0][2][0][RTW89_FCC][115] = 127, + [0][0][2][0][RTW89_FCC][117] = 127, + [0][0][2][0][RTW89_FCC][119] = 127, + [0][1][2][0][RTW89_FCC][0] = 60, + [0][1][2][0][RTW89_FCC][2] = 60, + [0][1][2][0][RTW89_FCC][4] = 60, + [0][1][2][0][RTW89_FCC][6] = 60, + [0][1][2][0][RTW89_FCC][8] = 60, + [0][1][2][0][RTW89_FCC][10] = 60, + [0][1][2][0][RTW89_FCC][12] = 60, + [0][1][2][0][RTW89_FCC][14] = 60, + [0][1][2][0][RTW89_FCC][15] = 60, + [0][1][2][0][RTW89_FCC][17] = 60, + [0][1][2][0][RTW89_FCC][19] = 60, + [0][1][2][0][RTW89_FCC][21] = 60, + [0][1][2][0][RTW89_FCC][23] = 60, + [0][1][2][0][RTW89_FCC][25] = 60, + [0][1][2][0][RTW89_FCC][27] = 60, + [0][1][2][0][RTW89_FCC][29] = 60, + [0][1][2][0][RTW89_FCC][30] = 60, + [0][1][2][0][RTW89_FCC][32] = 60, + [0][1][2][0][RTW89_FCC][34] = 60, + [0][1][2][0][RTW89_FCC][36] = 60, + [0][1][2][0][RTW89_FCC][38] = 60, + [0][1][2][0][RTW89_FCC][40] = 60, + [0][1][2][0][RTW89_FCC][42] = 60, + [0][1][2][0][RTW89_FCC][44] = 60, + [0][1][2][0][RTW89_FCC][45] = 60, + [0][1][2][0][RTW89_FCC][47] = 60, + [0][1][2][0][RTW89_FCC][49] = 60, + [0][1][2][0][RTW89_FCC][51] = 60, + [0][1][2][0][RTW89_FCC][53] = 60, + [0][1][2][0][RTW89_FCC][55] = 60, + [0][1][2][0][RTW89_FCC][57] = 60, + [0][1][2][0][RTW89_FCC][59] = 60, + [0][1][2][0][RTW89_FCC][60] = 60, + [0][1][2][0][RTW89_FCC][62] = 60, + [0][1][2][0][RTW89_FCC][64] = 60, + [0][1][2][0][RTW89_FCC][66] = 60, + [0][1][2][0][RTW89_FCC][68] = 60, + [0][1][2][0][RTW89_FCC][70] = 60, + [0][1][2][0][RTW89_FCC][72] = 60, + [0][1][2][0][RTW89_FCC][74] = 60, + [0][1][2][0][RTW89_FCC][75] = 60, + [0][1][2][0][RTW89_FCC][77] = 60, + [0][1][2][0][RTW89_FCC][79] = 60, + [0][1][2][0][RTW89_FCC][81] = 60, + [0][1][2][0][RTW89_FCC][83] = 60, + [0][1][2][0][RTW89_FCC][85] = 60, + [0][1][2][0][RTW89_FCC][87] = 60, + [0][1][2][0][RTW89_FCC][89] = 60, + [0][1][2][0][RTW89_FCC][90] = 60, + [0][1][2][0][RTW89_FCC][92] = 60, + [0][1][2][0][RTW89_FCC][94] = 60, + [0][1][2][0][RTW89_FCC][96] = 60, + [0][1][2][0][RTW89_FCC][98] = 60, + [0][1][2][0][RTW89_FCC][100] = 60, + [0][1][2][0][RTW89_FCC][102] = 60, + [0][1][2][0][RTW89_FCC][104] = 60, + [0][1][2][0][RTW89_FCC][105] = 60, + [0][1][2][0][RTW89_FCC][107] = 60, + [0][1][2][0][RTW89_FCC][109] = 60, + [0][1][2][0][RTW89_FCC][111] = 127, + [0][1][2][0][RTW89_FCC][113] = 127, + [0][1][2][0][RTW89_FCC][115] = 127, + [0][1][2][0][RTW89_FCC][117] = 127, + 
[0][1][2][0][RTW89_FCC][119] = 127, + [0][1][2][1][RTW89_FCC][0] = 48, + [0][1][2][1][RTW89_FCC][2] = 48, + [0][1][2][1][RTW89_FCC][4] = 48, + [0][1][2][1][RTW89_FCC][6] = 48, + [0][1][2][1][RTW89_FCC][8] = 48, + [0][1][2][1][RTW89_FCC][10] = 48, + [0][1][2][1][RTW89_FCC][12] = 48, + [0][1][2][1][RTW89_FCC][14] = 48, + [0][1][2][1][RTW89_FCC][15] = 48, + [0][1][2][1][RTW89_FCC][17] = 48, + [0][1][2][1][RTW89_FCC][19] = 48, + [0][1][2][1][RTW89_FCC][21] = 48, + [0][1][2][1][RTW89_FCC][23] = 48, + [0][1][2][1][RTW89_FCC][25] = 48, + [0][1][2][1][RTW89_FCC][27] = 48, + [0][1][2][1][RTW89_FCC][29] = 48, + [0][1][2][1][RTW89_FCC][30] = 48, + [0][1][2][1][RTW89_FCC][32] = 48, + [0][1][2][1][RTW89_FCC][34] = 48, + [0][1][2][1][RTW89_FCC][36] = 48, + [0][1][2][1][RTW89_FCC][38] = 48, + [0][1][2][1][RTW89_FCC][40] = 48, + [0][1][2][1][RTW89_FCC][42] = 48, + [0][1][2][1][RTW89_FCC][44] = 48, + [0][1][2][1][RTW89_FCC][45] = 48, + [0][1][2][1][RTW89_FCC][47] = 48, + [0][1][2][1][RTW89_FCC][49] = 48, + [0][1][2][1][RTW89_FCC][51] = 48, + [0][1][2][1][RTW89_FCC][53] = 48, + [0][1][2][1][RTW89_FCC][55] = 48, + [0][1][2][1][RTW89_FCC][57] = 48, + [0][1][2][1][RTW89_FCC][59] = 48, + [0][1][2][1][RTW89_FCC][60] = 48, + [0][1][2][1][RTW89_FCC][62] = 48, + [0][1][2][1][RTW89_FCC][64] = 48, + [0][1][2][1][RTW89_FCC][66] = 48, + [0][1][2][1][RTW89_FCC][68] = 48, + [0][1][2][1][RTW89_FCC][70] = 48, + [0][1][2][1][RTW89_FCC][72] = 48, + [0][1][2][1][RTW89_FCC][74] = 48, + [0][1][2][1][RTW89_FCC][75] = 48, + [0][1][2][1][RTW89_FCC][77] = 48, + [0][1][2][1][RTW89_FCC][79] = 48, + [0][1][2][1][RTW89_FCC][81] = 48, + [0][1][2][1][RTW89_FCC][83] = 48, + [0][1][2][1][RTW89_FCC][85] = 48, + [0][1][2][1][RTW89_FCC][87] = 48, + [0][1][2][1][RTW89_FCC][89] = 48, + [0][1][2][1][RTW89_FCC][90] = 48, + [0][1][2][1][RTW89_FCC][92] = 48, + [0][1][2][1][RTW89_FCC][94] = 48, + [0][1][2][1][RTW89_FCC][96] = 48, + [0][1][2][1][RTW89_FCC][98] = 48, + [0][1][2][1][RTW89_FCC][100] = 48, + [0][1][2][1][RTW89_FCC][102] = 48, + [0][1][2][1][RTW89_FCC][104] = 48, + [0][1][2][1][RTW89_FCC][105] = 48, + [0][1][2][1][RTW89_FCC][107] = 48, + [0][1][2][1][RTW89_FCC][109] = 48, + [0][1][2][1][RTW89_FCC][111] = 127, + [0][1][2][1][RTW89_FCC][113] = 127, + [0][1][2][1][RTW89_FCC][115] = 127, + [0][1][2][1][RTW89_FCC][117] = 127, + [0][1][2][1][RTW89_FCC][119] = 127, + [1][0][2][0][RTW89_FCC][1] = 72, + [1][0][2][0][RTW89_FCC][5] = 72, + [1][0][2][0][RTW89_FCC][9] = 72, + [1][0][2][0][RTW89_FCC][13] = 72, + [1][0][2][0][RTW89_FCC][16] = 72, + [1][0][2][0][RTW89_FCC][20] = 72, + [1][0][2][0][RTW89_FCC][24] = 72, + [1][0][2][0][RTW89_FCC][28] = 72, + [1][0][2][0][RTW89_FCC][31] = 72, + [1][0][2][0][RTW89_FCC][35] = 72, + [1][0][2][0][RTW89_FCC][39] = 72, + [1][0][2][0][RTW89_FCC][43] = 72, + [1][0][2][0][RTW89_FCC][46] = 72, + [1][0][2][0][RTW89_FCC][50] = 72, + [1][0][2][0][RTW89_FCC][54] = 72, + [1][0][2][0][RTW89_FCC][58] = 72, + [1][0][2][0][RTW89_FCC][61] = 72, + [1][0][2][0][RTW89_FCC][65] = 72, + [1][0][2][0][RTW89_FCC][69] = 72, + [1][0][2][0][RTW89_FCC][73] = 72, + [1][0][2][0][RTW89_FCC][76] = 72, + [1][0][2][0][RTW89_FCC][80] = 72, + [1][0][2][0][RTW89_FCC][84] = 72, + [1][0][2][0][RTW89_FCC][88] = 72, + [1][0][2][0][RTW89_FCC][91] = 72, + [1][0][2][0][RTW89_FCC][95] = 72, + [1][0][2][0][RTW89_FCC][99] = 72, + [1][0][2][0][RTW89_FCC][103] = 72, + [1][0][2][0][RTW89_FCC][106] = 72, + [1][0][2][0][RTW89_FCC][110] = 127, + [1][0][2][0][RTW89_FCC][114] = 127, + [1][0][2][0][RTW89_FCC][118] = 127, + [1][1][2][0][RTW89_FCC][1] = 60, + 
[1][1][2][0][RTW89_FCC][5] = 60, + [1][1][2][0][RTW89_FCC][9] = 60, + [1][1][2][0][RTW89_FCC][13] = 60, + [1][1][2][0][RTW89_FCC][16] = 60, + [1][1][2][0][RTW89_FCC][20] = 60, + [1][1][2][0][RTW89_FCC][24] = 60, + [1][1][2][0][RTW89_FCC][28] = 60, + [1][1][2][0][RTW89_FCC][31] = 60, + [1][1][2][0][RTW89_FCC][35] = 60, + [1][1][2][0][RTW89_FCC][39] = 60, + [1][1][2][0][RTW89_FCC][43] = 60, + [1][1][2][0][RTW89_FCC][46] = 60, + [1][1][2][0][RTW89_FCC][50] = 60, + [1][1][2][0][RTW89_FCC][54] = 60, + [1][1][2][0][RTW89_FCC][58] = 60, + [1][1][2][0][RTW89_FCC][61] = 60, + [1][1][2][0][RTW89_FCC][65] = 60, + [1][1][2][0][RTW89_FCC][69] = 60, + [1][1][2][0][RTW89_FCC][73] = 60, + [1][1][2][0][RTW89_FCC][76] = 60, + [1][1][2][0][RTW89_FCC][80] = 60, + [1][1][2][0][RTW89_FCC][84] = 60, + [1][1][2][0][RTW89_FCC][88] = 60, + [1][1][2][0][RTW89_FCC][91] = 60, + [1][1][2][0][RTW89_FCC][95] = 60, + [1][1][2][0][RTW89_FCC][99] = 60, + [1][1][2][0][RTW89_FCC][103] = 60, + [1][1][2][0][RTW89_FCC][106] = 60, + [1][1][2][0][RTW89_FCC][110] = 127, + [1][1][2][0][RTW89_FCC][114] = 127, + [1][1][2][0][RTW89_FCC][118] = 127, + [1][1][2][1][RTW89_FCC][1] = 48, + [1][1][2][1][RTW89_FCC][5] = 48, + [1][1][2][1][RTW89_FCC][9] = 48, + [1][1][2][1][RTW89_FCC][13] = 48, + [1][1][2][1][RTW89_FCC][16] = 48, + [1][1][2][1][RTW89_FCC][20] = 48, + [1][1][2][1][RTW89_FCC][24] = 48, + [1][1][2][1][RTW89_FCC][28] = 48, + [1][1][2][1][RTW89_FCC][31] = 48, + [1][1][2][1][RTW89_FCC][35] = 48, + [1][1][2][1][RTW89_FCC][39] = 48, + [1][1][2][1][RTW89_FCC][43] = 48, + [1][1][2][1][RTW89_FCC][46] = 48, + [1][1][2][1][RTW89_FCC][50] = 48, + [1][1][2][1][RTW89_FCC][54] = 48, + [1][1][2][1][RTW89_FCC][58] = 48, + [1][1][2][1][RTW89_FCC][61] = 48, + [1][1][2][1][RTW89_FCC][65] = 48, + [1][1][2][1][RTW89_FCC][69] = 48, + [1][1][2][1][RTW89_FCC][73] = 48, + [1][1][2][1][RTW89_FCC][76] = 48, + [1][1][2][1][RTW89_FCC][80] = 48, + [1][1][2][1][RTW89_FCC][84] = 48, + [1][1][2][1][RTW89_FCC][88] = 48, + [1][1][2][1][RTW89_FCC][91] = 48, + [1][1][2][1][RTW89_FCC][95] = 48, + [1][1][2][1][RTW89_FCC][99] = 48, + [1][1][2][1][RTW89_FCC][103] = 48, + [1][1][2][1][RTW89_FCC][106] = 48, + [1][1][2][1][RTW89_FCC][110] = 127, + [1][1][2][1][RTW89_FCC][114] = 127, + [1][1][2][1][RTW89_FCC][118] = 127, + [2][0][2][0][RTW89_FCC][3] = 64, + [2][0][2][0][RTW89_FCC][11] = 64, + [2][0][2][0][RTW89_FCC][18] = 64, + [2][0][2][0][RTW89_FCC][26] = 64, + [2][0][2][0][RTW89_FCC][33] = 64, + [2][0][2][0][RTW89_FCC][41] = 64, + [2][0][2][0][RTW89_FCC][48] = 64, + [2][0][2][0][RTW89_FCC][56] = 64, + [2][0][2][0][RTW89_FCC][63] = 64, + [2][0][2][0][RTW89_FCC][71] = 64, + [2][0][2][0][RTW89_FCC][78] = 64, + [2][0][2][0][RTW89_FCC][86] = 64, + [2][0][2][0][RTW89_FCC][93] = 64, + [2][0][2][0][RTW89_FCC][101] = 64, + [2][0][2][0][RTW89_FCC][108] = 127, + [2][0][2][0][RTW89_FCC][116] = 127, + [2][1][2][0][RTW89_FCC][3] = 52, + [2][1][2][0][RTW89_FCC][11] = 52, + [2][1][2][0][RTW89_FCC][18] = 52, + [2][1][2][0][RTW89_FCC][26] = 52, + [2][1][2][0][RTW89_FCC][33] = 52, + [2][1][2][0][RTW89_FCC][41] = 52, + [2][1][2][0][RTW89_FCC][48] = 52, + [2][1][2][0][RTW89_FCC][56] = 52, + [2][1][2][0][RTW89_FCC][63] = 52, + [2][1][2][0][RTW89_FCC][71] = 52, + [2][1][2][0][RTW89_FCC][78] = 52, + [2][1][2][0][RTW89_FCC][86] = 52, + [2][1][2][0][RTW89_FCC][93] = 52, + [2][1][2][0][RTW89_FCC][101] = 52, + [2][1][2][0][RTW89_FCC][108] = 127, + [2][1][2][0][RTW89_FCC][116] = 127, + [2][1][2][1][RTW89_FCC][3] = 40, + [2][1][2][1][RTW89_FCC][11] = 40, + [2][1][2][1][RTW89_FCC][18] = 40, + 
[2][1][2][1][RTW89_FCC][26] = 40, + [2][1][2][1][RTW89_FCC][33] = 40, + [2][1][2][1][RTW89_FCC][41] = 40, + [2][1][2][1][RTW89_FCC][48] = 40, + [2][1][2][1][RTW89_FCC][56] = 40, + [2][1][2][1][RTW89_FCC][63] = 40, + [2][1][2][1][RTW89_FCC][71] = 40, + [2][1][2][1][RTW89_FCC][78] = 40, + [2][1][2][1][RTW89_FCC][86] = 40, + [2][1][2][1][RTW89_FCC][93] = 40, + [2][1][2][1][RTW89_FCC][101] = 40, + [2][1][2][1][RTW89_FCC][108] = 127, + [2][1][2][1][RTW89_FCC][116] = 127, + [3][0][2][0][RTW89_FCC][7] = 56, + [3][0][2][0][RTW89_FCC][22] = 56, + [3][0][2][0][RTW89_FCC][37] = 56, + [3][0][2][0][RTW89_FCC][52] = 56, + [3][0][2][0][RTW89_FCC][67] = 56, + [3][0][2][0][RTW89_FCC][82] = 56, + [3][0][2][0][RTW89_FCC][97] = 56, + [3][0][2][0][RTW89_FCC][112] = 127, + [3][1][2][0][RTW89_FCC][7] = 44, + [3][1][2][0][RTW89_FCC][22] = 44, + [3][1][2][0][RTW89_FCC][37] = 44, + [3][1][2][0][RTW89_FCC][52] = 44, + [3][1][2][0][RTW89_FCC][67] = 44, + [3][1][2][0][RTW89_FCC][82] = 44, + [3][1][2][0][RTW89_FCC][97] = 44, + [3][1][2][0][RTW89_FCC][112] = 127, + [3][1][2][1][RTW89_FCC][7] = 32, + [3][1][2][1][RTW89_FCC][22] = 32, + [3][1][2][1][RTW89_FCC][37] = 32, + [3][1][2][1][RTW89_FCC][52] = 32, + [3][1][2][1][RTW89_FCC][67] = 32, + [3][1][2][1][RTW89_FCC][82] = 32, + [3][1][2][1][RTW89_FCC][97] = 32, + [3][1][2][1][RTW89_FCC][112] = 127, +}; + +const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = { + [0][0][RTW89_WW][0] = 32, + [0][0][RTW89_WW][1] = 32, + [0][0][RTW89_WW][2] = 32, + [0][0][RTW89_WW][3] = 32, + [0][0][RTW89_WW][4] = 32, + [0][0][RTW89_WW][5] = 32, + [0][0][RTW89_WW][6] = 32, + [0][0][RTW89_WW][7] = 32, + [0][0][RTW89_WW][8] = 32, + [0][0][RTW89_WW][9] = 32, + [0][0][RTW89_WW][10] = 32, + [0][0][RTW89_WW][11] = 32, + [0][0][RTW89_WW][12] = 32, + [0][0][RTW89_WW][13] = 0, + [0][1][RTW89_WW][0] = 20, + [0][1][RTW89_WW][1] = 22, + [0][1][RTW89_WW][2] = 22, + [0][1][RTW89_WW][3] = 22, + [0][1][RTW89_WW][4] = 22, + [0][1][RTW89_WW][5] = 22, + [0][1][RTW89_WW][6] = 22, + [0][1][RTW89_WW][7] = 22, + [0][1][RTW89_WW][8] = 22, + [0][1][RTW89_WW][9] = 22, + [0][1][RTW89_WW][10] = 22, + [0][1][RTW89_WW][11] = 22, + [0][1][RTW89_WW][12] = 20, + [0][1][RTW89_WW][13] = 0, + [1][0][RTW89_WW][0] = 42, + [1][0][RTW89_WW][1] = 44, + [1][0][RTW89_WW][2] = 44, + [1][0][RTW89_WW][3] = 44, + [1][0][RTW89_WW][4] = 44, + [1][0][RTW89_WW][5] = 44, + [1][0][RTW89_WW][6] = 44, + [1][0][RTW89_WW][7] = 44, + [1][0][RTW89_WW][8] = 44, + [1][0][RTW89_WW][9] = 44, + [1][0][RTW89_WW][10] = 44, + [1][0][RTW89_WW][11] = 44, + [1][0][RTW89_WW][12] = 38, + [1][0][RTW89_WW][13] = 0, + [1][1][RTW89_WW][0] = 32, + [1][1][RTW89_WW][1] = 32, + [1][1][RTW89_WW][2] = 32, + [1][1][RTW89_WW][3] = 32, + [1][1][RTW89_WW][4] = 32, + [1][1][RTW89_WW][5] = 32, + [1][1][RTW89_WW][6] = 32, + [1][1][RTW89_WW][7] = 32, + [1][1][RTW89_WW][8] = 32, + [1][1][RTW89_WW][9] = 32, + [1][1][RTW89_WW][10] = 32, + [1][1][RTW89_WW][11] = 32, + [1][1][RTW89_WW][12] = 32, + [1][1][RTW89_WW][13] = 0, + [2][0][RTW89_WW][0] = 56, + [2][0][RTW89_WW][1] = 56, + [2][0][RTW89_WW][2] = 56, + [2][0][RTW89_WW][3] = 56, + [2][0][RTW89_WW][4] = 56, + [2][0][RTW89_WW][5] = 56, + [2][0][RTW89_WW][6] = 56, + [2][0][RTW89_WW][7] = 56, + [2][0][RTW89_WW][8] = 56, + [2][0][RTW89_WW][9] = 56, + [2][0][RTW89_WW][10] = 56, + [2][0][RTW89_WW][11] = 56, + [2][0][RTW89_WW][12] = 56, + [2][0][RTW89_WW][13] = 0, + [2][1][RTW89_WW][0] = 44, + [2][1][RTW89_WW][1] = 44, + [2][1][RTW89_WW][2] = 44, + [2][1][RTW89_WW][3] = 44, + 
[2][1][RTW89_WW][4] = 44, + [2][1][RTW89_WW][5] = 44, + [2][1][RTW89_WW][6] = 44, + [2][1][RTW89_WW][7] = 44, + [2][1][RTW89_WW][8] = 44, + [2][1][RTW89_WW][9] = 44, + [2][1][RTW89_WW][10] = 44, + [2][1][RTW89_WW][11] = 44, + [2][1][RTW89_WW][12] = 42, + [2][1][RTW89_WW][13] = 0, + [0][0][RTW89_FCC][0] = 68, + [0][0][RTW89_ETSI][0] = 36, + [0][0][RTW89_MKK][0] = 38, + [0][0][RTW89_IC][0] = 68, + [0][0][RTW89_ACMA][0] = 32, + [0][0][RTW89_FCC][1] = 68, + [0][0][RTW89_ETSI][1] = 40, + [0][0][RTW89_MKK][1] = 44, + [0][0][RTW89_IC][1] = 68, + [0][0][RTW89_ACMA][1] = 32, + [0][0][RTW89_FCC][2] = 72, + [0][0][RTW89_ETSI][2] = 40, + [0][0][RTW89_MKK][2] = 44, + [0][0][RTW89_IC][2] = 72, + [0][0][RTW89_ACMA][2] = 32, + [0][0][RTW89_FCC][3] = 76, + [0][0][RTW89_ETSI][3] = 40, + [0][0][RTW89_MKK][3] = 44, + [0][0][RTW89_IC][3] = 76, + [0][0][RTW89_ACMA][3] = 32, + [0][0][RTW89_FCC][4] = 76, + [0][0][RTW89_ETSI][4] = 40, + [0][0][RTW89_MKK][4] = 44, + [0][0][RTW89_IC][4] = 76, + [0][0][RTW89_ACMA][4] = 32, + [0][0][RTW89_FCC][5] = 84, + [0][0][RTW89_ETSI][5] = 40, + [0][0][RTW89_MKK][5] = 44, + [0][0][RTW89_IC][5] = 84, + [0][0][RTW89_ACMA][5] = 32, + [0][0][RTW89_FCC][6] = 74, + [0][0][RTW89_ETSI][6] = 40, + [0][0][RTW89_MKK][6] = 44, + [0][0][RTW89_IC][6] = 74, + [0][0][RTW89_ACMA][6] = 32, + [0][0][RTW89_FCC][7] = 74, + [0][0][RTW89_ETSI][7] = 40, + [0][0][RTW89_MKK][7] = 44, + [0][0][RTW89_IC][7] = 74, + [0][0][RTW89_ACMA][7] = 32, + [0][0][RTW89_FCC][8] = 70, + [0][0][RTW89_ETSI][8] = 40, + [0][0][RTW89_MKK][8] = 44, + [0][0][RTW89_IC][8] = 70, + [0][0][RTW89_ACMA][8] = 32, + [0][0][RTW89_FCC][9] = 66, + [0][0][RTW89_ETSI][9] = 40, + [0][0][RTW89_MKK][9] = 44, + [0][0][RTW89_IC][9] = 66, + [0][0][RTW89_ACMA][9] = 32, + [0][0][RTW89_FCC][10] = 66, + [0][0][RTW89_ETSI][10] = 40, + [0][0][RTW89_MKK][10] = 44, + [0][0][RTW89_IC][10] = 66, + [0][0][RTW89_ACMA][10] = 32, + [0][0][RTW89_FCC][11] = 56, + [0][0][RTW89_ETSI][11] = 40, + [0][0][RTW89_MKK][11] = 44, + [0][0][RTW89_IC][11] = 56, + [0][0][RTW89_ACMA][11] = 32, + [0][0][RTW89_FCC][12] = 32, + [0][0][RTW89_ETSI][12] = 36, + [0][0][RTW89_MKK][12] = 38, + [0][0][RTW89_IC][12] = 32, + [0][0][RTW89_ACMA][12] = 32, + [0][0][RTW89_FCC][13] = 127, + [0][0][RTW89_ETSI][13] = 127, + [0][0][RTW89_MKK][13] = 127, + [0][0][RTW89_IC][13] = 127, + [0][0][RTW89_ACMA][13] = 127, + [0][1][RTW89_FCC][0] = 62, + [0][1][RTW89_ETSI][0] = 24, + [0][1][RTW89_MKK][0] = 26, + [0][1][RTW89_IC][0] = 62, + [0][1][RTW89_ACMA][0] = 20, + [0][1][RTW89_FCC][1] = 62, + [0][1][RTW89_ETSI][1] = 26, + [0][1][RTW89_MKK][1] = 32, + [0][1][RTW89_IC][1] = 62, + [0][1][RTW89_ACMA][1] = 22, + [0][1][RTW89_FCC][2] = 66, + [0][1][RTW89_ETSI][2] = 26, + [0][1][RTW89_MKK][2] = 32, + [0][1][RTW89_IC][2] = 66, + [0][1][RTW89_ACMA][2] = 22, + [0][1][RTW89_FCC][3] = 70, + [0][1][RTW89_ETSI][3] = 26, + [0][1][RTW89_MKK][3] = 32, + [0][1][RTW89_IC][3] = 70, + [0][1][RTW89_ACMA][3] = 22, + [0][1][RTW89_FCC][4] = 74, + [0][1][RTW89_ETSI][4] = 26, + [0][1][RTW89_MKK][4] = 32, + [0][1][RTW89_IC][4] = 74, + [0][1][RTW89_ACMA][4] = 22, + [0][1][RTW89_FCC][5] = 74, + [0][1][RTW89_ETSI][5] = 26, + [0][1][RTW89_MKK][5] = 32, + [0][1][RTW89_IC][5] = 74, + [0][1][RTW89_ACMA][5] = 22, + [0][1][RTW89_FCC][6] = 72, + [0][1][RTW89_ETSI][6] = 26, + [0][1][RTW89_MKK][6] = 32, + [0][1][RTW89_IC][6] = 72, + [0][1][RTW89_ACMA][6] = 22, + [0][1][RTW89_FCC][7] = 68, + [0][1][RTW89_ETSI][7] = 26, + [0][1][RTW89_MKK][7] = 32, + [0][1][RTW89_IC][7] = 68, + [0][1][RTW89_ACMA][7] = 22, + [0][1][RTW89_FCC][8] = 64, + 
[0][1][RTW89_ETSI][8] = 26, + [0][1][RTW89_MKK][8] = 32, + [0][1][RTW89_IC][8] = 64, + [0][1][RTW89_ACMA][8] = 22, + [0][1][RTW89_FCC][9] = 60, + [0][1][RTW89_ETSI][9] = 26, + [0][1][RTW89_MKK][9] = 32, + [0][1][RTW89_IC][9] = 60, + [0][1][RTW89_ACMA][9] = 22, + [0][1][RTW89_FCC][10] = 60, + [0][1][RTW89_ETSI][10] = 26, + [0][1][RTW89_MKK][10] = 32, + [0][1][RTW89_IC][10] = 60, + [0][1][RTW89_ACMA][10] = 22, + [0][1][RTW89_FCC][11] = 52, + [0][1][RTW89_ETSI][11] = 26, + [0][1][RTW89_MKK][11] = 32, + [0][1][RTW89_IC][11] = 52, + [0][1][RTW89_ACMA][11] = 22, + [0][1][RTW89_FCC][12] = 30, + [0][1][RTW89_ETSI][12] = 22, + [0][1][RTW89_MKK][12] = 26, + [0][1][RTW89_IC][12] = 30, + [0][1][RTW89_ACMA][12] = 20, + [0][1][RTW89_FCC][13] = 127, + [0][1][RTW89_ETSI][13] = 127, + [0][1][RTW89_MKK][13] = 127, + [0][1][RTW89_IC][13] = 127, + [0][1][RTW89_ACMA][13] = 127, + [1][0][RTW89_FCC][0] = 78, + [1][0][RTW89_ETSI][0] = 48, + [1][0][RTW89_MKK][0] = 48, + [1][0][RTW89_IC][0] = 78, + [1][0][RTW89_ACMA][0] = 42, + [1][0][RTW89_FCC][1] = 78, + [1][0][RTW89_ETSI][1] = 48, + [1][0][RTW89_MKK][1] = 48, + [1][0][RTW89_IC][1] = 78, + [1][0][RTW89_ACMA][1] = 44, + [1][0][RTW89_FCC][2] = 82, + [1][0][RTW89_ETSI][2] = 48, + [1][0][RTW89_MKK][2] = 48, + [1][0][RTW89_IC][2] = 82, + [1][0][RTW89_ACMA][2] = 44, + [1][0][RTW89_FCC][3] = 84, + [1][0][RTW89_ETSI][3] = 48, + [1][0][RTW89_MKK][3] = 48, + [1][0][RTW89_IC][3] = 84, + [1][0][RTW89_ACMA][3] = 44, + [1][0][RTW89_FCC][4] = 84, + [1][0][RTW89_ETSI][4] = 48, + [1][0][RTW89_MKK][4] = 48, + [1][0][RTW89_IC][4] = 84, + [1][0][RTW89_ACMA][4] = 44, + [1][0][RTW89_FCC][5] = 84, + [1][0][RTW89_ETSI][5] = 48, + [1][0][RTW89_MKK][5] = 48, + [1][0][RTW89_IC][5] = 84, + [1][0][RTW89_ACMA][5] = 44, + [1][0][RTW89_FCC][6] = 78, + [1][0][RTW89_ETSI][6] = 46, + [1][0][RTW89_MKK][6] = 48, + [1][0][RTW89_IC][6] = 78, + [1][0][RTW89_ACMA][6] = 44, + [1][0][RTW89_FCC][7] = 78, + [1][0][RTW89_ETSI][7] = 48, + [1][0][RTW89_MKK][7] = 48, + [1][0][RTW89_IC][7] = 78, + [1][0][RTW89_ACMA][7] = 44, + [1][0][RTW89_FCC][8] = 78, + [1][0][RTW89_ETSI][8] = 48, + [1][0][RTW89_MKK][8] = 48, + [1][0][RTW89_IC][8] = 78, + [1][0][RTW89_ACMA][8] = 44, + [1][0][RTW89_FCC][9] = 74, + [1][0][RTW89_ETSI][9] = 48, + [1][0][RTW89_MKK][9] = 48, + [1][0][RTW89_IC][9] = 74, + [1][0][RTW89_ACMA][9] = 44, + [1][0][RTW89_FCC][10] = 74, + [1][0][RTW89_ETSI][10] = 48, + [1][0][RTW89_MKK][10] = 48, + [1][0][RTW89_IC][10] = 74, + [1][0][RTW89_ACMA][10] = 44, + [1][0][RTW89_FCC][11] = 72, + [1][0][RTW89_ETSI][11] = 48, + [1][0][RTW89_MKK][11] = 48, + [1][0][RTW89_IC][11] = 72, + [1][0][RTW89_ACMA][11] = 44, + [1][0][RTW89_FCC][12] = 38, + [1][0][RTW89_ETSI][12] = 48, + [1][0][RTW89_MKK][12] = 48, + [1][0][RTW89_IC][12] = 38, + [1][0][RTW89_ACMA][12] = 42, + [1][0][RTW89_FCC][13] = 127, + [1][0][RTW89_ETSI][13] = 127, + [1][0][RTW89_MKK][13] = 127, + [1][0][RTW89_IC][13] = 127, + [1][0][RTW89_ACMA][13] = 127, + [1][1][RTW89_FCC][0] = 66, + [1][1][RTW89_ETSI][0] = 34, + [1][1][RTW89_MKK][0] = 36, + [1][1][RTW89_IC][0] = 66, + [1][1][RTW89_ACMA][0] = 32, + [1][1][RTW89_FCC][1] = 66, + [1][1][RTW89_ETSI][1] = 36, + [1][1][RTW89_MKK][1] = 36, + [1][1][RTW89_IC][1] = 66, + [1][1][RTW89_ACMA][1] = 32, + [1][1][RTW89_FCC][2] = 70, + [1][1][RTW89_ETSI][2] = 36, + [1][1][RTW89_MKK][2] = 36, + [1][1][RTW89_IC][2] = 70, + [1][1][RTW89_ACMA][2] = 32, + [1][1][RTW89_FCC][3] = 74, + [1][1][RTW89_ETSI][3] = 36, + [1][1][RTW89_MKK][3] = 36, + [1][1][RTW89_IC][3] = 74, + [1][1][RTW89_ACMA][3] = 32, + [1][1][RTW89_FCC][4] = 74, + 
[1][1][RTW89_ETSI][4] = 36, + [1][1][RTW89_MKK][4] = 36, + [1][1][RTW89_IC][4] = 74, + [1][1][RTW89_ACMA][4] = 32, + [1][1][RTW89_FCC][5] = 74, + [1][1][RTW89_ETSI][5] = 36, + [1][1][RTW89_MKK][5] = 36, + [1][1][RTW89_IC][5] = 74, + [1][1][RTW89_ACMA][5] = 32, + [1][1][RTW89_FCC][6] = 74, + [1][1][RTW89_ETSI][6] = 36, + [1][1][RTW89_MKK][6] = 36, + [1][1][RTW89_IC][6] = 74, + [1][1][RTW89_ACMA][6] = 32, + [1][1][RTW89_FCC][7] = 74, + [1][1][RTW89_ETSI][7] = 36, + [1][1][RTW89_MKK][7] = 36, + [1][1][RTW89_IC][7] = 74, + [1][1][RTW89_ACMA][7] = 32, + [1][1][RTW89_FCC][8] = 70, + [1][1][RTW89_ETSI][8] = 36, + [1][1][RTW89_MKK][8] = 36, + [1][1][RTW89_IC][8] = 70, + [1][1][RTW89_ACMA][8] = 32, + [1][1][RTW89_FCC][9] = 66, + [1][1][RTW89_ETSI][9] = 36, + [1][1][RTW89_MKK][9] = 36, + [1][1][RTW89_IC][9] = 66, + [1][1][RTW89_ACMA][9] = 32, + [1][1][RTW89_FCC][10] = 66, + [1][1][RTW89_ETSI][10] = 36, + [1][1][RTW89_MKK][10] = 36, + [1][1][RTW89_IC][10] = 66, + [1][1][RTW89_ACMA][10] = 32, + [1][1][RTW89_FCC][11] = 48, + [1][1][RTW89_ETSI][11] = 36, + [1][1][RTW89_MKK][11] = 36, + [1][1][RTW89_IC][11] = 48, + [1][1][RTW89_ACMA][11] = 32, + [1][1][RTW89_FCC][12] = 32, + [1][1][RTW89_ETSI][12] = 36, + [1][1][RTW89_MKK][12] = 36, + [1][1][RTW89_IC][12] = 32, + [1][1][RTW89_ACMA][12] = 32, + [1][1][RTW89_FCC][13] = 127, + [1][1][RTW89_ETSI][13] = 127, + [1][1][RTW89_MKK][13] = 127, + [1][1][RTW89_IC][13] = 127, + [1][1][RTW89_ACMA][13] = 127, + [2][0][RTW89_FCC][0] = 78, + [2][0][RTW89_ETSI][0] = 60, + [2][0][RTW89_MKK][0] = 60, + [2][0][RTW89_IC][0] = 78, + [2][0][RTW89_ACMA][0] = 56, + [2][0][RTW89_FCC][1] = 78, + [2][0][RTW89_ETSI][1] = 60, + [2][0][RTW89_MKK][1] = 60, + [2][0][RTW89_IC][1] = 78, + [2][0][RTW89_ACMA][1] = 56, + [2][0][RTW89_FCC][2] = 80, + [2][0][RTW89_ETSI][2] = 60, + [2][0][RTW89_MKK][2] = 60, + [2][0][RTW89_IC][2] = 80, + [2][0][RTW89_ACMA][2] = 56, + [2][0][RTW89_FCC][3] = 80, + [2][0][RTW89_ETSI][3] = 60, + [2][0][RTW89_MKK][3] = 60, + [2][0][RTW89_IC][3] = 80, + [2][0][RTW89_ACMA][3] = 56, + [2][0][RTW89_FCC][4] = 80, + [2][0][RTW89_ETSI][4] = 60, + [2][0][RTW89_MKK][4] = 60, + [2][0][RTW89_IC][4] = 80, + [2][0][RTW89_ACMA][4] = 56, + [2][0][RTW89_FCC][5] = 84, + [2][0][RTW89_ETSI][5] = 60, + [2][0][RTW89_MKK][5] = 60, + [2][0][RTW89_IC][5] = 84, + [2][0][RTW89_ACMA][5] = 56, + [2][0][RTW89_FCC][6] = 76, + [2][0][RTW89_ETSI][6] = 58, + [2][0][RTW89_MKK][6] = 60, + [2][0][RTW89_IC][6] = 76, + [2][0][RTW89_ACMA][6] = 56, + [2][0][RTW89_FCC][7] = 76, + [2][0][RTW89_ETSI][7] = 60, + [2][0][RTW89_MKK][7] = 60, + [2][0][RTW89_IC][7] = 76, + [2][0][RTW89_ACMA][7] = 56, + [2][0][RTW89_FCC][8] = 76, + [2][0][RTW89_ETSI][8] = 60, + [2][0][RTW89_MKK][8] = 60, + [2][0][RTW89_IC][8] = 76, + [2][0][RTW89_ACMA][8] = 56, + [2][0][RTW89_FCC][9] = 74, + [2][0][RTW89_ETSI][9] = 60, + [2][0][RTW89_MKK][9] = 60, + [2][0][RTW89_IC][9] = 74, + [2][0][RTW89_ACMA][9] = 56, + [2][0][RTW89_FCC][10] = 74, + [2][0][RTW89_ETSI][10] = 60, + [2][0][RTW89_MKK][10] = 60, + [2][0][RTW89_IC][10] = 74, + [2][0][RTW89_ACMA][10] = 56, + [2][0][RTW89_FCC][11] = 66, + [2][0][RTW89_ETSI][11] = 60, + [2][0][RTW89_MKK][11] = 60, + [2][0][RTW89_IC][11] = 66, + [2][0][RTW89_ACMA][11] = 56, + [2][0][RTW89_FCC][12] = 56, + [2][0][RTW89_ETSI][12] = 60, + [2][0][RTW89_MKK][12] = 60, + [2][0][RTW89_IC][12] = 56, + [2][0][RTW89_ACMA][12] = 56, + [2][0][RTW89_FCC][13] = 127, + [2][0][RTW89_ETSI][13] = 127, + [2][0][RTW89_MKK][13] = 127, + [2][0][RTW89_IC][13] = 127, + [2][0][RTW89_ACMA][13] = 127, + [2][1][RTW89_FCC][0] = 70, + 
[2][1][RTW89_ETSI][0] = 48, + [2][1][RTW89_MKK][0] = 48, + [2][1][RTW89_IC][0] = 70, + [2][1][RTW89_ACMA][0] = 44, + [2][1][RTW89_FCC][1] = 70, + [2][1][RTW89_ETSI][1] = 48, + [2][1][RTW89_MKK][1] = 48, + [2][1][RTW89_IC][1] = 70, + [2][1][RTW89_ACMA][1] = 44, + [2][1][RTW89_FCC][2] = 74, + [2][1][RTW89_ETSI][2] = 48, + [2][1][RTW89_MKK][2] = 48, + [2][1][RTW89_IC][2] = 74, + [2][1][RTW89_ACMA][2] = 44, + [2][1][RTW89_FCC][3] = 78, + [2][1][RTW89_ETSI][3] = 48, + [2][1][RTW89_MKK][3] = 48, + [2][1][RTW89_IC][3] = 78, + [2][1][RTW89_ACMA][3] = 44, + [2][1][RTW89_FCC][4] = 80, + [2][1][RTW89_ETSI][4] = 48, + [2][1][RTW89_MKK][4] = 48, + [2][1][RTW89_IC][4] = 80, + [2][1][RTW89_ACMA][4] = 44, + [2][1][RTW89_FCC][5] = 80, + [2][1][RTW89_ETSI][5] = 48, + [2][1][RTW89_MKK][5] = 48, + [2][1][RTW89_IC][5] = 80, + [2][1][RTW89_ACMA][5] = 44, + [2][1][RTW89_FCC][6] = 78, + [2][1][RTW89_ETSI][6] = 46, + [2][1][RTW89_MKK][6] = 48, + [2][1][RTW89_IC][6] = 78, + [2][1][RTW89_ACMA][6] = 44, + [2][1][RTW89_FCC][7] = 78, + [2][1][RTW89_ETSI][7] = 48, + [2][1][RTW89_MKK][7] = 48, + [2][1][RTW89_IC][7] = 78, + [2][1][RTW89_ACMA][7] = 44, + [2][1][RTW89_FCC][8] = 74, + [2][1][RTW89_ETSI][8] = 48, + [2][1][RTW89_MKK][8] = 48, + [2][1][RTW89_IC][8] = 74, + [2][1][RTW89_ACMA][8] = 44, + [2][1][RTW89_FCC][9] = 70, + [2][1][RTW89_ETSI][9] = 48, + [2][1][RTW89_MKK][9] = 48, + [2][1][RTW89_IC][9] = 70, + [2][1][RTW89_ACMA][9] = 44, + [2][1][RTW89_FCC][10] = 70, + [2][1][RTW89_ETSI][10] = 48, + [2][1][RTW89_MKK][10] = 48, + [2][1][RTW89_IC][10] = 70, + [2][1][RTW89_ACMA][10] = 44, + [2][1][RTW89_FCC][11] = 60, + [2][1][RTW89_ETSI][11] = 48, + [2][1][RTW89_MKK][11] = 48, + [2][1][RTW89_IC][11] = 60, + [2][1][RTW89_ACMA][11] = 44, + [2][1][RTW89_FCC][12] = 44, + [2][1][RTW89_ETSI][12] = 46, + [2][1][RTW89_MKK][12] = 48, + [2][1][RTW89_IC][12] = 44, + [2][1][RTW89_ACMA][12] = 42, + [2][1][RTW89_FCC][13] = 127, + [2][1][RTW89_ETSI][13] = 127, + [2][1][RTW89_MKK][13] = 127, + [2][1][RTW89_IC][13] = 127, + [2][1][RTW89_ACMA][13] = 127, +}; + +const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = { + [0][0][RTW89_WW][0] = 24, + [0][0][RTW89_WW][2] = 24, + [0][0][RTW89_WW][4] = 24, + [0][0][RTW89_WW][6] = 24, + [0][0][RTW89_WW][8] = 24, + [0][0][RTW89_WW][10] = 24, + [0][0][RTW89_WW][12] = 24, + [0][0][RTW89_WW][14] = 24, + [0][0][RTW89_WW][15] = 24, + [0][0][RTW89_WW][17] = 24, + [0][0][RTW89_WW][19] = 24, + [0][0][RTW89_WW][21] = 24, + [0][0][RTW89_WW][23] = 24, + [0][0][RTW89_WW][25] = 32, + [0][0][RTW89_WW][27] = 32, + [0][0][RTW89_WW][29] = 32, + [0][0][RTW89_WW][31] = 24, + [0][0][RTW89_WW][33] = 24, + [0][0][RTW89_WW][35] = 24, + [0][0][RTW89_WW][37] = 44, + [0][0][RTW89_WW][38] = 30, + [0][0][RTW89_WW][40] = 30, + [0][0][RTW89_WW][42] = 30, + [0][0][RTW89_WW][44] = 30, + [0][0][RTW89_WW][46] = 30, + [0][0][RTW89_WW][48] = 32, + [0][0][RTW89_WW][50] = 32, + [0][0][RTW89_WW][52] = 32, + [0][1][RTW89_WW][0] = 0, + [0][1][RTW89_WW][2] = 4, + [0][1][RTW89_WW][4] = 0, + [0][1][RTW89_WW][6] = 0, + [0][1][RTW89_WW][8] = 12, + [0][1][RTW89_WW][10] = 12, + [0][1][RTW89_WW][12] = 12, + [0][1][RTW89_WW][14] = 12, + [0][1][RTW89_WW][15] = 12, + [0][1][RTW89_WW][17] = 12, + [0][1][RTW89_WW][19] = 12, + [0][1][RTW89_WW][21] = 12, + [0][1][RTW89_WW][23] = 12, + [0][1][RTW89_WW][25] = 20, + [0][1][RTW89_WW][27] = 18, + [0][1][RTW89_WW][29] = 18, + [0][1][RTW89_WW][31] = 12, + [0][1][RTW89_WW][33] = 12, + [0][1][RTW89_WW][35] = 12, + [0][1][RTW89_WW][37] = 34, + [0][1][RTW89_WW][38] = 
18, + [0][1][RTW89_WW][40] = 18, + [0][1][RTW89_WW][42] = 18, + [0][1][RTW89_WW][44] = 18, + [0][1][RTW89_WW][46] = 18, + [0][1][RTW89_WW][48] = 20, + [0][1][RTW89_WW][50] = 20, + [0][1][RTW89_WW][52] = 20, + [1][0][RTW89_WW][0] = 34, + [1][0][RTW89_WW][2] = 34, + [1][0][RTW89_WW][4] = 34, + [1][0][RTW89_WW][6] = 34, + [1][0][RTW89_WW][8] = 34, + [1][0][RTW89_WW][10] = 34, + [1][0][RTW89_WW][12] = 34, + [1][0][RTW89_WW][14] = 34, + [1][0][RTW89_WW][15] = 34, + [1][0][RTW89_WW][17] = 34, + [1][0][RTW89_WW][19] = 34, + [1][0][RTW89_WW][21] = 34, + [1][0][RTW89_WW][23] = 34, + [1][0][RTW89_WW][25] = 42, + [1][0][RTW89_WW][27] = 44, + [1][0][RTW89_WW][29] = 44, + [1][0][RTW89_WW][31] = 34, + [1][0][RTW89_WW][33] = 34, + [1][0][RTW89_WW][35] = 34, + [1][0][RTW89_WW][37] = 52, + [1][0][RTW89_WW][38] = 30, + [1][0][RTW89_WW][40] = 30, + [1][0][RTW89_WW][42] = 30, + [1][0][RTW89_WW][44] = 30, + [1][0][RTW89_WW][46] = 30, + [1][0][RTW89_WW][48] = 44, + [1][0][RTW89_WW][50] = 44, + [1][0][RTW89_WW][52] = 44, + [1][1][RTW89_WW][0] = 10, + [1][1][RTW89_WW][2] = 14, + [1][1][RTW89_WW][4] = 10, + [1][1][RTW89_WW][6] = 10, + [1][1][RTW89_WW][8] = 20, + [1][1][RTW89_WW][10] = 20, + [1][1][RTW89_WW][12] = 22, + [1][1][RTW89_WW][14] = 22, + [1][1][RTW89_WW][15] = 22, + [1][1][RTW89_WW][17] = 22, + [1][1][RTW89_WW][19] = 22, + [1][1][RTW89_WW][21] = 22, + [1][1][RTW89_WW][23] = 22, + [1][1][RTW89_WW][25] = 30, + [1][1][RTW89_WW][27] = 32, + [1][1][RTW89_WW][29] = 32, + [1][1][RTW89_WW][31] = 22, + [1][1][RTW89_WW][33] = 22, + [1][1][RTW89_WW][35] = 22, + [1][1][RTW89_WW][37] = 42, + [1][1][RTW89_WW][38] = 18, + [1][1][RTW89_WW][40] = 18, + [1][1][RTW89_WW][42] = 18, + [1][1][RTW89_WW][44] = 18, + [1][1][RTW89_WW][46] = 18, + [1][1][RTW89_WW][48] = 32, + [1][1][RTW89_WW][50] = 32, + [1][1][RTW89_WW][52] = 32, + [2][0][RTW89_WW][0] = 46, + [2][0][RTW89_WW][2] = 46, + [2][0][RTW89_WW][4] = 46, + [2][0][RTW89_WW][6] = 46, + [2][0][RTW89_WW][8] = 48, + [2][0][RTW89_WW][10] = 48, + [2][0][RTW89_WW][12] = 46, + [2][0][RTW89_WW][14] = 46, + [2][0][RTW89_WW][15] = 48, + [2][0][RTW89_WW][17] = 48, + [2][0][RTW89_WW][19] = 48, + [2][0][RTW89_WW][21] = 48, + [2][0][RTW89_WW][23] = 48, + [2][0][RTW89_WW][25] = 54, + [2][0][RTW89_WW][27] = 54, + [2][0][RTW89_WW][29] = 54, + [2][0][RTW89_WW][31] = 48, + [2][0][RTW89_WW][33] = 48, + [2][0][RTW89_WW][35] = 48, + [2][0][RTW89_WW][37] = 66, + [2][0][RTW89_WW][38] = 30, + [2][0][RTW89_WW][40] = 30, + [2][0][RTW89_WW][42] = 30, + [2][0][RTW89_WW][44] = 30, + [2][0][RTW89_WW][46] = 30, + [2][0][RTW89_WW][48] = 56, + [2][0][RTW89_WW][50] = 56, + [2][0][RTW89_WW][52] = 56, + [2][1][RTW89_WW][0] = 20, + [2][1][RTW89_WW][2] = 18, + [2][1][RTW89_WW][4] = 22, + [2][1][RTW89_WW][6] = 22, + [2][1][RTW89_WW][8] = 34, + [2][1][RTW89_WW][10] = 34, + [2][1][RTW89_WW][12] = 36, + [2][1][RTW89_WW][14] = 36, + [2][1][RTW89_WW][15] = 36, + [2][1][RTW89_WW][17] = 36, + [2][1][RTW89_WW][19] = 36, + [2][1][RTW89_WW][21] = 36, + [2][1][RTW89_WW][23] = 36, + [2][1][RTW89_WW][25] = 42, + [2][1][RTW89_WW][27] = 42, + [2][1][RTW89_WW][29] = 42, + [2][1][RTW89_WW][31] = 36, + [2][1][RTW89_WW][33] = 36, + [2][1][RTW89_WW][35] = 36, + [2][1][RTW89_WW][37] = 50, + [2][1][RTW89_WW][38] = 18, + [2][1][RTW89_WW][40] = 18, + [2][1][RTW89_WW][42] = 18, + [2][1][RTW89_WW][44] = 18, + [2][1][RTW89_WW][46] = 18, + [2][1][RTW89_WW][48] = 44, + [2][1][RTW89_WW][50] = 44, + [2][1][RTW89_WW][52] = 44, + [0][0][RTW89_FCC][0] = 52, + [0][0][RTW89_ETSI][0] = 32, + [0][0][RTW89_MKK][0] = 26, + [0][0][RTW89_IC][0] = 24, + 
[0][0][RTW89_ACMA][0] = 24, + [0][0][RTW89_FCC][2] = 52, + [0][0][RTW89_ETSI][2] = 32, + [0][0][RTW89_MKK][2] = 26, + [0][0][RTW89_IC][2] = 24, + [0][0][RTW89_ACMA][2] = 24, + [0][0][RTW89_FCC][4] = 52, + [0][0][RTW89_ETSI][4] = 32, + [0][0][RTW89_MKK][4] = 26, + [0][0][RTW89_IC][4] = 24, + [0][0][RTW89_ACMA][4] = 24, + [0][0][RTW89_FCC][6] = 52, + [0][0][RTW89_ETSI][6] = 32, + [0][0][RTW89_MKK][6] = 26, + [0][0][RTW89_IC][6] = 24, + [0][0][RTW89_ACMA][6] = 24, + [0][0][RTW89_FCC][8] = 52, + [0][0][RTW89_ETSI][8] = 30, + [0][0][RTW89_MKK][8] = 26, + [0][0][RTW89_IC][8] = 52, + [0][0][RTW89_ACMA][8] = 24, + [0][0][RTW89_FCC][10] = 52, + [0][0][RTW89_ETSI][10] = 30, + [0][0][RTW89_MKK][10] = 26, + [0][0][RTW89_IC][10] = 52, + [0][0][RTW89_ACMA][10] = 24, + [0][0][RTW89_FCC][12] = 52, + [0][0][RTW89_ETSI][12] = 30, + [0][0][RTW89_MKK][12] = 24, + [0][0][RTW89_IC][12] = 52, + [0][0][RTW89_ACMA][12] = 24, + [0][0][RTW89_FCC][14] = 52, + [0][0][RTW89_ETSI][14] = 30, + [0][0][RTW89_MKK][14] = 24, + [0][0][RTW89_IC][14] = 52, + [0][0][RTW89_ACMA][14] = 24, + [0][0][RTW89_FCC][15] = 52, + [0][0][RTW89_ETSI][15] = 32, + [0][0][RTW89_MKK][15] = 46, + [0][0][RTW89_IC][15] = 52, + [0][0][RTW89_ACMA][15] = 24, + [0][0][RTW89_FCC][17] = 52, + [0][0][RTW89_ETSI][17] = 32, + [0][0][RTW89_MKK][17] = 48, + [0][0][RTW89_IC][17] = 52, + [0][0][RTW89_ACMA][17] = 24, + [0][0][RTW89_FCC][19] = 52, + [0][0][RTW89_ETSI][19] = 32, + [0][0][RTW89_MKK][19] = 48, + [0][0][RTW89_IC][19] = 52, + [0][0][RTW89_ACMA][19] = 24, + [0][0][RTW89_FCC][21] = 52, + [0][0][RTW89_ETSI][21] = 32, + [0][0][RTW89_MKK][21] = 48, + [0][0][RTW89_IC][21] = 52, + [0][0][RTW89_ACMA][21] = 24, + [0][0][RTW89_FCC][23] = 52, + [0][0][RTW89_ETSI][23] = 32, + [0][0][RTW89_MKK][23] = 48, + [0][0][RTW89_IC][23] = 52, + [0][0][RTW89_ACMA][23] = 24, + [0][0][RTW89_FCC][25] = 52, + [0][0][RTW89_ETSI][25] = 32, + [0][0][RTW89_MKK][25] = 48, + [0][0][RTW89_IC][25] = 127, + [0][0][RTW89_ACMA][25] = 127, + [0][0][RTW89_FCC][27] = 52, + [0][0][RTW89_ETSI][27] = 32, + [0][0][RTW89_MKK][27] = 48, + [0][0][RTW89_IC][27] = 127, + [0][0][RTW89_ACMA][27] = 127, + [0][0][RTW89_FCC][29] = 52, + [0][0][RTW89_ETSI][29] = 32, + [0][0][RTW89_MKK][29] = 48, + [0][0][RTW89_IC][29] = 127, + [0][0][RTW89_ACMA][29] = 127, + [0][0][RTW89_FCC][31] = 52, + [0][0][RTW89_ETSI][31] = 32, + [0][0][RTW89_MKK][31] = 48, + [0][0][RTW89_IC][31] = 52, + [0][0][RTW89_ACMA][31] = 24, + [0][0][RTW89_FCC][33] = 52, + [0][0][RTW89_ETSI][33] = 32, + [0][0][RTW89_MKK][33] = 48, + [0][0][RTW89_IC][33] = 52, + [0][0][RTW89_ACMA][33] = 24, + [0][0][RTW89_FCC][35] = 52, + [0][0][RTW89_ETSI][35] = 32, + [0][0][RTW89_MKK][35] = 48, + [0][0][RTW89_IC][35] = 52, + [0][0][RTW89_ACMA][35] = 24, + [0][0][RTW89_FCC][37] = 52, + [0][0][RTW89_ETSI][37] = 127, + [0][0][RTW89_MKK][37] = 44, + [0][0][RTW89_IC][37] = 52, + [0][0][RTW89_ACMA][37] = 52, + [0][0][RTW89_FCC][38] = 84, + [0][0][RTW89_ETSI][38] = 30, + [0][0][RTW89_MKK][38] = 127, + [0][0][RTW89_IC][38] = 84, + [0][0][RTW89_ACMA][38] = 84, + [0][0][RTW89_FCC][40] = 84, + [0][0][RTW89_ETSI][40] = 30, + [0][0][RTW89_MKK][40] = 127, + [0][0][RTW89_IC][40] = 84, + [0][0][RTW89_ACMA][40] = 84, + [0][0][RTW89_FCC][42] = 84, + [0][0][RTW89_ETSI][42] = 30, + [0][0][RTW89_MKK][42] = 127, + [0][0][RTW89_IC][42] = 84, + [0][0][RTW89_ACMA][42] = 84, + [0][0][RTW89_FCC][44] = 84, + [0][0][RTW89_ETSI][44] = 30, + [0][0][RTW89_MKK][44] = 127, + [0][0][RTW89_IC][44] = 84, + [0][0][RTW89_ACMA][44] = 84, + [0][0][RTW89_FCC][46] = 84, + [0][0][RTW89_ETSI][46] = 30, + 
[0][0][RTW89_MKK][46] = 127, + [0][0][RTW89_IC][46] = 84, + [0][0][RTW89_ACMA][46] = 84, + [0][0][RTW89_FCC][48] = 32, + [0][0][RTW89_ETSI][48] = 127, + [0][0][RTW89_MKK][48] = 127, + [0][0][RTW89_IC][48] = 127, + [0][0][RTW89_ACMA][48] = 127, + [0][0][RTW89_FCC][50] = 32, + [0][0][RTW89_ETSI][50] = 127, + [0][0][RTW89_MKK][50] = 127, + [0][0][RTW89_IC][50] = 127, + [0][0][RTW89_ACMA][50] = 127, + [0][0][RTW89_FCC][52] = 32, + [0][0][RTW89_ETSI][52] = 127, + [0][0][RTW89_MKK][52] = 127, + [0][0][RTW89_IC][52] = 127, + [0][0][RTW89_ACMA][52] = 127, + [0][1][RTW89_FCC][0] = 34, + [0][1][RTW89_ETSI][0] = 20, + [0][1][RTW89_MKK][0] = 12, + [0][1][RTW89_IC][0] = 0, + [0][1][RTW89_ACMA][0] = 12, + [0][1][RTW89_FCC][2] = 38, + [0][1][RTW89_ETSI][2] = 20, + [0][1][RTW89_MKK][2] = 12, + [0][1][RTW89_IC][2] = 4, + [0][1][RTW89_ACMA][2] = 12, + [0][1][RTW89_FCC][4] = 34, + [0][1][RTW89_ETSI][4] = 20, + [0][1][RTW89_MKK][4] = 14, + [0][1][RTW89_IC][4] = 0, + [0][1][RTW89_ACMA][4] = 12, + [0][1][RTW89_FCC][6] = 34, + [0][1][RTW89_ETSI][6] = 20, + [0][1][RTW89_MKK][6] = 14, + [0][1][RTW89_IC][6] = 0, + [0][1][RTW89_ACMA][6] = 12, + [0][1][RTW89_FCC][8] = 34, + [0][1][RTW89_ETSI][8] = 18, + [0][1][RTW89_MKK][8] = 14, + [0][1][RTW89_IC][8] = 34, + [0][1][RTW89_ACMA][8] = 12, + [0][1][RTW89_FCC][10] = 34, + [0][1][RTW89_ETSI][10] = 18, + [0][1][RTW89_MKK][10] = 14, + [0][1][RTW89_IC][10] = 34, + [0][1][RTW89_ACMA][10] = 12, + [0][1][RTW89_FCC][12] = 38, + [0][1][RTW89_ETSI][12] = 18, + [0][1][RTW89_MKK][12] = 12, + [0][1][RTW89_IC][12] = 38, + [0][1][RTW89_ACMA][12] = 12, + [0][1][RTW89_FCC][14] = 34, + [0][1][RTW89_ETSI][14] = 18, + [0][1][RTW89_MKK][14] = 12, + [0][1][RTW89_IC][14] = 34, + [0][1][RTW89_ACMA][14] = 12, + [0][1][RTW89_FCC][15] = 34, + [0][1][RTW89_ETSI][15] = 20, + [0][1][RTW89_MKK][15] = 32, + [0][1][RTW89_IC][15] = 34, + [0][1][RTW89_ACMA][15] = 12, + [0][1][RTW89_FCC][17] = 34, + [0][1][RTW89_ETSI][17] = 20, + [0][1][RTW89_MKK][17] = 34, + [0][1][RTW89_IC][17] = 34, + [0][1][RTW89_ACMA][17] = 12, + [0][1][RTW89_FCC][19] = 38, + [0][1][RTW89_ETSI][19] = 20, + [0][1][RTW89_MKK][19] = 34, + [0][1][RTW89_IC][19] = 38, + [0][1][RTW89_ACMA][19] = 12, + [0][1][RTW89_FCC][21] = 38, + [0][1][RTW89_ETSI][21] = 20, + [0][1][RTW89_MKK][21] = 34, + [0][1][RTW89_IC][21] = 38, + [0][1][RTW89_ACMA][21] = 12, + [0][1][RTW89_FCC][23] = 38, + [0][1][RTW89_ETSI][23] = 20, + [0][1][RTW89_MKK][23] = 34, + [0][1][RTW89_IC][23] = 38, + [0][1][RTW89_ACMA][23] = 12, + [0][1][RTW89_FCC][25] = 38, + [0][1][RTW89_ETSI][25] = 20, + [0][1][RTW89_MKK][25] = 34, + [0][1][RTW89_IC][25] = 127, + [0][1][RTW89_ACMA][25] = 127, + [0][1][RTW89_FCC][27] = 38, + [0][1][RTW89_ETSI][27] = 18, + [0][1][RTW89_MKK][27] = 34, + [0][1][RTW89_IC][27] = 127, + [0][1][RTW89_ACMA][27] = 127, + [0][1][RTW89_FCC][29] = 38, + [0][1][RTW89_ETSI][29] = 18, + [0][1][RTW89_MKK][29] = 34, + [0][1][RTW89_IC][29] = 127, + [0][1][RTW89_ACMA][29] = 127, + [0][1][RTW89_FCC][31] = 38, + [0][1][RTW89_ETSI][31] = 18, + [0][1][RTW89_MKK][31] = 34, + [0][1][RTW89_IC][31] = 34, + [0][1][RTW89_ACMA][31] = 12, + [0][1][RTW89_FCC][33] = 34, + [0][1][RTW89_ETSI][33] = 18, + [0][1][RTW89_MKK][33] = 34, + [0][1][RTW89_IC][33] = 34, + [0][1][RTW89_ACMA][33] = 12, + [0][1][RTW89_FCC][35] = 34, + [0][1][RTW89_ETSI][35] = 18, + [0][1][RTW89_MKK][35] = 34, + [0][1][RTW89_IC][35] = 34, + [0][1][RTW89_ACMA][35] = 12, + [0][1][RTW89_FCC][37] = 38, + [0][1][RTW89_ETSI][37] = 127, + [0][1][RTW89_MKK][37] = 34, + [0][1][RTW89_IC][37] = 38, + [0][1][RTW89_ACMA][37] = 38, + 
[0][1][RTW89_FCC][38] = 82, + [0][1][RTW89_ETSI][38] = 18, + [0][1][RTW89_MKK][38] = 127, + [0][1][RTW89_IC][38] = 82, + [0][1][RTW89_ACMA][38] = 84, + [0][1][RTW89_FCC][40] = 82, + [0][1][RTW89_ETSI][40] = 18, + [0][1][RTW89_MKK][40] = 127, + [0][1][RTW89_IC][40] = 82, + [0][1][RTW89_ACMA][40] = 84, + [0][1][RTW89_FCC][42] = 82, + [0][1][RTW89_ETSI][42] = 18, + [0][1][RTW89_MKK][42] = 127, + [0][1][RTW89_IC][42] = 82, + [0][1][RTW89_ACMA][42] = 84, + [0][1][RTW89_FCC][44] = 82, + [0][1][RTW89_ETSI][44] = 18, + [0][1][RTW89_MKK][44] = 127, + [0][1][RTW89_IC][44] = 82, + [0][1][RTW89_ACMA][44] = 84, + [0][1][RTW89_FCC][46] = 82, + [0][1][RTW89_ETSI][46] = 18, + [0][1][RTW89_MKK][46] = 127, + [0][1][RTW89_IC][46] = 82, + [0][1][RTW89_ACMA][46] = 84, + [0][1][RTW89_FCC][48] = 20, + [0][1][RTW89_ETSI][48] = 127, + [0][1][RTW89_MKK][48] = 127, + [0][1][RTW89_IC][48] = 127, + [0][1][RTW89_ACMA][48] = 127, + [0][1][RTW89_FCC][50] = 20, + [0][1][RTW89_ETSI][50] = 127, + [0][1][RTW89_MKK][50] = 127, + [0][1][RTW89_IC][50] = 127, + [0][1][RTW89_ACMA][50] = 127, + [0][1][RTW89_FCC][52] = 20, + [0][1][RTW89_ETSI][52] = 127, + [0][1][RTW89_MKK][52] = 127, + [0][1][RTW89_IC][52] = 127, + [0][1][RTW89_ACMA][52] = 127, + [1][0][RTW89_FCC][0] = 62, + [1][0][RTW89_ETSI][0] = 42, + [1][0][RTW89_MKK][0] = 36, + [1][0][RTW89_IC][0] = 36, + [1][0][RTW89_ACMA][0] = 34, + [1][0][RTW89_FCC][2] = 62, + [1][0][RTW89_ETSI][2] = 42, + [1][0][RTW89_MKK][2] = 36, + [1][0][RTW89_IC][2] = 36, + [1][0][RTW89_ACMA][2] = 34, + [1][0][RTW89_FCC][4] = 62, + [1][0][RTW89_ETSI][4] = 42, + [1][0][RTW89_MKK][4] = 34, + [1][0][RTW89_IC][4] = 36, + [1][0][RTW89_ACMA][4] = 34, + [1][0][RTW89_FCC][6] = 62, + [1][0][RTW89_ETSI][6] = 42, + [1][0][RTW89_MKK][6] = 34, + [1][0][RTW89_IC][6] = 36, + [1][0][RTW89_ACMA][6] = 34, + [1][0][RTW89_FCC][8] = 62, + [1][0][RTW89_ETSI][8] = 42, + [1][0][RTW89_MKK][8] = 36, + [1][0][RTW89_IC][8] = 62, + [1][0][RTW89_ACMA][8] = 34, + [1][0][RTW89_FCC][10] = 62, + [1][0][RTW89_ETSI][10] = 42, + [1][0][RTW89_MKK][10] = 36, + [1][0][RTW89_IC][10] = 62, + [1][0][RTW89_ACMA][10] = 34, + [1][0][RTW89_FCC][12] = 64, + [1][0][RTW89_ETSI][12] = 42, + [1][0][RTW89_MKK][12] = 36, + [1][0][RTW89_IC][12] = 64, + [1][0][RTW89_ACMA][12] = 34, + [1][0][RTW89_FCC][14] = 62, + [1][0][RTW89_ETSI][14] = 42, + [1][0][RTW89_MKK][14] = 36, + [1][0][RTW89_IC][14] = 62, + [1][0][RTW89_ACMA][14] = 34, + [1][0][RTW89_FCC][15] = 62, + [1][0][RTW89_ETSI][15] = 42, + [1][0][RTW89_MKK][15] = 54, + [1][0][RTW89_IC][15] = 62, + [1][0][RTW89_ACMA][15] = 34, + [1][0][RTW89_FCC][17] = 62, + [1][0][RTW89_ETSI][17] = 42, + [1][0][RTW89_MKK][17] = 58, + [1][0][RTW89_IC][17] = 62, + [1][0][RTW89_ACMA][17] = 34, + [1][0][RTW89_FCC][19] = 62, + [1][0][RTW89_ETSI][19] = 42, + [1][0][RTW89_MKK][19] = 58, + [1][0][RTW89_IC][19] = 62, + [1][0][RTW89_ACMA][19] = 34, + [1][0][RTW89_FCC][21] = 62, + [1][0][RTW89_ETSI][21] = 42, + [1][0][RTW89_MKK][21] = 58, + [1][0][RTW89_IC][21] = 62, + [1][0][RTW89_ACMA][21] = 34, + [1][0][RTW89_FCC][23] = 62, + [1][0][RTW89_ETSI][23] = 42, + [1][0][RTW89_MKK][23] = 58, + [1][0][RTW89_IC][23] = 62, + [1][0][RTW89_ACMA][23] = 34, + [1][0][RTW89_FCC][25] = 62, + [1][0][RTW89_ETSI][25] = 42, + [1][0][RTW89_MKK][25] = 58, + [1][0][RTW89_IC][25] = 127, + [1][0][RTW89_ACMA][25] = 127, + [1][0][RTW89_FCC][27] = 62, + [1][0][RTW89_ETSI][27] = 44, + [1][0][RTW89_MKK][27] = 58, + [1][0][RTW89_IC][27] = 127, + [1][0][RTW89_ACMA][27] = 127, + [1][0][RTW89_FCC][29] = 62, + [1][0][RTW89_ETSI][29] = 44, + [1][0][RTW89_MKK][29] = 
58, + [1][0][RTW89_IC][29] = 127, + [1][0][RTW89_ACMA][29] = 127, + [1][0][RTW89_FCC][31] = 62, + [1][0][RTW89_ETSI][31] = 44, + [1][0][RTW89_MKK][31] = 58, + [1][0][RTW89_IC][31] = 62, + [1][0][RTW89_ACMA][31] = 34, + [1][0][RTW89_FCC][33] = 62, + [1][0][RTW89_ETSI][33] = 44, + [1][0][RTW89_MKK][33] = 58, + [1][0][RTW89_IC][33] = 62, + [1][0][RTW89_ACMA][33] = 34, + [1][0][RTW89_FCC][35] = 62, + [1][0][RTW89_ETSI][35] = 44, + [1][0][RTW89_MKK][35] = 58, + [1][0][RTW89_IC][35] = 62, + [1][0][RTW89_ACMA][35] = 34, + [1][0][RTW89_FCC][37] = 64, + [1][0][RTW89_ETSI][37] = 127, + [1][0][RTW89_MKK][37] = 52, + [1][0][RTW89_IC][37] = 64, + [1][0][RTW89_ACMA][37] = 64, + [1][0][RTW89_FCC][38] = 84, + [1][0][RTW89_ETSI][38] = 30, + [1][0][RTW89_MKK][38] = 127, + [1][0][RTW89_IC][38] = 84, + [1][0][RTW89_ACMA][38] = 84, + [1][0][RTW89_FCC][40] = 84, + [1][0][RTW89_ETSI][40] = 30, + [1][0][RTW89_MKK][40] = 127, + [1][0][RTW89_IC][40] = 84, + [1][0][RTW89_ACMA][40] = 84, + [1][0][RTW89_FCC][42] = 84, + [1][0][RTW89_ETSI][42] = 30, + [1][0][RTW89_MKK][42] = 127, + [1][0][RTW89_IC][42] = 84, + [1][0][RTW89_ACMA][42] = 84, + [1][0][RTW89_FCC][44] = 84, + [1][0][RTW89_ETSI][44] = 30, + [1][0][RTW89_MKK][44] = 127, + [1][0][RTW89_IC][44] = 84, + [1][0][RTW89_ACMA][44] = 84, + [1][0][RTW89_FCC][46] = 84, + [1][0][RTW89_ETSI][46] = 30, + [1][0][RTW89_MKK][46] = 127, + [1][0][RTW89_IC][46] = 84, + [1][0][RTW89_ACMA][46] = 84, + [1][0][RTW89_FCC][48] = 44, + [1][0][RTW89_ETSI][48] = 127, + [1][0][RTW89_MKK][48] = 127, + [1][0][RTW89_IC][48] = 127, + [1][0][RTW89_ACMA][48] = 127, + [1][0][RTW89_FCC][50] = 44, + [1][0][RTW89_ETSI][50] = 127, + [1][0][RTW89_MKK][50] = 127, + [1][0][RTW89_IC][50] = 127, + [1][0][RTW89_ACMA][50] = 127, + [1][0][RTW89_FCC][52] = 44, + [1][0][RTW89_ETSI][52] = 127, + [1][0][RTW89_MKK][52] = 127, + [1][0][RTW89_IC][52] = 127, + [1][0][RTW89_ACMA][52] = 127, + [1][1][RTW89_FCC][0] = 42, + [1][1][RTW89_ETSI][0] = 32, + [1][1][RTW89_MKK][0] = 22, + [1][1][RTW89_IC][0] = 10, + [1][1][RTW89_ACMA][0] = 22, + [1][1][RTW89_FCC][2] = 44, + [1][1][RTW89_ETSI][2] = 32, + [1][1][RTW89_MKK][2] = 22, + [1][1][RTW89_IC][2] = 14, + [1][1][RTW89_ACMA][2] = 22, + [1][1][RTW89_FCC][4] = 42, + [1][1][RTW89_ETSI][4] = 32, + [1][1][RTW89_MKK][4] = 20, + [1][1][RTW89_IC][4] = 10, + [1][1][RTW89_ACMA][4] = 22, + [1][1][RTW89_FCC][6] = 42, + [1][1][RTW89_ETSI][6] = 32, + [1][1][RTW89_MKK][6] = 20, + [1][1][RTW89_IC][6] = 10, + [1][1][RTW89_ACMA][6] = 22, + [1][1][RTW89_FCC][8] = 44, + [1][1][RTW89_ETSI][8] = 32, + [1][1][RTW89_MKK][8] = 20, + [1][1][RTW89_IC][8] = 44, + [1][1][RTW89_ACMA][8] = 22, + [1][1][RTW89_FCC][10] = 44, + [1][1][RTW89_ETSI][10] = 32, + [1][1][RTW89_MKK][10] = 20, + [1][1][RTW89_IC][10] = 44, + [1][1][RTW89_ACMA][10] = 22, + [1][1][RTW89_FCC][12] = 46, + [1][1][RTW89_ETSI][12] = 32, + [1][1][RTW89_MKK][12] = 22, + [1][1][RTW89_IC][12] = 46, + [1][1][RTW89_ACMA][12] = 22, + [1][1][RTW89_FCC][14] = 42, + [1][1][RTW89_ETSI][14] = 32, + [1][1][RTW89_MKK][14] = 22, + [1][1][RTW89_IC][14] = 40, + [1][1][RTW89_ACMA][14] = 22, + [1][1][RTW89_FCC][15] = 42, + [1][1][RTW89_ETSI][15] = 30, + [1][1][RTW89_MKK][15] = 42, + [1][1][RTW89_IC][15] = 42, + [1][1][RTW89_ACMA][15] = 22, + [1][1][RTW89_FCC][17] = 42, + [1][1][RTW89_ETSI][17] = 30, + [1][1][RTW89_MKK][17] = 44, + [1][1][RTW89_IC][17] = 42, + [1][1][RTW89_ACMA][17] = 22, + [1][1][RTW89_FCC][19] = 42, + [1][1][RTW89_ETSI][19] = 30, + [1][1][RTW89_MKK][19] = 44, + [1][1][RTW89_IC][19] = 42, + [1][1][RTW89_ACMA][19] = 22, + 
[1][1][RTW89_FCC][21] = 42, + [1][1][RTW89_ETSI][21] = 30, + [1][1][RTW89_MKK][21] = 44, + [1][1][RTW89_IC][21] = 42, + [1][1][RTW89_ACMA][21] = 22, + [1][1][RTW89_FCC][23] = 42, + [1][1][RTW89_ETSI][23] = 30, + [1][1][RTW89_MKK][23] = 44, + [1][1][RTW89_IC][23] = 42, + [1][1][RTW89_ACMA][23] = 22, + [1][1][RTW89_FCC][25] = 42, + [1][1][RTW89_ETSI][25] = 30, + [1][1][RTW89_MKK][25] = 44, + [1][1][RTW89_IC][25] = 127, + [1][1][RTW89_ACMA][25] = 127, + [1][1][RTW89_FCC][27] = 42, + [1][1][RTW89_ETSI][27] = 32, + [1][1][RTW89_MKK][27] = 44, + [1][1][RTW89_IC][27] = 127, + [1][1][RTW89_ACMA][27] = 127, + [1][1][RTW89_FCC][29] = 42, + [1][1][RTW89_ETSI][29] = 32, + [1][1][RTW89_MKK][29] = 44, + [1][1][RTW89_IC][29] = 127, + [1][1][RTW89_ACMA][29] = 127, + [1][1][RTW89_FCC][31] = 42, + [1][1][RTW89_ETSI][31] = 32, + [1][1][RTW89_MKK][31] = 44, + [1][1][RTW89_IC][31] = 38, + [1][1][RTW89_ACMA][31] = 22, + [1][1][RTW89_FCC][33] = 40, + [1][1][RTW89_ETSI][33] = 32, + [1][1][RTW89_MKK][33] = 44, + [1][1][RTW89_IC][33] = 38, + [1][1][RTW89_ACMA][33] = 22, + [1][1][RTW89_FCC][35] = 40, + [1][1][RTW89_ETSI][35] = 32, + [1][1][RTW89_MKK][35] = 44, + [1][1][RTW89_IC][35] = 38, + [1][1][RTW89_ACMA][35] = 22, + [1][1][RTW89_FCC][37] = 48, + [1][1][RTW89_ETSI][37] = 127, + [1][1][RTW89_MKK][37] = 42, + [1][1][RTW89_IC][37] = 48, + [1][1][RTW89_ACMA][37] = 48, + [1][1][RTW89_FCC][38] = 84, + [1][1][RTW89_ETSI][38] = 18, + [1][1][RTW89_MKK][38] = 127, + [1][1][RTW89_IC][38] = 84, + [1][1][RTW89_ACMA][38] = 82, + [1][1][RTW89_FCC][40] = 84, + [1][1][RTW89_ETSI][40] = 18, + [1][1][RTW89_MKK][40] = 127, + [1][1][RTW89_IC][40] = 84, + [1][1][RTW89_ACMA][40] = 82, + [1][1][RTW89_FCC][42] = 84, + [1][1][RTW89_ETSI][42] = 18, + [1][1][RTW89_MKK][42] = 127, + [1][1][RTW89_IC][42] = 84, + [1][1][RTW89_ACMA][42] = 84, + [1][1][RTW89_FCC][44] = 84, + [1][1][RTW89_ETSI][44] = 18, + [1][1][RTW89_MKK][44] = 127, + [1][1][RTW89_IC][44] = 84, + [1][1][RTW89_ACMA][44] = 84, + [1][1][RTW89_FCC][46] = 84, + [1][1][RTW89_ETSI][46] = 18, + [1][1][RTW89_MKK][46] = 127, + [1][1][RTW89_IC][46] = 84, + [1][1][RTW89_ACMA][46] = 84, + [1][1][RTW89_FCC][48] = 32, + [1][1][RTW89_ETSI][48] = 127, + [1][1][RTW89_MKK][48] = 127, + [1][1][RTW89_IC][48] = 127, + [1][1][RTW89_ACMA][48] = 127, + [1][1][RTW89_FCC][50] = 32, + [1][1][RTW89_ETSI][50] = 127, + [1][1][RTW89_MKK][50] = 127, + [1][1][RTW89_IC][50] = 127, + [1][1][RTW89_ACMA][50] = 127, + [1][1][RTW89_FCC][52] = 32, + [1][1][RTW89_ETSI][52] = 127, + [1][1][RTW89_MKK][52] = 127, + [1][1][RTW89_IC][52] = 127, + [1][1][RTW89_ACMA][52] = 127, + [2][0][RTW89_FCC][0] = 70, + [2][0][RTW89_ETSI][0] = 54, + [2][0][RTW89_MKK][0] = 48, + [2][0][RTW89_IC][0] = 46, + [2][0][RTW89_ACMA][0] = 48, + [2][0][RTW89_FCC][2] = 70, + [2][0][RTW89_ETSI][2] = 54, + [2][0][RTW89_MKK][2] = 48, + [2][0][RTW89_IC][2] = 46, + [2][0][RTW89_ACMA][2] = 48, + [2][0][RTW89_FCC][4] = 70, + [2][0][RTW89_ETSI][4] = 54, + [2][0][RTW89_MKK][4] = 48, + [2][0][RTW89_IC][4] = 46, + [2][0][RTW89_ACMA][4] = 48, + [2][0][RTW89_FCC][6] = 70, + [2][0][RTW89_ETSI][6] = 54, + [2][0][RTW89_MKK][6] = 48, + [2][0][RTW89_IC][6] = 46, + [2][0][RTW89_ACMA][6] = 48, + [2][0][RTW89_FCC][8] = 70, + [2][0][RTW89_ETSI][8] = 54, + [2][0][RTW89_MKK][8] = 48, + [2][0][RTW89_IC][8] = 66, + [2][0][RTW89_ACMA][8] = 48, + [2][0][RTW89_FCC][10] = 70, + [2][0][RTW89_ETSI][10] = 54, + [2][0][RTW89_MKK][10] = 48, + [2][0][RTW89_IC][10] = 66, + [2][0][RTW89_ACMA][10] = 48, + [2][0][RTW89_FCC][12] = 70, + [2][0][RTW89_ETSI][12] = 54, + [2][0][RTW89_MKK][12] 
= 46, + [2][0][RTW89_IC][12] = 66, + [2][0][RTW89_ACMA][12] = 48, + [2][0][RTW89_FCC][14] = 70, + [2][0][RTW89_ETSI][14] = 54, + [2][0][RTW89_MKK][14] = 46, + [2][0][RTW89_IC][14] = 66, + [2][0][RTW89_ACMA][14] = 48, + [2][0][RTW89_FCC][15] = 70, + [2][0][RTW89_ETSI][15] = 54, + [2][0][RTW89_MKK][15] = 68, + [2][0][RTW89_IC][15] = 70, + [2][0][RTW89_ACMA][15] = 48, + [2][0][RTW89_FCC][17] = 70, + [2][0][RTW89_ETSI][17] = 54, + [2][0][RTW89_MKK][17] = 70, + [2][0][RTW89_IC][17] = 70, + [2][0][RTW89_ACMA][17] = 48, + [2][0][RTW89_FCC][19] = 70, + [2][0][RTW89_ETSI][19] = 54, + [2][0][RTW89_MKK][19] = 70, + [2][0][RTW89_IC][19] = 70, + [2][0][RTW89_ACMA][19] = 48, + [2][0][RTW89_FCC][21] = 70, + [2][0][RTW89_ETSI][21] = 54, + [2][0][RTW89_MKK][21] = 70, + [2][0][RTW89_IC][21] = 70, + [2][0][RTW89_ACMA][21] = 48, + [2][0][RTW89_FCC][23] = 70, + [2][0][RTW89_ETSI][23] = 54, + [2][0][RTW89_MKK][23] = 70, + [2][0][RTW89_IC][23] = 70, + [2][0][RTW89_ACMA][23] = 48, + [2][0][RTW89_FCC][25] = 70, + [2][0][RTW89_ETSI][25] = 54, + [2][0][RTW89_MKK][25] = 70, + [2][0][RTW89_IC][25] = 127, + [2][0][RTW89_ACMA][25] = 127, + [2][0][RTW89_FCC][27] = 70, + [2][0][RTW89_ETSI][27] = 54, + [2][0][RTW89_MKK][27] = 70, + [2][0][RTW89_IC][27] = 127, + [2][0][RTW89_ACMA][27] = 127, + [2][0][RTW89_FCC][29] = 70, + [2][0][RTW89_ETSI][29] = 54, + [2][0][RTW89_MKK][29] = 70, + [2][0][RTW89_IC][29] = 127, + [2][0][RTW89_ACMA][29] = 127, + [2][0][RTW89_FCC][31] = 70, + [2][0][RTW89_ETSI][31] = 54, + [2][0][RTW89_MKK][31] = 70, + [2][0][RTW89_IC][31] = 72, + [2][0][RTW89_ACMA][31] = 48, + [2][0][RTW89_FCC][33] = 72, + [2][0][RTW89_ETSI][33] = 54, + [2][0][RTW89_MKK][33] = 70, + [2][0][RTW89_IC][33] = 72, + [2][0][RTW89_ACMA][33] = 48, + [2][0][RTW89_FCC][35] = 72, + [2][0][RTW89_ETSI][35] = 54, + [2][0][RTW89_MKK][35] = 70, + [2][0][RTW89_IC][35] = 72, + [2][0][RTW89_ACMA][35] = 48, + [2][0][RTW89_FCC][37] = 70, + [2][0][RTW89_ETSI][37] = 127, + [2][0][RTW89_MKK][37] = 66, + [2][0][RTW89_IC][37] = 70, + [2][0][RTW89_ACMA][37] = 76, + [2][0][RTW89_FCC][38] = 84, + [2][0][RTW89_ETSI][38] = 30, + [2][0][RTW89_MKK][38] = 127, + [2][0][RTW89_IC][38] = 84, + [2][0][RTW89_ACMA][38] = 84, + [2][0][RTW89_FCC][40] = 84, + [2][0][RTW89_ETSI][40] = 30, + [2][0][RTW89_MKK][40] = 127, + [2][0][RTW89_IC][40] = 84, + [2][0][RTW89_ACMA][40] = 84, + [2][0][RTW89_FCC][42] = 84, + [2][0][RTW89_ETSI][42] = 30, + [2][0][RTW89_MKK][42] = 127, + [2][0][RTW89_IC][42] = 84, + [2][0][RTW89_ACMA][42] = 84, + [2][0][RTW89_FCC][44] = 84, + [2][0][RTW89_ETSI][44] = 30, + [2][0][RTW89_MKK][44] = 127, + [2][0][RTW89_IC][44] = 84, + [2][0][RTW89_ACMA][44] = 84, + [2][0][RTW89_FCC][46] = 84, + [2][0][RTW89_ETSI][46] = 30, + [2][0][RTW89_MKK][46] = 127, + [2][0][RTW89_IC][46] = 84, + [2][0][RTW89_ACMA][46] = 84, + [2][0][RTW89_FCC][48] = 56, + [2][0][RTW89_ETSI][48] = 127, + [2][0][RTW89_MKK][48] = 127, + [2][0][RTW89_IC][48] = 127, + [2][0][RTW89_ACMA][48] = 127, + [2][0][RTW89_FCC][50] = 56, + [2][0][RTW89_ETSI][50] = 127, + [2][0][RTW89_MKK][50] = 127, + [2][0][RTW89_IC][50] = 127, + [2][0][RTW89_ACMA][50] = 127, + [2][0][RTW89_FCC][52] = 56, + [2][0][RTW89_ETSI][52] = 127, + [2][0][RTW89_MKK][52] = 127, + [2][0][RTW89_IC][52] = 127, + [2][0][RTW89_ACMA][52] = 127, + [2][1][RTW89_FCC][0] = 50, + [2][1][RTW89_ETSI][0] = 42, + [2][1][RTW89_MKK][0] = 36, + [2][1][RTW89_IC][0] = 20, + [2][1][RTW89_ACMA][0] = 36, + [2][1][RTW89_FCC][2] = 50, + [2][1][RTW89_ETSI][2] = 42, + [2][1][RTW89_MKK][2] = 36, + [2][1][RTW89_IC][2] = 18, + [2][1][RTW89_ACMA][2] = 36, + 
[2][1][RTW89_FCC][4] = 50, + [2][1][RTW89_ETSI][4] = 42, + [2][1][RTW89_MKK][4] = 36, + [2][1][RTW89_IC][4] = 22, + [2][1][RTW89_ACMA][4] = 36, + [2][1][RTW89_FCC][6] = 50, + [2][1][RTW89_ETSI][6] = 42, + [2][1][RTW89_MKK][6] = 36, + [2][1][RTW89_IC][6] = 22, + [2][1][RTW89_ACMA][6] = 36, + [2][1][RTW89_FCC][8] = 50, + [2][1][RTW89_ETSI][8] = 42, + [2][1][RTW89_MKK][8] = 34, + [2][1][RTW89_IC][8] = 50, + [2][1][RTW89_ACMA][8] = 36, + [2][1][RTW89_FCC][10] = 50, + [2][1][RTW89_ETSI][10] = 42, + [2][1][RTW89_MKK][10] = 34, + [2][1][RTW89_IC][10] = 50, + [2][1][RTW89_ACMA][10] = 36, + [2][1][RTW89_FCC][12] = 52, + [2][1][RTW89_ETSI][12] = 42, + [2][1][RTW89_MKK][12] = 36, + [2][1][RTW89_IC][12] = 52, + [2][1][RTW89_ACMA][12] = 36, + [2][1][RTW89_FCC][14] = 52, + [2][1][RTW89_ETSI][14] = 42, + [2][1][RTW89_MKK][14] = 36, + [2][1][RTW89_IC][14] = 52, + [2][1][RTW89_ACMA][14] = 36, + [2][1][RTW89_FCC][15] = 50, + [2][1][RTW89_ETSI][15] = 42, + [2][1][RTW89_MKK][15] = 54, + [2][1][RTW89_IC][15] = 50, + [2][1][RTW89_ACMA][15] = 36, + [2][1][RTW89_FCC][17] = 50, + [2][1][RTW89_ETSI][17] = 42, + [2][1][RTW89_MKK][17] = 56, + [2][1][RTW89_IC][17] = 50, + [2][1][RTW89_ACMA][17] = 36, + [2][1][RTW89_FCC][19] = 50, + [2][1][RTW89_ETSI][19] = 42, + [2][1][RTW89_MKK][19] = 56, + [2][1][RTW89_IC][19] = 50, + [2][1][RTW89_ACMA][19] = 36, + [2][1][RTW89_FCC][21] = 50, + [2][1][RTW89_ETSI][21] = 42, + [2][1][RTW89_MKK][21] = 56, + [2][1][RTW89_IC][21] = 50, + [2][1][RTW89_ACMA][21] = 36, + [2][1][RTW89_FCC][23] = 50, + [2][1][RTW89_ETSI][23] = 42, + [2][1][RTW89_MKK][23] = 56, + [2][1][RTW89_IC][23] = 50, + [2][1][RTW89_ACMA][23] = 36, + [2][1][RTW89_FCC][25] = 50, + [2][1][RTW89_ETSI][25] = 42, + [2][1][RTW89_MKK][25] = 56, + [2][1][RTW89_IC][25] = 127, + [2][1][RTW89_ACMA][25] = 127, + [2][1][RTW89_FCC][27] = 50, + [2][1][RTW89_ETSI][27] = 42, + [2][1][RTW89_MKK][27] = 56, + [2][1][RTW89_IC][27] = 127, + [2][1][RTW89_ACMA][27] = 127, + [2][1][RTW89_FCC][29] = 50, + [2][1][RTW89_ETSI][29] = 42, + [2][1][RTW89_MKK][29] = 56, + [2][1][RTW89_IC][29] = 127, + [2][1][RTW89_ACMA][29] = 127, + [2][1][RTW89_FCC][31] = 50, + [2][1][RTW89_ETSI][31] = 42, + [2][1][RTW89_MKK][31] = 56, + [2][1][RTW89_IC][31] = 50, + [2][1][RTW89_ACMA][31] = 36, + [2][1][RTW89_FCC][33] = 50, + [2][1][RTW89_ETSI][33] = 42, + [2][1][RTW89_MKK][33] = 56, + [2][1][RTW89_IC][33] = 50, + [2][1][RTW89_ACMA][33] = 36, + [2][1][RTW89_FCC][35] = 50, + [2][1][RTW89_ETSI][35] = 42, + [2][1][RTW89_MKK][35] = 56, + [2][1][RTW89_IC][35] = 50, + [2][1][RTW89_ACMA][35] = 36, + [2][1][RTW89_FCC][37] = 50, + [2][1][RTW89_ETSI][37] = 127, + [2][1][RTW89_MKK][37] = 54, + [2][1][RTW89_IC][37] = 50, + [2][1][RTW89_ACMA][37] = 60, + [2][1][RTW89_FCC][38] = 84, + [2][1][RTW89_ETSI][38] = 18, + [2][1][RTW89_MKK][38] = 127, + [2][1][RTW89_IC][38] = 84, + [2][1][RTW89_ACMA][38] = 84, + [2][1][RTW89_FCC][40] = 84, + [2][1][RTW89_ETSI][40] = 18, + [2][1][RTW89_MKK][40] = 127, + [2][1][RTW89_IC][40] = 84, + [2][1][RTW89_ACMA][40] = 84, + [2][1][RTW89_FCC][42] = 84, + [2][1][RTW89_ETSI][42] = 18, + [2][1][RTW89_MKK][42] = 127, + [2][1][RTW89_IC][42] = 84, + [2][1][RTW89_ACMA][42] = 84, + [2][1][RTW89_FCC][44] = 84, + [2][1][RTW89_ETSI][44] = 18, + [2][1][RTW89_MKK][44] = 127, + [2][1][RTW89_IC][44] = 84, + [2][1][RTW89_ACMA][44] = 84, + [2][1][RTW89_FCC][46] = 84, + [2][1][RTW89_ETSI][46] = 18, + [2][1][RTW89_MKK][46] = 127, + [2][1][RTW89_IC][46] = 84, + [2][1][RTW89_ACMA][46] = 84, + [2][1][RTW89_FCC][48] = 44, + [2][1][RTW89_ETSI][48] = 127, + [2][1][RTW89_MKK][48] 
= 127, + [2][1][RTW89_IC][48] = 127, + [2][1][RTW89_ACMA][48] = 127, + [2][1][RTW89_FCC][50] = 44, + [2][1][RTW89_ETSI][50] = 127, + [2][1][RTW89_MKK][50] = 127, + [2][1][RTW89_IC][50] = 127, + [2][1][RTW89_ACMA][50] = 127, + [2][1][RTW89_FCC][52] = 44, + [2][1][RTW89_ETSI][52] = 127, + [2][1][RTW89_MKK][52] = 127, + [2][1][RTW89_IC][52] = 127, + [2][1][RTW89_ACMA][52] = 127, +}; + +const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_6G_CH_NUM] = { + [0][0][RTW89_WW][0] = 76, + [0][0][RTW89_WW][2] = 76, + [0][0][RTW89_WW][4] = 76, + [0][0][RTW89_WW][6] = 76, + [0][0][RTW89_WW][8] = 76, + [0][0][RTW89_WW][10] = 76, + [0][0][RTW89_WW][12] = 76, + [0][0][RTW89_WW][14] = 76, + [0][0][RTW89_WW][15] = 76, + [0][0][RTW89_WW][17] = 76, + [0][0][RTW89_WW][19] = 76, + [0][0][RTW89_WW][21] = 76, + [0][0][RTW89_WW][23] = 76, + [0][0][RTW89_WW][25] = 76, + [0][0][RTW89_WW][27] = 76, + [0][0][RTW89_WW][29] = 76, + [0][0][RTW89_WW][30] = 76, + [0][0][RTW89_WW][32] = 76, + [0][0][RTW89_WW][34] = 76, + [0][0][RTW89_WW][36] = 76, + [0][0][RTW89_WW][38] = 76, + [0][0][RTW89_WW][40] = 76, + [0][0][RTW89_WW][42] = 76, + [0][0][RTW89_WW][44] = 76, + [0][0][RTW89_WW][45] = 76, + [0][0][RTW89_WW][47] = 76, + [0][0][RTW89_WW][49] = 76, + [0][0][RTW89_WW][51] = 76, + [0][0][RTW89_WW][53] = 76, + [0][0][RTW89_WW][55] = 76, + [0][0][RTW89_WW][57] = 76, + [0][0][RTW89_WW][59] = 76, + [0][0][RTW89_WW][60] = 76, + [0][0][RTW89_WW][62] = 76, + [0][0][RTW89_WW][64] = 76, + [0][0][RTW89_WW][66] = 76, + [0][0][RTW89_WW][68] = 76, + [0][0][RTW89_WW][70] = 76, + [0][0][RTW89_WW][72] = 76, + [0][0][RTW89_WW][74] = 76, + [0][0][RTW89_WW][75] = 76, + [0][0][RTW89_WW][77] = 76, + [0][0][RTW89_WW][79] = 76, + [0][0][RTW89_WW][81] = 76, + [0][0][RTW89_WW][83] = 76, + [0][0][RTW89_WW][85] = 76, + [0][0][RTW89_WW][87] = 76, + [0][0][RTW89_WW][89] = 76, + [0][0][RTW89_WW][90] = 76, + [0][0][RTW89_WW][92] = 76, + [0][0][RTW89_WW][94] = 76, + [0][0][RTW89_WW][96] = 76, + [0][0][RTW89_WW][98] = 76, + [0][0][RTW89_WW][100] = 76, + [0][0][RTW89_WW][102] = 76, + [0][0][RTW89_WW][104] = 76, + [0][0][RTW89_WW][105] = 76, + [0][0][RTW89_WW][107] = 76, + [0][0][RTW89_WW][109] = 76, + [0][0][RTW89_WW][111] = 0, + [0][0][RTW89_WW][113] = 0, + [0][0][RTW89_WW][115] = 0, + [0][0][RTW89_WW][117] = 0, + [0][0][RTW89_WW][119] = 0, + [0][1][RTW89_WW][0] = 76, + [0][1][RTW89_WW][2] = 76, + [0][1][RTW89_WW][4] = 76, + [0][1][RTW89_WW][6] = 76, + [0][1][RTW89_WW][8] = 76, + [0][1][RTW89_WW][10] = 76, + [0][1][RTW89_WW][12] = 76, + [0][1][RTW89_WW][14] = 76, + [0][1][RTW89_WW][15] = 76, + [0][1][RTW89_WW][17] = 76, + [0][1][RTW89_WW][19] = 76, + [0][1][RTW89_WW][21] = 76, + [0][1][RTW89_WW][23] = 76, + [0][1][RTW89_WW][25] = 76, + [0][1][RTW89_WW][27] = 76, + [0][1][RTW89_WW][29] = 76, + [0][1][RTW89_WW][30] = 76, + [0][1][RTW89_WW][32] = 76, + [0][1][RTW89_WW][34] = 76, + [0][1][RTW89_WW][36] = 76, + [0][1][RTW89_WW][38] = 76, + [0][1][RTW89_WW][40] = 76, + [0][1][RTW89_WW][42] = 76, + [0][1][RTW89_WW][44] = 76, + [0][1][RTW89_WW][45] = 76, + [0][1][RTW89_WW][47] = 76, + [0][1][RTW89_WW][49] = 76, + [0][1][RTW89_WW][51] = 76, + [0][1][RTW89_WW][53] = 76, + [0][1][RTW89_WW][55] = 76, + [0][1][RTW89_WW][57] = 76, + [0][1][RTW89_WW][59] = 76, + [0][1][RTW89_WW][60] = 76, + [0][1][RTW89_WW][62] = 76, + [0][1][RTW89_WW][64] = 76, + [0][1][RTW89_WW][66] = 76, + [0][1][RTW89_WW][68] = 76, + [0][1][RTW89_WW][70] = 76, + [0][1][RTW89_WW][72] = 76, + [0][1][RTW89_WW][74] = 76, + [0][1][RTW89_WW][75] = 76, + 
[0][1][RTW89_WW][77] = 76, + [0][1][RTW89_WW][79] = 76, + [0][1][RTW89_WW][81] = 76, + [0][1][RTW89_WW][83] = 76, + [0][1][RTW89_WW][85] = 76, + [0][1][RTW89_WW][87] = 76, + [0][1][RTW89_WW][89] = 76, + [0][1][RTW89_WW][90] = 76, + [0][1][RTW89_WW][92] = 76, + [0][1][RTW89_WW][94] = 76, + [0][1][RTW89_WW][96] = 76, + [0][1][RTW89_WW][98] = 76, + [0][1][RTW89_WW][100] = 76, + [0][1][RTW89_WW][102] = 76, + [0][1][RTW89_WW][104] = 76, + [0][1][RTW89_WW][105] = 76, + [0][1][RTW89_WW][107] = 76, + [0][1][RTW89_WW][109] = 76, + [0][1][RTW89_WW][111] = 0, + [0][1][RTW89_WW][113] = 0, + [0][1][RTW89_WW][115] = 0, + [0][1][RTW89_WW][117] = 0, + [0][1][RTW89_WW][119] = 0, + [1][0][RTW89_WW][0] = 76, + [1][0][RTW89_WW][2] = 76, + [1][0][RTW89_WW][4] = 76, + [1][0][RTW89_WW][6] = 76, + [1][0][RTW89_WW][8] = 76, + [1][0][RTW89_WW][10] = 76, + [1][0][RTW89_WW][12] = 76, + [1][0][RTW89_WW][14] = 76, + [1][0][RTW89_WW][15] = 76, + [1][0][RTW89_WW][17] = 76, + [1][0][RTW89_WW][19] = 76, + [1][0][RTW89_WW][21] = 76, + [1][0][RTW89_WW][23] = 76, + [1][0][RTW89_WW][25] = 76, + [1][0][RTW89_WW][27] = 76, + [1][0][RTW89_WW][29] = 76, + [1][0][RTW89_WW][30] = 76, + [1][0][RTW89_WW][32] = 76, + [1][0][RTW89_WW][34] = 76, + [1][0][RTW89_WW][36] = 76, + [1][0][RTW89_WW][38] = 76, + [1][0][RTW89_WW][40] = 76, + [1][0][RTW89_WW][42] = 76, + [1][0][RTW89_WW][44] = 76, + [1][0][RTW89_WW][45] = 76, + [1][0][RTW89_WW][47] = 76, + [1][0][RTW89_WW][49] = 76, + [1][0][RTW89_WW][51] = 76, + [1][0][RTW89_WW][53] = 76, + [1][0][RTW89_WW][55] = 76, + [1][0][RTW89_WW][57] = 76, + [1][0][RTW89_WW][59] = 76, + [1][0][RTW89_WW][60] = 76, + [1][0][RTW89_WW][62] = 76, + [1][0][RTW89_WW][64] = 76, + [1][0][RTW89_WW][66] = 76, + [1][0][RTW89_WW][68] = 76, + [1][0][RTW89_WW][70] = 76, + [1][0][RTW89_WW][72] = 76, + [1][0][RTW89_WW][74] = 76, + [1][0][RTW89_WW][75] = 76, + [1][0][RTW89_WW][77] = 76, + [1][0][RTW89_WW][79] = 76, + [1][0][RTW89_WW][81] = 76, + [1][0][RTW89_WW][83] = 76, + [1][0][RTW89_WW][85] = 76, + [1][0][RTW89_WW][87] = 76, + [1][0][RTW89_WW][89] = 76, + [1][0][RTW89_WW][90] = 76, + [1][0][RTW89_WW][92] = 76, + [1][0][RTW89_WW][94] = 76, + [1][0][RTW89_WW][96] = 76, + [1][0][RTW89_WW][98] = 76, + [1][0][RTW89_WW][100] = 76, + [1][0][RTW89_WW][102] = 76, + [1][0][RTW89_WW][104] = 76, + [1][0][RTW89_WW][105] = 76, + [1][0][RTW89_WW][107] = 76, + [1][0][RTW89_WW][109] = 76, + [1][0][RTW89_WW][111] = 0, + [1][0][RTW89_WW][113] = 0, + [1][0][RTW89_WW][115] = 0, + [1][0][RTW89_WW][117] = 0, + [1][0][RTW89_WW][119] = 0, + [1][1][RTW89_WW][0] = 76, + [1][1][RTW89_WW][2] = 76, + [1][1][RTW89_WW][4] = 76, + [1][1][RTW89_WW][6] = 76, + [1][1][RTW89_WW][8] = 76, + [1][1][RTW89_WW][10] = 76, + [1][1][RTW89_WW][12] = 76, + [1][1][RTW89_WW][14] = 76, + [1][1][RTW89_WW][15] = 76, + [1][1][RTW89_WW][17] = 76, + [1][1][RTW89_WW][19] = 76, + [1][1][RTW89_WW][21] = 76, + [1][1][RTW89_WW][23] = 76, + [1][1][RTW89_WW][25] = 76, + [1][1][RTW89_WW][27] = 76, + [1][1][RTW89_WW][29] = 76, + [1][1][RTW89_WW][30] = 76, + [1][1][RTW89_WW][32] = 76, + [1][1][RTW89_WW][34] = 76, + [1][1][RTW89_WW][36] = 76, + [1][1][RTW89_WW][38] = 76, + [1][1][RTW89_WW][40] = 76, + [1][1][RTW89_WW][42] = 76, + [1][1][RTW89_WW][44] = 76, + [1][1][RTW89_WW][45] = 76, + [1][1][RTW89_WW][47] = 76, + [1][1][RTW89_WW][49] = 76, + [1][1][RTW89_WW][51] = 76, + [1][1][RTW89_WW][53] = 76, + [1][1][RTW89_WW][55] = 76, + [1][1][RTW89_WW][57] = 76, + [1][1][RTW89_WW][59] = 76, + [1][1][RTW89_WW][60] = 76, + [1][1][RTW89_WW][62] = 76, + [1][1][RTW89_WW][64] = 76, + 
[1][1][RTW89_WW][66] = 76, + [1][1][RTW89_WW][68] = 76, + [1][1][RTW89_WW][70] = 76, + [1][1][RTW89_WW][72] = 76, + [1][1][RTW89_WW][74] = 76, + [1][1][RTW89_WW][75] = 76, + [1][1][RTW89_WW][77] = 76, + [1][1][RTW89_WW][79] = 76, + [1][1][RTW89_WW][81] = 76, + [1][1][RTW89_WW][83] = 76, + [1][1][RTW89_WW][85] = 76, + [1][1][RTW89_WW][87] = 76, + [1][1][RTW89_WW][89] = 76, + [1][1][RTW89_WW][90] = 76, + [1][1][RTW89_WW][92] = 76, + [1][1][RTW89_WW][94] = 76, + [1][1][RTW89_WW][96] = 76, + [1][1][RTW89_WW][98] = 76, + [1][1][RTW89_WW][100] = 76, + [1][1][RTW89_WW][102] = 76, + [1][1][RTW89_WW][104] = 76, + [1][1][RTW89_WW][105] = 76, + [1][1][RTW89_WW][107] = 76, + [1][1][RTW89_WW][109] = 76, + [1][1][RTW89_WW][111] = 0, + [1][1][RTW89_WW][113] = 0, + [1][1][RTW89_WW][115] = 0, + [1][1][RTW89_WW][117] = 0, + [1][1][RTW89_WW][119] = 0, + [2][0][RTW89_WW][0] = 76, + [2][0][RTW89_WW][2] = 76, + [2][0][RTW89_WW][4] = 76, + [2][0][RTW89_WW][6] = 76, + [2][0][RTW89_WW][8] = 76, + [2][0][RTW89_WW][10] = 76, + [2][0][RTW89_WW][12] = 76, + [2][0][RTW89_WW][14] = 76, + [2][0][RTW89_WW][15] = 76, + [2][0][RTW89_WW][17] = 76, + [2][0][RTW89_WW][19] = 76, + [2][0][RTW89_WW][21] = 76, + [2][0][RTW89_WW][23] = 76, + [2][0][RTW89_WW][25] = 76, + [2][0][RTW89_WW][27] = 76, + [2][0][RTW89_WW][29] = 76, + [2][0][RTW89_WW][30] = 76, + [2][0][RTW89_WW][32] = 76, + [2][0][RTW89_WW][34] = 76, + [2][0][RTW89_WW][36] = 76, + [2][0][RTW89_WW][38] = 76, + [2][0][RTW89_WW][40] = 76, + [2][0][RTW89_WW][42] = 76, + [2][0][RTW89_WW][44] = 76, + [2][0][RTW89_WW][45] = 76, + [2][0][RTW89_WW][47] = 76, + [2][0][RTW89_WW][49] = 76, + [2][0][RTW89_WW][51] = 76, + [2][0][RTW89_WW][53] = 76, + [2][0][RTW89_WW][55] = 76, + [2][0][RTW89_WW][57] = 76, + [2][0][RTW89_WW][59] = 76, + [2][0][RTW89_WW][60] = 76, + [2][0][RTW89_WW][62] = 76, + [2][0][RTW89_WW][64] = 76, + [2][0][RTW89_WW][66] = 76, + [2][0][RTW89_WW][68] = 76, + [2][0][RTW89_WW][70] = 76, + [2][0][RTW89_WW][72] = 76, + [2][0][RTW89_WW][74] = 76, + [2][0][RTW89_WW][75] = 76, + [2][0][RTW89_WW][77] = 76, + [2][0][RTW89_WW][79] = 76, + [2][0][RTW89_WW][81] = 76, + [2][0][RTW89_WW][83] = 76, + [2][0][RTW89_WW][85] = 76, + [2][0][RTW89_WW][87] = 76, + [2][0][RTW89_WW][89] = 76, + [2][0][RTW89_WW][90] = 76, + [2][0][RTW89_WW][92] = 76, + [2][0][RTW89_WW][94] = 76, + [2][0][RTW89_WW][96] = 76, + [2][0][RTW89_WW][98] = 76, + [2][0][RTW89_WW][100] = 76, + [2][0][RTW89_WW][102] = 76, + [2][0][RTW89_WW][104] = 76, + [2][0][RTW89_WW][105] = 76, + [2][0][RTW89_WW][107] = 76, + [2][0][RTW89_WW][109] = 76, + [2][0][RTW89_WW][111] = 0, + [2][0][RTW89_WW][113] = 0, + [2][0][RTW89_WW][115] = 0, + [2][0][RTW89_WW][117] = 0, + [2][0][RTW89_WW][119] = 0, + [2][1][RTW89_WW][0] = 76, + [2][1][RTW89_WW][2] = 76, + [2][1][RTW89_WW][4] = 76, + [2][1][RTW89_WW][6] = 76, + [2][1][RTW89_WW][8] = 76, + [2][1][RTW89_WW][10] = 76, + [2][1][RTW89_WW][12] = 76, + [2][1][RTW89_WW][14] = 76, + [2][1][RTW89_WW][15] = 76, + [2][1][RTW89_WW][17] = 76, + [2][1][RTW89_WW][19] = 76, + [2][1][RTW89_WW][21] = 76, + [2][1][RTW89_WW][23] = 76, + [2][1][RTW89_WW][25] = 76, + [2][1][RTW89_WW][27] = 76, + [2][1][RTW89_WW][29] = 76, + [2][1][RTW89_WW][30] = 76, + [2][1][RTW89_WW][32] = 76, + [2][1][RTW89_WW][34] = 76, + [2][1][RTW89_WW][36] = 76, + [2][1][RTW89_WW][38] = 76, + [2][1][RTW89_WW][40] = 76, + [2][1][RTW89_WW][42] = 76, + [2][1][RTW89_WW][44] = 76, + [2][1][RTW89_WW][45] = 76, + [2][1][RTW89_WW][47] = 76, + [2][1][RTW89_WW][49] = 76, + [2][1][RTW89_WW][51] = 76, + [2][1][RTW89_WW][53] = 76, + 
[2][1][RTW89_WW][55] = 76, + [2][1][RTW89_WW][57] = 76, + [2][1][RTW89_WW][59] = 76, + [2][1][RTW89_WW][60] = 76, + [2][1][RTW89_WW][62] = 76, + [2][1][RTW89_WW][64] = 76, + [2][1][RTW89_WW][66] = 76, + [2][1][RTW89_WW][68] = 76, + [2][1][RTW89_WW][70] = 76, + [2][1][RTW89_WW][72] = 76, + [2][1][RTW89_WW][74] = 76, + [2][1][RTW89_WW][75] = 76, + [2][1][RTW89_WW][77] = 76, + [2][1][RTW89_WW][79] = 76, + [2][1][RTW89_WW][81] = 76, + [2][1][RTW89_WW][83] = 76, + [2][1][RTW89_WW][85] = 76, + [2][1][RTW89_WW][87] = 76, + [2][1][RTW89_WW][89] = 76, + [2][1][RTW89_WW][90] = 76, + [2][1][RTW89_WW][92] = 76, + [2][1][RTW89_WW][94] = 76, + [2][1][RTW89_WW][96] = 76, + [2][1][RTW89_WW][98] = 76, + [2][1][RTW89_WW][100] = 76, + [2][1][RTW89_WW][102] = 76, + [2][1][RTW89_WW][104] = 76, + [2][1][RTW89_WW][105] = 76, + [2][1][RTW89_WW][107] = 76, + [2][1][RTW89_WW][109] = 76, + [2][1][RTW89_WW][111] = 0, + [2][1][RTW89_WW][113] = 0, + [2][1][RTW89_WW][115] = 0, + [2][1][RTW89_WW][117] = 0, + [2][1][RTW89_WW][119] = 0, + [0][0][RTW89_FCC][0] = 76, + [0][0][RTW89_FCC][2] = 76, + [0][0][RTW89_FCC][4] = 76, + [0][0][RTW89_FCC][6] = 76, + [0][0][RTW89_FCC][8] = 76, + [0][0][RTW89_FCC][10] = 76, + [0][0][RTW89_FCC][12] = 76, + [0][0][RTW89_FCC][14] = 76, + [0][0][RTW89_FCC][15] = 76, + [0][0][RTW89_FCC][17] = 76, + [0][0][RTW89_FCC][19] = 76, + [0][0][RTW89_FCC][21] = 76, + [0][0][RTW89_FCC][23] = 76, + [0][0][RTW89_FCC][25] = 76, + [0][0][RTW89_FCC][27] = 76, + [0][0][RTW89_FCC][29] = 76, + [0][0][RTW89_FCC][30] = 76, + [0][0][RTW89_FCC][32] = 76, + [0][0][RTW89_FCC][34] = 76, + [0][0][RTW89_FCC][36] = 76, + [0][0][RTW89_FCC][38] = 76, + [0][0][RTW89_FCC][40] = 76, + [0][0][RTW89_FCC][42] = 76, + [0][0][RTW89_FCC][44] = 76, + [0][0][RTW89_FCC][45] = 76, + [0][0][RTW89_FCC][47] = 76, + [0][0][RTW89_FCC][49] = 76, + [0][0][RTW89_FCC][51] = 76, + [0][0][RTW89_FCC][53] = 76, + [0][0][RTW89_FCC][55] = 76, + [0][0][RTW89_FCC][57] = 76, + [0][0][RTW89_FCC][59] = 76, + [0][0][RTW89_FCC][60] = 76, + [0][0][RTW89_FCC][62] = 76, + [0][0][RTW89_FCC][64] = 76, + [0][0][RTW89_FCC][66] = 76, + [0][0][RTW89_FCC][68] = 76, + [0][0][RTW89_FCC][70] = 76, + [0][0][RTW89_FCC][72] = 76, + [0][0][RTW89_FCC][74] = 76, + [0][0][RTW89_FCC][75] = 76, + [0][0][RTW89_FCC][77] = 76, + [0][0][RTW89_FCC][79] = 76, + [0][0][RTW89_FCC][81] = 76, + [0][0][RTW89_FCC][83] = 76, + [0][0][RTW89_FCC][85] = 76, + [0][0][RTW89_FCC][87] = 76, + [0][0][RTW89_FCC][89] = 76, + [0][0][RTW89_FCC][90] = 76, + [0][0][RTW89_FCC][92] = 76, + [0][0][RTW89_FCC][94] = 76, + [0][0][RTW89_FCC][96] = 76, + [0][0][RTW89_FCC][98] = 76, + [0][0][RTW89_FCC][100] = 76, + [0][0][RTW89_FCC][102] = 76, + [0][0][RTW89_FCC][104] = 76, + [0][0][RTW89_FCC][105] = 76, + [0][0][RTW89_FCC][107] = 76, + [0][0][RTW89_FCC][109] = 76, + [0][0][RTW89_FCC][111] = 127, + [0][0][RTW89_FCC][113] = 127, + [0][0][RTW89_FCC][115] = 127, + [0][0][RTW89_FCC][117] = 127, + [0][0][RTW89_FCC][119] = 127, + [0][1][RTW89_FCC][0] = 76, + [0][1][RTW89_FCC][2] = 76, + [0][1][RTW89_FCC][4] = 76, + [0][1][RTW89_FCC][6] = 76, + [0][1][RTW89_FCC][8] = 76, + [0][1][RTW89_FCC][10] = 76, + [0][1][RTW89_FCC][12] = 76, + [0][1][RTW89_FCC][14] = 76, + [0][1][RTW89_FCC][15] = 76, + [0][1][RTW89_FCC][17] = 76, + [0][1][RTW89_FCC][19] = 76, + [0][1][RTW89_FCC][21] = 76, + [0][1][RTW89_FCC][23] = 76, + [0][1][RTW89_FCC][25] = 76, + [0][1][RTW89_FCC][27] = 76, + [0][1][RTW89_FCC][29] = 76, + [0][1][RTW89_FCC][30] = 76, + [0][1][RTW89_FCC][32] = 76, + [0][1][RTW89_FCC][34] = 76, + [0][1][RTW89_FCC][36] = 76, + 
[0][1][RTW89_FCC][38] = 76, + [0][1][RTW89_FCC][40] = 76, + [0][1][RTW89_FCC][42] = 76, + [0][1][RTW89_FCC][44] = 76, + [0][1][RTW89_FCC][45] = 76, + [0][1][RTW89_FCC][47] = 76, + [0][1][RTW89_FCC][49] = 76, + [0][1][RTW89_FCC][51] = 76, + [0][1][RTW89_FCC][53] = 76, + [0][1][RTW89_FCC][55] = 76, + [0][1][RTW89_FCC][57] = 76, + [0][1][RTW89_FCC][59] = 76, + [0][1][RTW89_FCC][60] = 76, + [0][1][RTW89_FCC][62] = 76, + [0][1][RTW89_FCC][64] = 76, + [0][1][RTW89_FCC][66] = 76, + [0][1][RTW89_FCC][68] = 76, + [0][1][RTW89_FCC][70] = 76, + [0][1][RTW89_FCC][72] = 76, + [0][1][RTW89_FCC][74] = 76, + [0][1][RTW89_FCC][75] = 76, + [0][1][RTW89_FCC][77] = 76, + [0][1][RTW89_FCC][79] = 76, + [0][1][RTW89_FCC][81] = 76, + [0][1][RTW89_FCC][83] = 76, + [0][1][RTW89_FCC][85] = 76, + [0][1][RTW89_FCC][87] = 76, + [0][1][RTW89_FCC][89] = 76, + [0][1][RTW89_FCC][90] = 76, + [0][1][RTW89_FCC][92] = 76, + [0][1][RTW89_FCC][94] = 76, + [0][1][RTW89_FCC][96] = 76, + [0][1][RTW89_FCC][98] = 76, + [0][1][RTW89_FCC][100] = 76, + [0][1][RTW89_FCC][102] = 76, + [0][1][RTW89_FCC][104] = 76, + [0][1][RTW89_FCC][105] = 76, + [0][1][RTW89_FCC][107] = 76, + [0][1][RTW89_FCC][109] = 76, + [0][1][RTW89_FCC][111] = 127, + [0][1][RTW89_FCC][113] = 127, + [0][1][RTW89_FCC][115] = 127, + [0][1][RTW89_FCC][117] = 127, + [0][1][RTW89_FCC][119] = 127, + [1][0][RTW89_FCC][0] = 76, + [1][0][RTW89_FCC][2] = 76, + [1][0][RTW89_FCC][4] = 76, + [1][0][RTW89_FCC][6] = 76, + [1][0][RTW89_FCC][8] = 76, + [1][0][RTW89_FCC][10] = 76, + [1][0][RTW89_FCC][12] = 76, + [1][0][RTW89_FCC][14] = 76, + [1][0][RTW89_FCC][15] = 76, + [1][0][RTW89_FCC][17] = 76, + [1][0][RTW89_FCC][19] = 76, + [1][0][RTW89_FCC][21] = 76, + [1][0][RTW89_FCC][23] = 76, + [1][0][RTW89_FCC][25] = 76, + [1][0][RTW89_FCC][27] = 76, + [1][0][RTW89_FCC][29] = 76, + [1][0][RTW89_FCC][30] = 76, + [1][0][RTW89_FCC][32] = 76, + [1][0][RTW89_FCC][34] = 76, + [1][0][RTW89_FCC][36] = 76, + [1][0][RTW89_FCC][38] = 76, + [1][0][RTW89_FCC][40] = 76, + [1][0][RTW89_FCC][42] = 76, + [1][0][RTW89_FCC][44] = 76, + [1][0][RTW89_FCC][45] = 76, + [1][0][RTW89_FCC][47] = 76, + [1][0][RTW89_FCC][49] = 76, + [1][0][RTW89_FCC][51] = 76, + [1][0][RTW89_FCC][53] = 76, + [1][0][RTW89_FCC][55] = 76, + [1][0][RTW89_FCC][57] = 76, + [1][0][RTW89_FCC][59] = 76, + [1][0][RTW89_FCC][60] = 76, + [1][0][RTW89_FCC][62] = 76, + [1][0][RTW89_FCC][64] = 76, + [1][0][RTW89_FCC][66] = 76, + [1][0][RTW89_FCC][68] = 76, + [1][0][RTW89_FCC][70] = 76, + [1][0][RTW89_FCC][72] = 76, + [1][0][RTW89_FCC][74] = 76, + [1][0][RTW89_FCC][75] = 76, + [1][0][RTW89_FCC][77] = 76, + [1][0][RTW89_FCC][79] = 76, + [1][0][RTW89_FCC][81] = 76, + [1][0][RTW89_FCC][83] = 76, + [1][0][RTW89_FCC][85] = 76, + [1][0][RTW89_FCC][87] = 76, + [1][0][RTW89_FCC][89] = 76, + [1][0][RTW89_FCC][90] = 76, + [1][0][RTW89_FCC][92] = 76, + [1][0][RTW89_FCC][94] = 76, + [1][0][RTW89_FCC][96] = 76, + [1][0][RTW89_FCC][98] = 76, + [1][0][RTW89_FCC][100] = 76, + [1][0][RTW89_FCC][102] = 76, + [1][0][RTW89_FCC][104] = 76, + [1][0][RTW89_FCC][105] = 76, + [1][0][RTW89_FCC][107] = 76, + [1][0][RTW89_FCC][109] = 76, + [1][0][RTW89_FCC][111] = 127, + [1][0][RTW89_FCC][113] = 127, + [1][0][RTW89_FCC][115] = 127, + [1][0][RTW89_FCC][117] = 127, + [1][0][RTW89_FCC][119] = 127, + [1][1][RTW89_FCC][0] = 76, + [1][1][RTW89_FCC][2] = 76, + [1][1][RTW89_FCC][4] = 76, + [1][1][RTW89_FCC][6] = 76, + [1][1][RTW89_FCC][8] = 76, + [1][1][RTW89_FCC][10] = 76, + [1][1][RTW89_FCC][12] = 76, + [1][1][RTW89_FCC][14] = 76, + [1][1][RTW89_FCC][15] = 76, + [1][1][RTW89_FCC][17] 
= 76, + [1][1][RTW89_FCC][19] = 76, + [1][1][RTW89_FCC][21] = 76, + [1][1][RTW89_FCC][23] = 76, + [1][1][RTW89_FCC][25] = 76, + [1][1][RTW89_FCC][27] = 76, + [1][1][RTW89_FCC][29] = 76, + [1][1][RTW89_FCC][30] = 76, + [1][1][RTW89_FCC][32] = 76, + [1][1][RTW89_FCC][34] = 76, + [1][1][RTW89_FCC][36] = 76, + [1][1][RTW89_FCC][38] = 76, + [1][1][RTW89_FCC][40] = 76, + [1][1][RTW89_FCC][42] = 76, + [1][1][RTW89_FCC][44] = 76, + [1][1][RTW89_FCC][45] = 76, + [1][1][RTW89_FCC][47] = 76, + [1][1][RTW89_FCC][49] = 76, + [1][1][RTW89_FCC][51] = 76, + [1][1][RTW89_FCC][53] = 76, + [1][1][RTW89_FCC][55] = 76, + [1][1][RTW89_FCC][57] = 76, + [1][1][RTW89_FCC][59] = 76, + [1][1][RTW89_FCC][60] = 76, + [1][1][RTW89_FCC][62] = 76, + [1][1][RTW89_FCC][64] = 76, + [1][1][RTW89_FCC][66] = 76, + [1][1][RTW89_FCC][68] = 76, + [1][1][RTW89_FCC][70] = 76, + [1][1][RTW89_FCC][72] = 76, + [1][1][RTW89_FCC][74] = 76, + [1][1][RTW89_FCC][75] = 76, + [1][1][RTW89_FCC][77] = 76, + [1][1][RTW89_FCC][79] = 76, + [1][1][RTW89_FCC][81] = 76, + [1][1][RTW89_FCC][83] = 76, + [1][1][RTW89_FCC][85] = 76, + [1][1][RTW89_FCC][87] = 76, + [1][1][RTW89_FCC][89] = 76, + [1][1][RTW89_FCC][90] = 76, + [1][1][RTW89_FCC][92] = 76, + [1][1][RTW89_FCC][94] = 76, + [1][1][RTW89_FCC][96] = 76, + [1][1][RTW89_FCC][98] = 76, + [1][1][RTW89_FCC][100] = 76, + [1][1][RTW89_FCC][102] = 76, + [1][1][RTW89_FCC][104] = 76, + [1][1][RTW89_FCC][105] = 76, + [1][1][RTW89_FCC][107] = 76, + [1][1][RTW89_FCC][109] = 76, + [1][1][RTW89_FCC][111] = 127, + [1][1][RTW89_FCC][113] = 127, + [1][1][RTW89_FCC][115] = 127, + [1][1][RTW89_FCC][117] = 127, + [1][1][RTW89_FCC][119] = 127, + [2][0][RTW89_FCC][0] = 76, + [2][0][RTW89_FCC][2] = 76, + [2][0][RTW89_FCC][4] = 76, + [2][0][RTW89_FCC][6] = 76, + [2][0][RTW89_FCC][8] = 76, + [2][0][RTW89_FCC][10] = 76, + [2][0][RTW89_FCC][12] = 76, + [2][0][RTW89_FCC][14] = 76, + [2][0][RTW89_FCC][15] = 76, + [2][0][RTW89_FCC][17] = 76, + [2][0][RTW89_FCC][19] = 76, + [2][0][RTW89_FCC][21] = 76, + [2][0][RTW89_FCC][23] = 76, + [2][0][RTW89_FCC][25] = 76, + [2][0][RTW89_FCC][27] = 76, + [2][0][RTW89_FCC][29] = 76, + [2][0][RTW89_FCC][30] = 76, + [2][0][RTW89_FCC][32] = 76, + [2][0][RTW89_FCC][34] = 76, + [2][0][RTW89_FCC][36] = 76, + [2][0][RTW89_FCC][38] = 76, + [2][0][RTW89_FCC][40] = 76, + [2][0][RTW89_FCC][42] = 76, + [2][0][RTW89_FCC][44] = 76, + [2][0][RTW89_FCC][45] = 76, + [2][0][RTW89_FCC][47] = 76, + [2][0][RTW89_FCC][49] = 76, + [2][0][RTW89_FCC][51] = 76, + [2][0][RTW89_FCC][53] = 76, + [2][0][RTW89_FCC][55] = 76, + [2][0][RTW89_FCC][57] = 76, + [2][0][RTW89_FCC][59] = 76, + [2][0][RTW89_FCC][60] = 76, + [2][0][RTW89_FCC][62] = 76, + [2][0][RTW89_FCC][64] = 76, + [2][0][RTW89_FCC][66] = 76, + [2][0][RTW89_FCC][68] = 76, + [2][0][RTW89_FCC][70] = 76, + [2][0][RTW89_FCC][72] = 76, + [2][0][RTW89_FCC][74] = 76, + [2][0][RTW89_FCC][75] = 76, + [2][0][RTW89_FCC][77] = 76, + [2][0][RTW89_FCC][79] = 76, + [2][0][RTW89_FCC][81] = 76, + [2][0][RTW89_FCC][83] = 76, + [2][0][RTW89_FCC][85] = 76, + [2][0][RTW89_FCC][87] = 76, + [2][0][RTW89_FCC][89] = 76, + [2][0][RTW89_FCC][90] = 76, + [2][0][RTW89_FCC][92] = 76, + [2][0][RTW89_FCC][94] = 76, + [2][0][RTW89_FCC][96] = 76, + [2][0][RTW89_FCC][98] = 76, + [2][0][RTW89_FCC][100] = 76, + [2][0][RTW89_FCC][102] = 76, + [2][0][RTW89_FCC][104] = 76, + [2][0][RTW89_FCC][105] = 76, + [2][0][RTW89_FCC][107] = 76, + [2][0][RTW89_FCC][109] = 76, + [2][0][RTW89_FCC][111] = 127, + [2][0][RTW89_FCC][113] = 127, + [2][0][RTW89_FCC][115] = 127, + [2][0][RTW89_FCC][117] = 127, + 
[2][0][RTW89_FCC][119] = 127, + [2][1][RTW89_FCC][0] = 76, + [2][1][RTW89_FCC][2] = 76, + [2][1][RTW89_FCC][4] = 76, + [2][1][RTW89_FCC][6] = 76, + [2][1][RTW89_FCC][8] = 76, + [2][1][RTW89_FCC][10] = 76, + [2][1][RTW89_FCC][12] = 76, + [2][1][RTW89_FCC][14] = 76, + [2][1][RTW89_FCC][15] = 76, + [2][1][RTW89_FCC][17] = 76, + [2][1][RTW89_FCC][19] = 76, + [2][1][RTW89_FCC][21] = 76, + [2][1][RTW89_FCC][23] = 76, + [2][1][RTW89_FCC][25] = 76, + [2][1][RTW89_FCC][27] = 76, + [2][1][RTW89_FCC][29] = 76, + [2][1][RTW89_FCC][30] = 76, + [2][1][RTW89_FCC][32] = 76, + [2][1][RTW89_FCC][34] = 76, + [2][1][RTW89_FCC][36] = 76, + [2][1][RTW89_FCC][38] = 76, + [2][1][RTW89_FCC][40] = 76, + [2][1][RTW89_FCC][42] = 76, + [2][1][RTW89_FCC][44] = 76, + [2][1][RTW89_FCC][45] = 76, + [2][1][RTW89_FCC][47] = 76, + [2][1][RTW89_FCC][49] = 76, + [2][1][RTW89_FCC][51] = 76, + [2][1][RTW89_FCC][53] = 76, + [2][1][RTW89_FCC][55] = 76, + [2][1][RTW89_FCC][57] = 76, + [2][1][RTW89_FCC][59] = 76, + [2][1][RTW89_FCC][60] = 76, + [2][1][RTW89_FCC][62] = 76, + [2][1][RTW89_FCC][64] = 76, + [2][1][RTW89_FCC][66] = 76, + [2][1][RTW89_FCC][68] = 76, + [2][1][RTW89_FCC][70] = 76, + [2][1][RTW89_FCC][72] = 76, + [2][1][RTW89_FCC][74] = 76, + [2][1][RTW89_FCC][75] = 76, + [2][1][RTW89_FCC][77] = 76, + [2][1][RTW89_FCC][79] = 76, + [2][1][RTW89_FCC][81] = 76, + [2][1][RTW89_FCC][83] = 76, + [2][1][RTW89_FCC][85] = 76, + [2][1][RTW89_FCC][87] = 76, + [2][1][RTW89_FCC][89] = 76, + [2][1][RTW89_FCC][90] = 76, + [2][1][RTW89_FCC][92] = 76, + [2][1][RTW89_FCC][94] = 76, + [2][1][RTW89_FCC][96] = 76, + [2][1][RTW89_FCC][98] = 76, + [2][1][RTW89_FCC][100] = 76, + [2][1][RTW89_FCC][102] = 76, + [2][1][RTW89_FCC][104] = 76, + [2][1][RTW89_FCC][105] = 76, + [2][1][RTW89_FCC][107] = 76, + [2][1][RTW89_FCC][109] = 76, + [2][1][RTW89_FCC][111] = 127, + [2][1][RTW89_FCC][113] = 127, + [2][1][RTW89_FCC][115] = 127, + [2][1][RTW89_FCC][117] = 127, + [2][1][RTW89_FCC][119] = 127, +}; + const struct rtw89_phy_table rtw89_8852c_phy_bb_table = { .regs = rtw89_8852c_phy_bb_regs, .n_regs = ARRAY_SIZE(rtw89_8852c_phy_bb_regs), @@ -13695,3 +19316,9 @@ const struct rtw89_phy_table rtw89_8852c_phy_nctl_table = { .n_regs = ARRAY_SIZE(rtw89_8852c_phy_nctl_regs), .rf_path = 0, /* don't care */ }; + +const struct rtw89_txpwr_table rtw89_8852c_byr_table = { + .data = rtw89_8852c_txpwr_byrate, + .size = ARRAY_SIZE(rtw89_8852c_txpwr_byrate), + .load = rtw89_phy_load_txpwr_byrate, +}; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h index 807021ab9f1d..0c0df2ac8cff 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h @@ -12,5 +12,23 @@ extern const struct rtw89_phy_table rtw89_8852c_phy_bb_gain_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table; +extern const struct rtw89_txpwr_table rtw89_8852c_byr_table; +extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM] + [RTW89_REGD_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] + [RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] + 
[RTW89_RS_LMT_NUM][RTW89_BF_NUM] + [RTW89_REGD_NUM][RTW89_6G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_5G_CH_NUM]; +extern const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] + [RTW89_REGD_NUM][RTW89_6G_CH_NUM]; #endif -- cgit v1.2.3 From c6badab206d5a2330c896ab1b77d79e80c6f16aa Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Thu, 14 Apr 2022 14:20:17 +0800 Subject: rtw89: 8852c: add TX power track tables TX power track tables are used to do TX power compensation according to thermal value. In order to work in existing and new chip, add new 6G entries, and change type from u8 to s8. Internal version table is HALRF_027_00_031. Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/phy.h | 28 +++-- drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c | 8 +- .../net/wireless/realtek/rtw89/rtw8852a_table.c | 24 ++-- .../net/wireless/realtek/rtw89/rtw8852c_table.c | 139 +++++++++++++++++++++ .../net/wireless/realtek/rtw89/rtw8852c_table.h | 1 + 5 files changed, 172 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index adcfcb4c2429..9c97e77d9707 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -233,18 +233,22 @@ struct rtw89_txpwr_byrate_cfg { #define DELTA_SWINGIDX_SIZE 30 struct rtw89_txpwr_track_cfg { - const u8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE]; - const u8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE]; - const u8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE]; - const u8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE]; - const u8 *delta_swingidx_2gb_n; - const u8 *delta_swingidx_2gb_p; - const u8 *delta_swingidx_2ga_n; - const u8 *delta_swingidx_2ga_p; - const u8 *delta_swingidx_2g_cck_b_n; - const u8 *delta_swingidx_2g_cck_b_p; - const u8 *delta_swingidx_2g_cck_a_n; - const u8 *delta_swingidx_2g_cck_a_p; + const s8 (*delta_swingidx_6gb_n)[DELTA_SWINGIDX_SIZE]; + const s8 (*delta_swingidx_6gb_p)[DELTA_SWINGIDX_SIZE]; + const s8 (*delta_swingidx_6ga_n)[DELTA_SWINGIDX_SIZE]; + const s8 (*delta_swingidx_6ga_p)[DELTA_SWINGIDX_SIZE]; + const s8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE]; + const s8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE]; + const s8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE]; + const s8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE]; + const s8 *delta_swingidx_2gb_n; + const s8 *delta_swingidx_2gb_p; + const s8 *delta_swingidx_2ga_n; + const s8 *delta_swingidx_2ga_p; + const s8 *delta_swingidx_2g_cck_b_n; + const s8 *delta_swingidx_2g_cck_b_p; + const s8 *delta_swingidx_2g_cck_a_n; + const s8 *delta_swingidx_2g_cck_a_p; }; struct rtw89_phy_dig_gain_cfg { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c index ad272854c442..aa782534e76b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c @@ -2907,10 +2907,10 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; u8 ch = rtwdev->hal.current_channel; u8 subband = rtwdev->hal.current_subband; - const u8 *thm_up_a = NULL; - const u8 
*thm_down_a = NULL; - const u8 *thm_up_b = NULL; - const u8 *thm_down_b = NULL; + const s8 *thm_up_a = NULL; + const s8 *thm_down_a = NULL; + const s8 *thm_up_b = NULL; + const s8 *thm_down_b = NULL; u8 thermal = 0xff; s8 thm_ofst[64] = {0}; u32 tmp = 0; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c index 8c4749f10641..99479bbb0939 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c @@ -43313,7 +43313,7 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8852a_txpwr_byrate[] = { { 1, 0, 4, 0, 4, 0x00000000, }, }; -static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = { +static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = { {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11}, {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, @@ -43322,7 +43322,7 @@ static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = { 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, }; -static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = { +static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = { {0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11}, {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, @@ -43331,7 +43331,7 @@ static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = { 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, }; -static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = { +static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = { {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11}, {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, @@ -43340,7 +43340,7 @@ static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = { 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, }; -static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = { +static const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = { {0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11}, {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, @@ -43349,35 +43349,35 @@ static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = { 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9}, }; -static const u8 _txpwr_track_delta_swingidx_2gb_n[] = { +static const s8 _txpwr_track_delta_swingidx_2gb_n[] = { 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7}; -static const u8 _txpwr_track_delta_swingidx_2gb_p[] = { +static const s8 _txpwr_track_delta_swingidx_2gb_p[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3}; -static const u8 _txpwr_track_delta_swingidx_2ga_n[] = { +static const s8 _txpwr_track_delta_swingidx_2ga_n[] = { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5}; -static const u8 _txpwr_track_delta_swingidx_2ga_p[] = { +static const s8 _txpwr_track_delta_swingidx_2ga_p[] = { 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}; -static const u8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = { +static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = { 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7}; -static const u8 
_txpwr_track_delta_swingidx_2g_cck_b_p[] = { +static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3}; -static const u8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = { +static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5}; -static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { +static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c index bd0c4ff5ca4d..e7852d286137 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c @@ -13702,6 +13702,126 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8852c_txpwr_byrate[] = { { 2, 0, 4, 0, 4, 0x00000000, }, }; +static const s8 _txpwr_track_delta_swingidx_6gb_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, +}; + +static const s8 _txpwr_track_delta_swingidx_6gb_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, + 11, 12, 12, 13, 14, 14, 15, 15, 16, 17, 17, 18}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 13, 14, 15, 15, 16, 16}, +}; + +static const s8 _txpwr_track_delta_swingidx_6ga_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, +}; + +static const s8 _txpwr_track_delta_swingidx_6ga_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}, +}; + +static const s8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10}, + {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8}, + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, + 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12}, +}; + +static const s8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 
13, 13, 14, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 16}, +}; + +static const s8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6}, + {0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3}, + {0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, + 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8}, +}; + +static const s8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14}, +}; + +static const s8 _txpwr_track_delta_swingidx_2gb_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 +}; + +static const s8 _txpwr_track_delta_swingidx_2gb_p[] = { + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 +}; + +static const s8 _txpwr_track_delta_swingidx_2ga_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2, + -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3 +}; + +static const s8 _txpwr_track_delta_swingidx_2ga_p[] = { + 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, + 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = { + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = { + 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -2, -2, + -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3 +}; + +static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = { + 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, + 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 +}; + const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM] [RTW89_REGD_NUM] = { [0][0][RTW89_ACMA] = 0, @@ -19322,3 +19442,22 @@ const struct rtw89_txpwr_table rtw89_8852c_byr_table = { .size = ARRAY_SIZE(rtw89_8852c_txpwr_byrate), .load = rtw89_phy_load_txpwr_byrate, }; + +const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg = { + .delta_swingidx_6gb_n = _txpwr_track_delta_swingidx_6gb_n, + .delta_swingidx_6gb_p = _txpwr_track_delta_swingidx_6gb_p, + .delta_swingidx_6ga_n = _txpwr_track_delta_swingidx_6ga_n, + .delta_swingidx_6ga_p = _txpwr_track_delta_swingidx_6ga_p, + .delta_swingidx_5gb_n = _txpwr_track_delta_swingidx_5gb_n, + .delta_swingidx_5gb_p = _txpwr_track_delta_swingidx_5gb_p, + .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n, + .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p, + .delta_swingidx_2gb_n = _txpwr_track_delta_swingidx_2gb_n, + .delta_swingidx_2gb_p = _txpwr_track_delta_swingidx_2gb_p, + .delta_swingidx_2ga_n = _txpwr_track_delta_swingidx_2ga_n, + .delta_swingidx_2ga_p = _txpwr_track_delta_swingidx_2ga_p, + .delta_swingidx_2g_cck_b_n = _txpwr_track_delta_swingidx_2g_cck_b_n, + .delta_swingidx_2g_cck_b_p = 
_txpwr_track_delta_swingidx_2g_cck_b_p, + .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n, + .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p, +}; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h index 0c0df2ac8cff..616282c9bb98 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h @@ -13,6 +13,7 @@ extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table; extern const struct rtw89_txpwr_table rtw89_8852c_byr_table; +extern const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg; extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM] [RTW89_REGD_NUM]; extern const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] -- cgit v1.2.3 From c7845551bf667894c5eb4320265d8717f056a8ac Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:18 +0800 Subject: rtw89: 8852c: phy: configure TSSI bandedge TSSI is used to manage TX power with thermal value as a factor. This patch is to configure bandedge to TX proper waveform. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 1 + drivers/net/wireless/realtek/rtw89/phy.c | 106 +++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/phy.h | 36 +++++++ drivers/net/wireless/realtek/rtw89/reg.h | 5 + drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + .../net/wireless/realtek/rtw89/rtw8852c_table.c | 7 ++ .../net/wireless/realtek/rtw89/rtw8852c_table.h | 1 + 8 files changed, 158 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 1dd558d89567..fd6b17b6498b 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2380,6 +2380,7 @@ struct rtw89_chip_info { const struct rtw89_phy_table *nctl_table; const struct rtw89_txpwr_table *byr_table; const struct rtw89_phy_dig_gain_table *dig_table; + const struct rtw89_phy_tssi_dbw_table *tssi_dbw_table; const s8 (*txpwr_lmt_2g)[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [RTW89_RS_LMT_NUM][RTW89_BF_NUM] [RTW89_REGD_NUM][RTW89_2G_CH_NUM]; diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index eed6dbd0d5db..aff03261ab72 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -3420,3 +3420,109 @@ rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl) _rfk_handler[p->flag](rtwdev, p); } EXPORT_SYMBOL(rtw89_rfk_parser); + +#define RTW89_TSSI_FAST_MODE_NUM 4 + +static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = { + {0xD934, 0xff0000}, + {0xD934, 0xff000000}, + {0xD938, 0xff}, + {0xD934, 0xff00}, +}; + +static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = { + {0xD930, 0xff0000}, + {0xD930, 0xff000000}, + {0xD934, 0xff}, + {0xD930, 0xff00}, +}; + +static +void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev, + enum rtw89_mac_idx mac_idx, + enum rtw89_tssi_bandedge_cfg bandedge_cfg, + u32 val) +{ + const struct rtw89_reg_def *regs; + u32 reg; + int i; + + if 
(bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT) + regs = rtw89_tssi_fastmode_regs_flat; + else + regs = rtw89_tssi_fastmode_regs_level; + + for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) { + reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx); + rtw89_write32_mask(rtwdev, reg, regs[i].mask, val); + } +} + +static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = { + {0xD91C, 0xff000000}, + {0xD920, 0xff}, + {0xD920, 0xff00}, + {0xD920, 0xff0000}, + {0xD920, 0xff000000}, + {0xD924, 0xff}, + {0xD924, 0xff00}, + {0xD914, 0xff000000}, + {0xD918, 0xff}, + {0xD918, 0xff00}, + {0xD918, 0xff0000}, + {0xD918, 0xff000000}, + {0xD91C, 0xff}, + {0xD91C, 0xff00}, + {0xD91C, 0xff0000}, +}; + +static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = { + {0xD910, 0xff}, + {0xD910, 0xff00}, + {0xD910, 0xff0000}, + {0xD910, 0xff000000}, + {0xD914, 0xff}, + {0xD914, 0xff00}, + {0xD914, 0xff0000}, + {0xD908, 0xff}, + {0xD908, 0xff00}, + {0xD908, 0xff0000}, + {0xD908, 0xff000000}, + {0xD90C, 0xff}, + {0xD90C, 0xff00}, + {0xD90C, 0xff0000}, + {0xD90C, 0xff000000}, +}; + +void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev, + enum rtw89_mac_idx mac_idx, + enum rtw89_tssi_bandedge_cfg bandedge_cfg) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_reg_def *regs; + const u32 *data; + u32 reg; + int i; + + if (bandedge_cfg >= RTW89_TSSI_CFG_NUM) + return; + + if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT) + regs = rtw89_tssi_bandedge_regs_flat; + else + regs = rtw89_tssi_bandedge_regs_level; + + data = chip->tssi_dbw_table->data[bandedge_cfg]; + + for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) { + reg = rtw89_mac_reg_by_idx(regs[i].addr, mac_idx); + rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]); + } + + reg = rtw89_mac_reg_by_idx(R_AX_BANDEDGE_CFG, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg); + + rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg, + data[RTW89_TSSI_SBW20]); +} +EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg); diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index 9c97e77d9707..b8531bb7e606 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -221,6 +221,35 @@ enum rtw89_dig_gain_tia_idx { RTW89_DIG_GAIN_TIA_IDX1 = 1 }; +enum rtw89_tssi_bandedge_cfg { + RTW89_TSSI_BANDEDGE_FLAT, + RTW89_TSSI_BANDEDGE_LOW, + RTW89_TSSI_BANDEDGE_MID, + RTW89_TSSI_BANDEDGE_HIGH, + + RTW89_TSSI_CFG_NUM, +}; + +enum rtw89_tssi_sbw_idx { + RTW89_TSSI_SBW20, + RTW89_TSSI_SBW40_0, + RTW89_TSSI_SBW40_1, + RTW89_TSSI_SBW80_0, + RTW89_TSSI_SBW80_1, + RTW89_TSSI_SBW80_2, + RTW89_TSSI_SBW80_3, + RTW89_TSSI_SBW160_0, + RTW89_TSSI_SBW160_1, + RTW89_TSSI_SBW160_2, + RTW89_TSSI_SBW160_3, + RTW89_TSSI_SBW160_4, + RTW89_TSSI_SBW160_5, + RTW89_TSSI_SBW160_6, + RTW89_TSSI_SBW160_7, + + RTW89_TSSI_SBW_NUM, +}; + struct rtw89_txpwr_byrate_cfg { enum rtw89_band band; enum rtw89_nss nss; @@ -263,6 +292,10 @@ struct rtw89_phy_dig_gain_table { const struct rtw89_phy_dig_gain_cfg *cfg_tia_a; }; +struct rtw89_phy_tssi_dbw_table { + u32 data[RTW89_TSSI_CFG_NUM][RTW89_TSSI_SBW_NUM]; +}; + struct rtw89_phy_reg3_tbl { const struct rtw89_reg3_def *reg3; int size; @@ -446,5 +479,8 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask, void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev); void rtw89_phy_dig(struct rtw89_dev *rtwdev); void rtw89_phy_set_bss_color(struct rtw89_dev 
*rtwdev, struct ieee80211_vif *vif); +void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev, + enum rtw89_mac_idx mac_idx, + enum rtw89_tssi_bandedge_cfg bandedge_cfg); #endif diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 98465d746989..cd7916085e00 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2958,6 +2958,11 @@ #define R_AX_PWR_MACID_LMT_TABLE0 0xD36C #define R_AX_PWR_MACID_LMT_TABLE127 0xD568 +#define R_AX_TSSI_CTRL_HEAD 0xD908 +#define R_AX_BANDEDGE_CFG 0xD94C +#define B_AX_BANDEDGE_CFG_IDX_MASK GENMASK(31, 30) +#define R_AX_TSSI_CTRL_TAIL 0xD95C + #define R_AX_TXPWR_IMR 0xD9E0 #define R_AX_TXPWR_IMR_C1 0xF9E0 #define R_AX_TXPWR_ISR 0xD9E4 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 2c5bd381ebf5..cb93287d4722 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2113,6 +2113,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .txpwr_factor_rf = 2, .txpwr_factor_mac = 1, .dig_table = &rtw89_8852a_phy_dig_table, + .tssi_dbw_table = NULL, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), .support_bw160 = false, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 3f727dd42064..1b5f8da2e9e8 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -674,6 +674,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .txpwr_factor_rf = 2, .txpwr_factor_mac = 1, .dig_table = NULL, + .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table, .hw_sec_hdr = true, .sec_ctrl_efuse_size = 4, .physical_efuse_size = 1216, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c index e7852d286137..477c46041c94 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c @@ -19461,3 +19461,10 @@ const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg = { .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n, .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p, }; + +const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table = { + .data[RTW89_TSSI_BANDEDGE_FLAT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_LOW] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_MID] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + .data[RTW89_TSSI_BANDEDGE_HIGH] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, +}; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h index 616282c9bb98..7d71a92e2d27 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.h @@ -13,6 +13,7 @@ extern const struct rtw89_phy_table rtw89_8852c_phy_radioa_table; extern const struct rtw89_phy_table rtw89_8852c_phy_radiob_table; extern const struct rtw89_phy_table rtw89_8852c_phy_nctl_table; extern const struct rtw89_txpwr_table rtw89_8852c_byr_table; +extern const struct rtw89_phy_tssi_dbw_table rtw89_8852c_tssi_dbw_table; extern const struct rtw89_txpwr_track_cfg rtw89_8852c_trk_cfg; extern const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM] [RTW89_REGD_NUM]; -- cgit v1.2.3 From 
cc99eefa61f08a655a6471ee31e3096ad2e329d2 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:19 +0800 Subject: rtw89: 8852c: add BB initial and reset functions chip_ops::bb_sethw is to initialize BB settings out of BB parameters tables. Once switching channel or initialing, we do chip_ops::bb_reset to reset hardware counters and states to make things in expectation. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 26 ++++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 182 ++++++++++++++++++++++++++ 2 files changed, 208 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index cd7916085e00..a7daca1d462c 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3280,6 +3280,10 @@ #define B_ANAPAR_FLTRST BIT(22) #define B_ANAPAR_CRXBB GENMASK(18, 16) #define B_ANAPAR_14 GENMASK(15, 0) +#define R_RFE_E_A2 0x0334 +#define R_RFE_O_SEL_A2 0x0338 +#define R_RFE_SEL0_A2 0x033C +#define R_RFE_SEL32_A2 0x0340 #define R_SWSI_DATA_V1 0x0370 #define B_SWSI_DATA_VAL_V1 GENMASK(19, 0) #define B_SWSI_DATA_ADDR_V1 GENMASK(27, 20) @@ -3340,6 +3344,8 @@ #define B_PMAC_PTX_EN BIT(4) #define R_PMAC_TX_CNT 0x09C8 #define B_PMAC_TX_CNT_MSK GENMASK(31, 0) +#define R_DBCC_80P80_SEL_EVM_RPT 0x0A10 +#define B_DBCC_80P80_SEL_EVM_RPT_EN BIT(0) #define R_CCX 0x0C00 #define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4) #define B_MEASUREMENT_TRIG_MSK BIT(2) @@ -3380,6 +3386,8 @@ #define R_BRK_ASYNC_RST_EN_1 0x0DC0 #define R_BRK_ASYNC_RST_EN_2 0x0DC4 #define R_BRK_ASYNC_RST_EN_3 0x0DC8 +#define R_S0_HW_SI_DIS 0x1200 +#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28) #define R_P0_RXCK 0x12A0 #define B_P0_RXCK_VAL GENMASK(18, 16) #define B_P0_RXCK_ON BIT(19) @@ -3460,16 +3468,24 @@ #define B_TXFIR_CCD GENMASK(23, 0) #define R_TXFIRE 0x231c #define B_TXFIR_CEF GENMASK(23, 0) +#define R_11B_RX_V1 0x2320 +#define B_11B_RXCCA_DIS_V1 BIT(0) #define R_RXCCA 0x2344 #define B_RXCCA_DIS BIT(31) +#define R_RXCCA_V1 0x2320 +#define B_RXCCA_DIS_V1 BIT(0) #define R_RXSC 0x237C #define B_RXSC_EN BIT(0) #define R_RXSCOBC 0x23B0 #define B_RXSCOBC_TH GENMASK(18, 0) #define R_RXSCOCCK 0x23B4 #define B_RXSCOCCK_TH GENMASK(18, 0) +#define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10 +#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0) #define R_P1_EN_SOUND_WO_NDP 0x2D7C #define B_P1_EN_SOUND_WO_NDP BIT(1) +#define R_S1_HW_SI_DIS 0x3200 +#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28) #define R_P1_DBGMOD 0x32B8 #define B_P1_DBGMOD_ON BIT(30) #define R_S1_RXDC 0x32D4 @@ -3614,6 +3630,16 @@ #define R_P0_RFCTM 0x5864 #define B_P0_RFCTM_VAL GENMASK(25, 20) #define R_P0_RFCTM_RDY BIT(26) +#define R_P0_TRSW 0x5868 +#define B_P0_TRSW_B BIT(0) +#define B_P0_TRSW_A BIT(1) +#define B_P0_TRSW_X BIT(2) +#define B_P0_TRSW_SO_A2 GENMASK(7, 5) +#define R_P0_RFM 0x5894 +#define B_P0_RFM_DIS_WL BIT(7) +#define B_P0_RFM_TX_OPT BIT(6) +#define B_P0_RFM_BT_EN BIT(5) +#define B_P0_RFM_OUT GENMASK(4, 0) #define R_P0_TXDPD 0x58D4 #define B_P0_TXDPD GENMASK(31, 28) #define R_P0_TXPW_RSTB 0x58DC diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 1b5f8da2e9e8..9c6bef665c76 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -495,6 +495,186 @@ static void rtw8852c_power_trim(struct rtw89_dev *rtwdev) 
rtw8852c_pa_bias_trim(rtwdev); } +static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + /*HW SI reset*/ + rtw89_phy_write32_mask(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, + 0x7); + rtw89_phy_write32_mask(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, + 0x7); + + udelay(1); + + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, + phy_idx); + /*HW SI reset*/ + rtw89_phy_write32_mask(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, + 0x0); + rtw89_phy_write32_mask(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, + 0x0); + + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, + phy_idx); +} + +static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, bool en) +{ + struct rtw89_hal *hal = &rtwdev->hal; + + if (en) { + rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, + B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, + B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, + phy_idx); + if (hal->current_band_type == RTW89_BAND_2G) + rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1); + rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, + B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, + B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + fsleep(1); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, + phy_idx); + } +} + +static void rtw8852c_bb_reset(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw8852c_bb_reset_all(rtwdev, phy_idx); +} + +static +void rtw8852c_bb_gpio_trsw(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + u8 tx_path_en, u8 trsw_tx, + u8 trsw_rx, u8 trsw, u8 trsw_b) +{ + static const u32 path_cr_bases[] = {0x5868, 0x7868}; + u32 mask_ofst = 16; + u32 cr; + u32 val; + + if (path >= ARRAY_SIZE(path_cr_bases)) + return; + + cr = path_cr_bases[path]; + + mask_ofst += (tx_path_en * 4 + trsw_tx * 2 + trsw_rx) * 2; + val = FIELD_PREP(B_P0_TRSW_A, trsw) | FIELD_PREP(B_P0_TRSW_B, trsw_b); + + rtw89_phy_write32_mask(rtwdev, cr, (B_P0_TRSW_A | B_P0_TRSW_B) << mask_ofst, val); +} + +enum rtw8852c_rfe_src { + PAPE_RFM, + TRSW_RFM, + LNAON_RFM, +}; + +static +void rtw8852c_bb_gpio_rfm(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + enum rtw8852c_rfe_src src, u8 dis_tx_gnt_wl, + u8 active_tx_opt, u8 act_bt_en, u8 rfm_output_val) +{ + static const u32 path_cr_bases[] = {0x5894, 0x7894}; + static const u32 masks[] = {0, 8, 16}; + u32 mask, mask_ofst; + u32 cr; + u32 val; + + if (src >= ARRAY_SIZE(masks) || path >= ARRAY_SIZE(path_cr_bases)) + return; + + mask_ofst = masks[src]; + cr = path_cr_bases[path]; + + val = FIELD_PREP(B_P0_RFM_DIS_WL, dis_tx_gnt_wl) | + FIELD_PREP(B_P0_RFM_TX_OPT, active_tx_opt) | + FIELD_PREP(B_P0_RFM_BT_EN, act_bt_en) | + FIELD_PREP(B_P0_RFM_OUT, rfm_output_val); + mask = 0xff << mask_ofst; + + rtw89_phy_write32_mask(rtwdev, cr, mask, val); +} + +static void rtw8852c_bb_gpio_init(struct rtw89_dev *rtwdev) +{ + static const u32 cr_bases[] = {0x5800, 0x7800}; + u32 addr; + u8 i; + + for (i = 0; i < ARRAY_SIZE(cr_bases); i++) { + addr = cr_bases[i]; + rtw89_phy_write32_set(rtwdev, (addr | 0x68), B_P0_TRSW_A); + 
rtw89_phy_write32_clr(rtwdev, (addr | 0x68), B_P0_TRSW_X); + rtw89_phy_write32_clr(rtwdev, (addr | 0x68), B_P0_TRSW_SO_A2); + rtw89_phy_write32(rtwdev, (addr | 0x80), 0x77777777); + rtw89_phy_write32(rtwdev, (addr | 0x84), 0x77777777); + } + + rtw89_phy_write32(rtwdev, R_RFE_E_A2, 0xffffffff); + rtw89_phy_write32(rtwdev, R_RFE_O_SEL_A2, 0); + rtw89_phy_write32(rtwdev, R_RFE_SEL0_A2, 0); + rtw89_phy_write32(rtwdev, R_RFE_SEL32_A2, 0); + + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 0, 0, 0, 1); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 0, 1, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 1, 0, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 0, 1, 1, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 0, 0, 0, 1); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 0, 1, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 1, 0, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_A, 1, 1, 1, 1, 0); + + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 0, 0, 0, 1); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 0, 1, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 1, 0, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 0, 1, 1, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 0, 0, 0, 1); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 0, 1, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 1, 0, 1, 0); + rtw8852c_bb_gpio_trsw(rtwdev, RF_PATH_B, 1, 1, 1, 1, 0); + + rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, PAPE_RFM, 0, 0, 0, 0x0); + rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, TRSW_RFM, 0, 0, 0, 0x4); + rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_A, LNAON_RFM, 0, 0, 0, 0x8); + + rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, PAPE_RFM, 0, 0, 0, 0x0); + rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, TRSW_RFM, 0, 0, 0, 0x4); + rtw8852c_bb_gpio_rfm(rtwdev, RF_PATH_B, LNAON_RFM, 0, 0, 0, 0x8); +} + +static void rtw8852c_bb_macid_ctrl_init(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + u32 addr; + + for (addr = R_AX_PWR_MACID_LMT_TABLE0; + addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4) + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0); +} + +static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT, + B_DBCC_80P80_SEL_EVM_RPT_EN); + rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT2, + B_DBCC_80P80_SEL_EVM_RPT2_EN); + + rtw8852c_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0); + rtw8852c_bb_gpio_init(rtwdev); +} + static void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx) @@ -632,6 +812,8 @@ static void rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev) static const struct rtw89_chip_ops rtw8852c_chip_ops = { .enable_bb_rf = rtw8852c_mac_enable_bb_rf, .disable_bb_rf = rtw8852c_mac_disable_bb_rf, + .bb_reset = rtw8852c_bb_reset, + .bb_sethw = rtw8852c_bb_sethw, .read_efuse = rtw8852c_read_efuse, .read_phycap = rtw8852c_read_phycap, .power_trim = rtw8852c_power_trim, -- cgit v1.2.3 From e885871ee809aaadb06e8c63d5c3e90c8c07ccac Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Thu, 14 Apr 2022 14:20:20 +0800 Subject: rtw89: 8852c: support bb gain info Add parser for bb gain table and configure bb gain table for 8852c. While ctrl_ch, obtain bb gain error settings and write them to phy. 
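For illustration only: the parser added in the diff below treats each bb gain table entry as a pair of 32-bit words, where the "address" word carries type/path/band/config fields (one per byte) and the "data" word packs up to four signed 8-bit gain values. This is a minimal stand-alone sketch of that unpacking on a little-endian host; the values and the struct/field names are hypothetical and chosen only for the example, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the byte layout of union rtw89_phy_bb_gain_arg; names here
 * are placeholders for the example. */
struct gain_arg {
	uint8_t type;      /* or rxsc_start (4 bits) | bw (4 bits) for RPL entries */
	uint8_t path;      /* RF path index */
	uint8_t gain_band; /* 2G/5G/6G sub-band bucket */
	uint8_t cfg_type;  /* 0 gain error, 1 RPL offset, 2 bypass, 3 OP1dB */
};

static struct gain_arg decode_arg(uint32_t addr)
{
	struct gain_arg a = {
		.type      = addr & 0xff,
		.path      = (addr >> 8) & 0xff,
		.gain_band = (addr >> 16) & 0xff,
		.cfg_type  = (addr >> 24) & 0xff,
	};
	return a;
}

int main(void)
{
	uint32_t addr = 0x00020100;	/* hypothetical table entry */
	uint32_t data = 0xfc02fe01;	/* four packed s8 gain values */
	struct gain_arg a = decode_arg(addr);
	int i;

	printf("cfg_type=%u band=%u path=%u type=%u\n",
	       a.cfg_type, a.gain_band, a.path, a.type);
	for (i = 0; i < 4; i++, data >>= 8)
		printf("gain[%d] = %d\n", i, (int8_t)(data & 0xff));
	return 0;
}

The byte-at-a-time loop over the data word mirrors how rtw89_phy_cfg_bb_gain_error() in the diff fills its s8 gain arrays.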
Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 37 ++++ drivers/net/wireless/realtek/rtw89/phy.c | 243 ++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 178 +++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c.h | 1 + 4 files changed, 459 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index fd6b17b6498b..8ceee254d627 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -3000,6 +3000,41 @@ struct rtw89_hw_scan_info { u8 op_band; }; +enum rtw89_phy_bb_gain_band { + RTW89_BB_GAIN_BAND_2G = 0, + RTW89_BB_GAIN_BAND_5G_L = 1, + RTW89_BB_GAIN_BAND_5G_M = 2, + RTW89_BB_GAIN_BAND_5G_H = 3, + RTW89_BB_GAIN_BAND_6G_L = 4, + RTW89_BB_GAIN_BAND_6G_M = 5, + RTW89_BB_GAIN_BAND_6G_H = 6, + RTW89_BB_GAIN_BAND_6G_UH = 7, + + RTW89_BB_GAIN_BAND_NR, +}; + +enum rtw89_phy_bb_rxsc_num { + RTW89_BB_RXSC_NUM_40 = 9, /* SC: 0, 1~8 */ + RTW89_BB_RXSC_NUM_80 = 13, /* SC: 0, 1~8, 9~12 */ + RTW89_BB_RXSC_NUM_160 = 15, /* SC: 0, 1~8, 9~12, 13~14 */ +}; + +struct rtw89_phy_bb_gain_info { + s8 lna_gain[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM]; + s8 tia_gain[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][TIA_GAIN_NUM]; + s8 lna_gain_bypass[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM]; + s8 lna_op1db[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX][LNA_GAIN_NUM]; + s8 tia_lna_op1db[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX] + [LNA_GAIN_NUM + 1]; /* TIA0_LNA0~6 + TIA1_LNA6 */ + s8 rpl_ofst_20[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX]; + s8 rpl_ofst_40[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX] + [RTW89_BB_RXSC_NUM_40]; + s8 rpl_ofst_80[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX] + [RTW89_BB_RXSC_NUM_80]; + s8 rpl_ofst_160[RTW89_BB_GAIN_BAND_NR][RF_PATH_MAX] + [RTW89_BB_RXSC_NUM_160]; +}; + struct rtw89_dev { struct ieee80211_hw *hw; struct device *dev; @@ -3062,6 +3097,8 @@ struct rtw89_dev { struct rtw89_env_monitor_info env_monitor; struct rtw89_dig_info dig; struct rtw89_phy_ch_info ch_info; + struct rtw89_phy_bb_gain_info bb_gain; + struct delayed_work track_work; struct delayed_work coex_act1_work; struct delayed_work coex_bt_devinfo_work; diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index aff03261ab72..7d0593d8fafe 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -790,6 +790,245 @@ static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev, rtw89_phy_write32(rtwdev, reg->addr, reg->data); } +union rtw89_phy_bb_gain_arg { + u32 addr; + struct { + union { + u8 type; + struct { + u8 rxsc_start:4; + u8 bw:4; + }; + }; + u8 path; + u8 gain_band; + u8 cfg_type; + }; +} __packed; + +static void +rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev, + union rtw89_phy_bb_gain_arg arg, u32 data) +{ + struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain; + u8 type = arg.type; + u8 path = arg.path; + u8 gband = arg.gain_band; + int i; + + switch (type) { + case 0: + for (i = 0; i < 4; i++, data >>= 8) + gain->lna_gain[gband][path][i] = data & 0xff; + break; + case 1: + for (i = 4; i < 7; i++, data >>= 8) + gain->lna_gain[gband][path][i] = data & 0xff; + break; + case 2: + for (i = 0; i < 2; i++, data >>= 8) + gain->tia_gain[gband][path][i] = data & 0xff; + break; + default: + rtw89_warn(rtwdev, + "bb gain error {0x%x:0x%x} with 
unknown type: %d\n", + arg.addr, data, type); + break; + } +} + +enum rtw89_phy_bb_rxsc_start_idx { + RTW89_BB_RXSC_START_IDX_FULL = 0, + RTW89_BB_RXSC_START_IDX_20 = 1, + RTW89_BB_RXSC_START_IDX_20_1 = 5, + RTW89_BB_RXSC_START_IDX_40 = 9, + RTW89_BB_RXSC_START_IDX_80 = 13, +}; + +static void +rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev, + union rtw89_phy_bb_gain_arg arg, u32 data) +{ + struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain; + u8 rxsc_start = arg.rxsc_start; + u8 bw = arg.bw; + u8 path = arg.path; + u8 gband = arg.gain_band; + u8 rxsc; + s8 ofst; + int i; + + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + gain->rpl_ofst_20[gband][path] = (s8)data; + break; + case RTW89_CHANNEL_WIDTH_40: + if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) { + gain->rpl_ofst_40[gband][path][0] = (s8)data; + } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) { + for (i = 0; i < 2; i++, data >>= 8) { + rxsc = RTW89_BB_RXSC_START_IDX_20 + i; + ofst = (s8)(data & 0xff); + gain->rpl_ofst_40[gband][path][rxsc] = ofst; + } + } + break; + case RTW89_CHANNEL_WIDTH_80: + if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) { + gain->rpl_ofst_80[gband][path][0] = (s8)data; + } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) { + for (i = 0; i < 4; i++, data >>= 8) { + rxsc = RTW89_BB_RXSC_START_IDX_20 + i; + ofst = (s8)(data & 0xff); + gain->rpl_ofst_80[gband][path][rxsc] = ofst; + } + } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) { + for (i = 0; i < 2; i++, data >>= 8) { + rxsc = RTW89_BB_RXSC_START_IDX_40 + i; + ofst = (s8)(data & 0xff); + gain->rpl_ofst_80[gband][path][rxsc] = ofst; + } + } + break; + case RTW89_CHANNEL_WIDTH_160: + if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) { + gain->rpl_ofst_160[gband][path][0] = (s8)data; + } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) { + for (i = 0; i < 4; i++, data >>= 8) { + rxsc = RTW89_BB_RXSC_START_IDX_20 + i; + ofst = (s8)(data & 0xff); + gain->rpl_ofst_160[gband][path][rxsc] = ofst; + } + } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) { + for (i = 0; i < 4; i++, data >>= 8) { + rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i; + ofst = (s8)(data & 0xff); + gain->rpl_ofst_160[gband][path][rxsc] = ofst; + } + } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) { + for (i = 0; i < 4; i++, data >>= 8) { + rxsc = RTW89_BB_RXSC_START_IDX_40 + i; + ofst = (s8)(data & 0xff); + gain->rpl_ofst_160[gband][path][rxsc] = ofst; + } + } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) { + for (i = 0; i < 2; i++, data >>= 8) { + rxsc = RTW89_BB_RXSC_START_IDX_80 + i; + ofst = (s8)(data & 0xff); + gain->rpl_ofst_160[gband][path][rxsc] = ofst; + } + } + break; + default: + rtw89_warn(rtwdev, + "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n", + arg.addr, data, bw); + break; + } +} + +static void +rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev, + union rtw89_phy_bb_gain_arg arg, u32 data) +{ + struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain; + u8 type = arg.type; + u8 path = arg.path; + u8 gband = arg.gain_band; + int i; + + switch (type) { + case 0: + for (i = 0; i < 4; i++, data >>= 8) + gain->lna_gain_bypass[gband][path][i] = data & 0xff; + break; + case 1: + for (i = 4; i < 7; i++, data >>= 8) + gain->lna_gain_bypass[gband][path][i] = data & 0xff; + break; + default: + rtw89_warn(rtwdev, + "bb gain bypass {0x%x:0x%x} with unknown type: %d\n", + arg.addr, data, type); + break; + } +} + +static void +rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev, + union rtw89_phy_bb_gain_arg arg, u32 data) +{ + struct rtw89_phy_bb_gain_info 
*gain = &rtwdev->bb_gain; + u8 type = arg.type; + u8 path = arg.path; + u8 gband = arg.gain_band; + int i; + + switch (type) { + case 0: + for (i = 0; i < 4; i++, data >>= 8) + gain->lna_op1db[gband][path][i] = data & 0xff; + break; + case 1: + for (i = 4; i < 7; i++, data >>= 8) + gain->lna_op1db[gband][path][i] = data & 0xff; + break; + case 2: + for (i = 0; i < 4; i++, data >>= 8) + gain->tia_lna_op1db[gband][path][i] = data & 0xff; + break; + case 3: + for (i = 4; i < 8; i++, data >>= 8) + gain->tia_lna_op1db[gband][path][i] = data & 0xff; + break; + default: + rtw89_warn(rtwdev, + "bb gain op1db {0x%x:0x%x} with unknown type: %d\n", + arg.addr, data, type); + break; + } +} + +static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev, + const struct rtw89_reg2_def *reg, + enum rtw89_rf_path rf_path, + void *extra_data) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr }; + + if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR) + return; + + if (arg.path >= chip->rf_path_num) + return; + + if (arg.addr >= 0xf9 && arg.addr <= 0xfe) { + rtw89_warn(rtwdev, "bb gain table with flow ctrl\n"); + return; + } + + switch (arg.cfg_type) { + case 0: + rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data); + break; + case 1: + rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data); + break; + case 2: + rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data); + break; + case 3: + rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data); + break; + default: + rtw89_warn(rtwdev, + "bb gain {0x%x:0x%x} with unknown cfg type: %d\n", + arg.addr, reg->data, arg.cfg_type); + break; + } +} + static void rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg, @@ -1033,9 +1272,13 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_phy_table *bb_table = chip->bb_table; + const struct rtw89_phy_table *bb_gain_table = chip->bb_gain_table; rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL); rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0); + if (bb_gain_table) + rtw89_phy_init_reg(rtwdev, bb_gain_table, + rtw89_phy_config_bb_gain, NULL); rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 9c6bef665c76..dba279938347 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -495,6 +495,184 @@ static void rtw8852c_power_trim(struct rtw89_dev *rtwdev) rtw8852c_pa_bias_trim(rtwdev); } +struct rtw8852c_bb_gain { + u32 gain_g[BB_PATH_NUM_8852C]; + u32 gain_a[BB_PATH_NUM_8852C]; + u32 gain_mask; +}; + +static const struct rtw8852c_bb_gain bb_gain_lna[LNA_GAIN_NUM] = { + { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, + .gain_mask = 0xff000000 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x000000ff }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x0000ff00 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0xff000000 }, + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0x000000ff }, +}; + +static const struct rtw8852c_bb_gain bb_gain_tia[TIA_GAIN_NUM] = { + { .gain_g = {0x4680, 0x4764}, .gain_a = 
{0x4664, 0x4748}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0xff000000 }, +}; + +struct rtw8852c_bb_gain_bypass { + u32 gain_g[BB_PATH_NUM_8852C]; + u32 gain_a[BB_PATH_NUM_8852C]; + u32 gain_mask_g; + u32 gain_mask_a; +}; + +static +const struct rtw8852c_bb_gain_bypass bb_gain_bypass_lna[LNA_GAIN_NUM] = { + { .gain_g = {0x4BB8, 0x4C7C}, .gain_a = {0x4BB4, 0x4C78}, + .gain_mask_g = 0xff000000, .gain_mask_a = 0xff}, + { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78}, + .gain_mask_g = 0xff, .gain_mask_a = 0xff00}, + { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78}, + .gain_mask_g = 0xff00, .gain_mask_a = 0xff0000}, + { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB4, 0x4C78}, + .gain_mask_g = 0xff0000, .gain_mask_a = 0xff000000}, + { .gain_g = {0x4BBC, 0x4C80}, .gain_a = {0x4BB8, 0x4C7C}, + .gain_mask_g = 0xff000000, .gain_mask_a = 0xff}, + { .gain_g = {0x4BC0, 0x4C84}, .gain_a = {0x4BB8, 0x4C7C}, + .gain_mask_g = 0xff, .gain_mask_a = 0xff00}, + { .gain_g = {0x4BC0, 0x4C84}, .gain_a = {0x4BB8, 0x4C7C}, + .gain_mask_g = 0xff00, .gain_mask_a = 0xff0000}, +}; + +struct rtw8852c_bb_gain_op1db { + struct { + u32 lna[BB_PATH_NUM_8852C]; + u32 tia_lna[BB_PATH_NUM_8852C]; + u32 mask; + } reg[LNA_GAIN_NUM]; + u32 reg_tia0_lna6[BB_PATH_NUM_8852C]; + u32 mask_tia0_lna6; +}; + +static const struct rtw8852c_bb_gain_op1db bb_gain_op1db_a = { + .reg = { + { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754}, + .mask = 0xff}, + { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754}, + .mask = 0xff00}, + { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754}, + .mask = 0xff0000}, + { .lna = {0x4668, 0x474c}, .tia_lna = {0x4670, 0x4754}, + .mask = 0xff000000}, + { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758}, + .mask = 0xff}, + { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758}, + .mask = 0xff00}, + { .lna = {0x466c, 0x4750}, .tia_lna = {0x4674, 0x4758}, + .mask = 0xff0000}, + }, + .reg_tia0_lna6 = {0x4674, 0x4758}, + .mask_tia0_lna6 = 0xff000000, +}; + +static enum rtw89_phy_bb_gain_band +rtw8852c_mapping_gain_band(enum rtw89_subband subband) +{ + switch (subband) { + default: + case RTW89_CH_2G: + return RTW89_BB_GAIN_BAND_2G; + case RTW89_CH_5G_BAND_1: + return RTW89_BB_GAIN_BAND_5G_L; + case RTW89_CH_5G_BAND_3: + return RTW89_BB_GAIN_BAND_5G_M; + case RTW89_CH_5G_BAND_4: + return RTW89_BB_GAIN_BAND_5G_H; + case RTW89_CH_6G_BAND_IDX0: + case RTW89_CH_6G_BAND_IDX1: + return RTW89_BB_GAIN_BAND_6G_L; + case RTW89_CH_6G_BAND_IDX2: + case RTW89_CH_6G_BAND_IDX3: + return RTW89_BB_GAIN_BAND_6G_M; + case RTW89_CH_6G_BAND_IDX4: + case RTW89_CH_6G_BAND_IDX5: + return RTW89_BB_GAIN_BAND_6G_H; + case RTW89_CH_6G_BAND_IDX6: + case RTW89_CH_6G_BAND_IDX7: + return RTW89_BB_GAIN_BAND_6G_UH; + } +} + +static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev, + enum rtw89_subband subband, + enum rtw89_rf_path path) +{ + const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain; + u8 gain_band = rtw8852c_mapping_gain_band(subband); + s32 val; + u32 reg; + u32 mask; + int i; + + for (i = 0; i < LNA_GAIN_NUM; i++) { + if (subband == RTW89_CH_2G) + reg = bb_gain_lna[i].gain_g[path]; + else + reg = bb_gain_lna[i].gain_a[path]; + + mask = bb_gain_lna[i].gain_mask; + val = gain->lna_gain[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + + if (subband == RTW89_CH_2G) { + reg = bb_gain_bypass_lna[i].gain_g[path]; + mask = bb_gain_bypass_lna[i].gain_mask_g; + } else { + reg = bb_gain_bypass_lna[i].gain_a[path]; + 
mask = bb_gain_bypass_lna[i].gain_mask_a; + } + + val = gain->lna_gain_bypass[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + + if (subband != RTW89_CH_2G) { + reg = bb_gain_op1db_a.reg[i].lna[path]; + mask = bb_gain_op1db_a.reg[i].mask; + val = gain->lna_op1db[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + + reg = bb_gain_op1db_a.reg[i].tia_lna[path]; + mask = bb_gain_op1db_a.reg[i].mask; + val = gain->tia_lna_op1db[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + } + } + + if (subband != RTW89_CH_2G) { + reg = bb_gain_op1db_a.reg_tia0_lna6[path]; + mask = bb_gain_op1db_a.mask_tia0_lna6; + val = gain->tia_lna_op1db[gain_band][path][7]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + } + + for (i = 0; i < TIA_GAIN_NUM; i++) { + if (subband == RTW89_CH_2G) + reg = bb_gain_tia[i].gain_g[path]; + else + reg = bb_gain_tia[i].gain_a[path]; + + mask = bb_gain_tia[i].gain_mask; + val = gain->tia_gain[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + } +} + static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h index d0594716040b..d1c5b4367a9d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h @@ -8,6 +8,7 @@ #include "core.h" #define RF_PATH_NUM_8852C 2 +#define BB_PATH_NUM_8852C 2 struct rtw8852c_u_efuse { u8 rsvd[0x38]; -- cgit v1.2.3 From e6b17cbd34e3f7aa88169e21c814692c151c7418 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:21 +0800 Subject: rtw89: 8852c: add efuse gain offset parser Define efuse struct to access gain offset, and store them for further use by setting channel. 
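Each gain-offset efuse byte packs two signed 4-bit values, one per RF path, and an all-ones byte (0xff) marks the field as uncalibrated; that is what _decode_efuse_gain() in the diff below implements with sign_extend32(). A stand-alone sketch of the same decoding, using an arbitrary example byte:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int8_t sext4(uint8_t nibble)
{
	/* sign-extend a 4-bit two's-complement value to 8 bits */
	return (nibble & 0x8) ? (int8_t)(nibble - 16) : (int8_t)nibble;
}

static bool decode_efuse_gain(uint8_t data, int8_t *path_a, int8_t *path_b)
{
	*path_a = sext4(data >> 4);	/* high nibble -> RF path A */
	*path_b = sext4(data & 0xf);	/* low nibble  -> RF path B */
	return data != 0xff;		/* 0xff means not calibrated */
}

int main(void)
{
	int8_t a, b;
	bool valid = decode_efuse_gain(0x9e, &a, &b); /* arbitrary example byte */

	printf("valid=%d path_a=%d path_b=%d\n", valid, a, b);
	return 0;
}

With 0x9e this prints path_a=-7 and path_b=-2, matching the per-path assignment made by rtw8852c_efuse_parsing_gain_offset() in the patch.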
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 17 +++++ drivers/net/wireless/realtek/rtw89/reg.h | 19 +++++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 101 ++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c.h | 18 ++++- 4 files changed, 151 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 8ceee254d627..f34aca70908e 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -74,6 +74,16 @@ enum rtw89_subband { RTW89_SUBBAND_NR, }; +enum rtw89_gain_offset { + RTW89_GAIN_OFFSET_2G_CCK, + RTW89_GAIN_OFFSET_2G_OFDM, + RTW89_GAIN_OFFSET_5G_LOW, + RTW89_GAIN_OFFSET_5G_MID, + RTW89_GAIN_OFFSET_5G_HIGH, + + RTW89_GAIN_OFFSET_NR, +}; + enum rtw89_hci_type { RTW89_HCI_TYPE_PCIE, RTW89_HCI_TYPE_USB, @@ -3035,6 +3045,12 @@ struct rtw89_phy_bb_gain_info { [RTW89_BB_RXSC_NUM_160]; }; +struct rtw89_phy_efuse_gain { + bool offset_valid; + s8 offset[RF_PATH_MAX][RTW89_GAIN_OFFSET_NR]; /* S(8, 0) */ + s8 offset_base[RTW89_PHY_MAX]; /* S(8, 4) */ +}; + struct rtw89_dev { struct ieee80211_hw *hw; struct device *dev; @@ -3098,6 +3114,7 @@ struct rtw89_dev { struct rtw89_dig_info dig; struct rtw89_phy_ch_info ch_info; struct rtw89_phy_bb_gain_info bb_gain; + struct rtw89_phy_efuse_gain efuse_gain; struct delayed_work track_work; struct delayed_work coex_act1_work; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index a7daca1d462c..bd5526ffb8db 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3470,6 +3470,8 @@ #define B_TXFIR_CEF GENMASK(23, 0) #define R_11B_RX_V1 0x2320 #define B_11B_RXCCA_DIS_V1 BIT(0) +#define R_RPL_OFST 0x2340 +#define B_RPL_OFST_MASK GENMASK(14, 8) #define R_RXCCA 0x2344 #define B_RXCCA_DIS BIT(31) #define R_RXCCA_V1 0x2320 @@ -3570,6 +3572,11 @@ #define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) #define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4 #define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) +#define R_PATH0_G_TIA0_LNA6_OP1DB_V1 0x4694 +#define B_PATH0_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0) +#define R_PATH0_G_TIA1_LNA6_OP1DB_V1 0x4694 +#define B_PATH0_R_G_OFST_MASK GENMASK(23, 16) +#define B_PATH0_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8) #define R_P0_NBIIDX 0x469C #define B_P0_NBIIDX_VAL GENMASK(11, 0) #define B_P0_NBIIDX_NOTCH_EN BIT(12) @@ -3587,6 +3594,8 @@ #define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) #define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778 #define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) +#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778 +#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0) #define R_P1_NBIIDX 0x4770 #define B_P1_NBIIDX_VAL GENMASK(11, 0) #define B_P1_NBIIDX_NOTCH_EN BIT(12) @@ -3601,6 +3610,14 @@ #define R_CHBW_MOD 0x4978 #define B_CHBW_MOD_PRICH GENMASK(11, 8) #define B_CHBW_MOD_SBW GENMASK(13, 12) +#define R_RPL_BIAS_COMP 0x4DF0 +#define B_RPL_BIAS_COMP_MASK GENMASK(7, 0) +#define R_RPL_PATHAB 0x4E0C +#define B_RPL_PATHB_MASK GENMASK(23, 16) +#define B_RPL_PATHA_MASK GENMASK(15, 8) +#define R_RSSI_M_PATHAB 0x4E2C +#define B_RSSI_M_PATHB_MASK GENMASK(15, 8) +#define B_RSSI_M_PATHA_MASK GENMASK(7, 0) #define R_DCFO_COMP_S0_V1 0x4A40 #define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0) #define R_BMODE_PDTH_V1 0x4B64 @@ -3669,6 +3686,8 @@ #define B_S0_DACKQ7_K GENMASK(15, 8) 
#define R_S0_DACKQ8 0x5E98 #define B_S0_DACKQ8_K GENMASK(15, 8) +#define R_RPL_BIAS_COMP1 0x6DF0 +#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0) #define R_P1_TMETER 0x7810 #define B_P1_TMETER GENMASK(15, 10) #define B_P1_TMETER_DIS BIT(16) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index dba279938347..bb935632ce40 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -317,6 +317,41 @@ static void rtw8852c_efuse_parsing_tssi(struct rtw89_dev *rtwdev, } } +static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low) +{ + if (high) + *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3); + if (low) + *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3); + + return data != 0xff; +} + +static void rtw8852c_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev, + struct rtw8852c_efuse *map) +{ + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + bool valid = false; + + valid |= _decode_efuse_gain(map->rx_gain_2g_cck, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]); + valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]); + valid |= _decode_efuse_gain(map->rx_gain_5g_low, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]); + valid |= _decode_efuse_gain(map->rx_gain_5g_mid, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]); + valid |= _decode_efuse_gain(map->rx_gain_5g_high, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]); + + gain->offset_valid = valid; +} + static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map) { struct rtw89_efuse *efuse = &rtwdev->efuse; @@ -327,6 +362,7 @@ static int rtw8852c_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map) efuse->country_code[0] = map->country_code[0]; efuse->country_code[1] = map->country_code[1]; rtw8852c_efuse_parsing_tssi(rtwdev, map); + rtw8852c_efuse_parsing_gain_offset(rtwdev, map); switch (rtwdev->hci.type) { case RTW89_HCI_TYPE_PCIE: @@ -673,6 +709,63 @@ static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev, } } +static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev, + const struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx, + enum rtw89_rf_path path) +{ + static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA0_LNA6_OP1DB_V1, + R_PATH1_G_TIA0_LNA6_OP1DB_V1}; + static const u32 rpl_mask[2] = {B_RPL_PATHA_MASK, B_RPL_PATHB_MASK}; + static const u32 rpl_tb_mask[2] = {B_RSSI_M_PATHA_MASK, B_RSSI_M_PATHB_MASK}; + struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain; + enum rtw89_gain_offset gain_band; + s32 offset_q0, offset_base_q4; + s32 tmp = 0; + + if (!efuse_gain->offset_valid) + return; + + if (rtwdev->dbcc_en && path == RF_PATH_B) + phy_idx = RTW89_PHY_1; + + if (param->band_type == RTW89_BAND_2G) { + offset_q0 = efuse_gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK]; + offset_base_q4 = efuse_gain->offset_base[phy_idx]; + + tmp = clamp_t(s32, (-offset_q0 << 3) + (offset_base_q4 >> 1), + S8_MIN >> 1, S8_MAX >> 1); + rtw89_phy_write32_mask(rtwdev, R_RPL_OFST, B_RPL_OFST_MASK, tmp & 0x7f); + } + + switch (param->subband_type) { + default: + case RTW89_CH_2G: + gain_band = RTW89_GAIN_OFFSET_2G_OFDM; + break; + case RTW89_CH_5G_BAND_1: + 
gain_band = RTW89_GAIN_OFFSET_5G_LOW; + break; + case RTW89_CH_5G_BAND_3: + gain_band = RTW89_GAIN_OFFSET_5G_MID; + break; + case RTW89_CH_5G_BAND_4: + gain_band = RTW89_GAIN_OFFSET_5G_HIGH; + break; + } + + offset_q0 = -efuse_gain->offset[path][gain_band]; + offset_base_q4 = efuse_gain->offset_base[phy_idx]; + + tmp = (offset_q0 << 2) + (offset_base_q4 >> 2); + tmp = clamp_t(s32, -tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[path], B_PATH0_R_G_OFST_MASK, tmp & 0xff); + + tmp = clamp_t(s32, offset_q0 << 4, S8_MIN, S8_MAX); + rtw89_phy_write32_idx(rtwdev, R_RPL_PATHAB, rpl_mask[path], tmp & 0xff, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSSI_M_PATHAB, rpl_tb_mask[path], tmp & 0xff, phy_idx); +} + static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { @@ -844,6 +937,8 @@ static void rtw8852c_bb_macid_ctrl_init(struct rtw89_dev *rtwdev, static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev) { + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT, B_DBCC_80P80_SEL_EVM_RPT_EN); rtw89_phy_write32_set(rtwdev, R_DBCC_80P80_SEL_EVM_RPT2, @@ -851,6 +946,12 @@ static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev) rtw8852c_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0); rtw8852c_bb_gpio_init(rtwdev); + + /* read these registers after loading BB parameters */ + gain->offset_base[RTW89_PHY_0] = + rtw89_phy_read32_mask(rtwdev, R_RPL_BIAS_COMP, B_RPL_BIAS_COMP_MASK); + gain->offset_base[RTW89_PHY_1] = + rtw89_phy_read32_mask(rtwdev, R_RPL_BIAS_COMP1, B_RPL_BIAS_COMP1_MASK); } static diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h index d1c5b4367a9d..ac642808a81f 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h @@ -59,13 +59,23 @@ struct rtw8852c_efuse { u8 rsvd7[3]; u8 path_a_therm; u8 path_b_therm; - u8 rsvd8[46]; + u8 rsvd8[2]; + u8 rx_gain_2g_ofdm; + u8 rsvd9; + u8 rx_gain_2g_cck; + u8 rsvd10; + u8 rx_gain_5g_low; + u8 rsvd11; + u8 rx_gain_5g_mid; + u8 rsvd12; + u8 rx_gain_5g_high; + u8 rsvd13[35]; u8 bw40_1s_tssi_6g_a[TSSI_MCS_6G_CH_GROUP_NUM]; - u8 rsvd9[10]; + u8 rsvd14[10]; u8 bw40_1s_tssi_6g_b[TSSI_MCS_6G_CH_GROUP_NUM]; - u8 rsvd10[110]; + u8 rsvd15[110]; u8 channel_plan_6g; - u8 rsvd11[71]; + u8 rsvd16[71]; union { struct rtw8852c_u_efuse u; struct rtw8852c_e_efuse e; -- cgit v1.2.3 From 7b9c98c7a484f33c610d957f5fb9a281d1642828 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:22 +0800 Subject: rtw89: 8852c: add HFC parameters HFC is short for HCI flow control, and these parameters are used to configure PCI quota for TX/RX.
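To illustrate how a quota table of this kind is expected to balance, the short standalone C sketch below checks that the two group quotas of a public configuration sum to the public maximum (1614 + 1614 = 3228 for the PCIe table this patch adds). The struct layout, field names and the check itself are simplified stand-ins for the driver's rtw89_hfc_* definitions, not the actual kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the driver's public-quota configuration. */
struct hfc_pub_cfg {
	unsigned int grp0;    /* group 0 quota */
	unsigned int grp1;    /* group 1 quota */
	unsigned int pub_max; /* total shared (public) pages */
	unsigned int wp_thrd; /* WP threshold */
};

/* The per-group quotas are expected to add up to the public maximum. */
static bool hfc_pub_cfg_balanced(const struct hfc_pub_cfg *cfg)
{
	return cfg->grp0 + cfg->grp1 == cfg->pub_max;
}

int main(void)
{
	/* Values taken from rtw8852c_hfc_pubcfg_pcie in this patch. */
	struct hfc_pub_cfg pcie = { 1614, 1614, 3228, 0 };

	printf("8852C PCIe public quota: %s\n",
	       hfc_pub_cfg_balanced(&pcie) ? "balanced" : "unbalanced");
	return 0;
}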
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 32 +++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index bb935632ce40..8be3cb22eb11 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -11,6 +11,37 @@ #include "rtw8852c.h" #include "rtw8852c_table.h" +static const struct rtw89_hfc_ch_cfg rtw8852c_hfc_chcfg_pcie[] = { + {13, 1614, grp_0}, /* ACH 0 */ + {13, 1614, grp_0}, /* ACH 1 */ + {13, 1614, grp_0}, /* ACH 2 */ + {13, 1614, grp_0}, /* ACH 3 */ + {13, 1614, grp_1}, /* ACH 4 */ + {13, 1614, grp_1}, /* ACH 5 */ + {13, 1614, grp_1}, /* ACH 6 */ + {13, 1614, grp_1}, /* ACH 7 */ + {13, 1614, grp_0}, /* B0MGQ */ + {13, 1614, grp_0}, /* B0HIQ */ + {13, 1614, grp_1}, /* B1MGQ */ + {13, 1614, grp_1}, /* B1HIQ */ + {40, 0, 0} /* FWCMDQ */ +}; + +static const struct rtw89_hfc_pub_cfg rtw8852c_hfc_pubcfg_pcie = { + 1614, /* Group 0 */ + 1614, /* Group 1 */ + 3228, /* Public Max */ + 0 /* WP threshold */ +}; + +static const struct rtw89_hfc_param_ini rtw8852c_hfc_param_ini_pcie[] = { + [RTW89_QTA_SCC] = {rtw8852c_hfc_chcfg_pcie, &rtw8852c_hfc_pubcfg_pcie, + &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH}, + [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie, + RTW89_HCIFC_POH}, + [RTW89_QTA_INVALID] = {NULL}, +}; + static const struct rtw89_dle_mem rtw8852c_dle_mem_pcie[] = { [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size19, &rtw89_mac_size.ple_size19, &rtw89_mac_size.wde_qt18, @@ -1116,6 +1147,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .chip_id = RTL8852C, .ops = &rtw8852c_chip_ops, .fw_name = "rtw89/rtw8852c_fw.bin", + .hfc_param_ini = rtw8852c_hfc_param_ini_pcie, .dle_mem = rtw8852c_dle_mem_pcie, .rf_base_addr = {0xe000, 0xf000}, .pwr_on_seq = NULL, -- cgit v1.2.3 From bb865ba6ea83b561bfee2f74a3cb89ff5b983ecb Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:23 +0800 Subject: rtw89: 8852c: add set channel function of RF part Prepare functions to configure channel and bandwidth accordingly. 
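As an aid to reading the new reg.h masks, the standalone C sketch below shows how a CFGCH-style RF word could be assembled from channel, band and bandwidth codes using the bit positions this patch defines (RR_CFGCH_CH bits 7:0, RR_CFGCH_BAND0 bits 9:8, RR_CFGCH_BW bits 11:10, RR_CFGCH_BAND1 bits 17:16). The PREP() macro is a local stand-in for the kernel's FIELD_PREP(), and the example values (5 GHz channel 36 at 80 MHz, so band codes 1 and BW code 1) are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define SHIFT_OF(mask)	(__builtin_ctz(mask))
#define PREP(mask, val)	(((uint32_t)(val) << SHIFT_OF(mask)) & (mask))

/* Bit layout as added to reg.h by this patch. */
#define CFGCH_CH_MASK		0x000000ffu	/* RR_CFGCH_CH    GENMASK(7, 0)   */
#define CFGCH_BAND0_MASK	0x00000300u	/* RR_CFGCH_BAND0 GENMASK(9, 8)   */
#define CFGCH_BW_MASK		0x00000c00u	/* RR_CFGCH_BW    GENMASK(11, 10) */
#define CFGCH_BAND1_MASK	0x00030000u	/* RR_CFGCH_BAND1 GENMASK(17, 16) */

int main(void)
{
	/* Example: 5 GHz channel 36 at 80 MHz (CFGCH_BAND*_5G = 1, CFGCH_BW_80M = 1). */
	uint32_t reg18 = PREP(CFGCH_CH_MASK, 36) |
			 PREP(CFGCH_BAND0_MASK, 1) |
			 PREP(CFGCH_BAND1_MASK, 1) |
			 PREP(CFGCH_BW_MASK, 1);

	printf("RF reg 0x18 word: 0x%05x\n", reg18);
	return 0;
}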
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-10-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 24 +++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 214 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 14 ++ 3 files changed, 252 insertions(+) create mode 100644 drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c create mode 100644 drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index bd5526ffb8db..d666c6e2a9de 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3158,6 +3158,23 @@ #define RR_DTXLOK 0x08 #define RR_RSV2 0x09 #define RR_CFGCH 0x18 +#define RR_CFGCH_V1 0x10018 +#define RR_CFGCH_BAND1 GENMASK(17, 16) +#define CFGCH_BAND1_2G 0 +#define CFGCH_BAND1_5G 1 +#define CFGCH_BAND1_6G 3 +#define RR_CFGCH_BAND0 GENMASK(9, 8) +#define CFGCH_BAND0_2G 0 +#define CFGCH_BAND0_5G 1 +#define CFGCH_BAND0_6G 0 +#define RR_CFGCH_BW GENMASK(11, 10) +#define RR_CFGCH_CH GENMASK(7, 0) +#define CFGCH_BW_20M 3 +#define CFGCH_BW_40M 2 +#define CFGCH_BW_80M 1 +#define CFGCH_BW_160M 0 +#define RR_APK 0x19 +#define RR_APK_MOD GENMASK(5, 4) #define RR_BTC 0x1a #define RR_BTC_TXBB GENMASK(14, 12) #define RR_BTC_RXBB GENMASK(11, 10) @@ -3176,8 +3193,10 @@ #define RR_RXK_SEL2G BIT(8) #define RR_LUTWA 0x33 #define RR_LUTWA_MASK GENMASK(9, 0) +#define RR_LUTWA_M2 GENMASK(4, 0) #define RR_LUTWD1 0x3e #define RR_LUTWD0 0x3f +#define RR_LUTWD0_LB GENMASK(5, 0) #define RR_TM 0x42 #define RR_TM_TRI BIT(19) #define RR_TM_VAL GENMASK(6, 1) @@ -3260,6 +3279,7 @@ #define RR_LUTDBG 0xdf #define RR_LUTDBG_LOK BIT(2) #define RR_LUTWE2 0xee +#define RR_LUTWE2_RTXBW BIT(2) #define RR_LUTWE 0xef #define RR_LUTWE_LOK BIT(2) #define RR_RFC 0xf0 @@ -3856,6 +3876,10 @@ #define B_IQKINF2_FCNT GENMASK(23, 16) #define B_IQKINF2_KCNT GENMASK(15, 8) #define B_IQKINF2_NCTLV GENMAKS(7, 0) +#define R_P0_CFCH_BW0 0xC0D4 +#define B_P0_CFCH_BW0 GENMASK(27, 26) +#define R_P0_CFCH_BW1 0xC0D8 +#define B_P0_CFCH_BW1 GENMASK(8, 5) /* WiFi CPU local domain */ #define R_AX_WDT_CTRL 0x0040 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c new file mode 100644 index 000000000000..56f0876c03fb --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#include "debug.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852c.h" +#include "rtw8852c_rfk.h" + +static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n", + rtwdev->dbcc_en, phy_idx); + + if (!rtwdev->dbcc_en) + return RF_AB; + + if (phy_idx == RTW89_PHY_0) + return RF_A; + else + return RF_B; +} + +static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + enum rtw89_bandwidth bw, bool is_dav) +{ + u32 rf_reg18; + u32 reg_reg18_addr; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); + if (is_dav) + reg_reg18_addr = RR_CFGCH; + else + reg_reg18_addr = RR_CFGCH_V1; + + rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK); + rf_reg18 &= ~RR_CFGCH_BW; + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + case RTW89_CHANNEL_WIDTH_10: + case RTW89_CHANNEL_WIDTH_20: + rf_reg18 |= 
FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf); + break; + case RTW89_CHANNEL_WIDTH_40: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xf); + break; + case RTW89_CHANNEL_WIDTH_80: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xd); + break; + case RTW89_CHANNEL_WIDTH_160: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_160M); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb); + break; + default: + break; + } + + rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18); +} + +static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_bandwidth bw) +{ + bool is_dav; + u8 kpath, path; + u32 tmp = 0; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); + kpath = _kpath(rtwdev, phy); + + for (path = 0; path < 2; path++) { + if (!(kpath & BIT(path))) + continue; + + is_dav = true; + _bw_setting(rtwdev, path, bw, is_dav); + is_dav = false; + _bw_setting(rtwdev, path, bw, is_dav); + if (rtwdev->dbcc_en) + continue; + + if (path == RF_PATH_B && rtwdev->hal.cv == CHIP_CAV) { + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0); + tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_APK, RR_APK_MOD, 0x3); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK, tmp); + fsleep(100); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1); + } + } +} + +static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + u8 central_ch, enum rtw89_band band, bool is_dav) +{ + u32 rf_reg18; + u32 reg_reg18_addr; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); + if (is_dav) + reg_reg18_addr = 0x18; + else + reg_reg18_addr = 0x10018; + + rf_reg18 = rtw89_read_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK); + rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BAND0 | RR_CFGCH_CH); + rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch); + + switch (band) { + case RTW89_BAND_2G: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_2G); + rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_2G); + break; + case RTW89_BAND_5G: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G); + rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G); + break; + case RTW89_BAND_6G: + rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_6G); + rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_6G); + break; + default: + break; + } + rtw89_write_rf(rtwdev, path, reg_reg18_addr, RFREG_MASK, rf_reg18); + fsleep(100); +} + +static void _ctrl_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + u8 central_ch, enum rtw89_band band) +{ + u8 kpath, path; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===>%s\n", __func__); + if (band != RTW89_BAND_6G) { + if ((central_ch > 14 && central_ch < 36) || + (central_ch > 64 && central_ch < 100) || + (central_ch > 144 && central_ch < 149) || central_ch > 177) + return; + } else { + if (central_ch > 253 || central_ch == 2) + return; + } + + kpath = _kpath(rtwdev, phy); + + for 
(path = 0; path < 2; path++) { + if (kpath & BIT(path)) { + _ch_setting(rtwdev, path, central_ch, band, true); + _ch_setting(rtwdev, path, central_ch, band, false); + } + } +} + +static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_bandwidth bw) +{ + u8 kpath; + u8 path; + u32 val; + + kpath = _kpath(rtwdev, phy); + for (path = 0; path < 2; path++) { + if (!(kpath & BIT(path))) + continue; + + rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0xa); + switch (bw) { + case RTW89_CHANNEL_WIDTH_20: + val = 0x1b; + break; + case RTW89_CHANNEL_WIDTH_40: + val = 0x13; + break; + case RTW89_CHANNEL_WIDTH_80: + val = 0xb; + break; + case RTW89_CHANNEL_WIDTH_160: + default: + val = 0x3; + break; + } + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, val); + rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0); + } +} + +static +void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + u8 central_ch, enum rtw89_band band, + enum rtw89_bandwidth bw) +{ + _ctrl_ch(rtwdev, phy, central_ch, band); + _ctrl_bw(rtwdev, phy, bw); + _rxbb_bw(rtwdev, phy, bw); +} + +void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx) +{ + rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, param->center_chan, param->band_type, + param->bandwidth); +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h new file mode 100644 index 000000000000..0e75555c1612 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#ifndef __RTW89_8852C_RFK_H__ +#define __RTW89_8852C_RFK_H__ + +#include "core.h" + +void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx); + +#endif -- cgit v1.2.3 From 63fb5c981590f0bdf1708077b94563ee609efe8f Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:24 +0800 Subject: rtw89: 8852c: set channel of MAC part Configure channel and bandwidth of MAC registers to work properly. 
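For orientation, the MAC-side bandwidth programming added below boils down to a small mapping from the channel bandwidth to the 2-bit RFMOD code (AX_WMAC_RFMOD_20M..AX_WMAC_RFMOD_160M, i.e. 0..3). The standalone C sketch that follows mirrors only that mapping; the local enum is a stand-in for the driver's rtw89_bandwidth values.

#include <stdio.h>

/* Local stand-in for the driver's bandwidth enum. */
enum bw { BW_20, BW_40, BW_80, BW_160 };

/* 2-bit RFMOD field: 20M=0, 40M=1, 80M=2, 160M=3 (AX_WMAC_RFMOD_*). */
static unsigned int rfmod_code(enum bw bw)
{
	switch (bw) {
	case BW_160:
		return 3;
	case BW_80:
		return 2;
	case BW_40:
		return 1;
	case BW_20:
	default:
		return 0;
	}
}

int main(void)
{
	printf("80 MHz maps to RFMOD code %u\n", rfmod_code(BW_80));
	return 0;
}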
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-11-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 4 ++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 71 +++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index d666c6e2a9de..44a2d984ebe1 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -1802,6 +1802,10 @@ #define R_AX_WMAC_RFMOD 0xC010 #define R_AX_WMAC_RFMOD_C1 0xE010 #define B_AX_WMAC_RFMOD_MASK GENMASK(1, 0) +#define AX_WMAC_RFMOD_20M 0 +#define AX_WMAC_RFMOD_40M 1 +#define AX_WMAC_RFMOD_80M 2 +#define AX_WMAC_RFMOD_160M 3 #define R_AX_GID_POSITION0 0xC070 #define R_AX_GID_POSITION0_C1 0xE070 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 8be3cb22eb11..07ff3bd50f62 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -562,6 +562,77 @@ static void rtw8852c_power_trim(struct rtw89_dev *rtwdev) rtw8852c_pa_bias_trim(rtwdev); } +static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + u8 mac_idx) +{ + u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, + mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx); + u8 txsc20 = 0, txsc40 = 0, txsc80 = 0; + u8 rf_mod_val = 0, chk_rate_mask = 0; + u32 txsc; + + switch (param->bandwidth) { + case RTW89_CHANNEL_WIDTH_160: + txsc80 = rtw89_phy_get_txsc(rtwdev, param, + RTW89_CHANNEL_WIDTH_80); + fallthrough; + case RTW89_CHANNEL_WIDTH_80: + txsc40 = rtw89_phy_get_txsc(rtwdev, param, + RTW89_CHANNEL_WIDTH_40); + fallthrough; + case RTW89_CHANNEL_WIDTH_40: + txsc20 = rtw89_phy_get_txsc(rtwdev, param, + RTW89_CHANNEL_WIDTH_20); + break; + default: + break; + } + + switch (param->bandwidth) { + case RTW89_CHANNEL_WIDTH_160: + rf_mod_val = AX_WMAC_RFMOD_160M; + txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) | + FIELD_PREP(B_AX_TXSC_40M_MASK, txsc40) | + FIELD_PREP(B_AX_TXSC_80M_MASK, txsc80); + break; + case RTW89_CHANNEL_WIDTH_80: + rf_mod_val = AX_WMAC_RFMOD_80M; + txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) | + FIELD_PREP(B_AX_TXSC_40M_MASK, txsc40); + break; + case RTW89_CHANNEL_WIDTH_40: + rf_mod_val = AX_WMAC_RFMOD_40M; + txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20); + break; + case RTW89_CHANNEL_WIDTH_20: + default: + rf_mod_val = AX_WMAC_RFMOD_20M; + txsc = 0; + break; + } + rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, rf_mod_val); + rtw89_write32(rtwdev, sub_carr, txsc); + + switch (param->band_type) { + case RTW89_BAND_2G: + chk_rate_mask = B_AX_BAND_MODE; + break; + case RTW89_BAND_5G: + case RTW89_BAND_6G: + chk_rate_mask = B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6; + break; + default: + rtw89_warn(rtwdev, "Invalid band_type:%d\n", param->band_type); + return; + } + rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE | B_AX_CHECK_CCK_EN | + B_AX_RTS_LIMIT_IN_OFDM6); + rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask); +} + struct rtw8852c_bb_gain { u32 gain_g[BB_PATH_NUM_8852C]; u32 gain_a[BB_PATH_NUM_8852C]; -- cgit v1.2.3 From 1b00e9236a7131284609d10e6f63ff2864ebe650 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:25 +0800 Subject: rtw89: 8852c: add set channel of BB part BB does many 
settings during setting channel. First is to configure CCK for 2G channels, and then basic channel and bandwidth settings with a encoded channel index that will report to driver when we receive packets. Configure spur elimination to avoid spur of CSI and NBI tones in certain frequencies. Also, it initializes BT grant to arrange path sharing with BT according to band. Finally, reset TSSI and BB hardware to ensure it stays in initial state. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-12-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/phy.h | 9 + drivers/net/wireless/realtek/rtw89/reg.h | 93 +++- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 718 ++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/util.h | 30 ++ 4 files changed, 846 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index b8531bb7e606..3ca5efa4c097 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -307,6 +307,15 @@ const struct rtw89_phy_reg3_tbl _name ## _tbl = { \ .size = ARRAY_SIZE(_name), \ } +struct rtw89_nbi_reg_def { + struct rtw89_reg_def notch1_idx; + struct rtw89_reg_def notch1_frac_idx; + struct rtw89_reg_def notch1_en; + struct rtw89_reg_def notch2_idx; + struct rtw89_reg_def notch2_frac_idx; + struct rtw89_reg_def notch2_en; +}; + extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX]; extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX]; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 44a2d984ebe1..0f08b2581797 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2962,6 +2962,8 @@ #define R_AX_PWR_MACID_LMT_TABLE0 0xD36C #define R_AX_PWR_MACID_LMT_TABLE127 0xD568 +#define R_P80_AT_HIGH_FREQ_BB_WRP 0xD848 +#define B_P80_AT_HIGH_FREQ_BB_WRP BIT(28) #define R_AX_TSSI_CTRL_HEAD 0xD908 #define R_AX_BANDEDGE_CFG 0xD94C #define B_AX_BANDEDGE_CFG_IDX_MASK GENMASK(31, 30) @@ -3192,9 +3194,9 @@ #define RR_RXKPLL_POW BIT(19) #define RR_RSV4 0x1f #define RR_RXK 0x20 -#define RR_RXK_PLLEN BIT(5) -#define RR_RXK_SEL5G BIT(7) #define RR_RXK_SEL2G BIT(8) +#define RR_RXK_SEL5G BIT(7) +#define RR_RXK_PLLEN BIT(5) #define RR_LUTWA 0x33 #define RR_LUTWA_MASK GENMASK(9, 0) #define RR_LUTWA_M2 GENMASK(4, 0) @@ -3320,8 +3322,9 @@ #define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8) #define B_SWSI_READ_ADDR_V1 GENMASK(10, 0) #define R_UPD_CLK_ADC 0x0700 -#define B_UPD_CLK_ADC_ON BIT(24) #define B_UPD_CLK_ADC_VAL GENMASK(26, 25) +#define B_UPD_CLK_ADC_ON BIT(24) +#define B_ENABLE_CCK BIT(5) #define R_RSTB_ASYNC 0x0704 #define B_RSTB_ASYNC_ALL BIT(1) #define R_MAC_PIN_SEL 0x0734 @@ -3368,6 +3371,8 @@ #define B_PMAC_PTX_EN BIT(4) #define R_PMAC_TX_CNT 0x09C8 #define B_PMAC_TX_CNT_MSK GENMASK(31, 0) +#define R_P80_AT_HIGH_FREQ 0x09D8 +#define B_P80_AT_HIGH_FREQ BIT(26) #define R_DBCC_80P80_SEL_EVM_RPT 0x0A10 #define B_DBCC_80P80_SEL_EVM_RPT_EN BIT(0) #define R_CCX 0x0C00 @@ -3400,6 +3405,14 @@ #define B_PD_HIT_DIS BIT(9) #define R_IOQ_IQK_DPK 0x0C60 #define B_IOQ_IQK_DPK_EN BIT(1) +#define R_GNT_BT_WGT_EN 0x0C6C +#define B_GNT_BT_WGT_EN BIT(21) +#define R_PD_ARBITER_OFF 0x0C80 +#define B_PD_ARBITER_OFF BIT(31) +#define R_SNDCCA_A1 0x0C9C +#define B_SNDCCA_A1_EN GENMASK(19, 12) +#define R_SNDCCA_A2 0x0CA0 +#define B_SNDCCA_A2_VAL GENMASK(19, 12) #define R_P0_EN_SOUND_WO_NDP 0x0D7C #define B_P0_EN_SOUND_WO_NDP BIT(1) 
#define R_SPOOF_ASYNC_RST 0x0D84 @@ -3506,6 +3519,9 @@ #define B_RXSCOBC_TH GENMASK(18, 0) #define R_RXSCOCCK 0x23B4 #define B_RXSCOCCK_TH GENMASK(18, 0) +#define R_P80_AT_HIGH_FREQ_RU_ALLOC 0x2410 +#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1 BIT(14) +#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0 BIT(13) #define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10 #define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0) #define R_P1_EN_SOUND_WO_NDP 0x2D7C @@ -3540,6 +3556,12 @@ #define R_CFO_TRK0 0x4404 #define R_CFO_TRK1 0x440C #define B_CFO_TRK_MSK GENMASK(14, 10) +#define R_T2F_GI_COMB 0x4424 +#define B_T2F_GI_COMB_EN BIT(2) +#define R_BT_DYN_DC_EST_EN 0x441C +#define B_BT_DYN_DC_EST_EN_MSK BIT(31) +#define R_ASSIGN_SBD_OPT 0x4450 +#define B_ASSIGN_SBD_OPT_EN BIT(24) #define R_DCFO_COMP_S0 0x448C #define B_DCFO_COMP_S0_MSK GENMASK(11, 0) #define R_DCFO_WEIGHT 0x4490 @@ -3554,6 +3576,22 @@ #define B_TXPWR_MSK GENMASK(30, 22) #define R_TXNSS_MAP 0x45B4 #define B_TXNSS_MAP_MSK GENMASK(20, 17) +#define R_PCOEFF0_V1 0x45BC +#define B_PCOEFF01_MSK_V1 GENMASK(23, 0) +#define R_PCOEFF2_V1 0x45CC +#define B_PCOEFF23_MSK_V1 GENMASK(23, 0) +#define R_PCOEFF4_V1 0x45D0 +#define B_PCOEFF45_MSK_V1 GENMASK(23, 0) +#define R_PCOEFF6_V1 0x45D4 +#define B_PCOEFF67_MSK_V1 GENMASK(23, 0) +#define R_PCOEFF8_V1 0x45D8 +#define B_PCOEFF89_MSK_V1 GENMASK(23, 0) +#define R_PCOEFFA_V1 0x45C0 +#define B_PCOEFFAB_MSK_V1 GENMASK(23, 0) +#define R_PCOEFFC_V1 0x45C4 +#define B_PCOEFFCD_MSK_V1 GENMASK(23, 0) +#define R_PCOEFFE_V1 0x45C8 +#define B_PCOEFFEF_MSK_V1 GENMASK(23, 0) #define R_PATH0_IB_PKPW 0x4628 #define B_PATH0_IB_PKPW_MSK GENMASK(11, 6) #define R_PATH0_LNA_ERR1 0x462C @@ -3601,6 +3639,14 @@ #define R_PATH0_G_TIA1_LNA6_OP1DB_V1 0x4694 #define B_PATH0_R_G_OFST_MASK GENMASK(23, 16) #define B_PATH0_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8) +#define R_CDD_EVM_CHK_EN 0x46C0 +#define B_CDD_EVM_CHK_EN BIT(0) +#define R_PATH0_BAND_SEL_V1 0x4738 +#define B_PATH0_BAND_SEL_MSK_V1 BIT(17) +#define R_PATH0_BT_SHARE_V1 0x4738 +#define B_PATH0_BT_SHARE_V1 BIT(19) +#define R_PATH0_BTG_PATH_V1 0x4738 +#define B_PATH0_BTG_PATH_V1 BIT(22) #define R_P0_NBIIDX 0x469C #define B_P0_NBIIDX_VAL GENMASK(11, 0) #define B_P0_NBIIDX_NOTCH_EN BIT(12) @@ -3614,12 +3660,20 @@ #define B_PATH1_BTG_SHEN GENMASK(18, 17) #define R_PATH1_RXB_INIT 0x472C #define B_PATH1_RXB_INIT_IDX_MSK GENMASK(9, 5) +#define R_PATH1_G_LNA6_OP1DB_V1 0x476C +#define B_PATH1_G_LNA6_OP1DB_V1 GENMASK(31, 24) #define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774 #define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) #define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778 #define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) #define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778 #define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0) +#define R_PATH1_BAND_SEL_V1 0x4AA4 +#define B_PATH1_BAND_SEL_MSK_V1 BIT(17) +#define R_PATH1_BT_SHARE_V1 0x4AA4 +#define B_PATH1_BT_SHARE_V1 BIT(19) +#define R_PATH1_BTG_PATH_V1 0x4AA4 +#define B_PATH1_BTG_PATH_V1 BIT(22) #define R_P1_NBIIDX 0x4770 #define B_P1_NBIIDX_VAL GENMASK(11, 0) #define B_P1_NBIIDX_NOTCH_EN BIT(12) @@ -3631,9 +3685,28 @@ #define R_FC0_BW 0x4974 #define B_FC0_BW_INV GENMASK(6, 0) #define B_FC0_BW_SET GENMASK(31, 30) +#define B_ANT_RX_BT_SEG0 GENMASK(25, 22) +#define B_ANT_RX_1RCCA_SEG1 GENMASK(21, 18) +#define B_ANT_RX_1RCCA_SEG0 GENMASK(17, 14) #define R_CHBW_MOD 0x4978 -#define B_CHBW_MOD_PRICH GENMASK(11, 8) +#define B_BT_SHARE BIT(14) #define B_CHBW_MOD_SBW GENMASK(13, 12) +#define B_CHBW_MOD_PRICH GENMASK(11, 8) +#define B_ANT_RX_SEG0 GENMASK(3, 0) +#define R_BK_FC0_INV_V1 0x4A1C +#define 
B_BK_FC0_INV_MSK_V1 GENMASK(18, 0) +#define R_CCK_FC0_INV_V1 0x4A20 +#define B_CCK_FC0_INV_MSK_V1 GENMASK(18, 0) +#define R_PATH0_5MDET 0x4C4C +#define B_PATH0_5MDET_EN BIT(12) +#define B_PATH0_5MDET_SB2 BIT(8) +#define B_PATH0_5MDET_SB0 BIT(6) +#define B_PATH0_5MDET_TH GENMASK(5, 0) +#define R_PATH1_5MDET 0x4D10 +#define B_PATH1_5MDET_EN BIT(12) +#define B_PATH1_5MDET_SB2 BIT(8) +#define B_PATH1_5MDET_SB0 BIT(6) +#define B_PATH1_5MDET_TH GENMASK(5, 0) #define R_RPL_BIAS_COMP 0x4DF0 #define B_RPL_BIAS_COMP_MASK GENMASK(7, 0) #define R_RPL_PATHAB 0x4E0C @@ -3642,6 +3715,10 @@ #define R_RSSI_M_PATHAB 0x4E2C #define B_RSSI_M_PATHB_MASK GENMASK(15, 8) #define B_RSSI_M_PATHA_MASK GENMASK(7, 0) +#define R_FC0_V1 0x4E30 +#define B_FC0_MSK_V1 GENMASK(12, 0) +#define R_RX_BW40_2XFFT_EN_V1 0x4E30 +#define B_RX_BW40_2XFFT_EN_MSK_V1 BIT(26) #define R_DCFO_COMP_S0_V1 0x4A40 #define B_DCFO_COMP_S0_V1_MSK GENMASK(13, 0) #define R_BMODE_PDTH_V1 0x4B64 @@ -3880,6 +3957,14 @@ #define B_IQKINF2_FCNT GENMASK(23, 16) #define B_IQKINF2_KCNT GENMASK(15, 8) #define B_IQKINF2_NCTLV GENMAKS(7, 0) +#define R_PATH0_SAMPL_DLY_T_V1 0xC0D4 +#define B_PATH0_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26) +#define R_PATH1_SAMPL_DLY_T_V1 0xC1D4 +#define B_PATH1_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26) +#define R_PATH0_BW_SEL_V1 0xC0D8 +#define B_PATH0_BW_SEL_MSK_V1 GENMASK(8, 5) +#define R_PATH1_BW_SEL_V1 0xC1D8 +#define B_PATH1_BW_SEL_MSK_V1 GENMASK(8, 5) #define R_P0_CFCH_BW0 0xC0D4 #define B_P0_CFCH_BW0 GENMASK(27, 26) #define R_P0_CFCH_BW1 0xC0D8 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 07ff3bd50f62..c5377e8843dd 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -9,7 +9,9 @@ #include "phy.h" #include "reg.h" #include "rtw8852c.h" +#include "rtw8852c_rfk.h" #include "rtw8852c_table.h" +#include "util.h" static const struct rtw89_hfc_ch_cfg rtw8852c_hfc_chcfg_pcie[] = { {13, 1614, grp_0}, /* ACH 0 */ @@ -129,6 +131,8 @@ static const struct rtw89_imr_info rtw8852c_imr_info = { .tmac_imr_set = B_AX_TMAC_IMR_SET_V1, }; +static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg); + static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev) { u32 val32; @@ -633,6 +637,40 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev, rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask); } +static const u32 rtw8852c_sco_barker_threshold[14] = { + 0x1fe4f, 0x1ff5e, 0x2006c, 0x2017b, 0x2028a, 0x20399, 0x204a8, 0x205b6, + 0x206c5, 0x207d4, 0x208e3, 0x209f2, 0x20b00, 0x20d8a +}; + +static const u32 rtw8852c_sco_cck_threshold[14] = { + 0x2bdac, 0x2bf21, 0x2c095, 0x2c209, 0x2c37e, 0x2c4f2, 0x2c666, 0x2c7db, + 0x2c94f, 0x2cac3, 0x2cc38, 0x2cdac, 0x2cf21, 0x2d29e +}; + +static int rtw8852c_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 central_ch, + u8 primary_ch, enum rtw89_bandwidth bw) +{ + u8 ch_element; + + if (bw == RTW89_CHANNEL_WIDTH_20) { + ch_element = central_ch - 1; + } else if (bw == RTW89_CHANNEL_WIDTH_40) { + if (primary_ch == 1) + ch_element = central_ch - 1 + 2; + else + ch_element = central_ch - 1 - 2; + } else { + rtw89_warn(rtwdev, "Invalid BW:%d for CCK\n", bw); + return -EINVAL; + } + rtw89_phy_write32_mask(rtwdev, R_BK_FC0_INV_V1, B_BK_FC0_INV_MSK_V1, + rtw8852c_sco_barker_threshold[ch_element]); + rtw89_phy_write32_mask(rtwdev, R_CCK_FC0_INV_V1, B_CCK_FC0_INV_MSK_V1, + rtw8852c_sco_cck_threshold[ch_element]); + + return 0; +} + struct rtw8852c_bb_gain { u32 gain_g[BB_PATH_NUM_8852C]; u32 
gain_a[BB_PATH_NUM_8852C]; @@ -811,6 +849,76 @@ static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev, } } +static +const u8 rtw8852c_ch_base_table[16] = {1, 0xff, + 36, 100, 132, 149, 0xff, + 1, 33, 65, 97, 129, 161, 193, 225, 0xff}; +#define RTW8852C_CH_BASE_IDX_2G 0 +#define RTW8852C_CH_BASE_IDX_5G_FIRST 2 +#define RTW8852C_CH_BASE_IDX_5G_LAST 5 +#define RTW8852C_CH_BASE_IDX_6G_FIRST 7 +#define RTW8852C_CH_BASE_IDX_6G_LAST 14 + +#define RTW8852C_CH_BASE_IDX_MASK GENMASK(7, 4) +#define RTW8852C_CH_OFFSET_MASK GENMASK(3, 0) + +static u8 rtw8852c_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band) +{ + u8 chan_idx; + u8 last, first; + u8 idx; + + switch (band) { + case RTW89_BAND_2G: + chan_idx = FIELD_PREP(RTW8852C_CH_BASE_IDX_MASK, RTW8852C_CH_BASE_IDX_2G) | + FIELD_PREP(RTW8852C_CH_OFFSET_MASK, central_ch); + return chan_idx; + case RTW89_BAND_5G: + first = RTW8852C_CH_BASE_IDX_5G_FIRST; + last = RTW8852C_CH_BASE_IDX_5G_LAST; + break; + case RTW89_BAND_6G: + first = RTW8852C_CH_BASE_IDX_6G_FIRST; + last = RTW8852C_CH_BASE_IDX_6G_LAST; + break; + default: + rtw89_warn(rtwdev, "Unsupported band %d\n", band); + return 0; + } + + for (idx = last; idx >= first; idx--) + if (central_ch >= rtw8852c_ch_base_table[idx]) + break; + + if (idx < first) { + rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch); + return 0; + } + + chan_idx = FIELD_PREP(RTW8852C_CH_BASE_IDX_MASK, idx) | + FIELD_PREP(RTW8852C_CH_OFFSET_MASK, + (central_ch - rtw8852c_ch_base_table[idx]) >> 1); + return chan_idx; +} + +static void rtw8852c_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx, + u8 *ch, enum nl80211_band *band) +{ + u8 idx, offset; + + idx = FIELD_GET(RTW8852C_CH_BASE_IDX_MASK, chan_idx); + offset = FIELD_GET(RTW8852C_CH_OFFSET_MASK, chan_idx); + + if (idx == RTW8852C_CH_BASE_IDX_2G) { + *band = NL80211_BAND_2GHZ; + *ch = offset; + return; + } + + *band = idx <= RTW8852C_CH_BASE_IDX_5G_LAST ? 
NL80211_BAND_5GHZ : NL80211_BAND_6GHZ; + *ch = rtw8852c_ch_base_table[idx] + (offset << 1); +} + static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev, const struct rtw89_channel_params *param, enum rtw89_phy_idx phy_idx, @@ -868,6 +976,478 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev, rtw89_phy_write32_idx(rtwdev, R_RSSI_M_PATHAB, rpl_tb_mask[path], tmp & 0xff, phy_idx); } +static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev, + const struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx) +{ + u8 sco; + u16 central_freq = param->center_freq; + u8 central_ch = param->center_chan; + u8 band = param->band_type; + u8 subband = param->subband_type; + bool is_2g = band == RTW89_BAND_2G; + u8 chan_idx; + + if (!central_freq) { + rtw89_warn(rtwdev, "Invalid central_freq\n"); + return; + } + + if (phy_idx == RTW89_PHY_0) { + /* Path A */ + rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_A); + rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_A); + + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_SEL_MSK_V1, 1, + phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_SEL_MSK_V1, 0, + phy_idx); + /* Path B */ + if (!rtwdev->dbcc_en) { + rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B); + rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B); + + if (is_2g) + rtw89_phy_write32_idx(rtwdev, + R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, + 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, + R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, + 0, phy_idx); + rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL); + } else { + if (is_2g) + rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL); + else + rtw89_phy_write32_set(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL); + } + /* SCO compensate FC setting */ + rtw89_phy_write32_idx(rtwdev, R_FC0_V1, B_FC0_MSK_V1, + central_freq, phy_idx); + /* round_up((1/fc0)*pow(2,18)) */ + sco = DIV_ROUND_CLOSEST(1 << 18, central_freq); + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, sco, + phy_idx); + } else { + /* Path B */ + rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B); + rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B); + + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, + 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, + 0, phy_idx); + /* SCO compensate FC setting */ + rtw89_phy_write32_idx(rtwdev, R_FC0_V1, B_FC0_MSK_V1, + central_freq, phy_idx); + /* round_up((1/fc0)*pow(2,18)) */ + sco = DIV_ROUND_CLOSEST(1 << 18, central_freq); + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV, sco, + phy_idx); + } + /* CCK parameters */ + if (band == RTW89_BAND_2G) { + if (central_ch == 14) { + rtw89_phy_write32_mask(rtwdev, R_PCOEFF0_V1, + B_PCOEFF01_MSK_V1, 0x3b13ff); + rtw89_phy_write32_mask(rtwdev, R_PCOEFF2_V1, + B_PCOEFF23_MSK_V1, 0x1c42de); + rtw89_phy_write32_mask(rtwdev, R_PCOEFF4_V1, + B_PCOEFF45_MSK_V1, 0xfdb0ad); + rtw89_phy_write32_mask(rtwdev, R_PCOEFF6_V1, + B_PCOEFF67_MSK_V1, 0xf60f6e); + rtw89_phy_write32_mask(rtwdev, R_PCOEFF8_V1, + B_PCOEFF89_MSK_V1, 0xfd8f92); + rtw89_phy_write32_mask(rtwdev, R_PCOEFFA_V1, + B_PCOEFFAB_MSK_V1, 0x2d011); + rtw89_phy_write32_mask(rtwdev, R_PCOEFFC_V1, + B_PCOEFFCD_MSK_V1, 0x1c02c); + rtw89_phy_write32_mask(rtwdev, R_PCOEFFE_V1, + B_PCOEFFEF_MSK_V1, 0xfff00a); + } else { + rtw89_phy_write32_mask(rtwdev, R_PCOEFF0_V1, + B_PCOEFF01_MSK_V1, 0x3d23ff); + rtw89_phy_write32_mask(rtwdev, 
R_PCOEFF2_V1, + B_PCOEFF23_MSK_V1, 0x29b354); + rtw89_phy_write32_mask(rtwdev, R_PCOEFF4_V1, + B_PCOEFF45_MSK_V1, 0xfc1c8); + rtw89_phy_write32_mask(rtwdev, R_PCOEFF6_V1, + B_PCOEFF67_MSK_V1, 0xfdb053); + rtw89_phy_write32_mask(rtwdev, R_PCOEFF8_V1, + B_PCOEFF89_MSK_V1, 0xf86f9a); + rtw89_phy_write32_mask(rtwdev, R_PCOEFFA_V1, + B_PCOEFFAB_MSK_V1, 0xfaef92); + rtw89_phy_write32_mask(rtwdev, R_PCOEFFC_V1, + B_PCOEFFCD_MSK_V1, 0xfe5fcc); + rtw89_phy_write32_mask(rtwdev, R_PCOEFFE_V1, + B_PCOEFFEF_MSK_V1, 0xffdff5); + } + } + + chan_idx = rtw8852c_encode_chan_idx(rtwdev, param->primary_chan, band); + rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx); +} + +static void rtw8852c_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + static const u32 adc_sel[2] = {0xC0EC, 0xC1EC}; + static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4}; + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0); + break; + case RTW89_CHANNEL_WIDTH_10: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1); + break; + case RTW89_CHANNEL_WIDTH_20: + case RTW89_CHANNEL_WIDTH_40: + case RTW89_CHANNEL_WIDTH_80: + case RTW89_CHANNEL_WIDTH_160: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + break; + default: + rtw89_warn(rtwdev, "Fail to set ADC\n"); + } +} + +static void rtw8852c_edcca_per20_bitmap_sifs(struct rtw89_dev *rtwdev, u8 bw, + enum rtw89_phy_idx phy_idx) +{ + if (bw == RTW89_CHANNEL_WIDTH_20) { + rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A1, B_SNDCCA_A1_EN, 0xff, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A2, B_SNDCCA_A2_VAL, 0, phy_idx); + } else { + rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A1, B_SNDCCA_A1_EN, 0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_SNDCCA_A2, B_SNDCCA_A2_VAL, 0, phy_idx); + } +} + +static void +rtw8852c_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, + enum rtw89_phy_idx phy_idx) +{ + u8 mod_sbw = 0; + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + case RTW89_CHANNEL_WIDTH_10: + case RTW89_CHANNEL_WIDTH_20: + if (bw == RTW89_CHANNEL_WIDTH_5) + mod_sbw = 0x1; + else if (bw == RTW89_CHANNEL_WIDTH_10) + mod_sbw = 0x2; + else if (bw == RTW89_CHANNEL_WIDTH_20) + mod_sbw = 0x0; + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, + mod_sbw, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, 0x0, + phy_idx); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1, + B_PATH0_BW_SEL_MSK_V1, 0xf); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0xf); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x1, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + pri_ch, + phy_idx); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1, + B_PATH0_BW_SEL_MSK_V1, 0xf); + 
rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0xf); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x2, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + pri_ch, + phy_idx); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1, + B_PATH0_BW_SEL_MSK_V1, 0xd); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0xd); + break; + case RTW89_CHANNEL_WIDTH_160: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x3, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0, + phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH, + pri_ch, + phy_idx); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BW_SEL_V1, + B_PATH0_BW_SEL_MSK_V1, 0xb); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0xb); + break; + default: + rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw, + pri_ch); + } + + if (bw == RTW89_CHANNEL_WIDTH_40) { + rtw89_phy_write32_idx(rtwdev, R_RX_BW40_2XFFT_EN_V1, + B_RX_BW40_2XFFT_EN_MSK_V1, 0x1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_T2F_GI_COMB, B_T2F_GI_COMB_EN, 1, phy_idx); + } else { + rtw89_phy_write32_idx(rtwdev, R_RX_BW40_2XFFT_EN_V1, + B_RX_BW40_2XFFT_EN_MSK_V1, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_T2F_GI_COMB, B_T2F_GI_COMB_EN, 0, phy_idx); + } + + if (phy_idx == RTW89_PHY_0) { + rtw8852c_bw_setting(rtwdev, bw, RF_PATH_A); + if (!rtwdev->dbcc_en) + rtw8852c_bw_setting(rtwdev, bw, RF_PATH_B); + } else { + rtw8852c_bw_setting(rtwdev, bw, RF_PATH_B); + } + + rtw8852c_edcca_per20_bitmap_sifs(rtwdev, bw, phy_idx); +} + +static u32 rtw8852c_spur_freq(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param) +{ + u8 center_chan = param->center_chan; + u8 bw = param->bandwidth; + + switch (param->band_type) { + case RTW89_BAND_2G: + if (bw == RTW89_CHANNEL_WIDTH_20) { + if (center_chan >= 5 && center_chan <= 8) + return 2440; + if (center_chan == 13) + return 2480; + } else if (bw == RTW89_CHANNEL_WIDTH_40) { + if (center_chan >= 3 && center_chan <= 10) + return 2440; + } + break; + case RTW89_BAND_5G: + if (center_chan == 151 || center_chan == 153 || + center_chan == 155 || center_chan == 163) + return 5760; + break; + case RTW89_BAND_6G: + if (center_chan == 195 || center_chan == 197 || + center_chan == 199 || center_chan == 207) + return 6920; + break; + default: + break; + } + + return 0; +} + +#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */ +#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */ +#define MAX_TONE_NUM 2048 + +static void rtw8852c_set_csi_tone_idx(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx) +{ + u32 spur_freq; + s32 freq_diff, csi_idx, csi_tone_idx; + + spur_freq = rtw8852c_spur_freq(rtwdev, param); + if (spur_freq == 0) { + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 0, phy_idx); + return; + } + + freq_diff = (spur_freq - param->center_freq) * 1000000; + csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125); + 
s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx); + + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, csi_tone_idx, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 1, phy_idx); +} + +static const struct rtw89_nbi_reg_def rtw8852c_nbi_reg_def[] = { + [RF_PATH_A] = { + .notch1_idx = {0x4C14, 0xFF}, + .notch1_frac_idx = {0x4C14, 0xC00}, + .notch1_en = {0x4C14, 0x1000}, + .notch2_idx = {0x4C20, 0xFF}, + .notch2_frac_idx = {0x4C20, 0xC00}, + .notch2_en = {0x4C20, 0x1000}, + }, + [RF_PATH_B] = { + .notch1_idx = {0x4CD8, 0xFF}, + .notch1_frac_idx = {0x4CD8, 0xC00}, + .notch1_en = {0x4CD8, 0x1000}, + .notch2_idx = {0x4CE4, 0xFF}, + .notch2_frac_idx = {0x4CE4, 0xC00}, + .notch2_en = {0x4CE4, 0x1000}, + }, +}; + +static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_rf_path path) +{ + const struct rtw89_nbi_reg_def *nbi = &rtw8852c_nbi_reg_def[path]; + u32 spur_freq, fc; + s32 freq_diff; + s32 nbi_idx, nbi_tone_idx; + s32 nbi_frac_idx, nbi_frac_tone_idx; + bool notch2_chk = false; + + spur_freq = rtw8852c_spur_freq(rtwdev, param); + if (spur_freq == 0) { + rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0); + rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0); + return; + } + + fc = param->center_freq; + if (param->bandwidth == RTW89_CHANNEL_WIDTH_160) { + fc = (spur_freq > fc) ? fc + 40 : fc - 40; + if ((fc > spur_freq && param->center_chan < param->primary_chan) || + (fc < spur_freq && param->center_chan > param->primary_chan)) + notch2_chk = true; + } + + freq_diff = (spur_freq - fc) * 1000000; + nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5, &nbi_frac_idx); + + if (param->bandwidth == RTW89_CHANNEL_WIDTH_20) { + s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx); + } else { + u16 tone_para = (param->bandwidth == RTW89_CHANNEL_WIDTH_40) ? 
128 : 256; + + s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx); + } + nbi_frac_tone_idx = s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125); + + if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && notch2_chk) { + rtw89_phy_write32_mask(rtwdev, nbi->notch2_idx.addr, + nbi->notch2_idx.mask, nbi_tone_idx); + rtw89_phy_write32_mask(rtwdev, nbi->notch2_frac_idx.addr, + nbi->notch2_frac_idx.mask, nbi_frac_tone_idx); + rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0); + rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 1); + rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0); + } else { + rtw89_phy_write32_mask(rtwdev, nbi->notch1_idx.addr, + nbi->notch1_idx.mask, nbi_tone_idx); + rtw89_phy_write32_mask(rtwdev, nbi->notch1_frac_idx.addr, + nbi->notch1_frac_idx.mask, nbi_frac_tone_idx); + rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0); + rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 1); + rtw89_phy_write32_mask(rtwdev, nbi->notch2_en.addr, nbi->notch2_en.mask, 0); + } +} + +static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx) +{ + rtw8852c_set_csi_tone_idx(rtwdev, param, phy_idx); + + if (phy_idx == RTW89_PHY_0) { + rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A); + if (!rtwdev->dbcc_en) + rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B); + } else { + rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B); + } +} + +static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx) +{ + u8 pri_ch = param->primary_chan; + bool mask_5m_low; + bool mask_5m_en; + + switch (param->bandwidth) { + case RTW89_CHANNEL_WIDTH_40: + mask_5m_en = true; + mask_5m_low = pri_ch == 2; + break; + case RTW89_CHANNEL_WIDTH_80: + mask_5m_en = ((pri_ch == 3) || (pri_ch == 4)); + mask_5m_low = pri_ch == 4; + break; + default: + mask_5m_en = false; + mask_5m_low = false; + break; + } + + if (!mask_5m_en) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x0); + rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT, + B_ASSIGN_SBD_OPT_EN, 0x0, phy_idx); + } else { + if (mask_5m_low) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB0, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET, B_PATH0_5MDET_SB0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET, B_PATH1_5MDET_SB0, 0x0); + } + 
rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT, B_ASSIGN_SBD_OPT_EN, 0x1, phy_idx); + } +} + static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { @@ -1056,6 +1636,97 @@ static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev) rtw89_phy_read32_mask(rtwdev, R_RPL_BIAS_COMP1, B_RPL_BIAS_COMP1_MASK); } +static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *param, + enum rtw89_phy_idx phy_idx) +{ + bool cck_en = param->band_type == RTW89_BAND_2G; + u8 pri_ch_idx = param->pri_ch_idx; + u32 mask, reg; + u32 ru_alloc_msk[2] = {B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0, + B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1}; + + if (param->band_type == RTW89_BAND_2G) + rtw8852c_ctrl_sco_cck(rtwdev, param->center_chan, + param->primary_chan, param->bandwidth); + + rtw8852c_ctrl_ch(rtwdev, param, phy_idx); + rtw8852c_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx); + if (cck_en) { + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1); + rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0); + rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, + B_PD_ARBITER_OFF, 0x0, phy_idx); + } else { + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0); + rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 1); + rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, + B_PD_ARBITER_OFF, 0x1, phy_idx); + } + + rtw8852c_spur_elimination(rtwdev, param, phy_idx); + rtw8852c_ctrl_btg(rtwdev, param->band_type == RTW89_BAND_2G); + rtw8852c_5m_mask(rtwdev, param, phy_idx); + + if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && + rtwdev->hal.cv != CHIP_CAV) { + rtw89_phy_write32_idx(rtwdev, R_P80_AT_HIGH_FREQ, + B_P80_AT_HIGH_FREQ, 0x0, phy_idx); + reg = rtw89_mac_reg_by_idx(R_P80_AT_HIGH_FREQ_BB_WRP, + phy_idx); + if (param->primary_chan > param->center_chan) { + rtw89_phy_write32_mask(rtwdev, + R_P80_AT_HIGH_FREQ_RU_ALLOC, + ru_alloc_msk[phy_idx], 1); + rtw89_write32_mask(rtwdev, reg, + B_P80_AT_HIGH_FREQ_BB_WRP, 1); + } else { + rtw89_phy_write32_mask(rtwdev, + R_P80_AT_HIGH_FREQ_RU_ALLOC, + ru_alloc_msk[phy_idx], 0); + rtw89_write32_mask(rtwdev, reg, + B_P80_AT_HIGH_FREQ_BB_WRP, 0); + } + } + + if (param->band_type == RTW89_BAND_6G && + param->bandwidth == RTW89_CHANNEL_WIDTH_160) + rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN, + B_CDD_EVM_CHK_EN, 0, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN, + B_CDD_EVM_CHK_EN, 1, phy_idx); + + if (!rtwdev->dbcc_en) { + mask = B_P0_TXPW_RSTB_TSSI | B_P0_TXPW_RSTB_MANON; + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x3); + mask = B_P1_TXPW_RSTB_TSSI | B_P1_TXPW_RSTB_MANON; + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x3); + } else { + if (phy_idx == RTW89_PHY_0) { + mask = B_P0_TXPW_RSTB_TSSI | B_P0_TXPW_RSTB_MANON; + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, mask, 0x3); + } else { + mask = B_P1_TXPW_RSTB_TSSI | B_P1_TXPW_RSTB_MANON; + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, mask, 0x3); + } + } + + rtw8852c_bb_reset_all(rtwdev, phy_idx); +} + +static void rtw8852c_set_channel(struct rtw89_dev *rtwdev, + struct rtw89_channel_params *params) +{ + rtw8852c_set_channel_mac(rtwdev, params, RTW89_MAC_0); + rtw8852c_set_channel_bb(rtwdev, params, RTW89_PHY_0); + rtw8852c_set_channel_rf(rtwdev, params, RTW89_PHY_0); 
+} + static void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx) @@ -1091,6 +1762,52 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, } } +static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) +{ + if (btg) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x20); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_BT_SHARE, 0x1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_BT_SEG0, 0x2); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN, + B_BT_DYN_DC_EST_EN_MSK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, + 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x1a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xf); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P2, 0x4); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_BT_SHARE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_BT_SEG0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN, + B_BT_DYN_DC_EST_EN_MSK, 0x0); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, + 0x0); + } +} + static void rtw8852c_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) { @@ -1195,6 +1912,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .disable_bb_rf = rtw8852c_mac_disable_bb_rf, .bb_reset = rtw8852c_bb_reset, .bb_sethw = rtw8852c_bb_sethw, + .set_channel = rtw8852c_set_channel, .read_efuse = rtw8852c_read_efuse, .read_phycap = rtw8852c_read_phycap, .power_trim = rtw8852c_power_trim, diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h index 229e81009de6..1ae80b7561da 100644 --- a/drivers/net/wireless/realtek/rtw89/util.h +++ b/drivers/net/wireless/realtek/rtw89/util.h @@ -14,4 +14,34 @@ #define rtw89_for_each_rtwvif(rtwdev, rtwvif) \ list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list) +/* The result of negative dividend and positive divisor is undefined, but it + * should be one case of round-down or round-up. So, make it round-down if the + * result is round-up. + * Note: the maximum value of divisor is 0x7FFF_FFFF, because we cast it to + * signed value to make compiler to use signed divide instruction. 
+ */ +static inline s32 s32_div_u32_round_down(s32 dividend, u32 divisor, s32 *remainder) +{ + s32 i_divisor = (s32)divisor; + s32 i_remainder; + s32 quotient; + + quotient = dividend / i_divisor; + i_remainder = dividend % i_divisor; + + if (i_remainder < 0) { + quotient--; + i_remainder += i_divisor; + } + + if (remainder) + *remainder = i_remainder; + return quotient; +} + +static inline s32 s32_div_u32_round_closest(s32 dividend, u32 divisor) +{ + return s32_div_u32_round_down(dividend + divisor / 2, divisor, NULL); +} + #endif -- cgit v1.2.3 From 79dafcd4ff6fc5edb86528345e91b417d0bd016e Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 14 Apr 2022 14:20:26 +0800 Subject: rtw89: 8852c: add help function of set channel During setting channel, we need to backup/restore and disable/enable some settings. The settings include SCH (scheduler channel), PPDU status report, and RF components, DFS and ADC. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220414062027.62638-13-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 40 +++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index c5377e8843dd..510614630bfb 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1727,6 +1727,45 @@ static void rtw8852c_set_channel(struct rtw89_dev *rtwdev, rtw8852c_set_channel_rf(rtwdev, params, RTW89_PHY_0); } +static void rtw8852c_dfs_en(struct rtw89_dev *rtwdev, bool en) +{ + if (en) + rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 1); + else + rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 0); +} + +static void rtw8852c_adc_en(struct rtw89_dev *rtwdev, bool en) +{ + if (en) + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, + 0x0); + else + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, + 0xf); +} + +static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter, + struct rtw89_channel_help_params *p) +{ + u8 phy_idx = RTW89_PHY_0; + + if (enter) { + rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL); + rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false); + rtw8852c_dfs_en(rtwdev, false); + rtw8852c_adc_en(rtwdev, false); + fsleep(40); + rtw8852c_bb_reset_en(rtwdev, phy_idx, false); + } else { + rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); + rtw8852c_adc_en(rtwdev, true); + rtw8852c_dfs_en(rtwdev, true); + rtw8852c_bb_reset_en(rtwdev, phy_idx, true); + rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en); + } +} + static void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx) @@ -1913,6 +1952,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .bb_reset = rtw8852c_bb_reset, .bb_sethw = rtw8852c_bb_sethw, .set_channel = rtw8852c_set_channel, + .set_channel_help = rtw8852c_set_channel_help, .read_efuse = rtw8852c_read_efuse, .read_phycap = rtw8852c_read_phycap, .power_trim = rtw8852c_power_trim, -- cgit v1.2.3 From 54d5ecc1710e4b43a73305bce5d91dc954fbb872 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 20 Apr 2022 09:02:14 +0000 Subject: wl12xx: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. 
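To make the equivalence explicit: pm_runtime_resume_and_get() bundles the error-path put that callers previously open-coded, roughly as in the sketch below. This is an illustrative helper written against the runtime-PM API, not a verbatim copy of the kernel's implementation.

#include <linux/pm_runtime.h>

/*
 * Illustrative only: the new helper behaves roughly like this, dropping
 * the usage count that a failed resume attempt leaves behind so callers
 * no longer need their own pm_runtime_put_noidle() on the error path.
 */
static int example_resume_and_get(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}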
Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220420090214.2588618-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/tx.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index e20e18cd04ae..7bd3ce2f0804 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -855,11 +855,9 @@ void wl1271_tx_work(struct work_struct *work) int ret; mutex_lock(&wl->mutex); - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } ret = wlcore_tx_work_locked(wl); if (ret < 0) { -- cgit v1.2.3 From c94e36908467011d6f793d7667cd0462a8c68710 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Wed, 20 Apr 2022 09:02:47 +0000 Subject: wl12xx: scan: use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() Using pm_runtime_resume_and_get() to replace pm_runtime_get_sync and pm_runtime_put_noidle. This change is just to simplify the code, no actual functional changes. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220420090247.2588680-1-chi.minghao@zte.com.cn --- drivers/net/wireless/ti/wlcore/scan.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index 29fa51c37e88..b414305acc32 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -53,11 +53,9 @@ void wl1271_scan_complete_work(struct work_struct *work) wl->scan.req = NULL; wl->scan_wlvif = NULL; - ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) { - pm_runtime_put_noidle(wl->dev); + ret = pm_runtime_resume_and_get(wl->dev); + if (ret < 0) goto out; - } if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { /* restore hardware connection monitoring template */ -- cgit v1.2.3 From 9cbdadf0097fc4a77071c61ed28a07eaac97f8f6 Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Wed, 20 Apr 2022 17:30:57 +0800 Subject: rtw88: fix uninitialized 'tim_offset' warning This avoids below warning and makes compiler happy. error: uninitialized symbol 'tim_offset' Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220420093058.31646-1-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index c1af2704bb86..3545d51c6951 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -1047,7 +1047,7 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw, struct rtw_vif *rtwvif; struct sk_buff *skb_new; struct cfg80211_ssid *ssid; - u16 tim_offset; + u16 tim_offset = 0; if (rsvd_pkt->type == RSVD_DUMMY) { skb_new = alloc_skb(1, GFP_KERNEL); -- cgit v1.2.3 From 9ebacb1e7e75f2a7a9745d42da7031d3a1741029 Mon Sep 17 00:00:00 2001 From: Po-Hao Huang Date: Wed, 20 Apr 2022 17:30:58 +0800 Subject: rtw88: pci: 8821c: Disable 21ce completion timeout Disable this capability to avoid timeout errors on certain platforms. Without it, pci bus might stuck and leads to disconnection. 
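For reference, the Completion Timeout Disable bit lives in the PCIe Device Control 2 register, so the quirk amounts to a single capability write. A minimal sketch of that write from a generic probe path; rtw_example_disable_cpl_timeout() is an illustrative name, not the function added by this patch:

#include <linux/pci.h>

static void rtw_example_disable_cpl_timeout(struct pci_dev *pdev)
{
	int ret;

	/* Set Completion Timeout Disable in Device Control 2 */
	ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
				       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
	if (ret)
		dev_warn(&pdev->dev, "failed to set DEVCTL2: %d\n", ret);
}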
Signed-off-by: Po-Hao Huang Signed-off-by: Ping-Ke Shih Tested-by: Chris Chiu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220420093058.31646-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/pci.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 33042b63a151..3ef0de70af32 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1482,12 +1482,15 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev) static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) { + struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_chip_info *chip = rtwdev->chip; + struct pci_dev *pdev = rtwpci->pdev; const struct rtw_intf_phy_para *para; u16 cut; u16 value; u16 offset; int i; + int ret; cut = BIT(0) << rtwdev->hal.cut_version; @@ -1520,6 +1523,15 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) } rtw_pci_link_cfg(rtwdev); + + /* Disable 8821ce completion timeout by default */ + if (chip->id == RTW_CHIP_TYPE_8821C) { + ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TMOUT_DIS); + if (ret) + rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n", + ret); + } } static int __maybe_unused rtw_pci_suspend(struct device *dev) -- cgit v1.2.3 From 948e521c728536db40dc7fba5a641f2115161802 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:50 +0800 Subject: rtw89: pci: add variant IMR/ISR and configure functions 8852CE uses different but similar IMR/ISR registers, and its masks are also different in various states, so add config_intr_mask ops to configure masks according to under_recovery or low_power states. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 168 ++++++++++++++++++++----- drivers/net/wireless/realtek/rtw89/pci.h | 123 ++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 4 + drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 4 + 4 files changed, 270 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index ecb419010f0c..0d9cd8033c77 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -612,9 +612,9 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, } } -static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, - struct rtw89_pci *rtwpci, - struct rtw89_pci_isrs *isrs) +void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + struct rtw89_pci_isrs *isrs) { isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; @@ -624,6 +624,28 @@ static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); } +EXPORT_SYMBOL(rtw89_pci_recognize_intrs); + +void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + struct rtw89_pci_isrs *isrs) +{ + isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; + isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 
+ rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; + isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? + rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; + isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? + rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; + + if (isrs->halt_c2h_isrs) + rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); + if (isrs->isrs[0]) + rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]); + if (isrs->isrs[1]) + rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]); +} +EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00) { @@ -631,21 +653,39 @@ static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00) rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00); } -static void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, - struct rtw89_pci *rtwpci) +void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) { rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); } +EXPORT_SYMBOL(rtw89_pci_enable_intr); -static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, - struct rtw89_pci *rtwpci) +void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) { rtw89_write32(rtwdev, R_AX_HIMR0, 0); rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); } +EXPORT_SYMBOL(rtw89_pci_disable_intr); + +void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) +{ + rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs); + rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); + rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]); + rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]); +} +EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); + +void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) +{ + rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); + rtw89_write32(rtwdev, R_AX_HIMR0, 0); + rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, 0); + rtw89_write32(rtwdev, R_AX_HIMR1, 0); +} +EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) { @@ -653,9 +693,9 @@ static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) unsigned long flags; spin_lock_irqsave(&rtwpci->irq_lock, flags); - rtwpci->under_recovery = true; - rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); - rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); + rtw89_chip_disable_intr(rtwdev, rtwpci); + rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START); + rtw89_chip_enable_intr(rtwdev, rtwpci); spin_unlock_irqrestore(&rtwpci->irq_lock, flags); } @@ -665,8 +705,9 @@ static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) unsigned long flags; spin_lock_irqsave(&rtwpci->irq_lock, flags); - rtwpci->under_recovery = false; - rtw89_pci_enable_intr(rtwdev, rtwpci); + rtw89_chip_disable_intr(rtwdev, rtwpci); + rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE); + rtw89_chip_enable_intr(rtwdev, rtwpci); spin_unlock_irqrestore(&rtwpci->irq_lock, flags); } @@ -678,7 +719,7 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) unsigned long flags; spin_lock_irqsave(&rtwpci->irq_lock, flags); - rtw89_pci_recognize_intrs(rtwdev, rtwpci, &isrs); + rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); spin_unlock_irqrestore(&rtwpci->irq_lock, flags); if 
(unlikely(isrs.isrs[0] & B_AX_RDU_INT)) @@ -716,7 +757,7 @@ static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) goto exit; } - rtw89_pci_disable_intr(rtwdev, rtwpci); + rtw89_chip_disable_intr(rtwdev, rtwpci); exit: spin_unlock_irqrestore(&rtwpci->irq_lock, flags); @@ -1313,32 +1354,42 @@ static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) spin_unlock_bh(&rtwpci->trx_lock); } -static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) +static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; unsigned long flags; - rtw89_core_napi_start(rtwdev); - spin_lock_irqsave(&rtwpci->irq_lock, flags); rtwpci->running = true; - rtw89_pci_enable_intr(rtwdev, rtwpci); + rtw89_chip_enable_intr(rtwdev, rtwpci); spin_unlock_irqrestore(&rtwpci->irq_lock, flags); - - return 0; } -static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) +static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; - struct pci_dev *pdev = rtwpci->pdev; unsigned long flags; spin_lock_irqsave(&rtwpci->irq_lock, flags); rtwpci->running = false; - rtw89_pci_disable_intr(rtwdev, rtwpci); + rtw89_chip_disable_intr(rtwdev, rtwpci); spin_unlock_irqrestore(&rtwpci->irq_lock, flags); +} + +static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) +{ + rtw89_core_napi_start(rtwdev); + rtw89_pci_enable_intr_lock(rtwdev); + + return 0; +} +static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + + rtw89_pci_disable_intr_lock(rtwdev); synchronize_irq(pdev->irq); rtw89_core_napi_stop(rtwdev); } @@ -2924,22 +2975,81 @@ static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, skb_queue_len(&rtwpci->h2c_queue), true); } -static void rtw89_pci_default_intr_mask(struct rtw89_dev *rtwdev) +void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; + + if (rtwpci->under_recovery) { + rtwpci->intrs[0] = 0; + rtwpci->intrs[1] = 0; + } else { + rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | + B_AX_RXDMA_INT_EN | + B_AX_RXP1DMA_INT_EN | + B_AX_RPQDMA_INT_EN | + B_AX_RXDMA_STUCK_INT_EN | + B_AX_RDU_INT_EN | + B_AX_RPQBD_FULL_INT_EN | + B_AX_HS0ISR_IND_INT_EN; + + rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; + } +} +EXPORT_SYMBOL(rtw89_pci_config_intr_mask); + +static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; + rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN; + rtwpci->intrs[0] = 0; + rtwpci->intrs[1] = 0; +} + +static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | + B_AX_HS1ISR_IND_INT_EN | + B_AX_HS0ISR_IND_INT_EN; + rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN; rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | B_AX_RXDMA_INT_EN | B_AX_RXP1DMA_INT_EN | B_AX_RPQDMA_INT_EN | B_AX_RXDMA_STUCK_INT_EN | B_AX_RDU_INT_EN | - B_AX_RPQBD_FULL_INT_EN | - B_AX_HS0ISR_IND_INT_EN; + B_AX_RPQBD_FULL_INT_EN; + rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; +} + +static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | + 
B_AX_HS0ISR_IND_INT_EN; + rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN; + rtwpci->intrs[0] = 0; + rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; +} - rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; +void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + + if (rtwpci->under_recovery) + rtw89_pci_recovery_intr_mask_v1(rtwdev); + else if (rtwpci->low_power) + rtw89_pci_low_power_intr_mask_v1(rtwdev); + else + rtw89_pci_default_intr_mask_v1(rtwdev); } +EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, struct pci_dev *pdev) @@ -2963,7 +3073,7 @@ static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, goto err_free_vector; } - rtw89_pci_default_intr_mask(rtwdev); + rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); return 0; @@ -3285,7 +3395,7 @@ static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) if (work_done < budget && napi_complete_done(napi, work_done)) { spin_lock_irqsave(&rtwpci->irq_lock, flags); if (likely(rtwpci->running)) - rtw89_pci_enable_intr(rtwdev, rtwpci); + rtw89_chip_enable_intr(rtwdev, rtwpci); spin_unlock_irqrestore(&rtwpci->irq_lock, flags); } diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index a085d1f27a12..aa804b577df2 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -97,6 +97,16 @@ #define B_AX_HALT_C2H_INT_EN BIT(21) #define R_AX_HISR0 0x01A4 +#define R_AX_HIMR1 0x01A8 +#define B_AX_GPIO18_INT_EN BIT(2) +#define B_AX_GPIO17_INT_EN BIT(1) +#define B_AX_GPIO16_INT_EN BIT(0) + +#define R_AX_HISR1 0x01AC +#define B_AX_GPIO18_INT BIT(2) +#define B_AX_GPIO17_INT BIT(1) +#define B_AX_GPIO16_INT BIT(0) + #define R_AX_MDIO_CFG 0x10A0 #define B_AX_MDIO_PHY_ADDR_MASK GENMASK(13, 12) #define B_AX_MDIO_RFLAG BIT(9) @@ -104,6 +114,7 @@ #define B_AX_MDIO_ADDR_MASK GENMASK(4, 0) #define R_AX_PCIE_HIMR00 0x10B0 +#define R_AX_HAXI_HIMR00 0x10B0 #define B_AX_HC00ISR_IND_INT_EN BIT(27) #define B_AX_HD1ISR_IND_INT_EN BIT(26) #define B_AX_HD0ISR_IND_INT_EN BIT(25) @@ -132,6 +143,7 @@ #define B_AX_RXDMA_INT_EN BIT(0) #define R_AX_PCIE_HISR00 0x10B4 +#define R_AX_HAXI_HISR00 0x10B4 #define B_AX_HC00ISR_IND_INT BIT(27) #define B_AX_HD1ISR_IND_INT BIT(26) #define B_AX_HD0ISR_IND_INT BIT(25) @@ -159,6 +171,10 @@ #define B_AX_RXP1DMA_INT BIT(1) #define B_AX_RXDMA_INT BIT(0) +#define R_AX_HAXI_HIMR10 0x11E0 +#define B_AX_TXDMA_CH11_INT_EN_V1 BIT(1) +#define B_AX_TXDMA_CH10_INT_EN_V1 BIT(0) + #define R_AX_PCIE_HIMR10 0x13B0 #define B_AX_HC10ISR_IND_INT_EN BIT(28) #define B_AX_TXDMA_CH11_INT_EN BIT(12) @@ -169,6 +185,22 @@ #define B_AX_TXDMA_CH11_INT BIT(12) #define B_AX_TXDMA_CH10_INT BIT(11) +#define R_AX_PCIE_HIMR00_V1 0x30B0 +#define B_AX_HCI_AXIDMA_INT_EN BIT(29) +#define B_AX_HC00ISR_IND_INT_EN_V1 BIT(28) +#define B_AX_HD1ISR_IND_INT_EN_V1 BIT(27) +#define B_AX_HD0ISR_IND_INT_EN_V1 BIT(26) +#define B_AX_HS1ISR_IND_INT_EN BIT(25) +#define B_AX_PCIE_DBG_STE_INT_EN BIT(13) + +#define R_AX_PCIE_HISR00_V1 0x30B4 +#define B_AX_HCI_AXIDMA_INT BIT(29) +#define B_AX_HC00ISR_IND_INT_V1 BIT(28) +#define B_AX_HD1ISR_IND_INT_V1 BIT(27) +#define B_AX_HD0ISR_IND_INT_V1 BIT(26) +#define B_AX_HS1ISR_IND_INT BIT(25) +#define B_AX_PCIE_DBG_STE_INT BIT(13) + /* TX/RX */ #define R_AX_RXQ_RXBD_IDX 0x1050 #define R_AX_RPQ_RXBD_IDX 0x1054 @@ -612,6 +644,17 @@ enum mac_ax_io_rcy_tmr { MAC_AX_IO_RCY_ANA_TMR_DEF = 0xFE }; +enum rtw89_pci_intr_mask_cfg { + 
RTW89_PCI_INTR_MASK_RESET, + RTW89_PCI_INTR_MASK_NORMAL, + RTW89_PCI_INTR_MASK_LOW_POWER, + RTW89_PCI_INTR_MASK_RECOVERY_START, + RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE, +}; + +struct rtw89_pci_isrs; +struct rtw89_pci; + struct rtw89_pci_ch_dma_addr { u32 num; u32 idx; @@ -661,6 +704,12 @@ struct rtw89_pci_info { u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev, void *txaddr_info_addr, u32 total_len, dma_addr_t dma, u8 *add_info_nr); + void (*config_intr_mask)(struct rtw89_dev *rtwdev); + void (*enable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); + void (*disable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); + void (*recognize_intrs)(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + struct rtw89_pci_isrs *isrs); }; struct rtw89_pci_bd_ram { @@ -807,6 +856,7 @@ struct rtw89_pci_rx_ring { }; struct rtw89_pci_isrs { + u32 ind_isrs; u32 halt_c2h_isrs; u32 isrs[2]; }; @@ -819,12 +869,14 @@ struct rtw89_pci { /* protect TRX resources (exclude RXQ) */ spinlock_t trx_lock; bool running; + bool low_power; bool under_recovery; struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM]; struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM]; struct sk_buff_head h2c_queue; struct sk_buff_head h2c_release_queue; + u32 ind_intrs; u32 halt_c2h_intrs; u32 intrs[2]; void __iomem *mmap; @@ -931,6 +983,18 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, void *txaddr_info_addr, u32 total_len, dma_addr_t dma, u8 *add_info_nr); +void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev); +void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev); +void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); +void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); +void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); +void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci); +void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + struct rtw89_pci_isrs *isrs); +void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + struct rtw89_pci_isrs *isrs); static inline u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev, @@ -943,4 +1007,63 @@ u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev, dma, add_info_nr); } +static inline void rtw89_chip_config_intr_mask(struct rtw89_dev *rtwdev, + enum rtw89_pci_intr_mask_cfg cfg) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + const struct rtw89_pci_info *info = rtwdev->pci_info; + + switch (cfg) { + default: + case RTW89_PCI_INTR_MASK_RESET: + rtwpci->low_power = false; + rtwpci->under_recovery = false; + break; + case RTW89_PCI_INTR_MASK_NORMAL: + rtwpci->low_power = false; + break; + case RTW89_PCI_INTR_MASK_LOW_POWER: + rtwpci->low_power = true; + break; + case RTW89_PCI_INTR_MASK_RECOVERY_START: + rtwpci->under_recovery = true; + break; + case RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE: + rtwpci->under_recovery = false; + break; + } + + rtw89_debug(rtwdev, RTW89_DBG_HCI, + "Configure PCI interrupt mask mode low_power=%d under_recovery=%d\n", + rtwpci->low_power, rtwpci->under_recovery); + + info->config_intr_mask(rtwdev); +} + +static inline +void rtw89_chip_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + + info->enable_intr(rtwdev, rtwpci); +} + +static inline +void rtw89_chip_disable_intr(struct rtw89_dev *rtwdev, struct 
rtw89_pci *rtwpci) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + + info->disable_intr(rtwdev, rtwpci); +} + +static inline +void rtw89_chip_recognize_intrs(struct rtw89_dev *rtwdev, + struct rtw89_pci *rtwpci, + struct rtw89_pci_isrs *isrs) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + + info->recognize_intrs(rtwdev, rtwpci, isrs); +} + #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 61a1693535d8..73b5dd05235a 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -42,6 +42,10 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .ltr_set = rtw89_pci_ltr_set, .fill_txaddr_info = rtw89_pci_fill_txaddr_info, + .config_intr_mask = rtw89_pci_config_intr_mask, + .enable_intr = rtw89_pci_enable_intr, + .disable_intr = rtw89_pci_disable_intr, + .recognize_intrs = rtw89_pci_recognize_intrs, }; static const struct rtw89_driver_info rtw89_8852ae_info = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index aeafac553f40..4021a56a82bc 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -43,6 +43,10 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .ltr_set = rtw89_pci_ltr_set_v1, .fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1, + .config_intr_mask = rtw89_pci_config_intr_mask_v1, + .enable_intr = rtw89_pci_enable_intr_v1, + .disable_intr = rtw89_pci_disable_intr_v1, + .recognize_intrs = rtw89_pci_recognize_intrs_v1, }; static const struct rtw89_driver_info rtw89_8852ce_info = { -- cgit v1.2.3 From e1757e80450164ede118993a889979e65ead591b Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:51 +0800 Subject: rtw89: pci: add variant RPWM/CPWM to enter low power mode RPWM/CPWM are registers that can set and check low power mode. Since chips use different address, add a field to access them in common flow. 
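A minimal sketch of how the common flow can use the new field instead of a hard-coded register; rtw89_example_cpwm_state() is an illustrative name, while rtw89_read16_mask() and PS_CPWM_STATE already exist in the driver:

static u16 rtw89_example_cpwm_state(struct rtw89_dev *rtwdev)
{
	/* 8852A reads R_AX_CPWM here and 8852C reads R_AX_PCIE_CRPWM,
	 * but the caller no longer needs to care which one it is.
	 */
	return rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr,
				 PS_CPWM_STATE);
}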
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 6 +++--- drivers/net/wireless/realtek/rtw89/pci.c | 14 ++++++++------ drivers/net/wireless/realtek/rtw89/pci.h | 5 +++++ drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 3 +++ drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 2 ++ 5 files changed, 21 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 127d5d74c426..05b94842fe66 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1027,7 +1027,7 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, return 0; rpwm_req_num = rtwdev->mac.rpwm_seq_num; - cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, + cpwm_rsp_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_RSP_SEQ_NUM); if (rpwm_req_num != cpwm_rsp_seq) @@ -1036,11 +1036,11 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) & CPWM_SEQ_NUM_MAX; - cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM); + cpwm_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_SEQ_NUM); if (cpwm_seq != rtwdev->mac.cpwm_seq_num) return -EPERM; - cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE); + cpwm_status = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_STATE); if (cpwm_status != req_pwr_state) return -EPERM; diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 0d9cd8033c77..338a032490e1 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -3486,6 +3486,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct ieee80211_hw *hw; struct rtw89_dev *rtwdev; const struct rtw89_driver_info *info; + const struct rtw89_pci_info *pci_info; int driver_data_size; int ret; @@ -3496,20 +3497,21 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; } + info = (const struct rtw89_driver_info *)id->driver_data; + pci_info = info->bus.pci; + rtwdev = hw->priv; rtwdev->hw = hw; rtwdev->dev = &pdev->dev; + rtwdev->chip = info->chip; + rtwdev->pci_info = info->bus.pci; rtwdev->hci.ops = &rtw89_pci_ops; rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; - rtwdev->hci.rpwm_addr = R_AX_PCIE_HRPWM; - rtwdev->hci.cpwm_addr = R_AX_CPWM; + rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; + rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); - info = (const struct rtw89_driver_info *)id->driver_data; - rtwdev->chip = info->chip; - rtwdev->pci_info = info->bus.pci; - ret = rtw89_core_init(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to initialise core\n"); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index aa804b577df2..c203c8cbf163 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -481,6 +481,9 @@ #define R_AX_PCIE_RX_PREF_ADV 0x13F4 #define B_AX_RXDMA_PREF_ADV_EN BIT(0) +#define R_AX_PCIE_HRPWM_V1 0x30C0 +#define R_AX_PCIE_CRPWM 0x30C4 + #define RTW89_PCI_TXBD_NUM_MAX 256 #define RTW89_PCI_RXBD_NUM_MAX 256 #define RTW89_PCI_TXWD_NUM_MAX 512 @@ -698,6 +701,8 @@ struct rtw89_pci_info { u32 dma_busy2_reg; u32 dma_busy3_reg; + u32 rpwm_addr; + u32 cpwm_addr; const struct 
rtw89_pci_ch_dma_addr_set *dma_addr_set; int (*ltr_set)(struct rtw89_dev *rtwdev, bool en); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 73b5dd05235a..c6937e5943ea 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -6,6 +6,7 @@ #include #include "pci.h" +#include "reg.h" #include "rtw8852a.h" static const struct rtw89_pci_info rtw8852a_pci_info = { @@ -38,6 +39,8 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .dma_busy2_reg = R_AX_PCIE_DMA_BUSY2, .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1, + .rpwm_addr = R_AX_PCIE_HRPWM, + .cpwm_addr = R_AX_CPWM, .dma_addr_set = &rtw89_pci_ch_dma_addr_set, .ltr_set = rtw89_pci_ltr_set, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 4021a56a82bc..4d71cc87f7ba 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -39,6 +39,8 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .dma_busy2_reg = R_AX_HAXI_DMA_BUSY2, .dma_busy3_reg = R_AX_HAXI_DMA_BUSY3, + .rpwm_addr = R_AX_PCIE_HRPWM_V1, + .cpwm_addr = R_AX_PCIE_CRPWM, .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, .ltr_set = rtw89_pci_ltr_set_v1, -- cgit v1.2.3 From 837202684657c2829bcdc057c1d25db751a6587b Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:52 +0800 Subject: rtw89: pci: reclaim TX BD only if it really need To reclaim TX BD, we need to read the hardware read index to determine whether any DMA is complete. Since this IO takes time, do it only when it is really needed, i.e. when the TX BD ring has no free buffer for the target skb. The experimental result shows that the register read count decreases from 26,000 to 130 per second.
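In other words, the MMIO read of the hardware read index is now paid only when the software view of the ring says it is full. A minimal sketch of the pattern; rtw89_example_avail_txbd() is an illustrative wrapper around helpers that already exist in pci.c:

static u32 rtw89_example_avail_txbd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci_tx_ring *tx_ring)
{
	u32 cnt;

	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (cnt)
		return cnt;	/* free BDs left, no register read needed */

	/* Only now read the hardware read index to reclaim BDs */
	rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	return rtw89_pci_get_avail_txbd_num(tx_ring);
}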
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 338a032490e1..c1bb44bcd48e 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -412,9 +412,12 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, u8 txch = tx_ring->txch; if (!list_empty(&txwd->list)) { - rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", - txch, seq); - return; + rtw89_pci_reclaim_txbd(rtwdev, tx_ring); + if (!list_empty(&txwd->list)) { + rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", + txch, seq); + return; + } } /* currently, support for only one frame */ @@ -458,7 +461,6 @@ static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, } tx_ring = &rtwpci->tx_rings[txch]; - rtw89_pci_reclaim_txbd(rtwdev, tx_ring); wd_ring = &tx_ring->wd_ring; txwd = &wd_ring->pages[seq]; @@ -915,6 +917,10 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, if (!cnt) goto out_unlock; rtw89_pci_release_tx(rtwdev, rx_ring, cnt); + + bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); + if (bd_cnt == 0) + rtw89_pci_reclaim_txbd(rtwdev, tx_ring); } bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); -- cgit v1.2.3 From c83dcd0508e231f909be3f52fee5ccf4844896c8 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:53 +0800 Subject: rtw89: pci: add a separate interrupt handler for low power mode In lower power mode, there are very low amount of RX, and it must process in a separated function instead of schedule_napi(), because the existing napi_poll does many things to optimize performance, but not all registers can access in low power mode. The simple way is to use threadfn to process the simple thing. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 3 +++ drivers/net/wireless/realtek/rtw89/pci.c | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 6e8a65b021ce..796b205854ac 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -1420,7 +1420,10 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, { rtw89_core_hw_to_sband_rate(rx_status); rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu); + /* In low power mode, it does RX in thread context. */ + local_bh_disable(); ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi); + local_bh_enable(); rtwdev->napi_budget_countdown--; } diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index c1bb44bcd48e..e8bcecbe77e1 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -713,6 +713,18 @@ static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) spin_unlock_irqrestore(&rtwpci->irq_lock, flags); } +static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + int budget = NAPI_POLL_WEIGHT; + + /* To prevent RXQ get stuck due to run out of budget. 
*/ + rtwdev->napi_budget_countdown = budget; + + rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); + rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); +} + static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) { struct rtw89_dev *rtwdev = dev; @@ -733,6 +745,11 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) if (unlikely(rtwpci->under_recovery)) return IRQ_HANDLED; + if (unlikely(rtwpci->low_power)) { + rtw89_pci_low_power_interrupt_handler(rtwdev); + goto enable_intr; + } + if (likely(rtwpci->running)) { local_bh_disable(); napi_schedule(&rtwdev->napi); @@ -740,6 +757,12 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) } return IRQ_HANDLED; + +enable_intr: + spin_lock_irqsave(&rtwpci->irq_lock, flags); + rtw89_chip_enable_intr(rtwdev, rtwpci); + spin_unlock_irqrestore(&rtwpci->irq_lock, flags); + return IRQ_HANDLED; } static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) -- cgit v1.2.3 From 98816def1973dfb493143de93446a273b9f83327 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:54 +0800 Subject: rtw89: ser: re-enable interrupt in threadfn if under_recovery Normally, we re-enable interrupt by napi_poll, but for this special situation, we must turn it on immediately because napi_poll isn't scheduled. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index e8bcecbe77e1..ad3db5aa890c 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -743,7 +743,7 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); if (unlikely(rtwpci->under_recovery)) - return IRQ_HANDLED; + goto enable_intr; if (unlikely(rtwpci->low_power)) { rtw89_pci_low_power_interrupt_handler(rtwdev); -- cgit v1.2.3 From 52edbb9fb78a24f355830da2e668db13689e3da6 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:55 +0800 Subject: rtw89: ps: access TX/RX rings via another registers in low power mode In low power mode, we need to pause PCI to configure IMR and PCI ring index registers accordingly, because the regular registers are power-off in this mode. In the transition moment named paused in code, we can't touch ring index, so don't kick off DMA immediately. Instead, queue them into pending queue, and kick off after the moment. There are three low power modes, which are RF off/clock gate/power gate, but PCI enter low power mode in later two modes only. So, add a mask to achieve this. 
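As a rough illustration of the deferred kick-off, the ring index write is remembered in a bitmap while the HCI is paused and replayed afterwards. The sketch below is simplified (no trx_lock) and uses illustrative example_* names; kick_map and __rtw89_pci_tx_kick_off() are the ones from this patch:

static void example_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (rtwdev->hci.paused) {
		/* Ring index registers are not reachable right now;
		 * remember the channel and replay it later.
		 */
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, &rtwpci->tx_rings[txch]);
}

static void example_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int txch;

	/* Called once the pause ends and registers are reachable again */
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++)
		if (test_and_clear_bit(txch, rtwpci->kick_map))
			__rtw89_pci_tx_kick_off(rtwdev, &rtwpci->tx_rings[txch]);
}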
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 14 ++++ drivers/net/wireless/realtek/rtw89/pci.c | 103 ++++++++++++++++++++++++- drivers/net/wireless/realtek/rtw89/pci.h | 16 ++++ drivers/net/wireless/realtek/rtw89/ps.c | 34 +++++++- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 10 +++ 8 files changed, 177 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index f34aca70908e..27a3ceda90b9 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2036,6 +2036,8 @@ struct rtw89_hci_ops { void (*reset)(struct rtw89_dev *rtwdev); int (*start)(struct rtw89_dev *rtwdev); void (*stop)(struct rtw89_dev *rtwdev); + void (*pause)(struct rtw89_dev *rtwdev, bool pause); + void (*switch_mode)(struct rtw89_dev *rtwdev, bool low_power); void (*recalc_int_mit)(struct rtw89_dev *rtwdev); u8 (*read8)(struct rtw89_dev *rtwdev, u32 addr); @@ -2067,6 +2069,7 @@ struct rtw89_hci_info { enum rtw89_hci_type type; u32 rpwm_addr; u32 cpwm_addr; + bool paused; }; struct rtw89_chip_ops { @@ -2428,6 +2431,7 @@ struct rtw89_chip_info { u8 rf_para_dlink_num; const struct rtw89_btc_rf_trx_para *rf_para_dlink; u8 ps_mode_supported; + u8 low_power_hci_modes; u32 h2c_cctl_func_id; u32 hci_func_en_addr; @@ -3167,6 +3171,16 @@ static inline int rtw89_hci_deinit(struct rtw89_dev *rtwdev) return rtwdev->hci.ops->deinit(rtwdev); } +static inline void rtw89_hci_pause(struct rtw89_dev *rtwdev, bool pause) +{ + rtwdev->hci.ops->pause(rtwdev, pause); +} + +static inline void rtw89_hci_switch_mode(struct rtw89_dev *rtwdev, bool low_power) +{ + rtwdev->hci.ops->switch_mode(rtwdev, low_power); +} + static inline void rtw89_hci_recalc_int_mit(struct rtw89_dev *rtwdev) { rtwdev->hci.ops->recalc_int_mit(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index ad3db5aa890c..31c62d116f44 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -919,6 +919,21 @@ u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) return cnt; } +static +u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, + u8 txch) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; + u32 cnt; + + spin_lock_bh(&rtwpci->trx_lock); + cnt = rtw89_pci_get_avail_txbd_num(tx_ring); + spin_unlock_bh(&rtwpci->trx_lock); + + return cnt; +} + static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 txch) { @@ -961,6 +976,9 @@ out_unlock: static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 txch) { + if (rtwdev->hci.paused) + return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); + if (txch == RTW89_TXCH_CH12) return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); @@ -969,12 +987,17 @@ static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) { + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; struct rtw89_pci_dma_ring *bd_ring = 
&tx_ring->bd_ring; u32 host_idx, addr; + spin_lock_bh(&rtwpci->trx_lock); + addr = bd_ring->addr.idx; host_idx = bd_ring->wp; rtw89_write16(rtwdev, addr, host_idx); + + spin_unlock_bh(&rtwpci->trx_lock); } static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, @@ -995,9 +1018,27 @@ static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; - spin_lock_bh(&rtwpci->trx_lock); + if (rtwdev->hci.paused) { + set_bit(txch, rtwpci->kick_map); + return; + } + __rtw89_pci_tx_kick_off(rtwdev, tx_ring); - spin_unlock_bh(&rtwpci->trx_lock); +} + +static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct rtw89_pci_tx_ring *tx_ring; + int txch; + + for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { + if (!test_and_clear_bit(txch, rtwpci->kick_map)) + continue; + + tx_ring = &rtwpci->tx_rings[txch]; + __rtw89_pci_tx_kick_off(rtwdev, tx_ring); + } } static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) @@ -1423,6 +1464,62 @@ static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) rtw89_core_napi_stop(rtwdev); } +static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + + if (pause) { + rtw89_pci_disable_intr_lock(rtwdev); + synchronize_irq(pdev->irq); + if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) + napi_synchronize(&rtwdev->napi); + } else { + rtw89_pci_enable_intr_lock(rtwdev); + rtw89_pci_tx_kick_off_pending(rtwdev); + } +} + +static +void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + const struct rtw89_pci_info *info = rtwdev->pci_info; + const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; + const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; + struct rtw89_pci_tx_ring *tx_ring; + struct rtw89_pci_rx_ring *rx_ring; + int i; + + if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) + return; + + for (i = 0; i < RTW89_TXCH_NUM; i++) { + tx_ring = &rtwpci->tx_rings[i]; + tx_ring->bd_ring.addr.idx = low_power ? + bd_idx_addr->tx_bd_addrs[i] : + dma_addr_set->tx[i].idx; + } + + for (i = 0; i < RTW89_RXCH_NUM; i++) { + rx_ring = &rtwpci->rx_rings[i]; + rx_ring->bd_ring.addr.idx = low_power ? + bd_idx_addr->rx_bd_addrs[i] : + dma_addr_set->rx[i].idx; + } +} + +static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) +{ + enum rtw89_pci_intr_mask_cfg cfg; + + WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); + + cfg = low_power ? 
RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; + rtw89_chip_config_intr_mask(rtwdev, cfg); + rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); +} + static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) @@ -3488,6 +3585,8 @@ static const struct rtw89_hci_ops rtw89_pci_ops = { .reset = rtw89_pci_ops_reset, .start = rtw89_pci_ops_start, .stop = rtw89_pci_ops_stop, + .pause = rtw89_pci_ops_pause, + .switch_mode = rtw89_pci_ops_switch_mode, .recalc_int_mit = rtw89_pci_recalc_int_mit, .read8 = rtw89_pci_ops_read8, diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index c203c8cbf163..bb585ed19190 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -202,6 +202,15 @@ #define B_AX_PCIE_DBG_STE_INT BIT(13) /* TX/RX */ +#define R_AX_DRV_FW_HSK_0 0x01B0 +#define R_AX_DRV_FW_HSK_1 0x01B4 +#define R_AX_DRV_FW_HSK_2 0x01B8 +#define R_AX_DRV_FW_HSK_3 0x01BC +#define R_AX_DRV_FW_HSK_4 0x01C0 +#define R_AX_DRV_FW_HSK_5 0x01C4 +#define R_AX_DRV_FW_HSK_6 0x01C8 +#define R_AX_DRV_FW_HSK_7 0x01CC + #define R_AX_RXQ_RXBD_IDX 0x1050 #define R_AX_RPQ_RXBD_IDX 0x1054 #define R_AX_ACH0_TXBD_IDX 0x1058 @@ -658,6 +667,11 @@ enum rtw89_pci_intr_mask_cfg { struct rtw89_pci_isrs; struct rtw89_pci; +struct rtw89_pci_bd_idx_addr { + u32 tx_bd_addrs[RTW89_TXCH_NUM]; + u32 rx_bd_addrs[RTW89_RXCH_NUM]; +}; + struct rtw89_pci_ch_dma_addr { u32 num; u32 idx; @@ -703,6 +717,7 @@ struct rtw89_pci_info { u32 rpwm_addr; u32 cpwm_addr; + const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power; const struct rtw89_pci_ch_dma_addr_set *dma_addr_set; int (*ltr_set)(struct rtw89_dev *rtwdev, bool en); @@ -880,6 +895,7 @@ struct rtw89_pci { struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM]; struct sk_buff_head h2c_queue; struct sk_buff_head h2c_release_queue; + DECLARE_BITMAP(kick_map, RTW89_TXCH_NUM); u32 ind_intrs; u32 halt_c2h_intrs; diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index 7eaa01e41ef2..a90b33720588 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -29,6 +29,36 @@ static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid) return 0; } +static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev, + bool enter) +{ + ieee80211_stop_queues(rtwdev->hw); + rtwdev->hci.paused = true; + flush_work(&rtwdev->txq_work); + ieee80211_wake_queues(rtwdev->hw); + + rtw89_hci_pause(rtwdev, true); + rtw89_mac_power_mode_change(rtwdev, enter); + rtw89_hci_switch_mode(rtwdev, enter); + rtw89_hci_pause(rtwdev, false); + + rtwdev->hci.paused = false; + + if (!enter) { + local_bh_disable(); + napi_schedule(&rtwdev->napi); + local_bh_enable(); + } +} + +static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter) +{ + if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode)) + rtw89_ps_power_mode_change_with_hci(rtwdev, enter); + else + rtw89_mac_power_mode_change(rtwdev, enter); +} + static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev) { if (!rtwdev->ps_mode) @@ -37,7 +67,7 @@ static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev) if (test_and_set_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) return; - rtw89_mac_power_mode_change(rtwdev, true); + rtw89_ps_power_mode_change(rtwdev, true); } void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev) @@ -46,7 +76,7 @@ void 
__rtw89_leave_ps_mode(struct rtw89_dev *rtwdev) return; if (test_and_clear_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) - rtw89_mac_power_mode_change(rtwdev, false); + rtw89_ps_power_mode_change(rtwdev, false); } static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index cb93287d4722..5af618709ded 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2150,6 +2150,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) | BIT(RTW89_PS_MODE_CLK_GATED) | BIT(RTW89_PS_MODE_PWR_GATED), + .low_power_hci_modes = 0, .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD, .hci_func_en_addr = R_AX_HCI_FUNC_EN, .h2c_desc_size = sizeof(struct rtw89_txwd_body), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index c6937e5943ea..190c4aefb02e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -41,6 +41,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM, .cpwm_addr = R_AX_CPWM, + .bd_idx_addr_low_power = NULL, .dma_addr_set = &rtw89_pci_ch_dma_addr_set, .ltr_set = rtw89_pci_ltr_set, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 510614630bfb..1302d4324473 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2006,6 +2006,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .dav_log_efuse_size = 16, .phycap_addr = 0x590, .phycap_size = 0x60, + .low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) | + BIT(RTW89_PS_MODE_PWR_GATED), .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1, .hci_func_en_addr = R_AX_HCI_FUNC_EN_V1, .h2c_desc_size = sizeof(struct rtw89_rxdesc_short), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 4d71cc87f7ba..fc0394494013 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -9,6 +9,15 @@ #include "reg.h" #include "rtw8852c.h" +static const struct rtw89_pci_bd_idx_addr rtw8852c_bd_idx_addr_low_power = { + .tx_bd_addrs = {R_AX_DRV_FW_HSK_0, R_AX_DRV_FW_HSK_1, R_AX_DRV_FW_HSK_2, + R_AX_DRV_FW_HSK_3, 0, 0, + 0, 0, R_AX_DRV_FW_HSK_4, + 0, 0, 0, + R_AX_DRV_FW_HSK_5}, + .rx_bd_addrs = {R_AX_DRV_FW_HSK_6, R_AX_DRV_FW_HSK_7}, +}; + static const struct rtw89_pci_info rtw8852c_pci_info = { .txbd_trunc_mode = MAC_AX_BD_TRUNC, .rxbd_trunc_mode = MAC_AX_BD_TRUNC, @@ -41,6 +50,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM_V1, .cpwm_addr = R_AX_PCIE_CRPWM, + .bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power, .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, .ltr_set = rtw89_pci_ltr_set_v1, -- cgit v1.2.3 From d7259cdbd05598ec7e1e5a14f4c3644d80331758 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:56 +0800 Subject: rtw89: pci: allow to process RPP prior to TX BD RPP is to report certain skb(s) can be freed, and TX BD indicates which TX descriptors can be freed. Normally, TX BD is happened before RPP. In low power mode, RPP can happen ahead, so change flow to handle this case. 
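Roughly speaking, the TX BD reclaim path now tolerates a TX WD whose skb was already freed by an early RPP. The fragment below is a simplified sketch of that check, using the helpers that already exist in pci.c:

	/* While walking TX BDs the hardware has completed: */
	list_del_init(&txwd->list);

	/* If the RPP already freed this page's skb (possible in low
	 * power mode), the page can be returned to the free list here.
	 */
	if (skb_queue_len(&txwd->queue) == 0)
		rtw89_pci_enqueue_txwd(tx_ring, txwd);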
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 31c62d116f44..2bdce7024f25 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -382,6 +382,10 @@ static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx } list_del_init(&txwd->list); + + /* this skb has been freed by RPP */ + if (skb_queue_len(&txwd->queue) == 0) + rtw89_pci_enqueue_txwd(tx_ring, txwd); } } @@ -413,18 +417,12 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, if (!list_empty(&txwd->list)) { rtw89_pci_reclaim_txbd(rtwdev, tx_ring); - if (!list_empty(&txwd->list)) { + /* In low power mode, RPP can receive before updating of TX BD. + * In normal mode, it should not happen so give it a warning. + */ + if (!rtwpci->low_power && !list_empty(&txwd->list)) rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", txch, seq); - return; - } - } - - /* currently, support for only one frame */ - if (skb_queue_len(&txwd->queue) != 1) { - rtw89_warn(rtwdev, "empty pending queue %d page %d\n", - txch, seq); - return; } skb_queue_walk_safe(&txwd->queue, skb, tmp) { @@ -437,7 +435,8 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); } - rtw89_pci_enqueue_txwd(tx_ring, txwd); + if (list_empty(&txwd->list)) + rtw89_pci_enqueue_txwd(tx_ring, txwd); } static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, -- cgit v1.2.3 From fc5f311fce742d906294360e378c1df631d2d692 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:57 +0800 Subject: rtw89: don't flush hci queues and send h2c if power is off When disconnecting, it warns somethings after power is off, and we can't do HCI IO. 
So, add this patch to avoid the messages below: rtw89_8852ce 0000:03:00.0: timed out to flush pci txch: 11 rtw89_8852ce 0000:03:00.0: failed to pre-release fwcmd Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 7 +++++++ drivers/net/wireless/realtek/rtw89/core.h | 3 +++ 2 files changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 796b205854ac..e3317deafa1d 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -856,6 +856,13 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev, u32 cnt; int ret; + if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { + rtw89_debug(rtwdev, RTW89_DBG_FW, + "ignore h2c due to power is off with firmware state=%d\n", + test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)); + return 0; + } + tx_req.skb = skb; tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD; if (fwdl) diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 27a3ceda90b9..6ca915cb7a29 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -3199,6 +3199,9 @@ static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues, bool drop) { + if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) + return; + if (rtwdev->hci.ops->flush_queues) return rtwdev->hci.ops->flush_queues(rtwdev, queues, drop); } -- cgit v1.2.3 From 16b44ed0ffd3ccb4bfe741a6ed88c08faf84efa4 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:58 +0800 Subject: rtw89: add RF H2C to notify firmware The IQK results in hardware have two copies, which the firmware switches between to support MCC. This H2C tells the firmware the corresponding channel and band of each IQK result, and which one is current.
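A minimal sketch of how an RFK flow could record the per-table channel/band and push the notification; rtw89_example_save_iqk_channel() is an illustrative caller, while the mcc fields and rtw89_fw_h2c_rf_ntfy_mcc() are the ones added here:

static void rtw89_example_save_iqk_channel(struct rtw89_dev *rtwdev, u8 tbl_idx)
{
	struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;

	/* tbl_idx selects one of the RTW89_IQK_CHS_NR table copies.
	 * Remember which channel/band this copy was calibrated on.
	 */
	mcc_info->ch[tbl_idx] = rtwdev->hal.current_channel;
	mcc_info->band[tbl_idx] = rtwdev->hal.current_band_type;
	mcc_info->table_idx = tbl_idx;

	/* Let the firmware pick the matching copy when MCC switches */
	rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}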
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-10-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 8 ++++++ drivers/net/wireless/realtek/rtw89/fw.c | 39 +++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/fw.h | 12 +++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 14 ++++++++++ 4 files changed, 73 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 6ca915cb7a29..9be74d8673cf 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2634,6 +2634,13 @@ struct rtw89_dack_info { #define RTW89_IQK_CHS_NR 2 #define RTW89_IQK_PATH_NR 4 + +struct rtw89_mcc_info { + u8 ch[RTW89_IQK_CHS_NR]; + u8 band[RTW89_IQK_CHS_NR]; + u8 table_idx; +}; + struct rtw89_iqk_info { bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; @@ -3105,6 +3112,7 @@ struct rtw89_dev { struct rtw89_dack_info dack; struct rtw89_iqk_info iqk; struct rtw89_dpk_info dpk; + struct rtw89_mcc_info mcc; bool is_tssi_mode[RF_PATH_MAX]; bool is_bt_iqk_timeout; diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 7bef8b4288f0..e4be785709d1 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -1785,6 +1785,45 @@ fail: return -EBUSY; } +int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + struct rtw89_fw_h2c_rf_get_mccch *mccch; + struct sk_buff *skb; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); + return -ENOMEM; + } + skb_put(skb, sizeof(*mccch)); + mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; + + mccch->ch_0 = cpu_to_le32(mcc_info->ch[0]); + mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]); + mccch->band_0 = cpu_to_le32(mcc_info->band[0]); + mccch->band_1 = cpu_to_le32(mcc_info->band[1]); + mccch->current_channel = cpu_to_le32(rtwdev->hal.current_channel); + mccch->current_band_type = cpu_to_le32(rtwdev->hal.current_band_type); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, + H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, + sizeof(*mccch)); + + if (rtw89_h2c_tx(rtwdev, skb, false)) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return -EBUSY; +} +EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); + int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, bool rack, bool dack) diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index aaabfc0dfd71..95a55c4213db 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -2538,6 +2538,17 @@ struct rtw89_fw_h2c_rf_reg_info { #define H2C_CL_OUTSRC_RF_REG_A 0x8 #define H2C_CL_OUTSRC_RF_REG_B 0x9 +#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa +#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2 + +struct rtw89_fw_h2c_rf_get_mccch { + __le32 ch_0; + __le32 ch_1; + __le32 band_0; + __le32 band_1; + __le32 current_channel; + __le32 current_band_type; +} __packed; #define RTW89_FW_RSVD_PLE_SIZE 0x800 @@ -2602,6 +2613,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, struct 
rtw89_fw_h2c_rf_reg_info *info, u16 len, u8 page); +int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev); int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, bool rack, bool dack); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 1302d4324473..3ee57df0a639 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1766,6 +1766,18 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter, } } +static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + + memset(mcc_info, 0, sizeof(*mcc_info)); +} + +static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) +{ + rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); +} + static void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx) @@ -1955,6 +1967,8 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_channel_help = rtw8852c_set_channel_help, .read_efuse = rtw8852c_read_efuse, .read_phycap = rtw8852c_read_phycap, + .rfk_init = rtw8852c_rfk_init, + .rfk_channel = rtw8852c_rfk_channel, .power_trim = rtw8852c_power_trim, .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, -- cgit v1.2.3 From cd89a47105dc697ee8a1e36f09147c2f0a03c830 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:08:59 +0800 Subject: rtw89: 8852c: configure default BB TX/RX path 8852c propose new API to configure BB TX/RX path. Without fix patch, it can't transmit any packet. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-11-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 9 ++ drivers/net/wireless/realtek/rtw89/phy.c | 1 + drivers/net/wireless/realtek/rtw89/reg.h | 76 +++++++++- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 194 ++++++++++++++++++++++++++ 5 files changed, 279 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 9be74d8673cf..d544cf29e588 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2104,6 +2104,7 @@ struct rtw89_chip_ops { struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status); void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en); + void (*cfg_txrx_path)(struct rtw89_dev *rtwdev); void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx); int (*pwr_on_func)(struct rtw89_dev *rtwdev); @@ -3633,6 +3634,14 @@ static inline void rtw89_chip_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, chip->ops->bb_ctrl_btc_preagc(rtwdev, bt_en); } +static inline void rtw89_chip_cfg_txrx_path(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (chip->ops->cfg_txrx_path) + chip->ops->cfg_txrx_path(rtwdev); +} + static inline void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 7d0593d8fafe..33494e8451cf 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -3592,6 +3592,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) rtw89_load_txpwr_table(rtwdev, chip->byr_table); 
rtw89_chip_set_txpwr_ctrl(rtwdev); rtw89_chip_power_trim(rtwdev); + rtw89_chip_cfg_txrx_path(rtwdev); } void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 0f08b2581797..6dc11e8e2a83 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2962,6 +2962,45 @@ #define R_AX_PWR_MACID_LMT_TABLE0 0xD36C #define R_AX_PWR_MACID_LMT_TABLE127 0xD568 +#define R_AX_PATH_COM0 0xD800 +#define AX_PATH_COM0_DFVAL 0x00000000 +#define AX_PATH_COM0_PATHA 0x08888880 +#define AX_PATH_COM0_PATHB 0x11111100 +#define AX_PATH_COM0_PATHAB 0x19999980 +#define R_AX_PATH_COM1 0xD804 +#define AX_PATH_COM1_DFVAL 0x00000000 +#define AX_PATH_COM1_PATHA 0x11111111 +#define AX_PATH_COM1_PATHB 0x22222222 +#define AX_PATH_COM1_PATHAB 0x33333333 +#define R_AX_PATH_COM2 0xD808 +#define AX_PATH_COM2_DFVAL 0x00000000 +#define AX_PATH_COM2_PATHA 0x01209111 +#define AX_PATH_COM2_PATHB 0x01209222 +#define AX_PATH_COM2_PATHAB 0x01209333 +#define R_AX_PATH_COM3 0xD80C +#define AX_PATH_COM3_DFVAL 0x49249249 +#define R_AX_PATH_COM4 0xD810 +#define AX_PATH_COM4_DFVAL 0x1C9C9C49 +#define R_AX_PATH_COM5 0xD814 +#define AX_PATH_COM5_DFVAL 0x39393939 +#define R_AX_PATH_COM6 0xD818 +#define AX_PATH_COM6_DFVAL 0x39393939 +#define R_AX_PATH_COM7 0xD81C +#define AX_PATH_COM7_DFVAL 0x39393939 +#define AX_PATH_COM7_PATHA 0x39393939 +#define AX_PATH_COM7_PATHB 0x39383939 +#define AX_PATH_COM7_PATHAB 0x39393939 +#define R_AX_PATH_COM8 0xD820 +#define AX_PATH_COM8_DFVAL 0x00000000 +#define AX_PATH_COM8_PATHA 0x00003939 +#define AX_PATH_COM8_PATHB 0x00003938 +#define AX_PATH_COM8_PATHAB 0x00003939 +#define R_AX_PATH_COM9 0xD824 +#define AX_PATH_COM9_DFVAL 0x000007C0 +#define R_AX_PATH_COM10 0xD828 +#define AX_PATH_COM10_DFVAL 0xE0000000 +#define R_AX_PATH_COM11 0xD82C +#define AX_PATH_COM11_DFVAL 0x00000000 #define R_P80_AT_HIGH_FREQ_BB_WRP 0xD848 #define B_P80_AT_HIGH_FREQ_BB_WRP BIT(28) #define R_AX_TSSI_CTRL_HEAD 0xD908 @@ -3097,6 +3136,9 @@ #define R_AX_LTE_WDATA 0xDAF4 #define R_AX_LTE_RDATA 0xDAF8 +#define R_AX_MACID_ANT_TABLE 0xDC00 +#define R_AX_MACID_ANT_TABLE_LAST 0xDDFC + #define CMAC1_START_ADDR 0xE000 #define CMAC1_END_ADDR 0xFFFF #define R_AX_CMAC_REG_END 0xFFFF @@ -3360,9 +3402,10 @@ #define R_PMAC_RXMOD 0x0994 #define B_PMAC_RXMOD_MSK GENMASK(7, 4) #define R_MAC_SEL 0x09A4 -#define B_MAC_SEL_MOD GENMASK(4, 2) -#define B_MAC_SEL_DPD_EN BIT(10) +#define B_MAC_SEL_OFDM_TRI_FILTER BIT(31) #define B_MAC_SEL_PWR_EN BIT(16) +#define B_MAC_SEL_DPD_EN BIT(10) +#define B_MAC_SEL_MOD GENMASK(4, 2) #define R_PMAC_TX_CTRL 0x09C0 #define B_PMAC_TXEN_DIS BIT(0) #define R_PMAC_TX_PRD 0x09C4 @@ -3413,8 +3456,16 @@ #define B_SNDCCA_A1_EN GENMASK(19, 12) #define R_SNDCCA_A2 0x0CA0 #define B_SNDCCA_A2_VAL GENMASK(19, 12) +#define R_RXHT_MCS_LIMIT 0x0D18 +#define B_RXHT_MCS_LIMIT GENMASK(9, 8) +#define R_RXVHT_MCS_LIMIT 0x0D18 +#define B_RXVHT_MCS_LIMIT GENMASK(22, 21) #define R_P0_EN_SOUND_WO_NDP 0x0D7C #define B_P0_EN_SOUND_WO_NDP BIT(1) +#define R_RXHE 0x0D80 +#define B_RXHETB_MAX_NSS GENMASK(25, 23) +#define B_RXHE_MAX_NSS GENMASK(16, 14) +#define B_RXHE_USER_MAX GENMASK(13, 6) #define R_SPOOF_ASYNC_RST 0x0D84 #define B_SPOOF_ASYNC_RST BIT(15) #define R_NDP_BRK0 0xDA0 @@ -3634,6 +3685,8 @@ #define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) #define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4 #define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) +#define R_PATH0_G_LNA6_OP1DB_V1 
0x4688 +#define B_PATH0_G_LNA6_OP1DB_V1 GENMASK(31, 24) #define R_PATH0_G_TIA0_LNA6_OP1DB_V1 0x4694 #define B_PATH0_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0) #define R_PATH0_G_TIA1_LNA6_OP1DB_V1 0x4694 @@ -3650,6 +3703,9 @@ #define R_P0_NBIIDX 0x469C #define B_P0_NBIIDX_VAL GENMASK(11, 0) #define B_P0_NBIIDX_NOTCH_EN BIT(12) +#define R_P0_BACKOFF_IBADC_V1 0x469C +#define B_P0_BACKOFF_IBADC_V1 GENMASK(31, 26) +#define B_P0_NBIIDX_NOTCH_EN_V1 BIT(12) #define R_P1_MODE 0x4718 #define B_P1_MODE_SEL GENMASK(31, 30) #define R_PATH1_LNA_INIT 0x473C @@ -3668,6 +3724,8 @@ #define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5) #define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778 #define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0) +#define R_PATH1_G_TIA1_LNA6_OP1DB_V1 0x4778 +#define B_PATH1_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8) #define R_PATH1_BAND_SEL_V1 0x4AA4 #define B_PATH1_BAND_SEL_MSK_V1 BIT(17) #define R_PATH1_BT_SHARE_V1 0x4AA4 @@ -3693,15 +3751,29 @@ #define B_CHBW_MOD_SBW GENMASK(13, 12) #define B_CHBW_MOD_PRICH GENMASK(11, 8) #define B_ANT_RX_SEG0 GENMASK(3, 0) +#define R_P1_BACKOFF_IBADC_V1 0x49F0 +#define B_P1_BACKOFF_IBADC_V1 GENMASK(31, 26) #define R_BK_FC0_INV_V1 0x4A1C #define B_BK_FC0_INV_MSK_V1 GENMASK(18, 0) #define R_CCK_FC0_INV_V1 0x4A20 #define B_CCK_FC0_INV_MSK_V1 GENMASK(18, 0) +#define R_PATH0_RXBB_V1 0x4AD4 +#define B_PATH0_RXBB_MSK_V1 GENMASK(31, 0) +#define R_PATH1_RXBB_V1 0x4AE0 +#define B_PATH1_RXBB_MSK_V1 GENMASK(31, 0) +#define R_PATH0_BT_BACKOFF_V1 0x4AE4 +#define B_PATH0_BT_BACKOFF_V1 GENMASK(23, 0) +#define R_PATH1_BT_BACKOFF_V1 0x4AEC +#define B_PATH1_BT_BACKOFF_V1 GENMASK(23, 0) +#define R_PATH0_FRC_FIR_TYPE_V1 0x4C00 +#define B_PATH0_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0) #define R_PATH0_5MDET 0x4C4C #define B_PATH0_5MDET_EN BIT(12) #define B_PATH0_5MDET_SB2 BIT(8) #define B_PATH0_5MDET_SB0 BIT(6) #define B_PATH0_5MDET_TH GENMASK(5, 0) +#define R_PATH1_FRC_FIR_TYPE_V1 0x4CC4 +#define B_PATH1_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0) #define R_PATH1_5MDET 0x4D10 #define B_PATH1_5MDET_EN BIT(12) #define B_PATH1_5MDET_SB2 BIT(8) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 5af618709ded..81bd0c4fe21b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2066,6 +2066,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .ctrl_btg = rtw8852a_ctrl_btg, .query_ppdu = rtw8852a_query_ppdu, .bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc, + .cfg_txrx_path = NULL, .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, .pwr_on_func = NULL, .pwr_off_func = NULL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 3ee57df0a639..290c453d8c23 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1813,6 +1813,199 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, } } +static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path) +{ + struct rtw89_hal *hal = &rtwdev->hal; + u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; + u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; + + if (rtwdev->dbcc_en) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, B_ANT_RX_SEG0, 1); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_ANT_RX_SEG0, 2, + RTW89_PHY_1); + + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG0, + 1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG1, + 1); + 
rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG0, 2, + RTW89_PHY_1); + rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_ANT_RX_1RCCA_SEG1, 2, + RTW89_PHY_1); + + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, + B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, + B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 8); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + + rtw89_phy_write32_idx(rtwdev, R_RXHT_MCS_LIMIT, + B_RXHT_MCS_LIMIT, 0, RTW89_PHY_1); + rtw89_phy_write32_idx(rtwdev, R_RXVHT_MCS_LIMIT, + B_RXVHT_MCS_LIMIT, 0, RTW89_PHY_1); + rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHE_USER_MAX, 1, + RTW89_PHY_1); + rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0, + RTW89_PHY_1); + rtw89_phy_write32_idx(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0, + RTW89_PHY_1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3); + } else { + if (rx_path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, + B_ANT_RX_SEG0, 1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, + B_ANT_RX_1RCCA_SEG0, 1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, + B_ANT_RX_1RCCA_SEG1, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, + B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, + B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, + 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, + 0); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, + rst_mask0, 1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, + rst_mask0, 3); + } else if (rx_path == RF_PATH_B) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, + B_ANT_RX_SEG0, 2); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, + B_ANT_RX_1RCCA_SEG0, 2); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, + B_ANT_RX_1RCCA_SEG1, 2); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, + B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, + B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, + 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, + 0); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, + rst_mask1, 1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, + rst_mask1, 3); + } else { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD, + B_ANT_RX_SEG0, 3); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, + B_ANT_RX_1RCCA_SEG0, 3); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW, + B_ANT_RX_1RCCA_SEG1, 3); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, + B_RXHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, + B_RXVHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, + 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, + 1); + rtw8852c_ctrl_btg(rtwdev, hal->current_band_type == RTW89_BAND_2G); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, + rst_mask0, 1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, + rst_mask0, 3); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, + rst_mask1, 1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, + rst_mask1, 3); + } + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 8); + } +} + +static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, + enum rtw89_mac_idx mac_idx) +{ + struct rtw89_reg2_def path_com[] = { + {R_AX_PATH_COM0, 
AX_PATH_COM0_DFVAL}, + {R_AX_PATH_COM1, AX_PATH_COM1_DFVAL}, + {R_AX_PATH_COM2, AX_PATH_COM2_DFVAL}, + {R_AX_PATH_COM3, AX_PATH_COM3_DFVAL}, + {R_AX_PATH_COM4, AX_PATH_COM4_DFVAL}, + {R_AX_PATH_COM5, AX_PATH_COM5_DFVAL}, + {R_AX_PATH_COM6, AX_PATH_COM6_DFVAL}, + {R_AX_PATH_COM7, AX_PATH_COM7_DFVAL}, + {R_AX_PATH_COM8, AX_PATH_COM8_DFVAL}, + {R_AX_PATH_COM9, AX_PATH_COM9_DFVAL}, + {R_AX_PATH_COM10, AX_PATH_COM10_DFVAL}, + {R_AX_PATH_COM11, AX_PATH_COM11_DFVAL}, + }; + u32 addr; + u32 reg; + u8 cr_size = ARRAY_SIZE(path_com); + u8 i = 0; + + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, RTW89_PHY_0); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, RTW89_PHY_1); + + for (addr = R_AX_MACID_ANT_TABLE; + addr <= R_AX_MACID_ANT_TABLE_LAST; addr += 4) { + reg = rtw89_mac_reg_by_idx(addr, mac_idx); + rtw89_write32(rtwdev, reg, 0); + } + + if (tx_path == RF_PATH_A) { + path_com[0].data = AX_PATH_COM0_PATHA; + path_com[1].data = AX_PATH_COM1_PATHA; + path_com[2].data = AX_PATH_COM2_PATHA; + path_com[7].data = AX_PATH_COM7_PATHA; + path_com[8].data = AX_PATH_COM8_PATHA; + } else if (tx_path == RF_PATH_B) { + path_com[0].data = AX_PATH_COM0_PATHB; + path_com[1].data = AX_PATH_COM1_PATHB; + path_com[2].data = AX_PATH_COM2_PATHB; + path_com[7].data = AX_PATH_COM7_PATHB; + path_com[8].data = AX_PATH_COM8_PATHB; + } else if (tx_path == RF_PATH_AB) { + path_com[0].data = AX_PATH_COM0_PATHAB; + path_com[1].data = AX_PATH_COM1_PATHAB; + path_com[2].data = AX_PATH_COM2_PATHAB; + path_com[7].data = AX_PATH_COM7_PATHAB; + path_com[8].data = AX_PATH_COM8_PATHAB; + } else { + rtw89_warn(rtwdev, "[Invalid Tx Path]Tx Path: %d\n", tx_path); + return; + } + + for (i = 0; i < cr_size; i++) { + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "0x%x = 0x%x\n", + path_com[i].addr, path_com[i].data); + reg = rtw89_mac_reg_by_idx(path_com[i].addr, mac_idx); + rtw89_write32(rtwdev, reg, path_com[i].data); + } +} + +static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + + rtw8852c_bb_cfg_rx_path(rtwdev, RF_PATH_AB); + + if (hal->rx_nss == 1) { + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); + } + + rtw8852c_ctrl_tx_path_tmac(rtwdev, RF_PATH_AB, RTW89_MAC_0); +} + static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) { if (btg) { @@ -1973,6 +2166,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, + .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path, .pwr_on_func = rtw8852c_pwr_on_func, .pwr_off_func = rtw8852c_pwr_off_func, .fill_txdesc = rtw89_core_fill_txdesc_v1, -- cgit v1.2.3 From af0cac159b1c96633436b71a4a790d77a1d75858 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:09:00 +0800 Subject: rtw89: 8852c: implement chip_ops related to TX power Three chip_ops are implemented in this patch. 
The ::set_txpwr_ctrl and ::init_txpwr_unit are called when we up interface and then configure TX power registers to initial values. The ::set_txpwr_ctrl is to configure 'txpwr_ref' to make basic output TX power of OFDM and CCK rate to be the same. The ::init_txpwr_unit is to initialize TSSI (a method to do TX power compensation depends on thermal value) control and bandedge. The ::set_txpwr is called once switching channel. First, it sets TX power for each rate section (e.g. CCK, OFDM), and then sets TX power offset between 1SS and 2SS rate. Finally, it sets TX power limit to prevent power over regulation. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-12-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 324 ++++++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c.h | 1 + 2 files changed, 325 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 290c453d8c23..adcc2b597419 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1778,6 +1778,32 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } +static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, s16 ref) +{ + s8 ofst_int = 0; + u8 base_cw_0db = 0x27; + u16 tssi_16dbm_cw = 0x12c; + s16 pwr_s10_3 = 0; + s16 rf_pwr_cw = 0; + u16 bb_pwr_cw = 0; + u32 pwr_cw = 0; + u32 tssi_ofst_cw = 0; + + pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); + bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3); + rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3); + rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); + pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; + + tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", + tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); + + return (tssi_ofst_cw << 18) | (pwr_cw << 9) | (ref & GENMASK(8, 0)); +} + static void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, s8 pw_ofst, enum rtw89_mac_idx mac_idx) @@ -1813,6 +1839,301 @@ void rtw8852c_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, } } +static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + static const u32 addr[RF_PATH_NUM_8852C] = {0x5800, 0x7800}; + const u32 mask = 0x7FFFFFF; + const u8 ofst_ofdm = 0x4; + const u8 ofst_cck = 0x8; + s16 ref_ofdm = 0; + s16 ref_cck = 0; + u32 val; + u8 i; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n"); + + rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, + GENMASK(27, 10), 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); + val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); + + for (i = 0; i < RF_PATH_NUM_8852C; i++) + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, + phy_idx); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); + val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); + + for (i = 0; i < RF_PATH_NUM_8852C; i++) + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, + phy_idx); +} + +static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + u8 ch = rtwdev->hal.current_channel; + static const u8 rs[] = { + RTW89_RS_CCK, + RTW89_RS_OFDM, + RTW89_RS_MCS, + RTW89_RS_HEDCM, 
+ }; + s8 tmp; + u8 i, j; + u32 val, shf, addr = R_AX_PWR_BY_RATE; + struct rtw89_rate_desc cur; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr byrate with ch=%d\n", ch); + + for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) { + for (i = 0; i < ARRAY_SIZE(rs); i++) { + if (cur.nss >= rtw89_rs_nss_max[rs[i]]) + continue; + + val = 0; + cur.rs = rs[i]; + + for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) { + cur.idx = j; + shf = (j % 4) * 8; + tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur); + val |= (tmp << shf); + + if ((j + 1) % 4) + continue; + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + val = 0; + addr += 4; + } + } + } +} + +static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + struct rtw89_rate_desc desc = { + .nss = RTW89_NSS_1, + .rs = RTW89_RS_OFFSET, + }; + u32 val = 0; + s8 v; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n"); + + for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) { + v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc); + val |= ((v & 0xf) << (4 * desc.idx)); + } + + rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL, + GENMASK(19, 0), val); +} + +static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev, + u8 tx_shape_idx, + enum rtw89_phy_idx phy_idx) +{ +#define __DFIR_CFG_MASK 0xffffff +#define __DFIR_CFG_NR 8 +#define __DECL_DFIR_VAR(_prefix, _name, _val...) \ + static const u32 _prefix ## _ ## _name[] = {_val}; \ + static_assert(ARRAY_SIZE(_prefix ## _ ## _name) == __DFIR_CFG_NR) +#define __DECL_DFIR_PARAM(_name, _val...) __DECL_DFIR_VAR(param, _name, _val) +#define __DECL_DFIR_ADDR(_name, _val...) __DECL_DFIR_VAR(addr, _name, _val) + + __DECL_DFIR_PARAM(flat, + 0x003D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053, + 0x00F86F9A, 0x00FAEF92, 0x00FE5FCC, 0x00FFDFF5); + __DECL_DFIR_PARAM(sharp, + 0x003D83FF, 0x002C636A, 0x0013F204, 0x00008090, + 0x00F87FB0, 0x00F99F83, 0x00FDBFBA, 0x00003FF5); + __DECL_DFIR_PARAM(sharp_14, + 0x003B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E, + 0x00FD8F92, 0x0002D011, 0x0001C02C, 0x00FFF00A); + __DECL_DFIR_ADDR(filter, + 0x45BC, 0x45CC, 0x45D0, 0x45D4, 0x45D8, 0x45C0, + 0x45C4, 0x45C8); + u8 ch = rtwdev->hal.current_channel; + const u32 *param; + int i; + + if (ch > 14) { + rtw89_warn(rtwdev, + "set tx shape dfir by unknown ch: %d on 2G\n", ch); + return; + } + + if (ch == 14) + param = param_sharp_14; + else + param = tx_shape_idx == 0 ? 
param_flat : param_sharp; + + for (i = 0; i < __DFIR_CFG_NR; i++) { + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "set tx shape dfir: 0x%x: 0x%x\n", addr_filter[i], + param[i]); + rtw89_phy_write32_idx(rtwdev, addr_filter[i], __DFIR_CFG_MASK, + param[i], phy_idx); + } + +#undef __DECL_DFIR_ADDR +#undef __DECL_DFIR_PARAM +#undef __DECL_DFIR_VAR +#undef __DFIR_CFG_NR +#undef __DFIR_CFG_MASK +} + +static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + u8 band = rtwdev->hal.current_band_type; + u8 regd = rtw89_regd_get(rtwdev, band); + u8 tx_shape_cck = rtw89_8852c_tx_shape[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd]; + + if (band == RTW89_BAND_2G) + rtw8852c_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx); + + rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev, + (enum rtw89_mac_idx)phy_idx, + tx_shape_ofdm); +} + +static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ +#define __MAC_TXPWR_LMT_PAGE_SIZE 40 + u8 ch = rtwdev->hal.current_channel; + u8 bw = rtwdev->hal.current_band_width; + struct rtw89_txpwr_limit lmt[NTX_NUM_8852C]; + u32 addr, val; + const s8 *ptr; + u8 i, j, k; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw); + + for (i = 0; i < NTX_NUM_8852C; i++) { + rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i); + + for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) { + addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i; + ptr = (s8 *)&lmt[i] + j; + val = 0; + + for (k = 0; k < 4; k++) + val |= (ptr[k] << (8 * k)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } +#undef __MAC_TXPWR_LMT_PAGE_SIZE +} + +static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ +#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24 + u8 ch = rtwdev->hal.current_channel; + u8 bw = rtwdev->hal.current_band_width; + struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852C]; + u32 addr, val; + const s8 *ptr; + u8 i, j, k; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw); + + for (i = 0; i < NTX_NUM_8852C; i++) { + rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i); + + for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) { + addr = R_AX_PWR_RU_LMT + j + + __MAC_TXPWR_LMT_RU_PAGE_SIZE * i; + ptr = (s8 *)&lmt_ru[i] + j; + val = 0; + + for (k = 0; k < 4; k++) + val |= (ptr[k] << (8 * k)); + + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); + } + } + +#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE +} + +static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev) +{ + rtw8852c_set_txpwr_byrate(rtwdev, RTW89_PHY_0); + rtw8852c_set_txpwr_offset(rtwdev, RTW89_PHY_0); + rtw8852c_set_tx_shape(rtwdev, RTW89_PHY_0); + rtw8852c_set_txpwr_limit(rtwdev, RTW89_PHY_0); + rtw8852c_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0); +} + +static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev) +{ + rtw8852c_set_txpwr_ref(rtwdev, RTW89_PHY_0); +} + +static void +rtw8852c_init_tssi_ctrl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + static const struct rtw89_reg2_def ctrl_ini[] = { + {0xD938, 0x00010100}, + {0xD93C, 0x0500D500}, + {0xD940, 0x00000500}, + {0xD944, 0x00000005}, + {0xD94C, 0x00220000}, + {0xD950, 0x00030000}, + }; + u32 addr; + int i; + + for (addr = R_AX_TSSI_CTRL_HEAD; addr <= R_AX_TSSI_CTRL_TAIL; addr += 4) + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0); + + for (i = 0; i < ARRAY_SIZE(ctrl_ini); i++) + rtw89_mac_txpwr_write32(rtwdev, phy_idx, 
ctrl_ini[i].addr, + ctrl_ini[i].data); + + rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev, + (enum rtw89_mac_idx)phy_idx, + RTW89_TSSI_BANDEDGE_FLAT); +} + +static int +rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + int ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff); + if (ret) + return ret; + + rtw8852c_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ? + RTW89_MAC_1 : + RTW89_MAC_0); + rtw8852c_init_tssi_ctrl(rtwdev, phy_idx); + + return 0; +} + static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path) { struct rtw89_hal *hal = &rtwdev->hal; @@ -2163,6 +2484,9 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .rfk_init = rtw8852c_rfk_init, .rfk_channel = rtw8852c_rfk_channel, .power_trim = rtw8852c_power_trim, + .set_txpwr = rtw8852c_set_txpwr, + .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl, + .init_txpwr_unit = rtw8852c_init_txpwr_unit, .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.h b/drivers/net/wireless/realtek/rtw89/rtw8852c.h index ac642808a81f..558dd0f048f2 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.h @@ -9,6 +9,7 @@ #define RF_PATH_NUM_8852C 2 #define BB_PATH_NUM_8852C 2 +#define NTX_NUM_8852C 2 struct rtw8852c_u_efuse { u8 rsvd[0x38]; -- cgit v1.2.3 From 3ecca403d9bf714be570ca2a31569485618c9da0 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:09:01 +0800 Subject: rtw89: 8852c: implement chip_ops::get_thermal Read thermal value, and then we can use EWMA thermal value to do RF calibrations if the value is changed over a threshold. 
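The EWMA bookkeeping that consumes this reading lives in the driver core rather than in this patch; purely as a hedged sketch (the names rtw89_thermal_track, rtw89_track_thermal and RTW89_THERMAL_DELTA are invented for illustration, while DECLARE_EWMA()/ewma_*() from <linux/average.h> and the ::get_thermal op are real), the threshold gating could look roughly like this:

#include <linux/average.h>

/* 4-bit precision, 1/4 weight EWMA of the raw thermal reading (parameters illustrative) */
DECLARE_EWMA(thermal, 4, 4)

struct rtw89_thermal_track {
	struct ewma_thermal avg;
	u8 last_cal_val;		/* averaged value at the last RF calibration */
};

#define RTW89_THERMAL_DELTA 4		/* hypothetical re-calibration threshold */

static void rtw89_track_thermal(struct rtw89_dev *rtwdev,
				struct rtw89_thermal_track *track)
{
	u8 cur, avg;

	/* ::get_thermal triggers a measurement and returns the raw RF value */
	cur = rtwdev->chip->ops->get_thermal(rtwdev, RF_PATH_A);
	ewma_thermal_add(&track->avg, cur);
	avg = ewma_thermal_read(&track->avg);

	/* only redo RF calibration once the averaged value drifts far enough */
	if (abs(avg - track->last_cal_val) >= RTW89_THERMAL_DELTA) {
		track->last_cal_val = avg;
		/* kick the chip's RF tracking/recalibration from here */
	}
}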
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-13-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index adcc2b597419..daf467eaef66 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2327,6 +2327,17 @@ static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) rtw8852c_ctrl_tx_path_tmac(rtwdev, RF_PATH_AB, RTW89_MAC_0); } +static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) +{ + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + + fsleep(200); + + return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL); +} + static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) { if (btg) { @@ -2487,6 +2498,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_txpwr = rtw8852c_set_txpwr, .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl, .init_txpwr_unit = rtw8852c_init_txpwr_unit, + .get_thermal = rtw8852c_get_thermal, .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, -- cgit v1.2.3 From f4ae7ccc2bbfa597db09ed5000a1efa4feb0d962 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:09:02 +0800 Subject: rtw89: 8852c: fill freq and band of RX status by PPDU report Hardware reports PPDU status containing encoded channel index to driver, so we decode it and then fill freq and band. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-14-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 33 +++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index daf467eaef66..f66e4e091e43 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2440,6 +2440,38 @@ static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev) btc->cx.wl.status.map.init_ok = true; } +static void rtw8852c_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u8 chan_idx = phy_ppdu->chan_idx; + enum nl80211_band band; + u8 ch; + + if (chan_idx == 0) + return; + + rtw8852c_decode_chan_idx(rtwdev, chan_idx, &ch, &band); + status->freq = ieee80211_channel_to_frequency(ch, band); + status->band = band; +} + +static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u8 path; + s8 *rx_power = phy_ppdu->rssi; + + status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]); + for (path = 0; path < rtwdev->chip->rf_path_num; path++) { + status->chains |= BIT(path); + status->chain_signal[path] = rx_power[path]; + } + if (phy_ppdu->valid) + rtw8852c_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); +} + static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev) { int ret; @@ -2499,6 +2531,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl, .init_txpwr_unit = rtw8852c_init_txpwr_unit, .get_thermal 
= rtw8852c_get_thermal, + .query_ppdu = rtw8852c_query_ppdu, .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, -- cgit v1.2.3 From 2fb822f82a59db899ba7b3a615cb0ddbc8c04f0f Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Thu, 21 Apr 2022 20:09:03 +0800 Subject: rtw89: 8852c: add chip_ops related to BTC Add some chip_ops to support BT coexistence to work properly. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421120903.73715-15-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 7 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 187 ++++++++++++++++++++++++++ 2 files changed, 194 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 6dc11e8e2a83..83cb509d2fbe 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3048,11 +3048,18 @@ #define B_AX_BT_CNT_RST_V1 BIT(1) #define B_AX_BT_CNT_EN BIT(0) +#define R_BTC_BT_CNT_HIGH 0xDA14 +#define R_BTC_BT_CNT_LOW 0xDA18 + #define R_AX_BTC_FUNC_EN 0xDA20 #define R_AX_BTC_FUNC_EN_C1 0xFA20 #define B_AX_PTA_WL_TX_EN BIT(1) #define B_AX_PTA_EDCCA_EN BIT(0) +#define R_BTC_COEX_WL_REQ 0xDA24 +#define B_BTC_TX_BCN_HI BIT(22) +#define B_BTC_RSP_ACK_HI BIT(10) + #define R_BTC_BREAK_TABLE 0xDA2C #define BTC_BREAK_PARAM 0xf0ffffff diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index f66e4e091e43..858611c64e6b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2338,6 +2338,33 @@ static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL); } +static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_module *module = &btc->mdinfo; + + module->rfe_type = rtwdev->efuse.rfe_type; + module->cv = rtwdev->hal.cv; + module->bt_solo = 0; + module->switch_type = BTC_SWITCH_INTERNAL; + + if (module->rfe_type > 0) + module->ant.num = (module->rfe_type % 2 ? 
2 : 3); + else + module->ant.num = 2; + + module->ant.diversity = 0; + module->ant.isolation = 10; + + if (module->ant.num == 3) { + module->ant.type = BTC_ANT_DEDICATED; + module->bt_pos = BTC_BT_ALONE; + } else { + module->ant.type = BTC_ANT_SHARED; + module->bt_pos = BTC_BT_BTG; + } +} + static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg) { if (btg) { @@ -2440,6 +2467,159 @@ static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev) btc->cx.wl.status.map.init_ok = true; } +static +void rtw8852c_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state) +{ + u32 bitmap = 0; + u32 reg = 0; + + switch (map) { + case BTC_PRI_MASK_TX_RESP: + reg = R_BTC_COEX_WL_REQ; + bitmap = B_BTC_RSP_ACK_HI; + break; + case BTC_PRI_MASK_BEACON: + reg = R_BTC_COEX_WL_REQ; + bitmap = B_BTC_TX_BCN_HI; + break; + default: + return; + } + + if (state) + rtw89_write32_set(rtwdev, reg, bitmap); + else + rtw89_write32_clr(rtwdev, reg, bitmap); +} + +union rtw8852c_btc_wl_txpwr_ctrl { + u32 txpwr_val; + struct { + union { + u16 ctrl_all_time; + struct { + s16 data:9; + u16 rsvd:6; + u16 flag:1; + } all_time; + }; + union { + u16 ctrl_gnt_bt; + struct { + s16 data:9; + u16 rsvd:7; + } gnt_bt; + }; + }; +} __packed; + +static void +rtw8852c_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val) +{ + union rtw8852c_btc_wl_txpwr_ctrl arg = { .txpwr_val = txpwr_val }; + s32 val; + +#define __write_ctrl(_reg, _msk, _val, _en, _cond) \ +do { \ + const typeof(_msk) __msk = _msk; \ + const typeof(_en) __en = _en; \ + u32 _wrt = FIELD_PREP(__msk, _val); \ + BUILD_BUG_ON((__msk & __en) != 0); \ + if (_cond) \ + _wrt |= __en; \ + else \ + _wrt &= ~__en; \ + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, _reg, \ + __msk | __en, _wrt); \ +} while (0) + + switch (arg.ctrl_all_time) { + case 0xffff: + val = 0; + break; + default: + val = arg.all_time.data; + break; + } + + __write_ctrl(R_AX_PWR_RATE_CTRL, B_AX_FORCE_PWR_BY_RATE_VALUE_MASK, + val, B_AX_FORCE_PWR_BY_RATE_EN, + arg.ctrl_all_time != 0xffff); + + switch (arg.ctrl_gnt_bt) { + case 0xffff: + val = 0; + break; + default: + val = arg.gnt_bt.data; + break; + }; + + __write_ctrl(R_AX_PWR_COEXT_CTRL, B_AX_TXAGC_BT_MASK, val, + B_AX_TXAGC_BT_EN, arg.ctrl_gnt_bt != 0xffff); + +#undef __write_ctrl +} + +static +s8 rtw8852c_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) +{ + return clamp_t(s8, val, -100, 0) + 100; +} + +static +void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_bt_link_info *b = &bt->link_info; + + /* fix LNA2 = level-5 for BT ACI issue at BTG */ + if (btc->dm.wl_btg_rx && b->profile_cnt.now != 0) + dm->trx_para_level = 1; +} + +static +void rtw8852c_btc_update_bt_cnt(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_cx *cx = &btc->cx; + u32 val; + + val = rtw89_read32(rtwdev, R_BTC_BT_CNT_HIGH); + cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val); + cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val); + + val = rtw89_read32(rtwdev, R_BTC_BT_CNT_LOW); + cx->cnt_bt[BTC_BCNT_LOPRI_TX] = FIELD_GET(B_AX_STATIS_BT_LO_TX_1_MASK, val); + cx->cnt_bt[BTC_BCNT_LOPRI_RX] = FIELD_GET(B_AX_STATIS_BT_LO_RX_1_MASK, val); + + /* clock-gate off before reset counter*/ + rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G); + rtw89_write32_clr(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST); + rtw89_write32_set(rtwdev, 
R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST); + rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G); +} + +static +void rtw8852c_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) +{ + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x620); + + /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */ + if (state) + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, + RFREG_MASK, 0x179c); + else + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, + RFREG_MASK, 0x208); + + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); +} + static void rtw8852c_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status) @@ -2546,7 +2726,14 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .resume_sch_tx = rtw89_mac_resume_sch_tx_v1, .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1, + .btc_set_rfe = rtw8852c_btc_set_rfe, .btc_init_cfg = rtw8852c_btc_init_cfg, + .btc_set_wl_pri = rtw8852c_btc_set_wl_pri, + .btc_set_wl_txpwr_ctrl = rtw8852c_btc_set_wl_txpwr_ctrl, + .btc_get_bt_rssi = rtw8852c_btc_get_bt_rssi, + .btc_bt_aci_imp = rtw8852c_btc_bt_aci_imp, + .btc_update_bt_cnt = rtw8852c_btc_update_bt_cnt, + .btc_wl_s1_standby = rtw8852c_btc_wl_s1_standby, }; const struct rtw89_chip_info rtw8852c_chip_info = { -- cgit v1.2.3 From 798e2df5067caeb38afe501decec02abdac8a0d7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Apr 2022 06:44:24 +0300 Subject: mlxsw: reg: Extend MDDQ by device_info Extend existing MDDQ register by possibility to query information about devices residing on a line card. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 62 ++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 23589d3b160a..521c1b195a3e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11643,7 +11643,11 @@ MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1); enum mlxsw_reg_mddq_query_type { MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1, - MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME = 3, + MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices + * on the slot, data_valid + * will be '0'. + */ + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME, }; /* reg_mddq_query_type @@ -11657,6 +11661,28 @@ MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8); */ MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4); +/* reg_mddq_response_msg_seq + * Response message sequential number. For a specific request, the response + * message sequential number is the following one. In addition, the last + * message should be 0. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8); + +/* reg_mddq_request_msg_seq + * Request message sequential number. + * The first message number should be 0. + * Access: Index + */ +MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8); + +/* reg_mddq_data_valid + * If set, the data in the data field is valid and contain the information + * for the queried index. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1); + /* reg_mddq_slot_info_provisioned * If set, the INI file is applied and the card is provisioned. 
* Access: RO @@ -11743,6 +11769,40 @@ mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index, *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload); } +/* reg_mddq_device_info_flash_owner + * If set, the device is the flash owner. Otherwise, a shared flash + * is used by this device (another device is the flash owner). + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1); + +/* reg_mddq_device_info_device_index + * Device index. The first device should number 0. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8); + +static inline void +mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index, + u8 request_msg_seq) +{ + __mlxsw_reg_mddq_pack(payload, slot_index, + MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO); + mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq); +} + +static inline void +mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq, + bool *p_data_valid, bool *p_flash_owner, + u8 *p_device_index) +{ + *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload); + *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload); + if (p_flash_owner) + *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload); + *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload); +} + #define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20 /* reg_mddq_slot_ascii_name -- cgit v1.2.3 From 8e2e10f651129d6e270a0b9a8bd3aaf0aa3f06d4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Apr 2022 06:44:25 +0300 Subject: mlxsw: core_linecards: Probe provisioned line cards for devices and attach them In case the line card is provisioned, go over all possible existing devices (gearboxes) on it and attach them, so devlink core is aware of them. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 1 + .../net/ethernet/mellanox/mlxsw/core_linecards.c | 99 ++++++++++++++++++++++ 2 files changed, 100 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c2a891287047..d008282d7f2e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -581,6 +581,7 @@ struct mlxsw_linecard { active:1; u16 hw_revision; u16 ini_version; + struct list_head device_list; }; struct mlxsw_linecard_types_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 5c9869dcf674..9dd8a56add4a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -87,11 +87,101 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard) return linecard->name; } +struct mlxsw_linecard_device { + struct list_head list; + u8 index; + struct mlxsw_linecard *linecard; + struct devlink_linecard_device *devlink_device; +}; + +static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + u8 device_index, bool flash_owner) +{ + struct mlxsw_linecard_device *device; + int err; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; + device->index = device_index; + device->linecard = linecard; + + device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard, + device_index, NULL); + if (IS_ERR(device->devlink_device)) { + err = PTR_ERR(device->devlink_device); + goto err_devlink_linecard_device_attach; + } + + list_add_tail(&device->list, &linecard->device_list); + return 0; + +err_devlink_linecard_device_attach: + kfree(device); + return err; +} + +static void mlxsw_linecard_device_detach(struct mlxsw_core *mlxsw_core, + struct mlxsw_linecard *linecard, + struct mlxsw_linecard_device *device) +{ + list_del(&device->list); + devlink_linecard_device_destroy(linecard->devlink_linecard, + device->devlink_device); + kfree(device); +} + +static void mlxsw_linecard_devices_detach(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + struct mlxsw_linecard_device *device, *tmp; + + list_for_each_entry_safe(device, tmp, &linecard->device_list, list) + mlxsw_linecard_device_detach(mlxsw_core, linecard, device); +} + +static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + u8 msg_seq = 0; + int err; + + do { + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + bool flash_owner; + bool data_valid; + u8 device_index; + + mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, + msg_seq); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return err; + mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, + &data_valid, &flash_owner, + &device_index); + if (!data_valid) + break; + err = mlxsw_linecard_device_attach(mlxsw_core, linecard, + device_index, flash_owner); + if (err) + goto rollback; + } while (msg_seq); + + return 0; + +rollback: + mlxsw_linecard_devices_detach(linecard); + return err; +} + static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) { linecard->provisioned = false; linecard->ready = false; linecard->active = false; + mlxsw_linecard_devices_detach(linecard); 
devlink_linecard_provision_fail(linecard->devlink_linecard); } @@ -232,6 +322,7 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, { struct mlxsw_linecards *linecards = linecard->linecards; const char *type; + int err; type = mlxsw_linecard_types_lookup(linecards, card_type); mlxsw_linecard_status_event_done(linecard, @@ -249,6 +340,11 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, return PTR_ERR(type); } } + err = mlxsw_linecard_devices_attach(linecard); + if (err) { + mlxsw_linecard_provision_fail(linecard); + return err; + } linecard->provisioned = true; linecard->hw_revision = hw_revision; linecard->ini_version = ini_version; @@ -261,6 +357,7 @@ static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard) mlxsw_linecard_status_event_done(linecard, MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION); linecard->provisioned = false; + mlxsw_linecard_devices_detach(linecard); devlink_linecard_provision_clear(linecard->devlink_linecard); } @@ -840,6 +937,7 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core, linecard->slot_index = slot_index; linecard->linecards = linecards; mutex_init(&linecard->lock); + INIT_LIST_HEAD(&linecard->device_list); devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core), slot_index, &mlxsw_linecard_ops, @@ -885,6 +983,7 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core, mlxsw_core_flush_owq(); if (linecard->active) mlxsw_linecard_active_clear(linecard); + mlxsw_linecard_devices_detach(linecard); devlink_linecard_destroy(linecard->devlink_linecard); mutex_destroy(&linecard->lock); } -- cgit v1.2.3 From 3b37130f48555f64be626e619e362e708c2cbd79 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Apr 2022 06:44:27 +0300 Subject: mlxsw: core_linecards: Expose HW revision and INI version Implement info_get() to expose HW revision of a linecard and loaded INI version. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- Documentation/networking/devlink/mlxsw.rst | 18 +++++++++++++ .../net/ethernet/mellanox/mlxsw/core_linecards.c | 31 ++++++++++++++++++++++ 2 files changed, 49 insertions(+) (limited to 'drivers') diff --git a/Documentation/networking/devlink/mlxsw.rst b/Documentation/networking/devlink/mlxsw.rst index cf857cb4ba8f..da1fbb265a11 100644 --- a/Documentation/networking/devlink/mlxsw.rst +++ b/Documentation/networking/devlink/mlxsw.rst @@ -58,6 +58,24 @@ The ``mlxsw`` driver reports the following versions - running - Three digit firmware version +Line card info versions +======================= + +The ``mlxsw`` driver reports the following versions for line cards + +.. 
list-table:: devlink line card info versions implemented + :widths: 5 5 90 + + * - Name + - Type + - Description + * - ``hw.revision`` + - fixed + - The hardware revision for this line card + * - ``ini.version`` + - running + - Version of line card INI loaded + Driver-specific Traps ===================== diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 9dd8a56add4a..b5f5b31bd31e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -834,12 +834,43 @@ static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard, *type_priv = ini_file; } +static int +mlxsw_linecard_info_get(struct devlink_linecard *devlink_linecard, void *priv, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct mlxsw_linecard *linecard = priv; + char buf[32]; + int err; + + mutex_lock(&linecard->lock); + if (!linecard->provisioned) { + err = 0; + goto unlock; + } + + sprintf(buf, "%d", linecard->hw_revision); + err = devlink_info_version_fixed_put(req, "hw.revision", buf); + if (err) + goto unlock; + + sprintf(buf, "%d", linecard->ini_version); + err = devlink_info_version_running_put(req, "ini.version", buf); + if (err) + goto unlock; + +unlock: + mutex_unlock(&linecard->lock); + return err; +} + static const struct devlink_linecard_ops mlxsw_linecard_ops = { .provision = mlxsw_linecard_provision, .unprovision = mlxsw_linecard_unprovision, .same_provision = mlxsw_linecard_same_provision, .types_count = mlxsw_linecard_types_count, .types_get = mlxsw_linecard_types_get, + .info_get = mlxsw_linecard_info_get, }; struct mlxsw_linecard_status_event { -- cgit v1.2.3 From c38e9bf338121b8c27e0458d817d534d005bc517 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Apr 2022 06:44:29 +0300 Subject: mlxsw: reg: Extend MDDQ device_info by FW version fields Add FW version fields to MDDQ device_info. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/core_linecards.c | 3 ++- drivers/net/ethernet/mellanox/mlxsw/reg.h | 27 +++++++++++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index b5f5b31bd31e..42fe93ac629d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -160,7 +160,8 @@ static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard) return err; mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, &data_valid, &flash_owner, - &device_index); + &device_index, NULL, + NULL, NULL); if (!data_valid) break; err = mlxsw_linecard_device_attach(mlxsw_core, linecard, diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 521c1b195a3e..04c4d7a4bd83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11782,6 +11782,24 @@ MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1); */ MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8); +/* reg_mddq_device_info_fw_major + * Major FW version number. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16); + +/* reg_mddq_device_info_fw_minor + * Minor FW version number. 
+ * Access: RO + */ +MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16); + +/* reg_mddq_device_info_fw_sub_minor + * Sub-minor FW version number. + * Access: RO + */ +MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16); + static inline void mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index, u8 request_msg_seq) @@ -11794,13 +11812,20 @@ mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index, static inline void mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq, bool *p_data_valid, bool *p_flash_owner, - u8 *p_device_index) + u8 *p_device_index, u16 *p_fw_major, + u16 *p_fw_minor, u16 *p_fw_sub_minor) { *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload); *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload); if (p_flash_owner) *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload); *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload); + if (p_fw_major) + *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload); + if (p_fw_minor) + *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload); + if (p_fw_sub_minor) + *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload); } #define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20 -- cgit v1.2.3 From e932b4bdbd7cf51907d733376ddcd5b679c02923 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Apr 2022 06:44:30 +0300 Subject: mlxsw: core_linecards: Expose device FW version over device info Extend MDDQ to obtain FW version of line card device and implement device_info_get() op to fill up the info with that. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- Documentation/networking/devlink/mlxsw.rst | 15 +++ .../net/ethernet/mellanox/mlxsw/core_linecards.c | 108 ++++++++++++++++++++- 2 files changed, 119 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/Documentation/networking/devlink/mlxsw.rst b/Documentation/networking/devlink/mlxsw.rst index da1fbb265a11..0af345680510 100644 --- a/Documentation/networking/devlink/mlxsw.rst +++ b/Documentation/networking/devlink/mlxsw.rst @@ -76,6 +76,21 @@ The ``mlxsw`` driver reports the following versions for line cards - running - Version of line card INI loaded +Line card device info versions +============================== + +The ``mlxsw`` driver reports the following versions for line card devices + +.. 
list-table:: devlink line card device info versions implemented + :widths: 5 5 90 + + * - Name + - Type + - Description + * - ``fw.version`` + - running + - Three digit firmware version + Driver-specific Traps ===================== diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 42fe93ac629d..2abd31a62776 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -87,13 +87,31 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard) return linecard->name; } +struct mlxsw_linecard_device_info { + u16 fw_major; + u16 fw_minor; + u16 fw_sub_minor; +}; + struct mlxsw_linecard_device { struct list_head list; u8 index; struct mlxsw_linecard *linecard; struct devlink_linecard_device *devlink_device; + struct mlxsw_linecard_device_info info; }; +static struct mlxsw_linecard_device * +mlxsw_linecard_device_lookup(struct mlxsw_linecard *linecard, u8 index) +{ + struct mlxsw_linecard_device *device; + + list_for_each_entry(device, &linecard->device_list, list) + if (device->index == index) + return device; + return NULL; +} + static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core, struct mlxsw_linecard *linecard, u8 device_index, bool flash_owner) @@ -108,7 +126,7 @@ static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core, device->linecard = linecard; device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard, - device_index, NULL); + device_index, device); if (IS_ERR(device->devlink_device)) { err = PTR_ERR(device->devlink_device); goto err_devlink_linecard_device_attach; @@ -177,6 +195,77 @@ rollback: return err; } +static void mlxsw_linecard_device_update(struct mlxsw_linecard *linecard, + u8 device_index, + struct mlxsw_linecard_device_info *info) +{ + struct mlxsw_linecard_device *device; + + device = mlxsw_linecard_device_lookup(linecard, device_index); + if (!device) + return; + device->info = *info; +} + +static int mlxsw_linecard_devices_update(struct mlxsw_linecard *linecard) +{ + struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; + u8 msg_seq = 0; + + do { + struct mlxsw_linecard_device_info info; + char mddq_pl[MLXSW_REG_MDDQ_LEN]; + bool data_valid; + u8 device_index; + int err; + + mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, + msg_seq); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); + if (err) + return err; + mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, + &data_valid, NULL, + &device_index, + &info.fw_major, + &info.fw_minor, + &info.fw_sub_minor); + if (!data_valid) + break; + mlxsw_linecard_device_update(linecard, device_index, &info); + } while (msg_seq); + + return 0; +} + +static int +mlxsw_linecard_device_info_get(struct devlink_linecard_device *devlink_linecard_device, + void *priv, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct mlxsw_linecard_device *device = priv; + struct mlxsw_linecard_device_info *info; + struct mlxsw_linecard *linecard; + char buf[32]; + + linecard = device->linecard; + mutex_lock(&linecard->lock); + if (!linecard->active) { + mutex_unlock(&linecard->lock); + return 0; + } + + info = &device->info; + + sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor, + info->fw_sub_minor); + mutex_unlock(&linecard->lock); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); +} + static void 
mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) { linecard->provisioned = false; @@ -390,11 +479,18 @@ static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard) return 0; } -static void mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) +static int mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) { + int err; + + err = mlxsw_linecard_devices_update(linecard); + if (err) + return err; + mlxsw_linecard_active_ops_call(linecard); linecard->active = true; devlink_linecard_activate(linecard->devlink_linecard); + return 0; } static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard) @@ -443,8 +539,11 @@ static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards, goto out; } - if (active && linecard->active != active) - mlxsw_linecard_active_set(linecard); + if (active && linecard->active != active) { + err = mlxsw_linecard_active_set(linecard); + if (err) + goto out; + } if (!active && linecard->active != active) mlxsw_linecard_active_clear(linecard); @@ -872,6 +971,7 @@ static const struct devlink_linecard_ops mlxsw_linecard_ops = { .types_count = mlxsw_linecard_types_count, .types_get = mlxsw_linecard_types_get, .info_get = mlxsw_linecard_info_get, + .device_info_get = mlxsw_linecard_device_info_get, }; struct mlxsw_linecard_status_event { -- cgit v1.2.3 From 985e254c738ce735d744beeb4bcf77ba7d81cf3a Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Thu, 21 Apr 2022 18:34:41 +0800 Subject: net: mscc: ocelot: Remove useless code payload only memset but no use at all, so we drop them. Signed-off-by: Haowen Bai Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot_vcap.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index c8701ac955a8..1e74bdb215ec 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -672,12 +672,10 @@ static void is1_entry_set(struct ocelot *ocelot, int ix, { const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1]; struct ocelot_vcap_key_vlan *tag = &filter->vlan; - struct ocelot_vcap_u64 payload; struct vcap_data data; int row = ix / 2; u32 type; - memset(&payload, 0, sizeof(payload)); memset(&data, 0, sizeof(data)); /* Read row */ @@ -813,11 +811,9 @@ static void es0_entry_set(struct ocelot *ocelot, int ix, { const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0]; struct ocelot_vcap_key_vlan *tag = &filter->vlan; - struct ocelot_vcap_u64 payload; struct vcap_data data; int row = ix; - memset(&payload, 0, sizeof(payload)); memset(&data, 0, sizeof(data)); /* Read row */ -- cgit v1.2.3 From 60d78e9fce8886b403a21fecb0dd13f9c0f12e1f Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Thu, 21 Apr 2022 21:51:48 +0800 Subject: ethernet: broadcom/sb1250-mac: remove BUG_ON in sbmac_probe() Replace the BUG_ON() with returning error code to handle the fault more gracefully. Signed-off-by: Yang Yingliang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/sb1250-mac.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index a1a38456c9a3..5d5f10180158 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2534,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev) int err; res = platform_get_resource(pldev, IORESOURCE_MEM, 0); - BUG_ON(!res); + if (!res) { + printk(KERN_ERR "%s: failed to get resource\n", + dev_name(&pldev->dev)); + err = -EINVAL; + goto out_out; + } sbm_base = ioremap(res->start, resource_size(res)); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", -- cgit v1.2.3 From a00e41bf2f47343b8d0a265cc7710f89e9233dcd Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 21 Apr 2022 18:49:02 +0300 Subject: net: ethernet: mtk_eth_soc: add check for allocation failure Check if the kzalloc() failed. Fixes: 804775dfc288 ("net: ethernet: mtk_eth_soc: add support for Wireless Ethernet Dispatch (WED)") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_wed.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 5530f7991d1d..8f0cd3196aac 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -827,6 +827,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, goto unlock; hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + goto unlock; hw->node = np; hw->regs = regs; hw->eth = eth; -- cgit v1.2.3 From c5794097b269f15961ed78f7f27b50e51766dec9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 21 Apr 2022 13:53:33 -0500 Subject: net: ipa: compute proper aggregation limit The aggregation byte limit for an endpoint is currently computed based on the endpoint's receive buffer size. However, some bytes at the front of each receive buffer are reserved on the assumption that--as with SKBs--it might be useful to insert data (such as headers) before what lands in the buffer. The aggregation byte limit currently doesn't take into account that reserved space, and as a result, aggregation could require space past that which is available in the buffer. Fix this by reducing the size used to compute the aggregation byte limit by the NET_SKB_PAD offset reserved for each receive buffer. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_endpoint.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 888e94278a84..e133eb2bebcf 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -130,9 +130,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, */ if (data->endpoint.config.aggregation) { limit += SZ_1K * aggr_byte_limit_max(ipa->version); - if (buffer_size > limit) { + if (buffer_size - NET_SKB_PAD > limit) { dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n", - data->endpoint_id, buffer_size, limit); + data->endpoint_id, + buffer_size - NET_SKB_PAD, limit); return false; } @@ -739,6 +740,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) if (endpoint->data->aggregation) { if (!endpoint->toward_ipa) { const struct ipa_endpoint_rx_data *rx_data; + u32 buffer_size; bool close_eof; u32 limit; @@ -746,7 +748,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); - limit = ipa_aggr_size_kb(rx_data->buffer_size); + buffer_size = rx_data->buffer_size; + limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD); val |= aggr_byte_limit_encoded(version, limit); limit = IPA_AGGR_TIME_LIMIT; -- cgit v1.2.3 From 68d57a07bfe5bb29b80cd8b8fa24c9d1ea104124 Mon Sep 17 00:00:00 2001 From: Srinivasan Raju Date: Thu, 24 Feb 2022 18:20:07 +0000 Subject: wireless: add plfxlc driver for pureLiFi X, XL, XC devices This is a driver for pureLiFi X, XL, XC devices which use light to transmit data, so they are not compatible with normal Wi-Fi devices. The driver uses separate NL80211_BAND_LC band to distinguish from Wi-Fi. The driver is based on 802.11 softMAC Architecture and uses native 802.11 for configuration and management. Station and Ad-Hoc modes are supported. The driver is compiled and tested in ARM, x86 architectures and compiled in powerpc architecture. This driver implementation has been based on the zd1211rw driver. 
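For context, the "softMAC" architecture mentioned above means the device implements only the PHY and low-level frame transport while mac80211 provides the 802.11 MLME (scanning, authentication, association); the driver's job is essentially to allocate an ieee80211_hw, describe its capabilities and its band, and register it. The outline below is an illustrative sketch only, not part of the patch: the names example_priv and example_register are invented here, and the driver's real version of this flow is plfxlc_mac_alloc_hw() in mac.c together with probe() in usb.c further down in this diff.

#include <net/mac80211.h>

struct example_priv {
	/* per-device state; the real driver keeps struct plfxlc_mac here */
	struct ieee80211_hw *hw;
};

/* mac80211 requires at least .tx, .start, .stop, .add_interface,
 * .remove_interface and .configure_filter; plfxlc_ops fills these in.
 */
static const struct ieee80211_ops example_ops = {
	/* callbacks omitted in this sketch */
};

static struct ieee80211_hw *example_register(struct device *dev,
					     const u8 *perm_addr)
{
	struct ieee80211_hw *hw;

	/* one allocation covers the mac80211 state plus driver private data */
	hw = ieee80211_alloc_hw(sizeof(struct example_priv), &example_ops);
	if (!hw)
		return NULL;

	SET_IEEE80211_DEV(hw, dev);		/* parent device (the USB interface) */
	SET_IEEE80211_PERM_ADDR(hw, perm_addr);	/* MAC address read from the device */

	ieee80211_hw_set(hw, SIGNAL_DBM);	/* advertise capabilities to mac80211 */
	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_ADHOC);
	/* channels and bitrates are exposed via hw->wiphy->bands[NL80211_BAND_LC],
	 * the light-communication band this driver uses instead of 2.4/5 GHz
	 */

	if (ieee80211_register_hw(hw)) {	/* hand control over to mac80211 */
		ieee80211_free_hw(hw);
		return NULL;
	}
	return hw;
}

Once registered, mac80211 drives the device entirely through the ops callbacks; everything that follows in this patch (chip.c, firmware.c, usb.c, mac.c) implements those callbacks on top of USB bulk and vendor requests.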
Signed-off-by: Srinivasan Raju Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220224182042.132466-3-srini.raju@purelifi.com --- MAINTAINERS | 6 + drivers/net/wireless/Kconfig | 1 + drivers/net/wireless/Makefile | 1 + drivers/net/wireless/purelifi/Kconfig | 17 + drivers/net/wireless/purelifi/Makefile | 2 + drivers/net/wireless/purelifi/plfxlc/Kconfig | 14 + drivers/net/wireless/purelifi/plfxlc/Makefile | 3 + drivers/net/wireless/purelifi/plfxlc/chip.c | 99 +++ drivers/net/wireless/purelifi/plfxlc/chip.h | 70 ++ drivers/net/wireless/purelifi/plfxlc/firmware.c | 276 ++++++++ drivers/net/wireless/purelifi/plfxlc/intf.h | 52 ++ drivers/net/wireless/purelifi/plfxlc/mac.c | 754 ++++++++++++++++++++ drivers/net/wireless/purelifi/plfxlc/mac.h | 184 +++++ drivers/net/wireless/purelifi/plfxlc/usb.c | 892 ++++++++++++++++++++++++ drivers/net/wireless/purelifi/plfxlc/usb.h | 198 ++++++ 15 files changed, 2569 insertions(+) create mode 100644 drivers/net/wireless/purelifi/Kconfig create mode 100644 drivers/net/wireless/purelifi/Makefile create mode 100644 drivers/net/wireless/purelifi/plfxlc/Kconfig create mode 100644 drivers/net/wireless/purelifi/plfxlc/Makefile create mode 100644 drivers/net/wireless/purelifi/plfxlc/chip.c create mode 100644 drivers/net/wireless/purelifi/plfxlc/chip.h create mode 100644 drivers/net/wireless/purelifi/plfxlc/firmware.c create mode 100644 drivers/net/wireless/purelifi/plfxlc/intf.h create mode 100644 drivers/net/wireless/purelifi/plfxlc/mac.c create mode 100644 drivers/net/wireless/purelifi/plfxlc/mac.h create mode 100644 drivers/net/wireless/purelifi/plfxlc/usb.c create mode 100644 drivers/net/wireless/purelifi/plfxlc/usb.h (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index ba405f6ec31a..f9a37b926aee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15970,6 +15970,12 @@ T: git git://linuxtv.org/media_tree.git F: Documentation/admin-guide/media/pulse8-cec.rst F: drivers/media/cec/usb/pulse8/ +PURELIFI PLFXLC DRIVER +M: Srinivasan Raju +L: linux-wireless@vger.kernel.org +S: Supported +F: drivers/net/wireless/purelifi/plfxlc/ + PVRUSB2 VIDEO4LINUX DRIVER M: Mike Isely L: pvrusb2@isely.net (subscribers-only) diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index e78ff7af6517..cb1c15012dd0 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -28,6 +28,7 @@ source "drivers/net/wireless/intersil/Kconfig" source "drivers/net/wireless/marvell/Kconfig" source "drivers/net/wireless/mediatek/Kconfig" source "drivers/net/wireless/microchip/Kconfig" +source "drivers/net/wireless/purelifi/Kconfig" source "drivers/net/wireless/ralink/Kconfig" source "drivers/net/wireless/realtek/Kconfig" source "drivers/net/wireless/rsi/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 76885e5f0ea7..abf3e5c87ca7 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/ obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/ +obj-$(CONFIG_WLAN_VENDOR_PURELIFI) += purelifi/ obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/ obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/ diff --git a/drivers/net/wireless/purelifi/Kconfig b/drivers/net/wireless/purelifi/Kconfig new file mode 100644 index 000000000000..e39afec3dcae --- /dev/null +++ b/drivers/net/wireless/purelifi/Kconfig @@ -0,0 +1,17 @@ +# 
SPDX-License-Identifier: GPL-2.0-only +config WLAN_VENDOR_PURELIFI + bool "pureLiFi devices" + default y + help + If you have a pureLiFi device, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_PURELIFI + +source "drivers/net/wireless/purelifi/plfxlc/Kconfig" + +endif # WLAN_VENDOR_PURELIFI diff --git a/drivers/net/wireless/purelifi/Makefile b/drivers/net/wireless/purelifi/Makefile new file mode 100644 index 000000000000..0bd6880b022c --- /dev/null +++ b/drivers/net/wireless/purelifi/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PLFXLC) := plfxlc/ diff --git a/drivers/net/wireless/purelifi/plfxlc/Kconfig b/drivers/net/wireless/purelifi/plfxlc/Kconfig new file mode 100644 index 000000000000..4e0be27a5e0e --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config PLFXLC + tristate "pureLiFi X, XL, XC device support" + depends on CFG80211 && MAC80211 && USB + help + This option adds support for pureLiFi LiFi wireless USB + adapters. The pureLiFi X, XL, XC USB devices are based on + 802.11 OFDM PHY but uses light as the transmission medium. + The driver supports common 802.11 encryption/authentication + methods including Open, WPA, WPA2-Personal and + WPA2-Enterprise (802.1X). + + To compile this driver as a module, choose m here. The module will + be called plfxlc. diff --git a/drivers/net/wireless/purelifi/plfxlc/Makefile b/drivers/net/wireless/purelifi/plfxlc/Makefile new file mode 100644 index 000000000000..7ed954e871d6 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PLFXLC) := plfxlc.o +plfxlc-objs += chip.o firmware.o usb.o mac.o diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.c b/drivers/net/wireless/purelifi/plfxlc/chip.c new file mode 100644 index 000000000000..a5ec10b66ed5 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/chip.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include + +#include "chip.h" +#include "mac.h" +#include "usb.h" + +void plfxlc_chip_init(struct plfxlc_chip *chip, + struct ieee80211_hw *hw, + struct usb_interface *intf) +{ + memset(chip, 0, sizeof(*chip)); + mutex_init(&chip->mutex); + plfxlc_usb_init(&chip->usb, hw, intf); +} + +void plfxlc_chip_release(struct plfxlc_chip *chip) +{ + plfxlc_usb_release(&chip->usb); + mutex_destroy(&chip->mutex); +} + +int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval, + u8 dtim_period, int type) +{ + if (!interval || + (chip->beacon_set && + le16_to_cpu(chip->beacon_interval) == interval)) + return 0; + + chip->beacon_interval = cpu_to_le16(interval); + chip->beacon_set = true; + return plfxlc_usb_wreq(chip->usb.ez_usb, + &chip->beacon_interval, + sizeof(chip->beacon_interval), + USB_REQ_BEACON_INTERVAL_WR); +} + +int plfxlc_chip_init_hw(struct plfxlc_chip *chip) +{ + unsigned char *addr = plfxlc_mac_get_perm_addr(plfxlc_chip_to_mac(chip)); + struct usb_device *udev = interface_to_usbdev(chip->usb.intf); + + pr_info("plfxlc chip %04x:%04x v%02x %pM %s\n", + le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct), + le16_to_cpu(udev->descriptor.bcdDevice), + addr, + 
plfxlc_speed(udev->speed)); + + return plfxlc_set_beacon_interval(chip, 100, 0, 0); +} + +int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value) +{ + int r; + __le16 radio_on = cpu_to_le16(value); + + r = plfxlc_usb_wreq(chip->usb.ez_usb, &radio_on, + sizeof(value), USB_REQ_POWER_WR); + if (r) + dev_err(plfxlc_chip_dev(chip), "POWER_WR failed (%d)\n", r); + return r; +} + +int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip) +{ + plfxlc_usb_enable_tx(&chip->usb); + return plfxlc_usb_enable_rx(&chip->usb); +} + +void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip) +{ + u8 value = 0; + + plfxlc_usb_wreq(chip->usb.ez_usb, + &value, sizeof(value), USB_REQ_RXTX_WR); + plfxlc_usb_disable_rx(&chip->usb); + plfxlc_usb_disable_tx(&chip->usb); +} + +int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate) +{ + int r; + + if (!chip) + return -EINVAL; + + r = plfxlc_usb_wreq(chip->usb.ez_usb, + &rate, sizeof(rate), USB_REQ_RATE_WR); + if (r) + dev_err(plfxlc_chip_dev(chip), "RATE_WR failed (%d)\n", r); + return r; +} diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.h b/drivers/net/wireless/purelifi/plfxlc/chip.h new file mode 100644 index 000000000000..dc04bbed07ac --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/chip.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 pureLiFi + */ + +#ifndef PLFXLC_CHIP_H +#define PLFXLC_CHIP_H + +#include + +#include "usb.h" + +enum unit_type { + STA = 0, + AP = 1, +}; + +enum { + PLFXLC_RADIO_OFF = 0, + PLFXLC_RADIO_ON = 1, +}; + +struct plfxlc_chip { + struct plfxlc_usb usb; + struct mutex mutex; /* lock to protect chip data */ + enum unit_type unit_type; + u16 link_led; + u8 beacon_set; + u16 beacon_interval; +}; + +struct plfxlc_mc_hash { + u32 low; + u32 high; +}; + +#define plfxlc_chip_dev(chip) (&(chip)->usb.intf->dev) + +void plfxlc_chip_init(struct plfxlc_chip *chip, + struct ieee80211_hw *hw, + struct usb_interface *intf); + +void plfxlc_chip_release(struct plfxlc_chip *chip); + +void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip); + +int plfxlc_chip_init_hw(struct plfxlc_chip *chip); + +int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip); + +int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate); + +int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval, + u8 dtim_period, int type); + +int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value); + +static inline struct plfxlc_chip *plfxlc_usb_to_chip(struct plfxlc_usb + *usb) +{ + return container_of(usb, struct plfxlc_chip, usb); +} + +static inline void plfxlc_mc_add_all(struct plfxlc_mc_hash *hash) +{ + hash->low = 0xffffffff; + hash->high = 0xffffffff; +} + +#endif /* PLFXLC_CHIP_H */ diff --git a/drivers/net/wireless/purelifi/plfxlc/firmware.c b/drivers/net/wireless/purelifi/plfxlc/firmware.c new file mode 100644 index 000000000000..8a3529d07927 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/firmware.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include + +#include "mac.h" +#include "usb.h" + +static int send_vendor_request(struct usb_device *udev, int request, + unsigned char *buffer, int buffer_size) +{ + return usb_control_msg(udev, + usb_rcvctrlpipe(udev, 0), + request, 0xC0, 0, 0, + buffer, buffer_size, PLF_USB_TIMEOUT); +} + +static int send_vendor_command(struct usb_device *udev, int request, + unsigned char *buffer, int buffer_size) +{ + return usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + request, 
USB_TYPE_VENDOR /*0x40*/, 0, 0, + buffer, buffer_size, PLF_USB_TIMEOUT); +} + +int plfxlc_download_fpga(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + unsigned char *fpga_dmabuff = NULL; + const struct firmware *fw = NULL; + int blk_tran_len = PLF_BULK_TLEN; + unsigned char *fw_data; + const char *fw_name; + int r, actual_length; + int fw_data_i = 0; + + if ((le16_to_cpu(udev->descriptor.idVendor) == + PURELIFI_X_VENDOR_ID_0) && + (le16_to_cpu(udev->descriptor.idProduct) == + PURELIFI_X_PRODUCT_ID_0)) { + fw_name = "plfxlc/lifi-x.bin"; + dev_dbg(&intf->dev, "bin file for X selected\n"); + + } else if ((le16_to_cpu(udev->descriptor.idVendor)) == + PURELIFI_XC_VENDOR_ID_0 && + (le16_to_cpu(udev->descriptor.idProduct) == + PURELIFI_XC_PRODUCT_ID_0)) { + fw_name = "plfxlc/lifi-xc.bin"; + dev_dbg(&intf->dev, "bin file for XC selected\n"); + + } else { + r = -EINVAL; + goto error; + } + + r = request_firmware(&fw, fw_name, &intf->dev); + if (r) { + dev_err(&intf->dev, "request_firmware failed (%d)\n", r); + goto error; + } + fpga_dmabuff = kmalloc(PLF_FPGA_STATUS_LEN, GFP_KERNEL); + + if (!fpga_dmabuff) { + r = -ENOMEM; + goto error_free_fw; + } + send_vendor_request(udev, PLF_VNDR_FPGA_SET_REQ, + fpga_dmabuff, PLF_FPGA_STATUS_LEN); + + send_vendor_command(udev, PLF_VNDR_FPGA_SET_CMD, NULL, 0); + + if (fpga_dmabuff[0] != PLF_FPGA_MG) { + dev_err(&intf->dev, "fpga_dmabuff[0] is wrong\n"); + r = -EINVAL; + goto error_free_fw; + } + + for (fw_data_i = 0; fw_data_i < fw->size;) { + int tbuf_idx; + + if ((fw->size - fw_data_i) < blk_tran_len) + blk_tran_len = fw->size - fw_data_i; + + fw_data = kmemdup(&fw->data[fw_data_i], blk_tran_len, + GFP_KERNEL); + if (!fw_data) { + r = -ENOMEM; + goto error_free_fw; + } + + for (tbuf_idx = 0; tbuf_idx < blk_tran_len; tbuf_idx++) { + /* u8 bit reverse */ + fw_data[tbuf_idx] = bitrev8(fw_data[tbuf_idx]); + } + r = usb_bulk_msg(udev, + usb_sndbulkpipe(interface_to_usbdev(intf), + fpga_dmabuff[0] & 0xff), + fw_data, + blk_tran_len, + &actual_length, + 2 * PLF_USB_TIMEOUT); + + if (r) + dev_err(&intf->dev, "Bulk msg failed (%d)\n", r); + + kfree(fw_data); + fw_data_i += blk_tran_len; + } + + kfree(fpga_dmabuff); + fpga_dmabuff = kmalloc(PLF_FPGA_STATE_LEN, GFP_KERNEL); + if (!fpga_dmabuff) { + r = -ENOMEM; + goto error_free_fw; + } + memset(fpga_dmabuff, 0xff, PLF_FPGA_STATE_LEN); + + send_vendor_request(udev, PLF_VNDR_FPGA_STATE_REQ, fpga_dmabuff, + PLF_FPGA_STATE_LEN); + + dev_dbg(&intf->dev, "%*ph\n", 8, fpga_dmabuff); + + if (fpga_dmabuff[0] != 0) { + r = -EINVAL; + goto error_free_fw; + } + + send_vendor_command(udev, PLF_VNDR_FPGA_STATE_CMD, NULL, 0); + + msleep(PLF_MSLEEP_TIME); + +error_free_fw: + kfree(fpga_dmabuff); + release_firmware(fw); +error: + return r; +} + +int plfxlc_download_xl_firmware(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + const struct firmware *fwp = NULL; + struct plfxlc_firmware_file file = {0}; + const char *fw_pack; + int s, r; + u8 *buf; + u32 i; + + r = send_vendor_command(udev, PLF_VNDR_XL_FW_CMD, NULL, 0); + msleep(PLF_MSLEEP_TIME); + + if (r) { + dev_err(&intf->dev, "vendor command failed (%d)\n", r); + return -EINVAL; + } + /* Code for single pack file download */ + + fw_pack = "plfxlc/lifi-xl.bin"; + + r = request_firmware(&fwp, fw_pack, &intf->dev); + if (r) { + dev_err(&intf->dev, "Request_firmware failed (%d)\n", r); + return -EINVAL; + } + file.total_files = get_unaligned_le32(&fwp->data[0]); + file.total_size = 
get_unaligned_le32(&fwp->size); + + dev_dbg(&intf->dev, "XL Firmware (%d, %d)\n", + file.total_files, file.total_size); + + buf = kzalloc(PLF_XL_BUF_LEN, GFP_KERNEL); + if (!buf) { + release_firmware(fwp); + return -ENOMEM; + } + + if (file.total_files > 10) { + dev_err(&intf->dev, "Too many files (%d)\n", file.total_files); + release_firmware(fwp); + kfree(buf); + return -EINVAL; + } + + /* Download firmware files in multiple steps */ + for (s = 0; s < file.total_files; s++) { + buf[0] = s; + r = send_vendor_command(udev, PLF_VNDR_XL_FILE_CMD, buf, + PLF_XL_BUF_LEN); + + if (s < file.total_files - 1) + file.size = get_unaligned_le32(&fwp->data[4 + ((s + 1) * 4)]) + - get_unaligned_le32(&fwp->data[4 + (s) * 4]); + else + file.size = file.total_size - + get_unaligned_le32(&fwp->data[4 + (s) * 4]); + + if (file.size > file.total_size || file.size > 60000) { + dev_err(&intf->dev, "File size is too large (%d)\n", file.size); + break; + } + + file.start_addr = get_unaligned_le32(&fwp->data[4 + (s * 4)]); + + if (file.size % PLF_XL_BUF_LEN && s < 2) + file.size += PLF_XL_BUF_LEN - file.size % PLF_XL_BUF_LEN; + + file.control_packets = file.size / PLF_XL_BUF_LEN; + + for (i = 0; i < file.control_packets; i++) { + memcpy(buf, + &fwp->data[file.start_addr + (i * PLF_XL_BUF_LEN)], + PLF_XL_BUF_LEN); + r = send_vendor_command(udev, PLF_VNDR_XL_DATA_CMD, buf, + PLF_XL_BUF_LEN); + } + dev_dbg(&intf->dev, "fw-dw step=%d,r=%d size=%d\n", s, r, + file.size); + } + release_firmware(fwp); + kfree(buf); + + /* Code for single pack file download ends fw download finish */ + + r = send_vendor_command(udev, PLF_VNDR_XL_EX_CMD, NULL, 0); + dev_dbg(&intf->dev, "Download fpga (4) (%d)\n", r); + + return 0; +} + +int plfxlc_upload_mac_and_serial(struct usb_interface *intf, + unsigned char *hw_address, + unsigned char *serial_number) +{ + struct usb_device *udev = interface_to_usbdev(intf); + unsigned long long firmware_version; + unsigned char *dma_buffer = NULL; + + dma_buffer = kmalloc(PLF_SERIAL_LEN, GFP_KERNEL); + if (!dma_buffer) + return -ENOMEM; + + BUILD_BUG_ON(ETH_ALEN > PLF_SERIAL_LEN); + BUILD_BUG_ON(PLF_FW_VER_LEN > PLF_SERIAL_LEN); + + send_vendor_request(udev, PLF_MAC_VENDOR_REQUEST, dma_buffer, + ETH_ALEN); + + memcpy(hw_address, dma_buffer, ETH_ALEN); + + send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST, + dma_buffer, PLF_SERIAL_LEN); + + send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST, + dma_buffer, PLF_SERIAL_LEN); + + memcpy(serial_number, dma_buffer, PLF_SERIAL_LEN); + + memset(dma_buffer, 0x00, PLF_SERIAL_LEN); + + send_vendor_request(udev, PLF_FIRMWARE_VERSION_VENDOR_REQUEST, + (unsigned char *)dma_buffer, PLF_FW_VER_LEN); + + memcpy(&firmware_version, dma_buffer, PLF_FW_VER_LEN); + + dev_info(&intf->dev, "Firmware Version: %llu\n", firmware_version); + kfree(dma_buffer); + + dev_dbg(&intf->dev, "Mac: %pM\n", hw_address); + + return 0; +} + diff --git a/drivers/net/wireless/purelifi/plfxlc/intf.h b/drivers/net/wireless/purelifi/plfxlc/intf.h new file mode 100644 index 000000000000..5ae89343b579 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/intf.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 pureLiFi + */ + +#define PURELIFI_BYTE_NUM_ALIGNMENT 4 +#define ETH_ALEN 6 +#define AP_USER_LIMIT 8 + +#define PLF_VNDR_FPGA_STATE_REQ 0x30 +#define PLF_VNDR_FPGA_SET_REQ 0x33 +#define PLF_VNDR_FPGA_SET_CMD 0x34 +#define PLF_VNDR_FPGA_STATE_CMD 0x35 + +#define PLF_VNDR_XL_FW_CMD 0x80 +#define PLF_VNDR_XL_DATA_CMD 0x81 +#define 
PLF_VNDR_XL_FILE_CMD 0x82 +#define PLF_VNDR_XL_EX_CMD 0x83 + +#define PLF_MAC_VENDOR_REQUEST 0x36 +#define PLF_SERIAL_NUMBER_VENDOR_REQUEST 0x37 +#define PLF_FIRMWARE_VERSION_VENDOR_REQUEST 0x39 +#define PLF_SERIAL_LEN 14 +#define PLF_FW_VER_LEN 8 + +struct rx_status { + __be16 rssi; + u8 rate_idx; + u8 pad; + __be64 crc_error_count; +} __packed; + +enum plf_usb_req_enum { + USB_REQ_TEST_WR = 0, + USB_REQ_MAC_WR = 1, + USB_REQ_POWER_WR = 2, + USB_REQ_RXTX_WR = 3, + USB_REQ_BEACON_WR = 4, + USB_REQ_BEACON_INTERVAL_WR = 5, + USB_REQ_RTS_CTS_RATE_WR = 6, + USB_REQ_HASH_WR = 7, + USB_REQ_DATA_TX = 8, + USB_REQ_RATE_WR = 9, + USB_REQ_SET_FREQ = 15 +}; + +struct plf_usb_req { + __be32 id; /* should be plf_usb_req_enum */ + __be32 len; + u8 buf[512]; +}; + diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c new file mode 100644 index 000000000000..90e552532701 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/mac.c @@ -0,0 +1,754 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "chip.h" +#include "mac.h" +#include "usb.h" + +static const struct ieee80211_rate plfxlc_rates[] = { + { .bitrate = 10, + .hw_value = PURELIFI_CCK_RATE_1M, + .flags = 0 }, + { .bitrate = 20, + .hw_value = PURELIFI_CCK_RATE_2M, + .hw_value_short = PURELIFI_CCK_RATE_2M + | PURELIFI_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = PURELIFI_CCK_RATE_5_5M, + .hw_value_short = PURELIFI_CCK_RATE_5_5M + | PURELIFI_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = PURELIFI_CCK_RATE_11M, + .hw_value_short = PURELIFI_CCK_RATE_11M + | PURELIFI_CCK_PREA_SHORT, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 60, + .hw_value = PURELIFI_OFDM_RATE_6M, + .flags = 0 }, + { .bitrate = 90, + .hw_value = PURELIFI_OFDM_RATE_9M, + .flags = 0 }, + { .bitrate = 120, + .hw_value = PURELIFI_OFDM_RATE_12M, + .flags = 0 }, + { .bitrate = 180, + .hw_value = PURELIFI_OFDM_RATE_18M, + .flags = 0 }, + { .bitrate = 240, + .hw_value = PURELIFI_OFDM_RATE_24M, + .flags = 0 }, + { .bitrate = 360, + .hw_value = PURELIFI_OFDM_RATE_36M, + .flags = 0 }, + { .bitrate = 480, + .hw_value = PURELIFI_OFDM_RATE_48M, + .flags = 0 }, + { .bitrate = 540, + .hw_value = PURELIFI_OFDM_RATE_54M, + .flags = 0 } +}; + +static const struct ieee80211_channel plfxlc_channels[] = { + { .center_freq = 2412, .hw_value = 1 }, + { .center_freq = 2417, .hw_value = 2 }, + { .center_freq = 2422, .hw_value = 3 }, + { .center_freq = 2427, .hw_value = 4 }, + { .center_freq = 2432, .hw_value = 5 }, + { .center_freq = 2437, .hw_value = 6 }, + { .center_freq = 2442, .hw_value = 7 }, + { .center_freq = 2447, .hw_value = 8 }, + { .center_freq = 2452, .hw_value = 9 }, + { .center_freq = 2457, .hw_value = 10 }, + { .center_freq = 2462, .hw_value = 11 }, + { .center_freq = 2467, .hw_value = 12 }, + { .center_freq = 2472, .hw_value = 13 }, + { .center_freq = 2484, .hw_value = 14 }, +}; + +int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address) +{ + SET_IEEE80211_PERM_ADDR(hw, hw_address); + return 0; +} + +int plfxlc_mac_init_hw(struct ieee80211_hw *hw) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct plfxlc_chip *chip = &mac->chip; + int r; + + r = plfxlc_chip_init_hw(chip); + if (r) { + dev_warn(plfxlc_mac_dev(mac), "init hw failed (%d)\n", r); + return r; + } + + dev_dbg(plfxlc_mac_dev(mac), 
"irq_disabled (%d)\n", irqs_disabled()); + regulatory_hint(hw->wiphy, "00"); + return r; +} + +void plfxlc_mac_release(struct plfxlc_mac *mac) +{ + plfxlc_chip_release(&mac->chip); + lockdep_assert_held(&mac->lock); +} + +int plfxlc_op_start(struct ieee80211_hw *hw) +{ + plfxlc_hw_mac(hw)->chip.usb.initialized = 1; + return 0; +} + +void plfxlc_op_stop(struct ieee80211_hw *hw) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + + clear_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); +} + +int plfxlc_restore_settings(struct plfxlc_mac *mac) +{ + int beacon_interval, beacon_period; + struct sk_buff *beacon; + + spin_lock_irq(&mac->lock); + beacon_interval = mac->beacon.interval; + beacon_period = mac->beacon.period; + spin_unlock_irq(&mac->lock); + + if (mac->type != NL80211_IFTYPE_ADHOC) + return 0; + + if (mac->vif) { + beacon = ieee80211_beacon_get(mac->hw, mac->vif); + if (beacon) { + /*beacon is hardcoded in firmware */ + kfree_skb(beacon); + /* Returned skb is used only once and lowlevel + * driver is responsible for freeing it. + */ + } + } + + plfxlc_set_beacon_interval(&mac->chip, beacon_interval, + beacon_period, mac->type); + + spin_lock_irq(&mac->lock); + mac->beacon.last_update = jiffies; + spin_unlock_irq(&mac->lock); + + return 0; +} + +static void plfxlc_mac_tx_status(struct ieee80211_hw *hw, + struct sk_buff *skb, + int ackssi, + struct tx_status *tx_status) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int success = 1; + + ieee80211_tx_info_clear_status(info); + if (tx_status) + success = !tx_status->failure; + + if (success) + info->flags |= IEEE80211_TX_STAT_ACK; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; + + info->status.ack_signal = 50; + ieee80211_tx_status_irqsafe(hw, skb); +} + +void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hw *hw = info->rate_driver_data[0]; + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct sk_buff_head *q = NULL; + + ieee80211_tx_info_clear_status(info); + skb_pull(skb, sizeof(struct plfxlc_ctrlset)); + + if (unlikely(error || + (info->flags & IEEE80211_TX_CTL_NO_ACK))) { + ieee80211_tx_status_irqsafe(hw, skb); + return; + } + + q = &mac->ack_wait_queue; + + skb_queue_tail(q, skb); + while (skb_queue_len(q)/* > PURELIFI_MAC_MAX_ACK_WAITERS*/) { + plfxlc_mac_tx_status(hw, skb_dequeue(q), + mac->ack_pending ? + mac->ack_signal : 0, + NULL); + mac->ack_pending = 0; + } +} + +static int plfxlc_fill_ctrlset(struct plfxlc_mac *mac, struct sk_buff *skb) +{ + unsigned int frag_len = skb->len; + struct plfxlc_ctrlset *cs; + u32 temp_payload_len = 0; + unsigned int tmp; + u32 temp_len = 0; + + if (skb_headroom(skb) < sizeof(struct plfxlc_ctrlset)) { + dev_dbg(plfxlc_mac_dev(mac), "Not enough hroom(1)\n"); + return 1; + } + + cs = (void *)skb_push(skb, sizeof(struct plfxlc_ctrlset)); + temp_payload_len = frag_len; + temp_len = temp_payload_len + + sizeof(struct plfxlc_ctrlset) - + sizeof(cs->id) - sizeof(cs->len); + + /* Data packet lengths must be multiple of four bytes and must + * not be a multiple of 512 bytes. First, it is attempted to + * append the data packet in the tailroom of the skb. In rare + * occasions, the tailroom is too small. In this case, the + * content of the packet is shifted into the headroom of the skb + * by memcpy. Headroom is allocated at startup (below in this + * file). Therefore, there will be always enough headroom. The + * call skb_headroom is an additional safety which might be + * dropped. 
+ */ + /* check if 32 bit aligned and align data */ + tmp = skb->len & 3; + if (tmp) { + if (skb_tailroom(skb) < (3 - tmp)) { + if (skb_headroom(skb) >= 4 - tmp) { + u8 len; + u8 *src_pt; + u8 *dest_pt; + + len = skb->len; + src_pt = skb->data; + dest_pt = skb_push(skb, 4 - tmp); + memmove(dest_pt, src_pt, len); + } else { + return -ENOBUFS; + } + } else { + skb_put(skb, 4 - tmp); + } + temp_len += 4 - tmp; + } + + /* check if not multiple of 512 and align data */ + tmp = skb->len & 0x1ff; + if (!tmp) { + if (skb_tailroom(skb) < 4) { + if (skb_headroom(skb) >= 4) { + u8 len = skb->len; + u8 *src_pt = skb->data; + u8 *dest_pt = skb_push(skb, 4); + + memmove(dest_pt, src_pt, len); + } else { + /* should never happen because + * sufficient headroom was reserved + */ + return -ENOBUFS; + } + } else { + skb_put(skb, 4); + } + temp_len += 4; + } + + cs->id = cpu_to_be32(USB_REQ_DATA_TX); + cs->len = cpu_to_be32(temp_len); + cs->payload_len_nw = cpu_to_be32(temp_payload_len); + + return 0; +} + +static void plfxlc_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct plfxlc_header *plhdr = (void *)skb->data; + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct plfxlc_usb *usb = &mac->chip.usb; + unsigned long flags; + int r; + + r = plfxlc_fill_ctrlset(mac, skb); + if (r) + goto fail; + + info->rate_driver_data[0] = hw; + + if (plhdr->frametype == IEEE80211_FTYPE_DATA) { + u8 *dst_mac = plhdr->dmac; + u8 sidx; + bool found = false; + struct plfxlc_usb_tx *tx = &usb->tx; + + for (sidx = 0; sidx < MAX_STA_NUM; sidx++) { + if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG)) + continue; + if (memcmp(tx->station[sidx].mac, dst_mac, ETH_ALEN)) + continue; + found = true; + break; + } + + /* Default to broadcast address for unknown MACs */ + if (!found) + sidx = STA_BROADCAST_INDEX; + + /* Stop OS from sending packets, if the queue is half full */ + if (skb_queue_len(&tx->station[sidx].data_list) > 60) + ieee80211_stop_queues(plfxlc_usb_to_hw(usb)); + + /* Schedule packet for transmission if queue is not full */ + if (skb_queue_len(&tx->station[sidx].data_list) > 256) + goto fail; + skb_queue_tail(&tx->station[sidx].data_list, skb); + plfxlc_send_packet_from_data_queue(usb); + + } else { + spin_lock_irqsave(&usb->tx.lock, flags); + r = plfxlc_usb_wreq_async(&mac->chip.usb, skb->data, skb->len, + USB_REQ_DATA_TX, plfxlc_tx_urb_complete, skb); + spin_unlock_irqrestore(&usb->tx.lock, flags); + if (r) + goto fail; + } + return; + +fail: + dev_kfree_skb(skb); +} + +static int plfxlc_filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr, + struct ieee80211_rx_status *stats) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct sk_buff_head *q; + int i, position = 0; + unsigned long flags; + struct sk_buff *skb; + bool found = false; + + if (!ieee80211_is_ack(rx_hdr->frame_control)) + return 0; + + dev_dbg(plfxlc_mac_dev(mac), "ACK Received\n"); + + /* code based on zy driver, this logic may need fix */ + q = &mac->ack_wait_queue; + spin_lock_irqsave(&q->lock, flags); + + skb_queue_walk(q, skb) { + struct ieee80211_hdr *tx_hdr; + + position++; + + if (mac->ack_pending && skb_queue_is_first(q, skb)) + continue; + if (mac->ack_pending == 0) + break; + + tx_hdr = (struct ieee80211_hdr *)skb->data; + if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1))) { + found = 1; + break; + } + } + + if (found) { + for (i = 1; i < position; i++) + skb = __skb_dequeue(q); + if (i == position) 
{ + plfxlc_mac_tx_status(hw, skb, + mac->ack_pending ? + mac->ack_signal : 0, + NULL); + mac->ack_pending = 0; + } + + mac->ack_pending = skb_queue_len(q) ? 1 : 0; + mac->ack_signal = stats->signal; + } + + spin_unlock_irqrestore(&q->lock, flags); + return 1; +} + +int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, + unsigned int length) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + struct ieee80211_rx_status stats; + const struct rx_status *status; + unsigned int payload_length; + struct plfxlc_usb_tx *tx; + struct sk_buff *skb; + int need_padding; + __le16 fc; + int sidx; + + /* Packet blockade during disabled interface. */ + if (!mac->vif) + return 0; + + status = (struct rx_status *)buffer; + + memset(&stats, 0, sizeof(stats)); + + stats.flag = 0; + stats.freq = 2412; + stats.band = NL80211_BAND_LC; + mac->rssi = -15 * be16_to_cpu(status->rssi) / 10; + + stats.signal = mac->rssi; + + if (status->rate_idx > 7) + stats.rate_idx = 0; + else + stats.rate_idx = status->rate_idx; + + mac->crc_errors = be64_to_cpu(status->crc_error_count); + + /* TODO bad frame check for CRC error*/ + if (plfxlc_filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats) && + !mac->pass_ctrl) + return 0; + + buffer += sizeof(struct rx_status); + payload_length = get_unaligned_be32(buffer); + + if (payload_length > 1560) { + dev_err(plfxlc_mac_dev(mac), " > MTU %u\n", payload_length); + return 0; + } + buffer += sizeof(u32); + + fc = get_unaligned((__le16 *)buffer); + need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); + + tx = &mac->chip.usb.tx; + + for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { + if (memcmp(&buffer[10], tx->station[sidx].mac, ETH_ALEN)) + continue; + if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) { + tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG; + break; + } + } + + if (sidx == MAX_STA_NUM - 1) { + for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { + if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) + continue; + memcpy(tx->station[sidx].mac, &buffer[10], ETH_ALEN); + tx->station[sidx].flag |= STATION_CONNECTED_FLAG; + tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG; + break; + } + } + + switch (buffer[0]) { + case IEEE80211_STYPE_PROBE_REQ: + dev_dbg(plfxlc_mac_dev(mac), "Probe request\n"); + break; + case IEEE80211_STYPE_ASSOC_REQ: + dev_dbg(plfxlc_mac_dev(mac), "Association request\n"); + break; + case IEEE80211_STYPE_AUTH: + dev_dbg(plfxlc_mac_dev(mac), "Authentication req\n"); + break; + case IEEE80211_FTYPE_DATA: + dev_dbg(plfxlc_mac_dev(mac), "802.11 data frame\n"); + break; + } + + skb = dev_alloc_skb(payload_length + (need_padding ? 2 : 0)); + if (!skb) + return -ENOMEM; + + if (need_padding) + /* Make sure that the payload data is 4 byte aligned. 
*/ + skb_reserve(skb, 2); + + skb_put_data(skb, buffer, payload_length); + memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); + ieee80211_rx_irqsafe(hw, skb); + return 0; +} + +static int plfxlc_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + static const char * const iftype80211[] = { + [NL80211_IFTYPE_STATION] = "Station", + [NL80211_IFTYPE_ADHOC] = "Adhoc" + }; + + if (mac->type != NL80211_IFTYPE_UNSPECIFIED) + return -EOPNOTSUPP; + + if (vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_STATION) { + dev_dbg(plfxlc_mac_dev(mac), "%s %s\n", __func__, + iftype80211[vif->type]); + mac->type = vif->type; + mac->vif = vif; + return 0; + } + dev_dbg(plfxlc_mac_dev(mac), "unsupported iftype\n"); + return -EOPNOTSUPP; +} + +static void plfxlc_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + + mac->type = NL80211_IFTYPE_UNSPECIFIED; + mac->vif = NULL; +} + +static int plfxlc_op_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} + +#define SUPPORTED_FIF_FLAGS \ + (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \ + FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC) +static void plfxlc_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, + u64 multicast) +{ + struct plfxlc_mc_hash hash = { + .low = multicast, + .high = multicast >> 32, + }; + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + unsigned long flags; + + /* Only deal with supported flags */ + *new_flags &= SUPPORTED_FIF_FLAGS; + + /* If multicast parameter + * (as returned by plfxlc_op_prepare_multicast) + * has changed, no bit in changed_flags is set. To handle this + * situation, we do not return if changed_flags is 0. If we do so, + * we will have some issue with IPv6 which uses multicast for link + * layer address resolution. + */ + if (*new_flags & (FIF_ALLMULTI)) + plfxlc_mc_add_all(&hash); + + spin_lock_irqsave(&mac->lock, flags); + mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL); + mac->pass_ctrl = !!(*new_flags & FIF_CONTROL); + mac->multicast_hash = hash; + spin_unlock_irqrestore(&mac->lock, flags); + + /* no handling required for FIF_OTHER_BSS as we don't currently + * do BSSID filtering + */ + /* FIXME: in future it would be nice to enable the probe response + * filter (so that the driver doesn't see them) until + * FIF_BCN_PRBRESP_PROMISC is set. however due to atomicity here, we'd + * have to schedule work to enable prbresp reception, which might + * happen too late. For now we'll just listen and forward them all the + * time. + */ +} + +static void plfxlc_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + int associated; + + dev_dbg(plfxlc_mac_dev(mac), "changes: %x\n", changes); + + if (mac->type != NL80211_IFTYPE_ADHOC) { /* for STATION */ + associated = is_valid_ether_addr(bss_conf->bssid); + goto exit_all; + } + /* for ADHOC */ + associated = true; + if (changes & BSS_CHANGED_BEACON) { + struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); + + if (beacon) { + /*beacon is hardcoded in firmware */ + kfree_skb(beacon); + /*Returned skb is used only once and + * low-level driver is + * responsible for freeing it. 
+ */ + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + u16 interval = 0; + u8 period = 0; + + if (bss_conf->enable_beacon) { + period = bss_conf->dtim_period; + interval = bss_conf->beacon_int; + } + + spin_lock_irq(&mac->lock); + mac->beacon.period = period; + mac->beacon.interval = interval; + mac->beacon.last_update = jiffies; + spin_unlock_irq(&mac->lock); + + plfxlc_set_beacon_interval(&mac->chip, interval, + period, mac->type); + } +exit_all: + spin_lock_irq(&mac->lock); + mac->associated = associated; + spin_unlock_irq(&mac->lock); +} + +static int plfxlc_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + stats->dot11ACKFailureCount = 0; + stats->dot11RTSFailureCount = 0; + stats->dot11FCSErrorCount = 0; + stats->dot11RTSSuccessCount = 0; + return 0; +} + +static const char et_strings[][ETH_GSTRING_LEN] = { + "phy_rssi", + "phy_rx_crc_err" +}; + +static int plfxlc_get_et_sset_count(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(et_strings); + + return 0; +} + +static void plfxlc_get_et_strings(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data) +{ + if (sset == ETH_SS_STATS) + memcpy(data, *et_strings, sizeof(et_strings)); +} + +static void plfxlc_get_et_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data) +{ + struct plfxlc_mac *mac = plfxlc_hw_mac(hw); + + data[0] = mac->rssi; + data[1] = mac->crc_errors; +} + +static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + return 0; +} + +static const struct ieee80211_ops plfxlc_ops = { + .tx = plfxlc_op_tx, + .start = plfxlc_op_start, + .stop = plfxlc_op_stop, + .add_interface = plfxlc_op_add_interface, + .remove_interface = plfxlc_op_remove_interface, + .set_rts_threshold = plfxlc_set_rts_threshold, + .config = plfxlc_op_config, + .configure_filter = plfxlc_op_configure_filter, + .bss_info_changed = plfxlc_op_bss_info_changed, + .get_stats = plfxlc_get_stats, + .get_et_sset_count = plfxlc_get_et_sset_count, + .get_et_stats = plfxlc_get_et_stats, + .get_et_strings = plfxlc_get_et_strings, +}; + +struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf) +{ + struct ieee80211_hw *hw; + struct plfxlc_mac *mac; + + hw = ieee80211_alloc_hw(sizeof(struct plfxlc_mac), &plfxlc_ops); + if (!hw) { + dev_dbg(&intf->dev, "out of memory\n"); + return NULL; + } + set_wiphy_dev(hw->wiphy, &intf->dev); + + mac = plfxlc_hw_mac(hw); + memset(mac, 0, sizeof(*mac)); + spin_lock_init(&mac->lock); + mac->hw = hw; + + mac->type = NL80211_IFTYPE_UNSPECIFIED; + + memcpy(mac->channels, plfxlc_channels, sizeof(plfxlc_channels)); + memcpy(mac->rates, plfxlc_rates, sizeof(plfxlc_rates)); + mac->band.n_bitrates = ARRAY_SIZE(plfxlc_rates); + mac->band.bitrates = mac->rates; + mac->band.n_channels = ARRAY_SIZE(plfxlc_channels); + mac->band.channels = mac->channels; + hw->wiphy->bands[NL80211_BAND_LC] = &mac->band; + hw->conf.chandef.width = NL80211_CHAN_WIDTH_20; + + ieee80211_hw_set(hw, RX_INCLUDES_FCS); + ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, MFP_CAPABLE); + + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); + hw->max_signal = 100; + hw->queues = 1; + /* 4 for 32 bit alignment if no tailroom */ + hw->extra_tx_headroom = sizeof(struct plfxlc_ctrlset) + 4; + /* Tell mac80211 that we support multi rate retries */ + hw->max_rates = 
IEEE80211_TX_MAX_RATES; + hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */ + + skb_queue_head_init(&mac->ack_wait_queue); + mac->ack_pending = 0; + + plfxlc_chip_init(&mac->chip, hw, intf); + + SET_IEEE80211_DEV(hw, &intf->dev); + return hw; +} diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h new file mode 100644 index 000000000000..49b92413729b --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/mac.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 pureLiFi + */ + +#ifndef PLFXLC_MAC_H +#define PLFXLC_MAC_H + +#include +#include + +#include "chip.h" + +#define PURELIFI_CCK 0x00 +#define PURELIFI_OFDM 0x10 +#define PURELIFI_CCK_PREA_SHORT 0x20 + +#define PURELIFI_OFDM_PLCP_RATE_6M 0xb +#define PURELIFI_OFDM_PLCP_RATE_9M 0xf +#define PURELIFI_OFDM_PLCP_RATE_12M 0xa +#define PURELIFI_OFDM_PLCP_RATE_18M 0xe +#define PURELIFI_OFDM_PLCP_RATE_24M 0x9 +#define PURELIFI_OFDM_PLCP_RATE_36M 0xd +#define PURELIFI_OFDM_PLCP_RATE_48M 0x8 +#define PURELIFI_OFDM_PLCP_RATE_54M 0xc + +#define PURELIFI_CCK_RATE_1M (PURELIFI_CCK | 0x00) +#define PURELIFI_CCK_RATE_2M (PURELIFI_CCK | 0x01) +#define PURELIFI_CCK_RATE_5_5M (PURELIFI_CCK | 0x02) +#define PURELIFI_CCK_RATE_11M (PURELIFI_CCK | 0x03) +#define PURELIFI_OFDM_RATE_6M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_6M) +#define PURELIFI_OFDM_RATE_9M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_9M) +#define PURELIFI_OFDM_RATE_12M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_12M) +#define PURELIFI_OFDM_RATE_18M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_18M) +#define PURELIFI_OFDM_RATE_24M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_24M) +#define PURELIFI_OFDM_RATE_36M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_36M) +#define PURELIFI_OFDM_RATE_48M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_48M) +#define PURELIFI_OFDM_RATE_54M (PURELIFI_OFDM | PURELIFI_OFDM_PLCP_RATE_54M) + +#define PURELIFI_RX_ERROR 0x80 +#define PURELIFI_RX_CRC32_ERROR 0x10 + +#define PLF_REGDOMAIN_FCC 0x10 +#define PLF_REGDOMAIN_IC 0x20 +#define PLF_REGDOMAIN_ETSI 0x30 +#define PLF_REGDOMAIN_SPAIN 0x31 +#define PLF_REGDOMAIN_FRANCE 0x32 +#define PLF_REGDOMAIN_JAPAN_2 0x40 +#define PLF_REGDOMAIN_JAPAN 0x41 +#define PLF_REGDOMAIN_JAPAN_3 0x49 + +#define PLF_RX_ERROR 0x80 +#define PLF_RX_CRC32_ERROR 0x10 + +enum { + MODULATION_RATE_BPSK_1_2 = 0, + MODULATION_RATE_BPSK_3_4, + MODULATION_RATE_QPSK_1_2, + MODULATION_RATE_QPSK_3_4, + MODULATION_RATE_QAM16_1_2, + MODULATION_RATE_QAM16_3_4, + MODULATION_RATE_QAM64_1_2, + MODULATION_RATE_QAM64_3_4, + MODULATION_RATE_AUTO, + MODULATION_RATE_NUM +}; + +#define plfxlc_mac_dev(mac) plfxlc_chip_dev(&(mac)->chip) + +#define PURELIFI_MAC_STATS_BUFFER_SIZE 16 +#define PURELIFI_MAC_MAX_ACK_WAITERS 50 + +struct plfxlc_ctrlset { + /* id should be plf_usb_req_enum */ + __be32 id; + __be32 len; + u8 modulation; + u8 control; + u8 service; + u8 pad; + __le16 packet_length; + __le16 current_length; + __le16 next_frame_length; + __le16 tx_length; + __be32 payload_len_nw; +} __packed; + +/* overlay */ +struct plfxlc_header { + struct plfxlc_ctrlset plf_ctrl; + u32 frametype; + u8 *dmac; +} __packed; + +struct tx_status { + u8 type; + u8 id; + u8 rate; + u8 pad; + u8 mac[ETH_ALEN]; + u8 retry; + u8 failure; +} __packed; + +struct beacon { + struct delayed_work watchdog_work; + struct sk_buff *cur_beacon; + unsigned long last_update; + u16 interval; + u8 period; +}; + +enum plfxlc_device_flags { + PURELIFI_DEVICE_RUNNING, +}; + +struct plfxlc_mac { + struct ieee80211_hw *hw; + struct 
ieee80211_vif *vif; + struct beacon beacon; + struct work_struct set_rts_cts_work; + struct work_struct process_intr; + struct plfxlc_mc_hash multicast_hash; + struct sk_buff_head ack_wait_queue; + struct ieee80211_channel channels[14]; + struct ieee80211_rate rates[12]; + struct ieee80211_supported_band band; + struct plfxlc_chip chip; + spinlock_t lock; /* lock for mac data */ + u8 intr_buffer[USB_MAX_EP_INT_BUFFER]; + char serial_number[PURELIFI_SERIAL_LEN]; + unsigned char hw_address[ETH_ALEN]; + u8 default_regdomain; + unsigned long flags; + bool pass_failed_fcs; + bool pass_ctrl; + bool ack_pending; + int ack_signal; + int associated; + u8 regdomain; + u8 channel; + int type; + u64 crc_errors; + u64 rssi; +}; + +static inline struct plfxlc_mac * +plfxlc_hw_mac(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +static inline struct plfxlc_mac * +plfxlc_chip_to_mac(struct plfxlc_chip *chip) +{ + return container_of(chip, struct plfxlc_mac, chip); +} + +static inline struct plfxlc_mac * +plfxlc_usb_to_mac(struct plfxlc_usb *usb) +{ + return plfxlc_chip_to_mac(plfxlc_usb_to_chip(usb)); +} + +static inline u8 *plfxlc_mac_get_perm_addr(struct plfxlc_mac *mac) +{ + return mac->hw->wiphy->perm_addr; +} + +struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf); +void plfxlc_mac_release(struct plfxlc_mac *mac); + +int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address); +int plfxlc_mac_init_hw(struct ieee80211_hw *hw); + +int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, + unsigned int length); +void plfxlc_mac_tx_failed(struct urb *urb); +void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error); +int plfxlc_op_start(struct ieee80211_hw *hw); +void plfxlc_op_stop(struct ieee80211_hw *hw); +int plfxlc_restore_settings(struct plfxlc_mac *mac); + +#endif /* PLFXLC_MAC_H */ diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c new file mode 100644 index 000000000000..6f338b1572a1 --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -0,0 +1,892 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 pureLiFi + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mac.h" +#include "usb.h" +#include "chip.h" + +static const struct usb_device_id usb_ids[] = { + { USB_DEVICE(PURELIFI_X_VENDOR_ID_0, PURELIFI_X_PRODUCT_ID_0), + .driver_info = DEVICE_LIFI_X }, + { USB_DEVICE(PURELIFI_XC_VENDOR_ID_0, PURELIFI_XC_PRODUCT_ID_0), + .driver_info = DEVICE_LIFI_XC }, + { USB_DEVICE(PURELIFI_XL_VENDOR_ID_0, PURELIFI_XL_PRODUCT_ID_0), + .driver_info = DEVICE_LIFI_XL }, + {} +}; + +void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_tx *tx = &usb->tx; + struct sk_buff *skb = NULL; + unsigned long flags; + u8 last_served_sidx; + + spin_lock_irqsave(&tx->lock, flags); + last_served_sidx = usb->sidx; + do { + usb->sidx = (usb->sidx + 1) % MAX_STA_NUM; + if (!(tx->station[usb->sidx].flag & STATION_CONNECTED_FLAG)) + continue; + if (!(tx->station[usb->sidx].flag & STATION_FIFO_FULL_FLAG)) + skb = skb_peek(&tx->station[usb->sidx].data_list); + } while ((usb->sidx != last_served_sidx) && (!skb)); + + if (skb) { + skb = skb_dequeue(&tx->station[usb->sidx].data_list); + plfxlc_usb_wreq_async(usb, skb->data, skb->len, USB_REQ_DATA_TX, + plfxlc_tx_urb_complete, skb); + if (skb_queue_len(&tx->station[usb->sidx].data_list) <= 60) + 
ieee80211_wake_queues(plfxlc_usb_to_hw(usb)); + } + spin_unlock_irqrestore(&tx->lock, flags); +} + +static void handle_rx_packet(struct plfxlc_usb *usb, const u8 *buffer, + unsigned int length) +{ + plfxlc_mac_rx(plfxlc_usb_to_hw(usb), buffer, length); +} + +static void rx_urb_complete(struct urb *urb) +{ + struct plfxlc_usb_tx *tx; + struct plfxlc_usb *usb; + unsigned int length; + const u8 *buffer; + u16 status; + u8 sidx; + int r; + + if (!urb) { + pr_err("urb is NULL\n"); + return; + } + if (!urb->context) { + pr_err("urb ctx is NULL\n"); + return; + } + usb = urb->context; + + if (usb->initialized != 1) { + pr_err("usb is not initialized\n"); + return; + } + + tx = &usb->tx; + switch (urb->status) { + case 0: + break; + case -ESHUTDOWN: + case -EINVAL: + case -ENODEV: + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + return; + default: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + if (tx->submitted_urbs++ < PURELIFI_URB_RETRY_MAX) { + dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit %d", urb, + tx->submitted_urbs++); + goto resubmit; + } else { + dev_dbg(plfxlc_urb_dev(urb), "urb %p max resubmits reached", urb); + tx->submitted_urbs = 0; + return; + } + } + + buffer = urb->transfer_buffer; + length = le32_to_cpu(*(__le32 *)(buffer + sizeof(struct rx_status))) + + sizeof(u32); + + if (urb->actual_length != (PLF_MSG_STATUS_OFFSET + 1)) { + if (usb->initialized && usb->link_up) + handle_rx_packet(usb, buffer, length); + goto resubmit; + } + + status = buffer[PLF_MSG_STATUS_OFFSET]; + + switch (status) { + case STATION_FIFO_ALMOST_FULL_NOT_MESSAGE: + dev_dbg(&usb->intf->dev, + "FIFO full not packet receipt\n"); + tx->mac_fifo_full = 1; + for (sidx = 0; sidx < MAX_STA_NUM; sidx++) + tx->station[sidx].flag |= STATION_FIFO_FULL_FLAG; + break; + case STATION_FIFO_ALMOST_FULL_MESSAGE: + dev_dbg(&usb->intf->dev, "FIFO full packet receipt\n"); + + for (sidx = 0; sidx < MAX_STA_NUM; sidx++) + tx->station[sidx].flag &= STATION_ACTIVE_FLAG; + + plfxlc_send_packet_from_data_queue(usb); + break; + case STATION_CONNECT_MESSAGE: + usb->link_up = 1; + dev_dbg(&usb->intf->dev, "ST_CONNECT_MSG packet receipt\n"); + break; + case STATION_DISCONNECT_MESSAGE: + usb->link_up = 0; + dev_dbg(&usb->intf->dev, "ST_DISCONN_MSG packet receipt\n"); + break; + default: + dev_dbg(&usb->intf->dev, "Unknown packet receipt\n"); + break; + } + +resubmit: + r = usb_submit_urb(urb, GFP_ATOMIC); + if (r) + dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit fail (%d)\n", urb, r); +} + +static struct urb *alloc_rx_urb(struct plfxlc_usb *usb) +{ + struct usb_device *udev = plfxlc_usb_to_usbdev(usb); + struct urb *urb; + void *buffer; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return NULL; + + buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL, + &urb->transfer_dma); + if (!buffer) { + usb_free_urb(urb); + return NULL; + } + + usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN), + buffer, USB_MAX_RX_SIZE, + rx_urb_complete, usb); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + return urb; +} + +static void free_rx_urb(struct urb *urb) +{ + if (!urb) + return; + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); +} + +static int __lf_x_usb_enable_rx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_rx *rx = &usb->rx; + struct urb **urbs; + int i, r; + + r = -ENOMEM; + urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL); + 
if (!urbs) + goto error; + + for (i = 0; i < RX_URBS_COUNT; i++) { + urbs[i] = alloc_rx_urb(usb); + if (!urbs[i]) + goto error; + } + + spin_lock_irq(&rx->lock); + + dev_dbg(plfxlc_usb_dev(usb), "irq_disabled %d\n", irqs_disabled()); + + if (rx->urbs) { + spin_unlock_irq(&rx->lock); + r = 0; + goto error; + } + rx->urbs = urbs; + rx->urbs_count = RX_URBS_COUNT; + spin_unlock_irq(&rx->lock); + + for (i = 0; i < RX_URBS_COUNT; i++) { + r = usb_submit_urb(urbs[i], GFP_KERNEL); + if (r) + goto error_submit; + } + + return 0; + +error_submit: + for (i = 0; i < RX_URBS_COUNT; i++) + usb_kill_urb(urbs[i]); + spin_lock_irq(&rx->lock); + rx->urbs = NULL; + rx->urbs_count = 0; + spin_unlock_irq(&rx->lock); +error: + if (urbs) { + for (i = 0; i < RX_URBS_COUNT; i++) + free_rx_urb(urbs[i]); + } + return r; +} + +int plfxlc_usb_enable_rx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_rx *rx = &usb->rx; + int r; + + mutex_lock(&rx->setup_mutex); + r = __lf_x_usb_enable_rx(usb); + if (!r) + usb->rx_usb_enabled = 1; + + mutex_unlock(&rx->setup_mutex); + + return r; +} + +static void __lf_x_usb_disable_rx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_rx *rx = &usb->rx; + unsigned long flags; + unsigned int count; + struct urb **urbs; + int i; + + spin_lock_irqsave(&rx->lock, flags); + urbs = rx->urbs; + count = rx->urbs_count; + spin_unlock_irqrestore(&rx->lock, flags); + + if (!urbs) + return; + + for (i = 0; i < count; i++) { + usb_kill_urb(urbs[i]); + free_rx_urb(urbs[i]); + } + kfree(urbs); + rx->urbs = NULL; + rx->urbs_count = 0; +} + +void plfxlc_usb_disable_rx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_rx *rx = &usb->rx; + + mutex_lock(&rx->setup_mutex); + __lf_x_usb_disable_rx(usb); + usb->rx_usb_enabled = 0; + mutex_unlock(&rx->setup_mutex); +} + +void plfxlc_usb_disable_tx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_tx *tx = &usb->tx; + unsigned long flags; + + clear_bit(PLF_BIT_ENABLED, &tx->enabled); + + /* kill all submitted tx-urbs */ + usb_kill_anchored_urbs(&tx->submitted); + + spin_lock_irqsave(&tx->lock, flags); + WARN_ON(!skb_queue_empty(&tx->submitted_skbs)); + WARN_ON(tx->submitted_urbs != 0); + tx->submitted_urbs = 0; + spin_unlock_irqrestore(&tx->lock, flags); + + /* The stopped state is ignored, relying on ieee80211_wake_queues() + * in a potentionally following plfxlc_usb_enable_tx(). 
+ */ +} + +void plfxlc_usb_enable_tx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_tx *tx = &usb->tx; + unsigned long flags; + + spin_lock_irqsave(&tx->lock, flags); + set_bit(PLF_BIT_ENABLED, &tx->enabled); + tx->submitted_urbs = 0; + ieee80211_wake_queues(plfxlc_usb_to_hw(usb)); + tx->stopped = 0; + spin_unlock_irqrestore(&tx->lock, flags); +} + +void plfxlc_tx_urb_complete(struct urb *urb) +{ + struct ieee80211_tx_info *info; + struct plfxlc_usb *usb; + struct sk_buff *skb; + + skb = urb->context; + info = IEEE80211_SKB_CB(skb); + /* grab 'usb' pointer before handing off the skb (since + * it might be freed by plfxlc_mac_tx_to_dev or mac80211) + */ + usb = &plfxlc_hw_mac(info->rate_driver_data[0])->chip.usb; + + switch (urb->status) { + case 0: + break; + case -ESHUTDOWN: + case -EINVAL: + case -ENODEV: + case -ENOENT: + case -ECONNRESET: + case -EPIPE: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + break; + default: + dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); + return; + } + + plfxlc_mac_tx_to_dev(skb, urb->status); + plfxlc_send_packet_from_data_queue(usb); + usb_free_urb(urb); +} + +static inline void init_usb_rx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_rx *rx = &usb->rx; + + spin_lock_init(&rx->lock); + mutex_init(&rx->setup_mutex); + + if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) + rx->usb_packet_size = 512; + else + rx->usb_packet_size = 64; + + if (rx->fragment_length != 0) + dev_dbg(plfxlc_usb_dev(usb), "fragment_length error\n"); +} + +static inline void init_usb_tx(struct plfxlc_usb *usb) +{ + struct plfxlc_usb_tx *tx = &usb->tx; + + spin_lock_init(&tx->lock); + clear_bit(PLF_BIT_ENABLED, &tx->enabled); + tx->stopped = 0; + skb_queue_head_init(&tx->submitted_skbs); + init_usb_anchor(&tx->submitted); +} + +void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw, + struct usb_interface *intf) +{ + memset(usb, 0, sizeof(*usb)); + usb->intf = usb_get_intf(intf); + usb_set_intfdata(usb->intf, hw); + init_usb_tx(usb); + init_usb_rx(usb); +} + +void plfxlc_usb_release(struct plfxlc_usb *usb) +{ + plfxlc_op_stop(plfxlc_usb_to_hw(usb)); + plfxlc_usb_disable_tx(usb); + plfxlc_usb_disable_rx(usb); + usb_set_intfdata(usb->intf, NULL); + usb_put_intf(usb->intf); +} + +const char *plfxlc_speed(enum usb_device_speed speed) +{ + switch (speed) { + case USB_SPEED_LOW: + return "low"; + case USB_SPEED_FULL: + return "full"; + case USB_SPEED_HIGH: + return "high"; + default: + return "unknown"; + } +} + +int plfxlc_usb_init_hw(struct plfxlc_usb *usb) +{ + int r; + + r = usb_reset_configuration(plfxlc_usb_to_usbdev(usb)); + if (r) { + dev_err(plfxlc_usb_dev(usb), "cfg reset failed (%d)\n", r); + return r; + } + return 0; +} + +static void get_usb_req(struct usb_device *udev, void *buffer, + u32 buffer_len, enum plf_usb_req_enum usb_req_id, + struct plf_usb_req *usb_req) +{ + __be32 payload_len_nw = cpu_to_be32(buffer_len + FCS_LEN); + const u8 *buffer_src_p = buffer; + u8 *buffer_dst = usb_req->buf; + u32 temp_usb_len = 0; + + usb_req->id = cpu_to_be32(usb_req_id); + usb_req->len = cpu_to_be32(0); + + /* Copy buffer length into the transmitted buffer, as it is important + * for the Rx MAC to know its exact length. 
+ */ + if (usb_req->id == cpu_to_be32(USB_REQ_BEACON_WR)) { + memcpy(buffer_dst, &payload_len_nw, sizeof(payload_len_nw)); + buffer_dst += sizeof(payload_len_nw); + temp_usb_len += sizeof(payload_len_nw); + } + + memcpy(buffer_dst, buffer_src_p, buffer_len); + buffer_dst += buffer_len; + buffer_src_p += buffer_len; + temp_usb_len += buffer_len; + + /* Set the FCS_LEN (4) bytes as 0 for CRC checking. */ + memset(buffer_dst, 0, FCS_LEN); + buffer_dst += FCS_LEN; + temp_usb_len += FCS_LEN; + + /* Round the packet to be transmitted to 4 bytes. */ + if (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT) { + memset(buffer_dst, 0, PURELIFI_BYTE_NUM_ALIGNMENT - + (temp_usb_len % + PURELIFI_BYTE_NUM_ALIGNMENT)); + buffer_dst += PURELIFI_BYTE_NUM_ALIGNMENT - + (temp_usb_len % + PURELIFI_BYTE_NUM_ALIGNMENT); + temp_usb_len += PURELIFI_BYTE_NUM_ALIGNMENT - + (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT); + } + + usb_req->len = cpu_to_be32(temp_usb_len); +} + +int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer, + int buffer_len, enum plf_usb_req_enum usb_req_id, + usb_complete_t complete_fn, + void *context) +{ + struct usb_device *udev = interface_to_usbdev(usb->ez_usb); + struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC); + int r; + + usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), + (void *)buffer, buffer_len, complete_fn, context); + + r = usb_submit_urb(urb, GFP_ATOMIC); + if (r) + dev_err(&udev->dev, "Async write submit failed (%d)\n", r); + + return r; +} + +int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len, + enum plf_usb_req_enum usb_req_id) +{ + struct usb_device *udev = interface_to_usbdev(ez_usb); + unsigned char *dma_buffer = NULL; + struct plf_usb_req usb_req; + int usb_bulk_msg_len; + int actual_length; + int r; + + get_usb_req(udev, buffer, buffer_len, usb_req_id, &usb_req); + usb_bulk_msg_len = sizeof(__le32) + sizeof(__le32) + + be32_to_cpu(usb_req.len); + + dma_buffer = kmemdup(&usb_req, usb_bulk_msg_len, GFP_KERNEL); + + if (!dma_buffer) { + r = -ENOMEM; + goto error; + } + + r = usb_bulk_msg(udev, + usb_sndbulkpipe(udev, EP_DATA_OUT), + dma_buffer, usb_bulk_msg_len, + &actual_length, USB_BULK_MSG_TIMEOUT_MS); + kfree(dma_buffer); +error: + if (r) { + r = -ENOMEM; + dev_err(&udev->dev, "usb_bulk_msg failed (%d)\n", r); + } + + return r; +} + +static void slif_data_plane_sap_timer_callb(struct timer_list *t) +{ + struct plfxlc_usb *usb = from_timer(usb, t, tx.tx_retry_timer); + + plfxlc_send_packet_from_data_queue(usb); + timer_setup(&usb->tx.tx_retry_timer, + slif_data_plane_sap_timer_callb, 0); + mod_timer(&usb->tx.tx_retry_timer, jiffies + TX_RETRY_BACKOFF_JIFF); +} + +static void sta_queue_cleanup_timer_callb(struct timer_list *t) +{ + struct plfxlc_usb *usb = from_timer(usb, t, sta_queue_cleanup); + struct plfxlc_usb_tx *tx = &usb->tx; + int sidx; + + for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { + if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG)) + continue; + if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) { + tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG; + } else { + memset(tx->station[sidx].mac, 0, ETH_ALEN); + tx->station[sidx].flag = 0; + } + } + timer_setup(&usb->sta_queue_cleanup, + sta_queue_cleanup_timer_callb, 0); + mod_timer(&usb->sta_queue_cleanup, jiffies + STA_QUEUE_CLEANUP_JIFF); +} + +static int probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + u8 serial_number[PURELIFI_SERIAL_LEN]; + struct ieee80211_hw *hw = NULL; + struct plfxlc_usb_tx *tx; + struct plfxlc_chip *chip; 
+ struct plfxlc_usb *usb; + u8 hw_address[ETH_ALEN]; + unsigned int i; + int r = 0; + + hw = plfxlc_mac_alloc_hw(intf); + + if (!hw) { + r = -ENOMEM; + goto error; + } + + chip = &plfxlc_hw_mac(hw)->chip; + usb = &chip->usb; + usb->ez_usb = intf; + tx = &usb->tx; + + r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number); + if (r) { + dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r); + goto error; + } + + chip->unit_type = STA; + dev_err(&intf->dev, "Unit type is station"); + + r = plfxlc_mac_preinit_hw(hw, hw_address); + if (r) { + dev_err(&intf->dev, "Init mac failed (%d)\n", r); + goto error; + } + + r = ieee80211_register_hw(hw); + if (r) { + dev_err(&intf->dev, "Register device failed (%d)\n", r); + goto error; + } + + if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) == + PURELIFI_XL_VENDOR_ID_0) && + (le16_to_cpu(interface_to_usbdev(intf)->descriptor.idProduct) == + PURELIFI_XL_PRODUCT_ID_0)) { + r = plfxlc_download_xl_firmware(intf); + } else { + r = plfxlc_download_fpga(intf); + } + if (r != 0) { + dev_err(&intf->dev, "FPGA download failed (%d)\n", r); + goto error; + } + + tx->mac_fifo_full = 0; + spin_lock_init(&tx->lock); + + msleep(PLF_MSLEEP_TIME); + r = plfxlc_usb_init_hw(usb); + if (r < 0) { + dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r); + goto error; + } + + msleep(PLF_MSLEEP_TIME); + r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON); + if (r < 0) { + dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r); + goto error; + } + + msleep(PLF_MSLEEP_TIME); + r = plfxlc_chip_set_rate(chip, 8); + if (r < 0) { + dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r); + goto error; + } + + msleep(PLF_MSLEEP_TIME); + r = plfxlc_usb_wreq(usb->ez_usb, + hw_address, ETH_ALEN, USB_REQ_MAC_WR); + if (r < 0) { + dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r); + goto error; + } + + plfxlc_chip_enable_rxtx(chip); + + /* Initialise the data plane Tx queue */ + for (i = 0; i < MAX_STA_NUM; i++) { + skb_queue_head_init(&tx->station[i].data_list); + tx->station[i].flag = 0; + } + + tx->station[STA_BROADCAST_INDEX].flag |= STATION_CONNECTED_FLAG; + for (i = 0; i < ETH_ALEN; i++) + tx->station[STA_BROADCAST_INDEX].mac[i] = 0xFF; + + timer_setup(&tx->tx_retry_timer, slif_data_plane_sap_timer_callb, 0); + tx->tx_retry_timer.expires = jiffies + TX_RETRY_BACKOFF_JIFF; + add_timer(&tx->tx_retry_timer); + + timer_setup(&usb->sta_queue_cleanup, + sta_queue_cleanup_timer_callb, 0); + usb->sta_queue_cleanup.expires = jiffies + STA_QUEUE_CLEANUP_JIFF; + add_timer(&usb->sta_queue_cleanup); + + plfxlc_mac_init_hw(hw); + usb->initialized = true; + return 0; +error: + if (hw) { + plfxlc_mac_release(plfxlc_hw_mac(hw)); + ieee80211_unregister_hw(hw); + ieee80211_free_hw(hw); + } + dev_err(&intf->dev, "pureLifi:Device error"); + return r; +} + +static void disconnect(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf); + struct plfxlc_mac *mac; + struct plfxlc_usb *usb; + + /* Either something really bad happened, or + * we're just dealing with a DEVICE_INSTALLER. + */ + if (!hw) + return; + + mac = plfxlc_hw_mac(hw); + usb = &mac->chip.usb; + + del_timer_sync(&usb->tx.tx_retry_timer); + del_timer_sync(&usb->sta_queue_cleanup); + + ieee80211_unregister_hw(hw); + + plfxlc_chip_disable_rxtx(&mac->chip); + + /* If the disconnect has been caused by a removal of the + * driver module, the reset allows reloading of the driver. 
If the + * reset will not be executed here, the upload of the firmware in the + * probe function caused by the reloading of the driver will fail. + */ + usb_reset_device(interface_to_usbdev(intf)); + + plfxlc_mac_release(mac); + ieee80211_free_hw(hw); +} + +static void plfxlc_usb_resume(struct plfxlc_usb *usb) +{ + struct plfxlc_mac *mac = plfxlc_usb_to_mac(usb); + int r; + + r = plfxlc_op_start(plfxlc_usb_to_hw(usb)); + if (r < 0) { + dev_warn(plfxlc_usb_dev(usb), + "Device resume failed (%d)\n", r); + + if (usb->was_running) + set_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); + + usb_queue_reset_device(usb->intf); + return; + } + + if (mac->type != NL80211_IFTYPE_UNSPECIFIED) { + r = plfxlc_restore_settings(mac); + if (r < 0) { + dev_dbg(plfxlc_usb_dev(usb), + "Restore failed (%d)\n", r); + return; + } + } +} + +static void plfxlc_usb_stop(struct plfxlc_usb *usb) +{ + plfxlc_op_stop(plfxlc_usb_to_hw(usb)); + plfxlc_usb_disable_tx(usb); + plfxlc_usb_disable_rx(usb); + + usb->initialized = false; +} + +static int pre_reset(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(intf); + struct plfxlc_mac *mac; + struct plfxlc_usb *usb; + + if (!hw || intf->condition != USB_INTERFACE_BOUND) + return 0; + + mac = plfxlc_hw_mac(hw); + usb = &mac->chip.usb; + + usb->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); + + plfxlc_usb_stop(usb); + + return 0; +} + +static int post_reset(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(intf); + struct plfxlc_mac *mac; + struct plfxlc_usb *usb; + + if (!hw || intf->condition != USB_INTERFACE_BOUND) + return 0; + + mac = plfxlc_hw_mac(hw); + usb = &mac->chip.usb; + + if (usb->was_running) + plfxlc_usb_resume(usb); + + return 0; +} + +#ifdef CONFIG_PM + +static struct plfxlc_usb *get_plfxlc_usb(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf); + struct plfxlc_mac *mac; + + /* Either something really bad happened, or + * we're just dealing with a DEVICE_INSTALLER. 
+ */ + if (!hw) + return NULL; + + mac = plfxlc_hw_mac(hw); + return &mac->chip.usb; +} + +static int suspend(struct usb_interface *interface, + pm_message_t message) +{ + struct plfxlc_usb *pl = get_plfxlc_usb(interface); + struct plfxlc_mac *mac = plfxlc_usb_to_mac(pl); + + if (!pl || !plfxlc_usb_dev(pl)) + return -ENODEV; + if (pl->initialized == 0) + return 0; + pl->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); + plfxlc_usb_stop(pl); + return 0; +} + +static int resume(struct usb_interface *interface) +{ + struct plfxlc_usb *pl = get_plfxlc_usb(interface); + + if (!pl || !plfxlc_usb_dev(pl)) + return -ENODEV; + if (pl->was_running) + plfxlc_usb_resume(pl); + return 0; +} + +#endif + +static struct usb_driver driver = { + .name = KBUILD_MODNAME, + .id_table = usb_ids, + .probe = probe, + .disconnect = disconnect, + .pre_reset = pre_reset, + .post_reset = post_reset, +#ifdef CONFIG_PM + .suspend = suspend, + .resume = resume, +#endif + .disable_hub_initiated_lpm = 1, +}; + +static int __init usb_init(void) +{ + int r; + + r = usb_register(&driver); + if (r) { + pr_err("%s usb_register() failed %d\n", driver.name, r); + return r; + } + + pr_debug("Driver initialized :%s\n", driver.name); + return 0; +} + +static void __exit usb_exit(void) +{ + usb_deregister(&driver); + pr_debug("%s %s\n", driver.name, __func__); +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("USB driver for pureLiFi devices"); +MODULE_AUTHOR("pureLiFi"); +MODULE_VERSION("1.0"); +MODULE_FIRMWARE("plfxlc/lifi-x.bin"); +MODULE_DEVICE_TABLE(usb, usb_ids); + +module_init(usb_init); +module_exit(usb_exit); diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.h b/drivers/net/wireless/purelifi/plfxlc/usb.h new file mode 100644 index 000000000000..ba2defd593bd --- /dev/null +++ b/drivers/net/wireless/purelifi/plfxlc/usb.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 pureLiFi + */ + +#ifndef PLFXLC_USB_H +#define PLFXLC_USB_H + +#include +#include +#include +#include +#include + +#include "intf.h" + +#define USB_BULK_MSG_TIMEOUT_MS 2000 + +#define PURELIFI_X_VENDOR_ID_0 0x16C1 +#define PURELIFI_X_PRODUCT_ID_0 0x1CDE +#define PURELIFI_XC_VENDOR_ID_0 0x2EF5 +#define PURELIFI_XC_PRODUCT_ID_0 0x0008 +#define PURELIFI_XL_VENDOR_ID_0 0x2EF5 +#define PURELIFI_XL_PRODUCT_ID_0 0x000A /* Station */ + +#define PLF_FPGA_STATUS_LEN 2 +#define PLF_FPGA_STATE_LEN 9 +#define PLF_BULK_TLEN 16384 +#define PLF_FPGA_MG 6 /* Magic check */ +#define PLF_XL_BUF_LEN 64 +#define PLF_MSG_STATUS_OFFSET 7 + +#define PLF_USB_TIMEOUT 1000 +#define PLF_MSLEEP_TIME 200 + +#define PURELIFI_URB_RETRY_MAX 5 + +#define plfxlc_usb_dev(usb) (&(usb)->intf->dev) + +/* Tx retry backoff timer (in milliseconds) */ +#define TX_RETRY_BACKOFF_MS 10 +#define STA_QUEUE_CLEANUP_MS 5000 + +/* Tx retry backoff timer (in jiffies) */ +#define TX_RETRY_BACKOFF_JIFF ((TX_RETRY_BACKOFF_MS * HZ) / 1000) +#define STA_QUEUE_CLEANUP_JIFF ((STA_QUEUE_CLEANUP_MS * HZ) / 1000) + +/* Ensures that MAX_TRANSFER_SIZE is even. 
*/ +#define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1) +#define plfxlc_urb_dev(urb) (&(urb)->dev->dev) + +#define STATION_FIFO_ALMOST_FULL_MESSAGE 0 +#define STATION_FIFO_ALMOST_FULL_NOT_MESSAGE 1 +#define STATION_CONNECT_MESSAGE 2 +#define STATION_DISCONNECT_MESSAGE 3 + +int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len, + enum plf_usb_req_enum usb_req_id); +void plfxlc_tx_urb_complete(struct urb *urb); + +enum { + USB_MAX_RX_SIZE = 4800, + USB_MAX_EP_INT_BUFFER = 64, +}; + +struct plfxlc_usb_interrupt { + spinlock_t lock; /* spin lock for usb interrupt buffer */ + struct urb *urb; + void *buffer; + int interval; +}; + +#define RX_URBS_COUNT 5 + +struct plfxlc_usb_rx { + spinlock_t lock; /* spin lock for rx urb */ + struct mutex setup_mutex; /* mutex lockt for rx urb */ + u8 fragment[2 * USB_MAX_RX_SIZE]; + unsigned int fragment_length; + unsigned int usb_packet_size; + struct urb **urbs; + int urbs_count; +}; + +struct plf_station { + /* 7...3 | 2 | 1 | 0 | + * Reserved | Heartbeat | FIFO full | Connected | + */ + unsigned char flag; + unsigned char mac[ETH_ALEN]; + struct sk_buff_head data_list; +}; + +struct plfxlc_firmware_file { + u32 total_files; + u32 total_size; + u32 size; + u32 start_addr; + u32 control_packets; +} __packed; + +#define STATION_CONNECTED_FLAG 0x1 +#define STATION_FIFO_FULL_FLAG 0x2 +#define STATION_HEARTBEAT_FLAG 0x4 +#define STATION_ACTIVE_FLAG 0xFD + +#define PURELIFI_SERIAL_LEN 256 +#define STA_BROADCAST_INDEX (AP_USER_LIMIT) +#define MAX_STA_NUM (AP_USER_LIMIT + 1) + +struct plfxlc_usb_tx { + unsigned long enabled; + spinlock_t lock; /* spinlock for USB tx */ + u8 mac_fifo_full; + struct sk_buff_head submitted_skbs; + struct usb_anchor submitted; + int submitted_urbs; + bool stopped; + struct timer_list tx_retry_timer; + struct plf_station station[MAX_STA_NUM]; +}; + +/* Contains the usb parts. The structure doesn't require a lock because intf + * will not be changed after initialization. 
+ */ +struct plfxlc_usb { + struct timer_list sta_queue_cleanup; + struct plfxlc_usb_rx rx; + struct plfxlc_usb_tx tx; + struct usb_interface *intf; + struct usb_interface *ez_usb; + u8 req_buf[64]; /* plfxlc_usb_iowrite16v needs 62 bytes */ + u8 sidx; /* store last served */ + bool rx_usb_enabled; + bool initialized; + bool was_running; + bool link_up; +}; + +enum endpoints { + EP_DATA_IN = 2, + EP_DATA_OUT = 8, +}; + +enum devicetype { + DEVICE_LIFI_X = 0, + DEVICE_LIFI_XC = 1, + DEVICE_LIFI_XL = 1, +}; + +enum { + PLF_BIT_ENABLED = 1, + PLF_BIT_MAX = 2, +}; + +int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer, + int buffer_len, enum plf_usb_req_enum usb_req_id, + usb_complete_t complete_fn, void *context); + +static inline struct usb_device * +plfxlc_usb_to_usbdev(struct plfxlc_usb *usb) +{ + return interface_to_usbdev(usb->intf); +} + +static inline struct ieee80211_hw * +plfxlc_intf_to_hw(struct usb_interface *intf) +{ + return usb_get_intfdata(intf); +} + +static inline struct ieee80211_hw * +plfxlc_usb_to_hw(struct plfxlc_usb *usb) +{ + return plfxlc_intf_to_hw(usb->intf); +} + +void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw, + struct usb_interface *intf); +void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb); +void plfxlc_usb_release(struct plfxlc_usb *usb); +void plfxlc_usb_disable_rx(struct plfxlc_usb *usb); +void plfxlc_usb_enable_tx(struct plfxlc_usb *usb); +void plfxlc_usb_disable_tx(struct plfxlc_usb *usb); +int plfxlc_usb_tx(struct plfxlc_usb *usb, struct sk_buff *skb); +int plfxlc_usb_enable_rx(struct plfxlc_usb *usb); +int plfxlc_usb_init_hw(struct plfxlc_usb *usb); +const char *plfxlc_speed(enum usb_device_speed speed); + +/* Firmware declarations */ +int plfxlc_download_xl_firmware(struct usb_interface *intf); +int plfxlc_download_fpga(struct usb_interface *intf); + +int plfxlc_upload_mac_and_serial(struct usb_interface *intf, + unsigned char *hw_address, + unsigned char *serial_number); + +#endif /* PLFXLC_USB_H */ -- cgit v1.2.3 From 2b1c9dbf047b57f1df988d90046fc3394926cc0b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 Apr 2022 12:08:59 +0200 Subject: net: ieee802154: at86rf230: Call _xmit_hw_error() when failing to offload frames If we end up at this location, it means that there was likely a hardware issue (either a bus error when asynchronously offloading the packet to the transceiver, or the transceiver took too long for some state change). In this case it was decided to return IEEE802154_SYSTEM_ERROR through the ieee802154_xmit_hw_error() helper dedicated to non IEEE802.15.4 specific errors. Let's use this helper instead of (almost) open-coding it. 
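As a rough illustration (a hypothetical condensation, not the hunk applied by this patch), the whole recovery path boils down to handing the pending skb back to the core; the fields used below are the driver's existing ones, while the function name is made up:

    /* Hedged sketch: on an unrecoverable hardware error during an async
     * transmit, report it through the core helper, which takes care of
     * freeing the skb and restarting the transmit queue.
     */
    static void example_async_error_recover(struct at86rf230_local *lp)
    {
            if (lp->was_tx) {
                    lp->was_tx = 0;
                    ieee802154_xmit_hw_error(lp->hw, lp->tx_skb);
            }
    }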
Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220407100903.1695973-7-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/at86rf230.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 563031ce76f0..0536ccd55e70 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -346,8 +346,7 @@ at86rf230_async_error_recover_complete(void *context) if (lp->was_tx) { lp->was_tx = 0; - dev_kfree_skb_any(lp->tx_skb); - ieee802154_wake_queue(lp->hw); + ieee802154_xmit_hw_error(lp->hw, lp->tx_skb); } } -- cgit v1.2.3 From 6ec9630b1abe7622da75fd6559e3e9468655b96b Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 Apr 2022 12:09:00 +0200 Subject: net: ieee802154: at86rf230: Forward Tx trac errors Commit 493bc90a9683 ("at86rf230: add debugfs support") brought trac support as part of a debugfs feature, in order to add some testing capabilities involving ack handling. As we want to collect trac errors but do not need the debugfs feature anymore, let's partially revert this commit, keeping the Tx trac handling part which still makes sense. This allows to always return the trac error directly to the core with the recently introduced ieee802154_xmit_error() helper. Suggested-by: Alexander Aring Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220407100903.1695973-8-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/Kconfig | 7 -- drivers/net/ieee802154/at86rf230.c | 127 ++++++------------------------------- 2 files changed, 21 insertions(+), 113 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 0f7c6dc2ed15..95da876c5613 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -33,13 +33,6 @@ config IEEE802154_AT86RF230 This driver can also be built as a module. To do so, say M here. the module will be called 'at86rf230'. -config IEEE802154_AT86RF230_DEBUGFS - depends on IEEE802154_AT86RF230 - bool "AT86RF230 debugfs interface" - depends on DEBUG_FS - help - This option compiles debugfs code for the at86rf230 driver. 
- config IEEE802154_MRF24J40 tristate "Microchip MRF24J40 transceiver driver" depends on IEEE802154_DRIVERS && MAC802154 diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 0536ccd55e70..1c681c1609d3 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -72,19 +71,11 @@ struct at86rf230_state_change { void (*complete)(void *context); u8 from_state; u8 to_state; + int trac; bool free; }; -struct at86rf230_trac { - u64 success; - u64 success_data_pending; - u64 success_wait_for_ack; - u64 channel_access_failure; - u64 no_ack; - u64 invalid; -}; - struct at86rf230_local { struct spi_device *spi; @@ -104,8 +95,6 @@ struct at86rf230_local { u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; - - struct at86rf230_trac trac; }; #define AT86RF2XX_NUMREGS 0x3F @@ -652,7 +641,11 @@ at86rf230_tx_complete(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); + if (ctx->trac == IEEE802154_SUCCESS) + ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); + else + ieee802154_xmit_error(lp->hw, lp->tx_skb, ctx->trac); + kfree(ctx); } @@ -671,30 +664,21 @@ at86rf230_tx_trac_check(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; + u8 trac = TRAC_MASK(ctx->buf[1]); - if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) { - u8 trac = TRAC_MASK(ctx->buf[1]); - - switch (trac) { - case TRAC_SUCCESS: - lp->trac.success++; - break; - case TRAC_SUCCESS_DATA_PENDING: - lp->trac.success_data_pending++; - break; - case TRAC_CHANNEL_ACCESS_FAILURE: - lp->trac.channel_access_failure++; - break; - case TRAC_NO_ACK: - lp->trac.no_ack++; - break; - case TRAC_INVALID: - lp->trac.invalid++; - break; - default: - WARN_ONCE(1, "received tx trac status %d\n", trac); - break; - } + switch (trac) { + case TRAC_SUCCESS: + case TRAC_SUCCESS_DATA_PENDING: + ctx->trac = IEEE802154_SUCCESS; + break; + case TRAC_CHANNEL_ACCESS_FAILURE: + ctx->trac = IEEE802154_CHANNEL_ACCESS_FAILURE; + break; + case TRAC_NO_ACK: + ctx->trac = IEEE802154_NO_ACK; + break; + default: + ctx->trac = IEEE802154_SYSTEM_ERROR; } at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on); @@ -736,25 +720,6 @@ at86rf230_rx_trac_check(void *context) u8 *buf = ctx->buf; int rc; - if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) { - u8 trac = TRAC_MASK(buf[1]); - - switch (trac) { - case TRAC_SUCCESS: - lp->trac.success++; - break; - case TRAC_SUCCESS_WAIT_FOR_ACK: - lp->trac.success_wait_for_ack++; - break; - case TRAC_INVALID: - lp->trac.invalid++; - break; - default: - WARN_ONCE(1, "received rx trac status %d\n", trac); - break; - } - } - buf[0] = CMD_FB; ctx->trx.len = AT86RF2XX_MAX_BUF; ctx->msg.complete = at86rf230_rx_read_frame_complete; @@ -950,10 +915,6 @@ at86rf230_start(struct ieee802154_hw *hw) { struct at86rf230_local *lp = hw->priv; - /* reset trac stats on start */ - if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) - memset(&lp->trac, 0, sizeof(struct at86rf230_trac)); - at86rf230_awake(lp); enable_irq(lp->spi->irq); @@ -1581,47 +1542,6 @@ not_supp: return rc; } -#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS -static struct dentry *at86rf230_debugfs_root; - -static int at86rf230_stats_show(struct seq_file *file, void *offset) -{ - struct at86rf230_local *lp = file->private; - - seq_printf(file, "SUCCESS:\t\t%8llu\n", 
lp->trac.success); - seq_printf(file, "SUCCESS_DATA_PENDING:\t%8llu\n", - lp->trac.success_data_pending); - seq_printf(file, "SUCCESS_WAIT_FOR_ACK:\t%8llu\n", - lp->trac.success_wait_for_ack); - seq_printf(file, "CHANNEL_ACCESS_FAILURE:\t%8llu\n", - lp->trac.channel_access_failure); - seq_printf(file, "NO_ACK:\t\t\t%8llu\n", lp->trac.no_ack); - seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid); - return 0; -} -DEFINE_SHOW_ATTRIBUTE(at86rf230_stats); - -static void at86rf230_debugfs_init(struct at86rf230_local *lp) -{ - char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-"; - - strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); - - at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); - - debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp, - &at86rf230_stats_fops); -} - -static void at86rf230_debugfs_remove(void) -{ - debugfs_remove_recursive(at86rf230_debugfs_root); -} -#else -static void at86rf230_debugfs_init(struct at86rf230_local *lp) { } -static void at86rf230_debugfs_remove(void) { } -#endif - static int at86rf230_probe(struct spi_device *spi) { struct ieee802154_hw *hw; @@ -1718,16 +1638,12 @@ static int at86rf230_probe(struct spi_device *spi) /* going into sleep by default */ at86rf230_sleep(lp); - at86rf230_debugfs_init(lp); - rc = ieee802154_register_hw(lp->hw); if (rc) - goto free_debugfs; + goto free_dev; return rc; -free_debugfs: - at86rf230_debugfs_remove(); free_dev: ieee802154_free_hw(lp->hw); @@ -1742,7 +1658,6 @@ static int at86rf230_remove(struct spi_device *spi) at86rf230_write_subreg(lp, SR_IRQ_MASK, 0); ieee802154_unregister_hw(lp->hw); ieee802154_free_hw(lp->hw); - at86rf230_debugfs_remove(); dev_dbg(&spi->dev, "unregistered at86rf230\n"); return 0; -- cgit v1.2.3 From 35f34ee102a5b2376f4b4700ac017b8f2770e754 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 Apr 2022 12:09:01 +0200 Subject: net: ieee802154: atusb: Call _xmit_hw_error() upon transmission error ieee802154_xmit_hw_error() is the right helper to call when a transmission has failed for a non-determined (and probably not IEEE802.15.4 specific) reason. Let's use this helper instead of open-coding it. Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220407100903.1695973-9-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/atusb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 9b41c7654f6b..2c338783893d 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -206,9 +206,7 @@ static void atusb_tx_done(struct atusb *atusb, u8 seq) * unlikely case now that seq == expect is then true, but can * happen and fail with a tx_skb = NULL; */ - ieee802154_wake_queue(atusb->hw); - if (atusb->tx_skb) - dev_kfree_skb_irq(atusb->tx_skb); + ieee802154_xmit_hw_error(atusb->hw, atusb->tx_skb); } } -- cgit v1.2.3 From ab191c1cff9cd77ca14ac888795029422669b4e1 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 Apr 2022 12:09:02 +0200 Subject: net: ieee802154: ca8210: Use core return codes instead of hardcoding them All the error codes defined in this driver are generic and already defined in the ieee802154 main header. Let's just get rid of these extra definition and switch to the core's values. There is no functional change. 
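One way to see why this is not a functional change (a hedged, hypothetical check, not something the patch adds): the core enumeration is expected to carry the same IEEE 802.15.4 status values the driver used to hardcode, so the raw status bytes returned by the transceiver keep matching. Assuming linux/ieee802154.h and linux/build_bug.h are in scope, that expectation could be spelled out as:

    /* Illustrative compile-time check only; 0xF1 and 0xFF are the values
     * the removed MAC_TRANSACTION_OVERFLOW and MAC_SYSTEM_ERROR macros had.
     */
    static_assert(IEEE802154_TRANSACTION_OVERFLOW == 0xF1);
    static_assert(IEEE802154_SYSTEM_ERROR == 0xFF);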
Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220407100903.1695973-10-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/ca8210.c | 178 +++++++++++++++------------------------- 1 file changed, 68 insertions(+), 110 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index fc74fa0f1ddd..116aece191cd 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -89,48 +89,6 @@ #define CA8210_TEST_INT_FILE_NAME "ca8210_test" #define CA8210_TEST_INT_FIFO_SIZE 256 -/* MAC status enumerations */ -#define MAC_SUCCESS (0x00) -#define MAC_ERROR (0x01) -#define MAC_CANCELLED (0x02) -#define MAC_READY_FOR_POLL (0x03) -#define MAC_COUNTER_ERROR (0xDB) -#define MAC_IMPROPER_KEY_TYPE (0xDC) -#define MAC_IMPROPER_SECURITY_LEVEL (0xDD) -#define MAC_UNSUPPORTED_LEGACY (0xDE) -#define MAC_UNSUPPORTED_SECURITY (0xDF) -#define MAC_BEACON_LOST (0xE0) -#define MAC_CHANNEL_ACCESS_FAILURE (0xE1) -#define MAC_DENIED (0xE2) -#define MAC_DISABLE_TRX_FAILURE (0xE3) -#define MAC_SECURITY_ERROR (0xE4) -#define MAC_FRAME_TOO_LONG (0xE5) -#define MAC_INVALID_GTS (0xE6) -#define MAC_INVALID_HANDLE (0xE7) -#define MAC_INVALID_PARAMETER (0xE8) -#define MAC_NO_ACK (0xE9) -#define MAC_NO_BEACON (0xEA) -#define MAC_NO_DATA (0xEB) -#define MAC_NO_SHORT_ADDRESS (0xEC) -#define MAC_OUT_OF_CAP (0xED) -#define MAC_PAN_ID_CONFLICT (0xEE) -#define MAC_REALIGNMENT (0xEF) -#define MAC_TRANSACTION_EXPIRED (0xF0) -#define MAC_TRANSACTION_OVERFLOW (0xF1) -#define MAC_TX_ACTIVE (0xF2) -#define MAC_UNAVAILABLE_KEY (0xF3) -#define MAC_UNSUPPORTED_ATTRIBUTE (0xF4) -#define MAC_INVALID_ADDRESS (0xF5) -#define MAC_ON_TIME_TOO_LONG (0xF6) -#define MAC_PAST_TIME (0xF7) -#define MAC_TRACKING_OFF (0xF8) -#define MAC_INVALID_INDEX (0xF9) -#define MAC_LIMIT_REACHED (0xFA) -#define MAC_READ_ONLY (0xFB) -#define MAC_SCAN_IN_PROGRESS (0xFC) -#define MAC_SUPERFRAME_OVERLAP (0xFD) -#define MAC_SYSTEM_ERROR (0xFF) - /* HWME attribute IDs */ #define HWME_EDTHRESHOLD (0x04) #define HWME_EDVALUE (0x06) @@ -551,58 +509,58 @@ static int link_to_linux_err(int link_status) return link_status; } switch (link_status) { - case MAC_SUCCESS: - case MAC_REALIGNMENT: + case IEEE802154_SUCCESS: + case IEEE802154_REALIGNMENT: return 0; - case MAC_IMPROPER_KEY_TYPE: + case IEEE802154_IMPROPER_KEY_TYPE: return -EKEYREJECTED; - case MAC_IMPROPER_SECURITY_LEVEL: - case MAC_UNSUPPORTED_LEGACY: - case MAC_DENIED: + case IEEE802154_IMPROPER_SECURITY_LEVEL: + case IEEE802154_UNSUPPORTED_LEGACY: + case IEEE802154_DENIED: return -EACCES; - case MAC_BEACON_LOST: - case MAC_NO_ACK: - case MAC_NO_BEACON: + case IEEE802154_BEACON_LOST: + case IEEE802154_NO_ACK: + case IEEE802154_NO_BEACON: return -ENETUNREACH; - case MAC_CHANNEL_ACCESS_FAILURE: - case MAC_TX_ACTIVE: - case MAC_SCAN_IN_PROGRESS: + case IEEE802154_CHANNEL_ACCESS_FAILURE: + case IEEE802154_TX_ACTIVE: + case IEEE802154_SCAN_IN_PROGRESS: return -EBUSY; - case MAC_DISABLE_TRX_FAILURE: - case MAC_OUT_OF_CAP: + case IEEE802154_DISABLE_TRX_FAILURE: + case IEEE802154_OUT_OF_CAP: return -EAGAIN; - case MAC_FRAME_TOO_LONG: + case IEEE802154_FRAME_TOO_LONG: return -EMSGSIZE; - case MAC_INVALID_GTS: - case MAC_PAST_TIME: + case IEEE802154_INVALID_GTS: + case IEEE802154_PAST_TIME: return -EBADSLT; - case MAC_INVALID_HANDLE: + case IEEE802154_INVALID_HANDLE: return -EBADMSG; - case MAC_INVALID_PARAMETER: - case MAC_UNSUPPORTED_ATTRIBUTE: - case MAC_ON_TIME_TOO_LONG: - case 
MAC_INVALID_INDEX: + case IEEE802154_INVALID_PARAMETER: + case IEEE802154_UNSUPPORTED_ATTRIBUTE: + case IEEE802154_ON_TIME_TOO_LONG: + case IEEE802154_INVALID_INDEX: return -EINVAL; - case MAC_NO_DATA: + case IEEE802154_NO_DATA: return -ENODATA; - case MAC_NO_SHORT_ADDRESS: + case IEEE802154_NO_SHORT_ADDRESS: return -EFAULT; - case MAC_PAN_ID_CONFLICT: + case IEEE802154_PAN_ID_CONFLICT: return -EADDRINUSE; - case MAC_TRANSACTION_EXPIRED: + case IEEE802154_TRANSACTION_EXPIRED: return -ETIME; - case MAC_TRANSACTION_OVERFLOW: + case IEEE802154_TRANSACTION_OVERFLOW: return -ENOBUFS; - case MAC_UNAVAILABLE_KEY: + case IEEE802154_UNAVAILABLE_KEY: return -ENOKEY; - case MAC_INVALID_ADDRESS: + case IEEE802154_INVALID_ADDRESS: return -ENXIO; - case MAC_TRACKING_OFF: - case MAC_SUPERFRAME_OVERLAP: + case IEEE802154_TRACKING_OFF: + case IEEE802154_SUPERFRAME_OVERLAP: return -EREMOTEIO; - case MAC_LIMIT_REACHED: + case IEEE802154_LIMIT_REACHED: return -EDQUOT; - case MAC_READ_ONLY: + case IEEE802154_READ_ONLY: return -EROFS; default: return -EPROTO; @@ -754,7 +712,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl) ca8210_net_rx(priv->hw, buf, len); if (buf[0] == SPI_MCPS_DATA_CONFIRM) { - if (buf[3] == MAC_TRANSACTION_OVERFLOW) { + if (buf[3] == IEEE802154_TRANSACTION_OVERFLOW) { dev_info( &priv->spi->dev, "Waiting for transaction overflow to stabilise...\n"); @@ -1128,7 +1086,7 @@ static u8 tdme_setsfr_request_sync( ); if (ret) { dev_crit(&spi->dev, "cascoda_api_downstream returned %d", ret); - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_TDME_SETSFR_CONFIRM) { @@ -1137,7 +1095,7 @@ static u8 tdme_setsfr_request_sync( "sync response to SPI_TDME_SETSFR_REQUEST was not SPI_TDME_SETSFR_CONFIRM, it was %d\n", response.command_id ); - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } return response.pdata.tdme_set_sfr_cnf.status; @@ -1151,7 +1109,7 @@ static u8 tdme_setsfr_request_sync( */ static u8 tdme_chipinit(void *device_ref) { - u8 status = MAC_SUCCESS; + u8 status = IEEE802154_SUCCESS; u8 sfr_address; struct spi_device *spi = device_ref; struct preamble_cfg_sfr pre_cfg_value = { @@ -1220,7 +1178,7 @@ static u8 tdme_chipinit(void *device_ref) goto finish; finish: - if (status != MAC_SUCCESS) { + if (status != IEEE802154_SUCCESS) { dev_err( &spi->dev, "failed to set sfr at %#03x, status = %#03x\n", @@ -1287,7 +1245,7 @@ static u8 tdme_checkpibattribute( const void *pib_attribute_value ) { - u8 status = MAC_SUCCESS; + u8 status = IEEE802154_SUCCESS; u8 value; value = *((u8 *)pib_attribute_value); @@ -1296,52 +1254,52 @@ static u8 tdme_checkpibattribute( /* PHY */ case PHY_TRANSMIT_POWER: if (value > 0x3F) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case PHY_CCA_MODE: if (value > 0x03) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; /* MAC */ case MAC_BATT_LIFE_EXT_PERIODS: if (value < 6 || value > 41) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD: if (pib_attribute_length > MAX_BEACON_PAYLOAD_LENGTH) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_PAYLOAD_LENGTH: if (value > MAX_BEACON_PAYLOAD_LENGTH) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_BEACON_ORDER: if (value > 15) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_BE: if (value < 3 || value > 8) - status = 
MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_CSMA_BACKOFFS: if (value > 5) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MAX_FRAME_RETRIES: if (value > 7) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_MIN_BE: if (value > 8) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_RESPONSE_WAIT_TIME: if (value < 2 || value > 64) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_SUPERFRAME_ORDER: if (value > 15) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; /* boolean */ case MAC_ASSOCIATED_PAN_COORD: @@ -1353,16 +1311,16 @@ static u8 tdme_checkpibattribute( case MAC_RX_ON_WHEN_IDLE: case MAC_SECURITY_ENABLED: if (value > 1) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; /* MAC SEC */ case MAC_AUTO_REQUEST_SECURITY_LEVEL: if (value > 7) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; case MAC_AUTO_REQUEST_KEY_ID_MODE: if (value > 3) - status = MAC_INVALID_PARAMETER; + status = IEEE802154_INVALID_PARAMETER; break; default: break; @@ -1522,9 +1480,9 @@ static u8 mcps_data_request( if (ca8210_spi_transfer(device_ref, &command.command_id, command.length + 2)) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; - return MAC_SUCCESS; + return IEEE802154_SUCCESS; } /** @@ -1553,11 +1511,11 @@ static u8 mlme_reset_request_sync( &response.command_id, device_ref)) { dev_err(&spi->dev, "cascoda_api_downstream failed\n"); - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_RESET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; status = response.pdata.status; @@ -1600,7 +1558,7 @@ static u8 mlme_set_request_sync( */ if (tdme_checkpibattribute( pib_attribute, pib_attribute_length, pib_attribute_value)) { - return MAC_INVALID_PARAMETER; + return IEEE802154_INVALID_PARAMETER; } if (pib_attribute == PHY_CURRENT_CHANNEL) { @@ -1636,11 +1594,11 @@ static u8 mlme_set_request_sync( command.length + 2, &response.command_id, device_ref)) { - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_MLME_SET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; return response.pdata.status; } @@ -1678,11 +1636,11 @@ static u8 hwme_set_request_sync( command.length + 2, &response.command_id, device_ref)) { - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_HWME_SET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; return response.pdata.hwme_set_cnf.status; } @@ -1714,13 +1672,13 @@ static u8 hwme_get_request_sync( command.length + 2, &response.command_id, device_ref)) { - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; } if (response.command_id != SPI_HWME_GET_CONFIRM) - return MAC_SYSTEM_ERROR; + return IEEE802154_SYSTEM_ERROR; - if (response.pdata.hwme_get_cnf.status == MAC_SUCCESS) { + if (response.pdata.hwme_get_cnf.status == IEEE802154_SUCCESS) { *hw_attribute_length = response.pdata.hwme_get_cnf.hw_attribute_length; memcpy( @@ -1770,7 +1728,7 @@ static int ca8210_async_xmit_complete( "Link transmission unsuccessful, status = %d\n", status ); - if (status != MAC_TRANSACTION_OVERFLOW) { + if (status != IEEE802154_TRANSACTION_OVERFLOW) { dev_kfree_skb_any(priv->tx_skb); ieee802154_wake_queue(priv->hw); return 0; 
@@ -2436,7 +2394,7 @@ static int ca8210_test_check_upstream(u8 *buf, void *device_ref) if (ret) { response[0] = SPI_MLME_SET_CONFIRM; response[1] = 3; - response[2] = MAC_INVALID_PARAMETER; + response[2] = IEEE802154_INVALID_PARAMETER; response[3] = buf[2]; response[4] = buf[3]; if (cascoda_api_upstream) -- cgit v1.2.3 From 510ce586320dafc4eebfcb4472a0f972e6c35ada Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Thu, 7 Apr 2022 12:09:03 +0200 Subject: net: ieee802154: ca8210: Call _xmit_error() when a transmission fails ieee802154_xmit_error() is the right helper to call when a transmission has failed. Let's use it instead of open-coding it. Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220407100903.1695973-11-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/ca8210.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 116aece191cd..3c00310d26e8 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1729,8 +1729,7 @@ static int ca8210_async_xmit_complete( status ); if (status != IEEE802154_TRANSACTION_OVERFLOW) { - dev_kfree_skb_any(priv->tx_skb); - ieee802154_wake_queue(priv->hw); + ieee802154_xmit_error(priv->hw, priv->tx_skb, status); return 0; } } -- cgit v1.2.3 From cc271ab86606c963e97915d84be34ae4bd067578 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Fri, 22 Apr 2022 12:01:24 +0900 Subject: wwan_hwsim: Avoid flush_scheduled_work() usage Flushing system-wide workqueues is dangerous and will be forbidden. Replace system_wq with local wwan_wq. While we are at it, make err_clean_devs: label of wwan_hwsim_init() behave like wwan_hwsim_exit(), for it is theoretically possible to call wwan_hwsim_debugfs_devcreate_write()/wwan_hwsim_debugfs_devdestroy_write() by the moment wwan_hwsim_init_devs() returns. Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa Reviewed-by: Sergey Ryazanov Reviewed-by: Loic Poulain Link: https://lore.kernel.org/r/7390d51f-60e2-3cee-5277-b819a55ceabe@I-love.SAKURA.ne.jp Signed-off-by: Jakub Kicinski --- drivers/net/wwan/wwan_hwsim.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c index 5b62cf3b3c42..fad642f9ffd8 100644 --- a/drivers/net/wwan/wwan_hwsim.c +++ b/drivers/net/wwan/wwan_hwsim.c @@ -33,6 +33,7 @@ static struct dentry *wwan_hwsim_debugfs_devcreate; static DEFINE_SPINLOCK(wwan_hwsim_devs_lock); static LIST_HEAD(wwan_hwsim_devs); static unsigned int wwan_hwsim_dev_idx; +static struct workqueue_struct *wwan_wq; struct wwan_hwsim_dev { struct list_head list; @@ -371,7 +372,7 @@ static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file, * waiting this callback to finish in the debugfs_remove() call. So, * use workqueue. */ - schedule_work(&port->del_work); + queue_work(wwan_wq, &port->del_work); return count; } @@ -416,7 +417,7 @@ static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file, * waiting this callback to finish in the debugfs_remove() call. So, * use workqueue. 
*/ - schedule_work(&dev->del_work); + queue_work(wwan_wq, &dev->del_work); return count; } @@ -506,9 +507,15 @@ static int __init wwan_hwsim_init(void) if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128) return -EINVAL; + wwan_wq = alloc_workqueue("wwan_wq", 0, 0); + if (!wwan_wq) + return -ENOMEM; + wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim"); - if (IS_ERR(wwan_hwsim_class)) - return PTR_ERR(wwan_hwsim_class); + if (IS_ERR(wwan_hwsim_class)) { + err = PTR_ERR(wwan_hwsim_class); + goto err_wq_destroy; + } wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL); wwan_hwsim_debugfs_devcreate = @@ -523,9 +530,13 @@ static int __init wwan_hwsim_init(void) return 0; err_clean_devs: + debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */ wwan_hwsim_free_devs(); + flush_workqueue(wwan_wq); /* Wait deletion works completion */ debugfs_remove(wwan_hwsim_debugfs_topdir); class_destroy(wwan_hwsim_class); +err_wq_destroy: + destroy_workqueue(wwan_wq); return err; } @@ -534,9 +545,10 @@ static void __exit wwan_hwsim_exit(void) { debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */ wwan_hwsim_free_devs(); - flush_scheduled_work(); /* Wait deletion works completion */ + flush_workqueue(wwan_wq); /* Wait deletion works completion */ debugfs_remove(wwan_hwsim_debugfs_topdir); class_destroy(wwan_hwsim_class); + destroy_workqueue(wwan_wq); } module_init(wwan_hwsim_init); -- cgit v1.2.3 From fb0a43f5bd454dfae94aeb293b32669c6ef83b37 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Sat, 23 Apr 2022 21:17:27 +0530 Subject: net: phy: LAN937x: add interrupt support for link detection Added the config_intr and handle_interrupt for the LAN937x phy which is same as the LAN87xx phy. Signed-off-by: Arun Ramadoss Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20220423154727.29052-1-arun.ramadoss@microchip.com Signed-off-by: Paolo Abeni --- drivers/net/phy/microchip_t1.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index 796fbcb7dafe..d4c93d59bc53 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -792,6 +792,8 @@ static struct phy_driver microchip_t1_phy_driver[] = { .flags = PHY_POLL_CABLE_TEST, .features = PHY_BASIC_T1_FEATURES, .config_init = lan87xx_config_init, + .config_intr = lan87xx_phy_config_intr, + .handle_interrupt = lan87xx_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, .config_aneg = lan87xx_config_aneg, -- cgit v1.2.3 From de6dd626d7082eda383ec77a5e06093c82122d10 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Sun, 24 Apr 2022 16:58:31 +0530 Subject: net: dsa: ksz: added the generic port_stp_state_set function The ksz8795 and ksz9477 uses the same algorithm for the port_stp_state_set function except the register address is different. So moved the algorithm to the ksz_common.c and used the dev_ops for register read and write. This function can also used for the lan937x part. Hence making it generic for all the parts. 
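A minimal usage sketch for the lan937x reuse mentioned above (hypothetical: the function name and the LAN937X_STP_CTRL placeholder register are not part of this patch), showing that a new chip family only supplies its own register offset while the read-modify-write and port-member update stay in ksz_common.c:

    static void lan937x_port_stp_state_set(struct dsa_switch *ds, int port,
                                           u8 state)
    {
            /* LAN937X_STP_CTRL stands in for the chip's STP control register */
            ksz_port_stp_state_set(ds, port, state, LAN937X_STP_CTRL);
    }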
Signed-off-by: Arun Ramadoss Reviewed-by: Florian Fainelli Link: https://lore.kernel.org/r/20220424112831.11504-1-arun.ramadoss@microchip.com Signed-off-by: Paolo Abeni --- drivers/net/dsa/microchip/ksz8795.c | 35 +---------------------------- drivers/net/dsa/microchip/ksz8795_reg.h | 3 --- drivers/net/dsa/microchip/ksz9477.c | 33 +-------------------------- drivers/net/dsa/microchip/ksz9477_reg.h | 4 ---- drivers/net/dsa/microchip/ksz_common.c | 40 +++++++++++++++++++++++++++++++++ drivers/net/dsa/microchip/ksz_common.h | 7 ++++++ 6 files changed, 49 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index b2752978cb09..f91deea9368e 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1027,40 +1027,7 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - struct ksz_device *dev = ds->priv; - struct ksz_port *p; - u8 data; - - ksz_pread8(dev, port, P_STP_CTRL, &data); - data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); - - switch (state) { - case BR_STATE_DISABLED: - data |= PORT_LEARN_DISABLE; - break; - case BR_STATE_LISTENING: - data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - break; - case BR_STATE_LEARNING: - data |= PORT_RX_ENABLE; - break; - case BR_STATE_FORWARDING: - data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - break; - case BR_STATE_BLOCKING: - data |= PORT_LEARN_DISABLE; - break; - default: - dev_err(ds->dev, "invalid STP state: %d\n", state); - return; - } - - ksz_pwrite8(dev, port, P_STP_CTRL, data); - - p = &dev->ports[port]; - p->stp_state = state; - - ksz_update_port_member(dev, port); + ksz_port_stp_state_set(ds, port, state, P_STP_CTRL); } static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index d74defcd86b4..4109433b6b6c 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -160,9 +160,6 @@ #define PORT_DISCARD_NON_VID BIT(5) #define PORT_FORCE_FLOW_CTRL BIT(4) #define PORT_BACK_PRESSURE BIT(3) -#define PORT_TX_ENABLE BIT(2) -#define PORT_RX_ENABLE BIT(1) -#define PORT_LEARN_DISABLE BIT(0) #define REG_PORT_1_CTRL_3 0x13 #define REG_PORT_2_CTRL_3 0x23 diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 8222c8a6c5ec..4f617fee9a4e 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -517,38 +517,7 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - struct ksz_device *dev = ds->priv; - struct ksz_port *p = &dev->ports[port]; - u8 data; - - ksz_pread8(dev, port, P_STP_CTRL, &data); - data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); - - switch (state) { - case BR_STATE_DISABLED: - data |= PORT_LEARN_DISABLE; - break; - case BR_STATE_LISTENING: - data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - break; - case BR_STATE_LEARNING: - data |= PORT_RX_ENABLE; - break; - case BR_STATE_FORWARDING: - data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - break; - case BR_STATE_BLOCKING: - data |= PORT_LEARN_DISABLE; - break; - default: - dev_err(ds->dev, "invalid STP state: %d\n", state); - return; - } - - ksz_pwrite8(dev, port, P_STP_CTRL, data); - p->stp_state = state; - - 
ksz_update_port_member(dev, port); + ksz_port_stp_state_set(ds, port, state, P_STP_CTRL); } static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index 0bd58467181f..7a2c8d4767af 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -1586,10 +1586,6 @@ #define REG_PORT_LUE_MSTP_STATE 0x0B04 -#define PORT_TX_ENABLE BIT(2) -#define PORT_RX_ENABLE BIT(1) -#define PORT_LEARN_DISABLE BIT(0) - /* C - PTP */ #define REG_PTP_PORT_RX_DELAY__2 0x0C00 diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8014b18d9391..9b9f570ebb0b 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -372,6 +372,46 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) } EXPORT_SYMBOL_GPL(ksz_enable_port); +void ksz_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state, int reg) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p; + u8 data; + + ksz_pread8(dev, port, reg, &data); + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); + + switch (state) { + case BR_STATE_DISABLED: + data |= PORT_LEARN_DISABLE; + break; + case BR_STATE_LISTENING: + data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); + break; + case BR_STATE_LEARNING: + data |= PORT_RX_ENABLE; + break; + case BR_STATE_FORWARDING: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + break; + case BR_STATE_BLOCKING: + data |= PORT_LEARN_DISABLE; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + ksz_pwrite8(dev, port, reg, data); + + p = &dev->ports[port]; + p->stp_state = state; + + ksz_update_port_member(dev, port); +} +EXPORT_SYMBOL_GPL(ksz_port_stp_state_set); + struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) { struct dsa_switch *ds; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 485d4a948c38..4d978832c448 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -165,6 +165,8 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge); +void ksz_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state, int reg); void ksz_port_fast_age(struct dsa_switch *ds, int port); int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); @@ -292,6 +294,11 @@ static inline void ksz_regmap_unlock(void *__mtx) mutex_unlock(mtx); } +/* STP State Defines */ +#define PORT_TX_ENABLE BIT(2) +#define PORT_RX_ENABLE BIT(1) +#define PORT_LEARN_DISABLE BIT(0) + /* Regmap tables generation */ #define KSZ_SPI_OP_RD 3 #define KSZ_SPI_OP_WR 2 -- cgit v1.2.3 From 561215482cc69d1c758944d4463b3d5d96d37bd1 Mon Sep 17 00:00:00 2001 From: Ethan Yang Date: Mon, 25 Apr 2022 13:40:28 +0800 Subject: net: usb: qmi_wwan: add support for Sierra Wireless EM7590 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add support for Sierra Wireless EM7590 0xc081 composition. 
Signed-off-by: Ethan Yang Acked-by: Bjørn Mork Link: https://lore.kernel.org/r/20220425054028.5444-1-etyang@sierrawireless.com Signed-off-by: Paolo Abeni --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9e2f48c2e85e..79f8bd849b1a 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1350,6 +1350,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */ {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ + {QMI_QUIRK_SET_DTR(0x1199, 0xc081, 8)}, /* Sierra Wireless EM7590 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ -- cgit v1.2.3 From fae4630840323abad04e6aeaeda14e9f4d01806f Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 25 Apr 2022 22:28:02 +0100 Subject: net: dsa: mt753x: fix pcs conversion regression Daniel Golle reports that the conversion of mt753x to phylink PCS caused an oops as below. The problem is with the placement of the PCS initialisation, which occurs after mt7531_setup() has been called. However, burited in this function is a call to setup the CPU port, which requires the PCS structure to be already setup. Fix this by changing the initialisation order. Unable to handle kernel NULL pointer dereference at virtual address 0000000000000020 Mem abort info: ESR = 0x96000005 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 FSC = 0x05: level 1 translation fault Data abort info: ISV = 0, ISS = 0x00000005 CM = 0, WnR = 0 user pgtable: 4k pages, 39-bit VAs, pgdp=0000000046057000 [0000000000000020] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000 Internal error: Oops: 96000005 [#1] SMP Modules linked in: CPU: 0 PID: 32 Comm: kworker/u4:1 Tainted: G S 5.18.0-rc3-next-20220422+ #0 Hardware name: Bananapi BPI-R64 (DT) Workqueue: events_unbound deferred_probe_work_func pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : mt7531_cpu_port_config+0xcc/0x1b0 lr : mt7531_cpu_port_config+0xc0/0x1b0 sp : ffffffc008d5b980 x29: ffffffc008d5b990 x28: ffffff80060562c8 x27: 00000000f805633b x26: ffffff80001a8880 x25: 00000000000009c4 x24: 0000000000000016 x23: ffffff8005eb6470 x22: 0000000000003600 x21: ffffff8006948080 x20: 0000000000000000 x19: 0000000000000006 x18: 0000000000000000 x17: 0000000000000001 x16: 0000000000000001 x15: 02963607fcee069e x14: 0000000000000000 x13: 0000000000000030 x12: 0101010101010101 x11: ffffffc037302000 x10: 0000000000000870 x9 : ffffffc008d5b800 x8 : ffffff800028f950 x7 : 0000000000000001 x6 : 00000000662b3000 x5 : 00000000000002f0 x4 : 0000000000000000 x3 : ffffff800028f080 x2 : 0000000000000000 x1 : ffffff800028f080 x0 : 0000000000000000 Call trace: mt7531_cpu_port_config+0xcc/0x1b0 mt753x_cpu_port_enable+0x24/0x1f0 mt7531_setup+0x49c/0x5c0 mt753x_setup+0x20/0x31c dsa_register_switch+0x8bc/0x1020 mt7530_probe+0x118/0x200 mdio_probe+0x30/0x64 really_probe.part.0+0x98/0x280 __driver_probe_device+0x94/0x140 driver_probe_device+0x40/0x114 __device_attach_driver+0xb0/0x10c bus_for_each_drv+0x64/0xa0 __device_attach+0xa8/0x16c device_initial_probe+0x10/0x20 bus_probe_device+0x94/0x9c deferred_probe_work_func+0x80/0xb4 
process_one_work+0x200/0x3a0 worker_thread+0x260/0x4c0 kthread+0xd4/0xe0 ret_from_fork+0x10/0x20 Code: 9409e911 937b7e60 8b0002a0 f9405800 (f9401005) ---[ end trace 0000000000000000 ]--- Reported-by: Daniel Golle Tested-by: Daniel Golle Fixes: cbd1f243bc41 ("net: dsa: mt7530: partially convert to phylink_pcs") Signed-off-by: Russell King (Oracle) Reviewed-by: Florian Fainelli Link: https://lore.kernel.org/r/E1nj6FW-007WZB-5Y@rmk-PC.armlinux.org.uk Signed-off-by: Jakub Kicinski --- drivers/net/dsa/mt7530.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index c4ea73d996e8..e55d962fef9f 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -3035,9 +3035,16 @@ static int mt753x_setup(struct dsa_switch *ds) { struct mt7530_priv *priv = ds->priv; - int ret = priv->info->sw_setup(ds); - int i; + int i, ret; + + /* Initialise the PCS devices */ + for (i = 0; i < priv->ds->num_ports; i++) { + priv->pcs[i].pcs.ops = priv->info->pcs_ops; + priv->pcs[i].priv = priv; + priv->pcs[i].port = i; + } + ret = priv->info->sw_setup(ds); if (ret) return ret; @@ -3049,13 +3056,6 @@ mt753x_setup(struct dsa_switch *ds) if (ret && priv->irq) mt7530_free_irq_common(priv); - /* Initialise the PCS devices */ - for (i = 0; i < priv->ds->num_ports; i++) { - priv->pcs[i].pcs.ops = priv->info->pcs_ops; - priv->pcs[i].priv = priv; - priv->pcs[i].port = i; - } - return ret; } -- cgit v1.2.3 From dac173db114d1309dadf50465ca549231deaf59b Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 25 Apr 2022 16:26:43 -0500 Subject: net: wan: atp: remove unused eeprom_delay() atp.h is included only by atp.c, which does not use eeprom_delay(). Remove the unused definition. Signed-off-by: Bjorn Helgaas Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/realtek/atp.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h index 63f0d2d0e87b..b202184eddd4 100644 --- a/drivers/net/ethernet/realtek/atp.h +++ b/drivers/net/ethernet/realtek/atp.h @@ -255,10 +255,6 @@ static inline void write_word_mode0(short ioaddr, unsigned short value) #define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */ #define EE_DATA_READ 0x08 /* EEPROM chip data out. */ -/* Delay between EEPROM clock transitions. */ -#define eeprom_delay(ticks) \ -do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0) - /* The EEPROM commands include the alway-set leading bit. */ #define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17) #define EE_READ(offset) (((6 << 6) + (offset)) << 17) -- cgit v1.2.3 From e39f63fe0d94ddc8a54929e90ecf93078538d63b Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Mon, 25 Apr 2022 16:26:44 -0500 Subject: net: remove comments that mention obsolete __SLOW_DOWN_IO The only remaining definitions of __SLOW_DOWN_IO (for alpha and ia64) do nothing, and the only mentions in networking are in comments. Remove these mentions. 
Signed-off-by: Bjorn Helgaas Signed-off-by: Jakub Kicinski --- drivers/atm/nicstarmac.c | 5 ----- drivers/net/ethernet/dec/tulip/winbond-840.c | 2 -- drivers/net/ethernet/natsemi/natsemi.c | 2 -- 3 files changed, 9 deletions(-) (limited to 'drivers') diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c index e0dda9062e6b..791f69a07ddf 100644 --- a/drivers/atm/nicstarmac.c +++ b/drivers/atm/nicstarmac.c @@ -14,11 +14,6 @@ typedef void __iomem *virt_addr_t; #define CYCLE_DELAY 5 -/* - This was the original definition -#define osp_MicroDelay(microsec) \ - do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0) -*/ #define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \ udelay((useconds));} /* diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 86b1d23eba83..1db19463fd46 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -474,8 +474,6 @@ err_out_netdev: No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that made udelay() unreliable. - The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is - deprecated. */ #define eeprom_delay(ee_addr) ioread32(ee_addr) diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 82a22711ce45..50bca486a244 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -989,8 +989,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that made udelay() unreliable. - The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is - deprecated. */ #define eeprom_delay(ee_addr) readl(ee_addr) -- cgit v1.2.3 From b1190d5175acb2c9feea69871737274eb6f462ff Mon Sep 17 00:00:00 2001 From: Marcel Ziswiler Date: Mon, 25 Apr 2022 17:48:56 +0200 Subject: net: stmmac: dwmac-imx: comment spelling fix Fix spelling in comment. Fixes: 94abdad6974a ("net: ethernet: dwmac: add ethernet glue logic for NXP imx8 chip") Signed-off-by: Marcel Ziswiler Link: https://lore.kernel.org/r/20220425154856.169499-1-marcel@ziswiler.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 84651207a1de..bd52fb7cf486 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -197,9 +197,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) } if (of_machine_is_compatible("fsl,imx8mp")) { - /* Binding doc describes the propety: + /* Binding doc describes the property: is required by i.MX8MP. - is optinoal for i.MX8DXL. + is optional for i.MX8DXL. 
*/ dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode"); if (IS_ERR(dwmac->intf_regmap)) -- cgit v1.2.3 From 255ca28a659d3cfb069f73c7644853ed93aecdb0 Mon Sep 17 00:00:00 2001 From: Andrejs Cainikovs Date: Fri, 22 Apr 2022 11:03:12 +0200 Subject: mwifiex: Select firmware based on strapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some WiFi/Bluetooth modules might have different host connection options, allowing to either use SDIO for both WiFi and Bluetooth, or SDIO for WiFi and UART for Bluetooth. It is possible to detect whether a module has SDIO-SDIO or SDIO-UART connection by reading its host strap register. This change introduces a way to automatically select appropriate firmware depending of the connection method, and removes a need of symlinking or overwriting the original firmware file with a required one. Host strap register used in this commit comes from the NXP driver [1] hosted at Code Aurora. [1] https://source.codeaurora.org/external/imx/linux-imx/tree/drivers/net/wireless/nxp/mxm_wifiex/wlan_src/mlinux/moal_sdio_mmc.c?h=rel_imx_5.4.70_2.3.2&id=688b67b2c7220b01521ffe560da7eee33042c7bd#n1274 Signed-off-by: Andrejs Cainikovs Reviewed-by: Alvin Šipraga Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220422090313.125857-2-andrejs.cainikovs@toradex.com --- drivers/net/wireless/marvell/mwifiex/sdio.c | 21 ++++++++++++++++++++- drivers/net/wireless/marvell/mwifiex/sdio.h | 5 +++++ 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index bde9e4bbfffe..ae85118481ee 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -182,6 +182,9 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = { .host_int_rsr_reg = 0x4, .host_int_status_reg = 0x0C, .host_int_mask_reg = 0x08, + .host_strap_reg = 0xF4, + .host_strap_mask = 0x01, + .host_strap_value = 0x00, .status_reg_0 = 0xE8, .status_reg_1 = 0xE9, .sdio_int_mask = 0xff, @@ -283,6 +286,9 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = { .host_int_rsr_reg = 0x4, .host_int_status_reg = 0x0C, .host_int_mask_reg = 0x08, + .host_strap_reg = 0xF4, + .host_strap_mask = 0x01, + .host_strap_value = 0x00, .status_reg_0 = 0xE8, .status_reg_1 = 0xE9, .sdio_int_mask = 0xff, @@ -536,6 +542,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) struct mwifiex_sdio_device *data = (void *)id->driver_data; card->firmware = data->firmware; + card->firmware_sdiouart = data->firmware_sdiouart; card->reg = data->reg; card->max_ports = data->max_ports; card->mp_agg_pkt_limit = data->mp_agg_pkt_limit; @@ -2439,6 +2446,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) int ret; struct sdio_mmc_card *card = adapter->card; struct sdio_func *func = card->func; + const char *firmware = card->firmware; /* save adapter pointer in card */ card->adapter = adapter; @@ -2455,7 +2463,18 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return ret; } - strcpy(adapter->fw_name, card->firmware); + /* Select correct firmware (sdsd or sdiouart) firmware based on the strapping + * option + */ + if (card->firmware_sdiouart) { + u8 val; + + mwifiex_read_reg(adapter, card->reg->host_strap_reg, &val); + if ((val & card->reg->host_strap_mask) == card->reg->host_strap_value) + firmware = card->firmware_sdiouart; + } + strcpy(adapter->fw_name, 
firmware); + if (card->fw_dump_enh) { adapter->mem_type_mapping_tbl = generic_mem_type_map; adapter->num_mem_types = 1; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index 5648512c9300..ad2c28cbb630 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -196,6 +196,9 @@ struct mwifiex_sdio_card_reg { u8 host_int_rsr_reg; u8 host_int_status_reg; u8 host_int_mask_reg; + u8 host_strap_reg; + u8 host_strap_mask; + u8 host_strap_value; u8 status_reg_0; u8 status_reg_1; u8 sdio_int_mask; @@ -241,6 +244,7 @@ struct sdio_mmc_card { struct completion fw_done; const char *firmware; + const char *firmware_sdiouart; const struct mwifiex_sdio_card_reg *reg; u8 max_ports; u8 mp_agg_pkt_limit; @@ -274,6 +278,7 @@ struct sdio_mmc_card { struct mwifiex_sdio_device { const char *firmware; + const char *firmware_sdiouart; const struct mwifiex_sdio_card_reg *reg; u8 max_ports; u8 mp_agg_pkt_limit; -- cgit v1.2.3 From 562354ab9f0aa4fcd8f2184506dcb9c18a792182 Mon Sep 17 00:00:00 2001 From: Andrejs Cainikovs Date: Fri, 22 Apr 2022 11:03:13 +0200 Subject: mwifiex: Add SD8997 SDIO-UART firmware With a recent change now it is possible to detect the strapping option on SD8997, which allows to pick up a correct firmware for either SDIO-SDIO or SDIO-UART. This commit enables SDIO-UART firmware on SD8997. Signed-off-by: Andrejs Cainikovs Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220422090313.125857-3-andrejs.cainikovs@toradex.com --- drivers/net/wireless/marvell/mwifiex/sdio.c | 2 ++ drivers/net/wireless/marvell/mwifiex/sdio.h | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index ae85118481ee..e931672bb836 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -408,6 +408,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = { static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { .firmware = SD8997_DEFAULT_FW_NAME, + .firmware_sdiouart = SD8997_SDIOUART_FW_NAME, .reg = &mwifiex_reg_sd8997, .max_ports = 32, .mp_agg_pkt_limit = 16, @@ -3176,3 +3177,4 @@ MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME); +MODULE_FIRMWARE(SD8997_SDIOUART_FW_NAME); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index ad2c28cbb630..28e8f76bdd58 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -39,6 +39,7 @@ #define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin" #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin" #define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin" +#define SD8997_SDIOUART_FW_NAME "mrvl/sdiouart8997_combo_v4.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 -- cgit v1.2.3 From fc6234d7e2e322e84ed3c5c6c05a54f611faa9ab Mon Sep 17 00:00:00 2001 From: Kevin Lo Date: Fri, 22 Apr 2022 22:50:54 +0800 Subject: rtw88: use the correct bit in the REG_HCI_OPT_CTRL register Write the BIT_USB_SUS_DIS bit rather than BIT_BT_DIG_CLK_EN to the REG_HCI_OPT_CTRL register for fixing failure to PCIe power on. 
Signed-off-by: Kevin Lo Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/YmLAzuyPr0P4Y6BP@ns.kevlo.org --- drivers/net/wireless/realtek/rtw88/mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index d1678aed9d9c..caf2603da2d6 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -75,7 +75,7 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev) switch (rtw_hci_type(rtwdev)) { case RTW_HCI_TYPE_PCIE: - rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN); + rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS); break; case RTW_HCI_TYPE_USB: break; -- cgit v1.2.3 From 746285cf81dc19502ab238249d75f5990bd2d231 Mon Sep 17 00:00:00 2001 From: Alexander Wetzel Date: Fri, 22 Apr 2022 16:52:28 +0200 Subject: rtl818x: Prevent using not initialized queues Using not existing queues can panic the kernel with rtl8180/rtl8185 cards. Ignore the skb priority for those cards, they only have one tx queue. Pierre Asselin (pa@panix.com) reported the kernel crash in the Gentoo forum: https://forums.gentoo.org/viewtopic-t-1147832-postdays-0-postorder-asc-start-25.html He also confirmed that this patch fixes the issue. In summary this happened: After updating wpa_supplicant from 2.9 to 2.10 the kernel crashed with a "divide error: 0000" when connecting to an AP. Control port tx now tries to use IEEE80211_AC_VO for the priority, which wpa_supplicants starts to use in 2.10. Since only the rtl8187se part of the driver supports QoS, the priority of the skb is set to IEEE80211_AC_BE (2) by mac80211 for rtl8180/rtl8185 cards. rtl8180 is then unconditionally reading out the priority and finally crashes on drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c line 544 without this patch: idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries "ring->entries" is zero for rtl8180/rtl8185 cards, tx_ring[2] never got initialized. 
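A "divide error: 0000" on x86 is the exception raised for an integer division by zero: the modulo above divides by ring->entries, which is 0 for the never initialized tx_ring[2]. The fix boils down to the guard sketched here in slightly simplified form (the full hunk follows below):

	unsigned int prio = 0;

	/* rtl8180/rtl8185 expose only one usable TX queue; trust the skb's
	 * queue mapping only when the hardware really has the QoS queues.
	 */
	if (dev->queues > IEEE80211_AC_BK)
		prio = skb_get_queue_mapping(skb);

	ring = &priv->tx_ring[prio];	/* ring->entries is non-zero here */
	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;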
Cc: stable@vger.kernel.org Reported-by: pa@panix.com Tested-by: pa@panix.com Signed-off-by: Alexander Wetzel Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220422145228.7567-1-alexander@wetzel-home.de --- drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index 2477e18c7cae..025619cd14e8 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev, struct rtl8180_priv *priv = dev->priv; struct rtl8180_tx_ring *ring; struct rtl8180_tx_desc *entry; + unsigned int prio = 0; unsigned long flags; - unsigned int idx, prio, hw_prio; + unsigned int idx, hw_prio; + dma_addr_t mapping; u32 tx_flags; u8 rc_flags; @@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_hw *dev, /* do arithmetic and then convert to le16 */ u16 frame_duration = 0; - prio = skb_get_queue_mapping(skb); + /* rtl8180/rtl8185 only has one useable tx queue */ + if (dev->queues > IEEE80211_AC_BK) + prio = skb_get_queue_mapping(skb); ring = &priv->tx_ring[prio]; mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len, -- cgit v1.2.3 From 21947f3a74d6c90508e1065d649791c9a38d515b Mon Sep 17 00:00:00 2001 From: Hamid Zamani Date: Sat, 23 Apr 2022 15:42:37 +0430 Subject: brcmfmac: use ISO3166 country code and 0 rev as fallback on brcmfmac43602 chips This uses ISO3166 country code and 0 rev on brcmfmac43602 chips. Without this patch 80 MHz width is not selected on 5 GHz channels. Commit a21bf90e927f ("brcmfmac: use ISO3166 country code and 0 rev as fallback on some devices") provides a way to specify chips for using the fallback case. Before commit 151a7c12c4fc ("Revert "brcmfmac: use ISO3166 country code and 0 rev as fallback"") brcmfmac43602 devices works correctly and for this specific case 80 MHz width is selected. Signed-off-by: Hamid Zamani Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220423111237.60892-1-hzamani.cs91@gmail.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index f0ad1e23f3c8..360b103fe898 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -7481,6 +7481,7 @@ static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr) { switch (drvr->bus_if->chip) { case BRCM_CC_4345_CHIP_ID: + case BRCM_CC_43602_CHIP_ID: return true; default: return false; -- cgit v1.2.3 From 8c783024d6acbd7a18b94ae7ed0e0ed4f163d0ba Mon Sep 17 00:00:00 2001 From: Guo Zhengkui Date: Mon, 25 Apr 2022 11:17:23 +0800 Subject: rtlwifi: btcoex: fix if == else warning Fix the following coccicheck warning: drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c:1604:2-4: WARNING: possible condition with no effect (if == else). 
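For readers unfamiliar with this coccicheck class: it flags an if/else whose branches are identical, which can be collapsed into a single unconditional statement. A generic illustration (not the driver code):

	if (cond)
		do_work();
	else
		do_work();	/* identical to the if-branch: drop the condition */

In the hunk below both branches issue the same btc8821a1ant_ps_tdma() call, so the BT RSSI check is removed and a single call kept.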
Signed-off-by: Guo Zhengkui Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220425031725.5808-1-guozhengkui@vivo.com --- .../wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index a18dffc8753a..67d0b9aee064 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -1600,18 +1600,10 @@ static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, coex_dm->auto_tdma_adjust = false; } } else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { - /* HID+A2DP */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->auto_tdma_adjust = false; - } else { - /*for low BT RSSI*/ - btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->auto_tdma_adjust = false; - } + /* HID+A2DP (no need to consider BT RSSI) */ + btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->auto_tdma_adjust = false; btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } else if ((bt_link_info->pan_only) || -- cgit v1.2.3 From b6f6301041a3f27cb3085abb395c2607ac71671c Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Wed, 6 Apr 2022 15:11:03 +0530 Subject: ath11k: Do not put HW in DBS mode for WCN6750 Though WCN6750 is a single PDEV device, it is not a DBS solution. So, do not put HW in DBS mode for WCN6750. Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00573-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220406094107.17878-10-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 2 +- drivers/net/wireless/ath/ath11k/wmi.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index f011db4bcd2b..06958f358307 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1213,7 +1213,7 @@ static int ath11k_core_start(struct ath11k_base *ab, } /* put hardware to DBS mode */ - if (ab->hw_params.single_pdev_only) { + if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) { ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS); if (ret) { ath11k_err(ab, "failed to send dbs mode: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index dcf31bdad47e..d7243819d9bd 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include @@ -8225,7 +8226,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; /* It's overwritten when service_ext_ready is handled */ - if (ab->hw_params.single_pdev_only) + if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; /* TODO: Init remaining wmi soc resources required */ -- cgit v1.2.3 From 95959d702ede5fffd1abfcfa739b5179828cfe24 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Wed, 6 Apr 2022 15:11:04 +0530 Subject: ath11k: WMI changes to support WCN6750 WCN6750 is a single PDEV non-DBS chip which supports 2G, 5G and 6G bands. It is a single LMAC device which can be either hooked to 2G/5G/6G bands. Add WMI changes to support WCN6750. Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00573-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220406094107.17878-11-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/wmi.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index d7243819d9bd..3c0ac1e29479 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -391,6 +391,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id; ab->target_pdev_count++; + if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) && + !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP)) + return -EINVAL; + /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from * band to band for a single radio, need to see how this should be * handled. @@ -398,7 +402,9 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g; pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g; - } else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { + } + + if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g; pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g; pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g; @@ -408,8 +414,6 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio); pdev_cap->nss_ratio_info = WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio); - } else { - return -EINVAL; } /* tx/rx chainmask reported from fw depends on the actual hw chains used, -- cgit v1.2.3 From 33b67a4b4e64275b6f2cbc4318f1596c70659111 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Wed, 6 Apr 2022 15:11:05 +0530 Subject: ath11k: Update WBM idle ring HP after FW mode on Currently, WBM idle ring HP is updated much before the shadow configuration is sent to the FW. Any update to the shadow registers before FW mode on request would not be reflected on to the actual HW registers failing to bring up the device. Send FW mode ON QMI request before WBM idle ring HP update to fix this problem. 
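For illustration, a simplified sketch of the resulting bring-up order (the complete change is in the diff below); the point is that the firmware-start QMI exchange now happens before anything that updates shadow registers:

	ret = ath11k_core_start_firmware(ab, ATH11K_FIRMWARE_MODE_NORMAL); /* FW mode ON first */
	if (ret)
		return ret;

	ret = ath11k_ce_init_pipes(ab);		/* CE / shadow register setup */
	if (ret)
		goto err_firmware_stop;

	ret = ath11k_dp_alloc(ab);		/* DP setup, including the WBM idle ring */
	if (ret)
		goto err_firmware_stop;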
Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00573-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220406094107.17878-12-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/ce.c | 4 +-- drivers/net/wireless/ath/ath11k/core.c | 45 +++++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index aaa7b05ff49d..c14c51f38709 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved. */ #include "dp_rx.h" @@ -918,9 +919,6 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab) int i; int ret; - ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2, - &ab->qmi.ce_cfg.shadow_reg_v2_len); - for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 06958f358307..36d265454e26 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1124,21 +1124,14 @@ static void ath11k_core_pdev_destroy(struct ath11k_base *ab) ath11k_debugfs_pdev_destroy(ab); } -static int ath11k_core_start(struct ath11k_base *ab, - enum ath11k_firmware_mode mode) +static int ath11k_core_start(struct ath11k_base *ab) { int ret; - ret = ath11k_qmi_firmware_start(ab, mode); - if (ret) { - ath11k_err(ab, "failed to attach wmi: %d\n", ret); - return ret; - } - ret = ath11k_wmi_attach(ab); if (ret) { ath11k_err(ab, "failed to attach wmi: %d\n", ret); - goto err_firmware_stop; + return ret; } ret = ath11k_htc_init(ab); @@ -1238,8 +1231,23 @@ err_hif_stop: ath11k_hif_stop(ab); err_wmi_detach: ath11k_wmi_detach(ab); -err_firmware_stop: - ath11k_qmi_firmware_stop(ab); + + return ret; +} + +static int ath11k_core_start_firmware(struct ath11k_base *ab, + enum ath11k_firmware_mode mode) +{ + int ret; + + ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2, + &ab->qmi.ce_cfg.shadow_reg_v2_len); + + ret = ath11k_qmi_firmware_start(ab, mode); + if (ret) { + ath11k_err(ab, "failed to send firmware start: %d\n", ret); + return ret; + } return ret; } @@ -1269,16 +1277,22 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) { int ret; + ret = ath11k_core_start_firmware(ab, ATH11K_FIRMWARE_MODE_NORMAL); + if (ret) { + ath11k_err(ab, "failed to start firmware: %d\n", ret); + return ret; + } + ret = ath11k_ce_init_pipes(ab); if (ret) { ath11k_err(ab, "failed to initialize CE: %d\n", ret); - return ret; + goto err_firmware_stop; } ret = ath11k_dp_alloc(ab); if (ret) { ath11k_err(ab, "failed to init DP: %d\n", ret); - return ret; + goto err_firmware_stop; } switch (ath11k_crypto_mode) { @@ -1299,7 +1313,7 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); mutex_lock(&ab->core_lock); - ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL); + ret = ath11k_core_start(ab); if (ret) { ath11k_err(ab, "failed to start core: %d\n", ret); goto err_dp_free; 
@@ -1328,6 +1342,9 @@ err_core_stop: err_dp_free: ath11k_dp_free(ab); mutex_unlock(&ab->core_lock); +err_firmware_stop: + ath11k_qmi_firmware_stop(ab); + return ret; } -- cgit v1.2.3 From 161c64de239c7018e0295e7e0520a19f00aa32dc Mon Sep 17 00:00:00 2001 From: Hari Chandrakanthan Date: Sat, 23 Apr 2022 12:36:47 +0300 Subject: ath11k: disable spectral scan during spectral deinit When ath11k modules are removed using rmmod with spectral scan enabled, crash is observed. Different crash trace is observed for each crash. Send spectral scan disable WMI command to firmware before cleaning the spectral dbring in the spectral_deinit API to avoid this crash. call trace from one of the crash observed: [ 1252.880802] Unable to handle kernel NULL pointer dereference at virtual address 00000008 [ 1252.882722] pgd = 0f42e886 [ 1252.890955] [00000008] *pgd=00000000 [ 1252.893478] Internal error: Oops: 5 [#1] PREEMPT SMP ARM [ 1253.093035] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.89 #0 [ 1253.115261] Hardware name: Generic DT based system [ 1253.121149] PC is at ath11k_spectral_process_data+0x434/0x574 [ath11k] [ 1253.125940] LR is at 0x88e31017 [ 1253.132448] pc : [<7f9387b8>] lr : [<88e31017>] psr: a0000193 [ 1253.135488] sp : 80d01bc8 ip : 00000001 fp : 970e0000 [ 1253.141737] r10: 88e31000 r9 : 970ec000 r8 : 00000080 [ 1253.146946] r7 : 94734040 r6 : a0000113 r5 : 00000057 r4 : 00000000 [ 1253.152159] r3 : e18cb694 r2 : 00000217 r1 : 1df1f000 r0 : 00000001 [ 1253.158755] Flags: NzCv IRQs off FIQs on Mode SVC_32 ISA ARM Segment user [ 1253.165266] Control: 10c0383d Table: 5e71006a DAC: 00000055 [ 1253.172472] Process swapper/0 (pid: 0, stack limit = 0x60870141) [ 1253.458055] [<7f9387b8>] (ath11k_spectral_process_data [ath11k]) from [<7f917fdc>] (ath11k_dbring_buffer_release_event+0x214/0x2e4 [ath11k]) [ 1253.466139] [<7f917fdc>] (ath11k_dbring_buffer_release_event [ath11k]) from [<7f8ea3c4>] (ath11k_wmi_tlv_op_rx+0x1840/0x29cc [ath11k]) [ 1253.478807] [<7f8ea3c4>] (ath11k_wmi_tlv_op_rx [ath11k]) from [<7f8fe868>] (ath11k_htc_rx_completion_handler+0x180/0x4e0 [ath11k]) [ 1253.490699] [<7f8fe868>] (ath11k_htc_rx_completion_handler [ath11k]) from [<7f91308c>] (ath11k_ce_per_engine_service+0x2c4/0x3b4 [ath11k]) [ 1253.502386] [<7f91308c>] (ath11k_ce_per_engine_service [ath11k]) from [<7f9a4198>] (ath11k_pci_ce_tasklet+0x28/0x80 [ath11k_pci]) [ 1253.514811] [<7f9a4198>] (ath11k_pci_ce_tasklet [ath11k_pci]) from [<8032227c>] (tasklet_action_common.constprop.2+0x64/0xe8) [ 1253.526476] [<8032227c>] (tasklet_action_common.constprop.2) from [<803021e8>] (__do_softirq+0x130/0x2d0) [ 1253.537756] [<803021e8>] (__do_softirq) from [<80322610>] (irq_exit+0xcc/0xe8) [ 1253.547304] [<80322610>] (irq_exit) from [<8036a4a4>] (__handle_domain_irq+0x60/0xb4) [ 1253.554428] [<8036a4a4>] (__handle_domain_irq) from [<805eb348>] (gic_handle_irq+0x4c/0x90) [ 1253.562321] [<805eb348>] (gic_handle_irq) from [<80301a78>] (__irq_svc+0x58/0x8c) Tested-on: QCN6122 hw1.0 AHB WLAN.HK.2.6.0.1-00851-QCAHKSWPL_SILICONZ-1 Signed-off-by: Hari Chandrakanthan Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1649396345-349-1-git-send-email-quic_haric@quicinc.com --- drivers/net/wireless/ath/ath11k/spectral.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c index 2b18871d5f7c..516a7b4cd180 100644 --- a/drivers/net/wireless/ath/ath11k/spectral.c +++ 
b/drivers/net/wireless/ath/ath11k/spectral.c @@ -212,7 +212,10 @@ static int ath11k_spectral_scan_config(struct ath11k *ar, return -ENODEV; arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED); + + spin_lock_bh(&ar->spectral.lock); ar->spectral.mode = mode; + spin_unlock_bh(&ar->spectral.lock); ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR, @@ -843,9 +846,6 @@ static inline void ath11k_spectral_ring_free(struct ath11k *ar) { struct ath11k_spectral *sp = &ar->spectral; - if (!sp->enabled) - return; - ath11k_dbring_srng_cleanup(ar, &sp->rx_ring); ath11k_dbring_buf_cleanup(ar, &sp->rx_ring); } @@ -897,15 +897,16 @@ void ath11k_spectral_deinit(struct ath11k_base *ab) if (!sp->enabled) continue; - ath11k_spectral_debug_unregister(ar); - ath11k_spectral_ring_free(ar); + mutex_lock(&ar->conf_mutex); + ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED); + mutex_unlock(&ar->conf_mutex); spin_lock_bh(&sp->lock); - - sp->mode = ATH11K_SPECTRAL_DISABLED; sp->enabled = false; - spin_unlock_bh(&sp->lock); + + ath11k_spectral_debug_unregister(ar); + ath11k_spectral_ring_free(ar); } } -- cgit v1.2.3 From 66721bb4bbf25e990e003a2303ef86ce34556bac Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Wed, 20 Apr 2022 22:35:01 -0400 Subject: ath11k: read country code from SMBIOS for WCN6855/QCA6390 This read the country code from SMBIOS and send the country code to firmware, firmware will indicate the regulatory domain info of the country code and then ath11k will use the info. dmesg: [ 1242.637173] ath11k_pci 0000:02:00.0: chip_id 0x2 chip_family 0xb board_id 0xff soc_id 0x400c0200 [ 1242.637176] ath11k_pci 0000:02:00.0: fw_version 0x110b09e5 fw_build_timestamp 2021-06-22 09:32 fw_build_id QC_IMAGE_VERSION_STRING=WLAN.HSP.1.1-02533-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 [ 1242.637253] ath11k_pci 0000:02:00.0: worldwide regdomain setting from SMBIOS [ 1242.637259] ath11k_pci 0000:02:00.0: bdf variant name not found. [ 1242.637261] ath11k_pci 0000:02:00.0: SMBIOS bdf variant name not set. [ 1242.637263] ath11k_pci 0000:02:00.0: DT bdf variant name not set. 
[ 1242.927543] ath11k_pci 0000:02:00.0: set current country pdev id 0 alpha2 00 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220421023501.32167-1-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 26 ++++++++++++++++++++++++-- drivers/net/wireless/ath/ath11k/core.h | 30 ++++++++++++++++++++++++++++-- drivers/net/wireless/ath/ath11k/mac.c | 11 +++++++++++ 3 files changed, 63 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 36d265454e26..7e074b7716e7 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -544,7 +544,7 @@ int ath11k_core_resume(struct ath11k_base *ab) } EXPORT_SYMBOL(ath11k_core_resume); -static void ath11k_core_check_bdfext(const struct dmi_header *hdr, void *data) +static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data) { struct ath11k_base *ab = data; const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC; @@ -566,6 +566,28 @@ static void ath11k_core_check_bdfext(const struct dmi_header *hdr, void *data) return; } + spin_lock_bh(&ab->base_lock); + + switch (smbios->country_code_flag) { + case ATH11K_SMBIOS_CC_ISO: + ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff; + ab->new_alpha2[1] = smbios->cc_code & 0xff; + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot smbios cc_code %c%c\n", + ab->new_alpha2[0], ab->new_alpha2[1]); + break; + case ATH11K_SMBIOS_CC_WW: + ab->new_alpha2[0] = '0'; + ab->new_alpha2[1] = '0'; + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot smbios worldwide regdomain\n"); + break; + default: + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot ignore smbios country code setting %d\n", + smbios->country_code_flag); + break; + } + + spin_unlock_bh(&ab->base_lock); + if (!smbios->bdf_enabled) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n"); return; @@ -605,7 +627,7 @@ static void ath11k_core_check_bdfext(const struct dmi_header *hdr, void *data) int ath11k_core_check_smbios(struct ath11k_base *ab) { ab->qmi.target.bdf_ext[0] = '\0'; - dmi_walk(ath11k_core_check_bdfext, ab); + dmi_walk(ath11k_core_check_cc_code_bdfext, ab); if (ab->qmi.target.bdf_ext[0] == '\0') return -ENODATA; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index fa299bfb4efc..7a505531acf9 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -169,12 +169,38 @@ struct ath11k_ext_irq_grp { struct net_device napi_ndev; }; +enum ath11k_smbios_cc_type { + /* disable country code setting from SMBIOS */ + ATH11K_SMBIOS_CC_DISABLE = 0, + + /* set country code by ANSI country name, based on ISO3166-1 alpha2 */ + ATH11K_SMBIOS_CC_ISO = 1, + + /* worldwide regdomain */ + ATH11K_SMBIOS_CC_WW = 2, +}; + struct ath11k_smbios_bdf { struct dmi_header hdr; - u32 padding; + + u8 features_disabled; + + /* enum ath11k_smbios_cc_type */ + u8 country_code_flag; + + /* To set specific country, you need to set country code + * flag=ATH11K_SMBIOS_CC_ISO first, then if country is United + * States, then country code value = 0x5553 ("US",'U' = 0x55, 'S'= + * 0x53). To set country to INDONESIA, then country code value = + * 0x4944 ("IN", 'I'=0x49, 'D'=0x44). If country code flag = + * ATH11K_SMBIOS_CC_WW, then you can use worldwide regulatory + * setting. 
+ */ + u16 cc_code; + u8 bdf_enabled; u8 bdf_ext[]; -}; +} __packed; #define HEHANDLE_CAP_PHYINFO_SIZE 3 #define HECAP_PHYINFO_SIZE 9 diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index c987f365c63a..c025bcb25c00 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8836,6 +8836,17 @@ static int __ath11k_mac_register(struct ath11k *ar) goto err_unregister_hw; } + if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) { + struct wmi_set_current_country_params set_current_param = {}; + + memcpy(&set_current_param.alpha2, ab->new_alpha2, 2); + memcpy(&ar->alpha2, ab->new_alpha2, 2); + ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + if (ret) + ath11k_warn(ar->ab, + "failed set cc code for mac register: %d\n", ret); + } + ret = ath11k_debugfs_register(ar); if (ret) { ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret); -- cgit v1.2.3 From 7471f7d273ac51f044dad3d78af6afb87a14bb67 Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Sun, 24 Apr 2022 17:45:22 +0800 Subject: ath10k: simplify if-if to if-else Use if and else instead of if(A) and if (!A). Signed-off-by: Wan Jiabing Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220424094522.105262-1-wanjiabing@vivo.com --- drivers/net/wireless/ath/ath10k/mac.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index b11aaee8b8c0..a46f56a1efca 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4118,11 +4118,10 @@ void ath10k_offchan_tx_work(struct work_struct *work) peer = ath10k_peer_find(ar, vdev_id, peer_addr); spin_unlock_bh(&ar->data_lock); - if (peer) + if (peer) { ath10k_warn(ar, "peer %pM on vdev %d already present\n", peer_addr, vdev_id); - - if (!peer) { + } else { ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, peer_addr, WMI_PEER_TYPE_DEFAULT); -- cgit v1.2.3 From a5f3aed5889eac9ed72f071dd439b9c671d1092f Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Sun, 24 Apr 2022 17:45:50 +0800 Subject: wil6210: simplify if-if to if-else Use if and else instead of if(A) and if (!A). Signed-off-by: Wan Jiabing Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220424094552.105466-1-wanjiabing@vivo.com --- drivers/net/wireless/ath/wil6210/cfg80211.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 764d1d14132b..8f2638f5b87b 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1653,10 +1653,9 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, params->seq_len, params->seq); return -EINVAL; } - } - - if (!IS_ERR(cs)) + } else { wil_del_rx_key(key_index, key_usage, cs); + } if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) { wil_err(wil, -- cgit v1.2.3 From 77f2accb501ab7ff3d734bbf09b5872cba4285f1 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Wed, 27 Apr 2022 08:51:24 +0200 Subject: net: lan966x: Change the PTP pin used to read/write the PHC. To read/write a value to a PHC, it is required to use a PTP pin. Currently it is used pin 5, but change to pin 7 as is the last pin. All the other pins will have different functions. Signed-off-by: Horatiu Vultur Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index 0a1041da4384..3e455a3fad08 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -16,7 +16,7 @@ */ #define LAN966X_1PPB_FORMAT 3480517749LL -#define TOD_ACC_PIN 0x5 +#define TOD_ACC_PIN 0x7 enum { PTP_PIN_ACTION_IDLE = 0, -- cgit v1.2.3 From 3adc11e5fc5fc0925aa298c8ef327224459f80bf Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Wed, 27 Apr 2022 08:51:25 +0200 Subject: net: lan966x: Add registers used to configure the PTP pin Add registers that are used to configure the PTP pins. These registers are used to enable the interrupts per PTP pin and to set the waveform generated by the pin. Signed-off-by: Horatiu Vultur Signed-off-by: David S. Miller --- .../net/ethernet/microchip/lan966x/lan966x_regs.h | 40 ++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h index 2f59285bef29..8265ad89f0bc 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -684,6 +684,24 @@ enum lan966x_target { /* FDMA:FDMA:FDMA_ERRORS */ #define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4) +/* PTP:PTP_CFG:PTP_PIN_INTR */ +#define PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 0, 0, 1, 4) + +#define PTP_PIN_INTR_INTR_PTP GENMASK(7, 0) +#define PTP_PIN_INTR_INTR_PTP_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_INTR_PTP, x) +#define PTP_PIN_INTR_INTR_PTP_GET(x)\ + FIELD_GET(PTP_PIN_INTR_INTR_PTP, x) + +/* PTP:PTP_CFG:PTP_PIN_INTR_ENA */ +#define PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 4, 0, 1, 4) + +#define PTP_PIN_INTR_ENA_INTR_ENA GENMASK(7, 0) +#define PTP_PIN_INTR_ENA_INTR_ENA_SET(x)\ + FIELD_PREP(PTP_PIN_INTR_ENA_INTR_ENA, x) +#define PTP_PIN_INTR_ENA_INTR_ENA_GET(x)\ + FIELD_GET(PTP_PIN_INTR_ENA_INTR_ENA, x) + /* PTP:PTP_CFG:PTP_DOM_CFG */ #define PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 512, 0, 1, 16, 12, 0, 1, 4) @@ -717,6 +735,12 @@ enum lan966x_target { #define PTP_PIN_CFG_PIN_SYNC_GET(x)\ FIELD_GET(PTP_PIN_CFG_PIN_SYNC, x) +#define PTP_PIN_CFG_PIN_SELECT GENMASK(23, 21) +#define PTP_PIN_CFG_PIN_SELECT_SET(x)\ + FIELD_PREP(PTP_PIN_CFG_PIN_SELECT, x) +#define PTP_PIN_CFG_PIN_SELECT_GET(x)\ + FIELD_GET(PTP_PIN_CFG_PIN_SELECT, x) + #define PTP_PIN_CFG_PIN_DOM GENMASK(17, 16) #define PTP_PIN_CFG_PIN_DOM_SET(x)\ FIELD_PREP(PTP_PIN_CFG_PIN_DOM, x) @@ -744,6 +768,22 @@ enum lan966x_target { #define PTP_TOD_NSEC_TOD_NSEC_GET(x)\ FIELD_GET(PTP_TOD_NSEC_TOD_NSEC, x) +/* PTP:PTP_PINS:WF_HIGH_PERIOD */ +#define PTP_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 24, 0, 1, 4) + +#define PTP_WF_HIGH_PERIOD_PIN_WFH(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_M GENMASK(29, 0) +#define PTP_WF_HIGH_PERIOD_PIN_WFH_X(x) ((x) & GENMASK(29, 0)) + +/* PTP:PTP_PINS:WF_LOW_PERIOD */ +#define PTP_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\ + 0, 1, 0, g, 8, 64, 28, 0, 1, 4) + +#define PTP_WF_LOW_PERIOD_PIN_WFL(x) ((x) & GENMASK(29, 0)) +#define PTP_WF_LOW_PERIOD_PIN_WFL_M GENMASK(29, 0) +#define PTP_WF_LOW_PERIOD_PIN_WFL_X(x) ((x) & GENMASK(29, 0)) + /* PTP:PTP_TS_FIFO:PTP_TWOSTEP_CTRL */ #define PTP_TWOSTEP_CTRL __REG(TARGET_PTP, 0, 1, 
612, 0, 1, 12, 0, 0, 1, 4) -- cgit v1.2.3 From 2b7ff2588ec21dde8a3a66dc927e4e653b175ddb Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Wed, 27 Apr 2022 08:51:26 +0200 Subject: net: lan966x: Add support for PTP_PF_PEROUT Lan966x has 8 PTP programmable pins, where the last pins is hardcoded to be used by PHC0, which does the frame timestamping. All the rest of the PTP pins can be shared between the PHCs and can have different functions like perout or extts. For now add support for PTP_FS_PEROUT. The HW is not able to support absolute start time but can use the nsec for phase adjustment when generating PPS. Signed-off-by: Horatiu Vultur Signed-off-by: David S. Miller --- .../net/ethernet/microchip/lan966x/lan966x_main.h | 2 + .../net/ethernet/microchip/lan966x/lan966x_ptp.c | 167 +++++++++++++++++++++ 2 files changed, 169 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 5213263c4e87..76255e2a86f3 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -56,6 +56,7 @@ #define LAN966X_PHC_COUNT 3 #define LAN966X_PHC_PORT 0 +#define LAN966X_PHC_PINS_NUM 7 #define IFH_REW_OP_NOOP 0x0 #define IFH_REW_OP_ONE_STEP_PTP 0x3 @@ -177,6 +178,7 @@ struct lan966x_stat_layout { struct lan966x_phc { struct ptp_clock *clock; struct ptp_clock_info info; + struct ptp_pin_desc pins[LAN966X_PHC_PINS_NUM]; struct hwtstamp_config hwtstamp_config; struct lan966x *lan966x; u8 index; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index 3e455a3fad08..3199a266ed3d 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -493,6 +493,158 @@ static int lan966x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } +static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct ptp_clock_info *info; + int i; + + /* Currently support only 1 channel */ + if (chan != 0) + return -1; + + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + break; + default: + return -1; + } + + /* The PTP pins are shared by all the PHC. So it is required to see if + * the pin is connected to another PHC. The pin is connected to another + * PHC if that pin already has a function on that PHC. 
+ */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + info = &lan966x->phc[i].info; + + /* Ignore the check with ourself */ + if (ptp == info) + continue; + + if (info->pin_config[pin].func == PTP_PF_PEROUT) + return -1; + } + + return 0; +} + +static int lan966x_ptp_perout(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + struct timespec64 ts_phase, ts_period; + unsigned long flags; + s64 wf_high, wf_low; + bool pps = false; + int pin; + + if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + if (!on) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_IDLE) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + if (rq->perout.period.sec == 1 && + rq->perout.period.nsec == 0) + pps = true; + + if (rq->perout.flags & PTP_PEROUT_PHASE) { + ts_phase.tv_sec = rq->perout.phase.sec; + ts_phase.tv_nsec = rq->perout.phase.nsec; + } else { + ts_phase.tv_sec = rq->perout.start.sec; + ts_phase.tv_nsec = rq->perout.start.nsec; + } + + if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) { + dev_warn(lan966x->dev, + "Absolute time not supported!\n"); + return -EINVAL; + } + + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + + wf_high = timespec64_to_ns(&ts_on); + } else { + wf_high = 5000; + } + + if (pps) { + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(ts_phase.tv_nsec), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(3), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + return 0; + } + + ts_period.tv_sec = rq->perout.period.sec; + ts_period.tv_nsec = rq->perout.period.nsec; + + wf_low = timespec64_to_ns(&ts_period); + wf_low -= wf_high; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_wr(PTP_WF_LOW_PERIOD_PIN_WFL(wf_low), + lan966x, PTP_WF_LOW_PERIOD(pin)); + lan_wr(PTP_WF_HIGH_PERIOD_PIN_WFH(wf_high), + lan966x, PTP_WF_HIGH_PERIOD(pin)); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_CLOCK) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SYNC_SET(0), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SYNC, + lan966x, PTP_PIN_CFG(pin)); + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + +static int lan966x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + return lan966x_ptp_perout(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + + return 0; +} + static struct ptp_clock_info lan966x_ptp_clock_info = { .owner = THIS_MODULE, .name = "lan966x ptp", @@ -501,6 +653,10 @@ static struct ptp_clock_info lan966x_ptp_clock_info = { .settime64 
= lan966x_ptp_settime64, .adjtime = lan966x_ptp_adjtime, .adjfine = lan966x_ptp_adjfine, + .verify = lan966x_ptp_verify, + .enable = lan966x_ptp_enable, + .n_per_out = LAN966X_PHC_PINS_NUM, + .n_pins = LAN966X_PHC_PINS_NUM, }; static int lan966x_ptp_phc_init(struct lan966x *lan966x, @@ -508,8 +664,19 @@ static int lan966x_ptp_phc_init(struct lan966x *lan966x, struct ptp_clock_info *clock_info) { struct lan966x_phc *phc = &lan966x->phc[index]; + struct ptp_pin_desc *p; + int i; + + for (i = 0; i < LAN966X_PHC_PINS_NUM; i++) { + p = &phc->pins[i]; + + snprintf(p->name, sizeof(p->name), "pin%d", i); + p->index = i; + p->func = PTP_PF_NONE; + } phc->info = *clock_info; + phc->info.pin_config = &phc->pins[0]; phc->clock = ptp_clock_register(&phc->info, lan966x->dev); if (IS_ERR(phc->clock)) return PTR_ERR(phc->clock); -- cgit v1.2.3 From f3d8e0a9c28ba0bb3716dd5e8697a075ea36fdd8 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Wed, 27 Apr 2022 08:51:27 +0200 Subject: net: lan966x: Add support for PTP_PF_EXTTS Extend the PTP programmable pins to implement also PTP_PF_EXTTS function. The PTP pin can be configured to capture only on the rising edge of the PPS signal. And once an event is seen then an interrupt is generated and the local time counter is saved. The interrupt is shared between all the pins. Signed-off-by: Horatiu Vultur Signed-off-by: David S. Miller --- .../net/ethernet/microchip/lan966x/lan966x_main.c | 17 ++++ .../net/ethernet/microchip/lan966x/lan966x_main.h | 2 + .../net/ethernet/microchip/lan966x/lan966x_ptp.c | 109 ++++++++++++++++++++- 3 files changed, 127 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index afec115e46eb..6579c7062aa5 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -692,6 +692,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x) if (lan966x->ptp_irq) devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x); + + if (lan966x->ptp_ext_irq) + devm_free_irq(lan966x->dev, lan966x->ptp_ext_irq, lan966x); } static int lan966x_probe_port(struct lan966x *lan966x, u32 p, @@ -1058,6 +1061,20 @@ static int lan966x_probe(struct platform_device *pdev) lan966x->fdma = true; } + if (lan966x->ptp) { + lan966x->ptp_ext_irq = platform_get_irq_byname(pdev, "ptp-ext"); + if (lan966x->ptp_ext_irq > 0) { + err = devm_request_threaded_irq(&pdev->dev, + lan966x->ptp_ext_irq, NULL, + lan966x_ptp_ext_irq_handler, + IRQF_ONESHOT, + "ptp-ext irq", lan966x); + if (err) + return dev_err_probe(&pdev->dev, err, + "Unable to use ptp-ext irq"); + } + } + /* init switch */ lan966x_init(lan966x); lan966x_stats_init(lan966x); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 76255e2a86f3..3b86ddddc756 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -233,6 +233,7 @@ struct lan966x { int ana_irq; int ptp_irq; int fdma_irq; + int ptp_ext_irq; /* worqueue for fdb */ struct workqueue_struct *fdb_work; @@ -394,6 +395,7 @@ int lan966x_ptp_txtstamp_request(struct lan966x_port *port, void lan966x_ptp_txtstamp_release(struct lan966x_port *port, struct sk_buff *skb); irqreturn_t lan966x_ptp_irq_handler(int irq, void *args); +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args); int lan966x_fdma_xmit(struct sk_buff *skb, 
__be32 *ifh, struct net_device *dev); int lan966x_fdma_change_mtu(struct lan966x *lan966x); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c index 3199a266ed3d..3a621c5165bc 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c @@ -321,6 +321,63 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args) return IRQ_HANDLED; } +irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args) +{ + struct lan966x *lan966x = args; + struct lan966x_phc *phc; + unsigned long flags; + u64 time = 0; + time64_t s; + int pin, i; + s64 ns; + + if (!(lan_rd(lan966x, PTP_PIN_INTR))) + return IRQ_NONE; + + /* Go through all domains and see which pin generated the interrupt */ + for (i = 0; i < LAN966X_PHC_COUNT; ++i) { + struct ptp_clock_event ptp_event = {0}; + + phc = &lan966x->phc[i]; + pin = ptp_find_pin_unlocked(phc->clock, PTP_PF_EXTTS, 0); + if (pin == -1) + continue; + + if (!(lan_rd(lan966x, PTP_PIN_INTR) & BIT(pin))) + continue; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + + /* Enable to get the new interrupt. + * By writing 1 it clears the bit + */ + lan_wr(BIT(pin), lan966x, PTP_PIN_INTR); + + /* Get current time */ + s = lan_rd(lan966x, PTP_TOD_SEC_MSB(pin)); + s <<= 32; + s |= lan_rd(lan966x, PTP_TOD_SEC_LSB(pin)); + ns = lan_rd(lan966x, PTP_TOD_NSEC(pin)); + ns &= PTP_TOD_NSEC_TOD_NSEC; + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + if ((ns & 0xFFFFFFF0) == 0x3FFFFFF0) { + s--; + ns &= 0xf; + ns += 999999984; + } + time = ktime_set(s, ns); + + ptp_event.index = pin; + ptp_event.timestamp = time; + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(phc->clock, &ptp_event); + } + + return IRQ_HANDLED; +} + static int lan966x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); @@ -508,6 +565,7 @@ static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, switch (func) { case PTP_PF_NONE: case PTP_PF_PEROUT: + case PTP_PF_EXTTS: break; default: return -1; @@ -524,7 +582,8 @@ static int lan966x_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, if (ptp == info) continue; - if (info->pin_config[pin].func == PTP_PF_PEROUT) + if (info->pin_config[pin].func == PTP_PF_PEROUT || + info->pin_config[pin].func == PTP_PF_EXTTS) return -1; } @@ -632,12 +691,59 @@ static int lan966x_ptp_perout(struct ptp_clock_info *ptp, return 0; } +static int lan966x_ptp_extts(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info); + struct lan966x *lan966x = phc->lan966x; + unsigned long flags; + int pin; + u32 val; + + if (lan966x->ptp_ext_irq <= 0) + return -EOPNOTSUPP; + + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index); + if (pin == -1 || pin >= LAN966X_PHC_PINS_NUM) + return -EINVAL; + + spin_lock_irqsave(&lan966x->ptp_clock_lock, flags); + lan_rmw(PTP_PIN_CFG_PIN_ACTION_SET(PTP_PIN_ACTION_SAVE) | + PTP_PIN_CFG_PIN_SYNC_SET(on ? 
3 : 0) | + PTP_PIN_CFG_PIN_DOM_SET(phc->index) | + PTP_PIN_CFG_PIN_SELECT_SET(pin), + PTP_PIN_CFG_PIN_ACTION | + PTP_PIN_CFG_PIN_SYNC | + PTP_PIN_CFG_PIN_DOM | + PTP_PIN_CFG_PIN_SELECT, + lan966x, PTP_PIN_CFG(pin)); + + val = lan_rd(lan966x, PTP_PIN_INTR_ENA); + if (on) + val |= BIT(pin); + else + val &= ~BIT(pin); + lan_wr(val, lan966x, PTP_PIN_INTR_ENA); + + spin_unlock_irqrestore(&lan966x->ptp_clock_lock, flags); + + return 0; +} + static int lan966x_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { switch (rq->type) { case PTP_CLK_REQ_PEROUT: return lan966x_ptp_perout(ptp, rq, on); + case PTP_CLK_REQ_EXTTS: + return lan966x_ptp_extts(ptp, rq, on); default: return -EOPNOTSUPP; } @@ -656,6 +762,7 @@ static struct ptp_clock_info lan966x_ptp_clock_info = { .verify = lan966x_ptp_verify, .enable = lan966x_ptp_enable, .n_per_out = LAN966X_PHC_PINS_NUM, + .n_ext_ts = LAN966X_PHC_PINS_NUM, .n_pins = LAN966X_PHC_PINS_NUM, }; -- cgit v1.2.3 From 41c335c8212387a7563b0eb7f885591ad49970be Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Apr 2022 10:54:31 -0700 Subject: net: atm: remove support for Fujitsu FireStream ATM devices This driver received nothing but automated fixes (mostly spelling and compiler warnings) since git era begun. Since it's using virt_to_bus it's unlikely to be used on any modern platform. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- arch/mips/configs/gpr_defconfig | 1 - arch/mips/configs/mtx1_defconfig | 1 - drivers/atm/Kconfig | 10 - drivers/atm/Makefile | 1 - drivers/atm/firestream.c | 2057 -------------------------------------- drivers/atm/firestream.h | 502 ---------- 6 files changed, 2572 deletions(-) delete mode 100644 drivers/atm/firestream.c delete mode 100644 drivers/atm/firestream.h (limited to 'drivers') diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index b0489437621a..605e778dff74 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -178,7 +178,6 @@ CONFIG_NETCONSOLE=m CONFIG_ATM_TCP=m CONFIG_ATM_LANAI=m CONFIG_ATM_ENI=m -CONFIG_ATM_FIRESTREAM=m CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index c98099f0b354..de95e7fe5a77 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -255,7 +255,6 @@ CONFIG_ARCNET_COM20020_CS=m CONFIG_ATM_TCP=m CONFIG_ATM_LANAI=m CONFIG_ATM_ENI=m -CONFIG_ATM_FIRESTREAM=m CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 7be08e24955c..360c98ad29eb 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -146,16 +146,6 @@ config ATM_ENI_BURST_RX_2W try this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or 8W are also set may or may not improve throughput. -config ATM_FIRESTREAM - tristate "Fujitsu FireStream (FS50/FS155) " - depends on PCI && VIRT_TO_BUS - help - Driver for the Fujitsu FireStream 155 (MB86697) and - FireStream 50 (MB86695) ATM PCI chips. - - To compile this driver as a module, choose M here: the module will - be called firestream. 
- config ATM_ZATM tristate "ZeitNet ZN1221/ZN1225" depends on PCI && VIRT_TO_BUS diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile index 99ecbc280643..7d38fdaddd09 100644 --- a/drivers/atm/Makefile +++ b/drivers/atm/Makefile @@ -26,7 +26,6 @@ endif obj-$(CONFIG_ATM_DUMMY) += adummy.o obj-$(CONFIG_ATM_TCP) += atmtcp.o -obj-$(CONFIG_ATM_FIRESTREAM) += firestream.o obj-$(CONFIG_ATM_LANAI) += lanai.o obj-$(CONFIG_ATM_HE) += he.o diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c deleted file mode 100644 index 4f67404fe64c..000000000000 --- a/drivers/atm/firestream.c +++ /dev/null @@ -1,2057 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -/* drivers/atm/firestream.c - FireStream 155 (MB86697) and - * FireStream 50 (MB86695) device driver - */ - -/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl - * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA - * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd - */ - -/* -*/ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for request_region */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "firestream.h" - -static int loopback = 0; -static int num=0x5a; - -/* According to measurements (but they look suspicious to me!) done in - * '97, 37% of the packets are one cell in size. So it pays to have - * buffers allocated at that size. A large jump in percentage of - * packets occurs at packets around 536 bytes in length. So it also - * pays to have those pre-allocated. Unfortunately, we can't fully - * take advantage of this as the majority of the packets is likely to - * be TCP/IP (As where obviously the measurement comes from) There the - * link would be opened with say a 1500 byte MTU, and we can't handle - * smaller buffers more efficiently than the larger ones. -- REW - */ - -/* Due to the way Linux memory management works, specifying "576" as - * an allocation size here isn't going to help. They are allocated - * from 1024-byte regions anyway. With the size of the sk_buffs (quite - * large), it doesn't pay to allocate the smallest size (64) -- REW */ - -/* This is all guesswork. Hard numbers to back this up or disprove this, - * are appreciated. -- REW */ - -/* The last entry should be about 64k. However, the "buffer size" is - * passed to the chip in a 16 bit field. I don't know how "65536" - * would be interpreted. -- REW */ - -#define NP FS_NR_FREE_POOLS -static int rx_buf_sizes[NP] = {128, 256, 512, 1024, 2048, 4096, 16384, 65520}; -/* log2: 7 8 9 10 11 12 14 16 */ - -#if 0 -static int rx_pool_sizes[NP] = {1024, 1024, 512, 256, 128, 64, 32, 32}; -#else -/* debug */ -static int rx_pool_sizes[NP] = {128, 128, 128, 64, 64, 64, 32, 32}; -#endif -/* log2: 10 10 9 8 7 6 5 5 */ -/* sumlog2: 17 18 18 18 18 18 19 21 */ -/* mem allocated: 128k 256k 256k 256k 256k 256k 512k 2M */ -/* tot mem: almost 4M */ - -/* NP is shorter, so that it fits on a single line. */ -#undef NP - - -/* Small hardware gotcha: - - The FS50 CAM (VP/VC match registers) always take the lowest channel - number that matches. This is not a problem. - - However, they also ignore whether the channel is enabled or - not. This means that if you allocate channel 0 to 1.2 and then - channel 1 to 0.0, then disabeling channel 0 and writing 0 to the - match channel for channel 0 will "steal" the traffic from channel - 1, even if you correctly disable channel 0. 
- - Workaround: - - - When disabling channels, write an invalid VP/VC value to the - match register. (We use 0xffffffff, which in the worst case - matches VP/VC = /, but I expect it not to match - anything as some "when not in use, program to 0" bits are now - programmed to 1...) - - - Don't initialize the match registers to 0, as 0.0 is a valid - channel. -*/ - - -/* Optimization hints and tips. - - The FireStream chips are very capable of reducing the amount of - "interrupt-traffic" for the CPU. This driver requests an interrupt on EVERY - action. You could try to minimize this a bit. - - Besides that, the userspace->kernel copy and the PCI bus are the - performance limiting issues for this driver. - - You could queue up a bunch of outgoing packets without telling the - FireStream. I'm not sure that's going to win you much though. The - Linux layer won't tell us in advance when it's not going to give us - any more packets in a while. So this is tricky to implement right without - introducing extra delays. - - -- REW - */ - - - - -/* The strings that define what the RX queue entry is all about. */ -/* Fujitsu: Please tell me which ones can have a pointer to a - freepool descriptor! */ -static char *res_strings[] = { - "RX OK: streaming not EOP", - "RX OK: streaming EOP", - "RX OK: Single buffer packet", - "RX OK: packet mode", - "RX OK: F4 OAM (end to end)", - "RX OK: F4 OAM (Segment)", - "RX OK: F5 OAM (end to end)", - "RX OK: F5 OAM (Segment)", - "RX OK: RM cell", - "RX OK: TRANSP cell", - "RX OK: TRANSPC cell", - "Unmatched cell", - "reserved 12", - "reserved 13", - "reserved 14", - "Unrecognized cell", - "reserved 16", - "reassembly abort: AAL5 abort", - "packet purged", - "packet ageing timeout", - "channel ageing timeout", - "calculated length error", - "programmed length limit error", - "aal5 crc32 error", - "oam transp or transpc crc10 error", - "reserved 25", - "reserved 26", - "reserved 27", - "reserved 28", - "reserved 29", - "reserved 30", /* FIXME: The strings between 30-40 might be wrong. 
*/ - "reassembly abort: no buffers", - "receive buffer overflow", - "change in GFC", - "receive buffer full", - "low priority discard - no receive descriptor", - "low priority discard - missing end of packet", - "reserved 37", - "reserved 38", - "reserved 39", - "reserved 40", - "reserved 41", - "reserved 42", - "reserved 43", - "reserved 44", - "reserved 45", - "reserved 46", - "reserved 47", - "reserved 48", - "reserved 49", - "reserved 50", - "reserved 51", - "reserved 52", - "reserved 53", - "reserved 54", - "reserved 55", - "reserved 56", - "reserved 57", - "reserved 58", - "reserved 59", - "reserved 60", - "reserved 61", - "reserved 62", - "reserved 63", -}; - -static char *irq_bitname[] = { - "LPCO", - "DPCO", - "RBRQ0_W", - "RBRQ1_W", - "RBRQ2_W", - "RBRQ3_W", - "RBRQ0_NF", - "RBRQ1_NF", - "RBRQ2_NF", - "RBRQ3_NF", - "BFP_SC", - "INIT", - "INIT_ERR", - "USCEO", - "UPEC0", - "VPFCO", - "CRCCO", - "HECO", - "TBRQ_W", - "TBRQ_NF", - "CTPQ_E", - "GFC_C0", - "PCI_FTL", - "CSQ_W", - "CSQ_NF", - "EXT_INT", - "RXDMA_S" -}; - - -#define PHY_EOF -1 -#define PHY_CLEARALL -2 - -struct reginit_item { - int reg, val; -}; - - -static struct reginit_item PHY_NTC_INIT[] = { - { PHY_CLEARALL, 0x40 }, - { 0x12, 0x0001 }, - { 0x13, 0x7605 }, - { 0x1A, 0x0001 }, - { 0x1B, 0x0005 }, - { 0x38, 0x0003 }, - { 0x39, 0x0006 }, /* changed here to make loopback */ - { 0x01, 0x5262 }, - { 0x15, 0x0213 }, - { 0x00, 0x0003 }, - { PHY_EOF, 0}, /* -1 signals end of list */ -}; - - -/* Safetyfeature: If the card interrupts more than this number of times - in a jiffy (1/100th of a second) then we just disable the interrupt and - print a message. This prevents the system from hanging. - - 150000 packets per second is close to the limit a PC is going to have - anyway. We therefore have to disable this for production. -- REW */ -#undef IRQ_RATE_LIMIT // 100 - -/* Interrupts work now. Unlike serial cards, ATM cards don't work all - that great without interrupts. -- REW */ -#undef FS_POLL_FREQ // 100 - -/* - This driver can spew a whole lot of debugging output at you. If you - need maximum performance, you should disable the DEBUG define. To - aid in debugging in the field, I'm leaving the compile-time debug - features enabled, and disable them "runtime". That allows me to - instruct people with problems to enable debugging without requiring - them to recompile... -- REW -*/ -#define DEBUG - -#ifdef DEBUG -#define fs_dprintk(f, str...) if (fs_debug & f) printk (str) -#else -#define fs_dprintk(f, str...) /* nothing */ -#endif - - -static int fs_keystream = 0; - -#ifdef DEBUG -/* I didn't forget to set this to zero before shipping. Hit me with a stick - if you get this with the debug default not set to zero again. -- REW */ -static int fs_debug = 0; -#else -#define fs_debug 0 -#endif - -#ifdef MODULE -#ifdef DEBUG -module_param(fs_debug, int, 0644); -#endif -module_param(loopback, int, 0); -module_param(num, int, 0); -module_param(fs_keystream, int, 0); -/* XXX Add rx_buf_sizes, and rx_pool_sizes As per request Amar. 
-- REW */ -#endif - - -#define FS_DEBUG_FLOW 0x00000001 -#define FS_DEBUG_OPEN 0x00000002 -#define FS_DEBUG_QUEUE 0x00000004 -#define FS_DEBUG_IRQ 0x00000008 -#define FS_DEBUG_INIT 0x00000010 -#define FS_DEBUG_SEND 0x00000020 -#define FS_DEBUG_PHY 0x00000040 -#define FS_DEBUG_CLEANUP 0x00000080 -#define FS_DEBUG_QOS 0x00000100 -#define FS_DEBUG_TXQ 0x00000200 -#define FS_DEBUG_ALLOC 0x00000400 -#define FS_DEBUG_TXMEM 0x00000800 -#define FS_DEBUG_QSIZE 0x00001000 - - -#define func_enter() fs_dprintk(FS_DEBUG_FLOW, "fs: enter %s\n", __func__) -#define func_exit() fs_dprintk(FS_DEBUG_FLOW, "fs: exit %s\n", __func__) - - -static struct fs_dev *fs_boards = NULL; - -#ifdef DEBUG - -static void my_hd (void *addr, int len) -{ - int j, ch; - unsigned char *ptr = addr; - - while (len > 0) { - printk ("%p ", ptr); - for (j=0;j < ((len < 16)?len:16);j++) { - printk ("%02x %s", ptr[j], (j==7)?" ":""); - } - for ( ;j < 16;j++) { - printk (" %s", (j==7)?" ":""); - } - for (j=0;j < ((len < 16)?len:16);j++) { - ch = ptr[j]; - printk ("%c", (ch < 0x20)?'.':((ch > 0x7f)?'.':ch)); - } - printk ("\n"); - ptr += 16; - len -= 16; - } -} -#else /* DEBUG */ -static void my_hd (void *addr, int len){} -#endif /* DEBUG */ - -/********** free an skb (as per ATM device driver documentation) **********/ - -/* Hmm. If this is ATM specific, why isn't there an ATM routine for this? - * I copied it over from the ambassador driver. -- REW */ - -static inline void fs_kfree_skb (struct sk_buff * skb) -{ - if (ATM_SKB(skb)->vcc->pop) - ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb); - else - dev_kfree_skb_any (skb); -} - - - - -/* It seems the ATM forum recommends this horribly complicated 16bit - * floating point format. Turns out the Ambassador uses the exact same - * encoding. I just copied it over. If Mitch agrees, I'll move it over - * to the atm_misc file or something like that. (and remove it from - * here and the ambassador driver) -- REW - */ - -/* The good thing about this format is that it is monotonic. So, - a conversion routine need not be very complicated. To be able to - round "nearest" we need to take along a few extra bits. Lets - put these after 16 bits, so that we can just return the top 16 - bits of the 32bit number as the result: - - int mr (unsigned int rate, int r) - { - int e = 16+9; - static int round[4]={0, 0, 0xffff, 0x8000}; - if (!rate) return 0; - while (rate & 0xfc000000) { - rate >>= 1; - e++; - } - while (! (rate & 0xfe000000)) { - rate <<= 1; - e--; - } - -// Now the mantissa is in positions bit 16-25. Excepf for the "hidden 1" that's in bit 26. - rate &= ~0x02000000; -// Next add in the exponent - rate |= e << (16+9); -// And perform the rounding: - return (rate + round[r]) >> 16; - } - - 14 lines-of-code. Compare that with the 120 that the Ambassador - guys needed. (would be 8 lines shorter if I'd try to really reduce - the number of lines: - - int mr (unsigned int rate, int r) - { - int e = 16+9; - static int round[4]={0, 0, 0xffff, 0x8000}; - if (!rate) return 0; - for (; rate & 0xfc000000 ;rate >>= 1, e++); - for (;!(rate & 0xfe000000);rate <<= 1, e--); - return ((rate & ~0x02000000) | (e << (16+9)) + round[r]) >> 16; - } - - Exercise for the reader: Remove one more line-of-code, without - cheating. (Just joining two lines is cheating). (I know it's - possible, don't think you've beat me if you found it... If you - manage to lose two lines or more, keep me updated! 
;-) - - -- REW */ - - -#define ROUND_UP 1 -#define ROUND_DOWN 2 -#define ROUND_NEAREST 3 -/********** make rate (not quite as much fun as Horizon) **********/ - -static int make_rate(unsigned int rate, int r, - u16 *bits, unsigned int *actual) -{ - unsigned char exp = -1; /* hush gcc */ - unsigned int man = -1; /* hush gcc */ - - fs_dprintk (FS_DEBUG_QOS, "make_rate %u", rate); - - /* rates in cells per second, ITU format (nasty 16-bit floating-point) - given 5-bit e and 9-bit m: - rate = EITHER (1+m/2^9)*2^e OR 0 - bits = EITHER 1<<14 | e<<9 | m OR 0 - (bit 15 is "reserved", bit 14 "non-zero") - smallest rate is 0 (special representation) - largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1) - smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0) - simple algorithm: - find position of top bit, this gives e - remove top bit and shift (rounding if feeling clever) by 9-e - */ - /* Ambassador ucode bug: please don't set bit 14! so 0 rate not - representable. // This should move into the ambassador driver - when properly merged. -- REW */ - - if (rate > 0xffc00000U) { - /* larger than largest representable rate */ - - if (r == ROUND_UP) { - return -EINVAL; - } else { - exp = 31; - man = 511; - } - - } else if (rate) { - /* representable rate */ - - exp = 31; - man = rate; - - /* invariant: rate = man*2^(exp-31) */ - while (!(man & (1<<31))) { - exp = exp - 1; - man = man<<1; - } - - /* man has top bit set - rate = (2^31+(man-2^31))*2^(exp-31) - rate = (1+(man-2^31)/2^31)*2^exp - */ - man = man<<1; - man &= 0xffffffffU; /* a nop on 32-bit systems */ - /* rate = (1+man/2^32)*2^exp - - exp is in the range 0 to 31, man is in the range 0 to 2^32-1 - time to lose significance... we want m in the range 0 to 2^9-1 - rounding presents a minor problem... we first decide which way - we are rounding (based on given rounding direction and possibly - the bits of the mantissa that are to be discarded). - */ - - switch (r) { - case ROUND_DOWN: { - /* just truncate */ - man = man>>(32-9); - break; - } - case ROUND_UP: { - /* check all bits that we are discarding */ - if (man & (~0U>>9)) { - man = (man>>(32-9)) + 1; - if (man == (1<<9)) { - /* no need to check for round up outside of range */ - man = 0; - exp += 1; - } - } else { - man = (man>>(32-9)); - } - break; - } - case ROUND_NEAREST: { - /* check msb that we are discarding */ - if (man & (1<<(32-9-1))) { - man = (man>>(32-9)) + 1; - if (man == (1<<9)) { - /* no need to check for round up outside of range */ - man = 0; - exp += 1; - } - } else { - man = (man>>(32-9)); - } - break; - } - } - - } else { - /* zero rate - not representable */ - - if (r == ROUND_DOWN) { - return -EINVAL; - } else { - exp = 0; - man = 0; - } - } - - fs_dprintk (FS_DEBUG_QOS, "rate: man=%u, exp=%hu", man, exp); - - if (bits) - *bits = /* (1<<14) | */ (exp<<9) | man; - - if (actual) - *actual = (exp >= 9) - ? (1 << exp) + (man << (exp-9)) - : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp)); - - return 0; -} - - - - -/* FireStream access routines */ -/* For DEEP-DOWN debugging these can be rigged to intercept accesses to - certain registers or to just log all accesses. 
*/ - -static inline void write_fs (struct fs_dev *dev, int offset, u32 val) -{ - writel (val, dev->base + offset); -} - - -static inline u32 read_fs (struct fs_dev *dev, int offset) -{ - return readl (dev->base + offset); -} - - - -static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q) -{ - return bus_to_virt (read_fs (dev, Q_WP(q->offset)) & Q_ADDR_MASK); -} - - -static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe) -{ - u32 wp; - struct FS_QENTRY *cqe; - - /* XXX Sanity check: the write pointer can be checked to be - still the same as the value passed as qe... -- REW */ - /* udelay (5); */ - while ((wp = read_fs (dev, Q_WP (q->offset))) & Q_FULL) { - fs_dprintk (FS_DEBUG_TXQ, "Found queue at %x full. Waiting.\n", - q->offset); - schedule (); - } - - wp &= ~0xf; - cqe = bus_to_virt (wp); - if (qe != cqe) { - fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe); - } - - write_fs (dev, Q_WP(q->offset), Q_INCWRAP); - - { - static int c; - if (!(c++ % 100)) - { - int rp, wp; - rp = read_fs (dev, Q_RP(q->offset)); - wp = read_fs (dev, Q_WP(q->offset)); - fs_dprintk (FS_DEBUG_TXQ, "q at %d: %x-%x: %x entries.\n", - q->offset, rp, wp, wp-rp); - } - } -} - -#ifdef DEBUG_EXTRA -static struct FS_QENTRY pq[60]; -static int qp; - -static struct FS_BPENTRY dq[60]; -static int qd; -static void *da[60]; -#endif - -static void submit_queue (struct fs_dev *dev, struct queue *q, - u32 cmd, u32 p1, u32 p2, u32 p3) -{ - struct FS_QENTRY *qe; - - qe = get_qentry (dev, q); - qe->cmd = cmd; - qe->p0 = p1; - qe->p1 = p2; - qe->p2 = p3; - submit_qentry (dev, q, qe); - -#ifdef DEBUG_EXTRA - pq[qp].cmd = cmd; - pq[qp].p0 = p1; - pq[qp].p1 = p2; - pq[qp].p2 = p3; - qp++; - if (qp >= 60) qp = 0; -#endif -} - -/* Test the "other" way one day... -- REW */ -#if 1 -#define submit_command submit_queue -#else - -static void submit_command (struct fs_dev *dev, struct queue *q, - u32 cmd, u32 p1, u32 p2, u32 p3) -{ - write_fs (dev, CMDR0, cmd); - write_fs (dev, CMDR1, p1); - write_fs (dev, CMDR2, p2); - write_fs (dev, CMDR3, p3); -} -#endif - - - -static void process_return_queue (struct fs_dev *dev, struct queue *q) -{ - long rq; - struct FS_QENTRY *qe; - void *tc; - - while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { - fs_dprintk (FS_DEBUG_QUEUE, "reaping return queue entry at %lx\n", rq); - qe = bus_to_virt (rq); - - fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. (%d)\n", - qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe)); - - switch (STATUS_CODE (qe)) { - case 5: - tc = bus_to_virt (qe->p0); - fs_dprintk (FS_DEBUG_ALLOC, "Free tc: %p\n", tc); - kfree (tc); - break; - } - - write_fs (dev, Q_RP(q->offset), Q_INCWRAP); - } -} - - -static void process_txdone_queue (struct fs_dev *dev, struct queue *q) -{ - long rq; - long tmp; - struct FS_QENTRY *qe; - struct sk_buff *skb; - struct FS_BPENTRY *td; - - while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { - fs_dprintk (FS_DEBUG_QUEUE, "reaping txdone entry at %lx\n", rq); - qe = bus_to_virt (rq); - - fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x: %d\n", - qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe)); - - if (STATUS_CODE (qe) != 2) - fs_dprintk (FS_DEBUG_TXMEM, "queue entry: %08x %08x %08x %08x: %d\n", - qe->cmd, qe->p0, qe->p1, qe->p2, STATUS_CODE (qe)); - - - switch (STATUS_CODE (qe)) { - case 0x01: /* This is for AAL0 where we put the chip in streaming mode */ - fallthrough; - case 0x02: - /* Process a real txdone entry. 
*/ - tmp = qe->p0; - if (tmp & 0x0f) - printk (KERN_WARNING "td not aligned: %ld\n", tmp); - tmp &= ~0x0f; - td = bus_to_virt (tmp); - - fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p.\n", - td->flags, td->next, td->bsa, td->aal_bufsize, td->skb ); - - skb = td->skb; - if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) { - FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL; - wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait); - } - td->dev->ntxpckts--; - - { - static int c=0; - - if (!(c++ % 100)) { - fs_dprintk (FS_DEBUG_QSIZE, "[%d]", td->dev->ntxpckts); - } - } - - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); - - fs_dprintk (FS_DEBUG_TXMEM, "i"); - fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); - fs_kfree_skb (skb); - - fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td); - memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY)); - kfree (td); - break; - default: - /* Here we get the tx purge inhibit command ... */ - /* Action, I believe, is "don't do anything". -- REW */ - ; - } - - write_fs (dev, Q_RP(q->offset), Q_INCWRAP); - } -} - - -static void process_incoming (struct fs_dev *dev, struct queue *q) -{ - long rq; - struct FS_QENTRY *qe; - struct FS_BPENTRY *pe; - struct sk_buff *skb; - unsigned int channo; - struct atm_vcc *atm_vcc; - - while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { - fs_dprintk (FS_DEBUG_QUEUE, "reaping incoming queue entry at %lx\n", rq); - qe = bus_to_virt (rq); - - fs_dprintk (FS_DEBUG_QUEUE, "queue entry: %08x %08x %08x %08x. ", - qe->cmd, qe->p0, qe->p1, qe->p2); - - fs_dprintk (FS_DEBUG_QUEUE, "-> %x: %s\n", - STATUS_CODE (qe), - res_strings[STATUS_CODE(qe)]); - - pe = bus_to_virt (qe->p0); - fs_dprintk (FS_DEBUG_QUEUE, "Pool entry: %08x %08x %08x %08x %p %p.\n", - pe->flags, pe->next, pe->bsa, pe->aal_bufsize, - pe->skb, pe->fp); - - channo = qe->cmd & 0xffff; - - if (channo < dev->nchannels) - atm_vcc = dev->atm_vccs[channo]; - else - atm_vcc = NULL; - - /* Single buffer packet */ - switch (STATUS_CODE (qe)) { - case 0x1: - /* Fall through for streaming mode */ - fallthrough; - case 0x2:/* Packet received OK.... */ - if (atm_vcc) { - skb = pe->skb; - pe->fp->n--; -#if 0 - fs_dprintk (FS_DEBUG_QUEUE, "Got skb: %p\n", skb); - if (FS_DEBUG_QUEUE & fs_debug) my_hd (bus_to_virt (pe->bsa), 0x20); -#endif - skb_put (skb, qe->p1 & 0xffff); - ATM_SKB(skb)->vcc = atm_vcc; - atomic_inc(&atm_vcc->stats->rx); - __net_timestamp(skb); - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); - atm_vcc->push (atm_vcc, skb); - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe); - kfree (pe); - } else { - printk (KERN_ERR "Got a receive on a non-open channel %d.\n", channo); - } - break; - case 0x17:/* AAL 5 CRC32 error. IFF the length field is nonzero, a buffer - has been consumed and needs to be processed. -- REW */ - if (qe->p1 & 0xffff) { - pe = bus_to_virt (qe->p0); - pe->fp->n--; - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", pe->skb); - dev_kfree_skb_any (pe->skb); - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", pe); - kfree (pe); - } - if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); - break; - case 0x1f: /* Reassembly abort: no buffers. */ - /* Silently increment error counter. */ - if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); - break; - default: /* Hmm. Haven't written the code to handle the others yet... 
-- REW */ - printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", - STATUS_CODE(qe), res_strings[STATUS_CODE (qe)]); - } - write_fs (dev, Q_RP(q->offset), Q_INCWRAP); - } -} - - - -#define DO_DIRECTION(tp) ((tp)->traffic_class != ATM_NONE) - -static int fs_open(struct atm_vcc *atm_vcc) -{ - struct fs_dev *dev; - struct fs_vcc *vcc; - struct fs_transmit_config *tc; - struct atm_trafprm * txtp; - struct atm_trafprm * rxtp; - /* struct fs_receive_config *rc;*/ - /* struct FS_QENTRY *qe; */ - int error; - int bfp; - int to; - unsigned short tmc0; - short vpi = atm_vcc->vpi; - int vci = atm_vcc->vci; - - func_enter (); - - dev = FS_DEV(atm_vcc->dev); - fs_dprintk (FS_DEBUG_OPEN, "fs: open on dev: %p, vcc at %p\n", - dev, atm_vcc); - - if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) - set_bit(ATM_VF_ADDR, &atm_vcc->flags); - - if ((atm_vcc->qos.aal != ATM_AAL5) && - (atm_vcc->qos.aal != ATM_AAL2)) - return -EINVAL; /* XXX AAL0 */ - - fs_dprintk (FS_DEBUG_OPEN, "fs: (itf %d): open %d.%d\n", - atm_vcc->dev->number, atm_vcc->vpi, atm_vcc->vci); - - /* XXX handle qos parameters (rate limiting) ? */ - - vcc = kmalloc(sizeof(struct fs_vcc), GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc VCC: %p(%zd)\n", vcc, sizeof(struct fs_vcc)); - if (!vcc) { - clear_bit(ATM_VF_ADDR, &atm_vcc->flags); - return -ENOMEM; - } - - atm_vcc->dev_data = vcc; - vcc->last_skb = NULL; - - init_waitqueue_head (&vcc->close_wait); - - txtp = &atm_vcc->qos.txtp; - rxtp = &atm_vcc->qos.rxtp; - - if (!test_bit(ATM_VF_PARTIAL, &atm_vcc->flags)) { - if (IS_FS50(dev)) { - /* Increment the channel numer: take a free one next time. */ - for (to=33;to;to--, dev->channo++) { - /* We only have 32 channels */ - if (dev->channo >= 32) - dev->channo = 0; - /* If we need to do RX, AND the RX is inuse, try the next */ - if (DO_DIRECTION(rxtp) && dev->atm_vccs[dev->channo]) - continue; - /* If we need to do TX, AND the TX is inuse, try the next */ - if (DO_DIRECTION(txtp) && test_bit (dev->channo, dev->tx_inuse)) - continue; - /* Ok, both are free! (or not needed) */ - break; - } - if (!to) { - printk ("No more free channels for FS50..\n"); - kfree(vcc); - return -EBUSY; - } - vcc->channo = dev->channo; - dev->channo &= dev->channel_mask; - - } else { - vcc->channo = (vpi << FS155_VCI_BITS) | (vci); - if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) || - ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) { - printk ("Channel is in use for FS155.\n"); - kfree(vcc); - return -EBUSY; - } - } - fs_dprintk (FS_DEBUG_OPEN, "OK. Allocated channel %x(%d).\n", - vcc->channo, vcc->channo); - } - - if (DO_DIRECTION (txtp)) { - tc = kmalloc (sizeof (struct fs_transmit_config), GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc tc: %p(%zd)\n", - tc, sizeof (struct fs_transmit_config)); - if (!tc) { - fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n"); - kfree(vcc); - return -ENOMEM; - } - - /* Allocate the "open" entry from the high priority txq. This makes - it most likely that the chip will notice it. It also prevents us - from having to wait for completion. On the other hand, we may - need to wait for completion anyway, to see if it completed - successfully. */ - - switch (atm_vcc->qos.aal) { - case ATM_AAL2: - case ATM_AAL0: - tc->flags = 0 - | TC_FLAGS_TRANSPARENT_PAYLOAD - | TC_FLAGS_PACKET - | (1 << 28) - | TC_FLAGS_TYPE_UBR /* XXX Change to VBR -- PVDL */ - | TC_FLAGS_CAL0; - break; - case ATM_AAL5: - tc->flags = 0 - | TC_FLAGS_AAL5 - | TC_FLAGS_PACKET /* ??? 
*/ - | TC_FLAGS_TYPE_CBR - | TC_FLAGS_CAL0; - break; - default: - printk ("Unknown aal: %d\n", atm_vcc->qos.aal); - tc->flags = 0; - } - /* Docs are vague about this atm_hdr field. By the way, the FS - * chip makes odd errors if lower bits are set.... -- REW */ - tc->atm_hdr = (vpi << 20) | (vci << 4); - tmc0 = 0; - { - int pcr = atm_pcr_goal (txtp); - - fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr); - - /* XXX Hmm. officially we're only allowed to do this if rounding - is round_down -- REW */ - if (IS_FS50(dev)) { - if (pcr > 51840000/53/8) pcr = 51840000/53/8; - } else { - if (pcr > 155520000/53/8) pcr = 155520000/53/8; - } - if (!pcr) { - /* no rate cap */ - tmc0 = IS_FS50(dev)?0x61BE:0x64c9; /* Just copied over the bits from Fujitsu -- REW */ - } else { - int r; - if (pcr < 0) { - r = ROUND_DOWN; - pcr = -pcr; - } else { - r = ROUND_UP; - } - error = make_rate (pcr, r, &tmc0, NULL); - if (error) { - kfree(tc); - kfree(vcc); - return error; - } - } - fs_dprintk (FS_DEBUG_OPEN, "pcr = %d.\n", pcr); - } - - tc->TMC[0] = tmc0 | 0x4000; - tc->TMC[1] = 0; /* Unused */ - tc->TMC[2] = 0; /* Unused */ - tc->TMC[3] = 0; /* Unused */ - - tc->spec = 0; /* UTOPIA address, UDF, HEC: Unused -> 0 */ - tc->rtag[0] = 0; /* What should I do with routing tags??? - -- Not used -- AS -- Thanks -- REW*/ - tc->rtag[1] = 0; - tc->rtag[2] = 0; - - if (fs_debug & FS_DEBUG_OPEN) { - fs_dprintk (FS_DEBUG_OPEN, "TX config record:\n"); - my_hd (tc, sizeof (*tc)); - } - - /* We now use the "submit_command" function to submit commands to - the firestream. There is a define up near the definition of - that routine that switches this routine between immediate write - to the immediate command registers and queuing the commands in - the HPTXQ for execution. This last technique might be more - efficient if we know we're going to submit a whole lot of - commands in one go, but this driver is not setup to be able to - use such a construct. So it probably doen't matter much right - now. -- REW */ - - /* The command is IMMediate and INQueue. The parameters are out-of-line.. */ - submit_command (dev, &dev->hp_txq, - QE_CMD_CONFIG_TX | QE_CMD_IMM_INQ | vcc->channo, - virt_to_bus (tc), 0, 0); - - submit_command (dev, &dev->hp_txq, - QE_CMD_TX_EN | QE_CMD_IMM_INQ | vcc->channo, - 0, 0, 0); - set_bit (vcc->channo, dev->tx_inuse); - } - - if (DO_DIRECTION (rxtp)) { - dev->atm_vccs[vcc->channo] = atm_vcc; - - for (bfp = 0;bfp < FS_NR_FREE_POOLS; bfp++) - if (atm_vcc->qos.rxtp.max_sdu <= dev->rx_fp[bfp].bufsize) break; - if (bfp >= FS_NR_FREE_POOLS) { - fs_dprintk (FS_DEBUG_OPEN, "No free pool fits sdu: %d.\n", - atm_vcc->qos.rxtp.max_sdu); - /* XXX Cleanup? -- Would just calling fs_close work??? -- REW */ - - /* XXX clear tx inuse. Close TX part? */ - dev->atm_vccs[vcc->channo] = NULL; - kfree (vcc); - return -EINVAL; - } - - switch (atm_vcc->qos.aal) { - case ATM_AAL0: - case ATM_AAL2: - submit_command (dev, &dev->hp_txq, - QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo, - RC_FLAGS_TRANSP | - RC_FLAGS_BFPS_BFP * bfp | - RC_FLAGS_RXBM_PSB, 0, 0); - break; - case ATM_AAL5: - submit_command (dev, &dev->hp_txq, - QE_CMD_CONFIG_RX | QE_CMD_IMM_INQ | vcc->channo, - RC_FLAGS_AAL5 | - RC_FLAGS_BFPS_BFP * bfp | - RC_FLAGS_RXBM_PSB, 0, 0); - break; - } - if (IS_FS50 (dev)) { - submit_command (dev, &dev->hp_txq, - QE_CMD_REG_WR | QE_CMD_IMM_INQ, - 0x80 + vcc->channo, - (vpi << 16) | vci, 0 ); /* XXX -- Use defines. 
*/ - } - submit_command (dev, &dev->hp_txq, - QE_CMD_RX_EN | QE_CMD_IMM_INQ | vcc->channo, - 0, 0, 0); - } - - /* Indicate we're done! */ - set_bit(ATM_VF_READY, &atm_vcc->flags); - - func_exit (); - return 0; -} - - -static void fs_close(struct atm_vcc *atm_vcc) -{ - struct fs_dev *dev = FS_DEV (atm_vcc->dev); - struct fs_vcc *vcc = FS_VCC (atm_vcc); - struct atm_trafprm * txtp; - struct atm_trafprm * rxtp; - - func_enter (); - - clear_bit(ATM_VF_READY, &atm_vcc->flags); - - fs_dprintk (FS_DEBUG_QSIZE, "--==**[%d]**==--", dev->ntxpckts); - if (vcc->last_skb) { - fs_dprintk (FS_DEBUG_QUEUE, "Waiting for skb %p to be sent.\n", - vcc->last_skb); - /* We're going to wait for the last packet to get sent on this VC. It would - be impolite not to send them don't you think? - XXX - We don't know which packets didn't get sent. So if we get interrupted in - this sleep_on, we'll lose any reference to these packets. Memory leak! - On the other hand, it's awfully convenient that we can abort a "close" that - is taking too long. Maybe just use non-interruptible sleep on? -- REW */ - wait_event_interruptible(vcc->close_wait, !vcc->last_skb); - } - - txtp = &atm_vcc->qos.txtp; - rxtp = &atm_vcc->qos.rxtp; - - - /* See App note XXX (Unpublished as of now) for the reason for the - removal of the "CMD_IMM_INQ" part of the TX_PURGE_INH... -- REW */ - - if (DO_DIRECTION (txtp)) { - submit_command (dev, &dev->hp_txq, - QE_CMD_TX_PURGE_INH | /*QE_CMD_IMM_INQ|*/ vcc->channo, 0,0,0); - clear_bit (vcc->channo, dev->tx_inuse); - } - - if (DO_DIRECTION (rxtp)) { - submit_command (dev, &dev->hp_txq, - QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0); - dev->atm_vccs [vcc->channo] = NULL; - - /* This means that this is configured as a receive channel */ - if (IS_FS50 (dev)) { - /* Disable the receive filter. Is 0/0 indeed an invalid receive - channel? -- REW. Yes it is. -- Hang. Ok. I'll use -1 - (0xfff...) 
-- REW */ - submit_command (dev, &dev->hp_txq, - QE_CMD_REG_WR | QE_CMD_IMM_INQ, - 0x80 + vcc->channo, -1, 0 ); - } - } - - fs_dprintk (FS_DEBUG_ALLOC, "Free vcc: %p\n", vcc); - kfree (vcc); - - func_exit (); -} - - -static int fs_send (struct atm_vcc *atm_vcc, struct sk_buff *skb) -{ - struct fs_dev *dev = FS_DEV (atm_vcc->dev); - struct fs_vcc *vcc = FS_VCC (atm_vcc); - struct FS_BPENTRY *td; - - func_enter (); - - fs_dprintk (FS_DEBUG_TXMEM, "I"); - fs_dprintk (FS_DEBUG_SEND, "Send: atm_vcc %p skb %p vcc %p dev %p\n", - atm_vcc, skb, vcc, dev); - - fs_dprintk (FS_DEBUG_ALLOC, "Alloc t-skb: %p (atm_send)\n", skb); - - ATM_SKB(skb)->vcc = atm_vcc; - - vcc->last_skb = skb; - - td = kmalloc (sizeof (struct FS_BPENTRY), GFP_ATOMIC); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc transd: %p(%zd)\n", td, sizeof (struct FS_BPENTRY)); - if (!td) { - /* Oops out of mem */ - return -ENOMEM; - } - - fs_dprintk (FS_DEBUG_SEND, "first word in buffer: %x\n", - *(int *) skb->data); - - td->flags = TD_EPI | TD_DATA | skb->len; - td->next = 0; - td->bsa = virt_to_bus (skb->data); - td->skb = skb; - td->dev = dev; - dev->ntxpckts++; - -#ifdef DEBUG_EXTRA - da[qd] = td; - dq[qd].flags = td->flags; - dq[qd].next = td->next; - dq[qd].bsa = td->bsa; - dq[qd].skb = td->skb; - dq[qd].dev = td->dev; - qd++; - if (qd >= 60) qd = 0; -#endif - - submit_queue (dev, &dev->hp_txq, - QE_TRANSMIT_DE | vcc->channo, - virt_to_bus (td), 0, - virt_to_bus (td)); - - fs_dprintk (FS_DEBUG_QUEUE, "in send: txq %d txrq %d\n", - read_fs (dev, Q_EA (dev->hp_txq.offset)) - - read_fs (dev, Q_SA (dev->hp_txq.offset)), - read_fs (dev, Q_EA (dev->tx_relq.offset)) - - read_fs (dev, Q_SA (dev->tx_relq.offset))); - - func_exit (); - return 0; -} - - -/* Some function placeholders for functions we don't yet support. */ - -#if 0 -static int fs_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) -{ - func_enter (); - func_exit (); - return -ENOIOCTLCMD; -} - - -static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) -{ - func_enter (); - func_exit (); - return 0; -} - - -static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,unsigned int optlen) -{ - func_enter (); - func_exit (); - return 0; -} - - -static void fs_phy_put(struct atm_dev *dev,unsigned char value, - unsigned long addr) -{ - func_enter (); - func_exit (); -} - - -static unsigned char fs_phy_get(struct atm_dev *dev,unsigned long addr) -{ - func_enter (); - func_exit (); - return 0; -} - - -static int fs_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags) -{ - func_enter (); - func_exit (); - return 0; -}; - -#endif - - -static const struct atmdev_ops ops = { - .open = fs_open, - .close = fs_close, - .send = fs_send, - .owner = THIS_MODULE, - /* ioctl: fs_ioctl, */ - /* change_qos: fs_change_qos, */ - - /* For now implement these internally here... */ - /* phy_put: fs_phy_put, */ - /* phy_get: fs_phy_get, */ -}; - - -static void undocumented_pci_fix(struct pci_dev *pdev) -{ - u32 tint; - - /* The Windows driver says: */ - /* Switch off FireStream Retry Limit Threshold - */ - - /* The register at 0x28 is documented as "reserved", no further - comments. 
*/ - - pci_read_config_dword (pdev, 0x28, &tint); - if (tint != 0x80) { - tint = 0x80; - pci_write_config_dword (pdev, 0x28, tint); - } -} - - - -/************************************************************************** - * PHY routines * - **************************************************************************/ - -static void write_phy(struct fs_dev *dev, int regnum, int val) -{ - submit_command (dev, &dev->hp_txq, QE_CMD_PRP_WR | QE_CMD_IMM_INQ, - regnum, val, 0); -} - -static int init_phy(struct fs_dev *dev, struct reginit_item *reginit) -{ - int i; - - func_enter (); - while (reginit->reg != PHY_EOF) { - if (reginit->reg == PHY_CLEARALL) { - /* "PHY_CLEARALL means clear all registers. Numregisters is in "val". */ - for (i=0;ival;i++) { - write_phy (dev, i, 0); - } - } else { - write_phy (dev, reginit->reg, reginit->val); - } - reginit++; - } - func_exit (); - return 0; -} - -static void reset_chip (struct fs_dev *dev) -{ - int i; - - write_fs (dev, SARMODE0, SARMODE0_SRTS0); - - /* Undocumented delay */ - udelay (128); - - /* The "internal registers are documented to all reset to zero, but - comments & code in the Windows driver indicates that the pools are - NOT reset. */ - for (i=0;i < FS_NR_FREE_POOLS;i++) { - write_fs (dev, FP_CNF (RXB_FP(i)), 0); - write_fs (dev, FP_SA (RXB_FP(i)), 0); - write_fs (dev, FP_EA (RXB_FP(i)), 0); - write_fs (dev, FP_CNT (RXB_FP(i)), 0); - write_fs (dev, FP_CTU (RXB_FP(i)), 0); - } - - /* The same goes for the match channel registers, although those are - NOT documented that way in the Windows driver. -- REW */ - /* The Windows driver DOES write 0 to these registers somewhere in - the init sequence. However, a small hardware-feature, will - prevent reception of data on VPI/VCI = 0/0 (Unless the channel - allocated happens to have no disabled channels that have a lower - number. -- REW */ - - /* Clear the match channel registers. */ - if (IS_FS50 (dev)) { - for (i=0;i 0x10 alignment not yet implemented (hard!)\n"); - return NULL; -} - -static int init_q(struct fs_dev *dev, struct queue *txq, int queue, - int nentries, int is_rq) -{ - int sz = nentries * sizeof (struct FS_QENTRY); - struct FS_QENTRY *p; - - func_enter (); - - fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n", - queue, nentries); - - p = aligned_kmalloc (sz, GFP_KERNEL, 0x10); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc queue: %p(%d)\n", p, sz); - - if (!p) return 0; - - write_fs (dev, Q_SA(queue), virt_to_bus(p)); - write_fs (dev, Q_EA(queue), virt_to_bus(p+nentries-1)); - write_fs (dev, Q_WP(queue), virt_to_bus(p)); - write_fs (dev, Q_RP(queue), virt_to_bus(p)); - if (is_rq) { - /* Configuration for the receive queue: 0: interrupt immediately, - no pre-warning to empty queues: We do our best to keep the - queue filled anyway. 
*/ - write_fs (dev, Q_CNF(queue), 0 ); - } - - txq->sa = p; - txq->ea = p; - txq->offset = queue; - - func_exit (); - return 1; -} - - -static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue, - int bufsize, int nr_buffers) -{ - func_enter (); - - fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue); - - write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME); - write_fs (dev, FP_SA(queue), 0); - write_fs (dev, FP_EA(queue), 0); - write_fs (dev, FP_CTU(queue), 0); - write_fs (dev, FP_CNT(queue), 0); - - fp->offset = queue; - fp->bufsize = bufsize; - fp->nr_buffers = nr_buffers; - - func_exit (); - return 1; -} - - -static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *fp) -{ -#if 0 - /* This seems to be unreliable.... */ - return read_fs (dev, FP_CNT (fp->offset)); -#else - return fp->n; -#endif -} - - -/* Check if this gets going again if a pool ever runs out. -- Yes, it - does. I've seen "receive abort: no buffers" and things started - working again after that... -- REW */ - -static void top_off_fp (struct fs_dev *dev, struct freepool *fp, - gfp_t gfp_flags) -{ - struct FS_BPENTRY *qe, *ne; - struct sk_buff *skb; - int n = 0; - u32 qe_tmp; - - fs_dprintk (FS_DEBUG_QUEUE, "Topping off queue at %x (%d-%d/%d)\n", - fp->offset, read_fs (dev, FP_CNT (fp->offset)), fp->n, - fp->nr_buffers); - while (nr_buffers_in_freepool(dev, fp) < fp->nr_buffers) { - - skb = alloc_skb (fp->bufsize, gfp_flags); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-skb: %p(%d)\n", skb, fp->bufsize); - if (!skb) break; - ne = kmalloc (sizeof (struct FS_BPENTRY), gfp_flags); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc rec-d: %p(%zd)\n", ne, sizeof (struct FS_BPENTRY)); - if (!ne) { - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", skb); - dev_kfree_skb_any (skb); - break; - } - - fs_dprintk (FS_DEBUG_QUEUE, "Adding skb %p desc %p -> %p(%p) ", - skb, ne, skb->data, skb->head); - n++; - ne->flags = FP_FLAGS_EPI | fp->bufsize; - ne->next = virt_to_bus (NULL); - ne->bsa = virt_to_bus (skb->data); - ne->aal_bufsize = fp->bufsize; - ne->skb = skb; - ne->fp = fp; - - /* - * FIXME: following code encodes and decodes - * machine pointers (could be 64-bit) into a - * 32-bit register. - */ - - qe_tmp = read_fs (dev, FP_EA(fp->offset)); - fs_dprintk (FS_DEBUG_QUEUE, "link at %x\n", qe_tmp); - if (qe_tmp) { - qe = bus_to_virt ((long) qe_tmp); - qe->next = virt_to_bus(ne); - qe->flags &= ~FP_FLAGS_EPI; - } else - write_fs (dev, FP_SA(fp->offset), virt_to_bus(ne)); - - write_fs (dev, FP_EA(fp->offset), virt_to_bus (ne)); - fp->n++; /* XXX Atomic_inc? */ - write_fs (dev, FP_CTU(fp->offset), 1); - } - - fs_dprintk (FS_DEBUG_QUEUE, "Added %d entries. \n", n); -} - -static void free_queue(struct fs_dev *dev, struct queue *txq) -{ - func_enter (); - - write_fs (dev, Q_SA(txq->offset), 0); - write_fs (dev, Q_EA(txq->offset), 0); - write_fs (dev, Q_RP(txq->offset), 0); - write_fs (dev, Q_WP(txq->offset), 0); - /* Configuration ? 
*/ - - fs_dprintk (FS_DEBUG_ALLOC, "Free queue: %p\n", txq->sa); - kfree (txq->sa); - - func_exit (); -} - -static void free_freepool(struct fs_dev *dev, struct freepool *fp) -{ - func_enter (); - - write_fs (dev, FP_CNF(fp->offset), 0); - write_fs (dev, FP_SA (fp->offset), 0); - write_fs (dev, FP_EA (fp->offset), 0); - write_fs (dev, FP_CNT(fp->offset), 0); - write_fs (dev, FP_CTU(fp->offset), 0); - - func_exit (); -} - - - -static irqreturn_t fs_irq (int irq, void *dev_id) -{ - int i; - u32 status; - struct fs_dev *dev = dev_id; - - status = read_fs (dev, ISR); - if (!status) - return IRQ_NONE; - - func_enter (); - -#ifdef IRQ_RATE_LIMIT - /* Aaargh! I'm ashamed. This costs more lines-of-code than the actual - interrupt routine!. (Well, used to when I wrote that comment) -- REW */ - { - static int lastjif; - static int nintr=0; - - if (lastjif == jiffies) { - if (++nintr > IRQ_RATE_LIMIT) { - free_irq (dev->irq, dev_id); - printk (KERN_ERR "fs: Too many interrupts. Turning off interrupt %d.\n", - dev->irq); - } - } else { - lastjif = jiffies; - nintr = 0; - } - } -#endif - fs_dprintk (FS_DEBUG_QUEUE, "in intr: txq %d txrq %d\n", - read_fs (dev, Q_EA (dev->hp_txq.offset)) - - read_fs (dev, Q_SA (dev->hp_txq.offset)), - read_fs (dev, Q_EA (dev->tx_relq.offset)) - - read_fs (dev, Q_SA (dev->tx_relq.offset))); - - /* print the bits in the ISR register. */ - if (fs_debug & FS_DEBUG_IRQ) { - /* The FS_DEBUG things are unnecessary here. But this way it is - clear for grep that these are debug prints. */ - fs_dprintk (FS_DEBUG_IRQ, "IRQ status:"); - for (i=0;i<27;i++) - if (status & (1 << i)) - fs_dprintk (FS_DEBUG_IRQ, " %s", irq_bitname[i]); - fs_dprintk (FS_DEBUG_IRQ, "\n"); - } - - if (status & ISR_RBRQ0_W) { - fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (0)!!!!\n"); - process_incoming (dev, &dev->rx_rq[0]); - /* items mentioned on RBRQ0 are from FP 0 or 1. 
*/ - top_off_fp (dev, &dev->rx_fp[0], GFP_ATOMIC); - top_off_fp (dev, &dev->rx_fp[1], GFP_ATOMIC); - } - - if (status & ISR_RBRQ1_W) { - fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (1)!!!!\n"); - process_incoming (dev, &dev->rx_rq[1]); - top_off_fp (dev, &dev->rx_fp[2], GFP_ATOMIC); - top_off_fp (dev, &dev->rx_fp[3], GFP_ATOMIC); - } - - if (status & ISR_RBRQ2_W) { - fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (2)!!!!\n"); - process_incoming (dev, &dev->rx_rq[2]); - top_off_fp (dev, &dev->rx_fp[4], GFP_ATOMIC); - top_off_fp (dev, &dev->rx_fp[5], GFP_ATOMIC); - } - - if (status & ISR_RBRQ3_W) { - fs_dprintk (FS_DEBUG_IRQ, "Iiiin-coming (3)!!!!\n"); - process_incoming (dev, &dev->rx_rq[3]); - top_off_fp (dev, &dev->rx_fp[6], GFP_ATOMIC); - top_off_fp (dev, &dev->rx_fp[7], GFP_ATOMIC); - } - - if (status & ISR_CSQ_W) { - fs_dprintk (FS_DEBUG_IRQ, "Command executed ok!\n"); - process_return_queue (dev, &dev->st_q); - } - - if (status & ISR_TBRQ_W) { - fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n"); - process_txdone_queue (dev, &dev->tx_relq); - } - - func_exit (); - return IRQ_HANDLED; -} - - -#ifdef FS_POLL_FREQ -static void fs_poll (struct timer_list *t) -{ - struct fs_dev *dev = from_timer(dev, t, timer); - - fs_irq (0, dev); - dev->timer.expires = jiffies + FS_POLL_FREQ; - add_timer (&dev->timer); -} -#endif - -static int fs_init(struct fs_dev *dev) -{ - struct pci_dev *pci_dev; - int isr, to; - int i; - - func_enter (); - pci_dev = dev->pci_dev; - - printk (KERN_INFO "found a FireStream %d card, base %16llx, irq%d.\n", - IS_FS50(dev)?50:155, - (unsigned long long)pci_resource_start(pci_dev, 0), - dev->pci_dev->irq); - - if (fs_debug & FS_DEBUG_INIT) - my_hd ((unsigned char *) dev, sizeof (*dev)); - - undocumented_pci_fix (pci_dev); - - dev->hw_base = pci_resource_start(pci_dev, 0); - - dev->base = ioremap(dev->hw_base, 0x1000); - if (!dev->base) - return 1; - - reset_chip (dev); - - write_fs (dev, SARMODE0, 0 - | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. */ - | (1 * SARMODE0_INTMODE_READCLEAR) - | (1 * SARMODE0_CWRE) - | (IS_FS50(dev) ? SARMODE0_PRPWT_FS50_5: - SARMODE0_PRPWT_FS155_3) - | (1 * SARMODE0_CALSUP_1) - | (IS_FS50(dev) ? (0 - | SARMODE0_RXVCS_32 - | SARMODE0_ABRVCS_32 - | SARMODE0_TXVCS_32): - (0 - | SARMODE0_RXVCS_1k - | SARMODE0_ABRVCS_1k - | SARMODE0_TXVCS_1k))); - - /* 10ms * 100 is 1 second. That should be enough, as AN3:9 says it takes - 1ms. */ - to = 100; - while (--to) { - isr = read_fs (dev, ISR); - - /* This bit is documented as "RESERVED" */ - if (isr & ISR_INIT_ERR) { - printk (KERN_ERR "Error initializing the FS... \n"); - goto unmap; - } - if (isr & ISR_INIT) { - fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n"); - break; - } - - /* Try again after 10ms. */ - msleep(10); - } - - if (!to) { - printk (KERN_ERR "timeout initializing the FS... \n"); - goto unmap; - } - - /* XXX fix for fs155 */ - dev->channel_mask = 0x1f; - dev->channo = 0; - - /* AN3: 10 */ - write_fs (dev, SARMODE1, 0 - | (fs_keystream * SARMODE1_DEFHEC) /* XXX PHY */ - | ((loopback == 1) * SARMODE1_TSTLP) /* XXX Loopback mode enable... */ - | (1 * SARMODE1_DCRM) - | (1 * SARMODE1_DCOAM) - | (0 * SARMODE1_OAMCRC) - | (0 * SARMODE1_DUMPE) - | (0 * SARMODE1_GPLEN) - | (0 * SARMODE1_GNAM) - | (0 * SARMODE1_GVAS) - | (0 * SARMODE1_GPAS) - | (1 * SARMODE1_GPRI) - | (0 * SARMODE1_PMS) - | (0 * SARMODE1_GFCR) - | (1 * SARMODE1_HECM2) - | (1 * SARMODE1_HECM1) - | (1 * SARMODE1_HECM0) - | (1 << 12) /* That's what hang's driver does. 
Program to 0 */ - | (0 * 0xff) /* XXX FS155 */); - - - /* Cal prescale etc */ - - /* AN3: 11 */ - write_fs (dev, TMCONF, 0x0000000f); - write_fs (dev, CALPRESCALE, 0x01010101 * num); - write_fs (dev, 0x80, 0x000F00E4); - - /* AN3: 12 */ - write_fs (dev, CELLOSCONF, 0 - | ( 0 * CELLOSCONF_CEN) - | ( CELLOSCONF_SC1) - | (0x80 * CELLOSCONF_COBS) - | (num * CELLOSCONF_COPK) /* Changed from 0xff to 0x5a */ - | (num * CELLOSCONF_COST));/* after a hint from Hang. - * performance jumped 50->70... */ - - /* Magic value by Hang */ - write_fs (dev, CELLOSCONF_COST, 0x0B809191); - - if (IS_FS50 (dev)) { - write_fs (dev, RAS0, RAS0_DCD_XHLT); - dev->atm_dev->ci_range.vpi_bits = 12; - dev->atm_dev->ci_range.vci_bits = 16; - dev->nchannels = FS50_NR_CHANNELS; - } else { - write_fs (dev, RAS0, RAS0_DCD_XHLT - | (((1 << FS155_VPI_BITS) - 1) * RAS0_VPSEL) - | (((1 << FS155_VCI_BITS) - 1) * RAS0_VCSEL)); - /* We can chose the split arbitrarily. We might be able to - support more. Whatever. This should do for now. */ - dev->atm_dev->ci_range.vpi_bits = FS155_VPI_BITS; - dev->atm_dev->ci_range.vci_bits = FS155_VCI_BITS; - - /* Address bits we can't use should be compared to 0. */ - write_fs (dev, RAC, 0); - - /* Manual (AN9, page 6) says ASF1=0 means compare Utopia address - * too. I can't find ASF1 anywhere. Anyway, we AND with just the - * other bits, then compare with 0, which is exactly what we - * want. */ - write_fs (dev, RAM, (1 << (28 - FS155_VPI_BITS - FS155_VCI_BITS)) - 1); - dev->nchannels = FS155_NR_CHANNELS; - } - dev->atm_vccs = kcalloc (dev->nchannels, sizeof (struct atm_vcc *), - GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%zd)\n", - dev->atm_vccs, dev->nchannels * sizeof (struct atm_vcc *)); - - if (!dev->atm_vccs) { - printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n"); - /* XXX Clean up..... */ - goto unmap; - } - - dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc tx_inuse: %p(%d)\n", - dev->atm_vccs, dev->nchannels / 8); - - if (!dev->tx_inuse) { - printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n"); - /* XXX Clean up..... */ - goto unmap; - } - /* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */ - /* -- RAS2 : FS50 only: Default is OK. */ - - /* DMAMODE, default should be OK. -- REW */ - write_fs (dev, DMAMR, DMAMR_TX_MODE_FULL); - - init_q (dev, &dev->hp_txq, TX_PQ(TXQ_HP), TXQ_NENTRIES, 0); - init_q (dev, &dev->lp_txq, TX_PQ(TXQ_LP), TXQ_NENTRIES, 0); - init_q (dev, &dev->tx_relq, TXB_RQ, TXQ_NENTRIES, 1); - init_q (dev, &dev->st_q, ST_Q, TXQ_NENTRIES, 1); - - for (i=0;i < FS_NR_FREE_POOLS;i++) { - init_fp (dev, &dev->rx_fp[i], RXB_FP(i), - rx_buf_sizes[i], rx_pool_sizes[i]); - top_off_fp (dev, &dev->rx_fp[i], GFP_KERNEL); - } - - - for (i=0;i < FS_NR_RX_QUEUES;i++) - init_q (dev, &dev->rx_rq[i], RXB_RQ(i), RXRQ_NENTRIES, 1); - - dev->irq = pci_dev->irq; - if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) { - printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq); - /* XXX undo all previous stuff... */ - goto unmap; - } - fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev); - - /* We want to be notified of most things. Just the statistics count - overflows are not interesting */ - write_fs (dev, IMR, 0 - | ISR_RBRQ0_W - | ISR_RBRQ1_W - | ISR_RBRQ2_W - | ISR_RBRQ3_W - | ISR_TBRQ_W - | ISR_CSQ_W); - - write_fs (dev, SARMODE0, 0 - | (0 * SARMODE0_SHADEN) /* We don't use shadow registers. 
*/ - | (1 * SARMODE0_GINT) - | (1 * SARMODE0_INTMODE_READCLEAR) - | (0 * SARMODE0_CWRE) - | (IS_FS50(dev)?SARMODE0_PRPWT_FS50_5: - SARMODE0_PRPWT_FS155_3) - | (1 * SARMODE0_CALSUP_1) - | (IS_FS50 (dev)?(0 - | SARMODE0_RXVCS_32 - | SARMODE0_ABRVCS_32 - | SARMODE0_TXVCS_32): - (0 - | SARMODE0_RXVCS_1k - | SARMODE0_ABRVCS_1k - | SARMODE0_TXVCS_1k)) - | (1 * SARMODE0_RUN)); - - init_phy (dev, PHY_NTC_INIT); - - if (loopback == 2) { - write_phy (dev, 0x39, 0x000e); - } - -#ifdef FS_POLL_FREQ - timer_setup(&dev->timer, fs_poll, 0); - dev->timer.expires = jiffies + FS_POLL_FREQ; - add_timer (&dev->timer); -#endif - - dev->atm_dev->dev_data = dev; - - func_exit (); - return 0; -unmap: - iounmap(dev->base); - return 1; -} - -static int firestream_init_one(struct pci_dev *pci_dev, - const struct pci_device_id *ent) -{ - struct atm_dev *atm_dev; - struct fs_dev *fs_dev; - - if (pci_enable_device(pci_dev)) - goto err_out; - - fs_dev = kzalloc (sizeof (struct fs_dev), GFP_KERNEL); - fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%zd)\n", - fs_dev, sizeof (struct fs_dev)); - if (!fs_dev) - goto err_out; - atm_dev = atm_dev_register("fs", &pci_dev->dev, &ops, -1, NULL); - if (!atm_dev) - goto err_out_free_fs_dev; - - fs_dev->pci_dev = pci_dev; - fs_dev->atm_dev = atm_dev; - fs_dev->flags = ent->driver_data; - - if (fs_init(fs_dev)) - goto err_out_free_atm_dev; - - fs_dev->next = fs_boards; - fs_boards = fs_dev; - return 0; - - err_out_free_atm_dev: - atm_dev_deregister(atm_dev); - err_out_free_fs_dev: - kfree(fs_dev); - err_out: - return -ENODEV; -} - -static void firestream_remove_one(struct pci_dev *pdev) -{ - int i; - struct fs_dev *dev, *nxtdev; - struct fs_vcc *vcc; - struct FS_BPENTRY *fp, *nxt; - - func_enter (); - -#if 0 - printk ("hptxq:\n"); - for (i=0;i<60;i++) { - printk ("%d: %08x %08x %08x %08x \n", - i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2); - qp++; - if (qp >= 60) qp = 0; - } - - printk ("descriptors:\n"); - for (i=0;i<60;i++) { - printk ("%d: %p: %08x %08x %p %p\n", - i, da[qd], dq[qd].flags, dq[qd].bsa, dq[qd].skb, dq[qd].dev); - qd++; - if (qd >= 60) qd = 0; - } -#endif - - for (dev = fs_boards;dev != NULL;dev=nxtdev) { - fs_dprintk (FS_DEBUG_CLEANUP, "Releasing resources for dev at %p.\n", dev); - - /* XXX Hit all the tx channels too! */ - - for (i=0;i < dev->nchannels;i++) { - if (dev->atm_vccs[i]) { - vcc = FS_VCC (dev->atm_vccs[i]); - submit_command (dev, &dev->hp_txq, - QE_CMD_TX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0); - submit_command (dev, &dev->hp_txq, - QE_CMD_RX_PURGE_INH | QE_CMD_IMM_INQ | vcc->channo, 0,0,0); - - } - } - - /* XXX Wait a while for the chip to release all buffers. */ - - for (i=0;i < FS_NR_FREE_POOLS;i++) { - for (fp=bus_to_virt (read_fs (dev, FP_SA(dev->rx_fp[i].offset))); - !(fp->flags & FP_FLAGS_EPI);fp = nxt) { - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb); - dev_kfree_skb_any (fp->skb); - nxt = bus_to_virt (fp->next); - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp); - kfree (fp); - } - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p\n", fp->skb); - dev_kfree_skb_any (fp->skb); - fs_dprintk (FS_DEBUG_ALLOC, "Free rec-d: %p\n", fp); - kfree (fp); - } - - /* Hang the chip in "reset", prevent it clobbering memory that is - no longer ours. 
*/ - reset_chip (dev); - - fs_dprintk (FS_DEBUG_CLEANUP, "Freeing irq%d.\n", dev->irq); - free_irq (dev->irq, dev); - del_timer_sync (&dev->timer); - - atm_dev_deregister(dev->atm_dev); - free_queue (dev, &dev->hp_txq); - free_queue (dev, &dev->lp_txq); - free_queue (dev, &dev->tx_relq); - free_queue (dev, &dev->st_q); - - fs_dprintk (FS_DEBUG_ALLOC, "Free atmvccs: %p\n", dev->atm_vccs); - kfree (dev->atm_vccs); - - for (i=0;i< FS_NR_FREE_POOLS;i++) - free_freepool (dev, &dev->rx_fp[i]); - - for (i=0;i < FS_NR_RX_QUEUES;i++) - free_queue (dev, &dev->rx_rq[i]); - - iounmap(dev->base); - fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev); - nxtdev = dev->next; - kfree (dev); - } - - func_exit (); -} - -static const struct pci_device_id firestream_pci_tbl[] = { - { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50}, - { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155}, - { 0, } -}; - -MODULE_DEVICE_TABLE(pci, firestream_pci_tbl); - -static struct pci_driver firestream_driver = { - .name = "firestream", - .id_table = firestream_pci_tbl, - .probe = firestream_init_one, - .remove = firestream_remove_one, -}; - -static int __init firestream_init_module (void) -{ - int error; - - func_enter (); - error = pci_register_driver(&firestream_driver); - func_exit (); - return error; -} - -static void __exit firestream_cleanup_module(void) -{ - pci_unregister_driver(&firestream_driver); -} - -module_init(firestream_init_module); -module_exit(firestream_cleanup_module); - -MODULE_LICENSE("GPL"); - - - diff --git a/drivers/atm/firestream.h b/drivers/atm/firestream.h deleted file mode 100644 index 6d684160808d..000000000000 --- a/drivers/atm/firestream.h +++ /dev/null @@ -1,502 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* drivers/atm/firestream.h - FireStream 155 (MB86697) and - * FireStream 50 (MB86695) device driver - */ - -/* Written & (C) 2000 by R.E.Wolff@BitWizard.nl - * Copied snippets from zatm.c by Werner Almesberger, EPFL LRC/ICA - * and ambassador.c Copyright (C) 1995-1999 Madge Networks Ltd - */ - -/* -*/ - - -/*********************************************************************** - * first the defines for the chip. * - ***********************************************************************/ - - -/********************* General chip parameters. ************************/ - -#define FS_NR_FREE_POOLS 8 -#define FS_NR_RX_QUEUES 4 - - -/********************* queues and queue access macros ******************/ - - -/* A queue entry. */ -struct FS_QENTRY { - u32 cmd; - u32 p0, p1, p2; -}; - - -/* A freepool entry. */ -struct FS_BPENTRY { - u32 flags; - u32 next; - u32 bsa; - u32 aal_bufsize; - - /* The hardware doesn't look at this, but we need the SKB somewhere... */ - struct sk_buff *skb; - struct freepool *fp; - struct fs_dev *dev; -}; - - -#define STATUS_CODE(qe) ((qe->cmd >> 22) & 0x3f) - - -/* OFFSETS against the base of a QUEUE... */ -#define QSA 0x00 -#define QEA 0x04 -#define QRP 0x08 -#define QWP 0x0c -#define QCNF 0x10 /* Only for Release queues! */ -/* Not for the transmit pending queue. */ - - -/* OFFSETS against the base of a FREE POOL... 
*/ -#define FPCNF 0x00 -#define FPSA 0x04 -#define FPEA 0x08 -#define FPCNT 0x0c -#define FPCTU 0x10 - -#define Q_SA(b) (b + QSA ) -#define Q_EA(b) (b + QEA ) -#define Q_RP(b) (b + QRP ) -#define Q_WP(b) (b + QWP ) -#define Q_CNF(b) (b + QCNF) - -#define FP_CNF(b) (b + FPCNF) -#define FP_SA(b) (b + FPSA) -#define FP_EA(b) (b + FPEA) -#define FP_CNT(b) (b + FPCNT) -#define FP_CTU(b) (b + FPCTU) - -/* bits in a queue register. */ -#define Q_FULL 0x1 -#define Q_EMPTY 0x2 -#define Q_INCWRAP 0x4 -#define Q_ADDR_MASK 0xfffffff0 - -/* bits in a FreePool config register */ -#define RBFP_RBS (0x1 << 16) -#define RBFP_RBSVAL (0x1 << 15) -#define RBFP_CME (0x1 << 12) -#define RBFP_DLP (0x1 << 11) -#define RBFP_BFPWT (0x1 << 0) - - - - -/* FireStream commands. */ -#define QE_CMD_NULL (0x00 << 22) -#define QE_CMD_REG_RD (0x01 << 22) -#define QE_CMD_REG_RDM (0x02 << 22) -#define QE_CMD_REG_WR (0x03 << 22) -#define QE_CMD_REG_WRM (0x04 << 22) -#define QE_CMD_CONFIG_TX (0x05 << 22) -#define QE_CMD_CONFIG_RX (0x06 << 22) -#define QE_CMD_PRP_RD (0x07 << 22) -#define QE_CMD_PRP_RDM (0x2a << 22) -#define QE_CMD_PRP_WR (0x09 << 22) -#define QE_CMD_PRP_WRM (0x2b << 22) -#define QE_CMD_RX_EN (0x0a << 22) -#define QE_CMD_RX_PURGE (0x0b << 22) -#define QE_CMD_RX_PURGE_INH (0x0c << 22) -#define QE_CMD_TX_EN (0x0d << 22) -#define QE_CMD_TX_PURGE (0x0e << 22) -#define QE_CMD_TX_PURGE_INH (0x0f << 22) -#define QE_CMD_RST_CG (0x10 << 22) -#define QE_CMD_SET_CG (0x11 << 22) -#define QE_CMD_RST_CLP (0x12 << 22) -#define QE_CMD_SET_CLP (0x13 << 22) -#define QE_CMD_OVERRIDE (0x14 << 22) -#define QE_CMD_ADD_BFP (0x15 << 22) -#define QE_CMD_DUMP_TX (0x16 << 22) -#define QE_CMD_DUMP_RX (0x17 << 22) -#define QE_CMD_LRAM_RD (0x18 << 22) -#define QE_CMD_LRAM_RDM (0x28 << 22) -#define QE_CMD_LRAM_WR (0x19 << 22) -#define QE_CMD_LRAM_WRM (0x29 << 22) -#define QE_CMD_LRAM_BSET (0x1a << 22) -#define QE_CMD_LRAM_BCLR (0x1b << 22) -#define QE_CMD_CONFIG_SEGM (0x1c << 22) -#define QE_CMD_READ_SEGM (0x1d << 22) -#define QE_CMD_CONFIG_ROUT (0x1e << 22) -#define QE_CMD_READ_ROUT (0x1f << 22) -#define QE_CMD_CONFIG_TM (0x20 << 22) -#define QE_CMD_READ_TM (0x21 << 22) -#define QE_CMD_CONFIG_TXBM (0x22 << 22) -#define QE_CMD_READ_TXBM (0x23 << 22) -#define QE_CMD_CONFIG_RXBM (0x24 << 22) -#define QE_CMD_READ_RXBM (0x25 << 22) -#define QE_CMD_CONFIG_REAS (0x26 << 22) -#define QE_CMD_READ_REAS (0x27 << 22) - -#define QE_TRANSMIT_DE (0x0 << 30) -#define QE_CMD_LINKED (0x1 << 30) -#define QE_CMD_IMM (0x2 << 30) -#define QE_CMD_IMM_INQ (0x3 << 30) - -#define TD_EPI (0x1 << 27) -#define TD_COMMAND (0x1 << 28) - -#define TD_DATA (0x0 << 29) -#define TD_RM_CELL (0x1 << 29) -#define TD_OAM_CELL (0x2 << 29) -#define TD_OAM_CELL_SEGMENT (0x3 << 29) - -#define TD_BPI (0x1 << 20) - -#define FP_FLAGS_EPI (0x1 << 27) - - -#define TX_PQ(i) (0x00 + (i) * 0x10) -#define TXB_RQ (0x20) -#define ST_Q (0x48) -#define RXB_FP(i) (0x90 + (i) * 0x14) -#define RXB_RQ(i) (0x134 + (i) * 0x14) - - -#define TXQ_HP 0 -#define TXQ_LP 1 - -/* Phew. You don't want to know how many revisions these simple queue - * address macros went through before I got them nice and compact as - * they are now. -- REW - */ - - -/* And now for something completely different: - * The rest of the registers... 
*/ - - -#define CMDR0 0x34 -#define CMDR1 0x38 -#define CMDR2 0x3c -#define CMDR3 0x40 - - -#define SARMODE0 0x5c - -#define SARMODE0_TXVCS_0 (0x0 << 0) -#define SARMODE0_TXVCS_1k (0x1 << 0) -#define SARMODE0_TXVCS_2k (0x2 << 0) -#define SARMODE0_TXVCS_4k (0x3 << 0) -#define SARMODE0_TXVCS_8k (0x4 << 0) -#define SARMODE0_TXVCS_16k (0x5 << 0) -#define SARMODE0_TXVCS_32k (0x6 << 0) -#define SARMODE0_TXVCS_64k (0x7 << 0) -#define SARMODE0_TXVCS_32 (0x8 << 0) - -#define SARMODE0_ABRVCS_0 (0x0 << 4) -#define SARMODE0_ABRVCS_512 (0x1 << 4) -#define SARMODE0_ABRVCS_1k (0x2 << 4) -#define SARMODE0_ABRVCS_2k (0x3 << 4) -#define SARMODE0_ABRVCS_4k (0x4 << 4) -#define SARMODE0_ABRVCS_8k (0x5 << 4) -#define SARMODE0_ABRVCS_16k (0x6 << 4) -#define SARMODE0_ABRVCS_32k (0x7 << 4) -#define SARMODE0_ABRVCS_32 (0x9 << 4) /* The others are "8", this one really has to - be 9. Tell me you don't believe me. -- REW */ - -#define SARMODE0_RXVCS_0 (0x0 << 8) -#define SARMODE0_RXVCS_1k (0x1 << 8) -#define SARMODE0_RXVCS_2k (0x2 << 8) -#define SARMODE0_RXVCS_4k (0x3 << 8) -#define SARMODE0_RXVCS_8k (0x4 << 8) -#define SARMODE0_RXVCS_16k (0x5 << 8) -#define SARMODE0_RXVCS_32k (0x6 << 8) -#define SARMODE0_RXVCS_64k (0x7 << 8) -#define SARMODE0_RXVCS_32 (0x8 << 8) - -#define SARMODE0_CALSUP_1 (0x0 << 12) -#define SARMODE0_CALSUP_2 (0x1 << 12) -#define SARMODE0_CALSUP_3 (0x2 << 12) -#define SARMODE0_CALSUP_4 (0x3 << 12) - -#define SARMODE0_PRPWT_FS50_0 (0x0 << 14) -#define SARMODE0_PRPWT_FS50_2 (0x1 << 14) -#define SARMODE0_PRPWT_FS50_5 (0x2 << 14) -#define SARMODE0_PRPWT_FS50_11 (0x3 << 14) - -#define SARMODE0_PRPWT_FS155_0 (0x0 << 14) -#define SARMODE0_PRPWT_FS155_1 (0x1 << 14) -#define SARMODE0_PRPWT_FS155_2 (0x2 << 14) -#define SARMODE0_PRPWT_FS155_3 (0x3 << 14) - -#define SARMODE0_SRTS0 (0x1 << 23) -#define SARMODE0_SRTS1 (0x1 << 24) - -#define SARMODE0_RUN (0x1 << 25) - -#define SARMODE0_UNLOCK (0x1 << 26) -#define SARMODE0_CWRE (0x1 << 27) - - -#define SARMODE0_INTMODE_READCLEAR (0x0 << 28) -#define SARMODE0_INTMODE_READNOCLEAR (0x1 << 28) -#define SARMODE0_INTMODE_READNOCLEARINHIBIT (0x2 << 28) -#define SARMODE0_INTMODE_READCLEARINHIBIT (0x3 << 28) /* Tell me you don't believe me. 
*/ - -#define SARMODE0_GINT (0x1 << 30) -#define SARMODE0_SHADEN (0x1 << 31) - - -#define SARMODE1 0x60 - - -#define SARMODE1_TRTL_SHIFT 0 /* Program to 0 */ -#define SARMODE1_RRTL_SHIFT 4 /* Program to 0 */ - -#define SARMODE1_TAGM (0x1 << 8) /* Program to 0 */ - -#define SARMODE1_HECM0 (0x1 << 9) -#define SARMODE1_HECM1 (0x1 << 10) -#define SARMODE1_HECM2 (0x1 << 11) - -#define SARMODE1_GFCE (0x1 << 14) -#define SARMODE1_GFCR (0x1 << 15) -#define SARMODE1_PMS (0x1 << 18) -#define SARMODE1_GPRI (0x1 << 19) -#define SARMODE1_GPAS (0x1 << 20) -#define SARMODE1_GVAS (0x1 << 21) -#define SARMODE1_GNAM (0x1 << 22) -#define SARMODE1_GPLEN (0x1 << 23) -#define SARMODE1_DUMPE (0x1 << 24) -#define SARMODE1_OAMCRC (0x1 << 25) -#define SARMODE1_DCOAM (0x1 << 26) -#define SARMODE1_DCRM (0x1 << 27) -#define SARMODE1_TSTLP (0x1 << 28) -#define SARMODE1_DEFHEC (0x1 << 29) - - -#define ISR 0x64 -#define IUSR 0x68 -#define IMR 0x6c - -#define ISR_LPCO (0x1 << 0) -#define ISR_DPCO (0x1 << 1) -#define ISR_RBRQ0_W (0x1 << 2) -#define ISR_RBRQ1_W (0x1 << 3) -#define ISR_RBRQ2_W (0x1 << 4) -#define ISR_RBRQ3_W (0x1 << 5) -#define ISR_RBRQ0_NF (0x1 << 6) -#define ISR_RBRQ1_NF (0x1 << 7) -#define ISR_RBRQ2_NF (0x1 << 8) -#define ISR_RBRQ3_NF (0x1 << 9) -#define ISR_BFP_SC (0x1 << 10) -#define ISR_INIT (0x1 << 11) -#define ISR_INIT_ERR (0x1 << 12) /* Documented as "reserved" */ -#define ISR_USCEO (0x1 << 13) -#define ISR_UPEC0 (0x1 << 14) -#define ISR_VPFCO (0x1 << 15) -#define ISR_CRCCO (0x1 << 16) -#define ISR_HECO (0x1 << 17) -#define ISR_TBRQ_W (0x1 << 18) -#define ISR_TBRQ_NF (0x1 << 19) -#define ISR_CTPQ_E (0x1 << 20) -#define ISR_GFC_C0 (0x1 << 21) -#define ISR_PCI_FTL (0x1 << 22) -#define ISR_CSQ_W (0x1 << 23) -#define ISR_CSQ_NF (0x1 << 24) -#define ISR_EXT_INT (0x1 << 25) -#define ISR_RXDMA_S (0x1 << 26) - - -#define TMCONF 0x78 -/* Bits? */ - - -#define CALPRESCALE 0x7c -/* Bits? */ - -#define CELLOSCONF 0x84 -#define CELLOSCONF_COTS (0x1 << 28) -#define CELLOSCONF_CEN (0x1 << 27) -#define CELLOSCONF_SC8 (0x3 << 24) -#define CELLOSCONF_SC4 (0x2 << 24) -#define CELLOSCONF_SC2 (0x1 << 24) -#define CELLOSCONF_SC1 (0x0 << 24) - -#define CELLOSCONF_COBS (0x1 << 16) -#define CELLOSCONF_COPK (0x1 << 8) -#define CELLOSCONF_COST (0x1 << 0) -/* Bits? 
*/ - -#define RAS0 0x1bc -#define RAS0_DCD_XHLT (0x1 << 31) - -#define RAS0_VPSEL (0x1 << 16) -#define RAS0_VCSEL (0x1 << 0) - -#define RAS1 0x1c0 -#define RAS1_UTREG (0x1 << 5) - - -#define DMAMR 0x1cc -#define DMAMR_TX_MODE_FULL (0x0 << 0) -#define DMAMR_TX_MODE_PART (0x1 << 0) -#define DMAMR_TX_MODE_NONE (0x2 << 0) /* And 3 */ - - - -#define RAS2 0x280 - -#define RAS2_NNI (0x1 << 0) -#define RAS2_USEL (0x1 << 1) -#define RAS2_UBS (0x1 << 2) - - - -struct fs_transmit_config { - u32 flags; - u32 atm_hdr; - u32 TMC[4]; - u32 spec; - u32 rtag[3]; -}; - -#define TC_FLAGS_AAL5 (0x0 << 29) -#define TC_FLAGS_TRANSPARENT_PAYLOAD (0x1 << 29) -#define TC_FLAGS_TRANSPARENT_CELL (0x2 << 29) -#define TC_FLAGS_STREAMING (0x1 << 28) -#define TC_FLAGS_PACKET (0x0) -#define TC_FLAGS_TYPE_ABR (0x0 << 22) -#define TC_FLAGS_TYPE_CBR (0x1 << 22) -#define TC_FLAGS_TYPE_VBR (0x2 << 22) -#define TC_FLAGS_TYPE_UBR (0x3 << 22) -#define TC_FLAGS_CAL0 (0x0 << 20) -#define TC_FLAGS_CAL1 (0x1 << 20) -#define TC_FLAGS_CAL2 (0x2 << 20) -#define TC_FLAGS_CAL3 (0x3 << 20) - - -#define RC_FLAGS_NAM (0x1 << 13) -#define RC_FLAGS_RXBM_PSB (0x0 << 14) -#define RC_FLAGS_RXBM_CIF (0x1 << 14) -#define RC_FLAGS_RXBM_PMB (0x2 << 14) -#define RC_FLAGS_RXBM_STR (0x4 << 14) -#define RC_FLAGS_RXBM_SAF (0x6 << 14) -#define RC_FLAGS_RXBM_POS (0x6 << 14) -#define RC_FLAGS_BFPS (0x1 << 17) - -#define RC_FLAGS_BFPS_BFP (0x1 << 17) - -#define RC_FLAGS_BFPS_BFP0 (0x0 << 17) -#define RC_FLAGS_BFPS_BFP1 (0x1 << 17) -#define RC_FLAGS_BFPS_BFP2 (0x2 << 17) -#define RC_FLAGS_BFPS_BFP3 (0x3 << 17) -#define RC_FLAGS_BFPS_BFP4 (0x4 << 17) -#define RC_FLAGS_BFPS_BFP5 (0x5 << 17) -#define RC_FLAGS_BFPS_BFP6 (0x6 << 17) -#define RC_FLAGS_BFPS_BFP7 (0x7 << 17) -#define RC_FLAGS_BFPS_BFP01 (0x8 << 17) -#define RC_FLAGS_BFPS_BFP23 (0x9 << 17) -#define RC_FLAGS_BFPS_BFP45 (0xa << 17) -#define RC_FLAGS_BFPS_BFP67 (0xb << 17) -#define RC_FLAGS_BFPS_BFP07 (0xc << 17) -#define RC_FLAGS_BFPS_BFP27 (0xd << 17) -#define RC_FLAGS_BFPS_BFP47 (0xe << 17) - -#define RC_FLAGS_BFPP (0x1 << 21) -#define RC_FLAGS_TEVC (0x1 << 22) -#define RC_FLAGS_TEP (0x1 << 23) -#define RC_FLAGS_AAL5 (0x0 << 24) -#define RC_FLAGS_TRANSP (0x1 << 24) -#define RC_FLAGS_TRANSC (0x2 << 24) -#define RC_FLAGS_ML (0x1 << 27) -#define RC_FLAGS_TRBRM (0x1 << 28) -#define RC_FLAGS_PRI (0x1 << 29) -#define RC_FLAGS_HOAM (0x1 << 30) -#define RC_FLAGS_CRC10 (0x1 << 31) - - -#define RAC 0x1c8 -#define RAM 0x1c4 - - - -/************************************************************************ - * Then the datastructures that the DRIVER uses. 
* - ************************************************************************/ - -#define TXQ_NENTRIES 32 -#define RXRQ_NENTRIES 1024 - - -struct fs_vcc { - int channo; - wait_queue_head_t close_wait; - struct sk_buff *last_skb; -}; - - -struct queue { - struct FS_QENTRY *sa, *ea; - int offset; -}; - -struct freepool { - int offset; - int bufsize; - int nr_buffers; - int n; -}; - - -struct fs_dev { - struct fs_dev *next; /* other FS devices */ - int flags; - - unsigned char irq; /* IRQ */ - struct pci_dev *pci_dev; /* PCI stuff */ - struct atm_dev *atm_dev; - struct timer_list timer; - - unsigned long hw_base; /* mem base address */ - void __iomem *base; /* Mapping of base address */ - int channo; - unsigned long channel_mask; - - struct queue hp_txq, lp_txq, tx_relq, st_q; - struct freepool rx_fp[FS_NR_FREE_POOLS]; - struct queue rx_rq[FS_NR_RX_QUEUES]; - - int nchannels; - struct atm_vcc **atm_vccs; - void *tx_inuse; - int ntxpckts; -}; - - - - -/* Number of channesl that the FS50 supports. */ -#define FS50_CHANNEL_BITS 5 -#define FS50_NR_CHANNELS (1 << FS50_CHANNEL_BITS) - - -#define FS_DEV(atm_dev) ((struct fs_dev *) (atm_dev)->dev_data) -#define FS_VCC(atm_vcc) ((struct fs_vcc *) (atm_vcc)->dev_data) - - -#define FS_IS50 0x1 -#define FS_IS155 0x2 - -#define IS_FS50(dev) (dev->flags & FS_IS50) -#define IS_FS155(dev) (dev->flags & FS_IS155) - -/* Within limits this is user-configurable. */ -/* Note: Currently the sum (10 -> 1k channels) is hardcoded in the driver. */ -#define FS155_VPI_BITS 4 -#define FS155_VCI_BITS 6 - -#define FS155_CHANNEL_BITS (FS155_VPI_BITS + FS155_VCI_BITS) -#define FS155_NR_CHANNELS (1 << FS155_CHANNEL_BITS) -- cgit v1.2.3 From 5b74a20d35ab8ff05c2111233bb069eb7fcea4e9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Apr 2022 10:54:32 -0700 Subject: net: atm: remove support for Madge Horizon ATM devices This driver received nothing but automated fixes since git era begun. Since it's using virt_to_bus it's unlikely to be used on any modern platform. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- arch/mips/configs/gpr_defconfig | 1 - arch/mips/configs/mtx1_defconfig | 1 - drivers/atm/Kconfig | 24 - drivers/atm/Makefile | 1 - drivers/atm/horizon.c | 2853 -------------------------------------- drivers/atm/horizon.h | 492 ------- 6 files changed, 3372 deletions(-) delete mode 100644 drivers/atm/horizon.c delete mode 100644 drivers/atm/horizon.h (limited to 'drivers') diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 605e778dff74..7ed202db9ef0 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -181,7 +181,6 @@ CONFIG_ATM_ENI=m CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m -CONFIG_ATM_HORIZON=m CONFIG_ATM_IA=m CONFIG_ATM_FORE200E=m CONFIG_ATM_HE=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index de95e7fe5a77..f46ad2e294fa 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -258,7 +258,6 @@ CONFIG_ATM_ENI=m CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m -CONFIG_ATM_HORIZON=m CONFIG_ATM_IA=m CONFIG_ATM_FORE200E=m CONFIG_ATM_HE=m diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 360c98ad29eb..9c778308722a 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -234,30 +234,6 @@ config ATM_IDT77252_USE_SUNI depends on ATM_IDT77252 default y -config ATM_HORIZON - tristate "Madge Horizon [Ultra] (Collage PCI 25 and Collage PCI 155 Client)" - depends on PCI && VIRT_TO_BUS - help - This is a driver for the Horizon chipset ATM adapter cards once - produced by Madge Networks Ltd. Say Y (or M to compile as a module - named horizon) here if you have one of these cards. - -config ATM_HORIZON_DEBUG - bool "Enable debugging messages" - depends on ATM_HORIZON - help - Somewhat useful debugging messages are available. The choice of - messages is controlled by a bitmap. This may be specified as a - module argument (kernel command line argument as well?), changed - dynamically using an ioctl (not yet) or changed by sending the - string "Dxxxx" to VCI 1023 (where x is a hex digit). See the file - for the meanings of the bits in the - mask. - - When active, these messages can have a significant impact on the - speed of the driver, and the size of your syslog files! When - inactive, they will have only a modest impact on performance. - config ATM_IA tristate "Interphase ATM PCI x575/x525/x531" depends on PCI diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile index 7d38fdaddd09..1b6a8ddaf007 100644 --- a/drivers/atm/Makefile +++ b/drivers/atm/Makefile @@ -7,7 +7,6 @@ fore_200e-y := fore200e.o obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o obj-$(CONFIG_ATM_NICSTAR) += nicstar.o -obj-$(CONFIG_ATM_HORIZON) += horizon.o obj-$(CONFIG_ATM_IA) += iphase.o suni.o obj-$(CONFIG_ATM_FORE200E) += fore_200e.o obj-$(CONFIG_ATM_ENI) += eni.o suni.o diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c deleted file mode 100644 index d0e67ec46216..000000000000 --- a/drivers/atm/horizon.c +++ /dev/null @@ -1,2853 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - Madge Horizon ATM Adapter driver. - Copyright (C) 1995-1999 Madge Networks Ltd. - -*/ - -/* - IMPORTANT NOTE: Madge Networks no longer makes the adapters - supported by this driver and makes no commitment to maintain it. 
-*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "horizon.h" - -#define maintainer_string "Giuliano Procida at Madge Networks " -#define description_string "Madge ATM Horizon [Ultra] driver" -#define version_string "1.2.1" - -static inline void __init show_version (void) { - printk ("%s version %s\n", description_string, version_string); -} - -/* - - CREDITS - - Driver and documentation by: - - Chris Aston Madge Networks - Giuliano Procida Madge Networks - Simon Benham Madge Networks - Simon Johnson Madge Networks - Various Others Madge Networks - - Some inspiration taken from other drivers by: - - Alexandru Cucos UTBv - Kari Mettinen University of Helsinki - Werner Almesberger EPFL LRC - - Theory of Operation - - I Hardware, detection, initialisation and shutdown. - - 1. Supported Hardware - - This driver should handle all variants of the PCI Madge ATM adapters - with the Horizon chipset. These are all PCI cards supporting PIO, BM - DMA and a form of MMIO (registers only, not internal RAM). - - The driver is only known to work with SONET and UTP Horizon Ultra - cards at 155Mb/s. However, code is in place to deal with both the - original Horizon and 25Mb/s operation. - - There are two revisions of the Horizon ASIC: the original and the - Ultra. Details of hardware bugs are in section III. - - The ASIC version can be distinguished by chip markings but is NOT - indicated by the PCI revision (all adapters seem to have PCI rev 1). - - I believe that: - - Horizon => Collage 25 PCI Adapter (UTP and STP) - Horizon Ultra => Collage 155 PCI Client (UTP or SONET) - Ambassador x => Collage 155 PCI Server (completely different) - - Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to - have a Madge B154 plus glue logic serializer. I have also found a - really ancient version of this with slightly different glue. It - comes with the revision 0 (140-025-01) ASIC. - - Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink - output (UTP) or an HP HFBR 5205 output (SONET). It has either - Madge's SAMBA framer or a SUNI-lite device (early versions). It - comes with the revision 1 (140-027-01) ASIC. - - 2. Detection - - All Horizon-based cards present with the same PCI Vendor and Device - IDs. The standard Linux 2.2 PCI API is used to locate any cards and - to enable bus-mastering (with appropriate latency). - - ATM_LAYER_STATUS in the control register distinguishes between the - two possible physical layers (25 and 155). It is not clear whether - the 155 cards can also operate at 25Mbps. We rely on the fact that a - card operates at 155 if and only if it has the newer Horizon Ultra - ASIC. - - For 155 cards the two possible framers are probed for and then set - up for loop-timing. - - 3. Initialisation - - The card is reset and then put into a known state. The physical - layer is configured for normal operation at the appropriate speed; - in the case of the 155 cards, the framer is initialised with - line-based timing; the internal RAM is zeroed and the allocation of - buffers for RX and TX is made; the Burnt In Address is read and - copied to the ATM ESI; various policy settings for RX (VPI bits, - unknown VCs, oam cells) are made. 
Ideally all policy items should be - configurable at module load (if not actually on-demand), however, - only the vpi vs vci bit allocation can be specified at insmod. - - 4. Shutdown - - This is in response to module_cleaup. No VCs are in use and the card - should be idle; it is reset. - - II Driver software (as it should be) - - 0. Traffic Parameters - - The traffic classes (not an enumeration) are currently: ATM_NONE (no - traffic), ATM_UBR, ATM_CBR, ATM_VBR and ATM_ABR, ATM_ANYCLASS - (compatible with everything). Together with (perhaps only some of) - the following items they make up the traffic specification. - - struct atm_trafprm { - unsigned char traffic_class; traffic class (ATM_UBR, ...) - int max_pcr; maximum PCR in cells per second - int pcr; desired PCR in cells per second - int min_pcr; minimum PCR in cells per second - int max_cdv; maximum CDV in microseconds - int max_sdu; maximum SDU in bytes - }; - - Note that these denote bandwidth available not bandwidth used; the - possibilities according to ATMF are: - - Real Time (cdv and max CDT given) - - CBR(pcr) pcr bandwidth always available - rtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too - - Non Real Time - - nrtVBR(pcr,scr,mbs) scr bandwidth always available, up to pcr at mbs too - UBR() - ABR(mcr,pcr) mcr bandwidth always available, up to pcr (depending) too - - mbs is max burst size (bucket) - pcr and scr have associated cdvt values - mcr is like scr but has no cdtv - cdtv may differ at each hop - - Some of the above items are qos items (as opposed to traffic - parameters). We have nothing to do with qos. All except ABR can have - their traffic parameters converted to GCRA parameters. The GCRA may - be implemented as a (real-number) leaky bucket. The GCRA can be used - in complicated ways by switches and in simpler ways by end-stations. - It can be used both to filter incoming cells and shape out-going - cells. - - ATM Linux actually supports: - - ATM_NONE() (no traffic in this direction) - ATM_UBR(max_frame_size) - ATM_CBR(max/min_pcr, max_cdv, max_frame_size) - - 0 or ATM_MAX_PCR are used to indicate maximum available PCR - - A traffic specification consists of the AAL type and separate - traffic specifications for either direction. In ATM Linux it is: - - struct atm_qos { - struct atm_trafprm txtp; - struct atm_trafprm rxtp; - unsigned char aal; - }; - - AAL types are: - - ATM_NO_AAL AAL not specified - ATM_AAL0 "raw" ATM cells - ATM_AAL1 AAL1 (CBR) - ATM_AAL2 AAL2 (VBR) - ATM_AAL34 AAL3/4 (data) - ATM_AAL5 AAL5 (data) - ATM_SAAL signaling AAL - - The Horizon has support for AAL frame types: 0, 3/4 and 5. However, - it does not implement AAL 3/4 SAR and it has a different notion of - "raw cell" to ATM Linux's (48 bytes vs. 52 bytes) so neither are - supported by this driver. - - The Horizon has limited support for ABR (including UBR), VBR and - CBR. Each TX channel has a bucket (containing up to 31 cell units) - and two timers (PCR and SCR) associated with it that can be used to - govern cell emissions and host notification (in the case of ABR this - is presumably so that RM cells may be emitted at appropriate times). - The timers may either be disabled or may be set to any of 240 values - (determined by the clock crystal, a fixed (?) per-device divider, a - configurable divider and a configurable timer preload value). - - At the moment only UBR and CBR are supported by the driver. VBR will - be supported as soon as ATM for Linux supports it. 
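   As a minimal sketch (using only the structures and classes listed above;
   the numbers are purely illustrative), a CBR circuit carrying AAL5 frames
   of up to 1500 bytes at 10000 cells per second each way would be requested
   with something like:

   struct atm_qos qos;

   memset (&qos, 0, sizeof (qos));
   qos.aal = ATM_AAL5;
   qos.txtp.traffic_class = ATM_CBR;
   qos.txtp.max_pcr = 10000;      cells per second
   qos.txtp.max_sdu = 1500;       bytes
   qos.rxtp = qos.txtp;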
ABR support is - very unlikely as RM cell handling is completely up to the driver. - - 1. TX (TX channel setup and TX transfer) - - The TX half of the driver owns the TX Horizon registers. The TX - component in the IRQ handler is the BM completion handler. This can - only be entered when tx_busy is true (enforced by hardware). The - other TX component can only be entered when tx_busy is false - (enforced by driver). So TX is single-threaded. - - Apart from a minor optimisation to not re-select the last channel, - the TX send component works as follows: - - Atomic test and set tx_busy until we succeed; we should implement - some sort of timeout so that tx_busy will never be stuck at true. - - If no TX channel is set up for this VC we wait for an idle one (if - necessary) and set it up. - - At this point we have a TX channel ready for use. We wait for enough - buffers to become available then start a TX transmit (set the TX - descriptor, schedule transfer, exit). - - The IRQ component handles TX completion (stats, free buffer, tx_busy - unset, exit). We also re-schedule further transfers for the same - frame if needed. - - TX setup in more detail: - - TX open is a nop, the relevant information is held in the hrz_vcc - (vcc->dev_data) structure and is "cached" on the card. - - TX close gets the TX lock and clears the channel from the "cache". - - 2. RX (Data Available and RX transfer) - - The RX half of the driver owns the RX registers. There are two RX - components in the IRQ handler: the data available handler deals with - fresh data that has arrived on the card, the BM completion handler - is very similar to the TX completion handler. The data available - handler grabs the rx_lock and it is only released once the data has - been discarded or completely transferred to the host. The BM - completion handler only runs when the lock is held; the data - available handler is locked out over the same period. - - Data available on the card triggers an interrupt. If the data is not - suitable for our existing RX channels or we cannot allocate a buffer - it is flushed. Otherwise an RX receive is scheduled. Multiple RX - transfers may be scheduled for the same frame. - - RX setup in more detail: - - RX open... - RX close... - - III Hardware Bugs - - 0. Byte vs Word addressing of adapter RAM. - - A design feature; see the .h file (especially the memory map). - - 1. Bus Master Data Transfers (original Horizon only, fixed in Ultra) - - The host must not start a transmit direction transfer at a - non-four-byte boundary in host memory. Instead the host should - perform a byte, or a two byte, or one byte followed by two byte - transfer in order to start the rest of the transfer on a four byte - boundary. RX is OK. - - Simultaneous transmit and receive direction bus master transfers are - not allowed. - - The simplest solution to these two is to always do PIO (never DMA) - in the TX direction on the original Horizon. More complicated - solutions are likely to hurt my brain. - - 2. Loss of buffer on close VC - - When a VC is being closed, the buffer associated with it is not - returned to the pool. The host must store the reference to this - buffer and when opening a new VC then give it to that new VC. - - The host intervention currently consists of stacking such a buffer - pointer at VC close and checking the stack at VC open. - - 3. Failure to close a VC - - If a VC is currently receiving a frame then closing the VC may fail - and the frame continues to be received. 
- - The solution is to make sure any received frames are flushed when - ready. This is currently done just before the solution to 2. - - 4. PCI bus (original Horizon only, fixed in Ultra) - - Reading from the data port prior to initialisation will hang the PCI - bus. Just don't do that then! We don't. - - IV To Do List - - . Timer code may be broken. - - . Allow users to specify buffer allocation split for TX and RX. - - . Deal once and for all with buggy VC close. - - . Handle interrupted and/or non-blocking operations. - - . Change some macros to functions and move from .h to .c. - - . Try to limit the number of TX frames each VC may have queued, in - order to reduce the chances of TX buffer exhaustion. - - . Implement VBR (bucket and timers not understood) and ABR (need to - do RM cells manually); also no Linux support for either. - - . Implement QoS changes on open VCs (involves extracting parts of VC open - and close into separate functions and using them to make changes). - -*/ - -/********** globals **********/ - -static void do_housekeeping (struct timer_list *t); - -static unsigned short debug = 0; -static unsigned short vpi_bits = 0; -static int max_tx_size = 9000; -static int max_rx_size = 9000; -static unsigned char pci_lat = 0; - -/********** access functions **********/ - -/* Read / Write Horizon registers */ -static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) { - outl (cpu_to_le32 (data), dev->iobase + reg); -} - -static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) { - return le32_to_cpu (inl (dev->iobase + reg)); -} - -static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) { - outw (cpu_to_le16 (data), dev->iobase + reg); -} - -static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) { - return le16_to_cpu (inw (dev->iobase + reg)); -} - -static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) { - outsb (dev->iobase + reg, addr, len); -} - -static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) { - insb (dev->iobase + reg, addr, len); -} - -/* Read / Write to a given address in Horizon buffer memory. - Interrupts must be disabled between the address register and data - port accesses as these must form an atomic operation. 
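   In this driver that atomicity comes from the callers rather than from the
   helpers themselves: the channel setup and teardown paths bracket these
   accessors with the per-device memory lock, as in the open/close code
   further on:

   spin_lock_irqsave (&dev->mem_lock, flags);
   wr_mem (dev, &rx_desc->wr_buf_type, value);
   spin_unlock_irqrestore (&dev->mem_lock, flags);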
*/ -static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) { - // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr); - wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW)); - wr_regl (dev, MEMORY_PORT_OFF, data); -} - -static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) { - // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr); - wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW)); - return rd_regl (dev, MEMORY_PORT_OFF); -} - -static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) { - wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000); - wr_regl (dev, MEMORY_PORT_OFF, data); -} - -static inline u32 rd_framer (const hrz_dev * dev, u32 addr) { - wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000); - return rd_regl (dev, MEMORY_PORT_OFF); -} - -/********** specialised access functions **********/ - -/* RX */ - -static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) { - wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel); - return; -} - -static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) { - while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL) - ; - return; -} - -static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) { - wr_regw (dev, RX_CHANNEL_PORT_OFF, channel); - return; -} - -static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) { - while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS) - ; - return; -} - -/* TX */ - -static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) { - wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel); - return; -} - -/* Update or query one configuration parameter of a particular channel. */ - -static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) { - wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF, - chan * TX_CHANNEL_CONFIG_MULT | mode); - wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value); - return; -} - -/********** dump functions **********/ - -static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) { -#ifdef DEBUG_HORIZON - unsigned int i; - unsigned char * data = skb->data; - PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc); - for (i=0; ilen && i < 256;i++) - PRINTDM (DBG_DATA, "%02x ", data[i]); - PRINTDE (DBG_DATA,""); -#else - (void) prefix; - (void) vc; - (void) skb; -#endif - return; -} - -static inline void dump_regs (hrz_dev * dev) { -#ifdef DEBUG_HORIZON - PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG)); - PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF)); - PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF)); - PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF)); - PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF)); - PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF)); -#else - (void) dev; -#endif - return; -} - -static inline void dump_framer (hrz_dev * dev) { -#ifdef DEBUG_HORIZON - unsigned int i; - PRINTDB (DBG_REGS, "framer registers:"); - for (i = 0; i < 0x10; ++i) - PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i)); - PRINTDE (DBG_REGS,""); -#else - (void) dev; -#endif - return; -} - -/********** VPI/VCI <-> (RX) channel conversions **********/ - -/* RX channels are 10 bit integers, these fns are quite paranoid */ - -static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) { - unsigned short vci_bits = 10 - vpi_bits; - if (0 <= vpi && vpi < 1<>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK; -} - -/* Cell Transmit Rate Values - * 
- * the cell transmit rate (cells per sec) can be set to a variety of - * different values by specifying two parameters: a timer preload from - * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of - * an exponent from 0 to 14; the special value 15 disables the timer). - * - * cellrate = baserate / (preload * 2^divider) - * - * The maximum cell rate that can be specified is therefore just the - * base rate. Halving the preload is equivalent to adding 1 to the - * divider and so values 1 to 8 of the preload are redundant except - * in the case of a maximal divider (14). - * - * Given a desired cell rate, an algorithm to determine the preload - * and divider is: - * - * a) x = baserate / cellrate, want p * 2^d = x (as far as possible) - * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done - * if x <= 16 then set p = x, d = 0 (high rates), done - * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to - * know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until - * we find the range (n will be between 1 and 14), set d = n - * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n - * - * The algorithm used below is a minor variant of the above. - * - * The base rate is derived from the oscillator frequency (Hz) using a - * fixed divider: - * - * baserate = freq / 32 in the case of some Unknown Card - * baserate = freq / 8 in the case of the Horizon 25 - * baserate = freq / 8 in the case of the Horizon Ultra 155 - * - * The Horizon cards have oscillators and base rates as follows: - * - * Card Oscillator Base Rate - * Unknown Card 33 MHz 1.03125 MHz (33 MHz = PCI freq) - * Horizon 25 32 MHz 4 MHz - * Horizon Ultra 155 40 MHz 5 MHz - * - * The following defines give the base rates in Hz. These were - * previously a factor of 100 larger, no doubt someone was using - * cps*100. - */ - -#define BR_UKN 1031250l -#define BR_HRZ 4000000l -#define BR_ULT 5000000l - -// d is an exponent -#define CR_MIND 0 -#define CR_MAXD 14 - -// p ranges from 1 to a power of 2 -#define CR_MAXPEXP 4 - -static int make_rate (const hrz_dev * dev, u32 c, rounding r, - u16 * bits, unsigned int * actual) -{ - // note: rounding the rate down means rounding 'p' up - const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ; - - u32 div = CR_MIND; - u32 pre; - - // br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in - // the tests below. We could think harder about exact possibilities - // of failure... - - unsigned long br_man = br; - unsigned int br_exp = 0; - - PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c, - r == round_up ? "up" : r == round_down ? "down" : "nearest"); - - // avoid div by zero - if (!c) { - PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!"); - return -EINVAL; - } - - while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) { - br_man = br_man >> 1; - ++br_exp; - } - // (br >>br_exp) < CR_MAXD || (!pre) || pre > 1<rx_descs[channel]; - - PRINTD (DBG_FLOW, "hrz_open_rx %x", channel); - - spin_lock_irqsave (&dev->mem_lock, flags); - channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK; - spin_unlock_irqrestore (&dev->mem_lock, flags); - - // very serious error, should never occur - if (channel_type != RX_CHANNEL_DISABLED) { - PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open"); - return -EBUSY; // clean up? 
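  // (Worked example for the Cell Transmit Rate notes further up; the
  // numbers are illustrative only.)  On a Horizon Ultra the base rate is
  // BR_ULT = 5000000 cells/s.  For a requested rate of 100000 cells/s:
  //   x = 5000000 / 100000 = 50;  x/16 = 3.125 and 2^1 < 3.125 <= 2^2,
  //   so the divider d = 2;  x / 2^d = 12.5, so the preload p is 13 when
  //   rounding the rate down (5000000/(13*4) ~= 96154 cells/s) or 12 when
  //   rounding it up (5000000/(12*4) ~= 104167 cells/s).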
- } - - // Give back spare buffer - if (dev->noof_spare_buffers) { - buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers]; - PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr); - // should never occur - if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) { - // but easy to recover from - PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE"); - buf_ptr = RX_CHANNEL_IDLE; - } - } else { - PRINTD (DBG_VCC, "using IDLE buffer pointer"); - } - - // Channel is currently disabled so change its status to idle - - // do we really need to save the flags again? - spin_lock_irqsave (&dev->mem_lock, flags); - - wr_mem (dev, &rx_desc->wr_buf_type, - buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME); - if (buf_ptr != RX_CHANNEL_IDLE) - wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr); - - spin_unlock_irqrestore (&dev->mem_lock, flags); - - // rxer->rate = make_rate (qos->peak_cells); - - PRINTD (DBG_FLOW, "hrz_open_rx ok"); - - return 0; -} - -#if 0 -/********** change vc rate for a given vc **********/ - -static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) { - rxer->rate = make_rate (qos->peak_cells); -} -#endif - -/********** free an skb (as per ATM device driver documentation) **********/ - -static void hrz_kfree_skb (struct sk_buff * skb) { - if (ATM_SKB(skb)->vcc->pop) { - ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb); - } else { - dev_kfree_skb_any (skb); - } -} - -/********** cancel listen on a VC **********/ - -static void hrz_close_rx (hrz_dev * dev, u16 vc) { - unsigned long flags; - - u32 value; - - u32 r1, r2; - - rx_ch_desc * rx_desc = &memmap->rx_descs[vc]; - - int was_idle = 0; - - spin_lock_irqsave (&dev->mem_lock, flags); - value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK; - spin_unlock_irqrestore (&dev->mem_lock, flags); - - if (value == RX_CHANNEL_DISABLED) { - // I suppose this could happen once we deal with _NONE traffic properly - PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc); - return; - } - if (value == RX_CHANNEL_IDLE) - was_idle = 1; - - spin_lock_irqsave (&dev->mem_lock, flags); - - for (;;) { - wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED); - - if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED) - break; - - was_idle = 0; - } - - if (was_idle) { - spin_unlock_irqrestore (&dev->mem_lock, flags); - return; - } - - WAIT_FLUSH_RX_COMPLETE(dev); - - // XXX Is this all really necessary? We can rely on the rx_data_av - // handler to discard frames that remain queued for delivery. If the - // worry is that immediately reopening the channel (perhaps by a - // different process) may cause some data to be mis-delivered then - // there may still be a simpler solution (such as busy-waiting on - // rx_busy once the channel is disabled or before a new one is - // opened - does this leave any holes?). Arguably setting up and - // tearing down the TX and RX halves of each virtual circuit could - // most safely be done within ?x_busy protected regions. - - // OK, current changes are that Simon's marker is disabled and we DO - // look for NULL rxer elsewhere. The code here seems flush frames - // and then remember the last dead cell belonging to the channel - // just disabled - the cell gets relinked at the next vc_open. - // However, when all VCs are closed or only a few opened there are a - // handful of buffers that are unusable. - - // Does anyone feel like documenting spare_buffers properly? - // Does anyone feel like fixing this in a nicer way? 
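  // In short (hardware bug 2 in the notes at the top of this file): the
  // flush loop below spins until the channel's read pointer is stable and
  // then stacks that last buffer,
  //
  //   dev->spare_buffers[dev->noof_spare_buffers++] = (u16) r1;
  //
  // and hrz_open_rx above pops the stack to seed the next channel opened.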
- - // Flush any data which is left in the channel - for (;;) { - // Change the rx channel port to something different to the RX - // channel we are trying to close to force Horizon to flush the rx - // channel read and write pointers. - - u16 other = vc^(RX_CHANS/2); - - SELECT_RX_CHANNEL (dev, other); - WAIT_UPDATE_COMPLETE (dev); - - r1 = rd_mem (dev, &rx_desc->rd_buf_type); - - // Select this RX channel. Flush doesn't seem to work unless we - // select an RX channel before hand - - SELECT_RX_CHANNEL (dev, vc); - WAIT_UPDATE_COMPLETE (dev); - - // Attempt to flush a frame on this RX channel - - FLUSH_RX_CHANNEL (dev, vc); - WAIT_FLUSH_RX_COMPLETE (dev); - - // Force Horizon to flush rx channel read and write pointers as before - - SELECT_RX_CHANNEL (dev, other); - WAIT_UPDATE_COMPLETE (dev); - - r2 = rd_mem (dev, &rx_desc->rd_buf_type); - - PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2); - - if (r1 == r2) { - dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1; - break; - } - } - -#if 0 - { - rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)]; - rx_q_entry * rd_ptr = dev->rx_q_entry; - - PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %u, wr_ptr = %u", rd_ptr, wr_ptr); - - while (rd_ptr != wr_ptr) { - u32 x = rd_mem (dev, (HDW *) rd_ptr); - - if (vc == rx_q_entry_to_rx_channel (x)) { - x |= SIMONS_DODGEY_MARKER; - - PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey"); - - wr_mem (dev, (HDW *) rd_ptr, x); - } - - if (rd_ptr == dev->rx_q_wrap) - rd_ptr = dev->rx_q_reset; - else - rd_ptr++; - } - } -#endif - - spin_unlock_irqrestore (&dev->mem_lock, flags); - - return; -} - -/********** schedule RX transfers **********/ - -// Note on tail recursion: a GCC developer said that it is not likely -// to be fixed soon, so do not define TAILRECUSRIONWORKS unless you -// are sure it does as you may otherwise overflow the kernel stack. - -// giving this fn a return value would help GCC, allegedly - -static void rx_schedule (hrz_dev * dev, int irq) { - unsigned int rx_bytes; - - int pio_instead = 0; -#ifndef TAILRECURSIONWORKS - pio_instead = 1; - while (pio_instead) { -#endif - // bytes waiting for RX transfer - rx_bytes = dev->rx_bytes; - -#if 0 - spin_count = 0; - while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) { - PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!"); - if (++spin_count > 10) { - PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion"); - wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0); - clear_bit (rx_busy, &dev->flags); - hrz_kfree_skb (dev->rx_skb); - return; - } - } -#endif - - // this code follows the TX code but (at the moment) there is only - // one region - the skb itself. I don't know if this will change, - // but it doesn't hurt to have the code here, disabled. 
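  // In outline: each pass below moves at most MAX_TRANSFER_COUNT bytes of
  // the current region; a remainder no larger than MAX_PIO_COUNT is copied
  // by PIO, anything bigger goes by bus-master DMA, and dev->rx_bytes keeps
  // whatever is left over for the next pass (the two limits are constants
  // whose values are not shown in this excerpt).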
- - if (rx_bytes) { - // start next transfer within same region - if (rx_bytes <= MAX_PIO_COUNT) { - PRINTD (DBG_RX|DBG_BUS, "(pio)"); - pio_instead = 1; - } - if (rx_bytes <= MAX_TRANSFER_COUNT) { - PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)"); - dev->rx_bytes = 0; - } else { - PRINTD (DBG_RX|DBG_BUS, "(continuing multi)"); - dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT; - rx_bytes = MAX_TRANSFER_COUNT; - } - } else { - // rx_bytes == 0 -- we're between regions - // regions remaining to transfer -#if 0 - unsigned int rx_regions = dev->rx_regions; -#else - unsigned int rx_regions = 0; -#endif - - if (rx_regions) { -#if 0 - // start a new region - dev->rx_addr = dev->rx_iovec->iov_base; - rx_bytes = dev->rx_iovec->iov_len; - ++dev->rx_iovec; - dev->rx_regions = rx_regions - 1; - - if (rx_bytes <= MAX_PIO_COUNT) { - PRINTD (DBG_RX|DBG_BUS, "(pio)"); - pio_instead = 1; - } - if (rx_bytes <= MAX_TRANSFER_COUNT) { - PRINTD (DBG_RX|DBG_BUS, "(full region)"); - dev->rx_bytes = 0; - } else { - PRINTD (DBG_RX|DBG_BUS, "(start multi region)"); - dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT; - rx_bytes = MAX_TRANSFER_COUNT; - } -#endif - } else { - // rx_regions == 0 - // that's all folks - end of frame - struct sk_buff * skb = dev->rx_skb; - // dev->rx_iovec = 0; - - FLUSH_RX_CHANNEL (dev, dev->rx_channel); - - dump_skb ("<<<", dev->rx_channel, skb); - - PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len); - - { - struct atm_vcc * vcc = ATM_SKB(skb)->vcc; - // VC layer stats - atomic_inc(&vcc->stats->rx); - __net_timestamp(skb); - // end of our responsibility - vcc->push (vcc, skb); - } - } - } - - // note: writing RX_COUNT clears any interrupt condition - if (rx_bytes) { - if (pio_instead) { - if (irq) - wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0); - rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes); - } else { - wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr)); - wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes); - } - dev->rx_addr += rx_bytes; - } else { - if (irq) - wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0); - // allow another RX thread to start - YELLOW_LED_ON(dev); - clear_bit (rx_busy, &dev->flags); - PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev); - } - -#ifdef TAILRECURSIONWORKS - // and we all bless optimised tail calls - if (pio_instead) - return rx_schedule (dev, 0); - return; -#else - // grrrrrrr! 
- irq = 0; - } - return; -#endif -} - -/********** handle RX bus master complete events **********/ - -static void rx_bus_master_complete_handler (hrz_dev * dev) { - if (test_bit (rx_busy, &dev->flags)) { - rx_schedule (dev, 1); - } else { - PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion"); - // clear interrupt condition on adapter - wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0); - } - return; -} - -/********** (queue to) become the next TX thread **********/ - -static int tx_hold (hrz_dev * dev) { - PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags); - wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags))); - PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags); - if (signal_pending (current)) - return -1; - PRINTD (DBG_TX, "set tx_busy for dev %p", dev); - return 0; -} - -/********** allow another TX thread to start **********/ - -static inline void tx_release (hrz_dev * dev) { - clear_bit (tx_busy, &dev->flags); - PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev); - wake_up_interruptible (&dev->tx_queue); -} - -/********** schedule TX transfers **********/ - -static void tx_schedule (hrz_dev * const dev, int irq) { - unsigned int tx_bytes; - - int append_desc = 0; - - int pio_instead = 0; -#ifndef TAILRECURSIONWORKS - pio_instead = 1; - while (pio_instead) { -#endif - // bytes in current region waiting for TX transfer - tx_bytes = dev->tx_bytes; - -#if 0 - spin_count = 0; - while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) { - PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!"); - if (++spin_count > 10) { - PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion"); - wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0); - tx_release (dev); - hrz_kfree_skb (dev->tx_skb); - return; - } - } -#endif - - if (tx_bytes) { - // start next transfer within same region - if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) { - PRINTD (DBG_TX|DBG_BUS, "(pio)"); - pio_instead = 1; - } - if (tx_bytes <= MAX_TRANSFER_COUNT) { - PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)"); - if (!dev->tx_iovec) { - // end of last region - append_desc = 1; - } - dev->tx_bytes = 0; - } else { - PRINTD (DBG_TX|DBG_BUS, "(continuing multi)"); - dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT; - tx_bytes = MAX_TRANSFER_COUNT; - } - } else { - // tx_bytes == 0 -- we're between regions - // regions remaining to transfer - unsigned int tx_regions = dev->tx_regions; - - if (tx_regions) { - // start a new region - dev->tx_addr = dev->tx_iovec->iov_base; - tx_bytes = dev->tx_iovec->iov_len; - ++dev->tx_iovec; - dev->tx_regions = tx_regions - 1; - - if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) { - PRINTD (DBG_TX|DBG_BUS, "(pio)"); - pio_instead = 1; - } - if (tx_bytes <= MAX_TRANSFER_COUNT) { - PRINTD (DBG_TX|DBG_BUS, "(full region)"); - dev->tx_bytes = 0; - } else { - PRINTD (DBG_TX|DBG_BUS, "(start multi region)"); - dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT; - tx_bytes = MAX_TRANSFER_COUNT; - } - } else { - // tx_regions == 0 - // that's all folks - end of frame - struct sk_buff * skb = dev->tx_skb; - dev->tx_iovec = NULL; - - // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); - - // free the skb - hrz_kfree_skb (skb); - } - } - - // note: writing TX_COUNT clears any interrupt condition - if (tx_bytes) { - if (pio_instead) { - if (irq) - wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0); - wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes); - if (append_desc) - wr_regl (dev, 
TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len)); - } else { - wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr)); - if (append_desc) - wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len)); - wr_regl (dev, MASTER_TX_COUNT_REG_OFF, - append_desc - ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC - : tx_bytes); - } - dev->tx_addr += tx_bytes; - } else { - if (irq) - wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0); - YELLOW_LED_ON(dev); - tx_release (dev); - } - -#ifdef TAILRECURSIONWORKS - // and we all bless optimised tail calls - if (pio_instead) - return tx_schedule (dev, 0); - return; -#else - // grrrrrrr! - irq = 0; - } - return; -#endif -} - -/********** handle TX bus master complete events **********/ - -static void tx_bus_master_complete_handler (hrz_dev * dev) { - if (test_bit (tx_busy, &dev->flags)) { - tx_schedule (dev, 1); - } else { - PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion"); - // clear interrupt condition on adapter - wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0); - } - return; -} - -/********** move RX Q pointer to next item in circular buffer **********/ - -// called only from IRQ sub-handler -static u32 rx_queue_entry_next (hrz_dev * dev) { - u32 rx_queue_entry; - spin_lock (&dev->mem_lock); - rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry); - if (dev->rx_q_entry == dev->rx_q_wrap) - dev->rx_q_entry = dev->rx_q_reset; - else - dev->rx_q_entry++; - wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset); - spin_unlock (&dev->mem_lock); - return rx_queue_entry; -} - -/********** handle RX data received by device **********/ - -// called from IRQ handler -static void rx_data_av_handler (hrz_dev * dev) { - u32 rx_queue_entry; - u32 rx_queue_entry_flags; - u16 rx_len; - u16 rx_channel; - - PRINTD (DBG_FLOW, "hrz_data_av_handler"); - - // try to grab rx lock (not possible during RX bus mastering) - if (test_and_set_bit (rx_busy, &dev->flags)) { - PRINTD (DBG_RX, "locked out of rx lock"); - return; - } - PRINTD (DBG_RX, "set rx_busy for dev %p", dev); - // lock is cleared if we fail now, o/w after bus master completion - - YELLOW_LED_OFF(dev); - - rx_queue_entry = rx_queue_entry_next (dev); - - rx_len = rx_q_entry_to_length (rx_queue_entry); - rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry); - - WAIT_FLUSH_RX_COMPLETE (dev); - - SELECT_RX_CHANNEL (dev, rx_channel); - - PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry); - rx_queue_entry_flags = rx_queue_entry & (RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER); - - if (!rx_len) { - // (at least) bus-mastering breaks if we try to handle a - // zero-length frame, besides AAL5 does not support them - PRINTK (KERN_ERR, "zero-length frame!"); - rx_queue_entry_flags &= ~RX_COMPLETE_FRAME; - } - - if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) { - PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!"); - } - if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) { - struct atm_vcc * atm_vcc; - - PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len); - - atm_vcc = dev->rxer[rx_channel]; - // if no vcc is assigned to this channel, we should drop the frame - // (is this what SIMONS etc. was trying to achieve?) 
- - if (atm_vcc) { - - if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { - - if (rx_len <= atm_vcc->qos.rxtp.max_sdu) { - - struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC); - if (skb) { - // remember this so we can push it later - dev->rx_skb = skb; - // remember this so we can flush it later - dev->rx_channel = rx_channel; - - // prepare socket buffer - skb_put (skb, rx_len); - ATM_SKB(skb)->vcc = atm_vcc; - - // simple transfer - // dev->rx_regions = 0; - // dev->rx_iovec = 0; - dev->rx_bytes = rx_len; - dev->rx_addr = skb->data; - PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)", - skb->data, rx_len); - - // do the business - rx_schedule (dev, 0); - return; - - } else { - PRINTD (DBG_SKB|DBG_WARN, "failed to get skb"); - } - - } else { - PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel); - // do we count this? - } - - } else { - PRINTK (KERN_WARNING, "dropped over-size frame"); - // do we count this? - } - - } else { - PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)"); - // do we count this? - } - - } else { - // Wait update complete ? SPONG - } - - // RX was aborted - YELLOW_LED_ON(dev); - - FLUSH_RX_CHANNEL (dev,rx_channel); - clear_bit (rx_busy, &dev->flags); - - return; -} - -/********** interrupt handler **********/ - -static irqreturn_t interrupt_handler(int irq, void *dev_id) -{ - hrz_dev *dev = dev_id; - u32 int_source; - unsigned int irq_ok; - - PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id); - - // definitely for us - irq_ok = 0; - while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF) - & INTERESTING_INTERRUPTS)) { - // In the interests of fairness, the handlers below are - // called in sequence and without immediate return to the head of - // the while loop. This is only of issue for slow hosts (or when - // debugging messages are on). Really slow hosts may find a fast - // sender keeps them permanently in the IRQ handler. :( - - // (only an issue for slow hosts) RX completion goes before - // rx_data_av as the former implies rx_busy and so the latter - // would just abort. If it reschedules another transfer - // (continuing the same frame) then it will not clear rx_busy. 
- - // (only an issue for slow hosts) TX completion goes before RX - // data available as it is a much shorter routine - there is the - // chance that any further transfers it schedules will be complete - // by the time of the return to the head of the while loop - - if (int_source & RX_BUS_MASTER_COMPLETE) { - ++irq_ok; - PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted"); - rx_bus_master_complete_handler (dev); - } - if (int_source & TX_BUS_MASTER_COMPLETE) { - ++irq_ok; - PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted"); - tx_bus_master_complete_handler (dev); - } - if (int_source & RX_DATA_AV) { - ++irq_ok; - PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted"); - rx_data_av_handler (dev); - } - } - if (irq_ok) { - PRINTD (DBG_IRQ, "work done: %u", irq_ok); - } else { - PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source); - } - - PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id); - if (irq_ok) - return IRQ_HANDLED; - return IRQ_NONE; -} - -/********** housekeeping **********/ - -static void do_housekeeping (struct timer_list *t) { - // just stats at the moment - hrz_dev * dev = from_timer(dev, t, housekeeping); - - // collect device-specific (not driver/atm-linux) stats here - dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF); - dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF); - dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF); - dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF); - - mod_timer (&dev->housekeeping, jiffies + HZ/10); - - return; -} - -/********** find an idle channel for TX and set it up **********/ - -// called with tx_busy set -static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) { - unsigned short idle_channels; - short tx_channel = -1; - unsigned int spin_count; - PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev); - - // better would be to fail immediately, the caller can then decide whether - // to wait or drop (depending on whether this is UBR etc.) - spin_count = 0; - while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) { - PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel"); - // delay a bit here - if (++spin_count > 100) { - PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel"); - return -EBUSY; - } - } - - // got an idle channel - { - // tx_idle ensures we look for idle channels in RR order - int chan = dev->tx_idle; - - int keep_going = 1; - while (keep_going) { - if (idle_channels & (1<tx_idle = chan; - } - - // set up the channel we found - { - // Initialise the cell header in the transmit channel descriptor - // a.k.a. prepare the channel and remember that we have done so. - - tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel]; - u32 rd_ptr; - u32 wr_ptr; - u16 channel = vcc->channel; - - unsigned long flags; - spin_lock_irqsave (&dev->mem_lock, flags); - - // Update the transmit channel record. - dev->tx_channel_record[tx_channel] = channel; - - // xBR channel - update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS, - vcc->tx_xbr_bits); - - // Update the PCR counter preload value etc. - update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS, - vcc->tx_pcr_bits); - -#if 0 - if (vcc->tx_xbr_bits == VBR_RATE_TYPE) { - // SCR timer - update_tx_channel_config (dev, tx_channel, SCR_TIMER_ACCESS, - vcc->tx_scr_bits); - - // Bucket size... - update_tx_channel_config (dev, tx_channel, BUCKET_CAPACITY_ACCESS, - vcc->tx_bucket_bits); - - // ... 
and fullness - update_tx_channel_config (dev, tx_channel, BUCKET_FULLNESS_ACCESS, - vcc->tx_bucket_bits); - } -#endif - - // Initialise the read and write buffer pointers - rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK; - wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK; - - // idle TX channels should have identical pointers - if (rd_ptr != wr_ptr) { - PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!"); - // spin_unlock... return -E... - // I wonder if gcc would get rid of one of the pointer aliases - } - PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.", - rd_ptr, wr_ptr); - - switch (vcc->aal) { - case aal0: - PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0"); - rd_ptr |= CHANNEL_TYPE_RAW_CELLS; - wr_ptr |= CHANNEL_TYPE_RAW_CELLS; - break; - case aal34: - PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34"); - rd_ptr |= CHANNEL_TYPE_AAL3_4; - wr_ptr |= CHANNEL_TYPE_AAL3_4; - break; - case aal5: - rd_ptr |= CHANNEL_TYPE_AAL5; - wr_ptr |= CHANNEL_TYPE_AAL5; - // Initialise the CRC - wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC); - break; - } - - wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr); - wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr); - - // Write the Cell Header - // Payload Type, CLP and GFC would go here if non-zero - wr_mem (dev, &tx_desc->cell_header, channel); - - spin_unlock_irqrestore (&dev->mem_lock, flags); - } - - return tx_channel; -} - -/********** send a frame **********/ - -static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { - unsigned int spin_count; - int free_buffers; - hrz_dev * dev = HRZ_DEV(atm_vcc->dev); - hrz_vcc * vcc = HRZ_VCC(atm_vcc); - u16 channel = vcc->channel; - - u32 buffers_required; - - /* signed for error return */ - short tx_channel; - - PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u", - channel, skb->data, skb->len); - - dump_skb (">>>", channel, skb); - - if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) { - PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel); - hrz_kfree_skb (skb); - return -EIO; - } - - // don't understand this - ATM_SKB(skb)->vcc = atm_vcc; - - if (skb->len > atm_vcc->qos.txtp.max_sdu) { - PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping..."); - hrz_kfree_skb (skb); - return -EIO; - } - - if (!channel) { - PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel"); - hrz_kfree_skb (skb); - return -EIO; - } - -#if 0 - { - // where would be a better place for this? housekeeping? - u16 status; - pci_read_config_word (dev->pci_dev, PCI_STATUS, &status); - if (status & PCI_STATUS_REC_MASTER_ABORT) { - PRINTD (DBG_BUS|DBG_ERR, "Clearing PCI Master Abort (and cleaning up)"); - status &= ~PCI_STATUS_REC_MASTER_ABORT; - pci_write_config_word (dev->pci_dev, PCI_STATUS, status); - if (test_bit (tx_busy, &dev->flags)) { - hrz_kfree_skb (dev->tx_skb); - tx_release (dev); - } - } - } -#endif - -#ifdef DEBUG_HORIZON - /* wey-hey! */ - if (channel == 1023) { - unsigned int i; - unsigned short d = 0; - char * s = skb->data; - if (*s++ == 'D') { - for (i = 0; i < 4; ++i) - d = (d << 4) | hex_to_bin(*s++); - PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d); - } - } -#endif - - // wait until TX is free and grab lock - if (tx_hold (dev)) { - hrz_kfree_skb (skb); - return -ERESTARTSYS; - } - - // Wait for enough space to be available in transmit buffer memory. - - // should be number of cells needed + 2 (according to hardware docs) - // = ((framelen+8)+47) / 48 + 2 - // = (framelen+7) / 48 + 3, hmm... 
faster to put addition inside XXX - buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3; - - // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry) - spin_count = 0; - while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) { - PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d", - free_buffers, buffers_required); - // what is the appropriate delay? implement a timeout? (depending on line speed?) - // mdelay (1); - // what happens if we kill (current_pid, SIGKILL) ? - schedule(); - if (++spin_count > 1000) { - PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d", - free_buffers, buffers_required); - tx_release (dev); - hrz_kfree_skb (skb); - return -ERESTARTSYS; - } - } - - // Select a channel to transmit the frame on. - if (channel == dev->last_vc) { - PRINTD (DBG_TX, "last vc hack: hit"); - tx_channel = dev->tx_last; - } else { - PRINTD (DBG_TX, "last vc hack: miss"); - // Are we currently transmitting this VC on one of the channels? - for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel) - if (dev->tx_channel_record[tx_channel] == channel) { - PRINTD (DBG_TX, "vc already on channel: hit"); - break; - } - if (tx_channel == TX_CHANS) { - PRINTD (DBG_TX, "vc already on channel: miss"); - // Find and set up an idle channel. - tx_channel = setup_idle_tx_channel (dev, vcc); - if (tx_channel < 0) { - PRINTD (DBG_TX|DBG_ERR, "failed to get channel"); - tx_release (dev); - return tx_channel; - } - } - - PRINTD (DBG_TX, "got channel"); - SELECT_TX_CHANNEL(dev, tx_channel); - - dev->last_vc = channel; - dev->tx_last = tx_channel; - } - - PRINTD (DBG_TX, "using channel %u", tx_channel); - - YELLOW_LED_OFF(dev); - - // TX start transfer - - { - unsigned int tx_len = skb->len; - unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags; - // remember this so we can free it later - dev->tx_skb = skb; - - if (tx_iovcnt) { - // scatter gather transfer - dev->tx_regions = tx_iovcnt; - dev->tx_iovec = NULL; /* @@@ needs rewritten */ - dev->tx_bytes = 0; - PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)", - skb->data, tx_len); - tx_release (dev); - hrz_kfree_skb (skb); - return -EIO; - } else { - // simple transfer - dev->tx_regions = 0; - dev->tx_iovec = NULL; - dev->tx_bytes = tx_len; - dev->tx_addr = skb->data; - PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)", - skb->data, tx_len); - } - - // and do the business - tx_schedule (dev, 0); - - } - - return 0; -} - -/********** reset a card **********/ - -static void hrz_reset (const hrz_dev * dev) { - u32 control_0_reg = rd_regl (dev, CONTROL_0_REG); - - // why not set RESET_HORIZON to one and wait for the card to - // reassert that bit as zero? 
Like so: - control_0_reg = control_0_reg & RESET_HORIZON; - wr_regl (dev, CONTROL_0_REG, control_0_reg); - while (control_0_reg & RESET_HORIZON) - control_0_reg = rd_regl (dev, CONTROL_0_REG); - - // old reset code retained: - wr_regl (dev, CONTROL_0_REG, control_0_reg | - RESET_ATM | RESET_RX | RESET_TX | RESET_HOST); - // just guessing here - udelay (1000); - - wr_regl (dev, CONTROL_0_REG, control_0_reg); -} - -/********** read the burnt in address **********/ - -static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl) -{ - wr_regl (dev, CONTROL_0_REG, ctrl); - udelay (5); -} - -static void CLOCK_IT (const hrz_dev *dev, u32 ctrl) -{ - // DI must be valid around rising SK edge - WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK); - WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK); -} - -static u16 read_bia(const hrz_dev *dev, u16 addr) -{ - u32 ctrl = rd_regl (dev, CONTROL_0_REG); - - const unsigned int addr_bits = 6; - const unsigned int data_bits = 16; - - unsigned int i; - - u16 res; - - ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI); - WRITE_IT_WAIT(dev, ctrl); - - // wake Serial EEPROM and send 110 (READ) command - ctrl |= (SEEPROM_CS | SEEPROM_DI); - CLOCK_IT(dev, ctrl); - - ctrl |= SEEPROM_DI; - CLOCK_IT(dev, ctrl); - - ctrl &= ~SEEPROM_DI; - CLOCK_IT(dev, ctrl); - - for (i=0; i> 1; - - CLOCK_IT(dev, ctrl); - - if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO) - res |= (1 << (data_bits-1)); - } - - ctrl &= ~(SEEPROM_SK | SEEPROM_CS); - WRITE_IT_WAIT(dev, ctrl); - - return res; -} - -/********** initialise a card **********/ - -static int hrz_init(hrz_dev *dev) -{ - int onefivefive; - - u16 chan; - - int buff_count; - - HDW * mem; - - cell_buf * tx_desc; - cell_buf * rx_desc; - - u32 ctrl; - - ctrl = rd_regl (dev, CONTROL_0_REG); - PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl); - onefivefive = ctrl & ATM_LAYER_STATUS; - - if (onefivefive) - printk (DEV_LABEL ": Horizon Ultra (at 155.52 MBps)"); - else - printk (DEV_LABEL ": Horizon (at 25 MBps)"); - - printk (":"); - // Reset the card to get everything in a known state - - printk (" reset"); - hrz_reset (dev); - - // Clear all the buffer memory - - printk (" clearing memory"); - - for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem) - wr_mem (dev, mem, 0); - - printk (" tx channels"); - - // All transmit eight channels are set up as AAL5 ABR channels with - // a 16us cell spacing. Why? - - // Channel 0 gets the free buffer at 100h, channel 1 gets the free - // buffer at 110h etc. 
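As a point of reference, read_bia() above bit-bangs the board's serial EEPROM through the SEEPROM_CS/SK/DI/DO bits of Control 0: select the part, clock out a start bit and the "10" READ opcode, clock out the 6-bit word address, then clock in 16 data bits. Below is a minimal standalone sketch of that microwire-style sequence; the seeprom_ops callbacks and the MSB-first address order are assumptions made for the sketch, while the 1-1-0 command and the bit-assembly order are taken from the routine above.

  #include <stdint.h>

  struct seeprom_ops {
  	void (*set_pins)(int cs, int sk, int di);   /* drive CS, SK, DI */
  	int  (*get_do)(void);                       /* sample DO */
  };

  void seeprom_clock_bit(const struct seeprom_ops *ops, int di)
  {
  	/* DI must be valid around the rising SK edge, as in CLOCK_IT() */
  	ops->set_pins(1, 0, di);
  	ops->set_pins(1, 1, di);
  }

  uint16_t seeprom_read_word(const struct seeprom_ops *ops, unsigned int addr)
  {
  	const unsigned int addr_bits = 6, data_bits = 16;
  	uint16_t res = 0;
  	unsigned int i;

  	ops->set_pins(0, 0, 0);                 /* idle, chip deselected */

  	seeprom_clock_bit(ops, 1);              /* start bit */
  	seeprom_clock_bit(ops, 1);              /* opcode "10" = READ */
  	seeprom_clock_bit(ops, 0);

  	for (i = 0; i < addr_bits; i++)         /* address, MSB first (assumed) */
  		seeprom_clock_bit(ops, (addr >> (addr_bits - 1 - i)) & 1);

  	for (i = 0; i < data_bits; i++) {       /* same assembly as read_bia(): */
  		res = res >> 1;                 /* first bit clocked in lands in bit 0 */
  		seeprom_clock_bit(ops, 0);
  		if (ops->get_do())
  			res |= 1 << (data_bits - 1);
  	}

  	ops->set_pins(0, 0, 0);                 /* deselect again */
  	return res;
  }
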
- - for (chan = 0; chan < TX_CHANS; ++chan) { - tx_ch_desc * tx_desc = &memmap->tx_descs[chan]; - cell_buf * buf = &memmap->inittxbufs[chan]; - - // initialise the read and write buffer pointers - wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf)); - wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf)); - - // set the status of the initial buffers to empty - wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY); - } - - // Use space bufn3 at the moment for tx buffers - - printk (" tx buffers"); - - tx_desc = memmap->bufn3; - - wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY); - - for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) { - wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY); - tx_desc++; - } - - wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY); - - // Initialise the transmit free buffer count - wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE); - - printk (" rx channels"); - - // Initialise all of the receive channels to be AAL5 disabled with - // an interrupt threshold of 0 - - for (chan = 0; chan < RX_CHANS; ++chan) { - rx_ch_desc * rx_desc = &memmap->rx_descs[chan]; - - wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED); - } - - printk (" rx buffers"); - - // Use space bufn4 at the moment for rx buffers - - rx_desc = memmap->bufn4; - - wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY); - - for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) { - wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY); - - rx_desc++; - } - - wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY); - - // Initialise the receive free buffer count - wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE); - - // Initialize Horizons registers - - // TX config - wr_regw (dev, TX_CONFIG_OFF, - ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE); - - // RX config. Use 10-x VC bits, x VP bits, non user cells in channel 0. - wr_regw (dev, RX_CONFIG_OFF, - DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits); - - // RX line config - wr_regw (dev, RX_LINE_CONFIG_OFF, - LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4); - - // Set the max AAL5 cell count to be just enough to contain the - // largest AAL5 frame that the user wants to receive - wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF, - DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD)); - - // Enable receive - wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE); - - printk (" control"); - - // Drive the OE of the LEDs then turn the green LED on - ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED; - wr_regl (dev, CONTROL_0_REG, ctrl); - - // Test for a 155-capable card - - if (onefivefive) { - // Select 155 mode... make this a choice (or: how do we detect - // external line speed and switch?) - ctrl |= ATM_LAYER_SELECT; - wr_regl (dev, CONTROL_0_REG, ctrl); - - // test SUNI-lite vs SAMBA - - // Register 0x00 in the SUNI will have some of bits 3-7 set, and - // they will always be zero for the SAMBA. Ha! Bloody hardware - // engineers. It'll never work. 
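The cell arithmetic that sizes buffers_required in hrz_send() and the MAX_AAL5_CELL_COUNT register above is simply the AAL5 payload plus its 8-byte trailer rounded up to whole 48-byte cell payloads, plus two buffers of elbow room on the TX side. A standalone worked sketch, with the constants written out rather than taken from the kernel headers:

  #include <stdio.h>

  #define CELL_PAYLOAD  48   /* ATM_CELL_PAYLOAD */
  #define AAL5_TRAILER   8   /* ATM_AAL5_TRAILER */

  static unsigned int aal5_cells(unsigned int frame_len)
  {
  	/* payload + trailer, rounded up to whole cells */
  	return (frame_len + AAL5_TRAILER + CELL_PAYLOAD - 1) / CELL_PAYLOAD;
  }

  static unsigned int tx_buffers_required(unsigned int frame_len)
  {
  	/*
  	 * cells + 2 elbow-room buffers; algebraically the same as the
  	 * driver's (len + TRAILER - 1) / PAYLOAD + 3 form, because
  	 * (len + 55) / 48 == (len + 7) / 48 + 1 in integer division.
  	 */
  	return (frame_len + AAL5_TRAILER - 1) / CELL_PAYLOAD + 3;
  }

  int main(void)
  {
  	unsigned int len = 100;

  	/* 100 + 8 = 108 bytes -> 3 cells -> 5 buffers with the +2 margin */
  	printf("len=%u cells=%u buffers=%u\n",
  	       len, aal5_cells(len), tx_buffers_required(len));
  	printf("equivalent forms agree: %d\n",
  	       aal5_cells(len) + 2 == tx_buffers_required(len));
  	return 0;
  }
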
- - if (rd_framer (dev, 0) & 0x00f0) { - // SUNI - printk (" SUNI"); - - // Reset, just in case - wr_framer (dev, 0x00, 0x0080); - wr_framer (dev, 0x00, 0x0000); - - // Configure transmit FIFO - wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002); - - // Set line timed mode - wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001); - } else { - // SAMBA - printk (" SAMBA"); - - // Reset, just in case - wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001); - wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001); - - // Turn off diagnostic loopback and enable line-timed mode - wr_framer (dev, 0, 0x0002); - - // Turn on transmit outputs - wr_framer (dev, 2, 0x0B80); - } - } else { - // Select 25 mode - ctrl &= ~ATM_LAYER_SELECT; - - // Madge B154 setup - // none required? - } - - printk (" LEDs"); - - GREEN_LED_ON(dev); - YELLOW_LED_ON(dev); - - printk (" ESI="); - - { - u16 b = 0; - int i; - u8 * esi = dev->atm_dev->esi; - - // in the card I have, EEPROM - // addresses 0, 1, 2 contain 0 - // addresess 5, 6 etc. contain ffff - // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order) - // the read_bia routine gets the BIA in Ethernet bit order - - for (i=0; i < ESI_LEN; ++i) { - if (i % 2 == 0) - b = read_bia (dev, i/2 + 2); - else - b = b >> 8; - esi[i] = b & 0xFF; - printk ("%02x", esi[i]); - } - } - - // Enable RX_Q and ?X_COMPLETE interrupts only - wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS); - printk (" IRQ on"); - - printk (".\n"); - - return onefivefive; -} - -/********** check max_sdu **********/ - -static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) { - PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu"); - - switch (aal) { - case aal0: - if (!(tp->max_sdu)) { - PRINTD (DBG_QOS, "defaulting max_sdu"); - tp->max_sdu = ATM_AAL0_SDU; - } else if (tp->max_sdu != ATM_AAL0_SDU) { - PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu"); - return -EINVAL; - } - break; - case aal34: - if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) { - PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default"); - tp->max_sdu = ATM_MAX_AAL34_PDU; - } - break; - case aal5: - if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) { - PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default"); - tp->max_sdu = max_frame_size; - } - break; - } - return 0; -} - -/********** check pcr **********/ - -// something like this should be part of ATM Linux -static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) { - // we are assuming non-UBR, and non-special values of pcr - if (tp->min_pcr == ATM_MAX_PCR) - PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR"); - else if (tp->min_pcr < 0) - PRINTD (DBG_QOS, "luser gave negative min_pcr"); - else if (tp->min_pcr && tp->min_pcr > pcr) - PRINTD (DBG_QOS, "pcr less than min_pcr"); - else - // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1) - // easier to #define ATM_MAX_PCR 0 and have all rates unsigned? 
- // [this would get rid of next two conditionals] - if ((0) && tp->max_pcr == ATM_MAX_PCR) - PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR"); - else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0) - PRINTD (DBG_QOS, "luser gave negative max_pcr"); - else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr) - PRINTD (DBG_QOS, "pcr greater than max_pcr"); - else { - // each limit unspecified or not violated - PRINTD (DBG_QOS, "xBR(pcr) OK"); - return 0; - } - PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d", - pcr, tp->min_pcr, tp->pcr, tp->max_pcr); - return -EINVAL; -} - -/********** open VC **********/ - -static int hrz_open (struct atm_vcc *atm_vcc) -{ - int error; - u16 channel; - - struct atm_qos * qos; - struct atm_trafprm * txtp; - struct atm_trafprm * rxtp; - - hrz_dev * dev = HRZ_DEV(atm_vcc->dev); - hrz_vcc vcc; - hrz_vcc * vccp; // allocated late - short vpi = atm_vcc->vpi; - int vci = atm_vcc->vci; - PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci); - -#ifdef ATM_VPI_UNSPEC - // UNSPEC is deprecated, remove this code eventually - if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) { - PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)"); - return -EINVAL; - } -#endif - - error = vpivci_to_channel (&channel, vpi, vci); - if (error) { - PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci); - return error; - } - - vcc.channel = channel; - // max speed for the moment - vcc.tx_rate = 0x0; - - qos = &atm_vcc->qos; - - // check AAL and remember it - switch (qos->aal) { - case ATM_AAL0: - // we would if it were 48 bytes and not 52! - PRINTD (DBG_QOS|DBG_VCC, "AAL0"); - vcc.aal = aal0; - break; - case ATM_AAL34: - // we would if I knew how do the SAR! - PRINTD (DBG_QOS|DBG_VCC, "AAL3/4"); - vcc.aal = aal34; - break; - case ATM_AAL5: - PRINTD (DBG_QOS|DBG_VCC, "AAL5"); - vcc.aal = aal5; - break; - default: - PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!"); - return -EINVAL; - } - - // TX traffic parameters - - // there are two, interrelated problems here: 1. the reservation of - // PCR is not a binary choice, we are given bounds and/or a - // desirable value; 2. the device is only capable of certain values, - // most of which are not integers. It is almost certainly acceptable - // to be off by a maximum of 1 to 10 cps. - - // Pragmatic choice: always store an integral PCR as that which has - // been allocated, even if we allocate a little (or a lot) less, - // after rounding. The actual allocation depends on what we can - // manage with our rate selection algorithm. The rate selection - // algorithm is given an integral PCR and a tolerance and told - // whether it should round the value up or down if the tolerance is - // exceeded; it returns: a) the actual rate selected (rounded up to - // the nearest integer), b) a bit pattern to feed to the timer - // register, and c) a failure value if no applicable rate exists. - - // Part of the job is done by atm_pcr_goal which gives us a PCR - // specification which says: EITHER grab the maximum available PCR - // (and perhaps a lower bound which we must not pass), OR grab this - // amount, rounding down if you have to (and perhaps a lower bound - // which we must not pass) OR grab this amount, rounding up if you - // have to (and perhaps an upper bound which we must not pass). If any - // bounds ARE passed we fail. 
Note that rounding is only rounding to - // match device limitations, we do not round down to satisfy - // bandwidth availability even if this would not violate any given - // lower bound. - - // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s - // (say) so this is not even a binary fixpoint cell rate (but this - // device can do it). To avoid this sort of hassle we use a - // tolerance parameter (currently fixed at 10 cps). - - PRINTD (DBG_QOS, "TX:"); - - txtp = &qos->txtp; - - // set up defaults for no traffic - vcc.tx_rate = 0; - // who knows what would actually happen if you try and send on this? - vcc.tx_xbr_bits = IDLE_RATE_TYPE; - vcc.tx_pcr_bits = CLOCK_DISABLE; -#if 0 - vcc.tx_scr_bits = CLOCK_DISABLE; - vcc.tx_bucket_bits = 0; -#endif - - if (txtp->traffic_class != ATM_NONE) { - error = check_max_sdu (vcc.aal, txtp, max_tx_size); - if (error) { - PRINTD (DBG_QOS, "TX max_sdu check failed"); - return error; - } - - switch (txtp->traffic_class) { - case ATM_UBR: { - // we take "the PCR" as a rate-cap - // not reserved - vcc.tx_rate = 0; - make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL); - vcc.tx_xbr_bits = ABR_RATE_TYPE; - break; - } -#if 0 - case ATM_ABR: { - // reserve min, allow up to max - vcc.tx_rate = 0; // ? - make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, 0); - vcc.tx_xbr_bits = ABR_RATE_TYPE; - break; - } -#endif - case ATM_CBR: { - int pcr = atm_pcr_goal (txtp); - rounding r; - if (!pcr) { - // down vs. up, remaining bandwidth vs. unlimited bandwidth!! - // should really have: once someone gets unlimited bandwidth - // that no more non-UBR channels can be opened until the - // unlimited one closes?? For the moment, round_down means - // greedy people actually get something and not nothing - r = round_down; - // slight race (no locking) here so we may get -EAGAIN - // later; the greedy bastards would deserve it :) - PRINTD (DBG_QOS, "snatching all remaining TX bandwidth"); - pcr = dev->tx_avail; - } else if (pcr < 0) { - r = round_down; - pcr = -pcr; - } else { - r = round_up; - } - error = make_rate_with_tolerance (dev, pcr, r, 10, - &vcc.tx_pcr_bits, &vcc.tx_rate); - if (error) { - PRINTD (DBG_QOS, "could not make rate from TX PCR"); - return error; - } - // not really clear what further checking is needed - error = atm_pcr_check (txtp, vcc.tx_rate); - if (error) { - PRINTD (DBG_QOS, "TX PCR failed consistency check"); - return error; - } - vcc.tx_xbr_bits = CBR_RATE_TYPE; - break; - } -#if 0 - case ATM_VBR: { - int pcr = atm_pcr_goal (txtp); - // int scr = atm_scr_goal (txtp); - int scr = pcr/2; // just for fun - unsigned int mbs = 60; // just for fun - rounding pr; - rounding sr; - unsigned int bucket; - if (!pcr) { - pr = round_nearest; - pcr = 1<<30; - } else if (pcr < 0) { - pr = round_down; - pcr = -pcr; - } else { - pr = round_up; - } - error = make_rate_with_tolerance (dev, pcr, pr, 10, - &vcc.tx_pcr_bits, 0); - if (!scr) { - // see comments for PCR with CBR above - sr = round_down; - // slight race (no locking) here so we may get -EAGAIN - // later; the greedy bastards would deserve it :) - PRINTD (DBG_QOS, "snatching all remaining TX bandwidth"); - scr = dev->tx_avail; - } else if (scr < 0) { - sr = round_down; - scr = -scr; - } else { - sr = round_up; - } - error = make_rate_with_tolerance (dev, scr, sr, 10, - &vcc.tx_scr_bits, &vcc.tx_rate); - if (error) { - PRINTD (DBG_QOS, "could not make rate from TX SCR"); - return error; - } - // not really clear what further checking is needed - // error = atm_scr_check (txtp, 
vcc.tx_rate); - if (error) { - PRINTD (DBG_QOS, "TX SCR failed consistency check"); - return error; - } - // bucket calculations (from a piece of paper...) cell bucket - // capacity must be largest integer smaller than m(p-s)/p + 1 - // where m = max burst size, p = pcr, s = scr - bucket = mbs*(pcr-scr)/pcr; - if (bucket*pcr != mbs*(pcr-scr)) - bucket += 1; - if (bucket > BUCKET_MAX_SIZE) { - PRINTD (DBG_QOS, "shrinking bucket from %u to %u", - bucket, BUCKET_MAX_SIZE); - bucket = BUCKET_MAX_SIZE; - } - vcc.tx_xbr_bits = VBR_RATE_TYPE; - vcc.tx_bucket_bits = bucket; - break; - } -#endif - default: { - PRINTD (DBG_QOS, "unsupported TX traffic class"); - return -EINVAL; - } - } - } - - // RX traffic parameters - - PRINTD (DBG_QOS, "RX:"); - - rxtp = &qos->rxtp; - - // set up defaults for no traffic - vcc.rx_rate = 0; - - if (rxtp->traffic_class != ATM_NONE) { - error = check_max_sdu (vcc.aal, rxtp, max_rx_size); - if (error) { - PRINTD (DBG_QOS, "RX max_sdu check failed"); - return error; - } - switch (rxtp->traffic_class) { - case ATM_UBR: { - // not reserved - break; - } -#if 0 - case ATM_ABR: { - // reserve min - vcc.rx_rate = 0; // ? - break; - } -#endif - case ATM_CBR: { - int pcr = atm_pcr_goal (rxtp); - if (!pcr) { - // slight race (no locking) here so we may get -EAGAIN - // later; the greedy bastards would deserve it :) - PRINTD (DBG_QOS, "snatching all remaining RX bandwidth"); - pcr = dev->rx_avail; - } else if (pcr < 0) { - pcr = -pcr; - } - vcc.rx_rate = pcr; - // not really clear what further checking is needed - error = atm_pcr_check (rxtp, vcc.rx_rate); - if (error) { - PRINTD (DBG_QOS, "RX PCR failed consistency check"); - return error; - } - break; - } -#if 0 - case ATM_VBR: { - // int scr = atm_scr_goal (rxtp); - int scr = 1<<16; // just for fun - if (!scr) { - // slight race (no locking) here so we may get -EAGAIN - // later; the greedy bastards would deserve it :) - PRINTD (DBG_QOS, "snatching all remaining RX bandwidth"); - scr = dev->rx_avail; - } else if (scr < 0) { - scr = -scr; - } - vcc.rx_rate = scr; - // not really clear what further checking is needed - // error = atm_scr_check (rxtp, vcc.rx_rate); - if (error) { - PRINTD (DBG_QOS, "RX SCR failed consistency check"); - return error; - } - break; - } -#endif - default: { - PRINTD (DBG_QOS, "unsupported RX traffic class"); - return -EINVAL; - } - } - } - - - // late abort useful for diagnostics - if (vcc.aal != aal5) { - PRINTD (DBG_QOS, "AAL not supported"); - return -EINVAL; - } - - // get space for our vcc stuff and copy parameters into it - vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL); - if (!vccp) { - PRINTK (KERN_ERR, "out of memory!"); - return -ENOMEM; - } - *vccp = vcc; - - // clear error and grab cell rate resource lock - error = 0; - spin_lock (&dev->rate_lock); - - if (vcc.tx_rate > dev->tx_avail) { - PRINTD (DBG_QOS, "not enough TX PCR left"); - error = -EAGAIN; - } - - if (vcc.rx_rate > dev->rx_avail) { - PRINTD (DBG_QOS, "not enough RX PCR left"); - error = -EAGAIN; - } - - if (!error) { - // really consume cell rates - dev->tx_avail -= vcc.tx_rate; - dev->rx_avail -= vcc.rx_rate; - PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR", - vcc.tx_rate, vcc.rx_rate); - } - - // release lock and exit on error - spin_unlock (&dev->rate_lock); - if (error) { - PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources"); - kfree (vccp); - return error; - } - - // this is "immediately before allocating the connection identifier - // in hardware" - so long as the next call does not fail :) - 
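For the record, the bucket sizing in the disabled VBR branch of hrz_open() above computes the ceiling of mbs*(pcr-scr)/pcr, i.e. the largest cell bucket a burst of mbs cells at pcr can fill while draining at scr, clamped to what the hardware field can hold. A standalone sketch with made-up rates (not the driver's code path):

  #include <stdio.h>

  #define BUCKET_MAX_SIZE 0x3f	/* 6-bit field, from horizon.h below */

  static unsigned int vbr_bucket(unsigned int mbs, unsigned int pcr,
  			       unsigned int scr)
  {
  	unsigned int bucket = mbs * (pcr - scr) / pcr;

  	if (bucket * pcr != mbs * (pcr - scr))	/* round any remainder up */
  		bucket += 1;
  	if (bucket > BUCKET_MAX_SIZE)		/* clamp to the hardware field */
  		bucket = BUCKET_MAX_SIZE;
  	return bucket;
  }

  int main(void)
  {
  	/* mbs=60, pcr=1000, scr=500: 60*500/1000 = 30 exactly */
  	/* mbs=60, pcr=1000, scr=450: 60*550/1000 = 33 exactly */
  	/* mbs=10, pcr=300,  scr=100: 10*200/300  = 6.67 -> 7  */
  	printf("%u %u %u\n",
  	       vbr_bucket(60, 1000, 500),
  	       vbr_bucket(60, 1000, 450),
  	       vbr_bucket(10, 300, 100));
  	return 0;
  }

With the commented-out defaults above (mbs = 60, scr = pcr/2) the bucket comes out at 30.
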
set_bit(ATM_VF_ADDR,&atm_vcc->flags); - - // any errors here are very serious and should never occur - - if (rxtp->traffic_class != ATM_NONE) { - if (dev->rxer[channel]) { - PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX"); - error = -EBUSY; - } - if (!error) - error = hrz_open_rx (dev, channel); - if (error) { - kfree (vccp); - return error; - } - // this link allows RX frames through - dev->rxer[channel] = atm_vcc; - } - - // success, set elements of atm_vcc - atm_vcc->dev_data = (void *) vccp; - - // indicate readiness - set_bit(ATM_VF_READY,&atm_vcc->flags); - - return 0; -} - -/********** close VC **********/ - -static void hrz_close (struct atm_vcc * atm_vcc) { - hrz_dev * dev = HRZ_DEV(atm_vcc->dev); - hrz_vcc * vcc = HRZ_VCC(atm_vcc); - u16 channel = vcc->channel; - PRINTD (DBG_VCC|DBG_FLOW, "hrz_close"); - - // indicate unreadiness - clear_bit(ATM_VF_READY,&atm_vcc->flags); - - if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) { - unsigned int i; - - // let any TX on this channel that has started complete - // no restart, just keep trying - while (tx_hold (dev)) - ; - // remove record of any tx_channel having been setup for this channel - for (i = 0; i < TX_CHANS; ++i) - if (dev->tx_channel_record[i] == channel) { - dev->tx_channel_record[i] = -1; - break; - } - if (dev->last_vc == channel) - dev->tx_last = -1; - tx_release (dev); - } - - if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { - // disable RXing - it tries quite hard - hrz_close_rx (dev, channel); - // forget the vcc - no more skbs will be pushed - if (atm_vcc != dev->rxer[channel]) - PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p", - "arghhh! we're going to die!", - atm_vcc, dev->rxer[channel]); - dev->rxer[channel] = NULL; - } - - // atomically release our rate reservation - spin_lock (&dev->rate_lock); - PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR", - vcc->tx_rate, vcc->rx_rate); - dev->tx_avail += vcc->tx_rate; - dev->rx_avail += vcc->rx_rate; - spin_unlock (&dev->rate_lock); - - // free our structure - kfree (vcc); - // say the VPI/VCI is free again - clear_bit(ATM_VF_ADDR,&atm_vcc->flags); -} - -#if 0 -static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) { - hrz_dev * dev = HRZ_DEV(atm_dev); - PRINTD (DBG_FLOW, "hrz_ioctl"); - return -1; -} - -unsigned char hrz_phy_get (struct atm_dev * atm_dev, unsigned long addr) { - hrz_dev * dev = HRZ_DEV(atm_dev); - PRINTD (DBG_FLOW, "hrz_phy_get"); - return 0; -} - -static void hrz_phy_put (struct atm_dev * atm_dev, unsigned char value, - unsigned long addr) { - hrz_dev * dev = HRZ_DEV(atm_dev); - PRINTD (DBG_FLOW, "hrz_phy_put"); -} - -static int hrz_change_qos (struct atm_vcc * atm_vcc, struct atm_qos *qos, int flgs) { - hrz_dev * dev = HRZ_DEV(vcc->dev); - PRINTD (DBG_FLOW, "hrz_change_qos"); - return -1; -} -#endif - -/********** proc file contents **********/ - -static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) { - hrz_dev * dev = HRZ_DEV(atm_dev); - int left = *pos; - PRINTD (DBG_FLOW, "hrz_proc_read"); - - /* more diagnostics here? 
*/ - -#if 0 - if (!left--) { - unsigned int count = sprintf (page, "vbr buckets:"); - unsigned int i; - for (i = 0; i < TX_CHANS; ++i) - count += sprintf (page, " %u/%u", - query_tx_channel_config (dev, i, BUCKET_FULLNESS_ACCESS), - query_tx_channel_config (dev, i, BUCKET_CAPACITY_ACCESS)); - count += sprintf (page+count, ".\n"); - return count; - } -#endif - - if (!left--) - return sprintf (page, - "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n", - dev->tx_cell_count, dev->rx_cell_count, - dev->hec_error_count, dev->unassigned_cell_count); - - if (!left--) - return sprintf (page, - "free cell buffers: TX %hu, RX %hu+%hu.\n", - rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF), - rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF), - dev->noof_spare_buffers); - - if (!left--) - return sprintf (page, - "cps remaining: TX %u, RX %u\n", - dev->tx_avail, dev->rx_avail); - - return 0; -} - -static const struct atmdev_ops hrz_ops = { - .open = hrz_open, - .close = hrz_close, - .send = hrz_send, - .proc_read = hrz_proc_read, - .owner = THIS_MODULE, -}; - -static int hrz_probe(struct pci_dev *pci_dev, - const struct pci_device_id *pci_ent) -{ - hrz_dev * dev; - int err = 0; - - // adapter slot free, read resources from PCI configuration space - u32 iobase = pci_resource_start (pci_dev, 0); - u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1)); - unsigned int irq; - unsigned char lat; - - PRINTD (DBG_FLOW, "hrz_probe"); - - if (pci_enable_device(pci_dev)) - return -EINVAL; - - /* XXX DEV_LABEL is a guess */ - if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) { - err = -EINVAL; - goto out_disable; - } - - dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL); - if (!dev) { - // perhaps we should be nice: deregister all adapters and abort? - PRINTD(DBG_ERR, "out of memory"); - err = -ENOMEM; - goto out_release; - } - - pci_set_drvdata(pci_dev, dev); - - // grab IRQ and install handler - move this someplace more sensible - irq = pci_dev->irq; - if (request_irq(irq, - interrupt_handler, - IRQF_SHARED, /* irqflags guess */ - DEV_LABEL, /* name guess */ - dev)) { - PRINTD(DBG_WARN, "request IRQ failed!"); - err = -EINVAL; - goto out_free; - } - - PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p", - iobase, irq, membase); - - dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1, - NULL); - if (!(dev->atm_dev)) { - PRINTD(DBG_ERR, "failed to register Madge ATM adapter"); - err = -EINVAL; - goto out_free_irq; - } - - PRINTD(DBG_INFO, "registered Madge ATM adapter (no. 
%d) (%p) at %p", - dev->atm_dev->number, dev, dev->atm_dev); - dev->atm_dev->dev_data = (void *) dev; - dev->pci_dev = pci_dev; - - // enable bus master accesses - pci_set_master(pci_dev); - - // frobnicate latency (upwards, usually) - pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat); - if (pci_lat) { - PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu", - "changing", lat, pci_lat); - pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat); - } else if (lat < MIN_PCI_LATENCY) { - PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu", - "increasing", lat, MIN_PCI_LATENCY); - pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY); - } - - dev->iobase = iobase; - dev->irq = irq; - dev->membase = membase; - - dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0]; - dev->rx_q_wrap = &memmap->rx_q_entries[RX_CHANS-1]; - - // these next three are performance hacks - dev->last_vc = -1; - dev->tx_last = -1; - dev->tx_idle = 0; - - dev->tx_regions = 0; - dev->tx_bytes = 0; - dev->tx_skb = NULL; - dev->tx_iovec = NULL; - - dev->tx_cell_count = 0; - dev->rx_cell_count = 0; - dev->hec_error_count = 0; - dev->unassigned_cell_count = 0; - - dev->noof_spare_buffers = 0; - - { - unsigned int i; - for (i = 0; i < TX_CHANS; ++i) - dev->tx_channel_record[i] = -1; - } - - dev->flags = 0; - - // Allocate cell rates and remember ASIC version - // Fibre: ATM_OC3_PCR = 1555200000/8/270*260/53 - 29/53 - // Copper: (WRONG) we want 6 into the above, close to 25Mb/s - // Copper: (plagarise!) 25600000/8/270*260/53 - n/53 - - if (hrz_init(dev)) { - // to be really pedantic, this should be ATM_OC3c_PCR - dev->tx_avail = ATM_OC3_PCR; - dev->rx_avail = ATM_OC3_PCR; - set_bit(ultra, &dev->flags); // NOT "|= ultra" ! - } else { - dev->tx_avail = ((25600000/8)*26)/(27*53); - dev->rx_avail = ((25600000/8)*26)/(27*53); - PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering."); - } - - // rate changes spinlock - spin_lock_init(&dev->rate_lock); - - // on-board memory access spinlock; we want atomic reads and - // writes to adapter memory (handles IRQ and SMP) - spin_lock_init(&dev->mem_lock); - - init_waitqueue_head(&dev->tx_queue); - - // vpi in 0..4, vci in 6..10 - dev->atm_dev->ci_range.vpi_bits = vpi_bits; - dev->atm_dev->ci_range.vci_bits = 10-vpi_bits; - - timer_setup(&dev->housekeeping, do_housekeeping, 0); - mod_timer(&dev->housekeeping, jiffies); - -out: - return err; - -out_free_irq: - free_irq(irq, dev); -out_free: - kfree(dev); -out_release: - release_region(iobase, HRZ_IO_EXTENT); -out_disable: - pci_disable_device(pci_dev); - goto out; -} - -static void hrz_remove_one(struct pci_dev *pci_dev) -{ - hrz_dev *dev; - - dev = pci_get_drvdata(pci_dev); - - PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev); - del_timer_sync(&dev->housekeeping); - hrz_reset(dev); - atm_dev_deregister(dev->atm_dev); - free_irq(dev->irq, dev); - release_region(dev->iobase, HRZ_IO_EXTENT); - kfree(dev); - - pci_disable_device(pci_dev); -} - -static void __init hrz_check_args (void) { -#ifdef DEBUG_HORIZON - PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK); -#else - if (debug) - PRINTK (KERN_NOTICE, "no debug support in this image"); -#endif - - if (vpi_bits > HRZ_MAX_VPI) - PRINTK (KERN_ERR, "vpi_bits has been limited to %hu", - vpi_bits = HRZ_MAX_VPI); - - if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT) - PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu", - max_tx_size = TX_AAL5_LIMIT); - - if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT) - PRINTK (KERN_NOTICE, 
"max_rx_size has been limited to %hu", - max_rx_size = RX_AAL5_LIMIT); - - return; -} - -MODULE_AUTHOR(maintainer_string); -MODULE_DESCRIPTION(description_string); -MODULE_LICENSE("GPL"); -module_param(debug, ushort, 0644); -module_param(vpi_bits, ushort, 0); -module_param(max_tx_size, int, 0); -module_param(max_rx_size, int, 0); -module_param(pci_lat, byte, 0); -MODULE_PARM_DESC(debug, "debug bitmap, see .h file"); -MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs"); -MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames"); -MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames"); -MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); - -static const struct pci_device_id hrz_pci_tbl[] = { - { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID, - 0, 0, 0 }, - { 0, } -}; - -MODULE_DEVICE_TABLE(pci, hrz_pci_tbl); - -static struct pci_driver hrz_driver = { - .name = "horizon", - .probe = hrz_probe, - .remove = hrz_remove_one, - .id_table = hrz_pci_tbl, -}; - -/********** module entry **********/ - -static int __init hrz_module_init (void) { - BUILD_BUG_ON(sizeof(struct MEMMAP) != 128*1024/4); - - show_version(); - - // check arguments - hrz_check_args(); - - // get the juice - return pci_register_driver(&hrz_driver); -} - -/********** module exit **********/ - -static void __exit hrz_module_exit (void) { - PRINTD (DBG_FLOW, "cleanup_module"); - - pci_unregister_driver(&hrz_driver); -} - -module_init(hrz_module_init); -module_exit(hrz_module_exit); diff --git a/drivers/atm/horizon.h b/drivers/atm/horizon.h deleted file mode 100644 index 7523eba19bad..000000000000 --- a/drivers/atm/horizon.h +++ /dev/null @@ -1,492 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - Madge Horizon ATM Adapter driver. - Copyright (C) 1995-1999 Madge Networks Ltd. - -*/ - -/* - IMPORTANT NOTE: Madge Networks no longer makes the adapters - supported by this driver and makes no commitment to maintain it. -*/ - -/* too many macros - change to inline functions */ - -#ifndef DRIVER_ATM_HORIZON_H -#define DRIVER_ATM_HORIZON_H - - -#ifdef CONFIG_ATM_HORIZON_DEBUG -#define DEBUG_HORIZON -#endif - -#define DEV_LABEL "hrz" - -#ifndef PCI_VENDOR_ID_MADGE -#define PCI_VENDOR_ID_MADGE 0x10B6 -#endif -#ifndef PCI_DEVICE_ID_MADGE_HORIZON -#define PCI_DEVICE_ID_MADGE_HORIZON 0x1000 -#endif - -// diagnostic output - -#define PRINTK(severity,format,args...) \ - printk(severity DEV_LABEL ": " format "\n" , ## args) - -#ifdef DEBUG_HORIZON - -#define DBG_ERR 0x0001 -#define DBG_WARN 0x0002 -#define DBG_INFO 0x0004 -#define DBG_VCC 0x0008 -#define DBG_QOS 0x0010 -#define DBG_TX 0x0020 -#define DBG_RX 0x0040 -#define DBG_SKB 0x0080 -#define DBG_IRQ 0x0100 -#define DBG_FLOW 0x0200 -#define DBG_BUS 0x0400 -#define DBG_REGS 0x0800 -#define DBG_DATA 0x1000 -#define DBG_MASK 0x1fff - -/* the ## prevents the annoying double expansion of the macro arguments */ -/* KERN_INFO is used since KERN_DEBUG often does not make it to the console */ -#define PRINTDB(bits,format,args...) \ - ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format , ## args) : 1 ) -#define PRINTDM(bits,format,args...) \ - ( (debug & (bits)) ? printk (format , ## args) : 1 ) -#define PRINTDE(bits,format,args...) \ - ( (debug & (bits)) ? printk (format "\n" , ## args) : 1 ) -#define PRINTD(bits,format,args...) \ - ( (debug & (bits)) ? printk (KERN_INFO DEV_LABEL ": " format "\n" , ## args) : 1 ) - -#else - -#define PRINTD(bits,format,args...) -#define PRINTDB(bits,format,args...) 
-#define PRINTDM(bits,format,args...) -#define PRINTDE(bits,format,args...) - -#endif - -#define PRINTDD(sec,fmt,args...) -#define PRINTDDB(sec,fmt,args...) -#define PRINTDDM(sec,fmt,args...) -#define PRINTDDE(sec,fmt,args...) - -// fixed constants - -#define SPARE_BUFFER_POOL_SIZE MAX_VCS -#define HRZ_MAX_VPI 4 -#define MIN_PCI_LATENCY 48 // 24 IS TOO SMALL - -/* Horizon specific bits */ -/* Register offsets */ - -#define HRZ_IO_EXTENT 0x80 - -#define DATA_PORT_OFF 0x00 -#define TX_CHANNEL_PORT_OFF 0x04 -#define TX_DESCRIPTOR_PORT_OFF 0x08 -#define MEMORY_PORT_OFF 0x0C -#define MEM_WR_ADDR_REG_OFF 0x14 -#define MEM_RD_ADDR_REG_OFF 0x18 -#define CONTROL_0_REG 0x1C -#define INT_SOURCE_REG_OFF 0x20 -#define INT_ENABLE_REG_OFF 0x24 -#define MASTER_RX_ADDR_REG_OFF 0x28 -#define MASTER_RX_COUNT_REG_OFF 0x2C -#define MASTER_TX_ADDR_REG_OFF 0x30 -#define MASTER_TX_COUNT_REG_OFF 0x34 -#define TX_DESCRIPTOR_REG_OFF 0x38 -#define TX_CHANNEL_CONFIG_COMMAND_OFF 0x40 -#define TX_CHANNEL_CONFIG_DATA_OFF 0x44 -#define TX_FREE_BUFFER_COUNT_OFF 0x48 -#define RX_FREE_BUFFER_COUNT_OFF 0x4C -#define TX_CONFIG_OFF 0x50 -#define TX_STATUS_OFF 0x54 -#define RX_CONFIG_OFF 0x58 -#define RX_LINE_CONFIG_OFF 0x5C -#define RX_QUEUE_RD_PTR_OFF 0x60 -#define RX_QUEUE_WR_PTR_OFF 0x64 -#define MAX_AAL5_CELL_COUNT_OFF 0x68 -#define RX_CHANNEL_PORT_OFF 0x6C -#define TX_CELL_COUNT_OFF 0x70 -#define RX_CELL_COUNT_OFF 0x74 -#define HEC_ERROR_COUNT_OFF 0x78 -#define UNASSIGNED_CELL_COUNT_OFF 0x7C - -/* Register bit definitions */ - -/* Control 0 register */ - -#define SEEPROM_DO 0x00000001 -#define SEEPROM_DI 0x00000002 -#define SEEPROM_SK 0x00000004 -#define SEEPROM_CS 0x00000008 -#define DEBUG_BIT_0 0x00000010 -#define DEBUG_BIT_1 0x00000020 -#define DEBUG_BIT_2 0x00000040 -// RESERVED 0x00000080 -#define DEBUG_BIT_0_OE 0x00000100 -#define DEBUG_BIT_1_OE 0x00000200 -#define DEBUG_BIT_2_OE 0x00000400 -// RESERVED 0x00000800 -#define DEBUG_BIT_0_STATE 0x00001000 -#define DEBUG_BIT_1_STATE 0x00002000 -#define DEBUG_BIT_2_STATE 0x00004000 -// RESERVED 0x00008000 -#define GENERAL_BIT_0 0x00010000 -#define GENERAL_BIT_1 0x00020000 -#define GENERAL_BIT_2 0x00040000 -#define GENERAL_BIT_3 0x00080000 -#define RESET_HORIZON 0x00100000 -#define RESET_ATM 0x00200000 -#define RESET_RX 0x00400000 -#define RESET_TX 0x00800000 -#define RESET_HOST 0x01000000 -// RESERVED 0x02000000 -#define TARGET_RETRY_DISABLE 0x04000000 -#define ATM_LAYER_SELECT 0x08000000 -#define ATM_LAYER_STATUS 0x10000000 -// RESERVED 0xE0000000 - -/* Interrupt source and enable registers */ - -#define RX_DATA_AV 0x00000001 -#define RX_DISABLED 0x00000002 -#define TIMING_MARKER 0x00000004 -#define FORCED 0x00000008 -#define RX_BUS_MASTER_COMPLETE 0x00000010 -#define TX_BUS_MASTER_COMPLETE 0x00000020 -#define ABR_TX_CELL_COUNT_INT 0x00000040 -#define DEBUG_INT 0x00000080 -// RESERVED 0xFFFFFF00 - -/* PIO and Bus Mastering */ - -#define MAX_PIO_COUNT 0x000000ff // 255 - make tunable? 
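The interrupt source and enable bits defined above are plain single-bit flags, so a handler just masks the latched word against them. A small standalone decode sketch, illustrative only and not the removed driver's interrupt handler:

  #include <stdio.h>

  #define RX_DATA_AV             0x00000001
  #define RX_DISABLED            0x00000002
  #define RX_BUS_MASTER_COMPLETE 0x00000010
  #define TX_BUS_MASTER_COMPLETE 0x00000020

  static void decode_int_source(unsigned int irq_word)
  {
  	if (irq_word & RX_DATA_AV)
  		printf("rx queue has data\n");
  	if (irq_word & RX_DISABLED)
  		printf("receiver disabled\n");
  	if (irq_word & RX_BUS_MASTER_COMPLETE)
  		printf("rx bus-master transfer done\n");
  	if (irq_word & TX_BUS_MASTER_COMPLETE)
  		printf("tx bus-master transfer done\n");
  }

  int main(void)
  {
  	/* e.g. a simultaneous rx-data and tx-completion indication */
  	decode_int_source(RX_DATA_AV | TX_BUS_MASTER_COMPLETE);
  	return 0;
  }
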
-// 8188 is a hard limit for bus mastering -#define MAX_TRANSFER_COUNT 0x00001ffc // 8188 -#define MASTER_TX_AUTO_APPEND_DESC 0x80000000 - -/* TX channel config command port */ - -#define PCR_TIMER_ACCESS 0x0000 -#define SCR_TIMER_ACCESS 0x0001 -#define BUCKET_CAPACITY_ACCESS 0x0002 -#define BUCKET_FULLNESS_ACCESS 0x0003 -#define RATE_TYPE_ACCESS 0x0004 -// UNUSED 0x00F8 -#define TX_CHANNEL_CONFIG_MULT 0x0100 -// UNUSED 0xF800 -#define BUCKET_MAX_SIZE 0x003f - -/* TX channel config data port */ - -#define CLOCK_SELECT_SHIFT 4 -#define CLOCK_DISABLE 0x00ff - -#define IDLE_RATE_TYPE 0x0 -#define ABR_RATE_TYPE 0x1 -#define VBR_RATE_TYPE 0x2 -#define CBR_RATE_TYPE 0x3 - -/* TX config register */ - -#define DRVR_DRVRBAR_ENABLE 0x0001 -#define TXCLK_MUX_SELECT_RCLK 0x0002 -#define TRANSMIT_TIMING_MARKER 0x0004 -#define LOOPBACK_TIMING_MARKER 0x0008 -#define TX_TEST_MODE_16MHz 0x0000 -#define TX_TEST_MODE_8MHz 0x0010 -#define TX_TEST_MODE_5_33MHz 0x0020 -#define TX_TEST_MODE_4MHz 0x0030 -#define TX_TEST_MODE_3_2MHz 0x0040 -#define TX_TEST_MODE_2_66MHz 0x0050 -#define TX_TEST_MODE_2_29MHz 0x0060 -#define TX_NORMAL_OPERATION 0x0070 -#define ABR_ROUND_ROBIN 0x0080 - -/* TX status register */ - -#define IDLE_CHANNELS_MASK 0x00FF -#define ABR_CELL_COUNT_REACHED_MULT 0x0100 -#define ABR_CELL_COUNT_REACHED_MASK 0xFF - -/* RX config register */ - -#define NON_USER_CELLS_IN_ONE_CHANNEL 0x0008 -#define RX_ENABLE 0x0010 -#define IGNORE_UNUSED_VPI_VCI_BITS_SET 0x0000 -#define NON_USER_UNUSED_VPI_VCI_BITS_SET 0x0020 -#define DISCARD_UNUSED_VPI_VCI_BITS_SET 0x0040 - -/* RX line config register */ - -#define SIGNAL_LOSS 0x0001 -#define FREQUENCY_DETECT_ERROR 0x0002 -#define LOCK_DETECT_ERROR 0x0004 -#define SELECT_INTERNAL_LOOPBACK 0x0008 -#define LOCK_DETECT_ENABLE 0x0010 -#define FREQUENCY_DETECT_ENABLE 0x0020 -#define USER_FRAQ 0x0040 -#define GXTALOUT_SELECT_DIV4 0x0080 -#define GXTALOUT_SELECT_NO_GATING 0x0100 -#define TIMING_MARKER_RECEIVED 0x0200 - -/* RX channel port */ - -#define RX_CHANNEL_MASK 0x03FF -// UNUSED 0x3C00 -#define FLUSH_CHANNEL 0x4000 -#define RX_CHANNEL_UPDATE_IN_PROGRESS 0x8000 - -/* Receive queue entry */ - -#define RX_Q_ENTRY_LENGTH_MASK 0x0000FFFF -#define RX_Q_ENTRY_CHANNEL_SHIFT 16 -#define SIMONS_DODGEY_MARKER 0x08000000 -#define RX_CONGESTION_EXPERIENCED 0x10000000 -#define RX_CRC_10_OK 0x20000000 -#define RX_CRC_32_OK 0x40000000 -#define RX_COMPLETE_FRAME 0x80000000 - -/* Offsets and constants for use with the buffer memory */ - -/* Buffer pointers and channel types */ - -#define BUFFER_PTR_MASK 0x0000FFFF -#define RX_INT_THRESHOLD_MULT 0x00010000 -#define RX_INT_THRESHOLD_MASK 0x07FF -#define INT_EVERY_N_CELLS 0x08000000 -#define CONGESTION_EXPERIENCED 0x10000000 -#define FIRST_CELL_OF_AAL5_FRAME 0x20000000 -#define CHANNEL_TYPE_AAL5 0x00000000 -#define CHANNEL_TYPE_RAW_CELLS 0x40000000 -#define CHANNEL_TYPE_AAL3_4 0x80000000 - -/* Buffer status stuff */ - -#define BUFF_STATUS_MASK 0x00030000 -#define BUFF_STATUS_EMPTY 0x00000000 -#define BUFF_STATUS_CELL_AV 0x00010000 -#define BUFF_STATUS_LAST_CELL_AV 0x00020000 - -/* Transmit channel stuff */ - -/* Receive channel stuff */ - -#define RX_CHANNEL_DISABLED 0x00000000 -#define RX_CHANNEL_IDLE 0x00000001 - -/* General things */ - -#define INITIAL_CRC 0xFFFFFFFF - -// A Horizon u32, a byte! Really nasty. 
Horizon pointers are (32 bit) -// word addresses and so standard C pointer operations break (as they -// assume byte addresses); so we pretend that Horizon words (and word -// pointers) are bytes (and byte pointers) for the purposes of having -// a memory map that works. - -typedef u8 HDW; - -typedef struct cell_buf { - HDW payload[12]; - HDW next; - HDW cell_count; // AAL5 rx bufs - HDW res; - union { - HDW partial_crc; // AAL5 rx bufs - HDW cell_header; // RAW bufs - } u; -} cell_buf; - -typedef struct tx_ch_desc { - HDW rd_buf_type; - HDW wr_buf_type; - HDW partial_crc; - HDW cell_header; -} tx_ch_desc; - -typedef struct rx_ch_desc { - HDW wr_buf_type; - HDW rd_buf_type; -} rx_ch_desc; - -typedef struct rx_q_entry { - HDW entry; -} rx_q_entry; - -#define TX_CHANS 8 -#define RX_CHANS 1024 -#define RX_QS 1024 -#define MAX_VCS RX_CHANS - -/* Horizon buffer memory map */ - -// TX Channel Descriptors 2 -// TX Initial Buffers 8 // TX_CHANS -#define BUFN1_SIZE 118 // (126 - TX_CHANS) -// RX/TX Start/End Buffers 4 -#define BUFN2_SIZE 124 -// RX Queue Entries 64 -#define BUFN3_SIZE 192 -// RX Channel Descriptors 128 -#define BUFN4_SIZE 1408 -// TOTAL cell_buff chunks 2048 - -// cell_buf bufs[2048]; -// HDW dws[32768]; - -typedef struct MEMMAP { - tx_ch_desc tx_descs[TX_CHANS]; // 8 * 4 = 32 , 0x0020 - cell_buf inittxbufs[TX_CHANS]; // these are really - cell_buf bufn1[BUFN1_SIZE]; // part of this pool - cell_buf txfreebufstart; - cell_buf txfreebufend; - cell_buf rxfreebufstart; - cell_buf rxfreebufend; // 8+118+1+1+1+1+124 = 254 - cell_buf bufn2[BUFN2_SIZE]; // 16 * 254 = 4064 , 0x1000 - rx_q_entry rx_q_entries[RX_QS]; // 1 * 1024 = 1024 , 0x1400 - cell_buf bufn3[BUFN3_SIZE]; // 16 * 192 = 3072 , 0x2000 - rx_ch_desc rx_descs[MAX_VCS]; // 2 * 1024 = 2048 , 0x2800 - cell_buf bufn4[BUFN4_SIZE]; // 16 * 1408 = 22528 , 0x8000 -} MEMMAP; - -#define memmap ((MEMMAP *)0) - -/* end horizon specific bits */ - -typedef enum { - aal0, - aal34, - aal5 -} hrz_aal; - -typedef enum { - tx_busy, - rx_busy, - ultra -} hrz_flags; - -// a single struct pointed to by atm_vcc->dev_data - -typedef struct { - unsigned int tx_rate; - unsigned int rx_rate; - u16 channel; - u16 tx_xbr_bits; - u16 tx_pcr_bits; -#if 0 - u16 tx_scr_bits; - u16 tx_bucket_bits; -#endif - hrz_aal aal; -} hrz_vcc; - -struct hrz_dev { - - u32 iobase; - u32 * membase; - - struct sk_buff * rx_skb; // skb being RXed - unsigned int rx_bytes; // bytes remaining to RX within region - void * rx_addr; // addr to send bytes to (for PIO) - unsigned int rx_channel; // channel that the skb is going out on - - struct sk_buff * tx_skb; // skb being TXed - unsigned int tx_bytes; // bytes remaining to TX within region - void * tx_addr; // addr to send bytes from (for PIO) - struct iovec * tx_iovec; // remaining regions - unsigned int tx_regions; // number of remaining regions - - spinlock_t mem_lock; - wait_queue_head_t tx_queue; - - u8 irq; - unsigned long flags; - u8 tx_last; - u8 tx_idle; - - rx_q_entry * rx_q_reset; - rx_q_entry * rx_q_entry; - rx_q_entry * rx_q_wrap; - - struct atm_dev * atm_dev; - - u32 last_vc; - - int noof_spare_buffers; - u16 spare_buffers[SPARE_BUFFER_POOL_SIZE]; - - u16 tx_channel_record[TX_CHANS]; - - // this is what we follow when we get incoming data - u32 txer[MAX_VCS/32]; - struct atm_vcc * rxer[MAX_VCS]; - - // cell rate allocation - spinlock_t rate_lock; - unsigned int rx_avail; - unsigned int tx_avail; - - // dev stats - unsigned long tx_cell_count; - unsigned long rx_cell_count; - unsigned long hec_error_count; - unsigned 
long unassigned_cell_count; - - struct pci_dev * pci_dev; - struct timer_list housekeeping; -}; - -typedef struct hrz_dev hrz_dev; - -/* macros for use later */ - -#define BUF_PTR(cbptr) ((cbptr) - (cell_buf *) 0) - -#define INTERESTING_INTERRUPTS \ - (RX_DATA_AV | RX_DISABLED | TX_BUS_MASTER_COMPLETE | RX_BUS_MASTER_COMPLETE) - -// 190 cells by default (192 TX buffers - 2 elbow room, see docs) -#define TX_AAL5_LIMIT (190*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER) // 9112 - -// Have enough RX buffers (unless we allow other buffer splits) -#define RX_AAL5_LIMIT ATM_MAX_AAL5_PDU - -/* multi-statement macro protector */ -#define DW(x) do{ x } while(0) - -#define HRZ_DEV(atm_dev) ((hrz_dev *) (atm_dev)->dev_data) -#define HRZ_VCC(atm_vcc) ((hrz_vcc *) (atm_vcc)->dev_data) - -/* Turn the LEDs on and off */ -// The LEDs bits are upside down in that setting the bit in the debug -// register will turn the appropriate LED off. - -#define YELLOW_LED DEBUG_BIT_0 -#define GREEN_LED DEBUG_BIT_1 -#define YELLOW_LED_OE DEBUG_BIT_0_OE -#define GREEN_LED_OE DEBUG_BIT_1_OE - -#define GREEN_LED_OFF(dev) \ - wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | GREEN_LED) -#define GREEN_LED_ON(dev) \ - wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ GREEN_LED) -#define YELLOW_LED_OFF(dev) \ - wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) | YELLOW_LED) -#define YELLOW_LED_ON(dev) \ - wr_regl (dev, CONTROL_0_REG, rd_regl (dev, CONTROL_0_REG) &~ YELLOW_LED) - -typedef enum { - round_up, - round_down, - round_nearest -} rounding; - -#endif /* DRIVER_ATM_HORIZON_H */ -- cgit v1.2.3 From 052e1f01bfae8be6f31b61ed3a2356edfca855dc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Apr 2022 10:54:33 -0700 Subject: net: atm: remove support for ZeitNet ZN122x ATM devices This driver received nothing but automated fixes in the last 15 years. Since it's using virt_to_bus it's unlikely to be used on any modern platform. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- arch/mips/configs/gpr_defconfig | 1 - arch/mips/configs/mtx1_defconfig | 1 - drivers/atm/Kconfig | 20 - drivers/atm/Makefile | 1 - drivers/atm/uPD98401.h | 293 ------- drivers/atm/uPD98402.c | 266 ------ drivers/atm/uPD98402.h | 107 --- drivers/atm/zatm.c | 1652 -------------------------------------- drivers/atm/zatm.h | 104 --- include/uapi/linux/atm_zatm.h | 47 -- 10 files changed, 2492 deletions(-) delete mode 100644 drivers/atm/uPD98401.h delete mode 100644 drivers/atm/uPD98402.c delete mode 100644 drivers/atm/uPD98402.h delete mode 100644 drivers/atm/zatm.c delete mode 100644 drivers/atm/zatm.h delete mode 100644 include/uapi/linux/atm_zatm.h (limited to 'drivers') diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 7ed202db9ef0..d82f4ebf687f 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -178,7 +178,6 @@ CONFIG_NETCONSOLE=m CONFIG_ATM_TCP=m CONFIG_ATM_LANAI=m CONFIG_ATM_ENI=m -CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m CONFIG_ATM_IA=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index f46ad2e294fa..0cb4d9aa14d1 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -255,7 +255,6 @@ CONFIG_ARCNET_COM20020_CS=m CONFIG_ATM_TCP=m CONFIG_ATM_LANAI=m CONFIG_ATM_ENI=m -CONFIG_ATM_ZATM=m CONFIG_ATM_NICSTAR=m CONFIG_ATM_IDT77252=m CONFIG_ATM_IA=m diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 9c778308722a..63cdb46a3439 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -146,26 +146,6 @@ config ATM_ENI_BURST_RX_2W try this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or 8W are also set may or may not improve throughput. -config ATM_ZATM - tristate "ZeitNet ZN1221/ZN1225" - depends on PCI && VIRT_TO_BUS - help - Driver for the ZeitNet ZN1221 (MMF) and ZN1225 (UTP-5) 155 Mbps ATM - adapters. - - To compile this driver as a module, choose M here: the module will - be called zatm. - -config ATM_ZATM_DEBUG - bool "Enable extended debugging" - depends on ATM_ZATM - help - Extended debugging records various events and displays that list - when an inconsistency is detected. This mechanism is faster than - generally using printks, but still has some impact on performance. - Note that extended debugging may create certain race conditions - itself. Enable this ONLY if you suspect problems with the driver. - config ATM_NICSTAR tristate "IDT 77201 (NICStAR) (ForeRunnerLE)" depends on PCI diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile index 1b6a8ddaf007..c9eade92019b 100644 --- a/drivers/atm/Makefile +++ b/drivers/atm/Makefile @@ -5,7 +5,6 @@ fore_200e-y := fore200e.o -obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o obj-$(CONFIG_ATM_NICSTAR) += nicstar.o obj-$(CONFIG_ATM_IA) += iphase.o suni.o obj-$(CONFIG_ATM_FORE200E) += fore_200e.o diff --git a/drivers/atm/uPD98401.h b/drivers/atm/uPD98401.h deleted file mode 100644 index f766a5ef0c5d..000000000000 --- a/drivers/atm/uPD98401.h +++ /dev/null @@ -1,293 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* drivers/atm/uPD98401.h - NEC uPD98401 (SAR) declarations */ - -/* Written 1995 by Werner Almesberger, EPFL LRC */ - - -#ifndef DRIVERS_ATM_uPD98401_H -#define DRIVERS_ATM_uPD98401_H - - -#define MAX_CRAM_SIZE (1 << 18) /* 2^18 words */ -#define RAM_INCREMENT 1024 /* check in 4 kB increments */ - -#define uPD98401_PORTS 0x24 /* probably more ? 
*/ - - -/* - * Commands - */ - -#define uPD98401_OPEN_CHAN 0x20000000 /* open channel */ -#define uPD98401_CHAN_ADDR 0x0003fff8 /* channel address */ -#define uPD98401_CHAN_ADDR_SHIFT 3 -#define uPD98401_CLOSE_CHAN 0x24000000 /* close channel */ -#define uPD98401_CHAN_RT 0x02000000 /* RX/TX (0 TX, 1 RX) */ -#define uPD98401_DEACT_CHAN 0x28000000 /* deactivate channel */ -#define uPD98401_TX_READY 0x30000000 /* TX ready */ -#define uPD98401_ADD_BAT 0x34000000 /* add batches */ -#define uPD98401_POOL 0x000f0000 /* pool number */ -#define uPD98401_POOL_SHIFT 16 -#define uPD98401_POOL_NUMBAT 0x0000ffff /* number of batches */ -#define uPD98401_NOP 0x3f000000 /* NOP */ -#define uPD98401_IND_ACC 0x00000000 /* Indirect Access */ -#define uPD98401_IA_RW 0x10000000 /* Read/Write (0 W, 1 R) */ -#define uPD98401_IA_B3 0x08000000 /* Byte select, 1 enable */ -#define uPD98401_IA_B2 0x04000000 -#define uPD98401_IA_B1 0x02000000 -#define uPD98401_IA_B0 0x01000000 -#define uPD98401_IA_BALL 0x0f000000 /* whole longword */ -#define uPD98401_IA_TGT 0x000c0000 /* Target */ -#define uPD98401_IA_TGT_SHIFT 18 -#define uPD98401_IA_TGT_CM 0 /* - Control Memory */ -#define uPD98401_IA_TGT_SAR 1 /* - uPD98401 registers */ -#define uPD98401_IA_TGT_PHY 3 /* - PHY device */ -#define uPD98401_IA_ADDR 0x0003ffff - -/* - * Command Register Status - */ - -#define uPD98401_BUSY 0x80000000 /* SAR is busy */ -#define uPD98401_LOCKED 0x40000000 /* SAR is locked by other CPU */ - -/* - * Indications - */ - -/* Normal (AAL5) Receive Indication */ -#define uPD98401_AAL5_UINFO 0xffff0000 /* user-supplied information */ -#define uPD98401_AAL5_UINFO_SHIFT 16 -#define uPD98401_AAL5_SIZE 0x0000ffff /* PDU size (in _CELLS_ !!) */ -#define uPD98401_AAL5_CHAN 0x7fff0000 /* Channel number */ -#define uPD98401_AAL5_CHAN_SHIFT 16 -#define uPD98401_AAL5_ERR 0x00008000 /* Error indication */ -#define uPD98401_AAL5_CI 0x00004000 /* Congestion Indication */ -#define uPD98401_AAL5_CLP 0x00002000 /* CLP (>= 1 cell had CLP=1) */ -#define uPD98401_AAL5_ES 0x00000f00 /* Error Status */ -#define uPD98401_AAL5_ES_SHIFT 8 -#define uPD98401_AAL5_ES_NONE 0 /* No error */ -#define uPD98401_AAL5_ES_FREE 1 /* Receiver free buf underflow */ -#define uPD98401_AAL5_ES_FIFO 2 /* Receiver FIFO overrun */ -#define uPD98401_AAL5_ES_TOOBIG 3 /* Maximum length violation */ -#define uPD98401_AAL5_ES_CRC 4 /* CRC error */ -#define uPD98401_AAL5_ES_ABORT 5 /* User abort */ -#define uPD98401_AAL5_ES_LENGTH 6 /* Length violation */ -#define uPD98401_AAL5_ES_T1 7 /* T1 error (timeout) */ -#define uPD98401_AAL5_ES_DEACT 8 /* Deactivated with DEACT_CHAN */ -#define uPD98401_AAL5_POOL 0x0000001f /* Free buffer pool number */ - -/* Raw Cell Indication */ -#define uPD98401_RAW_UINFO uPD98401_AAL5_UINFO -#define uPD98401_RAW_UINFO_SHIFT uPD98401_AAL5_UINFO_SHIFT -#define uPD98401_RAW_HEC 0x000000ff /* HEC */ -#define uPD98401_RAW_CHAN uPD98401_AAL5_CHAN -#define uPD98401_RAW_CHAN_SHIFT uPD98401_AAL5_CHAN_SHIFT - -/* Transmit Indication */ -#define uPD98401_TXI_CONN 0x7fff0000 /* Connection Number */ -#define uPD98401_TXI_CONN_SHIFT 16 -#define uPD98401_TXI_ACTIVE 0x00008000 /* Channel remains active */ -#define uPD98401_TXI_PQP 0x00007fff /* Packet Queue Pointer */ - -/* - * Directly Addressable Registers - */ - -#define uPD98401_GMR 0x00 /* General Mode Register */ -#define uPD98401_GSR 0x01 /* General Status Register */ -#define uPD98401_IMR 0x02 /* Interrupt Mask Register */ -#define uPD98401_RQU 0x03 /* Receive Queue Underrun */ -#define uPD98401_RQA 0x04 /* Receive Queue 
Alert */ -#define uPD98401_ADDR 0x05 /* Last Burst Address */ -#define uPD98401_VER 0x06 /* Version Number */ -#define uPD98401_SWR 0x07 /* Software Reset */ -#define uPD98401_CMR 0x08 /* Command Register */ -#define uPD98401_CMR_L 0x09 /* Command Register and Lock/Unlock */ -#define uPD98401_CER 0x0a /* Command Extension Register */ -#define uPD98401_CER_L 0x0b /* Command Ext Reg and Lock/Unlock */ - -#define uPD98401_MSH(n) (0x10+(n)) /* Mailbox n Start Address High */ -#define uPD98401_MSL(n) (0x14+(n)) /* Mailbox n Start Address High */ -#define uPD98401_MBA(n) (0x18+(n)) /* Mailbox n Bottom Address */ -#define uPD98401_MTA(n) (0x1c+(n)) /* Mailbox n Tail Address */ -#define uPD98401_MWA(n) (0x20+(n)) /* Mailbox n Write Address */ - -/* GMR is at 0x00 */ -#define uPD98401_GMR_ONE 0x80000000 /* Must be set to one */ -#define uPD98401_GMR_SLM 0x40000000 /* Address mode (0 word, 1 byte) */ -#define uPD98401_GMR_CPE 0x00008000 /* Control Memory Parity Enable */ -#define uPD98401_GMR_LP 0x00004000 /* Loopback */ -#define uPD98401_GMR_WA 0x00002000 /* Early Bus Write Abort/RDY */ -#define uPD98401_GMR_RA 0x00001000 /* Early Read Abort/RDY */ -#define uPD98401_GMR_SZ 0x00000f00 /* Burst Size Enable */ -#define uPD98401_BURST16 0x00000800 /* 16-word burst */ -#define uPD98401_BURST8 0x00000400 /* 8-word burst */ -#define uPD98401_BURST4 0x00000200 /* 4-word burst */ -#define uPD98401_BURST2 0x00000100 /* 2-word burst */ -#define uPD98401_GMR_AD 0x00000080 /* Address (burst resolution) Disable */ -#define uPD98401_GMR_BO 0x00000040 /* Byte Order (0 little, 1 big) */ -#define uPD98401_GMR_PM 0x00000020 /* Bus Parity Mode (0 byte, 1 word)*/ -#define uPD98401_GMR_PC 0x00000010 /* Bus Parity Control (0even,1odd) */ -#define uPD98401_GMR_BPE 0x00000008 /* Bus Parity Enable */ -#define uPD98401_GMR_DR 0x00000004 /* Receive Drop Mode (0drop,1don't)*/ -#define uPD98401_GMR_SE 0x00000002 /* Shapers Enable */ -#define uPD98401_GMR_RE 0x00000001 /* Receiver Enable */ - -/* GSR is at 0x01, IMR is at 0x02 */ -#define uPD98401_INT_PI 0x80000000 /* PHY interrupt */ -#define uPD98401_INT_RQA 0x40000000 /* Receive Queue Alert */ -#define uPD98401_INT_RQU 0x20000000 /* Receive Queue Underrun */ -#define uPD98401_INT_RD 0x10000000 /* Receiver Deactivated */ -#define uPD98401_INT_SPE 0x08000000 /* System Parity Error */ -#define uPD98401_INT_CPE 0x04000000 /* Control Memory Parity Error */ -#define uPD98401_INT_SBE 0x02000000 /* System Bus Error */ -#define uPD98401_INT_IND 0x01000000 /* Initialization Done */ -#define uPD98401_INT_RCR 0x0000ff00 /* Raw Cell Received */ -#define uPD98401_INT_RCR_SHIFT 8 -#define uPD98401_INT_MF 0x000000f0 /* Mailbox Full */ -#define uPD98401_INT_MF_SHIFT 4 -#define uPD98401_INT_MM 0x0000000f /* Mailbox Modified */ - -/* VER is at 0x06 */ -#define uPD98401_MAJOR 0x0000ff00 /* Major revision */ -#define uPD98401_MAJOR_SHIFT 8 -#define uPD98401_MINOR 0x000000ff /* Minor revision */ - -/* - * Indirectly Addressable Registers - */ - -#define uPD98401_IM(n) (0x40000+(n)) /* Scheduler n I and M */ -#define uPD98401_X(n) (0x40010+(n)) /* Scheduler n X */ -#define uPD98401_Y(n) (0x40020+(n)) /* Scheduler n Y */ -#define uPD98401_PC(n) (0x40030+(n)) /* Scheduler n P, C, p and c */ -#define uPD98401_PS(n) (0x40040+(n)) /* Scheduler n priority and status */ - -/* IM contents */ -#define uPD98401_IM_I 0xff000000 /* I */ -#define uPD98401_IM_I_SHIFT 24 -#define uPD98401_IM_M 0x00ffffff /* M */ - -/* PC contents */ -#define uPD98401_PC_P 0xff000000 /* P */ -#define uPD98401_PC_P_SHIFT 24 
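The AAL5 receive indication fields defined earlier in this header (uPD98401_AAL5_*) are classic mask-and-shift layouts; note that the SIZE field counts cells, not bytes. Because the UINFO/SIZE masks overlap the CHAN/ES masks, the sketch below assumes the two description words are presented separately; the field names and widths are copied from the header, the word split is an assumption:

  #include <stdio.h>

  #define uPD98401_AAL5_UINFO       0xffff0000u  /* user-supplied information */
  #define uPD98401_AAL5_UINFO_SHIFT 16
  #define uPD98401_AAL5_SIZE        0x0000ffffu  /* PDU size, in cells */
  #define uPD98401_AAL5_CHAN        0x7fff0000u  /* channel number */
  #define uPD98401_AAL5_CHAN_SHIFT  16
  #define uPD98401_AAL5_ERR         0x00008000u  /* error indication */
  #define uPD98401_AAL5_ES          0x00000f00u  /* error status */
  #define uPD98401_AAL5_ES_SHIFT    8

  static void decode_aal5_ind(unsigned int w0, unsigned int w1)
  {
  	printf("uinfo=%u cells=%u chan=%u err=%u status=%u\n",
  	       (w0 & uPD98401_AAL5_UINFO) >> uPD98401_AAL5_UINFO_SHIFT,
  	       w0 & uPD98401_AAL5_SIZE,
  	       (w1 & uPD98401_AAL5_CHAN) >> uPD98401_AAL5_CHAN_SHIFT,
  	       !!(w1 & uPD98401_AAL5_ERR),
  	       (w1 & uPD98401_AAL5_ES) >> uPD98401_AAL5_ES_SHIFT);
  }

  int main(void)
  {
  	/* made-up example: uinfo 7, 3-cell PDU on channel 42, no error */
  	decode_aal5_ind((7u << uPD98401_AAL5_UINFO_SHIFT) | 3u,
  	                42u << uPD98401_AAL5_CHAN_SHIFT);
  	return 0;
  }
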
-#define uPD98401_PC_C 0x00ff0000 /* C */ -#define uPD98401_PC_C_SHIFT 16 -#define uPD98401_PC_p 0x0000ff00 /* p */ -#define uPD98401_PC_p_SHIFT 8 -#define uPD98401_PC_c 0x000000ff /* c */ - -/* PS contents */ -#define uPD98401_PS_PRIO 0xf0 /* Priority level (0 high, 15 low) */ -#define uPD98401_PS_PRIO_SHIFT 4 -#define uPD98401_PS_S 0x08 /* Scan - must be 0 (internal) */ -#define uPD98401_PS_R 0x04 /* Round Robin (internal) */ -#define uPD98401_PS_A 0x02 /* Active (internal) */ -#define uPD98401_PS_E 0x01 /* Enabled */ - -#define uPD98401_TOS 0x40100 /* Top of Stack Control Memory Address */ -#define uPD98401_SMA 0x40200 /* Shapers Control Memory Start Address */ -#define uPD98401_PMA 0x40201 /* Receive Pool Control Memory Start Address */ -#define uPD98401_T1R 0x40300 /* T1 Register */ -#define uPD98401_VRR 0x40301 /* VPI/VCI Reduction Register/Recv. Shutdown */ -#define uPD98401_TSR 0x40302 /* Time-Stamp Register */ - -/* VRR is at 0x40301 */ -#define uPD98401_VRR_SDM 0x80000000 /* Shutdown Mode */ -#define uPD98401_VRR_SHIFT 0x000f0000 /* VPI/VCI Shift */ -#define uPD98401_VRR_SHIFT_SHIFT 16 -#define uPD98401_VRR_MASK 0x0000ffff /* VPI/VCI mask */ - -/* - * TX packet descriptor - */ - -#define uPD98401_TXPD_SIZE 16 /* descriptor size (in bytes) */ - -#define uPD98401_TXPD_V 0x80000000 /* Valid bit */ -#define uPD98401_TXPD_DP 0x40000000 /* Descriptor (1) or Pointer (0) */ -#define uPD98401_TXPD_SM 0x20000000 /* Single (1) or Multiple (0) */ -#define uPD98401_TXPD_CLPM 0x18000000 /* CLP mode */ -#define uPD98401_CLPM_0 0 /* 00 CLP = 0 */ -#define uPD98401_CLPM_1 3 /* 11 CLP = 1 */ -#define uPD98401_CLPM_LAST 1 /* 01 CLP unless last cell */ -#define uPD98401_TXPD_CLPM_SHIFT 27 -#define uPD98401_TXPD_PTI 0x07000000 /* PTI pattern */ -#define uPD98401_TXPD_PTI_SHIFT 24 -#define uPD98401_TXPD_GFC 0x00f00000 /* GFC pattern */ -#define uPD98401_TXPD_GFC_SHIFT 20 -#define uPD98401_TXPD_C10 0x00040000 /* insert CRC-10 */ -#define uPD98401_TXPD_AAL5 0x00020000 /* AAL5 processing */ -#define uPD98401_TXPD_MB 0x00010000 /* TX mailbox number */ -#define uPD98401_TXPD_UU 0x0000ff00 /* CPCS-UU */ -#define uPD98401_TXPD_UU_SHIFT 8 -#define uPD98401_TXPD_CPI 0x000000ff /* CPI */ - -/* - * TX buffer descriptor - */ - -#define uPD98401_TXBD_SIZE 8 /* descriptor size (in bytes) */ - -#define uPD98401_TXBD_LAST 0x80000000 /* last buffer in packet */ - -/* - * TX VC table - */ - -/* 1st word has the same structure as in a TX packet descriptor */ -#define uPD98401_TXVC_L 0x80000000 /* last buffer */ -#define uPD98401_TXVC_SHP 0x0f000000 /* shaper number */ -#define uPD98401_TXVC_SHP_SHIFT 24 -#define uPD98401_TXVC_VPI 0x00ff0000 /* VPI */ -#define uPD98401_TXVC_VPI_SHIFT 16 -#define uPD98401_TXVC_VCI 0x0000ffff /* VCI */ -#define uPD98401_TXVC_QRP 6 /* Queue Read Pointer is in word 6 */ - -/* - * RX free buffer pools descriptor - */ - -#define uPD98401_RXFP_ALERT 0x70000000 /* low water mark */ -#define uPD98401_RXFP_ALERT_SHIFT 28 -#define uPD98401_RXFP_BFSZ 0x0f000000 /* buffer size, 64*2^n */ -#define uPD98401_RXFP_BFSZ_SHIFT 24 -#define uPD98401_RXFP_BTSZ 0x00ff0000 /* batch size, n+1 */ -#define uPD98401_RXFP_BTSZ_SHIFT 16 -#define uPD98401_RXFP_REMAIN 0x0000ffff /* remaining batches in pool */ - -/* - * RX VC table - */ - -#define uPD98401_RXVC_BTSZ 0xff000000 /* remaining free buffers in batch */ -#define uPD98401_RXVC_BTSZ_SHIFT 24 -#define uPD98401_RXVC_MB 0x00200000 /* RX mailbox number */ -#define uPD98401_RXVC_POOL 0x001f0000 /* free buffer pool number */ -#define uPD98401_RXVC_POOL_SHIFT 16 
-#define uPD98401_RXVC_UINFO 0x0000ffff /* user-supplied information */ -#define uPD98401_RXVC_T1 0xffff0000 /* T1 timestamp */ -#define uPD98401_RXVC_T1_SHIFT 16 -#define uPD98401_RXVC_PR 0x00008000 /* Packet Reception, 1 if busy */ -#define uPD98401_RXVC_DR 0x00004000 /* FIFO Drop */ -#define uPD98401_RXVC_OD 0x00001000 /* Drop OAM cells */ -#define uPD98401_RXVC_AR 0x00000800 /* AAL5 or raw cell; 1 if AAL5 */ -#define uPD98401_RXVC_MAXSEG 0x000007ff /* max number of segments per PDU */ -#define uPD98401_RXVC_REM 0xfffe0000 /* remaining words in curr buffer */ -#define uPD98401_RXVC_REM_SHIFT 17 -#define uPD98401_RXVC_CLP 0x00010000 /* CLP received */ -#define uPD98401_RXVC_BFA 0x00008000 /* Buffer Assigned */ -#define uPD98401_RXVC_BTA 0x00004000 /* Batch Assigned */ -#define uPD98401_RXVC_CI 0x00002000 /* Congestion Indication */ -#define uPD98401_RXVC_DD 0x00001000 /* Dropping incoming cells */ -#define uPD98401_RXVC_DP 0x00000800 /* like PR ? */ -#define uPD98401_RXVC_CURSEG 0x000007ff /* Current Segment count */ - -/* - * RX lookup table - */ - -#define uPD98401_RXLT_ENBL 0x8000 /* Enable */ - -#endif diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c deleted file mode 100644 index 239852d85558..000000000000 --- a/drivers/atm/uPD98402.c +++ /dev/null @@ -1,266 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* drivers/atm/uPD98402.c - NEC uPD98402 (PHY) declarations */ - -/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "uPD98402.h" - - -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) -#endif - - -struct uPD98402_priv { - struct k_sonet_stats sonet_stats;/* link diagnostics */ - unsigned char framing; /* SONET/SDH framing */ - int loop_mode; /* loopback mode */ - spinlock_t lock; -}; - - -#define PRIV(dev) ((struct uPD98402_priv *) dev->phy_data) - -#define PUT(val,reg) dev->ops->phy_put(dev,val,uPD98402_##reg) -#define GET(reg) dev->ops->phy_get(dev,uPD98402_##reg) - - -static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int zero) -{ - struct sonet_stats tmp; - int error = 0; - - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); - sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); - if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); - if (zero && !error) { - /* unused fields are reported as -1, but we must not "adjust" - them */ - tmp.corr_hcs = tmp.tx_cells = tmp.rx_cells = 0; - sonet_subtract_stats(&PRIV(dev)->sonet_stats,&tmp); - } - return error ? 
-EFAULT : 0; -} - - -static int set_framing(struct atm_dev *dev,unsigned char framing) -{ - static const unsigned char sonet[] = { 1,2,3,0 }; - static const unsigned char sdh[] = { 1,0,0,2 }; - const char *set; - unsigned long flags; - - switch (framing) { - case SONET_FRAME_SONET: - set = sonet; - break; - case SONET_FRAME_SDH: - set = sdh; - break; - default: - return -EINVAL; - } - spin_lock_irqsave(&PRIV(dev)->lock, flags); - PUT(set[0],C11T); - PUT(set[1],C12T); - PUT(set[2],C13T); - PUT((GET(MDR) & ~uPD98402_MDR_SS_MASK) | (set[3] << - uPD98402_MDR_SS_SHIFT),MDR); - spin_unlock_irqrestore(&PRIV(dev)->lock, flags); - return 0; -} - - -static int get_sense(struct atm_dev *dev,u8 __user *arg) -{ - unsigned long flags; - unsigned char s[3]; - - spin_lock_irqsave(&PRIV(dev)->lock, flags); - s[0] = GET(C11R); - s[1] = GET(C12R); - s[2] = GET(C13R); - spin_unlock_irqrestore(&PRIV(dev)->lock, flags); - return (put_user(s[0], arg) || put_user(s[1], arg+1) || - put_user(s[2], arg+2) || put_user(0xff, arg+3) || - put_user(0xff, arg+4) || put_user(0xff, arg+5)) ? -EFAULT : 0; -} - - -static int set_loopback(struct atm_dev *dev,int mode) -{ - unsigned char mode_reg; - - mode_reg = GET(MDR) & ~(uPD98402_MDR_TPLP | uPD98402_MDR_ALP | - uPD98402_MDR_RPLP); - switch (__ATM_LM_XTLOC(mode)) { - case __ATM_LM_NONE: - break; - case __ATM_LM_PHY: - mode_reg |= uPD98402_MDR_TPLP; - break; - case __ATM_LM_ATM: - mode_reg |= uPD98402_MDR_ALP; - break; - default: - return -EINVAL; - } - switch (__ATM_LM_XTRMT(mode)) { - case __ATM_LM_NONE: - break; - case __ATM_LM_PHY: - mode_reg |= uPD98402_MDR_RPLP; - break; - default: - return -EINVAL; - } - PUT(mode_reg,MDR); - PRIV(dev)->loop_mode = mode; - return 0; -} - - -static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) -{ - switch (cmd) { - - case SONET_GETSTATZ: - case SONET_GETSTAT: - return fetch_stats(dev,arg, cmd == SONET_GETSTATZ); - case SONET_SETFRAMING: - return set_framing(dev, (int)(unsigned long)arg); - case SONET_GETFRAMING: - return put_user(PRIV(dev)->framing,(int __user *)arg) ? - -EFAULT : 0; - case SONET_GETFRSENSE: - return get_sense(dev,arg); - case ATM_SETLOOP: - return set_loopback(dev, (int)(unsigned long)arg); - case ATM_GETLOOP: - return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ? - -EFAULT : 0; - case ATM_QUERYLOOP: - return put_user(ATM_LM_LOC_PHY | ATM_LM_LOC_ATM | - ATM_LM_RMT_PHY,(int __user *)arg) ? 
-EFAULT : 0; - default: - return -ENOIOCTLCMD; - } -} - - -#define ADD_LIMITED(s,v) \ - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } - - -static void stat_event(struct atm_dev *dev) -{ - unsigned char events; - - events = GET(PCR); - if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB); - if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT); - if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT); - if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT); - if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT); -} - - -#undef ADD_LIMITED - - -static void uPD98402_int(struct atm_dev *dev) -{ - static unsigned long silence = 0; - unsigned char reason; - - while ((reason = GET(PICR))) { - if (reason & uPD98402_INT_LOS) - printk(KERN_NOTICE "%s(itf %d): signal lost\n", - dev->type,dev->number); - if (reason & uPD98402_INT_PFM) stat_event(dev); - if (reason & uPD98402_INT_PCO) { - (void) GET(PCOCR); /* clear interrupt cause */ - atomic_add(GET(HECCT), - &PRIV(dev)->sonet_stats.uncorr_hcs); - } - if ((reason & uPD98402_INT_RFO) && - (time_after(jiffies, silence) || silence == 0)) { - printk(KERN_WARNING "%s(itf %d): uPD98402 receive " - "FIFO overflow\n",dev->type,dev->number); - silence = (jiffies+HZ/2)|1; - } - } -} - - -static int uPD98402_start(struct atm_dev *dev) -{ - DPRINTK("phy_start\n"); - if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) - return -ENOMEM; - spin_lock_init(&PRIV(dev)->lock); - memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats)); - (void) GET(PCR); /* clear performance events */ - PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */ - (void) GET(PCOCR); /* clear overflows */ - PUT(~uPD98402_PCO_HECC,PCOMR); - (void) GET(PICR); /* clear interrupts */ - PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | - uPD98402_INT_LOS),PIMR); /* enable them */ - (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); - return 0; -} - - -static int uPD98402_stop(struct atm_dev *dev) -{ - /* let SAR driver worry about stopping interrupts */ - kfree(PRIV(dev)); - return 0; -} - - -static const struct atmphy_ops uPD98402_ops = { - .start = uPD98402_start, - .ioctl = uPD98402_ioctl, - .interrupt = uPD98402_int, - .stop = uPD98402_stop, -}; - - -int uPD98402_init(struct atm_dev *dev) -{ -DPRINTK("phy_init\n"); - dev->phy = &uPD98402_ops; - return 0; -} - - -MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(uPD98402_init); - -static __init int uPD98402_module_init(void) -{ - return 0; -} -module_init(uPD98402_module_init); -/* module_exit not defined so not unloadable */ diff --git a/drivers/atm/uPD98402.h b/drivers/atm/uPD98402.h deleted file mode 100644 index 437cfaa20c96..000000000000 --- a/drivers/atm/uPD98402.h +++ /dev/null @@ -1,107 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* drivers/atm/uPD98402.h - NEC uPD98402 (PHY) declarations */ - -/* Written 1995 by Werner Almesberger, EPFL LRC */ - - -#ifndef DRIVERS_ATM_uPD98402_H -#define DRIVERS_ATM_uPD98402_H - -/* - * Registers - */ - -#define uPD98402_CMR 0x00 /* Command Register */ -#define uPD98402_MDR 0x01 /* Mode Register */ -#define uPD98402_PICR 0x02 /* PHY Interrupt Cause Register */ -#define uPD98402_PIMR 0x03 /* PHY Interrupt Mask Register */ -#define uPD98402_ACR 0x04 /* Alarm Cause Register 
*/ -#define uPD98402_ACMR 0x05 /* Alarm Cause Mask Register */ -#define uPD98402_PCR 0x06 /* Performance Cause Register */ -#define uPD98402_PCMR 0x07 /* Performance Cause Mask Register */ -#define uPD98402_IACM 0x08 /* Internal Alarm Cause Mask Register */ -#define uPD98402_B1ECT 0x09 /* B1 Error Count Register */ -#define uPD98402_B2ECT 0x0a /* B2 Error Count Register */ -#define uPD98402_B3ECT 0x0b /* B3 Error Count Regster */ -#define uPD98402_PFECB 0x0c /* Path FEBE Count Register */ -#define uPD98402_LECCT 0x0d /* Line FEBE Count Register */ -#define uPD98402_HECCT 0x0e /* HEC Error Count Register */ -#define uPD98402_FJCT 0x0f /* Frequence Justification Count Reg */ -#define uPD98402_PCOCR 0x10 /* Perf. Counter Overflow Cause Reg */ -#define uPD98402_PCOMR 0x11 /* Perf. Counter Overflow Mask Reg */ -#define uPD98402_C11T 0x20 /* C11T Data Register */ -#define uPD98402_C12T 0x21 /* C12T Data Register */ -#define uPD98402_C13T 0x22 /* C13T Data Register */ -#define uPD98402_F1T 0x23 /* F1T Data Register */ -#define uPD98402_K2T 0x25 /* K2T Data Register */ -#define uPD98402_C2T 0x26 /* C2T Data Register */ -#define uPD98402_F2T 0x27 /* F2T Data Register */ -#define uPD98402_C11R 0x30 /* C11T Data Register */ -#define uPD98402_C12R 0x31 /* C12T Data Register */ -#define uPD98402_C13R 0x32 /* C13T Data Register */ -#define uPD98402_F1R 0x33 /* F1T Data Register */ -#define uPD98402_K2R 0x35 /* K2T Data Register */ -#define uPD98402_C2R 0x36 /* C2T Data Register */ -#define uPD98402_F2R 0x37 /* F2T Data Register */ - -/* CMR is at 0x00 */ -#define uPD98402_CMR_PFRF 0x01 /* Send path FERF */ -#define uPD98402_CMR_LFRF 0x02 /* Send line FERF */ -#define uPD98402_CMR_PAIS 0x04 /* Send path AIS */ -#define uPD98402_CMR_LAIS 0x08 /* Send line AIS */ - -/* MDR is at 0x01 */ -#define uPD98402_MDR_ALP 0x01 /* ATM layer loopback */ -#define uPD98402_MDR_TPLP 0x02 /* PMD loopback, to host */ -#define uPD98402_MDR_RPLP 0x04 /* PMD loopback, to network */ -#define uPD98402_MDR_SS0 0x08 /* SS0 */ -#define uPD98402_MDR_SS1 0x10 /* SS1 */ -#define uPD98402_MDR_SS_MASK 0x18 /* mask */ -#define uPD98402_MDR_SS_SHIFT 3 /* shift */ -#define uPD98402_MDR_HEC 0x20 /* disable HEC inbound processing */ -#define uPD98402_MDR_FSR 0x40 /* disable frame scrambler */ -#define uPD98402_MDR_CSR 0x80 /* disable cell scrambler */ - -/* PICR is at 0x02, PIMR is at 0x03 */ -#define uPD98402_INT_PFM 0x01 /* performance counter has changed */ -#define uPD98402_INT_ALM 0x02 /* line fault */ -#define uPD98402_INT_RFO 0x04 /* receive FIFO overflow */ -#define uPD98402_INT_PCO 0x08 /* performance counter overflow */ -#define uPD98402_INT_OTD 0x20 /* OTD has occurred */ -#define uPD98402_INT_LOS 0x40 /* Loss Of Signal */ -#define uPD98402_INT_LOF 0x80 /* Loss Of Frame */ - -/* ACR is as 0x04, ACMR is at 0x05 */ -#define uPD98402_ALM_PFRF 0x01 /* path FERF */ -#define uPD98402_ALM_LFRF 0x02 /* line FERF */ -#define uPD98402_ALM_PAIS 0x04 /* path AIS */ -#define uPD98402_ALM_LAIS 0x08 /* line AIS */ -#define uPD98402_ALM_LOD 0x10 /* loss of delineation */ -#define uPD98402_ALM_LOP 0x20 /* loss of pointer */ -#define uPD98402_ALM_OOF 0x40 /* out of frame */ - -/* PCR is at 0x06, PCMR is at 0x07 */ -#define uPD98402_PFM_PFEB 0x01 /* path FEBE */ -#define uPD98402_PFM_LFEB 0x02 /* line FEBE */ -#define uPD98402_PFM_B3E 0x04 /* B3 error */ -#define uPD98402_PFM_B2E 0x08 /* B2 error */ -#define uPD98402_PFM_B1E 0x10 /* B1 error */ -#define uPD98402_PFM_FJ 0x20 /* frequency justification */ - -/* IACM is at 0x08 */ -#define 
uPD98402_IACM_PFRF 0x01 /* don't generate path FERF */ -#define uPD98402_IACM_LFRF 0x02 /* don't generate line FERF */ - -/* PCOCR is at 0x010, PCOMR is at 0x11 */ -#define uPD98402_PCO_B1EC 0x01 /* B1ECT overflow */ -#define uPD98402_PCO_B2EC 0x02 /* B2ECT overflow */ -#define uPD98402_PCO_B3EC 0x04 /* B3ECT overflow */ -#define uPD98402_PCO_PFBC 0x08 /* PFEBC overflow */ -#define uPD98402_PCO_LFBC 0x10 /* LFEVC overflow */ -#define uPD98402_PCO_HECC 0x20 /* HECCT overflow */ -#define uPD98402_PCO_FJC 0x40 /* FJCT overflow */ - - -int uPD98402_init(struct atm_dev *dev); - -#endif diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c deleted file mode 100644 index cf5fffcf98a1..000000000000 --- a/drivers/atm/zatm.c +++ /dev/null @@ -1,1652 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* drivers/atm/zatm.c - ZeitNet ZN122x device driver */ - -/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "uPD98401.h" -#include "uPD98402.h" -#include "zeprom.h" -#include "zatm.h" - - -/* - * TODO: - * - * Minor features - * - support 64 kB SDUs (will have to use multibuffer batches then :-( ) - * - proper use of CDV, credit = max(1,CDVT*PCR) - * - AAL0 - * - better receive timestamps - * - OAM - */ - -#define ZATM_COPPER 1 - -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) -#endif - -#ifndef CONFIG_ATM_ZATM_DEBUG - - -#define NULLCHECK(x) - -#define EVENT(s,a,b) - - -static void event_dump(void) -{ -} - - -#else - - -/* - * NULL pointer checking - */ - -#define NULLCHECK(x) \ - if ((unsigned long) (x) < 0x30) printk(KERN_CRIT #x "==0x%x\n", (int) (x)) - -/* - * Very extensive activity logging. Greatly improves bug detection speed but - * costs a few Mbps if enabled. - */ - -#define EV 64 - -static const char *ev[EV]; -static unsigned long ev_a[EV],ev_b[EV]; -static int ec = 0; - - -static void EVENT(const char *s,unsigned long a,unsigned long b) -{ - ev[ec] = s; - ev_a[ec] = a; - ev_b[ec] = b; - ec = (ec+1) % EV; -} - - -static void event_dump(void) -{ - int n,i; - - printk(KERN_NOTICE "----- event dump follows -----\n"); - for (n = 0; n < EV; n++) { - i = (ec+n) % EV; - printk(KERN_NOTICE); - printk(ev[i] ? 
ev[i] : "(null)",ev_a[i],ev_b[i]); - } - printk(KERN_NOTICE "----- event dump ends here -----\n"); -} - - -#endif /* CONFIG_ATM_ZATM_DEBUG */ - - -#define RING_BUSY 1 /* indication from do_tx that PDU has to be - backlogged */ - -static struct atm_dev *zatm_boards = NULL; -static unsigned long dummy[2] = {0,0}; - - -#define zin_n(r) inl(zatm_dev->base+r*4) -#define zin(r) inl(zatm_dev->base+uPD98401_##r*4) -#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4) -#define zwait() do {} while (zin(CMR) & uPD98401_BUSY) - -/* RX0, RX1, TX0, TX1 */ -static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 }; -static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */ - -#define MBX_SIZE(i) (mbx_entries[i]*mbx_esize[i]) - - -/*-------------------------------- utilities --------------------------------*/ - - -static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr) -{ - zwait(); - zout(value,CER); - zout(uPD98401_IND_ACC | uPD98401_IA_BALL | - (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); -} - - -static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr) -{ - zwait(); - zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW | - (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR); - zwait(); - return zin(CER); -} - - -/*------------------------------- free lists --------------------------------*/ - - -/* - * Free buffer head structure: - * [0] pointer to buffer (for SAR) - * [1] buffer descr link pointer (for SAR) - * [2] back pointer to skb (for poll_rx) - * [3] data - * ... - */ - -struct rx_buffer_head { - u32 buffer; /* pointer to buffer (for SAR) */ - u32 link; /* buffer descriptor link pointer (for SAR) */ - struct sk_buff *skb; /* back pointer to skb (for poll_rx) */ -}; - - -static void refill_pool(struct atm_dev *dev,int pool) -{ - struct zatm_dev *zatm_dev; - struct sk_buff *skb; - struct rx_buffer_head *first; - unsigned long flags; - int align,offset,free,count,size; - - EVENT("refill_pool\n",0,0); - zatm_dev = ZATM_DEV(dev); - size = (64 << (pool <= ZATM_AAL5_POOL_BASE ? 0 : - pool-ZATM_AAL5_POOL_BASE))+sizeof(struct rx_buffer_head); - if (size < PAGE_SIZE) { - align = 32; /* for 32 byte alignment */ - offset = sizeof(struct rx_buffer_head); - } - else { - align = 4096; - offset = zatm_dev->pool_info[pool].offset+ - sizeof(struct rx_buffer_head); - } - size += align; - spin_lock_irqsave(&zatm_dev->lock, flags); - free = zpeekl(zatm_dev,zatm_dev->pool_base+2*pool) & - uPD98401_RXFP_REMAIN; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - if (free >= zatm_dev->pool_info[pool].low_water) return; - EVENT("starting ... 
POOL: 0x%x, 0x%x\n", - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); - EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); - count = 0; - first = NULL; - while (free < zatm_dev->pool_info[pool].high_water) { - struct rx_buffer_head *head; - - skb = alloc_skb(size,GFP_ATOMIC); - if (!skb) { - printk(KERN_WARNING DEV_LABEL "(Itf %d): got no new " - "skb (%d) with %d free\n",dev->number,size,free); - break; - } - skb_reserve(skb,(unsigned char *) ((((unsigned long) skb->data+ - align+offset-1) & ~(unsigned long) (align-1))-offset)- - skb->data); - head = (struct rx_buffer_head *) skb->data; - skb_reserve(skb,sizeof(struct rx_buffer_head)); - if (!first) first = head; - count++; - head->buffer = virt_to_bus(skb->data); - head->link = 0; - head->skb = skb; - EVENT("enq skb 0x%08lx/0x%08lx\n",(unsigned long) skb, - (unsigned long) head); - spin_lock_irqsave(&zatm_dev->lock, flags); - if (zatm_dev->last_free[pool]) - ((struct rx_buffer_head *) (zatm_dev->last_free[pool]-> - data))[-1].link = virt_to_bus(head); - zatm_dev->last_free[pool] = skb; - skb_queue_tail(&zatm_dev->pool[pool],skb); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - free++; - } - if (first) { - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(virt_to_bus(first),CER); - zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count, - CMR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - EVENT ("POOL: 0x%x, 0x%x\n", - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool), - zpeekl(zatm_dev,zatm_dev->pool_base+2*pool+1)); - EVENT("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); - } -} - - -static void drain_free(struct atm_dev *dev,int pool) -{ - skb_queue_purge(&ZATM_DEV(dev)->pool[pool]); -} - - -static int pool_index(int max_pdu) -{ - int i; - - if (max_pdu % ATM_CELL_PAYLOAD) - printk(KERN_ERR DEV_LABEL ": driver error in pool_index: " - "max_pdu is %d\n",max_pdu); - if (max_pdu > 65536) return -1; - for (i = 0; (64 << i) < max_pdu; i++); - return i+ZATM_AAL5_POOL_BASE; -} - - -/* use_pool isn't reentrant */ - - -static void use_pool(struct atm_dev *dev,int pool) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - int size; - - zatm_dev = ZATM_DEV(dev); - if (!(zatm_dev->pool_info[pool].ref_count++)) { - skb_queue_head_init(&zatm_dev->pool[pool]); - size = pool-ZATM_AAL5_POOL_BASE; - if (size < 0) size = 0; /* 64B... */ - else if (size > 10) size = 10; /* ... 
64kB */ - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) << - uPD98401_RXFP_ALERT_SHIFT) | - (1 << uPD98401_RXFP_BTSZ_SHIFT) | - (size << uPD98401_RXFP_BFSZ_SHIFT), - zatm_dev->pool_base+pool*2); - zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+ - pool*2+1); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->last_free[pool] = NULL; - refill_pool(dev,pool); - } - DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count); -} - - -static void unuse_pool(struct atm_dev *dev,int pool) -{ - if (!(--ZATM_DEV(dev)->pool_info[pool].ref_count)) - drain_free(dev,pool); -} - -/*----------------------------------- RX ------------------------------------*/ - - -#if 0 -static void exception(struct atm_vcc *vcc) -{ - static int count = 0; - struct zatm_dev *zatm_dev = ZATM_DEV(vcc->dev); - struct zatm_vcc *zatm_vcc = ZATM_VCC(vcc); - unsigned long *qrp; - int i; - - if (count++ > 2) return; - for (i = 0; i < 8; i++) - printk("TX%d: 0x%08lx\n",i, - zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+i)); - for (i = 0; i < 5; i++) - printk("SH%d: 0x%08lx\n",i, - zpeekl(zatm_dev,uPD98401_IM(zatm_vcc->shaper)+16*i)); - qrp = (unsigned long *) zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ - uPD98401_TXVC_QRP); - printk("qrp=0x%08lx\n",(unsigned long) qrp); - for (i = 0; i < 4; i++) printk("QRP[%d]: 0x%08lx",i,qrp[i]); -} -#endif - - -static const char *err_txt[] = { - "No error", - "RX buf underflow", - "RX FIFO overrun", - "Maximum len violation", - "CRC error", - "User abort", - "Length violation", - "T1 error", - "Deactivated", - "???", - "???", - "???", - "???", - "???", - "???", - "???" -}; - - -static void poll_rx(struct atm_dev *dev,int mbx) -{ - struct zatm_dev *zatm_dev; - unsigned long pos; - u32 x; - int error; - - EVENT("poll_rx\n",0,0); - zatm_dev = ZATM_DEV(dev); - pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); - while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { - u32 *here; - struct sk_buff *skb; - struct atm_vcc *vcc; - int cells,size,chan; - - EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); - here = (u32 *) pos; - if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx]) - pos = zatm_dev->mbx_start[mbx]; - cells = here[0] & uPD98401_AAL5_SIZE; -#if 0 -printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]); -{ -unsigned long *x; - printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev, - zatm_dev->pool_base), - zpeekl(zatm_dev,zatm_dev->pool_base+1)); - x = (unsigned long *) here[2]; - printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n", - x[0],x[1],x[2],x[3]); -} -#endif - error = 0; - if (here[3] & uPD98401_AAL5_ERR) { - error = (here[3] & uPD98401_AAL5_ES) >> - uPD98401_AAL5_ES_SHIFT; - if (error == uPD98401_AAL5_ES_DEACT || - error == uPD98401_AAL5_ES_FREE) continue; - } -EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >> - uPD98401_AAL5_ES_SHIFT,error); - skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb; - __net_timestamp(skb); -#if 0 -printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3], - ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1], - ((unsigned *) skb->data)[0]); -#endif - EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb, - (unsigned long) here); -#if 0 -printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]); -#endif - size = error ? 
0 : ntohs(((__be16 *) skb->data)[cells* - ATM_CELL_PAYLOAD/sizeof(u16)-3]); - EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size); - chan = (here[3] & uPD98401_AAL5_CHAN) >> - uPD98401_AAL5_CHAN_SHIFT; - if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) { - int pos; - vcc = zatm_dev->rx_map[chan]; - pos = ZATM_VCC(vcc)->pool; - if (skb == zatm_dev->last_free[pos]) - zatm_dev->last_free[pos] = NULL; - skb_unlink(skb, zatm_dev->pool + pos); - } - else { - printk(KERN_ERR DEV_LABEL "(itf %d): RX indication " - "for non-existing channel\n",dev->number); - size = 0; - vcc = NULL; - event_dump(); - } - if (error) { - static unsigned long silence = 0; - static int last_error = 0; - - if (error != last_error || - time_after(jiffies, silence) || silence == 0){ - printk(KERN_WARNING DEV_LABEL "(itf %d): " - "chan %d error %s\n",dev->number,chan, - err_txt[error]); - last_error = error; - silence = (jiffies+2*HZ)|1; - } - size = 0; - } - if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER || - size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) { - printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d " - "cells\n",dev->number,size,cells); - size = 0; - event_dump(); - } - if (size > ATM_MAX_AAL5_PDU) { - printk(KERN_ERR DEV_LABEL "(itf %d): size too big " - "(%d)\n",dev->number,size); - size = 0; - event_dump(); - } - if (!size) { - dev_kfree_skb_irq(skb); - if (vcc) atomic_inc(&vcc->stats->rx_err); - continue; - } - if (!atm_charge(vcc,skb->truesize)) { - dev_kfree_skb_irq(skb); - continue; - } - skb->len = size; - ATM_SKB(skb)->vcc = vcc; - vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); - } - zout(pos & 0xffff,MTA(mbx)); -#if 0 /* probably a stupid idea */ - refill_pool(dev,zatm_vcc->pool); - /* maybe this saves us a few interrupts */ -#endif -} - - -static int open_rx_first(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - unsigned short chan; - int cells; - - DPRINTK("open_rx_first (0x%x)\n",inb_p(0xc053)); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - zatm_vcc->rx_chan = 0; - if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0; - if (vcc->qos.aal == ATM_AAL5) { - if (vcc->qos.rxtp.max_sdu > 65464) - vcc->qos.rxtp.max_sdu = 65464; - /* fix this - we may want to receive 64kB SDUs - later */ - cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER, - ATM_CELL_PAYLOAD); - zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); - } - else { - cells = 1; - zatm_vcc->pool = ZATM_AAL0_POOL; - } - if (zatm_vcc->pool < 0) return -EMSGSIZE; - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(uPD98401_OPEN_CHAN,CMR); - zwait(); - DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); - chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - DPRINTK("chan is %d\n",chan); - if (!chan) return -EAGAIN; - use_pool(vcc->dev,zatm_vcc->pool); - DPRINTK("pool %d\n",zatm_vcc->pool); - /* set up VC descriptor */ - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,zatm_vcc->pool << uPD98401_RXVC_POOL_SHIFT, - chan*VC_SIZE/4); - zpokel(zatm_dev,uPD98401_RXVC_OD | (vcc->qos.aal == ATM_AAL5 ? 
- uPD98401_RXVC_AR : 0) | cells,chan*VC_SIZE/4+1); - zpokel(zatm_dev,0,chan*VC_SIZE/4+2); - zatm_vcc->rx_chan = chan; - zatm_dev->rx_map[chan] = vcc; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return 0; -} - - -static int open_rx_second(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - int pos,shift; - - DPRINTK("open_rx_second (0x%x)\n",inb_p(0xc053)); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - if (!zatm_vcc->rx_chan) return 0; - spin_lock_irqsave(&zatm_dev->lock, flags); - /* should also handle VPI @@@ */ - pos = vcc->vci >> 1; - shift = (1-(vcc->vci & 1)) << 4; - zpokel(zatm_dev,(zpeekl(zatm_dev,pos) & ~(0xffff << shift)) | - ((zatm_vcc->rx_chan | uPD98401_RXLT_ENBL) << shift),pos); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return 0; -} - - -static void close_rx(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - int pos,shift; - - zatm_vcc = ZATM_VCC(vcc); - zatm_dev = ZATM_DEV(vcc->dev); - if (!zatm_vcc->rx_chan) return; - DPRINTK("close_rx\n"); - /* disable receiver */ - if (vcc->vpi != ATM_VPI_UNSPEC && vcc->vci != ATM_VCI_UNSPEC) { - spin_lock_irqsave(&zatm_dev->lock, flags); - pos = vcc->vci >> 1; - shift = (1-(vcc->vci & 1)) << 4; - zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos); - zwait(); - zout(uPD98401_NOP,CMR); - zwait(); - zout(uPD98401_NOP,CMR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - } - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << - uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait(); - udelay(10); /* why oh why ... ? */ - zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan << - uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait(); - if (!(zin(CMR) & uPD98401_CHAN_ADDR)) - printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel " - "%d\n",vcc->dev->number,zatm_vcc->rx_chan); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->rx_map[zatm_vcc->rx_chan] = NULL; - zatm_vcc->rx_chan = 0; - unuse_pool(vcc->dev,zatm_vcc->pool); -} - - -static int start_rx(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev; - int i; - - DPRINTK("start_rx\n"); - zatm_dev = ZATM_DEV(dev); - zatm_dev->rx_map = kcalloc(zatm_dev->chans, - sizeof(*zatm_dev->rx_map), - GFP_KERNEL); - if (!zatm_dev->rx_map) return -ENOMEM; - /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */ - zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR); - /* prepare free buffer pools */ - for (i = 0; i <= ZATM_LAST_POOL; i++) { - zatm_dev->pool_info[i].ref_count = 0; - zatm_dev->pool_info[i].rqa_count = 0; - zatm_dev->pool_info[i].rqu_count = 0; - zatm_dev->pool_info[i].low_water = LOW_MARK; - zatm_dev->pool_info[i].high_water = HIGH_MARK; - zatm_dev->pool_info[i].offset = 0; - zatm_dev->pool_info[i].next_off = 0; - zatm_dev->pool_info[i].next_cnt = 0; - zatm_dev->pool_info[i].next_thres = OFF_CNG_THRES; - } - return 0; -} - - -/*----------------------------------- TX ------------------------------------*/ - - -static int do_tx(struct sk_buff *skb) -{ - struct atm_vcc *vcc; - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - u32 *dsc; - unsigned long flags; - - EVENT("do_tx\n",0,0); - DPRINTK("sending skb %p\n",skb); - vcc = ATM_SKB(skb)->vcc; - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - EVENT("iovcnt=%d\n",skb_shinfo(skb)->nr_frags,0); - spin_lock_irqsave(&zatm_dev->lock, flags); - if (!skb_shinfo(skb)->nr_frags) 
{ - if (zatm_vcc->txing == RING_ENTRIES-1) { - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return RING_BUSY; - } - zatm_vcc->txing++; - dsc = zatm_vcc->ring+zatm_vcc->ring_curr; - zatm_vcc->ring_curr = (zatm_vcc->ring_curr+RING_WORDS) & - (RING_ENTRIES*RING_WORDS-1); - dsc[1] = 0; - dsc[2] = skb->len; - dsc[3] = virt_to_bus(skb->data); - mb(); - dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | uPD98401_TXPD_SM - | (vcc->qos.aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | - (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? - uPD98401_CLPM_1 : uPD98401_CLPM_0)); - EVENT("dsc (0x%lx)\n",(unsigned long) dsc,0); - } - else { -printk("NONONONOO!!!!\n"); - dsc = NULL; -#if 0 - u32 *put; - int i; - - dsc = kmalloc(uPD98401_TXPD_SIZE * 2 + - uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC); - if (!dsc) { - if (vcc->pop) - vcc->pop(vcc, skb); - else - dev_kfree_skb_irq(skb); - return -EAGAIN; - } - /* @@@ should check alignment */ - put = dsc+8; - dsc[0] = uPD98401_TXPD_V | uPD98401_TXPD_DP | - (vcc->aal == ATM_AAL5 ? uPD98401_TXPD_AAL5 : 0 | - (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? - uPD98401_CLPM_1 : uPD98401_CLPM_0)); - dsc[1] = 0; - dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE; - dsc[3] = virt_to_bus(put); - for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) { - *put++ = ((struct iovec *) skb->data)[i].iov_len; - *put++ = virt_to_bus(((struct iovec *) - skb->data)[i].iov_base); - } - put[-2] |= uPD98401_TXBD_LAST; -#endif - } - ZATM_PRV_DSC(skb) = dsc; - skb_queue_tail(&zatm_vcc->tx_queue,skb); - DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+ - uPD98401_TXVC_QRP)); - zwait(); - zout(uPD98401_TX_READY | (zatm_vcc->tx_chan << - uPD98401_CHAN_ADDR_SHIFT),CMR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - EVENT("done\n",0,0); - return 0; -} - - -static inline void dequeue_tx(struct atm_vcc *vcc) -{ - struct zatm_vcc *zatm_vcc; - struct sk_buff *skb; - - EVENT("dequeue_tx\n",0,0); - zatm_vcc = ZATM_VCC(vcc); - skb = skb_dequeue(&zatm_vcc->tx_queue); - if (!skb) { - printk(KERN_CRIT DEV_LABEL "(itf %d): dequeue_tx but not " - "txing\n",vcc->dev->number); - return; - } -#if 0 /* @@@ would fail on CLP */ -if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP | - uPD98401_TXPD_SM | uPD98401_TXPD_AAL5)) printk("@#*$!!!! (%08x)\n", - *ZATM_PRV_DSC(skb)); -#endif - *ZATM_PRV_DSC(skb) = 0; /* mark as invalid */ - zatm_vcc->txing--; - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb_irq(skb); - while ((skb = skb_dequeue(&zatm_vcc->backlog))) - if (do_tx(skb) == RING_BUSY) { - skb_queue_head(&zatm_vcc->backlog,skb); - break; - } - atomic_inc(&vcc->stats->tx); - wake_up(&zatm_vcc->tx_wait); -} - - -static void poll_tx(struct atm_dev *dev,int mbx) -{ - struct zatm_dev *zatm_dev; - unsigned long pos; - u32 x; - - EVENT("poll_tx\n",0,0); - zatm_dev = ZATM_DEV(dev); - pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx)); - while (x = zin(MWA(mbx)), (pos & 0xffff) != x) { - int chan; - -#if 1 - u32 data,*addr; - - EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x); - addr = (u32 *) pos; - data = *addr; - chan = (data & uPD98401_TXI_CONN) >> uPD98401_TXI_CONN_SHIFT; - EVENT("addr = 0x%lx, data = 0x%08x,",(unsigned long) addr, - data); - EVENT("chan = %d\n",chan,0); -#else -NO ! 
- chan = (zatm_dev->mbx_start[mbx][pos >> 2] & uPD98401_TXI_CONN) - >> uPD98401_TXI_CONN_SHIFT; -#endif - if (chan < zatm_dev->chans && zatm_dev->tx_map[chan]) - dequeue_tx(zatm_dev->tx_map[chan]); - else { - printk(KERN_CRIT DEV_LABEL "(itf %d): TX indication " - "for non-existing channel %d\n",dev->number,chan); - event_dump(); - } - if (((pos += 4) & 0xffff) == zatm_dev->mbx_end[mbx]) - pos = zatm_dev->mbx_start[mbx]; - } - zout(pos & 0xffff,MTA(mbx)); -} - - -/* - * BUG BUG BUG: Doesn't handle "new-style" rate specification yet. - */ - -static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - unsigned long i,m,c; - int shaper; - - DPRINTK("alloc_shaper (min = %d, max = %d)\n",min,max); - zatm_dev = ZATM_DEV(dev); - if (!zatm_dev->free_shapers) return -EAGAIN; - for (shaper = 0; !((zatm_dev->free_shapers >> shaper) & 1); shaper++); - zatm_dev->free_shapers &= ~1 << shaper; - if (ubr) { - c = 5; - i = m = 1; - zatm_dev->ubr_ref_cnt++; - zatm_dev->ubr = shaper; - *pcr = 0; - } - else { - if (min) { - if (min <= 255) { - i = min; - m = ATM_OC3_PCR; - } - else { - i = 255; - m = ATM_OC3_PCR*255/min; - } - } - else { - if (max > zatm_dev->tx_bw) max = zatm_dev->tx_bw; - if (max <= 255) { - i = max; - m = ATM_OC3_PCR; - } - else { - i = 255; - m = DIV_ROUND_UP(ATM_OC3_PCR*255, max); - } - } - if (i > m) { - printk(KERN_CRIT DEV_LABEL "shaper algorithm botched " - "[%d,%d] -> i=%ld,m=%ld\n",min,max,i,m); - m = i; - } - *pcr = i*ATM_OC3_PCR/m; - c = 20; /* @@@ should use max_cdv ! */ - if ((min && *pcr < min) || (max && *pcr > max)) return -EINVAL; - if (zatm_dev->tx_bw < *pcr) return -EAGAIN; - zatm_dev->tx_bw -= *pcr; - } - spin_lock_irqsave(&zatm_dev->lock, flags); - DPRINTK("i = %d, m = %d, PCR = %d\n",i,m,*pcr); - zpokel(zatm_dev,(i << uPD98401_IM_I_SHIFT) | m,uPD98401_IM(shaper)); - zpokel(zatm_dev,c << uPD98401_PC_C_SHIFT,uPD98401_PC(shaper)); - zpokel(zatm_dev,0,uPD98401_X(shaper)); - zpokel(zatm_dev,0,uPD98401_Y(shaper)); - zpokel(zatm_dev,uPD98401_PS_E,uPD98401_PS(shaper)); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return shaper; -} - - -static void dealloc_shaper(struct atm_dev *dev,int shaper) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - - zatm_dev = ZATM_DEV(dev); - if (shaper == zatm_dev->ubr) { - if (--zatm_dev->ubr_ref_cnt) return; - zatm_dev->ubr = -1; - } - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,zpeekl(zatm_dev,uPD98401_PS(shaper)) & ~uPD98401_PS_E, - uPD98401_PS(shaper)); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->free_shapers |= 1 << shaper; -} - - -static void close_tx(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - int chan; - - zatm_vcc = ZATM_VCC(vcc); - zatm_dev = ZATM_DEV(vcc->dev); - chan = zatm_vcc->tx_chan; - if (!chan) return; - DPRINTK("close_tx\n"); - if (skb_peek(&zatm_vcc->backlog)) { - printk("waiting for backlog to drain ...\n"); - event_dump(); - wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); - } - if (skb_peek(&zatm_vcc->tx_queue)) { - printk("waiting for TX queue to drain ...\n"); - event_dump(); - wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->tx_queue)); - } - spin_lock_irqsave(&zatm_dev->lock, flags); -#if 0 - zwait(); - zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); -#endif - zwait(); - zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR); - zwait(); - if (!(zin(CMR) & uPD98401_CHAN_ADDR)) - 
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel " - "%d\n",vcc->dev->number,chan); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_vcc->tx_chan = 0; - zatm_dev->tx_map[chan] = NULL; - if (zatm_vcc->shaper != zatm_dev->ubr) { - zatm_dev->tx_bw += vcc->qos.txtp.min_pcr; - dealloc_shaper(vcc->dev,zatm_vcc->shaper); - } - kfree(zatm_vcc->ring); -} - - -static int open_tx_first(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - u32 *loop; - unsigned short chan; - int unlimited; - - DPRINTK("open_tx_first\n"); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - zatm_vcc->tx_chan = 0; - if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; - spin_lock_irqsave(&zatm_dev->lock, flags); - zwait(); - zout(uPD98401_OPEN_CHAN,CMR); - zwait(); - DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER)); - chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - DPRINTK("chan is %d\n",chan); - if (!chan) return -EAGAIN; - unlimited = vcc->qos.txtp.traffic_class == ATM_UBR && - (!vcc->qos.txtp.max_pcr || vcc->qos.txtp.max_pcr == ATM_MAX_PCR || - vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); - if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; - else { - int pcr; - - if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; - if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, - vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited)) - < 0) { - close_tx(vcc); - return zatm_vcc->shaper; - } - if (pcr > ATM_OC3_PCR) pcr = ATM_OC3_PCR; - vcc->qos.txtp.min_pcr = vcc->qos.txtp.max_pcr = pcr; - } - zatm_vcc->tx_chan = chan; - skb_queue_head_init(&zatm_vcc->tx_queue); - init_waitqueue_head(&zatm_vcc->tx_wait); - /* initialize ring */ - zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL); - if (!zatm_vcc->ring) return -ENOMEM; - loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS; - loop[0] = uPD98401_TXPD_V; - loop[1] = loop[2] = 0; - loop[3] = virt_to_bus(zatm_vcc->ring); - zatm_vcc->ring_curr = 0; - zatm_vcc->txing = 0; - skb_queue_head_init(&zatm_vcc->backlog); - zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring), - chan*VC_SIZE/4+uPD98401_TXVC_QRP); - return 0; -} - - -static int open_tx_second(struct atm_vcc *vcc) -{ - struct zatm_dev *zatm_dev; - struct zatm_vcc *zatm_vcc; - unsigned long flags; - - DPRINTK("open_tx_second\n"); - zatm_dev = ZATM_DEV(vcc->dev); - zatm_vcc = ZATM_VCC(vcc); - if (!zatm_vcc->tx_chan) return 0; - /* set up VC descriptor */ - spin_lock_irqsave(&zatm_dev->lock, flags); - zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4); - zpokel(zatm_dev,uPD98401_TXVC_L | (zatm_vcc->shaper << - uPD98401_TXVC_SHP_SHIFT) | (vcc->vpi << uPD98401_TXVC_VPI_SHIFT) | - vcc->vci,zatm_vcc->tx_chan*VC_SIZE/4+1); - zpokel(zatm_dev,0,zatm_vcc->tx_chan*VC_SIZE/4+2); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - zatm_dev->tx_map[zatm_vcc->tx_chan] = vcc; - return 0; -} - - -static int start_tx(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev; - int i; - - DPRINTK("start_tx\n"); - zatm_dev = ZATM_DEV(dev); - zatm_dev->tx_map = kmalloc_array(zatm_dev->chans, - sizeof(*zatm_dev->tx_map), - GFP_KERNEL); - if (!zatm_dev->tx_map) return -ENOMEM; - zatm_dev->tx_bw = ATM_OC3_PCR; - zatm_dev->free_shapers = (1 << NR_SHAPERS)-1; - zatm_dev->ubr = -1; - zatm_dev->ubr_ref_cnt = 0; - /* initialize shapers */ - for (i = 0; i < NR_SHAPERS; i++) zpokel(zatm_dev,0,uPD98401_PS(i)); - return 0; -} - - -/*------------------------------- interrupts --------------------------------*/ - - -static 
irqreturn_t zatm_int(int irq,void *dev_id) -{ - struct atm_dev *dev; - struct zatm_dev *zatm_dev; - u32 reason; - int handled = 0; - - dev = dev_id; - zatm_dev = ZATM_DEV(dev); - while ((reason = zin(GSR))) { - handled = 1; - EVENT("reason 0x%x\n",reason,0); - if (reason & uPD98401_INT_PI) { - EVENT("PHY int\n",0,0); - dev->phy->interrupt(dev); - } - if (reason & uPD98401_INT_RQA) { - unsigned long pools; - int i; - - pools = zin(RQA); - EVENT("RQA (0x%08x)\n",pools,0); - for (i = 0; pools; i++) { - if (pools & 1) { - refill_pool(dev,i); - zatm_dev->pool_info[i].rqa_count++; - } - pools >>= 1; - } - } - if (reason & uPD98401_INT_RQU) { - unsigned long pools; - int i; - pools = zin(RQU); - printk(KERN_WARNING DEV_LABEL "(itf %d): RQU 0x%08lx\n", - dev->number,pools); - event_dump(); - for (i = 0; pools; i++) { - if (pools & 1) { - refill_pool(dev,i); - zatm_dev->pool_info[i].rqu_count++; - } - pools >>= 1; - } - } - /* don't handle RD */ - if (reason & uPD98401_INT_SPE) - printk(KERN_ALERT DEV_LABEL "(itf %d): system parity " - "error at 0x%08x\n",dev->number,zin(ADDR)); - if (reason & uPD98401_INT_CPE) - printk(KERN_ALERT DEV_LABEL "(itf %d): control memory " - "parity error at 0x%08x\n",dev->number,zin(ADDR)); - if (reason & uPD98401_INT_SBE) { - printk(KERN_ALERT DEV_LABEL "(itf %d): system bus " - "error at 0x%08x\n",dev->number,zin(ADDR)); - event_dump(); - } - /* don't handle IND */ - if (reason & uPD98401_INT_MF) { - printk(KERN_CRIT DEV_LABEL "(itf %d): mailbox full " - "(0x%x)\n",dev->number,(reason & uPD98401_INT_MF) - >> uPD98401_INT_MF_SHIFT); - event_dump(); - /* @@@ should try to recover */ - } - if (reason & uPD98401_INT_MM) { - if (reason & 1) poll_rx(dev,0); - if (reason & 2) poll_rx(dev,1); - if (reason & 4) poll_tx(dev,2); - if (reason & 8) poll_tx(dev,3); - } - /* @@@ handle RCRn */ - } - return IRQ_RETVAL(handled); -} - - -/*----------------------------- (E)EPROM access -----------------------------*/ - - -static void eprom_set(struct zatm_dev *zatm_dev, unsigned long value, - unsigned short cmd) -{ - int error; - - if ((error = pci_write_config_dword(zatm_dev->pci_dev,cmd,value))) - printk(KERN_ERR DEV_LABEL ": PCI write failed (0x%02x)\n", - error); -} - - -static unsigned long eprom_get(struct zatm_dev *zatm_dev, unsigned short cmd) -{ - unsigned int value; - int error; - - if ((error = pci_read_config_dword(zatm_dev->pci_dev,cmd,&value))) - printk(KERN_ERR DEV_LABEL ": PCI read failed (0x%02x)\n", - error); - return value; -} - - -static void eprom_put_bits(struct zatm_dev *zatm_dev, unsigned long data, - int bits, unsigned short cmd) -{ - unsigned long value; - int i; - - for (i = bits-1; i >= 0; i--) { - value = ZEPROM_CS | (((data >> i) & 1) ? 
ZEPROM_DI : 0); - eprom_set(zatm_dev,value,cmd); - eprom_set(zatm_dev,value | ZEPROM_SK,cmd); - eprom_set(zatm_dev,value,cmd); - } -} - - -static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte, - unsigned short cmd) -{ - int i; - - *byte = 0; - for (i = 8; i; i--) { - eprom_set(zatm_dev,ZEPROM_CS,cmd); - eprom_set(zatm_dev,ZEPROM_CS | ZEPROM_SK,cmd); - *byte <<= 1; - if (eprom_get(zatm_dev,cmd) & ZEPROM_DO) *byte |= 1; - eprom_set(zatm_dev,ZEPROM_CS,cmd); - } -} - - -static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset, - int swap) -{ - unsigned char buf[ZEPROM_SIZE]; - struct zatm_dev *zatm_dev; - int i; - - zatm_dev = ZATM_DEV(dev); - for (i = 0; i < ZEPROM_SIZE; i += 2) { - eprom_set(zatm_dev,ZEPROM_CS,cmd); /* select EPROM */ - eprom_put_bits(zatm_dev,ZEPROM_CMD_READ,ZEPROM_CMD_LEN,cmd); - eprom_put_bits(zatm_dev,i >> 1,ZEPROM_ADDR_LEN,cmd); - eprom_get_byte(zatm_dev,buf+i+swap,cmd); - eprom_get_byte(zatm_dev,buf+i+1-swap,cmd); - eprom_set(zatm_dev,0,cmd); /* deselect EPROM */ - } - memcpy(dev->esi,buf+offset,ESI_LEN); - return memcmp(dev->esi,"\0\0\0\0\0",ESI_LEN); /* assumes ESI_LEN == 6 */ -} - - -static void eprom_get_esi(struct atm_dev *dev) -{ - if (eprom_try_esi(dev,ZEPROM_V1_REG,ZEPROM_V1_ESI_OFF,1)) return; - (void) eprom_try_esi(dev,ZEPROM_V2_REG,ZEPROM_V2_ESI_OFF,0); -} - - -/*--------------------------------- entries ---------------------------------*/ - - -static int zatm_init(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev; - struct pci_dev *pci_dev; - unsigned short command; - int error,i,last; - unsigned long t0,t1,t2; - - DPRINTK(">zatm_init\n"); - zatm_dev = ZATM_DEV(dev); - spin_lock_init(&zatm_dev->lock); - pci_dev = zatm_dev->pci_dev; - zatm_dev->base = pci_resource_start(pci_dev, 0); - zatm_dev->irq = pci_dev->irq; - if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) { - printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n", - dev->number,error); - return -EINVAL; - } - if ((error = pci_write_config_word(pci_dev,PCI_COMMAND, - command | PCI_COMMAND_IO | PCI_COMMAND_MASTER))) { - printk(KERN_ERR DEV_LABEL "(itf %d): can't enable IO (0x%02x)" - "\n",dev->number,error); - return -EIO; - } - eprom_get_esi(dev); - printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,", - dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq); - /* reset uPD98401 */ - zout(0,SWR); - while (!(zin(GSR) & uPD98401_INT_IND)); - zout(uPD98401_GMR_ONE /*uPD98401_BURST4*/,GMR); - last = MAX_CRAM_SIZE; - for (i = last-RAM_INCREMENT; i >= 0; i -= RAM_INCREMENT) { - zpokel(zatm_dev,0x55555555,i); - if (zpeekl(zatm_dev,i) != 0x55555555) last = i; - else { - zpokel(zatm_dev,0xAAAAAAAA,i); - if (zpeekl(zatm_dev,i) != 0xAAAAAAAA) last = i; - else zpokel(zatm_dev,i,i); - } - } - for (i = 0; i < last; i += RAM_INCREMENT) - if (zpeekl(zatm_dev,i) != i) break; - zatm_dev->mem = i << 2; - while (i) zpokel(zatm_dev,0,--i); - /* reset again to rebuild memory pointers */ - zout(0,SWR); - while (!(zin(GSR) & uPD98401_INT_IND)); - zout(uPD98401_GMR_ONE | uPD98401_BURST8 | uPD98401_BURST4 | - uPD98401_BURST2 | uPD98401_GMR_PM | uPD98401_GMR_DR,GMR); - /* TODO: should shrink allocation now */ - printk("mem=%dkB,%s (",zatm_dev->mem >> 10,zatm_dev->copper ? "UTP" : - "MMF"); - for (i = 0; i < ESI_LEN; i++) - printk("%02X%s",dev->esi[i],i == ESI_LEN-1 ? 
")\n" : "-"); - do { - unsigned long flags; - - spin_lock_irqsave(&zatm_dev->lock, flags); - t0 = zpeekl(zatm_dev,uPD98401_TSR); - udelay(10); - t1 = zpeekl(zatm_dev,uPD98401_TSR); - udelay(1010); - t2 = zpeekl(zatm_dev,uPD98401_TSR); - spin_unlock_irqrestore(&zatm_dev->lock, flags); - } - while (t0 > t1 || t1 > t2); /* loop if wrapping ... */ - zatm_dev->khz = t2-2*t1+t0; - printk(KERN_NOTICE DEV_LABEL "(itf %d): uPD98401 %d.%d at %d.%03d " - "MHz\n",dev->number, - (zin(VER) & uPD98401_MAJOR) >> uPD98401_MAJOR_SHIFT, - zin(VER) & uPD98401_MINOR,zatm_dev->khz/1000,zatm_dev->khz % 1000); - return uPD98402_init(dev); -} - - -static int zatm_start(struct atm_dev *dev) -{ - struct zatm_dev *zatm_dev = ZATM_DEV(dev); - struct pci_dev *pdev = zatm_dev->pci_dev; - unsigned long curr; - int pools,vccs,rx; - int error, i, ld; - - DPRINTK("zatm_start\n"); - zatm_dev->rx_map = zatm_dev->tx_map = NULL; - for (i = 0; i < NR_MBX; i++) - zatm_dev->mbx_start[i] = 0; - error = request_irq(zatm_dev->irq, zatm_int, IRQF_SHARED, DEV_LABEL, dev); - if (error < 0) { - printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", - dev->number,zatm_dev->irq); - goto done; - } - /* define memory regions */ - pools = NR_POOLS; - if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE) - pools = NR_SHAPERS*SHAPER_SIZE/POOL_SIZE; - vccs = (zatm_dev->mem-NR_SHAPERS*SHAPER_SIZE-pools*POOL_SIZE)/ - (2*VC_SIZE+RX_SIZE); - ld = -1; - for (rx = 1; rx < vccs; rx <<= 1) ld++; - dev->ci_range.vpi_bits = 0; /* @@@ no VPI for now */ - dev->ci_range.vci_bits = ld; - dev->link_rate = ATM_OC3_PCR; - zatm_dev->chans = vccs; /* ??? */ - curr = rx*RX_SIZE/4; - DPRINTK("RX pool 0x%08lx\n",curr); - zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */ - zatm_dev->pool_base = curr; - curr += pools*POOL_SIZE/4; - DPRINTK("Shapers 0x%08lx\n",curr); - zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */ - curr += NR_SHAPERS*SHAPER_SIZE/4; - DPRINTK("Free 0x%08lx\n",curr); - zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */ - printk(KERN_INFO DEV_LABEL "(itf %d): %d shapers, %d pools, %d RX, " - "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx, - (zatm_dev->mem-curr*4)/VC_SIZE); - /* create mailboxes */ - for (i = 0; i < NR_MBX; i++) { - void *mbx; - dma_addr_t mbx_dma; - - if (!mbx_entries[i]) - continue; - mbx = dma_alloc_coherent(&pdev->dev, - 2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL); - if (!mbx) { - error = -ENOMEM; - goto out; - } - /* - * Alignment provided by dma_alloc_coherent() isn't enough - * for this device. 
- */ - if (((unsigned long)mbx ^ mbx_dma) & 0xffff) { - printk(KERN_ERR DEV_LABEL "(itf %d): system " - "bus incompatible with driver\n", dev->number); - dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma); - error = -ENODEV; - goto out; - } - DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i)); - zatm_dev->mbx_start[i] = (unsigned long)mbx; - zatm_dev->mbx_dma[i] = mbx_dma; - zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) & - 0xffff; - zout(mbx_dma >> 16, MSH(i)); - zout(mbx_dma, MSL(i)); - zout(zatm_dev->mbx_end[i], MBA(i)); - zout((unsigned long)mbx & 0xffff, MTA(i)); - zout((unsigned long)mbx & 0xffff, MWA(i)); - } - error = start_tx(dev); - if (error) - goto out; - error = start_rx(dev); - if (error) - goto out_tx; - error = dev->phy->start(dev); - if (error) - goto out_rx; - zout(0xffffffff,IMR); /* enable interrupts */ - /* enable TX & RX */ - zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR); -done: - return error; - -out_rx: - kfree(zatm_dev->rx_map); -out_tx: - kfree(zatm_dev->tx_map); -out: - while (i-- > 0) { - dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i), - (void *)zatm_dev->mbx_start[i], - zatm_dev->mbx_dma[i]); - } - free_irq(zatm_dev->irq, dev); - goto done; -} - - -static void zatm_close(struct atm_vcc *vcc) -{ - DPRINTK(">zatm_close\n"); - if (!ZATM_VCC(vcc)) return; - clear_bit(ATM_VF_READY,&vcc->flags); - close_rx(vcc); - EVENT("close_tx\n",0,0); - close_tx(vcc); - DPRINTK("zatm_close: done waiting\n"); - /* deallocate memory */ - kfree(ZATM_VCC(vcc)); - vcc->dev_data = NULL; - clear_bit(ATM_VF_ADDR,&vcc->flags); -} - - -static int zatm_open(struct atm_vcc *vcc) -{ - struct zatm_vcc *zatm_vcc; - short vpi = vcc->vpi; - int vci = vcc->vci; - int error; - - DPRINTK(">zatm_open\n"); - if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) - vcc->dev_data = NULL; - if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC) - set_bit(ATM_VF_ADDR,&vcc->flags); - if (vcc->qos.aal != ATM_AAL5) return -EINVAL; /* @@@ AAL0 */ - DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi, - vcc->vci); - if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { - zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL); - if (!zatm_vcc) { - clear_bit(ATM_VF_ADDR,&vcc->flags); - return -ENOMEM; - } - vcc->dev_data = zatm_vcc; - ZATM_VCC(vcc)->tx_chan = 0; /* for zatm_close after open_rx */ - if ((error = open_rx_first(vcc))) { - zatm_close(vcc); - return error; - } - if ((error = open_tx_first(vcc))) { - zatm_close(vcc); - return error; - } - } - if (vci == ATM_VPI_UNSPEC || vpi == ATM_VCI_UNSPEC) return 0; - if ((error = open_rx_second(vcc))) { - zatm_close(vcc); - return error; - } - if ((error = open_tx_second(vcc))) { - zatm_close(vcc); - return error; - } - set_bit(ATM_VF_READY,&vcc->flags); - return 0; -} - - -static int zatm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos,int flags) -{ - printk("Not yet implemented\n"); - return -ENOSYS; - /* @@@ */ -} - - -static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) -{ - struct zatm_dev *zatm_dev; - unsigned long flags; - - zatm_dev = ZATM_DEV(dev); - switch (cmd) { - case ZATM_GETPOOLZ: - if (!capable(CAP_NET_ADMIN)) return -EPERM; - fallthrough; - case ZATM_GETPOOL: - { - struct zatm_pool_info info; - int pool; - - if (get_user(pool, - &((struct zatm_pool_req __user *) arg)->pool_num)) - return -EFAULT; - if (pool < 0 || pool > ZATM_LAST_POOL) - return -EINVAL; - pool = array_index_nospec(pool, - ZATM_LAST_POOL + 1); - spin_lock_irqsave(&zatm_dev->lock, flags); - info = zatm_dev->pool_info[pool]; - if 
(cmd == ZATM_GETPOOLZ) { - zatm_dev->pool_info[pool].rqa_count = 0; - zatm_dev->pool_info[pool].rqu_count = 0; - } - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return copy_to_user( - &((struct zatm_pool_req __user *) arg)->info, - &info,sizeof(info)) ? -EFAULT : 0; - } - case ZATM_SETPOOL: - { - struct zatm_pool_info info; - int pool; - - if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (get_user(pool, - &((struct zatm_pool_req __user *) arg)->pool_num)) - return -EFAULT; - if (pool < 0 || pool > ZATM_LAST_POOL) - return -EINVAL; - pool = array_index_nospec(pool, - ZATM_LAST_POOL + 1); - if (copy_from_user(&info, - &((struct zatm_pool_req __user *) arg)->info, - sizeof(info))) return -EFAULT; - if (!info.low_water) - info.low_water = zatm_dev-> - pool_info[pool].low_water; - if (!info.high_water) - info.high_water = zatm_dev-> - pool_info[pool].high_water; - if (!info.next_thres) - info.next_thres = zatm_dev-> - pool_info[pool].next_thres; - if (info.low_water >= info.high_water || - info.low_water < 0) - return -EINVAL; - spin_lock_irqsave(&zatm_dev->lock, flags); - zatm_dev->pool_info[pool].low_water = - info.low_water; - zatm_dev->pool_info[pool].high_water = - info.high_water; - zatm_dev->pool_info[pool].next_thres = - info.next_thres; - spin_unlock_irqrestore(&zatm_dev->lock, flags); - return 0; - } - default: - if (!dev->phy->ioctl) return -ENOIOCTLCMD; - return dev->phy->ioctl(dev,cmd,arg); - } -} - -static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb) -{ - int error; - - EVENT(">zatm_send 0x%lx\n",(unsigned long) skb,0); - if (!ZATM_VCC(vcc)->tx_chan || !test_bit(ATM_VF_READY,&vcc->flags)) { - if (vcc->pop) vcc->pop(vcc,skb); - else dev_kfree_skb(skb); - return -EINVAL; - } - if (!skb) { - printk(KERN_CRIT "!skb in zatm_send ?\n"); - if (vcc->pop) vcc->pop(vcc,skb); - return -EINVAL; - } - ATM_SKB(skb)->vcc = vcc; - error = do_tx(skb); - if (error != RING_BUSY) return error; - skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb); - return 0; -} - - -static void zatm_phy_put(struct atm_dev *dev,unsigned char value, - unsigned long addr) -{ - struct zatm_dev *zatm_dev; - - zatm_dev = ZATM_DEV(dev); - zwait(); - zout(value,CER); - zout(uPD98401_IND_ACC | uPD98401_IA_B0 | - (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); -} - - -static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr) -{ - struct zatm_dev *zatm_dev; - - zatm_dev = ZATM_DEV(dev); - zwait(); - zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW | - (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR); - zwait(); - return zin(CER) & 0xff; -} - - -static const struct atmdev_ops ops = { - .open = zatm_open, - .close = zatm_close, - .ioctl = zatm_ioctl, - .send = zatm_send, - .phy_put = zatm_phy_put, - .phy_get = zatm_phy_get, - .change_qos = zatm_change_qos, -}; - -static int zatm_init_one(struct pci_dev *pci_dev, - const struct pci_device_id *ent) -{ - struct atm_dev *dev; - struct zatm_dev *zatm_dev; - int ret = -ENOMEM; - - zatm_dev = kmalloc(sizeof(*zatm_dev), GFP_KERNEL); - if (!zatm_dev) { - printk(KERN_EMERG "%s: memory shortage\n", DEV_LABEL); - goto out; - } - - dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); - if (!dev) - goto out_free; - - ret = pci_enable_device(pci_dev); - if (ret < 0) - goto out_deregister; - - ret = pci_request_regions(pci_dev, DEV_LABEL); - if (ret < 0) - goto out_disable; - - ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); - if (ret < 0) - goto out_release; - - zatm_dev->pci_dev = pci_dev; - dev->dev_data = 
zatm_dev; - zatm_dev->copper = (int)ent->driver_data; - if ((ret = zatm_init(dev)) || (ret = zatm_start(dev))) - goto out_release; - - pci_set_drvdata(pci_dev, dev); - zatm_dev->more = zatm_boards; - zatm_boards = dev; - ret = 0; -out: - return ret; - -out_release: - pci_release_regions(pci_dev); -out_disable: - pci_disable_device(pci_dev); -out_deregister: - atm_dev_deregister(dev); -out_free: - kfree(zatm_dev); - goto out; -} - - -MODULE_LICENSE("GPL"); - -static const struct pci_device_id zatm_pci_tbl[] = { - { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, - { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, - { 0, } -}; -MODULE_DEVICE_TABLE(pci, zatm_pci_tbl); - -static struct pci_driver zatm_driver = { - .name = DEV_LABEL, - .id_table = zatm_pci_tbl, - .probe = zatm_init_one, -}; - -static int __init zatm_init_module(void) -{ - return pci_register_driver(&zatm_driver); -} - -module_init(zatm_init_module); -/* module_exit not defined so not unloadable */ diff --git a/drivers/atm/zatm.h b/drivers/atm/zatm.h deleted file mode 100644 index 8204369fe825..000000000000 --- a/drivers/atm/zatm.h +++ /dev/null @@ -1,104 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* drivers/atm/zatm.h - ZeitNet ZN122x device driver declarations */ - -/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */ - - -#ifndef DRIVER_ATM_ZATM_H -#define DRIVER_ATM_ZATM_H - -#include -#include -#include -#include -#include - - -#define DEV_LABEL "zatm" - -#define MAX_AAL5_PDU 10240 /* allocate for AAL5 PDUs of this size */ -#define MAX_RX_SIZE_LD 14 /* ceil(log2((MAX_AAL5_PDU+47)/48)) */ - -#define LOW_MARK 12 /* start adding new buffers if less than 12 */ -#define HIGH_MARK 30 /* stop adding buffers after reaching 30 */ -#define OFF_CNG_THRES 5 /* threshold for offset changes */ - -#define RX_SIZE 2 /* RX lookup entry size (in bytes) */ -#define NR_POOLS 32 /* number of free buffer pointers */ -#define POOL_SIZE 8 /* buffer entry size (in bytes) */ -#define NR_SHAPERS 16 /* number of shapers */ -#define SHAPER_SIZE 4 /* shaper entry size (in bytes) */ -#define VC_SIZE 32 /* VC dsc (TX or RX) size (in bytes) */ - -#define RING_ENTRIES 32 /* ring entries (without back pointer) */ -#define RING_WORDS 4 /* ring element size */ -#define RING_SIZE (sizeof(unsigned long)*(RING_ENTRIES+1)*RING_WORDS) - -#define NR_MBX 4 /* four mailboxes */ -#define MBX_RX_0 0 /* mailbox indices */ -#define MBX_RX_1 1 -#define MBX_TX_0 2 -#define MBX_TX_1 3 - -struct zatm_vcc { - /*-------------------------------- RX part */ - int rx_chan; /* RX channel, 0 if none */ - int pool; /* free buffer pool */ - /*-------------------------------- TX part */ - int tx_chan; /* TX channel, 0 if none */ - int shaper; /* shaper, <0 if none */ - struct sk_buff_head tx_queue; /* list of buffers in transit */ - wait_queue_head_t tx_wait; /* for close */ - u32 *ring; /* transmit ring */ - int ring_curr; /* current write position */ - int txing; /* number of transmits in progress */ - struct sk_buff_head backlog; /* list of buffers waiting for ring */ -}; - -struct zatm_dev { - /*-------------------------------- TX part */ - int tx_bw; /* remaining bandwidth */ - u32 free_shapers; /* bit set */ - int ubr; /* UBR shaper; -1 if none */ - int ubr_ref_cnt; /* number of VCs using UBR shaper */ - /*-------------------------------- RX part */ - int pool_ref[NR_POOLS]; /* free buffer pool usage counters */ - volatile struct sk_buff *last_free[NR_POOLS]; - /* last entry in respective pool */ - struct sk_buff_head pool[NR_POOLS];/* free 
buffer pools */ - struct zatm_pool_info pool_info[NR_POOLS]; /* pool information */ - /*-------------------------------- maps */ - struct atm_vcc **tx_map; /* TX VCCs */ - struct atm_vcc **rx_map; /* RX VCCs */ - int chans; /* map size, must be 2^n */ - /*-------------------------------- mailboxes */ - unsigned long mbx_start[NR_MBX];/* start addresses */ - dma_addr_t mbx_dma[NR_MBX]; - u16 mbx_end[NR_MBX]; /* end offset (in bytes) */ - /*-------------------------------- other pointers */ - u32 pool_base; /* Free buffer pool dsc (word addr) */ - /*-------------------------------- ZATM links */ - struct atm_dev *more; /* other ZATM devices */ - /*-------------------------------- general information */ - int mem; /* RAM on board (in bytes) */ - int khz; /* timer clock */ - int copper; /* PHY type */ - unsigned char irq; /* IRQ */ - unsigned int base; /* IO base address */ - struct pci_dev *pci_dev; /* PCI stuff */ - spinlock_t lock; -}; - - -#define ZATM_DEV(d) ((struct zatm_dev *) (d)->dev_data) -#define ZATM_VCC(d) ((struct zatm_vcc *) (d)->dev_data) - - -struct zatm_skb_prv { - struct atm_skb_data _; /* reserved */ - u32 *dsc; /* pointer to skb's descriptor */ -}; - -#define ZATM_PRV_DSC(skb) (((struct zatm_skb_prv *) (skb)->cb)->dsc) - -#endif diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h deleted file mode 100644 index 5135027b93c1..000000000000 --- a/include/uapi/linux/atm_zatm.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* atm_zatm.h - Driver-specific declarations of the ZATM driver (for use by - driver-specific utilities) */ - -/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */ - - -#ifndef LINUX_ATM_ZATM_H -#define LINUX_ATM_ZATM_H -

/* - * Note: non-kernel programs including this file must also include - * sys/types.h for struct timeval - */ - -#include -#include - -#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) - /* get pool statistics */ -#define ZATM_GETPOOLZ _IOW('a',ATMIOC_SARPRV+2,struct atmif_sioc) - /* get statistics and zero */ -#define ZATM_SETPOOL _IOW('a',ATMIOC_SARPRV+3,struct atmif_sioc) - /* set pool parameters */ - -struct zatm_pool_info { - int ref_count; /* free buffer pool usage counters */ - int low_water,high_water; /* refill parameters */ - int rqa_count,rqu_count; /* queue condition counters */ - int offset,next_off; /* alignment optimizations: offset */ - int next_cnt,next_thres; /* repetition counter and threshold */ -}; - -struct zatm_pool_req { - int pool_num; /* pool number */ - struct zatm_pool_info info; /* actual information */ -}; - -#define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */ -#define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */ -#define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */ -#define ZATM_LAST_POOL ZATM_AAL5_POOL_BASE+10 /* max. 64 kB */ - -#define ZATM_TIMER_HISTORY_SIZE 16 /* number of timer adjustments to - record; must be 2^n */ - -#endif -- cgit v1.2.3 From 89fbca3307d40d5a48bc41919a543dc46298cf7a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Apr 2022 10:54:34 -0700 Subject: net: wan: remove support for COSA and SRP synchronous serial boards Looks like all the changes to this driver have been automated churn since the git era began. The driver is using virt_to_bus() so it should be updated to a proper DMA API or removed. Given the latest "news" entry on the website is from 1999 I'm opting for the latter.
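As an aside to the virt_to_bus() remark above: a minimal, purely hypothetical sketch of what one transmit mapping could look like under the generic DMA API. The device pointer, buffer, length, channel number and helper name below are illustrative stand-ins, not code from the driver or from this patch.

/* Hypothetical sketch: map a TX buffer through the generic DMA API
 * instead of trusting virt_to_bus() to yield a device-usable address.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <asm/dma.h>

static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 unsigned int dma_chan, dma_addr_t *handle)
{
	/* Obtain a DMA address valid for this device and buffer. */
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* Program the ISA DMA controller with the mapped address. */
	set_dma_addr(dma_chan, *handle);
	set_dma_count(dma_chan, len);
	return 0;
}

The mapping would then be released with dma_unmap_single() once the transfer completes.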
I'm marking the allocated char device major number as [REMOVED]; I reckon we can't reuse it in case some SW out there assumes it's COSA? Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- Documentation/admin-guide/devices.txt | 2 +- MAINTAINERS | 6 - drivers/net/wan/Kconfig | 22 - drivers/net/wan/Makefile | 1 - drivers/net/wan/cosa.c | 2052 --------------------------------- drivers/net/wan/cosa.h | 104 -- 6 files changed, 1 insertion(+), 2186 deletions(-) delete mode 100644 drivers/net/wan/cosa.c delete mode 100644 drivers/net/wan/cosa.h (limited to 'drivers') diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt index c07dc0ee860e..9764d6edb189 100644 --- a/Documentation/admin-guide/devices.txt +++ b/Documentation/admin-guide/devices.txt @@ -1933,7 +1933,7 @@ ... 255= /dev/umem/d15p15 15th partition of 16th board. - 117 char COSA/SRP synchronous serial card + 117 char [REMOVED] COSA/SRP synchronous serial card 0 = /dev/cosa0c0 1st board, 1st channel 1 = /dev/cosa0c1 1st board, 2nd channel ... diff --git a/MAINTAINERS b/MAINTAINERS index dd61684c2573..9619058bb0b9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5047,12 +5047,6 @@ S: Maintained F: Documentation/hwmon/corsair-psu.rst F: drivers/hwmon/corsair-psu.c -COSA/SRP SYNC SERIAL DRIVER -M: Jan "Yenya" Kasprzak -S: Maintained -W: http://www.fi.muni.cz/~kas/cosa/ -F: drivers/net/wan/cosa* - COUNTER SUBSYSTEM M: William Breathitt Gray L: linux-iio@vger.kernel.org diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 588b2333cdb8..12c5b6c67ab2 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -35,28 +35,6 @@ config HOSTESS_SV11 The driver will be compiled as a module: the module will be called hostess_sv11. -# The COSA/SRP driver has not been tested as non-modular yet. -config COSA - tristate "COSA/SRP sync serial boards support" - depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS - help - Driver for COSA and SRP synchronous serial boards. - - These boards allow to connect synchronous serial devices (for example - base-band modems, or any other device with the X.21, V.24, V.35 or - V.36 interface) to your Linux box. The cards can work as the - character device, synchronous PPP network device, or the Cisco HDLC - network device. - - You will need user-space utilities COSA or SRP boards for downloading - the firmware to the cards and to set them up. Look at the - for more information. You can also - read the comment at the top of the for - details about the cards and the driver itself. - - The driver will be compiled as a module: the - module will be called cosa. - # There is no way to detect a Sealevel board.
Force it modular config SEALEVEL_4021 tristate "Sealevel Systems 4021 support" diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 1cd42147b34f..901a094c061c 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -16,7 +16,6 @@ obj-$(CONFIG_HDLC_X25) += hdlc_x25.o obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o -obj-$(CONFIG_COSA) += cosa.o obj-$(CONFIG_FARSYNC) += farsync.o obj-$(CONFIG_LAPBETHER) += lapbether.o diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c deleted file mode 100644 index 23d2954d9747..000000000000 --- a/drivers/net/wan/cosa.c +++ /dev/null @@ -1,2052 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */ - -/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa - */ - -/* The driver for the SRP and COSA synchronous serial cards. - * - * HARDWARE INFO - * - * Both cards are developed at the Institute of Computer Science, - * Masaryk University (https://www.ics.muni.cz/). The hardware is - * developed by Jiri Novotny . More information - * and the photo of both cards is available at - * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares - * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/. - * For Linux-specific utilities, see below in the "Software info" section. - * If you want to order the card, contact Jiri Novotny. - * - * The SRP (serial port?, the Czech word "srp" means "sickle") card - * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card - * with V.24 interfaces up to 80kb/s each. - * - * The COSA (communication serial adapter?, the Czech word "kosa" means - * "scythe") is a next-generation sync/async board with two interfaces - * - currently any of V.24, X.21, V.35 and V.36 can be selected. - * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel. - * The 8-channels version is in development. - * - * Both types have downloadable firmware and communicate via ISA DMA. - * COSA can be also a bus-mastering device. - * - * SOFTWARE INFO - * - * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/. - * The CVS tree of Linux driver can be viewed there, as well as the - * firmware binaries and user-space utilities for downloading the firmware - * into the card and setting up the card. - * - * The Linux driver (unlike the present *BSD drivers :-) can work even - * for the COSA and SRP in one computer and allows each channel to work - * in one of the two modes (character or network device). - * - * AUTHOR - * - * The Linux driver was written by Jan "Yenya" Kasprzak . - * - * You can mail me bugfixes and even success reports. I am especially - * interested in the SMP and/or muliti-channel success/failure reports - * (I wonder if I did the locking properly :-). 
- * - * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER - * - * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek - * The skeleton.c by Donald Becker - * The SDL Riscom/N2 driver by Mike Natale - * The Comtrol Hostess SV11 driver by Alan Cox - * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#undef COSA_SLOW_IO /* for testing purposes only */ - -#include "cosa.h" - -/* Maximum length of the identification string. */ -#define COSA_MAX_ID_STRING 128 - -/* Maximum length of the channel name */ -#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1) - -/* Per-channel data structure */ - -struct channel_data { - int usage; /* Usage count; >0 for chrdev, -1 for netdev */ - int num; /* Number of the channel */ - struct cosa_data *cosa; /* Pointer to the per-card structure */ - int txsize; /* Size of transmitted data */ - char *txbuf; /* Transmit buffer */ - char name[COSA_MAX_NAME]; /* channel name */ - - /* The HW layer interface */ - /* routine called from the RX interrupt */ - char *(*setup_rx)(struct channel_data *channel, int size); - /* routine called when the RX is done (from the EOT interrupt) */ - int (*rx_done)(struct channel_data *channel); - /* routine called when the TX is done (from the EOT interrupt) */ - int (*tx_done)(struct channel_data *channel, int size); - - /* Character device parts */ - struct mutex rlock; - struct semaphore wsem; - char *rxdata; - int rxsize; - wait_queue_head_t txwaitq, rxwaitq; - int tx_status, rx_status; - - /* generic HDLC device parts */ - struct net_device *netdev; - struct sk_buff *rx_skb, *tx_skb; -}; - -/* cosa->firmware_status bits */ -#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? */ -#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */ -#define COSA_FW_START BIT(2) /* Is the microcode running? */ - -struct cosa_data { - int num; /* Card number */ - char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */ - unsigned int datareg, statusreg; /* I/O ports */ - unsigned short irq, dma; /* IRQ and DMA number */ - unsigned short startaddr; /* Firmware start address */ - unsigned short busmaster; /* Use busmastering? */ - int nchannels; /* # of channels on this card */ - int driver_status; /* For communicating with firmware */ - int firmware_status; /* Downloaded, reseted, etc. */ - unsigned long rxbitmap, txbitmap;/* Bitmap of channels who are willing to send/receive data */ - unsigned long rxtx; /* RX or TX in progress? */ - int enabled; - int usage; /* usage count */ - int txchan, txsize, rxsize; - struct channel_data *rxchan; - char *bouncebuf; - char *txbuf, *rxbuf; - struct channel_data *chan; - spinlock_t lock; /* For exclusive operations on this structure */ - char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */ - char *type; /* card type */ -}; - -/* Define this if you want all the possible ports to be autoprobed. - * It is here but it probably is not a good idea to use this. - */ -/* #define COSA_ISA_AUTOPROBE 1*/ - -/* Character device major number. 117 was allocated for us. - * The value of 0 means to allocate a first free one. 
- */ -static DEFINE_MUTEX(cosa_chardev_mutex); -static int cosa_major = 117; - -/* Encoding of the minor numbers: - * The lowest CARD_MINOR_BITS bits means the channel on the single card, - * the highest bits means the card number. - */ -#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved - * for the single card - */ -/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING" - * macro doesn't like anything other than the raw number as an argument :-( - */ -#define MAX_CARDS 16 -/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */ - -#define DRIVER_RX_READY 0x0001 -#define DRIVER_TX_READY 0x0002 -#define DRIVER_TXMAP_SHIFT 2 -#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */ - -/* for cosa->rxtx - indicates whether either transmit or receive is - * in progress. These values are mean number of the bit. - */ -#define TXBIT 0 -#define RXBIT 1 -#define IRQBIT 2 - -#define COSA_MTU 2000 /* FIXME: I don't know this exactly */ - -#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */ -#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */ -#undef DEBUG_IO //1 /* Dump the I/O traffic */ - -#define TX_TIMEOUT (5 * HZ) - -/* Maybe the following should be allocated dynamically */ -static struct cosa_data cosa_cards[MAX_CARDS]; -static int nr_cards; - -#ifdef COSA_ISA_AUTOPROBE -static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,}; -/* NOTE: DMA is not autoprobed!!! */ -static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,}; -#else -static int io[MAX_CARDS + 1]; -static int dma[MAX_CARDS + 1]; -#endif -/* IRQ can be safely autoprobed */ -static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,}; - -/* for class stuff*/ -static struct class *cosa_class; - -#ifdef MODULE -module_param_hw_array(io, int, ioport, NULL, 0); -MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards"); -module_param_hw_array(irq, int, irq, NULL, 0); -MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards"); -module_param_hw_array(dma, int, dma, NULL, 0); -MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards"); - -MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, "); -MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card"); -MODULE_LICENSE("GPL"); -#endif - -/* I use this mainly for testing purposes */ -#ifdef COSA_SLOW_IO -#define cosa_outb outb_p -#define cosa_outw outw_p -#define cosa_inb inb_p -#define cosa_inw inw_p -#else -#define cosa_outb outb -#define cosa_outw outw -#define cosa_inb inb -#define cosa_inw inw -#endif - -#define is_8bit(cosa) (!((cosa)->datareg & 0x08)) - -#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg)) -#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg)) -#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg)) -#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg)) -#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg)) -#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg)) - -/* Initialization stuff */ -static int cosa_probe(int ioaddr, int irq, int dma); - -/* HW interface */ -static void cosa_enable_rx(struct channel_data *chan); -static void cosa_disable_rx(struct channel_data *chan); -static int cosa_start_tx(struct channel_data *channel, char *buf, int size); -static void cosa_kick(struct cosa_data *cosa); -static int cosa_dma_able(struct channel_data *chan, char *buf, int data); - -/* Network device stuff */ -static int cosa_net_attach(struct net_device *dev, unsigned short encoding, - unsigned 
short parity); -static int cosa_net_open(struct net_device *d); -static int cosa_net_close(struct net_device *d); -static void cosa_net_timeout(struct net_device *d, unsigned int txqueue); -static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d); -static char *cosa_net_setup_rx(struct channel_data *channel, int size); -static int cosa_net_rx_done(struct channel_data *channel); -static int cosa_net_tx_done(struct channel_data *channel, int size); - -/* Character device */ -static char *chrdev_setup_rx(struct channel_data *channel, int size); -static int chrdev_rx_done(struct channel_data *channel); -static int chrdev_tx_done(struct channel_data *channel, int size); -static ssize_t cosa_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos); -static ssize_t cosa_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos); -static unsigned int cosa_poll(struct file *file, poll_table *poll); -static int cosa_open(struct inode *inode, struct file *file); -static int cosa_release(struct inode *inode, struct file *file); -static long cosa_chardev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); -#ifdef COSA_FASYNC_WORKING -static int cosa_fasync(struct inode *inode, struct file *file, int on); -#endif - -static const struct file_operations cosa_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .read = cosa_read, - .write = cosa_write, - .poll = cosa_poll, - .unlocked_ioctl = cosa_chardev_ioctl, - .open = cosa_open, - .release = cosa_release, -#ifdef COSA_FASYNC_WORKING - .fasync = cosa_fasync, -#endif -}; - -/* Ioctls */ -static int cosa_start(struct cosa_data *cosa, int address); -static int cosa_reset(struct cosa_data *cosa); -static int cosa_download(struct cosa_data *cosa, void __user *a); -static int cosa_readmem(struct cosa_data *cosa, void __user *a); - -/* COSA/SRP ROM monitor */ -static int download(struct cosa_data *cosa, const char __user *data, int addr, int len); -static int startmicrocode(struct cosa_data *cosa, int address); -static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len); -static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id); - -/* Auxiliary functions */ -static int get_wait_data(struct cosa_data *cosa); -static int put_wait_data(struct cosa_data *cosa, int data); -static int puthexnumber(struct cosa_data *cosa, int number); -static void put_driver_status(struct cosa_data *cosa); -static void put_driver_status_nolock(struct cosa_data *cosa); - -/* Interrupt handling */ -static irqreturn_t cosa_interrupt(int irq, void *cosa); - -/* I/O ops debugging */ -#ifdef DEBUG_IO -static void debug_data_in(struct cosa_data *cosa, int data); -static void debug_data_out(struct cosa_data *cosa, int data); -static void debug_data_cmd(struct cosa_data *cosa, int data); -static void debug_status_in(struct cosa_data *cosa, int status); -static void debug_status_out(struct cosa_data *cosa, int status); -#endif - -static inline struct channel_data *dev_to_chan(struct net_device *dev) -{ - return (struct channel_data *)dev_to_hdlc(dev)->priv; -} - -/* ---------- Initialization stuff ---------- */ - -static int __init cosa_init(void) -{ - int i, err = 0; - - if (cosa_major > 0) { - if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { - pr_warn("unable to get major %d\n", cosa_major); - err = -EIO; - goto out; - } - } else { - cosa_major = register_chrdev(0, "cosa", &cosa_fops); - if (!cosa_major) { - pr_warn("unable to register chardev\n"); - err = -EIO; - goto out; - } - } - 
for (i = 0; i < MAX_CARDS; i++) - cosa_cards[i].num = -1; - for (i = 0; io[i] != 0 && i < MAX_CARDS; i++) - cosa_probe(io[i], irq[i], dma[i]); - if (!nr_cards) { - pr_warn("no devices found\n"); - unregister_chrdev(cosa_major, "cosa"); - err = -ENODEV; - goto out; - } - cosa_class = class_create(THIS_MODULE, "cosa"); - if (IS_ERR(cosa_class)) { - err = PTR_ERR(cosa_class); - goto out_chrdev; - } - for (i = 0; i < nr_cards; i++) - device_create(cosa_class, NULL, MKDEV(cosa_major, i), NULL, - "cosa%d", i); - err = 0; - goto out; - -out_chrdev: - unregister_chrdev(cosa_major, "cosa"); -out: - return err; -} -module_init(cosa_init); - -static void __exit cosa_exit(void) -{ - struct cosa_data *cosa; - int i; - - for (i = 0; i < nr_cards; i++) - device_destroy(cosa_class, MKDEV(cosa_major, i)); - class_destroy(cosa_class); - - for (cosa = cosa_cards; nr_cards--; cosa++) { - /* Clean up the per-channel data */ - for (i = 0; i < cosa->nchannels; i++) { - /* Chardev driver has no alloc'd per-channel data */ - unregister_hdlc_device(cosa->chan[i].netdev); - free_netdev(cosa->chan[i].netdev); - } - /* Clean up the per-card data */ - kfree(cosa->chan); - kfree(cosa->bouncebuf); - free_irq(cosa->irq, cosa); - free_dma(cosa->dma); - release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4); - } - unregister_chrdev(cosa_major, "cosa"); -} -module_exit(cosa_exit); - -static const struct net_device_ops cosa_ops = { - .ndo_open = cosa_net_open, - .ndo_stop = cosa_net_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, - .ndo_tx_timeout = cosa_net_timeout, -}; - -static int cosa_probe(int base, int irq, int dma) -{ - struct cosa_data *cosa = cosa_cards + nr_cards; - int i, err = 0; - - memset(cosa, 0, sizeof(struct cosa_data)); - - /* Checking validity of parameters: */ - /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */ - if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) { - pr_info("invalid IRQ %d\n", irq); - return -1; - } - /* I/O address should be between 0x100 and 0x3ff and should be - * multiple of 8. - */ - if (base < 0x100 || base > 0x3ff || base & 0x7) { - pr_info("invalid I/O address 0x%x\n", base); - return -1; - } - /* DMA should be 0,1 or 3-7 */ - if (dma < 0 || dma == 4 || dma > 7) { - pr_info("invalid DMA %d\n", dma); - return -1; - } - /* and finally, on 16-bit COSA DMA should be 4-7 and - * I/O base should not be multiple of 0x10 - */ - if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) { - pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n", - base, dma); - return -1; - } - - cosa->dma = dma; - cosa->datareg = base; - cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2; - spin_lock_init(&cosa->lock); - - if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa")) - return -1; - - if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) { - printk(KERN_DEBUG "probe at 0x%x failed.\n", base); - err = -1; - goto err_out; - } - - /* Test the validity of identification string */ - if (!strncmp(cosa->id_string, "SRP", 3)) { - cosa->type = "srp"; - } else if (!strncmp(cosa->id_string, "COSA", 4)) { - cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16"; - } else { -/* Print a warning only if we are not autoprobing */ -#ifndef COSA_ISA_AUTOPROBE - pr_info("valid signature not found at 0x%x\n", base); -#endif - err = -1; - goto err_out; - } - /* Update the name of the region now we know the type of card */ - release_region(base, is_8bit(cosa) ? 2 : 4); - if (!request_region(base, is_8bit(cosa) ? 
2 : 4, cosa->type)) { - printk(KERN_DEBUG "changing name at 0x%x failed.\n", base); - return -1; - } - - /* Now do IRQ autoprobe */ - if (irq < 0) { - unsigned long irqs; -/* pr_info("IRQ autoprobe\n"); */ - irqs = probe_irq_on(); - /* Enable interrupt on tx buffer empty (it sure is) - * really sure ? - * FIXME: When this code is not used as module, we should - * probably call udelay() instead of the interruptible sleep. - */ - set_current_state(TASK_INTERRUPTIBLE); - cosa_putstatus(cosa, SR_TX_INT_ENA); - schedule_timeout(msecs_to_jiffies(300)); - irq = probe_irq_off(irqs); - /* Disable all IRQs from the card */ - cosa_putstatus(cosa, 0); - /* Empty the received data register */ - cosa_getdata8(cosa); - - if (irq < 0) { - pr_info("multiple interrupts obtained (%d, board at 0x%x)\n", - irq, cosa->datareg); - err = -1; - goto err_out; - } - if (irq == 0) { - pr_info("no interrupt obtained (board at 0x%x)\n", - cosa->datareg); - /* return -1; */ - } - } - - cosa->irq = irq; - cosa->num = nr_cards; - cosa->usage = 0; - cosa->nchannels = 2; /* FIXME: how to determine this? */ - - if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) { - err = -1; - goto err_out; - } - if (request_dma(cosa->dma, cosa->type)) { - err = -1; - goto err_out1; - } - - cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA); - if (!cosa->bouncebuf) { - err = -ENOMEM; - goto err_out2; - } - sprintf(cosa->name, "cosa%d", cosa->num); - - /* Initialize the per-channel data */ - cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); - if (!cosa->chan) { - err = -ENOMEM; - goto err_out3; - } - - for (i = 0; i < cosa->nchannels; i++) { - struct channel_data *chan = &cosa->chan[i]; - - chan->cosa = cosa; - chan->num = i; - sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i); - - /* Initialize the chardev data structures */ - mutex_init(&chan->rlock); - sema_init(&chan->wsem, 1); - - /* Register the network interface */ - chan->netdev = alloc_hdlcdev(chan); - if (!chan->netdev) { - pr_warn("%s: alloc_hdlcdev failed\n", chan->name); - err = -ENOMEM; - goto err_hdlcdev; - } - dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; - dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; - chan->netdev->netdev_ops = &cosa_ops; - chan->netdev->watchdog_timeo = TX_TIMEOUT; - chan->netdev->base_addr = chan->cosa->datareg; - chan->netdev->irq = chan->cosa->irq; - chan->netdev->dma = chan->cosa->dma; - err = register_hdlc_device(chan->netdev); - if (err) { - netdev_warn(chan->netdev, - "register_hdlc_device() failed\n"); - free_netdev(chan->netdev); - goto err_hdlcdev; - } - } - - pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", - cosa->num, cosa->id_string, cosa->type, - cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); - - return nr_cards++; - -err_hdlcdev: - while (i-- > 0) { - unregister_hdlc_device(cosa->chan[i].netdev); - free_netdev(cosa->chan[i].netdev); - } - kfree(cosa->chan); -err_out3: - kfree(cosa->bouncebuf); -err_out2: - free_dma(cosa->dma); -err_out1: - free_irq(cosa->irq, cosa); -err_out: - release_region(cosa->datareg, is_8bit(cosa) ? 
2 : 4); - pr_notice("cosa%d: allocating resources failed\n", cosa->num); - return err; -} - -/*---------- network device ---------- */ - -static int cosa_net_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -static int cosa_net_open(struct net_device *dev) -{ - struct channel_data *chan = dev_to_chan(dev); - int err; - unsigned long flags; - - if (!(chan->cosa->firmware_status & COSA_FW_START)) { - pr_notice("%s: start the firmware first (status %d)\n", - chan->cosa->name, chan->cosa->firmware_status); - return -EPERM; - } - spin_lock_irqsave(&chan->cosa->lock, flags); - if (chan->usage != 0) { - pr_warn("%s: cosa_net_open called with usage count %d\n", - chan->name, chan->usage); - spin_unlock_irqrestore(&chan->cosa->lock, flags); - return -EBUSY; - } - chan->setup_rx = cosa_net_setup_rx; - chan->tx_done = cosa_net_tx_done; - chan->rx_done = cosa_net_rx_done; - chan->usage = -1; - chan->cosa->usage++; - spin_unlock_irqrestore(&chan->cosa->lock, flags); - - err = hdlc_open(dev); - if (err) { - spin_lock_irqsave(&chan->cosa->lock, flags); - chan->usage = 0; - chan->cosa->usage--; - spin_unlock_irqrestore(&chan->cosa->lock, flags); - return err; - } - - netif_start_queue(dev); - cosa_enable_rx(chan); - return 0; -} - -static netdev_tx_t cosa_net_tx(struct sk_buff *skb, - struct net_device *dev) -{ - struct channel_data *chan = dev_to_chan(dev); - - netif_stop_queue(dev); - - chan->tx_skb = skb; - cosa_start_tx(chan, skb->data, skb->len); - return NETDEV_TX_OK; -} - -static void cosa_net_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct channel_data *chan = dev_to_chan(dev); - - if (test_bit(RXBIT, &chan->cosa->rxtx)) { - chan->netdev->stats.rx_errors++; - chan->netdev->stats.rx_missed_errors++; - } else { - chan->netdev->stats.tx_errors++; - chan->netdev->stats.tx_aborted_errors++; - } - cosa_kick(chan->cosa); - if (chan->tx_skb) { - dev_kfree_skb(chan->tx_skb); - chan->tx_skb = NULL; - } - netif_wake_queue(dev); -} - -static int cosa_net_close(struct net_device *dev) -{ - struct channel_data *chan = dev_to_chan(dev); - unsigned long flags; - - netif_stop_queue(dev); - hdlc_close(dev); - cosa_disable_rx(chan); - spin_lock_irqsave(&chan->cosa->lock, flags); - if (chan->rx_skb) { - kfree_skb(chan->rx_skb); - chan->rx_skb = NULL; - } - if (chan->tx_skb) { - kfree_skb(chan->tx_skb); - chan->tx_skb = NULL; - } - chan->usage = 0; - chan->cosa->usage--; - spin_unlock_irqrestore(&chan->cosa->lock, flags); - return 0; -} - -static char *cosa_net_setup_rx(struct channel_data *chan, int size) -{ - /* We can safely fall back to non-dma-able memory, because we have - * the cosa->bouncebuf pre-allocated. 
- */ - kfree_skb(chan->rx_skb); - chan->rx_skb = dev_alloc_skb(size); - if (!chan->rx_skb) { - pr_notice("%s: Memory squeeze, dropping packet\n", chan->name); - chan->netdev->stats.rx_dropped++; - return NULL; - } - netif_trans_update(chan->netdev); - return skb_put(chan->rx_skb, size); -} - -static int cosa_net_rx_done(struct channel_data *chan) -{ - if (!chan->rx_skb) { - pr_warn("%s: rx_done with empty skb!\n", chan->name); - chan->netdev->stats.rx_errors++; - chan->netdev->stats.rx_frame_errors++; - return 0; - } - chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev); - chan->rx_skb->dev = chan->netdev; - skb_reset_mac_header(chan->rx_skb); - chan->netdev->stats.rx_packets++; - chan->netdev->stats.rx_bytes += chan->cosa->rxsize; - netif_rx(chan->rx_skb); - chan->rx_skb = NULL; - return 0; -} - -/* ARGSUSED */ -static int cosa_net_tx_done(struct channel_data *chan, int size) -{ - if (!chan->tx_skb) { - pr_warn("%s: tx_done with empty skb!\n", chan->name); - chan->netdev->stats.tx_errors++; - chan->netdev->stats.tx_aborted_errors++; - return 1; - } - dev_consume_skb_irq(chan->tx_skb); - chan->tx_skb = NULL; - chan->netdev->stats.tx_packets++; - chan->netdev->stats.tx_bytes += size; - netif_wake_queue(chan->netdev); - return 1; -} - -/*---------- Character device ---------- */ - -static ssize_t cosa_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - DECLARE_WAITQUEUE(wait, current); - unsigned long flags; - struct channel_data *chan = file->private_data; - struct cosa_data *cosa = chan->cosa; - char *kbuf; - - if (!(cosa->firmware_status & COSA_FW_START)) { - pr_notice("%s: start the firmware first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - if (mutex_lock_interruptible(&chan->rlock)) - return -ERESTARTSYS; - - chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL); - if (!chan->rxdata) { - mutex_unlock(&chan->rlock); - return -ENOMEM; - } - - chan->rx_status = 0; - cosa_enable_rx(chan); - spin_lock_irqsave(&cosa->lock, flags); - add_wait_queue(&chan->rxwaitq, &wait); - while (!chan->rx_status) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&cosa->lock, flags); - schedule(); - spin_lock_irqsave(&cosa->lock, flags); - if (signal_pending(current) && chan->rx_status == 0) { - chan->rx_status = 1; - remove_wait_queue(&chan->rxwaitq, &wait); - __set_current_state(TASK_RUNNING); - spin_unlock_irqrestore(&cosa->lock, flags); - mutex_unlock(&chan->rlock); - return -ERESTARTSYS; - } - } - remove_wait_queue(&chan->rxwaitq, &wait); - __set_current_state(TASK_RUNNING); - kbuf = chan->rxdata; - count = chan->rxsize; - spin_unlock_irqrestore(&cosa->lock, flags); - mutex_unlock(&chan->rlock); - - if (copy_to_user(buf, kbuf, count)) { - kfree(kbuf); - return -EFAULT; - } - kfree(kbuf); - return count; -} - -static char *chrdev_setup_rx(struct channel_data *chan, int size) -{ - /* Expect size <= COSA_MTU */ - chan->rxsize = size; - return chan->rxdata; -} - -static int chrdev_rx_done(struct channel_data *chan) -{ - if (chan->rx_status) { /* Reader has died */ - kfree(chan->rxdata); - up(&chan->wsem); - } - chan->rx_status = 1; - wake_up_interruptible(&chan->rxwaitq); - return 1; -} - -static ssize_t cosa_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - DECLARE_WAITQUEUE(wait, current); - struct channel_data *chan = file->private_data; - struct cosa_data *cosa = chan->cosa; - unsigned long flags; - char *kbuf; - - if (!(cosa->firmware_status & COSA_FW_START)) { - pr_notice("%s: 
start the firmware first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - if (down_interruptible(&chan->wsem)) - return -ERESTARTSYS; - - if (count > COSA_MTU) - count = COSA_MTU; - - /* Allocate the buffer */ - kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA); - if (!kbuf) { - up(&chan->wsem); - return -ENOMEM; - } - if (copy_from_user(kbuf, buf, count)) { - up(&chan->wsem); - kfree(kbuf); - return -EFAULT; - } - chan->tx_status = 0; - cosa_start_tx(chan, kbuf, count); - - spin_lock_irqsave(&cosa->lock, flags); - add_wait_queue(&chan->txwaitq, &wait); - while (!chan->tx_status) { - set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irqrestore(&cosa->lock, flags); - schedule(); - spin_lock_irqsave(&cosa->lock, flags); - if (signal_pending(current) && chan->tx_status == 0) { - chan->tx_status = 1; - remove_wait_queue(&chan->txwaitq, &wait); - __set_current_state(TASK_RUNNING); - chan->tx_status = 1; - spin_unlock_irqrestore(&cosa->lock, flags); - up(&chan->wsem); - kfree(kbuf); - return -ERESTARTSYS; - } - } - remove_wait_queue(&chan->txwaitq, &wait); - __set_current_state(TASK_RUNNING); - up(&chan->wsem); - spin_unlock_irqrestore(&cosa->lock, flags); - kfree(kbuf); - return count; -} - -static int chrdev_tx_done(struct channel_data *chan, int size) -{ - if (chan->tx_status) { /* Writer was interrupted */ - kfree(chan->txbuf); - up(&chan->wsem); - } - chan->tx_status = 1; - wake_up_interruptible(&chan->txwaitq); - return 1; -} - -static __poll_t cosa_poll(struct file *file, poll_table *poll) -{ - pr_info("cosa_poll is here\n"); - return 0; -} - -static int cosa_open(struct inode *inode, struct file *file) -{ - struct cosa_data *cosa; - struct channel_data *chan; - unsigned long flags; - int n; - int ret = 0; - - mutex_lock(&cosa_chardev_mutex); - n = iminor(file_inode(file)) >> CARD_MINOR_BITS; - if (n >= nr_cards) { - ret = -ENODEV; - goto out; - } - cosa = cosa_cards + n; - - n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1); - if (n >= cosa->nchannels) { - ret = -ENODEV; - goto out; - } - chan = cosa->chan + n; - - file->private_data = chan; - - spin_lock_irqsave(&cosa->lock, flags); - - if (chan->usage < 0) { /* in netdev mode */ - spin_unlock_irqrestore(&cosa->lock, flags); - ret = -EBUSY; - goto out; - } - cosa->usage++; - chan->usage++; - - chan->tx_done = chrdev_tx_done; - chan->setup_rx = chrdev_setup_rx; - chan->rx_done = chrdev_rx_done; - spin_unlock_irqrestore(&cosa->lock, flags); -out: - mutex_unlock(&cosa_chardev_mutex); - return ret; -} - -static int cosa_release(struct inode *inode, struct file *file) -{ - struct channel_data *channel = file->private_data; - struct cosa_data *cosa; - unsigned long flags; - - cosa = channel->cosa; - spin_lock_irqsave(&cosa->lock, flags); - cosa->usage--; - channel->usage--; - spin_unlock_irqrestore(&cosa->lock, flags); - return 0; -} - -#ifdef COSA_FASYNC_WORKING -static struct fasync_struct *fasync[256] = { NULL, }; - -/* To be done ... */ -static int cosa_fasync(struct inode *inode, struct file *file, int on) -{ - int port = iminor(inode); - - return fasync_helper(inode, file, on, &fasync[port]); -} -#endif - -/* ---------- Ioctls ---------- */ - -/* Ioctl subroutines can safely be made inline, because they are called - * only from cosa_ioctl(). - */ -static inline int cosa_reset(struct cosa_data *cosa) -{ - char idstring[COSA_MAX_ID_STRING]; - - if (cosa->usage > 1) - pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). 
Odd things may happen.\n", - cosa->num, cosa->usage); - cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START); - if (cosa_reset_and_read_id(cosa, idstring) < 0) { - pr_notice("cosa%d: reset failed\n", cosa->num); - return -EIO; - } - pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring); - cosa->firmware_status |= COSA_FW_RESET; - return 0; -} - -/* High-level function to download data into COSA memory. Calls download() */ -static inline int cosa_download(struct cosa_data *cosa, void __user *arg) -{ - struct cosa_download d; - int i; - - if (cosa->usage > 1) - pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n", - cosa->name, cosa->usage); - if (!(cosa->firmware_status & COSA_FW_RESET)) { - pr_notice("%s: reset the card first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - - if (copy_from_user(&d, arg, sizeof(d))) - return -EFAULT; - - if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE) - return -EINVAL; - if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE) - return -EINVAL; - - /* If something fails, force the user to reset the card */ - cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD); - - i = download(cosa, d.code, d.len, d.addr); - if (i < 0) { - pr_notice("cosa%d: microcode download failed: %d\n", - cosa->num, i); - return -EIO; - } - pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n", - cosa->num, d.len, d.addr); - cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD; - return 0; -} - -/* High-level function to read COSA memory. Calls readmem() */ -static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg) -{ - struct cosa_download d; - int i; - - if (cosa->usage > 1) - pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n", - cosa->num, cosa->usage); - if (!(cosa->firmware_status & COSA_FW_RESET)) { - pr_notice("%s: reset the card first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - - if (copy_from_user(&d, arg, sizeof(d))) - return -EFAULT; - - /* If something fails, force the user to reset the card */ - cosa->firmware_status &= ~COSA_FW_RESET; - - i = readmem(cosa, d.code, d.len, d.addr); - if (i < 0) { - pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i); - return -EIO; - } - pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n", - cosa->num, d.len, d.addr); - cosa->firmware_status |= COSA_FW_RESET; - return 0; -} - -/* High-level function to start microcode. Calls startmicrocode(). */ -static inline int cosa_start(struct cosa_data *cosa, int address) -{ - int i; - - if (cosa->usage > 1) - pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). 
Odd things may happen.\n", - cosa->num, cosa->usage); - - if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD)) - != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) { - pr_notice("%s: download the microcode and/or reset the card first (status %d)\n", - cosa->name, cosa->firmware_status); - return -EPERM; - } - cosa->firmware_status &= ~COSA_FW_RESET; - i = startmicrocode(cosa, address); - if (i < 0) { - pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n", - cosa->num, address, i); - return -EIO; - } - pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address); - cosa->startaddr = address; - cosa->firmware_status |= COSA_FW_START; - return 0; -} - -/* Buffer of size at least COSA_MAX_ID_STRING is expected */ -static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string) -{ - int l = strlen(cosa->id_string) + 1; - - if (copy_to_user(string, cosa->id_string, l)) - return -EFAULT; - return l; -} - -/* Buffer of size at least COSA_MAX_ID_STRING is expected */ -static inline int cosa_gettype(struct cosa_data *cosa, char __user *string) -{ - int l = strlen(cosa->type) + 1; - - if (copy_to_user(string, cosa->type, l)) - return -EFAULT; - return l; -} - -static int cosa_ioctl_common(struct cosa_data *cosa, - struct channel_data *channel, unsigned int cmd, - unsigned long arg) -{ - void __user *argp = (void __user *)arg; - - switch (cmd) { - case COSAIORSET: /* Reset the device */ - if (!capable(CAP_NET_ADMIN)) - return -EACCES; - return cosa_reset(cosa); - case COSAIOSTRT: /* Start the firmware */ - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - return cosa_start(cosa, arg); - case COSAIODOWNLD: /* Download the firmware */ - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - - return cosa_download(cosa, argp); - case COSAIORMEM: - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - return cosa_readmem(cosa, argp); - case COSAIORTYPE: - return cosa_gettype(cosa, argp); - case COSAIORIDSTR: - return cosa_getidstr(cosa, argp); - case COSAIONRCARDS: - return nr_cards; - case COSAIONRCHANS: - return cosa->nchannels; - case COSAIOBMSET: - if (!capable(CAP_SYS_RAWIO)) - return -EACCES; - if (is_8bit(cosa)) - return -EINVAL; - if (arg != COSA_BM_OFF && arg != COSA_BM_ON) - return -EINVAL; - cosa->busmaster = arg; - return 0; - case COSAIOBMGET: - return cosa->busmaster; - } - return -ENOIOCTLCMD; -} - -static long cosa_chardev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct channel_data *channel = file->private_data; - struct cosa_data *cosa; - long ret; - - mutex_lock(&cosa_chardev_mutex); - cosa = channel->cosa; - ret = cosa_ioctl_common(cosa, channel, cmd, arg); - mutex_unlock(&cosa_chardev_mutex); - return ret; -} - -/*---------- HW layer interface ---------- */ - -/* The higher layer can bind itself to the HW layer by setting the callbacks - * in the channel_data structure and by using these routines. - */ -static void cosa_enable_rx(struct channel_data *chan) -{ - struct cosa_data *cosa = chan->cosa; - - if (!test_and_set_bit(chan->num, &cosa->rxbitmap)) - put_driver_status(cosa); -} - -static void cosa_disable_rx(struct channel_data *chan) -{ - struct cosa_data *cosa = chan->cosa; - - if (test_and_clear_bit(chan->num, &cosa->rxbitmap)) - put_driver_status(cosa); -} - -/* FIXME: This routine probably should check for cosa_start_tx() called when - * the previous transmit is still unfinished. In this case the non-zero - * return value should indicate to the caller that the queuing(sp?) up - * the transmit has failed. 
- */ -static int cosa_start_tx(struct channel_data *chan, char *buf, int len) -{ - struct cosa_data *cosa = chan->cosa; - unsigned long flags; -#ifdef DEBUG_DATA - int i; - - pr_info("cosa%dc%d: starting tx(0x%x)", - chan->cosa->num, chan->num, len); - for (i = 0; i < len; i++) - pr_cont(" %02x", buf[i]&0xff); - pr_cont("\n"); -#endif - spin_lock_irqsave(&cosa->lock, flags); - chan->txbuf = buf; - chan->txsize = len; - if (len > COSA_MTU) - chan->txsize = COSA_MTU; - spin_unlock_irqrestore(&cosa->lock, flags); - - /* Tell the firmware we are ready */ - set_bit(chan->num, &cosa->txbitmap); - put_driver_status(cosa); - - return 0; -} - -static void put_driver_status(struct cosa_data *cosa) -{ - unsigned long flags; - int status; - - spin_lock_irqsave(&cosa->lock, flags); - - status = (cosa->rxbitmap ? DRIVER_RX_READY : 0) - | (cosa->txbitmap ? DRIVER_TX_READY : 0) - | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT) - & DRIVER_TXMAP_MASK : 0); - if (!cosa->rxtx) { - if (cosa->rxbitmap | cosa->txbitmap) { - if (!cosa->enabled) { - cosa_putstatus(cosa, SR_RX_INT_ENA); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_RX_INT_ENA); -#endif - cosa->enabled = 1; - } - } else if (cosa->enabled) { - cosa->enabled = 0; - cosa_putstatus(cosa, 0); -#ifdef DEBUG_IO - debug_status_out(cosa, 0); -#endif - } - cosa_putdata8(cosa, status); -#ifdef DEBUG_IO - debug_data_cmd(cosa, status); -#endif - } - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static void put_driver_status_nolock(struct cosa_data *cosa) -{ - int status; - - status = (cosa->rxbitmap ? DRIVER_RX_READY : 0) - | (cosa->txbitmap ? DRIVER_TX_READY : 0) - | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT) - & DRIVER_TXMAP_MASK : 0); - - if (cosa->rxbitmap | cosa->txbitmap) { - cosa_putstatus(cosa, SR_RX_INT_ENA); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_RX_INT_ENA); -#endif - cosa->enabled = 1; - } else { - cosa_putstatus(cosa, 0); -#ifdef DEBUG_IO - debug_status_out(cosa, 0); -#endif - cosa->enabled = 0; - } - cosa_putdata8(cosa, status); -#ifdef DEBUG_IO - debug_data_cmd(cosa, status); -#endif -} - -/* The "kickme" function: When the DMA times out, this is called to - * clean up the driver status. - * FIXME: Preliminary support, the interface is probably wrong. - */ -static void cosa_kick(struct cosa_data *cosa) -{ - unsigned long flags, flags1; - char *s = "(probably) IRQ"; - - if (test_bit(RXBIT, &cosa->rxtx)) - s = "RX DMA"; - if (test_bit(TXBIT, &cosa->rxtx)) - s = "TX DMA"; - - pr_info("%s: %s timeout - restarting\n", cosa->name, s); - spin_lock_irqsave(&cosa->lock, flags); - cosa->rxtx = 0; - - flags1 = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - release_dma_lock(flags1); - - /* FIXME: Anything else? */ - udelay(100); - cosa_putstatus(cosa, 0); - udelay(100); - (void)cosa_getdata8(cosa); - udelay(100); - cosa_putdata8(cosa, 0); - udelay(100); - put_driver_status_nolock(cosa); - spin_unlock_irqrestore(&cosa->lock, flags); -} - -/* Check if the whole buffer is DMA-able. It means it is below the 16M of - * physical memory and doesn't span the 64k boundary. For now it seems - * SKB's never do this, but we'll check this anyway. 
- */ -static int cosa_dma_able(struct channel_data *chan, char *buf, int len) -{ - static int count; - unsigned long b = (unsigned long)buf; - - if (b + len >= MAX_DMA_ADDRESS) - return 0; - if ((b ^ (b + len)) & 0x10000) { - if (count++ < 5) - pr_info("%s: packet spanning a 64k boundary\n", - chan->name); - return 0; - } - return 1; -} - -/* ---------- The SRP/COSA ROM monitor functions ---------- */ - -/* Downloading SRP microcode: say "w" to SRP monitor, it answers by "w=", - * drivers need to say 4-digit hex number meaning start address of the microcode - * separated by a single space. Monitor replies by saying " =". Now driver - * has to write 4-digit hex number meaning the last byte address ended - * by a single space. Monitor has to reply with a space. Now the download - * begins. After the download monitor replies with "\r\n." (CR LF dot). - */ -static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address) -{ - int i; - - if (put_wait_data(cosa, 'w') == -1) - return -1; - if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;} - if (get_wait_data(cosa) != '=') - return -3; - - if (puthexnumber(cosa, address) < 0) - return -4; - if (put_wait_data(cosa, ' ') == -1) - return -10; - if (get_wait_data(cosa) != ' ') - return -11; - if (get_wait_data(cosa) != '=') - return -12; - - if (puthexnumber(cosa, address + length - 1) < 0) - return -13; - if (put_wait_data(cosa, ' ') == -1) - return -18; - if (get_wait_data(cosa) != ' ') - return -19; - - while (length--) { - char c; -#ifndef SRP_DOWNLOAD_AT_BOOT - if (get_user(c, microcode)) - return -23; /* ??? */ -#else - c = *microcode; -#endif - if (put_wait_data(cosa, c) == -1) - return -20; - microcode++; - } - - if (get_wait_data(cosa) != '\r') - return -21; - if (get_wait_data(cosa) != '\n') - return -22; - if (get_wait_data(cosa) != '.') - return -23; -#if 0 - printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num); -#endif - return 0; -} - -/* Starting microcode is done via the "g" command of the SRP monitor. - * The chat should be the following: "g" "g=" "" - * "". - */ -static int startmicrocode(struct cosa_data *cosa, int address) -{ - if (put_wait_data(cosa, 'g') == -1) - return -1; - if (get_wait_data(cosa) != 'g') - return -2; - if (get_wait_data(cosa) != '=') - return -3; - - if (puthexnumber(cosa, address) < 0) - return -4; - if (put_wait_data(cosa, '\r') == -1) - return -5; - - if (get_wait_data(cosa) != '\r') - return -6; - if (get_wait_data(cosa) != '\r') - return -7; - if (get_wait_data(cosa) != '\n') - return -8; - if (get_wait_data(cosa) != '\r') - return -9; - if (get_wait_data(cosa) != '\n') - return -10; -#if 0 - printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num); -#endif - return 0; -} - -/* Reading memory is done via the "r" command of the SRP monitor. - * The chat is the following "r" "r=" " " " =" " " " " - * Then driver can read the data and the conversation is finished - * by SRP monitor sending "." (dot at the end). - * - * This routine is not needed during the normal operation and serves - * for debugging purposes only. 
- */ -static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address) -{ - if (put_wait_data(cosa, 'r') == -1) - return -1; - if ((get_wait_data(cosa)) != 'r') - return -2; - if ((get_wait_data(cosa)) != '=') - return -3; - - if (puthexnumber(cosa, address) < 0) - return -4; - if (put_wait_data(cosa, ' ') == -1) - return -5; - if (get_wait_data(cosa) != ' ') - return -6; - if (get_wait_data(cosa) != '=') - return -7; - - if (puthexnumber(cosa, address + length - 1) < 0) - return -8; - if (put_wait_data(cosa, ' ') == -1) - return -9; - if (get_wait_data(cosa) != ' ') - return -10; - - while (length--) { - char c; - int i; - - i = get_wait_data(cosa); - if (i == -1) { - pr_info("0x%04x bytes remaining\n", length); - return -11; - } - c = i; -#if 1 - if (put_user(c, microcode)) - return -23; /* ??? */ -#else - *microcode = c; -#endif - microcode++; - } - - if (get_wait_data(cosa) != '\r') - return -21; - if (get_wait_data(cosa) != '\n') - return -22; - if (get_wait_data(cosa) != '.') - return -23; -#if 0 - printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num); -#endif - return 0; -} - -/* This function resets the device and reads the initial prompt - * of the device's ROM monitor. - */ -static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring) -{ - int i = 0, id = 0, prev = 0, curr = 0; - - /* Reset the card ... */ - cosa_putstatus(cosa, 0); - cosa_getdata8(cosa); - cosa_putstatus(cosa, SR_RST); - msleep(500); - /* Disable all IRQs from the card */ - cosa_putstatus(cosa, 0); - - /* Try to read the ID string. The card then prints out the - * identification string ended by the "\n\x2e". - * - * The following loop is indexed through i (instead of id) - * to avoid looping forever when for any reason - * the port returns '\r', '\n' or '\x2e' permanently. - */ - for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) { - curr = get_wait_data(cosa); - if (curr == -1) - return -1; - - curr &= 0xff; - if (curr != '\r' && curr != '\n' && curr != 0x2e) - idstring[id++] = curr; - if (curr == 0x2e && prev == '\n') - break; - } - /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */ - idstring[id] = '\0'; - return id; -} - -/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */ - -/* This routine gets the data byte from the card waiting for the SR_RX_RDY - * bit to be set in a loop. It should be used in the exceptional cases - * only (for example when resetting the card or downloading the firmware. - */ -static int get_wait_data(struct cosa_data *cosa) -{ - int retries = 1000; - - while (--retries) { - /* read data and return them */ - if (cosa_getstatus(cosa) & SR_RX_RDY) { - short r; - - r = cosa_getdata8(cosa); -#if 0 - pr_info("get_wait_data returning after %d retries\n", - 999 - retries); -#endif - return r; - } - /* sleep if not ready to read */ - schedule_timeout_interruptible(1); - } - pr_info("timeout in get_wait_data (status 0x%x)\n", - cosa_getstatus(cosa)); - return -1; -} - -/* This routine puts the data byte to the card waiting for the SR_TX_RDY - * bit to be set in a loop. It should be used in the exceptional cases - * only (for example when resetting the card or downloading the firmware). 
- */ -static int put_wait_data(struct cosa_data *cosa, int data) -{ - int retries = 1000; - - while (--retries) { - /* read data and return them */ - if (cosa_getstatus(cosa) & SR_TX_RDY) { - cosa_putdata8(cosa, data); -#if 0 - pr_info("Putdata: %d retries\n", 999 - retries); -#endif - return 0; - } -#if 0 - /* sleep if not ready to read */ - schedule_timeout_interruptible(1); -#endif - } - pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n", - cosa->num, cosa_getstatus(cosa)); - return -1; -} - -/* The following routine puts the hexadecimal number into the SRP monitor - * and verifies the proper echo of the sent bytes. Returns 0 on success, - * negative number on failure (-1,-3,-5,-7) means that put_wait_data() failed, - * (-2,-4,-6,-8) means that reading echo failed. - */ -static int puthexnumber(struct cosa_data *cosa, int number) -{ - char temp[5]; - int i; - - /* Well, I should probably replace this by something faster. */ - sprintf(temp, "%04X", number); - for (i = 0; i < 4; i++) { - if (put_wait_data(cosa, temp[i]) == -1) { - pr_notice("cosa%d: puthexnumber failed to write byte %d\n", - cosa->num, i); - return -1 - 2 * i; - } - if (get_wait_data(cosa) != temp[i]) { - pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n", - cosa->num, i); - return -2 - 2 * i; - } - } - return 0; -} - -/* ---------- Interrupt routines ---------- */ - -/* There are three types of interrupt: - * At the beginning of transmit - this handled is in tx_interrupt(), - * at the beginning of receive - it is in rx_interrupt() and - * at the end of transmit/receive - it is the eot_interrupt() function. - * These functions are multiplexed by cosa_interrupt() according to the - * COSA status byte. I have moved the rx/tx/eot interrupt handling into - * separate functions to make it more readable. These functions are inline, - * so there should be no overhead of function call. - * - * In the COSA bus-master mode, we need to tell the card the address of a - * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait. - * It's time to use the bottom half :-( - */ - -/* Transmit interrupt routine - called when COSA is willing to obtain - * data from the OS. The most tricky part of the routine is selection - * of channel we (OS) want to send packet for. For SRP we should probably - * use the round-robin approach. The newer COSA firmwares have a simple - * flow-control - in the status word has bits 2 and 3 set to 1 means that the - * channel 0 or 1 doesn't want to receive data. - * - * It seems there is a bug in COSA firmware (need to trace it further): - * When the driver status says that the kernel has no more data for transmit - * (e.g. at the end of TX DMA) and then the kernel changes its mind - * (e.g. new packet is queued to hard_start_xmit()), the card issues - * the TX interrupt but does not mark the channel as ready-to-transmit. - * The fix seems to be to push the packet to COSA despite its request. - * We first try to obey the card's opinion, and then fall back to forced TX. - */ -static inline void tx_interrupt(struct cosa_data *cosa, int status) -{ - unsigned long flags, flags1; -#ifdef DEBUG_IRQS - pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status); -#endif - spin_lock_irqsave(&cosa->lock, flags); - set_bit(TXBIT, &cosa->rxtx); - if (!test_bit(IRQBIT, &cosa->rxtx)) { - /* flow control, see the comment above */ - int i = 0; - - if (!cosa->txbitmap) { - pr_warn("%s: No channel wants data in TX IRQ. 
Expect DMA timeout.\n", - cosa->name); - put_driver_status_nolock(cosa); - clear_bit(TXBIT, &cosa->rxtx); - spin_unlock_irqrestore(&cosa->lock, flags); - return; - } - while (1) { - cosa->txchan++; - i++; - if (cosa->txchan >= cosa->nchannels) - cosa->txchan = 0; - if (!(cosa->txbitmap & (1 << cosa->txchan))) - continue; - if (~status & - (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT))) - break; - /* in second pass, accept first ready-to-TX channel */ - if (i > cosa->nchannels) { - /* Can be safely ignored */ -#ifdef DEBUG_IRQS - printk(KERN_DEBUG "%s: Forcing TX " - "to not-ready channel %d\n", - cosa->name, cosa->txchan); -#endif - break; - } - } - - cosa->txsize = cosa->chan[cosa->txchan].txsize; - if (cosa_dma_able(cosa->chan + cosa->txchan, - cosa->chan[cosa->txchan].txbuf, - cosa->txsize)) { - cosa->txbuf = cosa->chan[cosa->txchan].txbuf; - } else { - memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf, - cosa->txsize); - cosa->txbuf = cosa->bouncebuf; - } - } - - if (is_8bit(cosa)) { - if (!test_bit(IRQBIT, &cosa->rxtx)) { - cosa_putstatus(cosa, SR_TX_INT_ENA); - cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) | - ((cosa->txsize >> 8) & 0x1f)); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_TX_INT_ENA); - debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) | - ((cosa->txsize >> 8) & 0x1f)); - debug_data_in(cosa, cosa_getdata8(cosa)); -#else - cosa_getdata8(cosa); -#endif - set_bit(IRQBIT, &cosa->rxtx); - spin_unlock_irqrestore(&cosa->lock, flags); - return; - } else { - clear_bit(IRQBIT, &cosa->rxtx); - cosa_putstatus(cosa, 0); - cosa_putdata8(cosa, cosa->txsize & 0xff); -#ifdef DEBUG_IO - debug_status_out(cosa, 0); - debug_data_out(cosa, cosa->txsize & 0xff); -#endif - } - } else { - cosa_putstatus(cosa, SR_TX_INT_ENA); - cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000) - | (cosa->txsize & 0x1fff)); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_TX_INT_ENA); - debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) | - (cosa->txsize & 0x1fff)); - debug_data_in(cosa, cosa_getdata8(cosa)); - debug_status_out(cosa, 0); -#else - cosa_getdata8(cosa); -#endif - cosa_putstatus(cosa, 0); - } - - if (cosa->busmaster) { - unsigned long addr = virt_to_bus(cosa->txbuf); - int count = 0; - - pr_info("busmaster IRQ\n"); - while (!(cosa_getstatus(cosa) & SR_TX_RDY)) { - count++; - udelay(10); - if (count > 1000) - break; - } - pr_info("status %x\n", cosa_getstatus(cosa)); - pr_info("ready after %d loops\n", count); - cosa_putdata16(cosa, (addr >> 16) & 0xffff); - - count = 0; - while (!(cosa_getstatus(cosa) & SR_TX_RDY)) { - count++; - if (count > 1000) - break; - udelay(10); - } - pr_info("ready after %d loops\n", count); - cosa_putdata16(cosa, addr & 0xffff); - flags1 = claim_dma_lock(); - set_dma_mode(cosa->dma, DMA_MODE_CASCADE); - enable_dma(cosa->dma); - release_dma_lock(flags1); - } else { - /* start the DMA */ - flags1 = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - set_dma_mode(cosa->dma, DMA_MODE_WRITE); - set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf)); - set_dma_count(cosa->dma, cosa->txsize); - enable_dma(cosa->dma); - release_dma_lock(flags1); - } - cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA); -#endif - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static inline void rx_interrupt(struct cosa_data *cosa, int status) -{ - unsigned long flags; -#ifdef DEBUG_IRQS - pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num); -#endif - - spin_lock_irqsave(&cosa->lock, flags); - set_bit(RXBIT, 
&cosa->rxtx); - - if (is_8bit(cosa)) { - if (!test_bit(IRQBIT, &cosa->rxtx)) { - set_bit(IRQBIT, &cosa->rxtx); - put_driver_status_nolock(cosa); - cosa->rxsize = cosa_getdata8(cosa) << 8; -#ifdef DEBUG_IO - debug_data_in(cosa, cosa->rxsize >> 8); -#endif - spin_unlock_irqrestore(&cosa->lock, flags); - return; - } else { - clear_bit(IRQBIT, &cosa->rxtx); - cosa->rxsize |= cosa_getdata8(cosa) & 0xff; -#ifdef DEBUG_IO - debug_data_in(cosa, cosa->rxsize & 0xff); -#endif -#if 0 - pr_info("cosa%d: receive rxsize = (0x%04x)\n", - cosa->num, cosa->rxsize); -#endif - } - } else { - cosa->rxsize = cosa_getdata16(cosa); -#ifdef DEBUG_IO - debug_data_in(cosa, cosa->rxsize); -#endif -#if 0 - pr_info("cosa%d: receive rxsize = (0x%04x)\n", - cosa->num, cosa->rxsize); -#endif - } - if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) { - pr_warn("%s: rx for unknown channel (0x%04x)\n", - cosa->name, cosa->rxsize); - spin_unlock_irqrestore(&cosa->lock, flags); - goto reject; - } - cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13); - cosa->rxsize &= 0x1fff; - spin_unlock_irqrestore(&cosa->lock, flags); - - cosa->rxbuf = NULL; - if (cosa->rxchan->setup_rx) - cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize); - - if (!cosa->rxbuf) { -reject: /* Reject the packet */ - pr_info("cosa%d: rejecting packet on channel %d\n", - cosa->num, cosa->rxchan->num); - cosa->rxbuf = cosa->bouncebuf; - } - - /* start the DMA */ - flags = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - set_dma_mode(cosa->dma, DMA_MODE_READ); - if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff)) - set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf)); - else - set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf)); - - set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff)); - enable_dma(cosa->dma); - release_dma_lock(flags); - spin_lock_irqsave(&cosa->lock, flags); - cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA); - if (!is_8bit(cosa) && (status & SR_TX_RDY)) - cosa_putdata8(cosa, DRIVER_RX_READY); -#ifdef DEBUG_IO - debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA); - if (!is_8bit(cosa) && (status & SR_TX_RDY)) - debug_data_cmd(cosa, DRIVER_RX_READY); -#endif - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static inline void eot_interrupt(struct cosa_data *cosa, int status) -{ - unsigned long flags, flags1; - - spin_lock_irqsave(&cosa->lock, flags); - flags1 = claim_dma_lock(); - disable_dma(cosa->dma); - clear_dma_ff(cosa->dma); - release_dma_lock(flags1); - if (test_bit(TXBIT, &cosa->rxtx)) { - struct channel_data *chan = cosa->chan + cosa->txchan; - - if (chan->tx_done) - if (chan->tx_done(chan, cosa->txsize)) - clear_bit(chan->num, &cosa->txbitmap); - } else if (test_bit(RXBIT, &cosa->rxtx)) { -#ifdef DEBUG_DATA - { - int i; - - pr_info("cosa%dc%d: done rx(0x%x)", - cosa->num, cosa->rxchan->num, cosa->rxsize); - for (i = 0; i < cosa->rxsize; i++) - pr_cont(" %02x", cosa->rxbuf[i]&0xff); - pr_cont("\n"); - } -#endif - /* Packet for unknown channel? */ - if (cosa->rxbuf == cosa->bouncebuf) - goto out; - if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize)) - memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize); - if (cosa->rxchan->rx_done) - if (cosa->rxchan->rx_done(cosa->rxchan)) - clear_bit(cosa->rxchan->num, &cosa->rxbitmap); - } else { - pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num); - } - /* Clear the RXBIT, TXBIT and IRQBIT (the latest should be - * cleared anyway). 
We should do it as soon as possible - * so that we can tell the COSA we are done and to give it a time - * for recovery. - */ -out: - cosa->rxtx = 0; - put_driver_status_nolock(cosa); - spin_unlock_irqrestore(&cosa->lock, flags); -} - -static irqreturn_t cosa_interrupt(int irq, void *cosa_) -{ - unsigned status; - int count = 0; - struct cosa_data *cosa = cosa_; -again: - status = cosa_getstatus(cosa); -#ifdef DEBUG_IRQS - pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff); -#endif -#ifdef DEBUG_IO - debug_status_in(cosa, status); -#endif - switch (status & SR_CMD_FROM_SRP_MASK) { - case SR_DOWN_REQUEST: - tx_interrupt(cosa, status); - break; - case SR_UP_REQUEST: - rx_interrupt(cosa, status); - break; - case SR_END_OF_TRANSFER: - eot_interrupt(cosa, status); - break; - default: - /* We may be too fast for SRP. Try to wait a bit more. */ - if (count++ < 100) { - udelay(100); - goto again; - } - pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n", - cosa->num, status & 0xff, count); - } -#ifdef DEBUG_IRQS - if (count) - pr_info("%s: %d-times got unknown status in IRQ\n", - cosa->name, count); - else - pr_info("%s: returning from IRQ\n", cosa->name); -#endif - return IRQ_HANDLED; -} - -/* ---------- I/O debugging routines ---------- */ -/* These routines can be used to monitor COSA/SRP I/O and to printk() - * the data being transferred on the data and status I/O port in a - * readable way. - */ - -#ifdef DEBUG_IO -static void debug_status_in(struct cosa_data *cosa, int status) -{ - char *s; - - switch (status & SR_CMD_FROM_SRP_MASK) { - case SR_UP_REQUEST: - s = "RX_REQ"; - break; - case SR_DOWN_REQUEST: - s = "TX_REQ"; - break; - case SR_END_OF_TRANSFER: - s = "ET_REQ"; - break; - default: - s = "NO_REQ"; - break; - } - pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n", - cosa->name, - status, - status & SR_USR_RQ ? "USR_RQ|" : "", - status & SR_TX_RDY ? "TX_RDY|" : "", - status & SR_RX_RDY ? "RX_RDY|" : "", - s); -} - -static void debug_status_out(struct cosa_data *cosa, int status) -{ - pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n", - cosa->name, - status, - status & SR_RX_DMA_ENA ? "RXDMA|" : "!rxdma|", - status & SR_TX_DMA_ENA ? "TXDMA|" : "!txdma|", - status & SR_RST ? "RESET|" : "", - status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|", - status & SR_TX_INT_ENA ? "TXINT|" : "!txint|", - status & SR_RX_INT_ENA ? "RXINT" : "!rxint"); -} - -static void debug_data_in(struct cosa_data *cosa, int data) -{ - pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data); -} - -static void debug_data_out(struct cosa_data *cosa, int data) -{ - pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data); -} - -static void debug_data_cmd(struct cosa_data *cosa, int data) -{ - pr_info("%s: IO: data <- 0x%04x (%s|%s)\n", - cosa->name, data, - data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy", - data & SR_RDY_SND ? 
"TX_RDY" : "!tx_rdy"); -} -#endif - -/* EOF -- this file has not been truncated */ diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h deleted file mode 100644 index f57e0af9d56a..000000000000 --- a/drivers/net/wan/cosa.h +++ /dev/null @@ -1,104 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */ - -/* - * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak - */ - -#ifndef COSA_H__ -#define COSA_H__ - -#include - -#ifdef __KERNEL__ -/* status register - output bits */ -#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */ -#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */ -#define SR_RST 0x10 /* SRP reset */ -#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */ -#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */ -#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */ - -/* status register - input bits */ -#define SR_USR_RQ 0x20 /* user interrupt request pending */ -#define SR_TX_RDY 0x40 /* transmitter empty (ready) */ -#define SR_RX_RDY 0x80 /* receiver data ready */ - -#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data - up to PC */ -#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down - from PC to SRP */ -#define SR_END_OF_TRANSFER 0x03 /* SRP signalize end of - transfer (up or down) */ - -#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */ - -/* bits in driver status byte definitions : */ -#define SR_RDY_RCV 0x01 /* ready to receive packet */ -#define SR_RDY_SND 0x02 /* ready to send packet */ -#define SR_CMD_PND 0x04 /* command pending */ /* not currently used */ - -/* ???? */ -#define SR_PKT_UP 0x01 /* transfer of packet up in progress */ -#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */ - -#endif /* __KERNEL__ */ - -#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */ -#define SR_START_ADDR 0x4400 /* SRP microcode start address */ - -#define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */ -#define COSA_MAX_FIRMWARE_SIZE 0x10000 - -/* ioctls */ -struct cosa_download { - int addr, len; - char __user *code; -}; - -/* Reset the device */ -#define COSAIORSET _IO('C',0xf0) - -/* Start microcode at given address */ -#define COSAIOSTRT _IOW('C',0xf1, int) - -/* Read the block from the device memory */ -#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *) - /* actually the struct cosa_download itself; this is to keep - * the ioctl number same as in 2.4 in order to keep the user-space - * utils compatible. */ - -/* Write the block to the device memory (i.e. download the microcode) */ -#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *) - /* actually the struct cosa_download itself; this is to keep - * the ioctl number same as in 2.4 in order to keep the user-space - * utils compatible. */ - -/* Read the device type (one of "srp", "cosa", and "cosa8" for now) */ -#define COSAIORTYPE _IOR('C',0xf3, char *) - -/* Read the device identification string */ -#define COSAIORIDSTR _IOR('C',0xf4, char *) -/* Maximum length of the identification string. 
*/ -#define COSA_MAX_ID_STRING 128 - -/* Increment/decrement the module usage count :-) */ -/* #define COSAIOMINC _IO('C',0xf5) */ -/* #define COSAIOMDEC _IO('C',0xf6) */ - -/* Get the total number of cards installed */ -#define COSAIONRCARDS _IO('C',0xf7) - -/* Get the number of channels on this card */ -#define COSAIONRCHANS _IO('C',0xf8) - -/* Set the driver for the bus-master operations */ -#define COSAIOBMSET _IOW('C', 0xf9, unsigned short) - -#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */ -#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */ - -/* Gets the busmaster status */ -#define COSAIOBMGET _IO('C', 0xfa) - -#endif /* !COSA_H__ */ -- cgit v1.2.3 From bc6df26f1f785be9b4c10d37e50ac40b46428984 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Apr 2022 10:54:35 -0700 Subject: net: wan: remove support for Z85230-based devices Looks like all the changes to this driver had been automated churn since git era begun. The driver is using virt_to_bus(), it's just a maintenance burden unlikely to have any users. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- Documentation/networking/device_drivers/index.rst | 1 - .../networking/device_drivers/wan/index.rst | 18 - .../networking/device_drivers/wan/z8530book.rst | 256 --- drivers/net/wan/Kconfig | 22 - drivers/net/wan/Makefile | 2 - drivers/net/wan/hostess_sv11.c | 336 ---- drivers/net/wan/sealevel.c | 352 ----- drivers/net/wan/z85230.c | 1641 -------------------- drivers/net/wan/z85230.h | 407 ----- 9 files changed, 3035 deletions(-) delete mode 100644 Documentation/networking/device_drivers/wan/index.rst delete mode 100644 Documentation/networking/device_drivers/wan/z8530book.rst delete mode 100644 drivers/net/wan/hostess_sv11.c delete mode 100644 drivers/net/wan/sealevel.c delete mode 100644 drivers/net/wan/z85230.c delete mode 100644 drivers/net/wan/z85230.h (limited to 'drivers') diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index 5f5cfdb2a300..601eacaf12f3 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -17,7 +17,6 @@ Contents: fddi/index hamradio/index qlogic/index - wan/index wifi/index wwan/index diff --git a/Documentation/networking/device_drivers/wan/index.rst b/Documentation/networking/device_drivers/wan/index.rst deleted file mode 100644 index 9d9ae94f00b4..000000000000 --- a/Documentation/networking/device_drivers/wan/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) - -Classic WAN Device Drivers -========================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - z8530book - -.. only:: subproject and html - - Indices - ======= - - * :ref:`genindex` diff --git a/Documentation/networking/device_drivers/wan/z8530book.rst b/Documentation/networking/device_drivers/wan/z8530book.rst deleted file mode 100644 index fea2c40e7973..000000000000 --- a/Documentation/networking/device_drivers/wan/z8530book.rst +++ /dev/null @@ -1,256 +0,0 @@ -======================= -Z8530 Programming Guide -======================= - -:Author: Alan Cox - -Introduction -============ - -The Z85x30 family synchronous/asynchronous controller chips are used on -a large number of cheap network interface cards. The kernel provides a -core interface layer that is designed to make it easy to provide WAN -services using this chip. - -The current driver only support synchronous operation. 
Merging the -asynchronous driver support into this code to allow any Z85x30 device to -be used as both a tty interface and as a synchronous controller is a -project for Linux post the 2.4 release - -Driver Modes -============ - -The Z85230 driver layer can drive Z8530, Z85C30 and Z85230 devices in -three different modes. Each mode can be applied to an individual channel -on the chip (each chip has two channels). - -The PIO synchronous mode supports the most common Z8530 wiring. Here the -chip is interface to the I/O and interrupt facilities of the host -machine but not to the DMA subsystem. When running PIO the Z8530 has -extremely tight timing requirements. Doing high speeds, even with a -Z85230 will be tricky. Typically you should expect to achieve at best -9600 baud with a Z8C530 and 64Kbits with a Z85230. - -The DMA mode supports the chip when it is configured to use dual DMA -channels on an ISA bus. The better cards tend to support this mode of -operation for a single channel. With DMA running the Z85230 tops out -when it starts to hit ISA DMA constraints at about 512Kbits. It is worth -noting here that many PC machines hang or crash when the chip is driven -fast enough to hold the ISA bus solid. - -Transmit DMA mode uses a single DMA channel. The DMA channel is used for -transmission as the transmit FIFO is smaller than the receive FIFO. it -gives better performance than pure PIO mode but is nowhere near as ideal -as pure DMA mode. - -Using the Z85230 driver -======================= - -The Z85230 driver provides the back end interface to your board. To -configure a Z8530 interface you need to detect the board and to identify -its ports and interrupt resources. It is also your problem to verify the -resources are available. - -Having identified the chip you need to fill in a struct z8530_dev, -which describes each chip. This object must exist until you finally -shutdown the board. Firstly zero the active field. This ensures nothing -goes off without you intending it. The irq field should be set to the -interrupt number of the chip. (Each chip has a single interrupt source -rather than each channel). You are responsible for allocating the -interrupt line. The interrupt handler should be set to -:c:func:`z8530_interrupt()`. The device id should be set to the -z8530_dev structure pointer. Whether the interrupt can be shared or not -is board dependent, and up to you to initialise. - -The structure holds two channel structures. Initialise chanA.ctrlio and -chanA.dataio with the address of the control and data ports. You can or -this with Z8530_PORT_SLEEP to indicate your interface needs the 5uS -delay for chip settling done in software. The PORT_SLEEP option is -architecture specific. Other flags may become available on future -platforms, eg for MMIO. Initialise the chanA.irqs to &z8530_nop to -start the chip up as disabled and discarding interrupt events. This -ensures that stray interrupts will be mopped up and not hang the bus. -Set chanA.dev to point to the device structure itself. The private and -name field you may use as you wish. The private field is unused by the -Z85230 layer. The name is used for error reporting and it may thus make -sense to make it match the network name. - -Repeat the same operation with the B channel if your chip has both -channels wired to something useful. This isn't always the case. If it is -not wired then the I/O values do not matter, but you must initialise -chanB.dev. 
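
Read as a whole, the board setup steps above boil down to a short probe sequence. The sketch below is a minimal, hypothetical illustration of them, assuming a PIO-only card with only channel A wired, a made-up I/O base of 0x280 and IRQ 7, and with the request_region()/error-unwinding housekeeping omitted; it is not a real driver, but it follows the same pattern as the hostess_sv11 probe that is removed later in this series.

#include <linux/interrupt.h>
#include <linux/errno.h>
#include "z85230.h"

/* Hypothetical board: I/O base 0x280, IRQ 7, channel B not wired.
 * A real driver must also reserve the I/O region and undo each step
 * on the error paths.
 */
static struct z8530_dev example_dev;

static int example_probe(void)
{
	struct z8530_dev *dev = &example_dev;

	dev->active = 0;			/* nothing may run yet */

	dev->chanA.ctrlio = 0x280 + 1;		/* control port; OR in Z8530_PORT_SLEEP for slow boards */
	dev->chanA.dataio = 0x280 + 3;		/* data port */
	dev->chanB.ctrlio = -1;			/* channel B is not wired up */
	dev->chanB.dataio = -1;

	dev->chanA.irqs = &z8530_nop;		/* park both channels, mop up stray interrupts */
	dev->chanB.irqs = &z8530_nop;
	dev->chanA.dev = dev;			/* channels point back at the chip */
	dev->chanB.dev = dev;

	if (request_irq(7, z8530_interrupt, 0, "example-z8530", dev))
		return -EBUSY;
	dev->irq = 7;

	disable_irq(7);				/* keep the chip quiet while probing */
	if (z8530_init(dev)) {			/* reset and identify Z8530/Z85C30/Z85230 */
		enable_irq(7);
		free_irq(7, dev);
		return -ENODEV;
	}
	z8530_channel_load(&dev->chanB, z8530_dead_port);
	z8530_channel_load(&dev->chanA, dev->type == Z85230 ?
			   z8530_hdlc_kilostream_85230 : z8530_hdlc_kilostream);
	enable_irq(7);

	z8530_describe(dev, "I/O", 0x280);
	dev->active = 1;
	return 0;
}

From here a driver would go on to allocate and register its network devices (alloc_hdlcdev()/register_hdlc_device()), as the removed hostess_sv11 and sealevel drivers do.
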
- -If your board has DMA facilities then initialise the txdma and rxdma -fields for the relevant channels. You must also allocate the ISA DMA -channels and do any necessary board level initialisation to configure -them. The low level driver will do the Z8530 and DMA controller -programming but not board specific magic. - -Having initialised the device you can then call -:c:func:`z8530_init()`. This will probe the chip and reset it into -a known state. An identification sequence is then run to identify the -chip type. If the checks fail to pass the function returns a non zero -error code. Typically this indicates that the port given is not valid. -After this call the type field of the z8530_dev structure is -initialised to either Z8530, Z85C30 or Z85230 according to the chip -found. - -Once you have called z8530_init you can also make use of the utility -function :c:func:`z8530_describe()`. This provides a consistent -reporting format for the Z8530 devices, and allows all the drivers to -provide consistent reporting. - -Attaching Network Interfaces -============================ - -If you wish to use the network interface facilities of the driver, then -you need to attach a network device to each channel that is present and -in use. In addition to use the generic HDLC you need to follow some -additional plumbing rules. They may seem complex but a look at the -example hostess_sv11 driver should reassure you. - -The network device used for each channel should be pointed to by the -netdevice field of each channel. The hdlc-> priv field of the network -device points to your private data - you will need to be able to find -your private data from this. - -The way most drivers approach this particular problem is to create a -structure holding the Z8530 device definition and put that into the -private field of the network device. The network device fields of the -channels then point back to the network devices. - -If you wish to use the generic HDLC then you need to register the HDLC -device. - -Before you register your network device you will also need to provide -suitable handlers for most of the network device callbacks. See the -network device documentation for more details on this. - -Configuring And Activating The Port -=================================== - -The Z85230 driver provides helper functions and tables to load the port -registers on the Z8530 chips. When programming the register settings for -a channel be aware that the documentation recommends initialisation -orders. Strange things happen when these are not followed. - -:c:func:`z8530_channel_load()` takes an array of pairs of -initialisation values in an array of u8 type. The first value is the -Z8530 register number. Add 16 to indicate the alternate register bank on -the later chips. The array is terminated by a 255. - -The driver provides a pair of public tables. The z8530_hdlc_kilostream -table is for the UK 'Kilostream' service and also happens to cover most -other end host configurations. The z8530_hdlc_kilostream_85230 table -is the same configuration using the enhancements of the 85230 chip. The -configuration loaded is standard NRZ encoded synchronous data with HDLC -bitstuffing. All of the timing is taken from the other end of the link. - -When writing your own tables be aware that the driver internally tracks -register values. It may need to reload values. You should therefore be -sure to set registers 1-7, 9-11, 14 and 15 in all configurations. 
Where -the register settings depend on DMA selection the driver will update the -bits itself when you open or close. Loading a new table with the -interface open is not recommended. - -There are three standard configurations supported by the core code. In -PIO mode the interface is programmed up to use interrupt driven PIO. -This places high demands on the host processor to avoid latency. The -driver is written to take account of latency issues but it cannot avoid -latencies caused by other drivers, notably IDE in PIO mode. Because the -drivers allocate buffers you must also prevent MTU changes while the -port is open. - -Once the port is open it will call the rx_function of each channel -whenever a completed packet arrived. This is invoked from interrupt -context and passes you the channel and a network buffer (struct -sk_buff) holding the data. The data includes the CRC bytes so most -users will want to trim the last two bytes before processing the data. -This function is very timing critical. When you wish to simply discard -data the support code provides the function -:c:func:`z8530_null_rx()` to discard the data. - -To active PIO mode sending and receiving the ``z8530_sync_open`` is called. -This expects to be passed the network device and the channel. Typically -this is called from your network device open callback. On a failure a -non zero error status is returned. -The :c:func:`z8530_sync_close()` function shuts down a PIO -channel. This must be done before the channel is opened again and before -the driver shuts down and unloads. - -The ideal mode of operation is dual channel DMA mode. Here the kernel -driver will configure the board for DMA in both directions. The driver -also handles ISA DMA issues such as controller programming and the -memory range limit for you. This mode is activated by calling the -:c:func:`z8530_sync_dma_open()` function. On failure a non zero -error value is returned. Once this mode is activated it can be shut down -by calling the :c:func:`z8530_sync_dma_close()`. You must call -the close function matching the open mode you used. - -The final supported mode uses a single DMA channel to drive the transmit -side. As the Z85C30 has a larger FIFO on the receive channel this tends -to increase the maximum speed a little. This is activated by calling the -``z8530_sync_txdma_open``. This returns a non zero error code on failure. The -:c:func:`z8530_sync_txdma_close()` function closes down the Z8530 -interface from this mode. - -Network Layer Functions -======================= - -The Z8530 layer provides functions to queue packets for transmission. -The driver internally buffers the frame currently being transmitted and -one further frame (in order to keep back to back transmission running). -Any further buffering is up to the caller. - -The function :c:func:`z8530_queue_xmit()` takes a network buffer -in sk_buff format and queues it for transmission. The caller must -provide the entire packet with the exception of the bitstuffing and CRC. -This is normally done by the caller via the generic HDLC interface -layer. It returns 0 if the buffer has been queued and non zero values -for queue full. If the function accepts the buffer it becomes property -of the Z8530 layer and the caller should not free it. - -The function :c:func:`z8530_get_stats()` returns a pointer to an -internally maintained per interface statistics block. This provides most -of the interface code needed to implement the network layer get_stats -callback. 
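
The open/close, receive and transmit plumbing described above is perhaps clearest as code. The sketch below is a hypothetical, PIO-only example of the hooks a board driver wires around these helpers; the example_* names are placeholders, it assumes the channel pointer was stored in the generic HDLC priv field when the device was registered, and the hdlc_open()/hdlc_close() calls are left out for brevity. The removed hostess_sv11 and sealevel drivers further down in this series show the complete version.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/hdlc.h>
#include "z85230.h"

/* Receive hook: called from interrupt context for each completed frame. */
static void example_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	skb_trim(skb, skb->len - 2);		/* drop the two trailing CRC bytes */
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);				/* defer real work; keep this path short */
}

static int example_open(struct net_device *d)
{
	struct z8530_channel *chan = dev_to_hdlc(d)->priv;	/* assumption: priv holds the channel */
	int err;

	err = z8530_sync_open(d, chan);		/* PIO mode; the DMA variants are used the same way */
	if (err)
		return err;
	chan->rx_function = example_rx;
	netif_start_queue(d);
	return 0;
}

static int example_close(struct net_device *d)
{
	struct z8530_channel *chan = dev_to_hdlc(d)->priv;

	chan->rx_function = z8530_null_rx;	/* discard frames still arriving */
	netif_stop_queue(d);
	return z8530_sync_close(d, chan);	/* must match the open call that was used */
}

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *d)
{
	/* Zero: queued, the skb now belongs to the Z8530 layer.
	 * Non-zero: the two-frame transmit queue is full.
	 */
	return z8530_queue_xmit(dev_to_hdlc(d)->priv, skb);
}

A driver then points .ndo_open/.ndo_stop at these hooks, keeps .ndo_start_xmit = hdlc_start_xmit, and sets dev_to_hdlc(dev)->xmit to the queue function, exactly as the removed drivers do.
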
- -Porting The Z8530 Driver -======================== - -The Z8530 driver is written to be portable. In DMA mode it makes -assumptions about the use of ISA DMA. These are probably warranted in -most cases as the Z85230 in particular was designed to glue to PC type -machines. The PIO mode makes no real assumptions. - -Should you need to retarget the Z8530 driver to another architecture the -only code that should need changing are the port I/O functions. At the -moment these assume PC I/O port accesses. This may not be appropriate -for all platforms. Replacing :c:func:`z8530_read_port()` and -``z8530_write_port`` is intended to be all that is required to port -this driver layer. - -Known Bugs And Assumptions -========================== - -Interrupt Locking - The locking in the driver is done via the global cli/sti lock. This - makes for relatively poor SMP performance. Switching this to use a - per device spin lock would probably materially improve performance. - -Occasional Failures - We have reports of occasional failures when run for very long - periods of time and the driver starts to receive junk frames. At the - moment the cause of this is not clear. - -Public Functions Provided -========================= - -.. kernel-doc:: drivers/net/wan/z85230.c - :export: - -Internal Functions -================== - -.. kernel-doc:: drivers/net/wan/z85230.c - :internal: diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 12c5b6c67ab2..dcb069dde66b 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -23,28 +23,6 @@ menuconfig WAN if WAN -# There is no way to detect a comtrol sv11 - force it modular for now. -config HOSTESS_SV11 - tristate "Comtrol Hostess SV-11 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS - help - Driver for Comtrol Hostess SV-11 network card which - operates on low speed synchronous serial links at up to - 256Kbps, supporting PPP and Cisco HDLC. - - The driver will be compiled as a module: the - module will be called hostess_sv11. - -# There is no way to detect a Sealevel board. Force it modular -config SEALEVEL_4021 - tristate "Sealevel Systems 4021 support" - depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS - help - This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. - - The driver will be compiled as a module: the - module will be called sealevel. - # Generic HDLC config HDLC tristate "Generic HDLC layer" diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 901a094c061c..5bec8fae47f8 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -14,8 +14,6 @@ obj-$(CONFIG_HDLC_FR) += hdlc_fr.o obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o obj-$(CONFIG_HDLC_X25) += hdlc_x25.o -obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o -obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o obj-$(CONFIG_FARSYNC) += farsync.o obj-$(CONFIG_LAPBETHER) += lapbether.o diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c deleted file mode 100644 index e985e54ba75d..000000000000 --- a/drivers/net/wan/hostess_sv11.c +++ /dev/null @@ -1,336 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Comtrol SV11 card driver - * - * This is a slightly odd Z85230 synchronous driver. All you need to - * know basically is - * - * Its a genuine Z85230 - * - * It supports DMA using two DMA channels in SYNC mode. 
The driver doesn't - * use these facilities - * - * The control port is at io+1, the data at io+3 and turning off the DMA - * is done by writing 0 to io+4 - * - * The hardware does the bus handling to avoid the need for delays between - * touching control registers. - * - * Port B isn't wired (why - beats me) - * - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include "z85230.h" - -static int dma; - -/* Network driver support routines - */ - -static inline struct z8530_dev *dev_to_sv(struct net_device *dev) -{ - return (struct z8530_dev *)dev_to_hdlc(dev)->priv; -} - -/* Frame receive. Simple for our card as we do HDLC and there - * is no funny garbage involved - */ - -static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) -{ - /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ - skb_trim(skb, skb->len - 2); - skb->protocol = hdlc_type_trans(skb, c->netdevice); - skb_reset_mac_header(skb); - skb->dev = c->netdevice; - /* Send it to the PPP layer. We don't have time to process - * it right now. - */ - netif_rx(skb); -} - -/* We've been placed in the UP state - */ - -static int hostess_open(struct net_device *d) -{ - struct z8530_dev *sv11 = dev_to_sv(d); - int err = -1; - - /* Link layer up - */ - switch (dma) { - case 0: - err = z8530_sync_open(d, &sv11->chanA); - break; - case 1: - err = z8530_sync_dma_open(d, &sv11->chanA); - break; - case 2: - err = z8530_sync_txdma_open(d, &sv11->chanA); - break; - } - - if (err) - return err; - - err = hdlc_open(d); - if (err) { - switch (dma) { - case 0: - z8530_sync_close(d, &sv11->chanA); - break; - case 1: - z8530_sync_dma_close(d, &sv11->chanA); - break; - case 2: - z8530_sync_txdma_close(d, &sv11->chanA); - break; - } - return err; - } - sv11->chanA.rx_function = hostess_input; - - /* - * Go go go - */ - - netif_start_queue(d); - return 0; -} - -static int hostess_close(struct net_device *d) -{ - struct z8530_dev *sv11 = dev_to_sv(d); - /* Discard new frames - */ - sv11->chanA.rx_function = z8530_null_rx; - - hdlc_close(d); - netif_stop_queue(d); - - switch (dma) { - case 0: - z8530_sync_close(d, &sv11->chanA); - break; - case 1: - z8530_sync_dma_close(d, &sv11->chanA); - break; - case 2: - z8530_sync_txdma_close(d, &sv11->chanA); - break; - } - return 0; -} - -/* Passed network frames, fire them downwind. 
- */ - -static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb, - struct net_device *d) -{ - return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb); -} - -static int hostess_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -/* Description block for a Comtrol Hostess SV11 card - */ - -static const struct net_device_ops hostess_ops = { - .ndo_open = hostess_open, - .ndo_stop = hostess_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, -}; - -static struct z8530_dev *sv11_init(int iobase, int irq) -{ - struct z8530_dev *sv; - struct net_device *netdev; - /* Get the needed I/O space - */ - - if (!request_region(iobase, 8, "Comtrol SV11")) { - pr_warn("I/O 0x%X already in use\n", iobase); - return NULL; - } - - sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL); - if (!sv) - goto err_kzalloc; - - /* Stuff in the I/O addressing - */ - - sv->active = 0; - - sv->chanA.ctrlio = iobase + 1; - sv->chanA.dataio = iobase + 3; - sv->chanB.ctrlio = -1; - sv->chanB.dataio = -1; - sv->chanA.irqs = &z8530_nop; - sv->chanB.irqs = &z8530_nop; - - outb(0, iobase + 4); /* DMA off */ - - /* We want a fast IRQ for this device. Actually we'd like an even faster - * IRQ ;) - This is one driver RtLinux is made for - */ - - if (request_irq(irq, z8530_interrupt, 0, - "Hostess SV11", sv) < 0) { - pr_warn("IRQ %d already in use\n", irq); - goto err_irq; - } - - sv->irq = irq; - sv->chanA.private = sv; - sv->chanA.dev = sv; - sv->chanB.dev = sv; - - if (dma) { - /* You can have DMA off or 1 and 3 thats the lot - * on the Comtrol. - */ - sv->chanA.txdma = 3; - sv->chanA.rxdma = 1; - outb(0x03 | 0x08, iobase + 4); /* DMA on */ - if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)")) - goto err_txdma; - - if (dma == 1) - if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)")) - goto err_rxdma; - } - - /* Kill our private IRQ line the hostess can end up chattering - * until the configuration is set - */ - disable_irq(irq); - - /* Begin normal initialise - */ - - if (z8530_init(sv)) { - pr_err("Z8530 series device not found\n"); - enable_irq(irq); - goto free_dma; - } - z8530_channel_load(&sv->chanB, z8530_dead_port); - if (sv->type == Z85C30) - z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream); - else - z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230); - - enable_irq(irq); - - /* Now we can take the IRQ - */ - - sv->chanA.netdevice = netdev = alloc_hdlcdev(sv); - if (!netdev) - goto free_dma; - - dev_to_hdlc(netdev)->attach = hostess_attach; - dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; - netdev->netdev_ops = &hostess_ops; - netdev->base_addr = iobase; - netdev->irq = irq; - - if (register_hdlc_device(netdev)) { - pr_err("unable to register HDLC device\n"); - free_netdev(netdev); - goto free_dma; - } - - z8530_describe(sv, "I/O", iobase); - sv->active = 1; - return sv; - -free_dma: - if (dma == 1) - free_dma(sv->chanA.rxdma); -err_rxdma: - if (dma) - free_dma(sv->chanA.txdma); -err_txdma: - free_irq(irq, sv); -err_irq: - kfree(sv); -err_kzalloc: - release_region(iobase, 8); - return NULL; -} - -static void sv11_shutdown(struct z8530_dev *dev) -{ - unregister_hdlc_device(dev->chanA.netdevice); - z8530_shutdown(dev); - free_irq(dev->irq, dev); - if (dma) { - if (dma == 1) - free_dma(dev->chanA.rxdma); - free_dma(dev->chanA.txdma); - } - release_region(dev->chanA.ctrlio - 1, 8); - free_netdev(dev->chanA.netdevice); - kfree(dev); -} - -static int io 
= 0x200; -static int irq = 9; - -module_param_hw(io, int, ioport, 0); -MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); -module_param_hw(dma, int, dma, 0); -MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX"); -module_param_hw(irq, int, irq, 0); -MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card"); - -MODULE_AUTHOR("Alan Cox"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); - -static struct z8530_dev *sv11_unit; - -static int sv11_module_init(void) -{ - sv11_unit = sv11_init(io, irq); - if (!sv11_unit) - return -ENODEV; - return 0; -} -module_init(sv11_module_init); - -static void sv11_module_cleanup(void) -{ - if (sv11_unit) - sv11_shutdown(sv11_unit); -} -module_exit(sv11_module_cleanup); diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c deleted file mode 100644 index eddd20aab691..000000000000 --- a/drivers/net/wan/sealevel.c +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* Sealevel Systems 4021 driver. - * - * (c) Copyright 1999, 2001 Alan Cox - * (c) Copyright 2001 Red Hat Inc. - * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include "z85230.h" - -struct slvl_device { - struct z8530_channel *chan; - int channel; -}; - -struct slvl_board { - struct slvl_device dev[2]; - struct z8530_dev board; - int iobase; -}; - - /* Network driver support routines */ - -static inline struct slvl_device *dev_to_chan(struct net_device *dev) -{ - return (struct slvl_device *)dev_to_hdlc(dev)->priv; -} - -/* Frame receive. Simple for our card as we do HDLC and there - * is no funny garbage involved - */ - -static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) -{ - /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ - skb_trim(skb, skb->len - 2); - skb->protocol = hdlc_type_trans(skb, c->netdevice); - skb_reset_mac_header(skb); - skb->dev = c->netdevice; - netif_rx(skb); -} - - /* We've been placed in the UP state */ - -static int sealevel_open(struct net_device *d) -{ - struct slvl_device *slvl = dev_to_chan(d); - int err = -1; - int unit = slvl->channel; - - /* Link layer up. */ - - switch (unit) { - case 0: - err = z8530_sync_dma_open(d, slvl->chan); - break; - case 1: - err = z8530_sync_open(d, slvl->chan); - break; - } - - if (err) - return err; - - err = hdlc_open(d); - if (err) { - switch (unit) { - case 0: - z8530_sync_dma_close(d, slvl->chan); - break; - case 1: - z8530_sync_close(d, slvl->chan); - break; - } - return err; - } - - slvl->chan->rx_function = sealevel_input; - - netif_start_queue(d); - return 0; -} - -static int sealevel_close(struct net_device *d) -{ - struct slvl_device *slvl = dev_to_chan(d); - int unit = slvl->channel; - - /* Discard new frames */ - - slvl->chan->rx_function = z8530_null_rx; - - hdlc_close(d); - netif_stop_queue(d); - - switch (unit) { - case 0: - z8530_sync_dma_close(d, slvl->chan); - break; - case 1: - z8530_sync_close(d, slvl->chan); - break; - } - return 0; -} - -/* Passed network frames, fire them downwind. 
*/ - -static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb, - struct net_device *d) -{ - return z8530_queue_xmit(dev_to_chan(d)->chan, skb); -} - -static int sealevel_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) - return 0; - return -EINVAL; -} - -static const struct net_device_ops sealevel_ops = { - .ndo_open = sealevel_open, - .ndo_stop = sealevel_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_siocwandev = hdlc_ioctl, -}; - -static int slvl_setup(struct slvl_device *sv, int iobase, int irq) -{ - struct net_device *dev = alloc_hdlcdev(sv); - - if (!dev) - return -1; - - dev_to_hdlc(dev)->attach = sealevel_attach; - dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; - dev->netdev_ops = &sealevel_ops; - dev->base_addr = iobase; - dev->irq = irq; - - if (register_hdlc_device(dev)) { - pr_err("unable to register HDLC device\n"); - free_netdev(dev); - return -1; - } - - sv->chan->netdevice = dev; - return 0; -} - -/* Allocate and setup Sealevel board. */ - -static __init struct slvl_board *slvl_init(int iobase, int irq, - int txdma, int rxdma, int slow) -{ - struct z8530_dev *dev; - struct slvl_board *b; - - /* Get the needed I/O space */ - - if (!request_region(iobase, 8, "Sealevel 4021")) { - pr_warn("I/O 0x%X already in use\n", iobase); - return NULL; - } - - b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); - if (!b) - goto err_kzalloc; - - b->dev[0].chan = &b->board.chanA; - b->dev[0].channel = 0; - - b->dev[1].chan = &b->board.chanB; - b->dev[1].channel = 1; - - dev = &b->board; - - /* Stuff in the I/O addressing */ - - dev->active = 0; - - b->iobase = iobase; - - /* Select 8530 delays for the old board */ - - if (slow) - iobase |= Z8530_PORT_SLEEP; - - dev->chanA.ctrlio = iobase + 1; - dev->chanA.dataio = iobase; - dev->chanB.ctrlio = iobase + 3; - dev->chanB.dataio = iobase + 2; - - dev->chanA.irqs = &z8530_nop; - dev->chanB.irqs = &z8530_nop; - - /* Assert DTR enable DMA */ - - outb(3 | (1 << 7), b->iobase + 4); - - /* We want a fast IRQ for this device. 
Actually we'd like an even faster - * IRQ ;) - This is one driver RtLinux is made for - */ - - if (request_irq(irq, z8530_interrupt, 0, - "SeaLevel", dev) < 0) { - pr_warn("IRQ %d already in use\n", irq); - goto err_request_irq; - } - - dev->irq = irq; - dev->chanA.private = &b->dev[0]; - dev->chanB.private = &b->dev[1]; - dev->chanA.dev = dev; - dev->chanB.dev = dev; - - dev->chanA.txdma = 3; - dev->chanA.rxdma = 1; - if (request_dma(dev->chanA.txdma, "SeaLevel (TX)")) - goto err_dma_tx; - - if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)")) - goto err_dma_rx; - - disable_irq(irq); - - /* Begin normal initialise */ - - if (z8530_init(dev) != 0) { - pr_err("Z8530 series device not found\n"); - enable_irq(irq); - goto free_hw; - } - if (dev->type == Z85C30) { - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); - z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); - } else { - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); - z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); - } - - /* Now we can take the IRQ */ - - enable_irq(irq); - - if (slvl_setup(&b->dev[0], iobase, irq)) - goto free_hw; - if (slvl_setup(&b->dev[1], iobase, irq)) - goto free_netdev0; - - z8530_describe(dev, "I/O", iobase); - dev->active = 1; - return b; - -free_netdev0: - unregister_hdlc_device(b->dev[0].chan->netdevice); - free_netdev(b->dev[0].chan->netdevice); -free_hw: - free_dma(dev->chanA.rxdma); -err_dma_rx: - free_dma(dev->chanA.txdma); -err_dma_tx: - free_irq(irq, dev); -err_request_irq: - kfree(b); -err_kzalloc: - release_region(iobase, 8); - return NULL; -} - -static void __exit slvl_shutdown(struct slvl_board *b) -{ - int u; - - z8530_shutdown(&b->board); - - for (u = 0; u < 2; u++) { - struct net_device *d = b->dev[u].chan->netdevice; - - unregister_hdlc_device(d); - free_netdev(d); - } - - free_irq(b->board.irq, &b->board); - free_dma(b->board.chanA.rxdma); - free_dma(b->board.chanA.txdma); - /* DMA off on the card, drop DTR */ - outb(0, b->iobase); - release_region(b->iobase, 8); - kfree(b); -} - -static int io = 0x238; -static int txdma = 1; -static int rxdma = 3; -static int irq = 5; -static bool slow; - -module_param_hw(io, int, ioport, 0); -MODULE_PARM_DESC(io, "The I/O base of the Sealevel card"); -module_param_hw(txdma, int, dma, 0); -MODULE_PARM_DESC(txdma, "Transmit DMA channel"); -module_param_hw(rxdma, int, dma, 0); -MODULE_PARM_DESC(rxdma, "Receive DMA channel"); -module_param_hw(irq, int, irq, 0); -MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card"); -module_param(slow, bool, 0); -MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012"); - -MODULE_AUTHOR("Alan Cox"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021"); - -static struct slvl_board *slvl_unit; - -static int __init slvl_init_module(void) -{ - slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); - - return slvl_unit ? 
0 : -ENODEV; -} - -static void __exit slvl_cleanup_module(void) -{ - if (slvl_unit) - slvl_shutdown(slvl_unit); -} - -module_init(slvl_init_module); -module_exit(slvl_cleanup_module); diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c deleted file mode 100644 index 982a03488a00..000000000000 --- a/drivers/net/wan/z85230.c +++ /dev/null @@ -1,1641 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* (c) Copyright 1998 Alan Cox - * (c) Copyright 2000, 2001 Red Hat Inc - * - * Development of this driver was funded by Equiinet Ltd - * http://www.equiinet.com - * - * ChangeLog: - * - * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the - * unification of all the Z85x30 asynchronous drivers for real. - * - * DMA now uses get_free_page as kmalloc buffers may span a 64K - * boundary. - * - * Modified for SMP safety and SMP locking by Alan Cox - * - * - * Performance - * - * Z85230: - * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud - * X.25 is not unrealistic on all machines. DMA mode can in theory - * handle T1/E1 quite nicely. In practice the limit seems to be about - * 512Kbit->1Mbit depending on motherboard. - * - * Z85C30: - * 64K will take DMA, 9600 baud X.25 should be ok. - * - * Z8530: - * Synchronous mode without DMA is unlikely to pass about 2400 baud. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#define RT_LOCK -#define RT_UNLOCK -#include - -#include "z85230.h" - -/** - * z8530_read_port - Architecture specific interface function - * @p: port to read - * - * Provided port access methods. The Comtrol SV11 requires no delays - * between accesses and uses PC I/O. Some drivers may need a 5uS delay - * - * In the longer term this should become an architecture specific - * section so that this can become a generic driver interface for all - * platforms. For now we only handle PC I/O ports with or without the - * dread 5uS sanity delay. - * - * The caller must hold sufficient locks to avoid violating the horrible - * 5uS delay rule. - */ - -static inline int z8530_read_port(unsigned long p) -{ - u8 r = inb(Z8530_PORT_OF(p)); - - if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */ - udelay(5); - return r; -} - -/** - * z8530_write_port - Architecture specific interface function - * @p: port to write - * @d: value to write - * - * Write a value to a port with delays if need be. Note that the - * caller must hold locks to avoid read/writes from other contexts - * violating the 5uS rule - * - * In the longer term this should become an architecture specific - * section so that this can become a generic driver interface for all - * platforms. For now we only handle PC I/O ports with or without the - * dread 5uS sanity delay. - */ - -static inline void z8530_write_port(unsigned long p, u8 d) -{ - outb(d, Z8530_PORT_OF(p)); - if (p & Z8530_PORT_SLEEP) - udelay(5); -} - -static void z8530_rx_done(struct z8530_channel *c); -static void z8530_tx_done(struct z8530_channel *c); - -/** - * read_zsreg - Read a register from a Z85230 - * @c: Z8530 channel to read from (2 per chip) - * @reg: Register to read - * FIXME: Use a spinlock. - * - * Most of the Z8530 registers are indexed off the control registers. - * A read is done by writing to the control register and reading the - * register back. 
The caller must hold the lock - */ - -static inline u8 read_zsreg(struct z8530_channel *c, u8 reg) -{ - if (reg) - z8530_write_port(c->ctrlio, reg); - return z8530_read_port(c->ctrlio); -} - -/** - * read_zsdata - Read the data port of a Z8530 channel - * @c: The Z8530 channel to read the data port from - * - * The data port provides fast access to some things. We still - * have all the 5uS delays to worry about. - */ - -static inline u8 read_zsdata(struct z8530_channel *c) -{ - u8 r; - - r = z8530_read_port(c->dataio); - return r; -} - -/** - * write_zsreg - Write to a Z8530 channel register - * @c: The Z8530 channel - * @reg: Register number - * @val: Value to write - * - * Write a value to an indexed register. The caller must hold the lock - * to honour the irritating delay rules. We know about register 0 - * being fast to access. - * - * Assumes c->lock is held. - */ -static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val) -{ - if (reg) - z8530_write_port(c->ctrlio, reg); - z8530_write_port(c->ctrlio, val); -} - -/** - * write_zsctrl - Write to a Z8530 control register - * @c: The Z8530 channel - * @val: Value to write - * - * Write directly to the control register on the Z8530 - */ - -static inline void write_zsctrl(struct z8530_channel *c, u8 val) -{ - z8530_write_port(c->ctrlio, val); -} - -/** - * write_zsdata - Write to a Z8530 control register - * @c: The Z8530 channel - * @val: Value to write - * - * Write directly to the data register on the Z8530 - */ -static inline void write_zsdata(struct z8530_channel *c, u8 val) -{ - z8530_write_port(c->dataio, val); -} - -/* Register loading parameters for a dead port - */ - -u8 z8530_dead_port[] = { - 255 -}; -EXPORT_SYMBOL(z8530_dead_port); - -/* Register loading parameters for currently supported circuit types - */ - -/* Data clocked by telco end. This is the correct data for the UK - * "kilostream" service, and most other similar services. - */ - -u8 z8530_hdlc_kilostream[] = { - 4, SYNC_ENAB | SDLC | X1CLK, - 2, 0, /* No vector */ - 1, 0, - 3, ENT_HM | RxCRC_ENAB | Rx8, - 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR, - 9, 0, /* Disable interrupts */ - 6, 0xFF, - 7, FLAG, - 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/ - 11, TCTRxCP, - 14, DISDPLL, - 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE, - 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx, - 9, NV | MIE | NORESET, - 255 -}; -EXPORT_SYMBOL(z8530_hdlc_kilostream); - -/* As above but for enhanced chips. - */ - -u8 z8530_hdlc_kilostream_85230[] = { - 4, SYNC_ENAB | SDLC | X1CLK, - 2, 0, /* No vector */ - 1, 0, - 3, ENT_HM | RxCRC_ENAB | Rx8, - 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR, - 9, 0, /* Disable interrupts */ - 6, 0xFF, - 7, FLAG, - 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */ - 11, TCTRxCP, - 14, DISDPLL, - 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE, - 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx, - 9, NV | MIE | NORESET, - 23, 3, /* Extended mode AUTO TX and EOM*/ - - 255 -}; -EXPORT_SYMBOL(z8530_hdlc_kilostream_85230); - -/** - * z8530_flush_fifo - Flush on chip RX FIFO - * @c: Channel to flush - * - * Flush the receive FIFO. There is no specific option for this, we - * blindly read bytes and discard them. Reading when there is no data - * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes. - * - * All locking is handled for the caller. On return data may still be - * present if it arrived during the flush. 
- */ - -static void z8530_flush_fifo(struct z8530_channel *c) -{ - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - if (c->dev->type == Z85230) { - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - read_zsreg(c, R1); - } -} - -/** - * z8530_rtsdtr - Control the outgoing DTS/RTS line - * @c: The Z8530 channel to control; - * @set: 1 to set, 0 to clear - * - * Sets or clears DTR/RTS on the requested line. All locking is handled - * by the caller. For now we assume all boards use the actual RTS/DTR - * on the chip. Apparently one or two don't. We'll scream about them - * later. - */ - -static void z8530_rtsdtr(struct z8530_channel *c, int set) -{ - if (set) - c->regs[5] |= (RTS | DTR); - else - c->regs[5] &= ~(RTS | DTR); - write_zsreg(c, R5, c->regs[5]); -} - -/** - * z8530_rx - Handle a PIO receive event - * @c: Z8530 channel to process - * - * Receive handler for receiving in PIO mode. This is much like the - * async one but not quite the same or as complex - * - * Note: Its intended that this handler can easily be separated from - * the main code to run realtime. That'll be needed for some machines - * (eg to ever clock 64kbits on a sparc ;)). - * - * The RT_LOCK macros don't do anything now. Keep the code covered - * by them as short as possible in all circumstances - clocks cost - * baud. The interrupt handler is assumed to be atomic w.r.t. to - * other code - this is true in the RT case too. - * - * We only cover the sync cases for this. If you want 2Mbit async - * do it yourself but consider medical assistance first. This non DMA - * synchronous mode is portable code. The DMA mode assumes PCI like - * ISA DMA - * - * Called with the device lock held - */ - -static void z8530_rx(struct z8530_channel *c) -{ - u8 ch, stat; - - while (1) { - /* FIFO empty ? */ - if (!(read_zsreg(c, R0) & 1)) - break; - ch = read_zsdata(c); - stat = read_zsreg(c, R1); - - /* Overrun ? - */ - if (c->count < c->max) { - *c->dptr++ = ch; - c->count++; - } - - if (stat & END_FR) { - /* Error ? - */ - if (stat & (Rx_OVR | CRC_ERR)) { - /* Rewind the buffer and return */ - if (c->skb) - c->dptr = c->skb->data; - c->count = 0; - if (stat & Rx_OVR) { - pr_warn("%s: overrun\n", c->dev->name); - c->rx_overrun++; - } - if (stat & CRC_ERR) { - c->rx_crc_err++; - /* printk("crc error\n"); */ - } - /* Shove the frame upstream */ - } else { - /* Drop the lock for RX processing, or - * there are deadlocks - */ - z8530_rx_done(c); - write_zsctrl(c, RES_Rx_CRC); - } - } - } - /* Clear irq - */ - write_zsctrl(c, ERR_RES); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_tx - Handle a PIO transmit event - * @c: Z8530 channel to process - * - * Z8530 transmit interrupt handler for the PIO mode. The basic - * idea is to attempt to keep the FIFO fed. We fill as many bytes - * in as possible, its quite possible that we won't keep up with the - * data rate otherwise. - */ - -static void z8530_tx(struct z8530_channel *c) -{ - while (c->txcount) { - /* FIFO full ? 
*/ - if (!(read_zsreg(c, R0) & 4)) - return; - c->txcount--; - /* Shovel out the byte - */ - write_zsreg(c, R8, *c->tx_ptr++); - write_zsctrl(c, RES_H_IUS); - /* We are about to underflow */ - if (c->txcount == 0) { - write_zsctrl(c, RES_EOM_L); - write_zsreg(c, R10, c->regs[10] & ~ABUNDER); - } - } - - /* End of frame TX - fire another one - */ - - write_zsctrl(c, RES_Tx_P); - - z8530_tx_done(c); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_status - Handle a PIO status exception - * @chan: Z8530 channel to process - * - * A status event occurred in PIO synchronous mode. There are several - * reasons the chip will bother us here. A transmit underrun means we - * failed to feed the chip fast enough and just broke a packet. A DCD - * change is a line up or down. - */ - -static void z8530_status(struct z8530_channel *chan) -{ - u8 status, altered; - - status = read_zsreg(chan, R0); - altered = chan->status ^ status; - - chan->status = status; - - if (status & TxEOM) { -/* printk("%s: Tx underrun.\n", chan->dev->name); */ - chan->netdevice->stats.tx_fifo_errors++; - write_zsctrl(chan, ERR_RES); - z8530_tx_done(chan); - } - - if (altered & chan->dcdcheck) { - if (status & chan->dcdcheck) { - pr_info("%s: DCD raised\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] | RxENABLE); - if (chan->netdevice) - netif_carrier_on(chan->netdevice); - } else { - pr_info("%s: DCD lost\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); - z8530_flush_fifo(chan); - if (chan->netdevice) - netif_carrier_off(chan->netdevice); - } - } - write_zsctrl(chan, RES_EXT_INT); - write_zsctrl(chan, RES_H_IUS); -} - -struct z8530_irqhandler z8530_sync = { - .rx = z8530_rx, - .tx = z8530_tx, - .status = z8530_status, -}; -EXPORT_SYMBOL(z8530_sync); - -/** - * z8530_dma_rx - Handle a DMA RX event - * @chan: Channel to handle - * - * Non bus mastering DMA interfaces for the Z8x30 devices. This - * is really pretty PC specific. The DMA mode means that most receive - * events are handled by the DMA hardware. We get a kick here only if - * a frame ended. - */ - -static void z8530_dma_rx(struct z8530_channel *chan) -{ - if (chan->rxdma_on) { - /* Special condition check only */ - u8 status; - - read_zsreg(chan, R7); - read_zsreg(chan, R6); - - status = read_zsreg(chan, R1); - - if (status & END_FR) - z8530_rx_done(chan); /* Fire up the next one */ - - write_zsctrl(chan, ERR_RES); - write_zsctrl(chan, RES_H_IUS); - } else { - /* DMA is off right now, drain the slow way */ - z8530_rx(chan); - } -} - -/** - * z8530_dma_tx - Handle a DMA TX event - * @chan: The Z8530 channel to handle - * - * We have received an interrupt while doing DMA transmissions. It - * shouldn't happen. Scream loudly if it does. - */ -static void z8530_dma_tx(struct z8530_channel *chan) -{ - if (!chan->dma_tx) { - pr_warn("Hey who turned the DMA off?\n"); - z8530_tx(chan); - return; - } - /* This shouldn't occur in DMA mode */ - pr_err("DMA tx - bogus event!\n"); - z8530_tx(chan); -} - -/** - * z8530_dma_status - Handle a DMA status exception - * @chan: Z8530 channel to process - * - * A status event occurred on the Z8530. We receive these for two reasons - * when in DMA mode. Firstly if we finished a packet transfer we get one - * and kick the next packet out. Secondly we may see a DCD change. 
- * - */ -static void z8530_dma_status(struct z8530_channel *chan) -{ - u8 status, altered; - - status = read_zsreg(chan, R0); - altered = chan->status ^ status; - - chan->status = status; - - if (chan->dma_tx) { - if (status & TxEOM) { - unsigned long flags; - - flags = claim_dma_lock(); - disable_dma(chan->txdma); - clear_dma_ff(chan->txdma); - chan->txdma_on = 0; - release_dma_lock(flags); - z8530_tx_done(chan); - } - } - - if (altered & chan->dcdcheck) { - if (status & chan->dcdcheck) { - pr_info("%s: DCD raised\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] | RxENABLE); - if (chan->netdevice) - netif_carrier_on(chan->netdevice); - } else { - pr_info("%s: DCD lost\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); - z8530_flush_fifo(chan); - if (chan->netdevice) - netif_carrier_off(chan->netdevice); - } - } - - write_zsctrl(chan, RES_EXT_INT); - write_zsctrl(chan, RES_H_IUS); -} - -static struct z8530_irqhandler z8530_dma_sync = { - .rx = z8530_dma_rx, - .tx = z8530_dma_tx, - .status = z8530_dma_status, -}; - -static struct z8530_irqhandler z8530_txdma_sync = { - .rx = z8530_rx, - .tx = z8530_dma_tx, - .status = z8530_dma_status, -}; - -/** - * z8530_rx_clear - Handle RX events from a stopped chip - * @c: Z8530 channel to shut up - * - * Receive interrupt vectors for a Z8530 that is in 'parked' mode. - * For machines with PCI Z85x30 cards, or level triggered interrupts - * (eg the MacII) we must clear the interrupt cause or die. - */ - -static void z8530_rx_clear(struct z8530_channel *c) -{ - /* Data and status bytes - */ - u8 stat; - - read_zsdata(c); - stat = read_zsreg(c, R1); - - if (stat & END_FR) - write_zsctrl(c, RES_Rx_CRC); - /* Clear irq - */ - write_zsctrl(c, ERR_RES); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_tx_clear - Handle TX events from a stopped chip - * @c: Z8530 channel to shut up - * - * Transmit interrupt vectors for a Z8530 that is in 'parked' mode. - * For machines with PCI Z85x30 cards, or level triggered interrupts - * (eg the MacII) we must clear the interrupt cause or die. - */ - -static void z8530_tx_clear(struct z8530_channel *c) -{ - write_zsctrl(c, RES_Tx_P); - write_zsctrl(c, RES_H_IUS); -} - -/** - * z8530_status_clear - Handle status events from a stopped chip - * @chan: Z8530 channel to shut up - * - * Status interrupt vectors for a Z8530 that is in 'parked' mode. - * For machines with PCI Z85x30 cards, or level triggered interrupts - * (eg the MacII) we must clear the interrupt cause or die. - */ - -static void z8530_status_clear(struct z8530_channel *chan) -{ - u8 status = read_zsreg(chan, R0); - - if (status & TxEOM) - write_zsctrl(chan, ERR_RES); - write_zsctrl(chan, RES_EXT_INT); - write_zsctrl(chan, RES_H_IUS); -} - -struct z8530_irqhandler z8530_nop = { - .rx = z8530_rx_clear, - .tx = z8530_tx_clear, - .status = z8530_status_clear, -}; -EXPORT_SYMBOL(z8530_nop); - -/** - * z8530_interrupt - Handle an interrupt from a Z8530 - * @irq: Interrupt number - * @dev_id: The Z8530 device that is interrupting. - * - * A Z85[2]30 device has stuck its hand in the air for attention. - * We scan both the channels on the chip for events and then call - * the channel specific call backs for each channel that has events. - * We have to use callback functions because the two channels can be - * in different modes. - * - * Locking is done for the handlers. Note that locking is done - * at the chip level (the 5uS delay issue is per chip not per - * channel). 
c->lock for both channels points to dev->lock - */ - -irqreturn_t z8530_interrupt(int irq, void *dev_id) -{ - struct z8530_dev *dev = dev_id; - u8 intr; - static volatile int locker=0; - int work = 0; - struct z8530_irqhandler *irqs; - - if (locker) { - pr_err("IRQ re-enter\n"); - return IRQ_NONE; - } - locker = 1; - - spin_lock(&dev->lock); - - while (++work < 5000) { - intr = read_zsreg(&dev->chanA, R3); - if (!(intr & - (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT))) - break; - - /* This holds the IRQ status. On the 8530 you must read it - * from chan A even though it applies to the whole chip - */ - - /* Now walk the chip and see what it is wanting - it may be - * an IRQ for someone else remember - */ - - irqs = dev->chanA.irqs; - - if (intr & (CHARxIP | CHATxIP | CHAEXT)) { - if (intr & CHARxIP) - irqs->rx(&dev->chanA); - if (intr & CHATxIP) - irqs->tx(&dev->chanA); - if (intr & CHAEXT) - irqs->status(&dev->chanA); - } - - irqs = dev->chanB.irqs; - - if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) { - if (intr & CHBRxIP) - irqs->rx(&dev->chanB); - if (intr & CHBTxIP) - irqs->tx(&dev->chanB); - if (intr & CHBEXT) - irqs->status(&dev->chanB); - } - } - spin_unlock(&dev->lock); - if (work == 5000) - pr_err("%s: interrupt jammed - abort(0x%X)!\n", - dev->name, intr); - /* Ok all done */ - locker = 0; - return IRQ_HANDLED; -} -EXPORT_SYMBOL(z8530_interrupt); - -static const u8 reg_init[16] = { - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0x55, 0, 0, 0 -}; - -/** - * z8530_sync_open - Open a Z8530 channel for PIO - * @dev: The network interface we are using - * @c: The Z8530 channel to open in synchronous PIO mode - * - * Switch a Z8530 into synchronous mode without DMA assist. We - * raise the RTS/DTR and commence network operation. - */ -int z8530_sync_open(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long flags; - - spin_lock_irqsave(c->lock, flags); - - c->sync = 1; - c->mtu = dev->mtu + 64; - c->count = 0; - c->skb = NULL; - c->skb2 = NULL; - c->irqs = &z8530_sync; - - /* This loads the double buffer up */ - z8530_rx_done(c); /* Load the frame ring */ - z8530_rx_done(c); /* Load the backup frame */ - z8530_rtsdtr(c, 1); - c->dma_tx = 0; - c->regs[R1] |= TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - - spin_unlock_irqrestore(c->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_sync_open); - -/** - * z8530_sync_close - Close a PIO Z8530 channel - * @dev: Network device to close - * @c: Z8530 channel to disassociate and move to idle - * - * Close down a Z8530 interface and switch its interrupt handlers - * to discard future events. - */ -int z8530_sync_close(struct net_device *dev, struct z8530_channel *c) -{ - u8 chk; - unsigned long flags; - - spin_lock_irqsave(c->lock, flags); - c->irqs = &z8530_nop; - c->max = 0; - c->sync = 0; - - chk = read_zsreg(c, R0); - write_zsreg(c, R3, c->regs[R3]); - z8530_rtsdtr(c, 0); - - spin_unlock_irqrestore(c->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_sync_close); - -/** - * z8530_sync_dma_open - Open a Z8530 for DMA I/O - * @dev: The network device to attach - * @c: The Z8530 channel to configure in sync DMA mode. - * - * Set up a Z85x30 device for synchronous DMA in both directions. Two - * ISA DMA channels must be available for this to work. We assume ISA - * DMA driven I/O and PC limits on access. 
- */ -int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long cflags, dflags; - - c->sync = 1; - c->mtu = dev->mtu + 64; - c->count = 0; - c->skb = NULL; - c->skb2 = NULL; - - /* Load the DMA interfaces up - */ - c->rxdma_on = 0; - c->txdma_on = 0; - - /* Allocate the DMA flip buffers. Limit by page size. - * Everyone runs 1500 mtu or less on wan links so this - * should be fine. - */ - - if (c->mtu > PAGE_SIZE / 2) - return -EMSGSIZE; - - c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!c->rx_buf[0]) - return -ENOBUFS; - c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2; - - c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!c->tx_dma_buf[0]) { - free_page((unsigned long)c->rx_buf[0]); - c->rx_buf[0] = NULL; - return -ENOBUFS; - } - c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2; - - c->tx_dma_used = 0; - c->dma_tx = 1; - c->dma_num = 0; - c->dma_ready = 1; - - /* Enable DMA control mode - */ - - spin_lock_irqsave(c->lock, cflags); - - /* TX DMA via DIR/REQ - */ - - c->regs[R14] |= DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - c->regs[R1] &= ~TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - - /* RX DMA via W/Req - */ - - c->regs[R1] |= WT_FN_RDYFN; - c->regs[R1] |= WT_RDY_RT; - c->regs[R1] |= INT_ERR_Rx; - c->regs[R1] &= ~TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R1] |= WT_RDY_ENAB; - write_zsreg(c, R1, c->regs[R1]); - - /* DMA interrupts - */ - - /* Set up the DMA configuration - */ - - dflags = claim_dma_lock(); - - disable_dma(c->rxdma); - clear_dma_ff(c->rxdma); - set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10); - set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0])); - set_dma_count(c->rxdma, c->mtu); - enable_dma(c->rxdma); - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - set_dma_mode(c->txdma, DMA_MODE_WRITE); - disable_dma(c->txdma); - - release_dma_lock(dflags); - - /* Select the DMA interrupt handlers - */ - - c->rxdma_on = 1; - c->txdma_on = 1; - c->tx_dma_used = 1; - - c->irqs = &z8530_dma_sync; - z8530_rtsdtr(c, 1); - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - - spin_unlock_irqrestore(c->lock, cflags); - - return 0; -} -EXPORT_SYMBOL(z8530_sync_dma_open); - -/** - * z8530_sync_dma_close - Close down DMA I/O - * @dev: Network device to detach - * @c: Z8530 channel to move into discard mode - * - * Shut down a DMA mode synchronous interface. Halt the DMA, and - * free the buffers. 
- */ -int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c) -{ - u8 chk; - unsigned long flags; - - c->irqs = &z8530_nop; - c->max = 0; - c->sync = 0; - - /* Disable the PC DMA channels - */ - - flags = claim_dma_lock(); - disable_dma(c->rxdma); - clear_dma_ff(c->rxdma); - - c->rxdma_on = 0; - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - release_dma_lock(flags); - - c->txdma_on = 0; - c->tx_dma_used = 0; - - spin_lock_irqsave(c->lock, flags); - - /* Disable DMA control mode - */ - - c->regs[R1] &= ~WT_RDY_ENAB; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx); - c->regs[R1] |= INT_ALL_Rx; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R14] &= ~DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - if (c->rx_buf[0]) { - free_page((unsigned long)c->rx_buf[0]); - c->rx_buf[0] = NULL; - } - if (c->tx_dma_buf[0]) { - free_page((unsigned long)c->tx_dma_buf[0]); - c->tx_dma_buf[0] = NULL; - } - chk = read_zsreg(c, R0); - write_zsreg(c, R3, c->regs[R3]); - z8530_rtsdtr(c, 0); - - spin_unlock_irqrestore(c->lock, flags); - - return 0; -} -EXPORT_SYMBOL(z8530_sync_dma_close); - -/** - * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA - * @dev: The network device to attach - * @c: The Z8530 channel to configure in sync DMA mode. - * - * Set up a Z85x30 device for synchronous DMA transmission. One - * ISA DMA channel must be available for this to work. The receive - * side is run in PIO mode, but then it has the bigger FIFO. - */ - -int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long cflags, dflags; - - printk("Opening sync interface for TX-DMA\n"); - c->sync = 1; - c->mtu = dev->mtu + 64; - c->count = 0; - c->skb = NULL; - c->skb2 = NULL; - - /* Allocate the DMA flip buffers. Limit by page size. - * Everyone runs 1500 mtu or less on wan links so this - * should be fine. - */ - - if (c->mtu > PAGE_SIZE / 2) - return -EMSGSIZE; - - c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!c->tx_dma_buf[0]) - return -ENOBUFS; - - c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2; - - spin_lock_irqsave(c->lock, cflags); - - /* Load the PIO receive ring - */ - - z8530_rx_done(c); - z8530_rx_done(c); - - /* Load the DMA interfaces up - */ - - c->rxdma_on = 0; - c->txdma_on = 0; - - c->tx_dma_used = 0; - c->dma_num = 0; - c->dma_ready = 1; - c->dma_tx = 1; - - /* Enable DMA control mode - */ - - /* TX DMA via DIR/REQ - */ - c->regs[R14] |= DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - c->regs[R1] &= ~TxINT_ENAB; - write_zsreg(c, R1, c->regs[R1]); - - /* Set up the DMA configuration - */ - - dflags = claim_dma_lock(); - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - set_dma_mode(c->txdma, DMA_MODE_WRITE); - disable_dma(c->txdma); - - release_dma_lock(dflags); - - /* Select the DMA interrupt handlers - */ - - c->rxdma_on = 0; - c->txdma_on = 1; - c->tx_dma_used = 1; - - c->irqs = &z8530_txdma_sync; - z8530_rtsdtr(c, 1); - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - spin_unlock_irqrestore(c->lock, cflags); - - return 0; -} -EXPORT_SYMBOL(z8530_sync_txdma_open); - -/** - * z8530_sync_txdma_close - Close down a TX driven DMA channel - * @dev: Network device to detach - * @c: Z8530 channel to move into discard mode - * - * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA, - * and free the buffers. 
- */ - -int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c) -{ - unsigned long dflags, cflags; - u8 chk; - - spin_lock_irqsave(c->lock, cflags); - - c->irqs = &z8530_nop; - c->max = 0; - c->sync = 0; - - /* Disable the PC DMA channels - */ - - dflags = claim_dma_lock(); - - disable_dma(c->txdma); - clear_dma_ff(c->txdma); - c->txdma_on = 0; - c->tx_dma_used = 0; - - release_dma_lock(dflags); - - /* Disable DMA control mode - */ - - c->regs[R1] &= ~WT_RDY_ENAB; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx); - c->regs[R1] |= INT_ALL_Rx; - write_zsreg(c, R1, c->regs[R1]); - c->regs[R14] &= ~DTRREQ; - write_zsreg(c, R14, c->regs[R14]); - - if (c->tx_dma_buf[0]) { - free_page((unsigned long)c->tx_dma_buf[0]); - c->tx_dma_buf[0] = NULL; - } - chk = read_zsreg(c, R0); - write_zsreg(c, R3, c->regs[R3]); - z8530_rtsdtr(c, 0); - - spin_unlock_irqrestore(c->lock, cflags); - return 0; -} -EXPORT_SYMBOL(z8530_sync_txdma_close); - -/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny - * it exists... - */ -static const char * const z8530_type_name[] = { - "Z8530", - "Z85C30", - "Z85230" -}; - -/** - * z8530_describe - Uniformly describe a Z8530 port - * @dev: Z8530 device to describe - * @mapping: string holding mapping type (eg "I/O" or "Mem") - * @io: the port value in question - * - * Describe a Z8530 in a standard format. We must pass the I/O as - * the port offset isn't predictable. The main reason for this function - * is to try and get a common format of report. - */ - -void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io) -{ - pr_info("%s: %s found at %s 0x%lX, IRQ %d\n", - dev->name, - z8530_type_name[dev->type], - mapping, - Z8530_PORT_OF(io), - dev->irq); -} -EXPORT_SYMBOL(z8530_describe); - -/* Locked operation part of the z8530 init code - */ -static inline int do_z8530_init(struct z8530_dev *dev) -{ - /* NOP the interrupt handlers first - we might get a - * floating IRQ transition when we reset the chip - */ - dev->chanA.irqs = &z8530_nop; - dev->chanB.irqs = &z8530_nop; - dev->chanA.dcdcheck = DCD; - dev->chanB.dcdcheck = DCD; - - /* Reset the chip */ - write_zsreg(&dev->chanA, R9, 0xC0); - udelay(200); - /* Now check its valid */ - write_zsreg(&dev->chanA, R12, 0xAA); - if (read_zsreg(&dev->chanA, R12) != 0xAA) - return -ENODEV; - write_zsreg(&dev->chanA, R12, 0x55); - if (read_zsreg(&dev->chanA, R12) != 0x55) - return -ENODEV; - - dev->type = Z8530; - - /* See the application note. - */ - - write_zsreg(&dev->chanA, R15, 0x01); - - /* If we can set the low bit of R15 then - * the chip is enhanced. - */ - - if (read_zsreg(&dev->chanA, R15) == 0x01) { - /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */ - /* Put a char in the fifo */ - write_zsreg(&dev->chanA, R8, 0); - if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP) - dev->type = Z85230; /* Has a FIFO */ - else - dev->type = Z85C30; /* Z85C30, 1 byte FIFO */ - } - - /* The code assumes R7' and friends are - * off. Use write_zsext() for these and keep - * this bit clear. - */ - - write_zsreg(&dev->chanA, R15, 0); - - /* At this point it looks like the chip is behaving - */ - - memcpy(dev->chanA.regs, reg_init, 16); - memcpy(dev->chanB.regs, reg_init, 16); - - return 0; -} - -/** - * z8530_init - Initialise a Z8530 device - * @dev: Z8530 device to initialise. - * - * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device - * is present, identify the type and then program it to hopefully - * keep quite and behave. 
This matters a lot, a Z8530 in the wrong - * state will sometimes get into stupid modes generating 10Khz - * interrupt streams and the like. - * - * We set the interrupt handler up to discard any events, in case - * we get them during reset or setp. - * - * Return 0 for success, or a negative value indicating the problem - * in errno form. - */ - -int z8530_init(struct z8530_dev *dev) -{ - unsigned long flags; - int ret; - - /* Set up the chip level lock */ - spin_lock_init(&dev->lock); - dev->chanA.lock = &dev->lock; - dev->chanB.lock = &dev->lock; - - spin_lock_irqsave(&dev->lock, flags); - ret = do_z8530_init(dev); - spin_unlock_irqrestore(&dev->lock, flags); - - return ret; -} -EXPORT_SYMBOL(z8530_init); - -/** - * z8530_shutdown - Shutdown a Z8530 device - * @dev: The Z8530 chip to shutdown - * - * We set the interrupt handlers to silence any interrupts. We then - * reset the chip and wait 100uS to be sure the reset completed. Just - * in case the caller then tries to do stuff. - * - * This is called without the lock held - */ -int z8530_shutdown(struct z8530_dev *dev) -{ - unsigned long flags; - /* Reset the chip */ - - spin_lock_irqsave(&dev->lock, flags); - dev->chanA.irqs = &z8530_nop; - dev->chanB.irqs = &z8530_nop; - write_zsreg(&dev->chanA, R9, 0xC0); - /* We must lock the udelay, the chip is offlimits here */ - udelay(100); - spin_unlock_irqrestore(&dev->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_shutdown); - -/** - * z8530_channel_load - Load channel data - * @c: Z8530 channel to configure - * @rtable: table of register, value pairs - * FIXME: ioctl to allow user uploaded tables - * - * Load a Z8530 channel up from the system data. We use +16 to - * indicate the "prime" registers. The value 255 terminates the - * table. - */ - -int z8530_channel_load(struct z8530_channel *c, u8 *rtable) -{ - unsigned long flags; - - spin_lock_irqsave(c->lock, flags); - - while (*rtable != 255) { - int reg = *rtable++; - - if (reg > 0x0F) - write_zsreg(c, R15, c->regs[15] | 1); - write_zsreg(c, reg & 0x0F, *rtable); - if (reg > 0x0F) - write_zsreg(c, R15, c->regs[15] & ~1); - c->regs[reg] = *rtable++; - } - c->rx_function = z8530_null_rx; - c->skb = NULL; - c->tx_skb = NULL; - c->tx_next_skb = NULL; - c->mtu = 1500; - c->max = 0; - c->count = 0; - c->status = read_zsreg(c, R0); - c->sync = 1; - write_zsreg(c, R3, c->regs[R3] | RxENABLE); - - spin_unlock_irqrestore(c->lock, flags); - return 0; -} -EXPORT_SYMBOL(z8530_channel_load); - -/** - * z8530_tx_begin - Begin packet transmission - * @c: The Z8530 channel to kick - * - * This is the speed sensitive side of transmission. If we are called - * and no buffer is being transmitted we commence the next buffer. If - * nothing is queued we idle the sync. - * - * Note: We are handling this code path in the interrupt path, keep it - * fast or bad things will happen. - * - * Called with the lock held. - */ - -static void z8530_tx_begin(struct z8530_channel *c) -{ - unsigned long flags; - - if (c->tx_skb) - return; - - c->tx_skb = c->tx_next_skb; - c->tx_next_skb = NULL; - c->tx_ptr = c->tx_next_ptr; - - if (!c->tx_skb) { - /* Idle on */ - if (c->dma_tx) { - flags = claim_dma_lock(); - disable_dma(c->txdma); - /* Check if we crapped out. - */ - if (get_dma_residue(c->txdma)) { - c->netdevice->stats.tx_dropped++; - c->netdevice->stats.tx_fifo_errors++; - } - release_dma_lock(flags); - } - c->txcount = 0; - } else { - c->txcount = c->tx_skb->len; - - if (c->dma_tx) { - /* FIXME. 
DMA is broken for the original 8530, - * on the older parts we need to set a flag and - * wait for a further TX interrupt to fire this - * stage off - */ - - flags = claim_dma_lock(); - disable_dma(c->txdma); - - /* These two are needed by the 8530/85C30 - * and must be issued when idling. - */ - if (c->dev->type != Z85230) { - write_zsctrl(c, RES_Tx_CRC); - write_zsctrl(c, RES_EOM_L); - } - write_zsreg(c, R10, c->regs[10] & ~ABUNDER); - clear_dma_ff(c->txdma); - set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr)); - set_dma_count(c->txdma, c->txcount); - enable_dma(c->txdma); - release_dma_lock(flags); - write_zsctrl(c, RES_EOM_L); - write_zsreg(c, R5, c->regs[R5] | TxENAB); - } else { - /* ABUNDER off */ - write_zsreg(c, R10, c->regs[10]); - write_zsctrl(c, RES_Tx_CRC); - - while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) { - write_zsreg(c, R8, *c->tx_ptr++); - c->txcount--; - } - } - } - /* Since we emptied tx_skb we can ask for more - */ - netif_wake_queue(c->netdevice); -} - -/** - * z8530_tx_done - TX complete callback - * @c: The channel that completed a transmit. - * - * This is called when we complete a packet send. We wake the queue, - * start the next packet going and then free the buffer of the existing - * packet. This code is fairly timing sensitive. - * - * Called with the register lock held. - */ - -static void z8530_tx_done(struct z8530_channel *c) -{ - struct sk_buff *skb; - - /* Actually this can happen.*/ - if (!c->tx_skb) - return; - - skb = c->tx_skb; - c->tx_skb = NULL; - z8530_tx_begin(c); - c->netdevice->stats.tx_packets++; - c->netdevice->stats.tx_bytes += skb->len; - dev_consume_skb_irq(skb); -} - -/** - * z8530_null_rx - Discard a packet - * @c: The channel the packet arrived on - * @skb: The buffer - * - * We point the receive handler at this function when idle. Instead - * of processing the frames we get to throw them away. - */ -void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) -{ - dev_kfree_skb_any(skb); -} -EXPORT_SYMBOL(z8530_null_rx); - -/** - * z8530_rx_done - Receive completion callback - * @c: The channel that completed a receive - * - * A new packet is complete. Our goal here is to get back into receive - * mode as fast as possible. On the Z85230 we could change to using - * ESCC mode, but on the older chips we have no choice. We flip to the - * new buffer immediately in DMA mode so that the DMA of the next - * frame can occur while we are copying the previous buffer to an sk_buff - * - * Called with the lock held - */ -static void z8530_rx_done(struct z8530_channel *c) -{ - struct sk_buff *skb; - int ct; - - /* Is our receive engine in DMA mode - */ - if (c->rxdma_on) { - /* Save the ready state and the buffer currently - * being used as the DMA target - */ - int ready = c->dma_ready; - unsigned char *rxb = c->rx_buf[c->dma_num]; - unsigned long flags; - - /* Complete this DMA. Necessary to find the length - */ - flags = claim_dma_lock(); - - disable_dma(c->rxdma); - clear_dma_ff(c->rxdma); - c->rxdma_on = 0; - ct = c->mtu - get_dma_residue(c->rxdma); - if (ct < 0) - ct = 2; /* Shit happens.. */ - c->dma_ready = 0; - - /* Normal case: the other slot is free, start the next DMA - * into it immediately. 
- */ - - if (ready) { - c->dma_num ^= 1; - set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10); - set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num])); - set_dma_count(c->rxdma, c->mtu); - c->rxdma_on = 1; - enable_dma(c->rxdma); - /* Stop any frames that we missed the head of - * from passing - */ - write_zsreg(c, R0, RES_Rx_CRC); - } else { - /* Can't occur as we dont reenable the DMA irq until - * after the flip is done - */ - netdev_warn(c->netdevice, "DMA flip overrun!\n"); - } - - release_dma_lock(flags); - - /* Shove the old buffer into an sk_buff. We can't DMA - * directly into one on a PC - it might be above the 16Mb - * boundary. Optimisation - we could check to see if we - * can avoid the copy. Optimisation 2 - make the memcpy - * a copychecksum. - */ - - skb = dev_alloc_skb(ct); - if (!skb) { - c->netdevice->stats.rx_dropped++; - netdev_warn(c->netdevice, "Memory squeeze\n"); - } else { - skb_put(skb, ct); - skb_copy_to_linear_data(skb, rxb, ct); - c->netdevice->stats.rx_packets++; - c->netdevice->stats.rx_bytes += ct; - } - c->dma_ready = 1; - } else { - RT_LOCK; - skb = c->skb; - - /* The game we play for non DMA is similar. We want to - * get the controller set up for the next packet as fast - * as possible. We potentially only have one byte + the - * fifo length for this. Thus we want to flip to the new - * buffer and then mess around copying and allocating - * things. For the current case it doesn't matter but - * if you build a system where the sync irq isn't blocked - * by the kernel IRQ disable then you need only block the - * sync IRQ for the RT_LOCK area. - * - */ - ct = c->count; - - c->skb = c->skb2; - c->count = 0; - c->max = c->mtu; - if (c->skb) { - c->dptr = c->skb->data; - c->max = c->mtu; - } else { - c->count = 0; - c->max = 0; - } - RT_UNLOCK; - - c->skb2 = dev_alloc_skb(c->mtu); - if (c->skb2) - skb_put(c->skb2, c->mtu); - - c->netdevice->stats.rx_packets++; - c->netdevice->stats.rx_bytes += ct; - } - /* If we received a frame we must now process it. - */ - if (skb) { - skb_trim(skb, ct); - c->rx_function(c, skb); - } else { - c->netdevice->stats.rx_dropped++; - netdev_err(c->netdevice, "Lost a frame\n"); - } -} - -/** - * spans_boundary - Check a packet can be ISA DMA'd - * @skb: The buffer to check - * - * Returns true if the buffer cross a DMA boundary on a PC. The poor - * thing can only DMA within a 64K block not across the edges of it. - */ - -static inline int spans_boundary(struct sk_buff *skb) -{ - unsigned long a = (unsigned long)skb->data; - - a ^= (a + skb->len); - if (a & 0x00010000) /* If the 64K bit is different.. */ - return 1; - return 0; -} - -/** - * z8530_queue_xmit - Queue a packet - * @c: The channel to use - * @skb: The packet to kick down the channel - * - * Queue a packet for transmission. Because we have rather - * hard to hit interrupt latencies for the Z85230 per packet - * even in DMA mode we do the flip to DMA buffer if needed here - * not in the IRQ. - * - * Called from the network code. The lock is not held at this - * point. - */ -netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) -{ - unsigned long flags; - - netif_stop_queue(c->netdevice); - if (c->tx_next_skb) - return NETDEV_TX_BUSY; - - /* PC SPECIFIC - DMA limits */ - /* If we will DMA the transmit and its gone over the ISA bus - * limit, then copy to the flip buffer - */ - - if (c->dma_tx && - ((unsigned long)(virt_to_bus(skb->data + skb->len)) >= - 16 * 1024 * 1024 || spans_boundary(skb))) { - /* Send the flip buffer, and flip the flippy bit. 
- * We don't care which is used when just so long as - * we never use the same buffer twice in a row. Since - * only one buffer can be going out at a time the other - * has to be safe. - */ - c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used]; - c->tx_dma_used ^= 1; /* Flip temp buffer */ - skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len); - } else { - c->tx_next_ptr = skb->data; - } - RT_LOCK; - c->tx_next_skb = skb; - RT_UNLOCK; - - spin_lock_irqsave(c->lock, flags); - z8530_tx_begin(c); - spin_unlock_irqrestore(c->lock, flags); - - return NETDEV_TX_OK; -} -EXPORT_SYMBOL(z8530_queue_xmit); - -/* Module support - */ -static const char banner[] __initconst = - KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n"; - -static int __init z85230_init_driver(void) -{ - printk(banner); - return 0; -} -module_init(z85230_init_driver); - -static void __exit z85230_cleanup_driver(void) -{ -} -module_exit(z85230_cleanup_driver); - -MODULE_AUTHOR("Red Hat Inc."); -MODULE_DESCRIPTION("Z85x30 synchronous driver core"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h deleted file mode 100644 index 462cb620bc5d..000000000000 --- a/drivers/net/wan/z85230.h +++ /dev/null @@ -1,407 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Description of Z8530 Z85C30 and Z85230 communications chips - * - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1998 Alan Cox - */ - -#ifndef _Z8530_H -#define _Z8530_H - -#include -#include - -/* Conversion routines to/from brg time constants from/to bits - * per second. - */ -#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) -#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) - -/* The Zilog register set */ - -#define FLAG 0x7e - -/* Write Register 0 */ -#define R0 0 /* Register selects */ -#define R1 1 -#define R2 2 -#define R3 3 -#define R4 4 -#define R5 5 -#define R6 6 -#define R7 7 -#define R8 8 -#define R9 9 -#define R10 10 -#define R11 11 -#define R12 12 -#define R13 13 -#define R14 14 -#define R15 15 - -#define RPRIME 16 /* Indicate a prime register access on 230 */ - -#define NULLCODE 0 /* Null Code */ -#define POINT_HIGH 0x8 /* Select upper half of registers */ -#define RES_EXT_INT 0x10 /* Reset Ext. 
Status Interrupts */ -#define SEND_ABORT 0x18 /* HDLC Abort */ -#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ -#define RES_Tx_P 0x28 /* Reset TxINT Pending */ -#define ERR_RES 0x30 /* Error Reset */ -#define RES_H_IUS 0x38 /* Reset highest IUS */ - -#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ -#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ -#define RES_EOM_L 0xC0 /* Reset EOM latch */ - -/* Write Register 1 */ - -#define EXT_INT_ENAB 0x1 /* Ext Int Enable */ -#define TxINT_ENAB 0x2 /* Tx Int Enable */ -#define PAR_SPEC 0x4 /* Parity is special condition */ - -#define RxINT_DISAB 0 /* Rx Int Disable */ -#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ -#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */ -#define INT_ERR_Rx 0x18 /* Int on error only */ - -#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */ -#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */ -#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */ - -/* Write Register #2 (Interrupt Vector) */ - -/* Write Register 3 */ - -#define RxENABLE 0x1 /* Rx Enable */ -#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ -#define ADD_SM 0x4 /* Address Search Mode (SDLC) */ -#define RxCRC_ENAB 0x8 /* Rx CRC Enable */ -#define ENT_HM 0x10 /* Enter Hunt Mode */ -#define AUTO_ENAB 0x20 /* Auto Enables */ -#define Rx5 0x0 /* Rx 5 Bits/Character */ -#define Rx7 0x40 /* Rx 7 Bits/Character */ -#define Rx6 0x80 /* Rx 6 Bits/Character */ -#define Rx8 0xc0 /* Rx 8 Bits/Character */ - -/* Write Register 4 */ - -#define PAR_ENA 0x1 /* Parity Enable */ -#define PAR_EVEN 0x2 /* Parity Even/Odd* */ - -#define SYNC_ENAB 0 /* Sync Modes Enable */ -#define SB1 0x4 /* 1 stop bit/char */ -#define SB15 0x8 /* 1.5 stop bits/char */ -#define SB2 0xc /* 2 stop bits/char */ - -#define MONSYNC 0 /* 8 Bit Sync character */ -#define BISYNC 0x10 /* 16 bit sync character */ -#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */ -#define EXTSYNC 0x30 /* External Sync Mode */ - -#define X1CLK 0x0 /* x1 clock mode */ -#define X16CLK 0x40 /* x16 clock mode */ -#define X32CLK 0x80 /* x32 clock mode */ -#define X64CLK 0xC0 /* x64 clock mode */ - -/* Write Register 5 */ - -#define TxCRC_ENAB 0x1 /* Tx CRC Enable */ -#define RTS 0x2 /* RTS */ -#define SDLC_CRC 0x4 /* SDLC/CRC-16 */ -#define TxENAB 0x8 /* Tx Enable */ -#define SND_BRK 0x10 /* Send Break */ -#define Tx5 0x0 /* Tx 5 bits (or less)/character */ -#define Tx7 0x20 /* Tx 7 bits/character */ -#define Tx6 0x40 /* Tx 6 bits/character */ -#define Tx8 0x60 /* Tx 8 bits/character */ -#define DTR 0x80 /* DTR */ - -/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ - -/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ - -/* Write Register 8 (transmit buffer) */ - -/* Write Register 9 (Master interrupt control) */ -#define VIS 1 /* Vector Includes Status */ -#define NV 2 /* No Vector */ -#define DLC 4 /* Disable Lower Chain */ -#define MIE 8 /* Master Interrupt Enable */ -#define STATHI 0x10 /* Status high */ -#define NORESET 0 /* No reset on write to R9 */ -#define CHRB 0x40 /* Reset channel B */ -#define CHRA 0x80 /* Reset channel A */ -#define FHWRES 0xc0 /* Force hardware reset */ - -/* Write Register 10 (misc control bits) */ -#define BIT6 1 /* 6 bit/8bit sync */ -#define LOOPMODE 2 /* SDLC Loop mode */ -#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ -#define MARKIDLE 8 /* Mark/flag on idle */ -#define GAOP 0x10 /* Go active on poll */ -#define NRZ 0 /* NRZ mode */ -#define NRZI 0x20 /* NRZI mode */ -#define FM1 0x40 /* FM1 (transition = 1) */ 
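As an aside on the BRG_TO_BPS()/BPS_TO_BRG() helpers defined at the top of this header: they convert between a line rate and the 16-bit time constant loaded into WR12/WR13. Below is a minimal user-space sketch of the round trip; the 4.9152 MHz BRG input clock is an assumed value used purely for illustration, and the macro bodies are copied from the header above.

#include <stdio.h>

/* Conversion helpers, copied from the top of this header */
#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)

int main(void)
{
	const int pclk = 4915200;	/* assumed 4.9152 MHz BRG input clock */
	const int bps = 9600;
	int brg = BPS_TO_BRG(bps, pclk);

	/* Prints "time constant 254 -> 9600 bps" for these values */
	printf("time constant %d -> %d bps\n", brg, BRG_TO_BPS(brg, pclk));
	return 0;
}

Because the arithmetic is integer-only, the round trip is exact only when the requested rate divides the clock cleanly; for other rates the achievable speed differs slightly from what was asked for.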
-#define FM0 0x60 /* FM0 (transition = 0) */ -#define CRCPS 0x80 /* CRC Preset I/O */ - -/* Write Register 11 (Clock Mode control) */ -#define TRxCXT 0 /* TRxC = Xtal output */ -#define TRxCTC 1 /* TRxC = Transmit clock */ -#define TRxCBR 2 /* TRxC = BR Generator Output */ -#define TRxCDP 3 /* TRxC = DPLL output */ -#define TRxCOI 4 /* TRxC O/I */ -#define TCRTxCP 0 /* Transmit clock = RTxC pin */ -#define TCTRxCP 8 /* Transmit clock = TRxC pin */ -#define TCBR 0x10 /* Transmit clock = BR Generator output */ -#define TCDPLL 0x18 /* Transmit clock = DPLL output */ -#define RCRTxCP 0 /* Receive clock = RTxC pin */ -#define RCTRxCP 0x20 /* Receive clock = TRxC pin */ -#define RCBR 0x40 /* Receive clock = BR Generator output */ -#define RCDPLL 0x60 /* Receive clock = DPLL output */ -#define RTxCX 0x80 /* RTxC Xtal/No Xtal */ - -/* Write Register 12 (lower byte of baud rate generator time constant) */ - -/* Write Register 13 (upper byte of baud rate generator time constant) */ - -/* Write Register 14 (Misc control bits) */ -#define BRENABL 1 /* Baud rate generator enable */ -#define BRSRC 2 /* Baud rate generator source */ -#define DTRREQ 4 /* DTR/Request function */ -#define AUTOECHO 8 /* Auto Echo */ -#define LOOPBAK 0x10 /* Local loopback */ -#define SEARCH 0x20 /* Enter search mode */ -#define RMC 0x40 /* Reset missing clock */ -#define DISDPLL 0x60 /* Disable DPLL */ -#define SSBR 0x80 /* Set DPLL source = BR generator */ -#define SSRTxC 0xa0 /* Set DPLL source = RTxC */ -#define SFMM 0xc0 /* Set FM mode */ -#define SNRZI 0xe0 /* Set NRZI mode */ - -/* Write Register 15 (external/status interrupt control) */ -#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */ -#define ZCIE 2 /* Zero count IE */ -#define FIFOE 4 /* Z85230 only */ -#define DCDIE 8 /* DCD IE */ -#define SYNCIE 0x10 /* Sync/hunt IE */ -#define CTSIE 0x20 /* CTS IE */ -#define TxUIE 0x40 /* Tx Underrun/EOM IE */ -#define BRKIE 0x80 /* Break/Abort IE */ - - -/* Read Register 0 */ -#define Rx_CH_AV 0x1 /* Rx Character Available */ -#define ZCOUNT 0x2 /* Zero count */ -#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ -#define DCD 0x8 /* DCD */ -#define SYNC_HUNT 0x10 /* Sync/hunt */ -#define CTS 0x20 /* CTS */ -#define TxEOM 0x40 /* Tx underrun */ -#define BRK_ABRT 0x80 /* Break/Abort */ - -/* Read Register 1 */ -#define ALL_SNT 0x1 /* All sent */ -/* Residue Data for 8 Rx bits/char programmed */ -#define RES3 0x8 /* 0/3 */ -#define RES4 0x4 /* 0/4 */ -#define RES5 0xc /* 0/5 */ -#define RES6 0x2 /* 0/6 */ -#define RES7 0xa /* 0/7 */ -#define RES8 0x6 /* 0/8 */ -#define RES18 0xe /* 1/8 */ -#define RES28 0x0 /* 2/8 */ -/* Special Rx Condition Interrupts */ -#define PAR_ERR 0x10 /* Parity error */ -#define Rx_OVR 0x20 /* Rx Overrun Error */ -#define CRC_ERR 0x40 /* CRC/Framing Error */ -#define END_FR 0x80 /* End of Frame (SDLC) */ - -/* Read Register 2 (channel b only) - Interrupt vector */ - -/* Read Register 3 (interrupt pending register) ch a only */ -#define CHBEXT 0x1 /* Channel B Ext/Stat IP */ -#define CHBTxIP 0x2 /* Channel B Tx IP */ -#define CHBRxIP 0x4 /* Channel B Rx IP */ -#define CHAEXT 0x8 /* Channel A Ext/Stat IP */ -#define CHATxIP 0x10 /* Channel A Tx IP */ -#define CHARxIP 0x20 /* Channel A Rx IP */ - -/* Read Register 8 (receive data register) */ - -/* Read Register 10 (misc status bits) */ -#define ONLOOP 2 /* On loop */ -#define LOOPSEND 0x10 /* Loop sending */ -#define CLK2MIS 0x40 /* Two clocks missing */ -#define CLK1MIS 0x80 /* One clock missing */ - -/* Read Register 12 (lower byte of baud rate 
generator constant) */ - -/* Read Register 13 (upper byte of baud rate generator constant) */ - -/* Read Register 15 (value of WR 15) */ - - -/* - * Interrupt handling functions for this SCC - */ - -struct z8530_channel; - -struct z8530_irqhandler -{ - void (*rx)(struct z8530_channel *); - void (*tx)(struct z8530_channel *); - void (*status)(struct z8530_channel *); -}; - -/* - * A channel of the Z8530 - */ - -struct z8530_channel -{ - struct z8530_irqhandler *irqs; /* IRQ handlers */ - /* - * Synchronous - */ - u16 count; /* Buyes received */ - u16 max; /* Most we can receive this frame */ - u16 mtu; /* MTU of the device */ - u8 *dptr; /* Pointer into rx buffer */ - struct sk_buff *skb; /* Buffer dptr points into */ - struct sk_buff *skb2; /* Pending buffer */ - u8 status; /* Current DCD */ - u8 dcdcheck; /* which bit to check for line */ - u8 sync; /* Set if in sync mode */ - - u8 regs[32]; /* Register map for the chip */ - u8 pendregs[32]; /* Pending register values */ - - struct sk_buff *tx_skb; /* Buffer being transmitted */ - struct sk_buff *tx_next_skb; /* Next transmit buffer */ - u8 *tx_ptr; /* Byte pointer into the buffer */ - u8 *tx_next_ptr; /* Next pointer to use */ - u8 *tx_dma_buf[2]; /* TX flip buffers for DMA */ - u8 tx_dma_used; /* Flip buffer usage toggler */ - u16 txcount; /* Count of bytes to transmit */ - - void (*rx_function)(struct z8530_channel *, struct sk_buff *); - - /* - * Sync DMA - */ - - u8 rxdma; /* DMA channels */ - u8 txdma; - u8 rxdma_on; /* DMA active if flag set */ - u8 txdma_on; - u8 dma_num; /* Buffer we are DMAing into */ - u8 dma_ready; /* Is the other buffer free */ - u8 dma_tx; /* TX is to use DMA */ - u8 *rx_buf[2]; /* The flip buffers */ - - /* - * System - */ - - struct z8530_dev *dev; /* Z85230 chip instance we are from */ - unsigned long ctrlio; /* I/O ports */ - unsigned long dataio; - - /* - * For PC we encode this way. - */ -#define Z8530_PORT_SLEEP 0x80000000 -#define Z8530_PORT_OF(x) ((x)&0xFFFF) - - u32 rx_overrun; /* Overruns - not done yet */ - u32 rx_crc_err; - - /* - * Bound device pointers - */ - - void *private; /* For our owner */ - struct net_device *netdevice; /* Network layer device */ - - spinlock_t *lock; /* Device lock */ -}; - -/* - * Each Z853x0 device. - */ - -struct z8530_dev -{ - char *name; /* Device instance name */ - struct z8530_channel chanA; /* SCC channel A */ - struct z8530_channel chanB; /* SCC channel B */ - int type; -#define Z8530 0 /* NMOS dinosaur */ -#define Z85C30 1 /* CMOS - better */ -#define Z85230 2 /* CMOS with real FIFO */ - int irq; /* Interrupt for the device */ - int active; /* Soft interrupt enable - the Mac doesn't - always have a hard disable on its 8530s... 
*/ - spinlock_t lock; -}; - - -/* - * Functions - */ - -extern u8 z8530_dead_port[]; -extern u8 z8530_hdlc_kilostream_85230[]; -extern u8 z8530_hdlc_kilostream[]; -irqreturn_t z8530_interrupt(int, void *); -void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io); -int z8530_init(struct z8530_dev *); -int z8530_shutdown(struct z8530_dev *); -int z8530_sync_open(struct net_device *, struct z8530_channel *); -int z8530_sync_close(struct net_device *, struct z8530_channel *); -int z8530_sync_dma_open(struct net_device *, struct z8530_channel *); -int z8530_sync_dma_close(struct net_device *, struct z8530_channel *); -int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *); -int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); -int z8530_channel_load(struct z8530_channel *, u8 *); -netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); -void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); - - -/* - * Standard interrupt vector sets - */ - -extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop; - -/* - * Asynchronous Interfacing - */ - -/* - * The size of the serial xmit buffer is 1 page, or 4096 bytes - */ - -#define SERIAL_XMIT_SIZE 4096 -#define WAKEUP_CHARS 256 - -/* - * Events are used to schedule things to happen at timer-interrupt - * time, instead of at rs interrupt time. - */ -#define RS_EVENT_WRITE_WAKEUP 0 - -/* Internal flags used only by kernel/chr_drv/serial.c */ -#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */ -#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */ -#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ -#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ -#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */ -#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */ -#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */ - -#endif /* !(_Z8530_H) */ -- cgit v1.2.3 From 865e2eb08f51820bdefa37a80abe771b5979d440 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Apr 2022 10:54:36 -0700 Subject: net: hamradio: remove support for DMA SCC devices Another incarnation of Z8530, looks like? Again, no real changes in the git history, and it needs VIRT_TO_BUS. Unlikely to have users, let's spend less time refactoring dead code... Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- MAINTAINERS | 1 - drivers/net/hamradio/Kconfig | 34 - drivers/net/hamradio/Makefile | 1 - drivers/net/hamradio/dmascc.c | 1450 ----------------------------------------- 4 files changed, 1486 deletions(-) delete mode 100644 drivers/net/hamradio/dmascc.c (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index 9619058bb0b9..cc5559a7fb5c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8766,7 +8766,6 @@ F: kernel/time/timer_*.c HIGH-SPEED SCC DRIVER FOR AX.25 L: linux-hams@vger.kernel.org S: Orphan -F: drivers/net/hamradio/dmascc.c F: drivers/net/hamradio/scc.c HIGHPOINT ROCKETRAID 3xxx RAID DRIVER diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index 441da03c23ee..a9c44f08199d 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -45,40 +45,6 @@ config BPQETHER useful if some other computer on your local network has a direct amateur radio connection. 
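For orientation, the z85230.h header removed above declared the public entry points of the Z85x30 core (z8530_init(), z8530_channel_load(), z8530_sync_open() and friends). The following is a minimal, hypothetical sketch of how a board driver might wire them together: the mycard_* names, the "mycard" string, and the ctrlio/dataio port layout are invented for illustration, error unwinding is omitted, and the header include path assumes the driver sits next to z85230.h.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include "z85230.h"		/* the header removed above */

/* Hypothetical bring-up of channel A of a card at mycard_io / mycard_irq */
static int mycard_attach(struct net_device *netdev, struct z8530_dev *scc,
			 unsigned long mycard_io, int mycard_irq)
{
	int err;

	scc->name = "mycard";			/* illustrative instance name */
	scc->chanA.ctrlio = mycard_io;		/* board-specific port layout */
	scc->chanA.dataio = mycard_io + 1;
	scc->chanA.netdevice = netdev;
	scc->chanA.dev = scc;
	scc->irq = mycard_irq;

	err = z8530_init(scc);			/* reset, identify and park the chip */
	if (err)
		return err;

	if (request_irq(mycard_irq, z8530_interrupt, 0, "mycard", scc))
		return -EBUSY;

	z8530_describe(scc, "I/O", mycard_io);
	z8530_channel_load(&scc->chanA, z8530_hdlc_kilostream);

	return z8530_sync_open(netdev, &scc->chanA);	/* PIO; DMA open variants also exist */
}

Transmission would then go through z8530_queue_xmit() from the netdev start_xmit hook, and teardown mirrors the above with z8530_sync_close(), free_irq() and z8530_shutdown().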
-config DMASCC - tristate "High-speed (DMA) SCC driver for AX.25" - depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API - depends on VIRT_TO_BUS - help - This is a driver for high-speed SCC boards, i.e. those supporting - DMA on one port. You usually use those boards to connect your - computer to an amateur radio modem (such as the WA4DSY 56kbps - modem), in order to send and receive AX.25 packet radio network - traffic. - - Currently, this driver supports Ottawa PI/PI2, Paccomm/Gracilis - PackeTwin, and S5SCC/DMA boards. They are detected automatically. - If you have one of these cards, say Y here and read the AX25-HOWTO, - available from . - - This driver can operate multiple boards simultaneously. If you - compile it as a module (by saying M instead of Y), it will be called - dmascc. If you don't pass any parameter to the driver, all - possible I/O addresses are probed. This could irritate other devices - that are currently not in use. You may specify the list of addresses - to be probed by "dmascc.io=addr1,addr2,..." (when compiled into the - kernel image) or "io=addr1,addr2,..." (when loaded as a module). The - network interfaces will be called dmascc0 and dmascc1 for the board - detected first, dmascc2 and dmascc3 for the second one, and so on. - - Before you configure each interface with ifconfig, you MUST set - certain parameters, such as channel access timing, clock mode, and - DMA channel. This is accomplished with a small utility program, - dmascc_cfg, available at - . Please be sure to - get at least version 1.27 of dmascc_cfg, as older versions will not - work with the current driver. - config SCC tristate "Z8530 SCC driver" depends on ISA && AX25 && ISA_DMA_API diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile index 7a1518d763e3..25fc400369ba 100644 --- a/drivers/net/hamradio/Makefile +++ b/drivers/net/hamradio/Makefile @@ -11,7 +11,6 @@ # Christoph Hellwig # -obj-$(CONFIG_DMASCC) += dmascc.o obj-$(CONFIG_SCC) += scc.o obj-$(CONFIG_MKISS) += mkiss.o obj-$(CONFIG_6PACK) += 6pack.o diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c deleted file mode 100644 index a2a12208e3ad..000000000000 --- a/drivers/net/hamradio/dmascc.c +++ /dev/null @@ -1,1450 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Driver for high-speed SCC boards (those with DMA support) - * Copyright (C) 1997-2000 Klaus Kudielka - * - * S5SCC/DMA support by Janko Koleznik S52HI - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "z8530.h" - - -/* Number of buffers per channel */ - -#define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */ -#define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 
2 recommended) */ -#define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */ - - -/* Cards supported */ - -#define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \ - 0, 8, 1843200, 3686400 } -#define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \ - 0, 8, 3686400, 7372800 } -#define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \ - 0, 4, 6144000, 6144000 } -#define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \ - 0, 8, 4915200, 9830400 } - -#define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 } - -#define TMR_0_HZ 25600 /* Frequency of timer 0 */ - -#define TYPE_PI 0 -#define TYPE_PI2 1 -#define TYPE_TWIN 2 -#define TYPE_S5 3 -#define NUM_TYPES 4 - -#define MAX_NUM_DEVS 32 - - -/* SCC chips supported */ - -#define Z8530 0 -#define Z85C30 1 -#define Z85230 2 - -#define CHIPNAMES { "Z8530", "Z85C30", "Z85230" } - - -/* I/O registers */ - -/* 8530 registers relative to card base */ -#define SCCB_CMD 0x00 -#define SCCB_DATA 0x01 -#define SCCA_CMD 0x02 -#define SCCA_DATA 0x03 - -/* 8253/8254 registers relative to card base */ -#define TMR_CNT0 0x00 -#define TMR_CNT1 0x01 -#define TMR_CNT2 0x02 -#define TMR_CTRL 0x03 - -/* Additional PI/PI2 registers relative to card base */ -#define PI_DREQ_MASK 0x04 - -/* Additional PackeTwin registers relative to card base */ -#define TWIN_INT_REG 0x08 -#define TWIN_CLR_TMR1 0x09 -#define TWIN_CLR_TMR2 0x0a -#define TWIN_SPARE_1 0x0b -#define TWIN_DMA_CFG 0x08 -#define TWIN_SERIAL_CFG 0x09 -#define TWIN_DMA_CLR_FF 0x0a -#define TWIN_SPARE_2 0x0b - - -/* PackeTwin I/O register values */ - -/* INT_REG */ -#define TWIN_SCC_MSK 0x01 -#define TWIN_TMR1_MSK 0x02 -#define TWIN_TMR2_MSK 0x04 -#define TWIN_INT_MSK 0x07 - -/* SERIAL_CFG */ -#define TWIN_DTRA_ON 0x01 -#define TWIN_DTRB_ON 0x02 -#define TWIN_EXTCLKA 0x04 -#define TWIN_EXTCLKB 0x08 -#define TWIN_LOOPA_ON 0x10 -#define TWIN_LOOPB_ON 0x20 -#define TWIN_EI 0x80 - -/* DMA_CFG */ -#define TWIN_DMA_HDX_T1 0x08 -#define TWIN_DMA_HDX_R1 0x0a -#define TWIN_DMA_HDX_T3 0x14 -#define TWIN_DMA_HDX_R3 0x16 -#define TWIN_DMA_FDX_T3R1 0x1b -#define TWIN_DMA_FDX_T1R3 0x1d - - -/* Status values */ - -#define IDLE 0 -#define TX_HEAD 1 -#define TX_DATA 2 -#define TX_PAUSE 3 -#define TX_TAIL 4 -#define RTS_OFF 5 -#define WAIT 6 -#define DCD_ON 7 -#define RX_ON 8 -#define DCD_OFF 9 - - -/* Ioctls */ - -#define SIOCGSCCPARAM SIOCDEVPRIVATE -#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1) - - -/* Data types */ - -struct scc_param { - int pclk_hz; /* frequency of BRG input (don't change) */ - int brg_tc; /* BRG terminal count; BRG disabled if < 0 */ - int nrzi; /* 0 (nrz), 1 (nrzi) */ - int clocks; /* see dmascc_cfg documentation */ - int txdelay; /* [1/TMR_0_HZ] */ - int txtimeout; /* [1/HZ] */ - int txtail; /* [1/TMR_0_HZ] */ - int waittime; /* [1/TMR_0_HZ] */ - int slottime; /* [1/TMR_0_HZ] */ - int persist; /* 1 ... 
256 */ - int dma; /* -1 (disable), 0, 1, 3 */ - int txpause; /* [1/TMR_0_HZ] */ - int rtsoff; /* [1/TMR_0_HZ] */ - int dcdon; /* [1/TMR_0_HZ] */ - int dcdoff; /* [1/TMR_0_HZ] */ -}; - -struct scc_hardware { - char *name; - int io_region; - int io_delta; - int io_size; - int num_devs; - int scc_offset; - int tmr_offset; - int tmr_hz; - int pclk_hz; -}; - -struct scc_priv { - int type; - int chip; - struct net_device *dev; - struct scc_info *info; - - int channel; - int card_base, scc_cmd, scc_data; - int tmr_cnt, tmr_ctrl, tmr_mode; - struct scc_param param; - char rx_buf[NUM_RX_BUF][BUF_SIZE]; - int rx_len[NUM_RX_BUF]; - int rx_ptr; - struct work_struct rx_work; - int rx_head, rx_tail, rx_count; - int rx_over; - char tx_buf[NUM_TX_BUF][BUF_SIZE]; - int tx_len[NUM_TX_BUF]; - int tx_ptr; - int tx_head, tx_tail, tx_count; - int state; - unsigned long tx_start; - int rr0; - spinlock_t *register_lock; /* Per scc_info */ - spinlock_t ring_lock; -}; - -struct scc_info { - int irq_used; - int twin_serial_cfg; - struct net_device *dev[2]; - struct scc_priv priv[2]; - struct scc_info *next; - spinlock_t register_lock; /* Per device register lock */ -}; - - -/* Function declarations */ -static int setup_adapter(int card_base, int type, int n) __init; - -static void write_scc(struct scc_priv *priv, int reg, int val); -static void write_scc_data(struct scc_priv *priv, int val, int fast); -static int read_scc(struct scc_priv *priv, int reg); -static int read_scc_data(struct scc_priv *priv); - -static int scc_open(struct net_device *dev); -static int scc_close(struct net_device *dev); -static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd); -static int scc_send_packet(struct sk_buff *skb, struct net_device *dev); -static int scc_set_mac_address(struct net_device *dev, void *sa); - -static inline void tx_on(struct scc_priv *priv); -static inline void rx_on(struct scc_priv *priv); -static inline void rx_off(struct scc_priv *priv); -static void start_timer(struct scc_priv *priv, int t, int r15); -static inline unsigned char random(void); - -static inline void z8530_isr(struct scc_info *info); -static irqreturn_t scc_isr(int irq, void *dev_id); -static void rx_isr(struct scc_priv *priv); -static void special_condition(struct scc_priv *priv, int rc); -static void rx_bh(struct work_struct *); -static void tx_isr(struct scc_priv *priv); -static void es_isr(struct scc_priv *priv); -static void tm_isr(struct scc_priv *priv); - - -/* Initialization variables */ - -static int io[MAX_NUM_DEVS] __initdata = { 0, }; - -/* Beware! hw[] is also used in dmascc_exit(). 
*/ -static struct scc_hardware hw[NUM_TYPES] = HARDWARE; - - -/* Global variables */ - -static struct scc_info *first; -static unsigned long rand; - - -MODULE_AUTHOR("Klaus Kudielka"); -MODULE_DESCRIPTION("Driver for high-speed SCC boards"); -module_param_hw_array(io, int, ioport, NULL, 0); -MODULE_LICENSE("GPL"); - -static void __exit dmascc_exit(void) -{ - int i; - struct scc_info *info; - - while (first) { - info = first; - - /* Unregister devices */ - for (i = 0; i < 2; i++) - unregister_netdev(info->dev[i]); - - /* Reset board */ - if (info->priv[0].type == TYPE_TWIN) - outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG); - write_scc(&info->priv[0], R9, FHWRES); - release_region(info->dev[0]->base_addr, - hw[info->priv[0].type].io_size); - - for (i = 0; i < 2; i++) - free_netdev(info->dev[i]); - - /* Free memory */ - first = info->next; - kfree(info); - } -} - -static int __init dmascc_init(void) -{ - int h, i, j, n; - int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS], - t1[MAX_NUM_DEVS]; - unsigned t_val; - unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS], - counting[MAX_NUM_DEVS]; - - /* Initialize random number generator */ - rand = jiffies; - /* Cards found = 0 */ - n = 0; - /* Warning message */ - if (!io[0]) - printk(KERN_INFO "dmascc: autoprobing (dangerous)\n"); - - /* Run autodetection for each card type */ - for (h = 0; h < NUM_TYPES; h++) { - - if (io[0]) { - /* User-specified I/O address regions */ - for (i = 0; i < hw[h].num_devs; i++) - base[i] = 0; - for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) { - j = (io[i] - - hw[h].io_region) / hw[h].io_delta; - if (j >= 0 && j < hw[h].num_devs && - hw[h].io_region + - j * hw[h].io_delta == io[i]) { - base[j] = io[i]; - } - } - } else { - /* Default I/O address regions */ - for (i = 0; i < hw[h].num_devs; i++) { - base[i] = - hw[h].io_region + i * hw[h].io_delta; - } - } - - /* Check valid I/O address regions */ - for (i = 0; i < hw[h].num_devs; i++) - if (base[i]) { - if (!request_region - (base[i], hw[h].io_size, "dmascc")) - base[i] = 0; - else { - tcmd[i] = - base[i] + hw[h].tmr_offset + - TMR_CTRL; - t0[i] = - base[i] + hw[h].tmr_offset + - TMR_CNT0; - t1[i] = - base[i] + hw[h].tmr_offset + - TMR_CNT1; - } - } - - /* Start timers */ - for (i = 0; i < hw[h].num_devs; i++) - if (base[i]) { - /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */ - outb(0x36, tcmd[i]); - outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF, - t0[i]); - outb((hw[h].tmr_hz / TMR_0_HZ) >> 8, - t0[i]); - /* Timer 1: LSB+MSB, Mode 0, HZ/10 */ - outb(0x70, tcmd[i]); - outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]); - outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]); - start[i] = jiffies; - delay[i] = 0; - counting[i] = 1; - /* Timer 2: LSB+MSB, Mode 0 */ - outb(0xb0, tcmd[i]); - } - time = jiffies; - /* Wait until counter registers are loaded */ - udelay(2000000 / TMR_0_HZ); - - /* Timing loop */ - while (time_is_after_jiffies(time + 13)) { - for (i = 0; i < hw[h].num_devs; i++) - if (base[i] && counting[i]) { - /* Read back Timer 1: latch; read LSB; read MSB */ - outb(0x40, tcmd[i]); - t_val = - inb(t1[i]) + (inb(t1[i]) << 8); - /* Also check whether counter did wrap */ - if (t_val == 0 || - t_val > TMR_0_HZ / HZ * 10) - counting[i] = 0; - delay[i] = jiffies - start[i]; - } - } - - /* Evaluate measurements */ - for (i = 0; i < hw[h].num_devs; i++) - if (base[i]) { - if ((delay[i] >= 9 && delay[i] <= 11) && - /* Ok, we have found an adapter */ - (setup_adapter(base[i], h, n) == 0)) - n++; - else - release_region(base[i], - hw[h].io_size); - } - - } /* NUM_TYPES */ - - /* If 
any adapter was successfully initialized, return ok */ - if (n) - return 0; - - /* If no adapter found, return error */ - printk(KERN_INFO "dmascc: no adapters found\n"); - return -EIO; -} - -module_init(dmascc_init); -module_exit(dmascc_exit); - -static void __init dev_setup(struct net_device *dev) -{ - dev->type = ARPHRD_AX25; - dev->hard_header_len = AX25_MAX_HEADER_LEN; - dev->mtu = 1500; - dev->addr_len = AX25_ADDR_LEN; - dev->tx_queue_len = 64; - memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); - dev_addr_set(dev, (u8 *)&ax25_defaddr); -} - -static const struct net_device_ops scc_netdev_ops = { - .ndo_open = scc_open, - .ndo_stop = scc_close, - .ndo_start_xmit = scc_send_packet, - .ndo_siocdevprivate = scc_siocdevprivate, - .ndo_set_mac_address = scc_set_mac_address, -}; - -static int __init setup_adapter(int card_base, int type, int n) -{ - int i, irq, chip, err; - struct scc_info *info; - struct net_device *dev; - struct scc_priv *priv; - unsigned long time; - unsigned int irqs; - int tmr_base = card_base + hw[type].tmr_offset; - int scc_base = card_base + hw[type].scc_offset; - char *chipnames[] = CHIPNAMES; - - /* Initialize what is necessary for write_scc and write_scc_data */ - info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); - if (!info) { - err = -ENOMEM; - goto out; - } - - info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup); - if (!info->dev[0]) { - printk(KERN_ERR "dmascc: " - "could not allocate memory for %s at %#3x\n", - hw[type].name, card_base); - err = -ENOMEM; - goto out1; - } - - info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup); - if (!info->dev[1]) { - printk(KERN_ERR "dmascc: " - "could not allocate memory for %s at %#3x\n", - hw[type].name, card_base); - err = -ENOMEM; - goto out2; - } - spin_lock_init(&info->register_lock); - - priv = &info->priv[0]; - priv->type = type; - priv->card_base = card_base; - priv->scc_cmd = scc_base + SCCA_CMD; - priv->scc_data = scc_base + SCCA_DATA; - priv->register_lock = &info->register_lock; - - /* Reset SCC */ - write_scc(priv, R9, FHWRES | MIE | NV); - - /* Determine type of chip by enabling SDLC/HDLC enhancements */ - write_scc(priv, R15, SHDLCE); - if (!read_scc(priv, R15)) { - /* WR7' not present. This is an ordinary Z8530 SCC. */ - chip = Z8530; - } else { - /* Put one character in TX FIFO */ - write_scc_data(priv, 0, 0); - if (read_scc(priv, R0) & Tx_BUF_EMP) { - /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */ - chip = Z85230; - } else { - /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. 
*/ - chip = Z85C30; - } - } - write_scc(priv, R15, 0); - - /* Start IRQ auto-detection */ - irqs = probe_irq_on(); - - /* Enable interrupts */ - if (type == TYPE_TWIN) { - outb(0, card_base + TWIN_DMA_CFG); - inb(card_base + TWIN_CLR_TMR1); - inb(card_base + TWIN_CLR_TMR2); - info->twin_serial_cfg = TWIN_EI; - outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG); - } else { - write_scc(priv, R15, CTSIE); - write_scc(priv, R0, RES_EXT_INT); - write_scc(priv, R1, EXT_INT_ENAB); - } - - /* Start timer */ - outb(1, tmr_base + TMR_CNT1); - outb(0, tmr_base + TMR_CNT1); - - /* Wait and detect IRQ */ - time = jiffies; - while (time_is_after_jiffies(time + 2 + HZ / TMR_0_HZ)); - irq = probe_irq_off(irqs); - - /* Clear pending interrupt, disable interrupts */ - if (type == TYPE_TWIN) { - inb(card_base + TWIN_CLR_TMR1); - } else { - write_scc(priv, R1, 0); - write_scc(priv, R15, 0); - write_scc(priv, R0, RES_EXT_INT); - } - - if (irq <= 0) { - printk(KERN_ERR - "dmascc: could not find irq of %s at %#3x (irq=%d)\n", - hw[type].name, card_base, irq); - err = -ENODEV; - goto out3; - } - - /* Set up data structures */ - for (i = 0; i < 2; i++) { - dev = info->dev[i]; - priv = &info->priv[i]; - priv->type = type; - priv->chip = chip; - priv->dev = dev; - priv->info = info; - priv->channel = i; - spin_lock_init(&priv->ring_lock); - priv->register_lock = &info->register_lock; - priv->card_base = card_base; - priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD); - priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA); - priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1); - priv->tmr_ctrl = tmr_base + TMR_CTRL; - priv->tmr_mode = i ? 0xb0 : 0x70; - priv->param.pclk_hz = hw[type].pclk_hz; - priv->param.brg_tc = -1; - priv->param.clocks = TCTRxCP | RCRTxCP; - priv->param.persist = 256; - priv->param.dma = -1; - INIT_WORK(&priv->rx_work, rx_bh); - dev->ml_priv = priv; - snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i); - dev->base_addr = card_base; - dev->irq = irq; - dev->netdev_ops = &scc_netdev_ops; - dev->header_ops = &ax25_header_ops; - } - if (register_netdev(info->dev[0])) { - printk(KERN_ERR "dmascc: could not register %s\n", - info->dev[0]->name); - err = -ENODEV; - goto out3; - } - if (register_netdev(info->dev[1])) { - printk(KERN_ERR "dmascc: could not register %s\n", - info->dev[1]->name); - err = -ENODEV; - goto out4; - } - - - info->next = first; - first = info; - printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n", - hw[type].name, chipnames[chip], card_base, irq); - return 0; - - out4: - unregister_netdev(info->dev[0]); - out3: - if (info->priv[0].type == TYPE_TWIN) - outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG); - write_scc(&info->priv[0], R9, FHWRES); - free_netdev(info->dev[1]); - out2: - free_netdev(info->dev[0]); - out1: - kfree(info); - out: - return err; -} - - -/* Driver functions */ - -static void write_scc(struct scc_priv *priv, int reg, int val) -{ - unsigned long flags; - switch (priv->type) { - case TYPE_S5: - if (reg) - outb(reg, priv->scc_cmd); - outb(val, priv->scc_cmd); - return; - case TYPE_TWIN: - if (reg) - outb_p(reg, priv->scc_cmd); - outb_p(val, priv->scc_cmd); - return; - default: - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - if (reg) - outb_p(reg, priv->scc_cmd); - outb_p(val, priv->scc_cmd); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - return; - } -} - - -static void write_scc_data(struct scc_priv *priv, int val, int fast) -{ - 
unsigned long flags; - switch (priv->type) { - case TYPE_S5: - outb(val, priv->scc_data); - return; - case TYPE_TWIN: - outb_p(val, priv->scc_data); - return; - default: - if (fast) - outb_p(val, priv->scc_data); - else { - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - outb_p(val, priv->scc_data); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - } - return; - } -} - - -static int read_scc(struct scc_priv *priv, int reg) -{ - int rc; - unsigned long flags; - switch (priv->type) { - case TYPE_S5: - if (reg) - outb(reg, priv->scc_cmd); - return inb(priv->scc_cmd); - case TYPE_TWIN: - if (reg) - outb_p(reg, priv->scc_cmd); - return inb_p(priv->scc_cmd); - default: - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - if (reg) - outb_p(reg, priv->scc_cmd); - rc = inb_p(priv->scc_cmd); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - return rc; - } -} - - -static int read_scc_data(struct scc_priv *priv) -{ - int rc; - unsigned long flags; - switch (priv->type) { - case TYPE_S5: - return inb(priv->scc_data); - case TYPE_TWIN: - return inb_p(priv->scc_data); - default: - spin_lock_irqsave(priv->register_lock, flags); - outb_p(0, priv->card_base + PI_DREQ_MASK); - rc = inb_p(priv->scc_data); - outb(1, priv->card_base + PI_DREQ_MASK); - spin_unlock_irqrestore(priv->register_lock, flags); - return rc; - } -} - - -static int scc_open(struct net_device *dev) -{ - struct scc_priv *priv = dev->ml_priv; - struct scc_info *info = priv->info; - int card_base = priv->card_base; - - /* Request IRQ if not already used by other channel */ - if (!info->irq_used) { - if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) { - return -EAGAIN; - } - } - info->irq_used++; - - /* Request DMA if required */ - if (priv->param.dma >= 0) { - if (request_dma(priv->param.dma, "dmascc")) { - if (--info->irq_used == 0) - free_irq(dev->irq, info); - return -EAGAIN; - } else { - unsigned long flags = claim_dma_lock(); - clear_dma_ff(priv->param.dma); - release_dma_lock(flags); - } - } - - /* Initialize local variables */ - priv->rx_ptr = 0; - priv->rx_over = 0; - priv->rx_head = priv->rx_tail = priv->rx_count = 0; - priv->state = IDLE; - priv->tx_head = priv->tx_tail = priv->tx_count = 0; - priv->tx_ptr = 0; - - /* Reset channel */ - write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV); - /* X1 clock, SDLC mode */ - write_scc(priv, R4, SDLC | X1CLK); - /* DMA */ - write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); - /* 8 bit RX char, RX disable */ - write_scc(priv, R3, Rx8); - /* 8 bit TX char, TX disable */ - write_scc(priv, R5, Tx8); - /* SDLC address field */ - write_scc(priv, R6, 0); - /* SDLC flag */ - write_scc(priv, R7, FLAG); - switch (priv->chip) { - case Z85C30: - /* Select WR7' */ - write_scc(priv, R15, SHDLCE); - /* Auto EOM reset */ - write_scc(priv, R7, AUTOEOM); - write_scc(priv, R15, 0); - break; - case Z85230: - /* Select WR7' */ - write_scc(priv, R15, SHDLCE); - /* The following bits are set (see 2.5.2.1): - - Automatic EOM reset - - Interrupt request if RX FIFO is half full - This bit should be ignored in DMA mode (according to the - documentation), but actually isn't. The receiver doesn't work if - it is set. Thus, we have to clear it in DMA mode. - - Interrupt/DMA request if TX FIFO is completely empty - a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30 - compatibility). 
- b) If cleared, DMA requests may follow each other very quickly, - filling up the TX FIFO. - Advantage: TX works even in case of high bus latency. - Disadvantage: Edge-triggered DMA request circuitry may miss - a request. No more data is delivered, resulting - in a TX FIFO underrun. - Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared. - The PackeTwin doesn't. I don't know about the PI, but let's - assume it behaves like the PI2. - */ - if (priv->param.dma >= 0) { - if (priv->type == TYPE_TWIN) - write_scc(priv, R7, AUTOEOM | TXFIFOE); - else - write_scc(priv, R7, AUTOEOM); - } else { - write_scc(priv, R7, AUTOEOM | RXFIFOH); - } - write_scc(priv, R15, 0); - break; - } - /* Preset CRC, NRZ(I) encoding */ - write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ)); - - /* Configure baud rate generator */ - if (priv->param.brg_tc >= 0) { - /* Program BR generator */ - write_scc(priv, R12, priv->param.brg_tc & 0xFF); - write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF); - /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by - PackeTwin, not connected on the PI2); set DPLL source to BRG */ - write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL); - /* Enable DPLL */ - write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL); - } else { - /* Disable BR generator */ - write_scc(priv, R14, DTRREQ | BRSRC); - } - - /* Configure clocks */ - if (priv->type == TYPE_TWIN) { - /* Disable external TX clock receiver */ - outb((info->twin_serial_cfg &= - ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)), - card_base + TWIN_SERIAL_CFG); - } - write_scc(priv, R11, priv->param.clocks); - if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) { - /* Enable external TX clock receiver */ - outb((info->twin_serial_cfg |= - (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)), - card_base + TWIN_SERIAL_CFG); - } - - /* Configure PackeTwin */ - if (priv->type == TYPE_TWIN) { - /* Assert DTR, enable interrupts */ - outb((info->twin_serial_cfg |= TWIN_EI | - (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)), - card_base + TWIN_SERIAL_CFG); - } - - /* Read current status */ - priv->rr0 = read_scc(priv, R0); - /* Enable DCD interrupt */ - write_scc(priv, R15, DCDIE); - - netif_start_queue(dev); - - return 0; -} - - -static int scc_close(struct net_device *dev) -{ - struct scc_priv *priv = dev->ml_priv; - struct scc_info *info = priv->info; - int card_base = priv->card_base; - - netif_stop_queue(dev); - - if (priv->type == TYPE_TWIN) { - /* Drop DTR */ - outb((info->twin_serial_cfg &= - (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)), - card_base + TWIN_SERIAL_CFG); - } - - /* Reset channel, free DMA and IRQ */ - write_scc(priv, R9, (priv->channel ? 
CHRB : CHRA) | MIE | NV); - if (priv->param.dma >= 0) { - if (priv->type == TYPE_TWIN) - outb(0, card_base + TWIN_DMA_CFG); - free_dma(priv->param.dma); - } - if (--info->irq_used == 0) - free_irq(dev->irq, info); - - return 0; -} - - -static int scc_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) -{ - struct scc_priv *priv = dev->ml_priv; - - switch (cmd) { - case SIOCGSCCPARAM: - if (copy_to_user(data, &priv->param, sizeof(struct scc_param))) - return -EFAULT; - return 0; - case SIOCSSCCPARAM: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (netif_running(dev)) - return -EAGAIN; - if (copy_from_user(&priv->param, data, - sizeof(struct scc_param))) - return -EFAULT; - return 0; - default: - return -EOPNOTSUPP; - } -} - - -static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - struct scc_priv *priv = dev->ml_priv; - unsigned long flags; - int i; - - if (skb->protocol == htons(ETH_P_IP)) - return ax25_ip_xmit(skb); - - /* Temporarily stop the scheduler feeding us packets */ - netif_stop_queue(dev); - - /* Transfer data to DMA buffer */ - i = priv->tx_head; - skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1); - priv->tx_len[i] = skb->len - 1; - - /* Clear interrupts while we touch our circular buffers */ - - spin_lock_irqsave(&priv->ring_lock, flags); - /* Move the ring buffer's head */ - priv->tx_head = (i + 1) % NUM_TX_BUF; - priv->tx_count++; - - /* If we just filled up the last buffer, leave queue stopped. - The higher layers must wait until we have a DMA buffer - to accept the data. */ - if (priv->tx_count < NUM_TX_BUF) - netif_wake_queue(dev); - - /* Set new TX state */ - if (priv->state == IDLE) { - /* Assert RTS, start timer */ - priv->state = TX_HEAD; - priv->tx_start = jiffies; - write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8); - write_scc(priv, R15, 0); - start_timer(priv, priv->param.txdelay, 0); - } - - /* Turn interrupts back on and free buffer */ - spin_unlock_irqrestore(&priv->ring_lock, flags); - dev_kfree_skb(skb); - - return NETDEV_TX_OK; -} - - -static int scc_set_mac_address(struct net_device *dev, void *sa) -{ - dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data); - return 0; -} - - -static inline void tx_on(struct scc_priv *priv) -{ - int i, n; - unsigned long flags; - - if (priv->param.dma >= 0) { - n = (priv->chip == Z85230) ? 3 : 1; - /* Program DMA controller */ - flags = claim_dma_lock(); - set_dma_mode(priv->param.dma, DMA_MODE_WRITE); - set_dma_addr(priv->param.dma, - virt_to_bus(priv->tx_buf[priv->tx_tail]) + n); - set_dma_count(priv->param.dma, - priv->tx_len[priv->tx_tail] - n); - release_dma_lock(flags); - /* Enable TX underrun interrupt */ - write_scc(priv, R15, TxUIE); - /* Configure DREQ */ - if (priv->type == TYPE_TWIN) - outb((priv->param.dma == - 1) ? 
TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3, - priv->card_base + TWIN_DMA_CFG); - else - write_scc(priv, R1, - EXT_INT_ENAB | WT_FN_RDYFN | - WT_RDY_ENAB); - /* Write first byte(s) */ - spin_lock_irqsave(priv->register_lock, flags); - for (i = 0; i < n; i++) - write_scc_data(priv, - priv->tx_buf[priv->tx_tail][i], 1); - enable_dma(priv->param.dma); - spin_unlock_irqrestore(priv->register_lock, flags); - } else { - write_scc(priv, R15, TxUIE); - write_scc(priv, R1, - EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB); - tx_isr(priv); - } - /* Reset EOM latch if we do not have the AUTOEOM feature */ - if (priv->chip == Z8530) - write_scc(priv, R0, RES_EOM_L); -} - - -static inline void rx_on(struct scc_priv *priv) -{ - unsigned long flags; - - /* Clear RX FIFO */ - while (read_scc(priv, R0) & Rx_CH_AV) - read_scc_data(priv); - priv->rx_over = 0; - if (priv->param.dma >= 0) { - /* Program DMA controller */ - flags = claim_dma_lock(); - set_dma_mode(priv->param.dma, DMA_MODE_READ); - set_dma_addr(priv->param.dma, - virt_to_bus(priv->rx_buf[priv->rx_head])); - set_dma_count(priv->param.dma, BUF_SIZE); - release_dma_lock(flags); - enable_dma(priv->param.dma); - /* Configure PackeTwin DMA */ - if (priv->type == TYPE_TWIN) { - outb((priv->param.dma == - 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3, - priv->card_base + TWIN_DMA_CFG); - } - /* Sp. cond. intr. only, ext int enable, RX DMA enable */ - write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx | - WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB); - } else { - /* Reset current frame */ - priv->rx_ptr = 0; - /* Intr. on all Rx characters and Sp. cond., ext int enable */ - write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT | - WT_FN_RDYFN); - } - write_scc(priv, R0, ERR_RES); - write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB); -} - - -static inline void rx_off(struct scc_priv *priv) -{ - /* Disable receiver */ - write_scc(priv, R3, Rx8); - /* Disable DREQ / RX interrupt */ - if (priv->param.dma >= 0 && priv->type == TYPE_TWIN) - outb(0, priv->card_base + TWIN_DMA_CFG); - else - write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); - /* Disable DMA */ - if (priv->param.dma >= 0) - disable_dma(priv->param.dma); -} - - -static void start_timer(struct scc_priv *priv, int t, int r15) -{ - outb(priv->tmr_mode, priv->tmr_ctrl); - if (t == 0) { - tm_isr(priv); - } else if (t > 0) { - outb(t & 0xFF, priv->tmr_cnt); - outb((t >> 8) & 0xFF, priv->tmr_cnt); - if (priv->type != TYPE_TWIN) { - write_scc(priv, R15, r15 | CTSIE); - priv->rr0 |= CTS; - } - } -} - - -static inline unsigned char random(void) -{ - /* See "Numerical Recipes in C", second edition, p. 284 */ - rand = rand * 1664525L + 1013904223L; - return (unsigned char) (rand >> 24); -} - -static inline void z8530_isr(struct scc_info *info) -{ - int is, i = 100; - - while ((is = read_scc(&info->priv[0], R3)) && i--) { - if (is & CHARxIP) { - rx_isr(&info->priv[0]); - } else if (is & CHATxIP) { - tx_isr(&info->priv[0]); - } else if (is & CHAEXT) { - es_isr(&info->priv[0]); - } else if (is & CHBRxIP) { - rx_isr(&info->priv[1]); - } else if (is & CHBTxIP) { - tx_isr(&info->priv[1]); - } else { - es_isr(&info->priv[1]); - } - write_scc(&info->priv[0], R0, RES_H_IUS); - i++; - } - if (i < 0) { - printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n", - is); - } - /* Ok, no interrupts pending from this 8530. The INT line should - be inactive now. 
*/ -} - - -static irqreturn_t scc_isr(int irq, void *dev_id) -{ - struct scc_info *info = dev_id; - - spin_lock(info->priv[0].register_lock); - /* At this point interrupts are enabled, and the interrupt under service - is already acknowledged, but masked off. - - Interrupt processing: We loop until we know that the IRQ line is - low. If another positive edge occurs afterwards during the ISR, - another interrupt will be triggered by the interrupt controller - as soon as the IRQ level is enabled again (see asm/irq.h). - - Bottom-half handlers will be processed after scc_isr(). This is - important, since we only have small ringbuffers and want new data - to be fetched/delivered immediately. */ - - if (info->priv[0].type == TYPE_TWIN) { - int is, card_base = info->priv[0].card_base; - while ((is = ~inb(card_base + TWIN_INT_REG)) & - TWIN_INT_MSK) { - if (is & TWIN_SCC_MSK) { - z8530_isr(info); - } else if (is & TWIN_TMR1_MSK) { - inb(card_base + TWIN_CLR_TMR1); - tm_isr(&info->priv[0]); - } else { - inb(card_base + TWIN_CLR_TMR2); - tm_isr(&info->priv[1]); - } - } - } else - z8530_isr(info); - spin_unlock(info->priv[0].register_lock); - return IRQ_HANDLED; -} - - -static void rx_isr(struct scc_priv *priv) -{ - if (priv->param.dma >= 0) { - /* Check special condition and perform error reset. See 2.4.7.5. */ - special_condition(priv, read_scc(priv, R1)); - write_scc(priv, R0, ERR_RES); - } else { - /* Check special condition for each character. Error reset not necessary. - Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */ - int rc; - while (read_scc(priv, R0) & Rx_CH_AV) { - rc = read_scc(priv, R1); - if (priv->rx_ptr < BUF_SIZE) - priv->rx_buf[priv->rx_head][priv-> - rx_ptr++] = - read_scc_data(priv); - else { - priv->rx_over = 2; - read_scc_data(priv); - } - special_condition(priv, rc); - } - } -} - - -static void special_condition(struct scc_priv *priv, int rc) -{ - int cb; - unsigned long flags; - - /* See Figure 2-15. Only overrun and EOF need to be checked. */ - - if (rc & Rx_OVR) { - /* Receiver overrun */ - priv->rx_over = 1; - if (priv->param.dma < 0) - write_scc(priv, R0, ERR_RES); - } else if (rc & END_FR) { - /* End of frame. 
Get byte count */ - if (priv->param.dma >= 0) { - flags = claim_dma_lock(); - cb = BUF_SIZE - get_dma_residue(priv->param.dma) - - 2; - release_dma_lock(flags); - } else { - cb = priv->rx_ptr - 2; - } - if (priv->rx_over) { - /* We had an overrun */ - priv->dev->stats.rx_errors++; - if (priv->rx_over == 2) - priv->dev->stats.rx_length_errors++; - else - priv->dev->stats.rx_fifo_errors++; - priv->rx_over = 0; - } else if (rc & CRC_ERR) { - /* Count invalid CRC only if packet length >= minimum */ - if (cb >= 15) { - priv->dev->stats.rx_errors++; - priv->dev->stats.rx_crc_errors++; - } - } else { - if (cb >= 15) { - if (priv->rx_count < NUM_RX_BUF - 1) { - /* Put good frame in FIFO */ - priv->rx_len[priv->rx_head] = cb; - priv->rx_head = - (priv->rx_head + - 1) % NUM_RX_BUF; - priv->rx_count++; - schedule_work(&priv->rx_work); - } else { - priv->dev->stats.rx_errors++; - priv->dev->stats.rx_over_errors++; - } - } - } - /* Get ready for new frame */ - if (priv->param.dma >= 0) { - flags = claim_dma_lock(); - set_dma_addr(priv->param.dma, - virt_to_bus(priv->rx_buf[priv->rx_head])); - set_dma_count(priv->param.dma, BUF_SIZE); - release_dma_lock(flags); - } else { - priv->rx_ptr = 0; - } - } -} - - -static void rx_bh(struct work_struct *ugli_api) -{ - struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work); - int i = priv->rx_tail; - int cb; - unsigned long flags; - struct sk_buff *skb; - unsigned char *data; - - spin_lock_irqsave(&priv->ring_lock, flags); - while (priv->rx_count) { - spin_unlock_irqrestore(&priv->ring_lock, flags); - cb = priv->rx_len[i]; - /* Allocate buffer */ - skb = dev_alloc_skb(cb + 1); - if (skb == NULL) { - /* Drop packet */ - priv->dev->stats.rx_dropped++; - } else { - /* Fill buffer */ - data = skb_put(skb, cb + 1); - data[0] = 0; - memcpy(&data[1], priv->rx_buf[i], cb); - skb->protocol = ax25_type_trans(skb, priv->dev); - netif_rx(skb); - priv->dev->stats.rx_packets++; - priv->dev->stats.rx_bytes += cb; - } - spin_lock_irqsave(&priv->ring_lock, flags); - /* Move tail */ - priv->rx_tail = i = (i + 1) % NUM_RX_BUF; - priv->rx_count--; - } - spin_unlock_irqrestore(&priv->ring_lock, flags); -} - - -static void tx_isr(struct scc_priv *priv) -{ - int i = priv->tx_tail, p = priv->tx_ptr; - - /* Suspend TX interrupts if we don't want to send anything. - See Figure 2-22. */ - if (p == priv->tx_len[i]) { - write_scc(priv, R0, RES_Tx_P); - return; - } - - /* Write characters */ - while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) { - write_scc_data(priv, priv->tx_buf[i][p++], 0); - } - - /* Reset EOM latch of Z8530 */ - if (!priv->tx_ptr && p && priv->chip == Z8530) - write_scc(priv, R0, RES_EOM_L); - - priv->tx_ptr = p; -} - - -static void es_isr(struct scc_priv *priv) -{ - int i, rr0, drr0, res; - unsigned long flags; - - /* Read status, reset interrupt bit (open latches) */ - rr0 = read_scc(priv, R0); - write_scc(priv, R0, RES_EXT_INT); - drr0 = priv->rr0 ^ rr0; - priv->rr0 = rr0; - - /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since - it might have already been cleared again by AUTOEOM. 
*/ - if (priv->state == TX_DATA) { - /* Get remaining bytes */ - i = priv->tx_tail; - if (priv->param.dma >= 0) { - disable_dma(priv->param.dma); - flags = claim_dma_lock(); - res = get_dma_residue(priv->param.dma); - release_dma_lock(flags); - } else { - res = priv->tx_len[i] - priv->tx_ptr; - priv->tx_ptr = 0; - } - /* Disable DREQ / TX interrupt */ - if (priv->param.dma >= 0 && priv->type == TYPE_TWIN) - outb(0, priv->card_base + TWIN_DMA_CFG); - else - write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN); - if (res) { - /* Update packet statistics */ - priv->dev->stats.tx_errors++; - priv->dev->stats.tx_fifo_errors++; - /* Other underrun interrupts may already be waiting */ - write_scc(priv, R0, RES_EXT_INT); - write_scc(priv, R0, RES_EXT_INT); - } else { - /* Update packet statistics */ - priv->dev->stats.tx_packets++; - priv->dev->stats.tx_bytes += priv->tx_len[i]; - /* Remove frame from FIFO */ - priv->tx_tail = (i + 1) % NUM_TX_BUF; - priv->tx_count--; - /* Inform upper layers */ - netif_wake_queue(priv->dev); - } - /* Switch state */ - write_scc(priv, R15, 0); - if (priv->tx_count && - time_is_after_jiffies(priv->tx_start + priv->param.txtimeout)) { - priv->state = TX_PAUSE; - start_timer(priv, priv->param.txpause, 0); - } else { - priv->state = TX_TAIL; - start_timer(priv, priv->param.txtail, 0); - } - } - - /* DCD transition */ - if (drr0 & DCD) { - if (rr0 & DCD) { - switch (priv->state) { - case IDLE: - case WAIT: - priv->state = DCD_ON; - write_scc(priv, R15, 0); - start_timer(priv, priv->param.dcdon, 0); - } - } else { - switch (priv->state) { - case RX_ON: - rx_off(priv); - priv->state = DCD_OFF; - write_scc(priv, R15, 0); - start_timer(priv, priv->param.dcdoff, 0); - } - } - } - - /* CTS transition */ - if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN) - tm_isr(priv); - -} - - -static void tm_isr(struct scc_priv *priv) -{ - switch (priv->state) { - case TX_HEAD: - case TX_PAUSE: - tx_on(priv); - priv->state = TX_DATA; - break; - case TX_TAIL: - write_scc(priv, R5, TxCRC_ENAB | Tx8); - priv->state = RTS_OFF; - if (priv->type != TYPE_TWIN) - write_scc(priv, R15, 0); - start_timer(priv, priv->param.rtsoff, 0); - break; - case RTS_OFF: - write_scc(priv, R15, DCDIE); - priv->rr0 = read_scc(priv, R0); - if (priv->rr0 & DCD) { - priv->dev->stats.collisions++; - rx_on(priv); - priv->state = RX_ON; - } else { - priv->state = WAIT; - start_timer(priv, priv->param.waittime, DCDIE); - } - break; - case WAIT: - if (priv->tx_count) { - priv->state = TX_HEAD; - priv->tx_start = jiffies; - write_scc(priv, R5, - TxCRC_ENAB | RTS | TxENAB | Tx8); - write_scc(priv, R15, 0); - start_timer(priv, priv->param.txdelay, 0); - } else { - priv->state = IDLE; - if (priv->type != TYPE_TWIN) - write_scc(priv, R15, DCDIE); - } - break; - case DCD_ON: - case DCD_OFF: - write_scc(priv, R15, DCDIE); - priv->rr0 = read_scc(priv, R0); - if (priv->rr0 & DCD) { - rx_on(priv); - priv->state = RX_ON; - } else { - priv->state = WAIT; - start_timer(priv, - random() / priv->param.persist * - priv->param.slottime, DCDIE); - } - break; - } -} -- cgit v1.2.3 From c6101dd7ffb8b7f940e3fc4a22ce4023f8184f0d Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 26 Apr 2022 14:40:48 +0530 Subject: net: dsa: ksz9477: move get_stats64 to ksz_common.c The mib counters for the ksz9477 is same for the ksz9477 switch and LAN937x switch. Hence moving it to ksz_common.c file in order to have it generic function. The DSA hook get_stats64 now can call ksz_get_stats64. 
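(For illustration only, not part of this patch: once the helpers live in ksz_common.c, any KSZ-family driver can point its ops tables at them. The lan937x_* names below are placeholders for whichever driver reuses the code; only ksz_r_mib_stats64 and ksz_get_stats64 come from this change.)

static const struct ksz_dev_ops lan937x_dev_ops = {
	/* fill rtnl_link_stats64 from the raw MIB counters (shared helper) */
	.r_mib_stat64	= ksz_r_mib_stats64,
};

static const struct dsa_switch_ops lan937x_switch_ops = {
	/* DSA .get_stats64 hook, now served from ksz_common.c */
	.get_stats64	= ksz_get_stats64,
};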
Signed-off-by: Arun Ramadoss Reviewed-by: Oleksij Rempel Reviewed-by: Florian Fainelli Reviewed-by: Vladimir Oltean Link: https://lore.kernel.org/r/20220426091048.9311-1-arun.ramadoss@microchip.com Signed-off-by: Jakub Kicinski --- drivers/net/dsa/microchip/ksz9477.c | 98 +--------------------------------- drivers/net/dsa/microchip/ksz_common.c | 96 +++++++++++++++++++++++++++++++++ drivers/net/dsa/microchip/ksz_common.h | 3 ++ 3 files changed, 101 insertions(+), 96 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 4f617fee9a4e..48c90e4cda30 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -65,100 +65,6 @@ static const struct { { 0x83, "tx_discards" }, }; -struct ksz9477_stats_raw { - u64 rx_hi; - u64 rx_undersize; - u64 rx_fragments; - u64 rx_oversize; - u64 rx_jabbers; - u64 rx_symbol_err; - u64 rx_crc_err; - u64 rx_align_err; - u64 rx_mac_ctrl; - u64 rx_pause; - u64 rx_bcast; - u64 rx_mcast; - u64 rx_ucast; - u64 rx_64_or_less; - u64 rx_65_127; - u64 rx_128_255; - u64 rx_256_511; - u64 rx_512_1023; - u64 rx_1024_1522; - u64 rx_1523_2000; - u64 rx_2001; - u64 tx_hi; - u64 tx_late_col; - u64 tx_pause; - u64 tx_bcast; - u64 tx_mcast; - u64 tx_ucast; - u64 tx_deferred; - u64 tx_total_col; - u64 tx_exc_col; - u64 tx_single_col; - u64 tx_mult_col; - u64 rx_total; - u64 tx_total; - u64 rx_discards; - u64 tx_discards; -}; - -static void ksz9477_r_mib_stats64(struct ksz_device *dev, int port) -{ - struct rtnl_link_stats64 *stats; - struct ksz9477_stats_raw *raw; - struct ksz_port_mib *mib; - - mib = &dev->ports[port].mib; - stats = &mib->stats64; - raw = (struct ksz9477_stats_raw *)mib->counters; - - spin_lock(&mib->stats64_lock); - - stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast; - stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast; - - /* HW counters are counting bytes + FCS which is not acceptable - * for rtnl_link_stats64 interface - */ - stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; - stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; - - stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + - raw->rx_oversize; - - stats->rx_crc_errors = raw->rx_crc_err; - stats->rx_frame_errors = raw->rx_align_err; - stats->rx_dropped = raw->rx_discards; - stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + - stats->rx_frame_errors + stats->rx_dropped; - - stats->tx_window_errors = raw->tx_late_col; - stats->tx_fifo_errors = raw->tx_discards; - stats->tx_aborted_errors = raw->tx_exc_col; - stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + - stats->tx_aborted_errors; - - stats->multicast = raw->rx_mcast; - stats->collisions = raw->tx_total_col; - - spin_unlock(&mib->stats64_lock); -} - -static void ksz9477_get_stats64(struct dsa_switch *ds, int port, - struct rtnl_link_stats64 *s) -{ - struct ksz_device *dev = ds->priv; - struct ksz_port_mib *mib; - - mib = &dev->ports[port].mib; - - spin_lock(&mib->stats64_lock); - memcpy(s, &mib->stats64, sizeof(*s)); - spin_unlock(&mib->stats64_lock); -} - static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { regmap_update_bits(dev->regmap[0], addr, bits, set ? 
bits : 0); @@ -1462,7 +1368,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .port_mdb_del = ksz9477_port_mdb_del, .port_mirror_add = ksz9477_port_mirror_add, .port_mirror_del = ksz9477_port_mirror_del, - .get_stats64 = ksz9477_get_stats64, + .get_stats64 = ksz_get_stats64, .port_change_mtu = ksz9477_change_mtu, .port_max_mtu = ksz9477_max_mtu, }; @@ -1653,7 +1559,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, - .r_mib_stat64 = ksz9477_r_mib_stats64, + .r_mib_stat64 = ksz_r_mib_stats64, .freeze_mib = ksz9477_freeze_mib, .port_init_cnt = ksz9477_port_init_cnt, .shutdown = ksz9477_reset_switch, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 9b9f570ebb0b..10f127b09e58 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -20,6 +20,102 @@ #include "ksz_common.h" +struct ksz_stats_raw { + u64 rx_hi; + u64 rx_undersize; + u64 rx_fragments; + u64 rx_oversize; + u64 rx_jabbers; + u64 rx_symbol_err; + u64 rx_crc_err; + u64 rx_align_err; + u64 rx_mac_ctrl; + u64 rx_pause; + u64 rx_bcast; + u64 rx_mcast; + u64 rx_ucast; + u64 rx_64_or_less; + u64 rx_65_127; + u64 rx_128_255; + u64 rx_256_511; + u64 rx_512_1023; + u64 rx_1024_1522; + u64 rx_1523_2000; + u64 rx_2001; + u64 tx_hi; + u64 tx_late_col; + u64 tx_pause; + u64 tx_bcast; + u64 tx_mcast; + u64 tx_ucast; + u64 tx_deferred; + u64 tx_total_col; + u64 tx_exc_col; + u64 tx_single_col; + u64 tx_mult_col; + u64 rx_total; + u64 tx_total; + u64 rx_discards; + u64 tx_discards; +}; + +void ksz_r_mib_stats64(struct ksz_device *dev, int port) +{ + struct rtnl_link_stats64 *stats; + struct ksz_stats_raw *raw; + struct ksz_port_mib *mib; + + mib = &dev->ports[port].mib; + stats = &mib->stats64; + raw = (struct ksz_stats_raw *)mib->counters; + + spin_lock(&mib->stats64_lock); + + stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast; + stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast; + + /* HW counters are counting bytes + FCS which is not acceptable + * for rtnl_link_stats64 interface + */ + stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN; + stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN; + + stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments + + raw->rx_oversize; + + stats->rx_crc_errors = raw->rx_crc_err; + stats->rx_frame_errors = raw->rx_align_err; + stats->rx_dropped = raw->rx_discards; + stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + + stats->rx_frame_errors + stats->rx_dropped; + + stats->tx_window_errors = raw->tx_late_col; + stats->tx_fifo_errors = raw->tx_discards; + stats->tx_aborted_errors = raw->tx_exc_col; + stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors + + stats->tx_aborted_errors; + + stats->multicast = raw->rx_mcast; + stats->collisions = raw->tx_total_col; + + spin_unlock(&mib->stats64_lock); +} +EXPORT_SYMBOL_GPL(ksz_r_mib_stats64); + +void ksz_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port_mib *mib; + + mib = &dev->ports[port].mib; + + spin_lock(&mib->stats64_lock); + memcpy(s, &mib->stats64, sizeof(*s)); + spin_unlock(&mib->stats64_lock); +} +EXPORT_SYMBOL_GPL(ksz_get_stats64); + void ksz_update_port_member(struct ksz_device *dev, int port) { struct ksz_port *p = &dev->ports[port]; diff --git 
a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 4d978832c448..28cda79b090f 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -151,6 +151,9 @@ int ksz9477_switch_register(struct ksz_device *dev); void ksz_update_port_member(struct ksz_device *dev, int port); void ksz_init_mib_timer(struct ksz_device *dev); +void ksz_r_mib_stats64(struct ksz_device *dev, int port); +void ksz_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s); /* Common DSA access functions */ -- cgit v1.2.3 From 1585362250fed6b0c166e176b628b335611d8f19 Mon Sep 17 00:00:00 2001 From: Dylan Hung Date: Wed, 27 Apr 2022 11:55:00 +0800 Subject: net: mdio: add reset control for Aspeed MDIO Add reset assertion/deassertion for Aspeed MDIO. There are 4 MDIO controllers embedded in Aspeed AST2600 SOC and share one reset control register SCU50[3]. To work with old DT blobs which don't have the reset property, devm_reset_control_get_optional_shared is used in this change. Signed-off-by: Dylan Hung Reviewed-by: Philipp Zabel Reviewed-by: Andrew Lunn Reviewed-by: Andrew Jeffery Signed-off-by: Paolo Abeni --- drivers/net/mdio/mdio-aspeed.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index 7aa49827196f..944d005d2bd1 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -41,6 +42,7 @@ struct aspeed_mdio { void __iomem *base; + struct reset_control *reset; }; static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad, @@ -174,6 +176,12 @@ static int aspeed_mdio_probe(struct platform_device *pdev) if (IS_ERR(ctx->base)) return PTR_ERR(ctx->base); + ctx->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL); + if (IS_ERR(ctx->reset)) + return PTR_ERR(ctx->reset); + + reset_control_deassert(ctx->reset); + bus->name = DRV_NAME; snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); bus->parent = &pdev->dev; @@ -184,6 +192,7 @@ static int aspeed_mdio_probe(struct platform_device *pdev) rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + reset_control_assert(ctx->reset); return rc; } @@ -194,7 +203,11 @@ static int aspeed_mdio_probe(struct platform_device *pdev) static int aspeed_mdio_remove(struct platform_device *pdev) { - mdiobus_unregister(platform_get_drvdata(pdev)); + struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev); + struct aspeed_mdio *ctx = bus->priv; + + reset_control_assert(ctx->reset); + mdiobus_unregister(bus); return 0; } -- cgit v1.2.3 From 07caad0bb1f8963e6e99b665bc17842d93762469 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Wed, 27 Apr 2022 08:30:51 +0200 Subject: net: phy: Deduplicate interrupt disablement on PHY attach phy_attach_direct() first calls phy_init_hw() (which restores interrupt settings through ->config_intr()), then calls phy_disable_interrupts(). So if phydev->interrupts was previously set to 1, interrupts are briefly enabled, then disabled, which seems nonsensical. If it was previously set to 0, interrupts are disabled twice, which is equally nonsensical. Deduplicate interrupt disablement. 
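(Abridged sketch of the resulting attach flow, for orientation only; unrelated steps are omitted.)

	/* Mark interrupts disabled up front, so the ->config_intr() call made
	 * from phy_init_hw() already programs the PHY with interrupts off and
	 * the later phy_disable_interrupts() call can be dropped.
	 */
	phydev->interrupts = PHY_INTERRUPT_DISABLED;

	err = phy_init_hw(phydev);
	if (err)
		goto error;

	phy_resume(phydev);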
Signed-off-by: Lukas Wunner Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/805ccdc606bd8898d59931bd4c7c68537ed6e550.1651040826.git.lukas@wunner.de Signed-off-by: Jakub Kicinski --- drivers/net/phy/phy_device.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8406ac739def..f867042b2eb4 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1449,6 +1449,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, phydev->state = PHY_READY; + phydev->interrupts = PHY_INTERRUPT_DISABLED; + /* Port is set to PORT_TP by default and the actual PHY driver will set * it to different value depending on the PHY configuration. If we have * the generic PHY driver we can't figure it out, thus set the old @@ -1471,10 +1473,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (err) goto error; - err = phy_disable_interrupts(phydev); - if (err) - return err; - phy_resume(phydev); phy_led_triggers_register(phydev); -- cgit v1.2.3 From dde2daa0a279623a6f769b258339df744cc0fdd6 Mon Sep 17 00:00:00 2001 From: Volodymyr Mytnyk Date: Wed, 27 Apr 2022 15:05:48 +0300 Subject: net: prestera: add police action support - Add HW api to configure policer: - SR TCM policer mode is only supported for now. - Policer ingress/egress direction support. - Add police action support into flower Signed-off-by: Volodymyr Mytnyk Link: https://lore.kernel.org/r/1651061148-21321-1-git-send-email-volodymyr.mytnyk@plvision.eu Signed-off-by: Jakub Kicinski --- .../net/ethernet/marvell/prestera/prestera_acl.c | 35 +++++++++- .../net/ethernet/marvell/prestera/prestera_acl.h | 12 ++++ .../ethernet/marvell/prestera/prestera_flower.c | 10 +++ .../net/ethernet/marvell/prestera/prestera_hw.c | 81 ++++++++++++++++++++++ .../net/ethernet/marvell/prestera/prestera_hw.h | 13 ++++ 5 files changed, 149 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c index e5627782fac6..3a141f2db812 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c @@ -34,6 +34,10 @@ struct prestera_acl_rule_entry { struct { u8 valid:1; } accept, drop, trap; + struct { + u8 valid:1; + struct prestera_acl_action_police i; + } police; struct { struct prestera_acl_action_jump i; u8 valid:1; @@ -533,6 +537,12 @@ static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw, act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP; act_num++; } + /* police */ + if (e->police.valid) { + act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE; + act_hw[act_num].police = e->police.i; + act_num++; + } /* jump */ if (e->jump.valid) { act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP; @@ -557,6 +567,9 @@ __prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw, { /* counter */ prestera_counter_put(sw->counter, e->counter.block, e->counter.id); + /* police */ + if (e->police.valid) + prestera_hw_policer_release(sw, e->police.i.id); } void prestera_acl_rule_entry_destroy(struct prestera_acl *acl, @@ -579,6 +592,8 @@ __prestera_acl_rule_entry_act_construct(struct prestera_switch *sw, struct prestera_acl_rule_entry *e, struct prestera_acl_rule_entry_arg *arg) { + int err; + /* accept */ e->accept.valid = arg->accept.valid; /* drop */ @@ -588,10 +603,26 @@ __prestera_acl_rule_entry_act_construct(struct 
prestera_switch *sw, /* jump */ e->jump.valid = arg->jump.valid; e->jump.i = arg->jump.i; + /* police */ + if (arg->police.valid) { + u8 type = arg->police.ingress ? PRESTERA_POLICER_TYPE_INGRESS : + PRESTERA_POLICER_TYPE_EGRESS; + + err = prestera_hw_policer_create(sw, type, &e->police.i.id); + if (err) + goto err_out; + + err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id, + arg->police.rate, + arg->police.burst); + if (err) { + prestera_hw_policer_release(sw, e->police.i.id); + goto err_out; + } + e->police.valid = arg->police.valid; + } /* counter */ if (arg->count.valid) { - int err; - err = prestera_counter_get(sw->counter, arg->count.client, &e->counter.block, &e->counter.id); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h index 6d2ad27682d1..f963e1e0c0f0 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h @@ -56,6 +56,7 @@ enum prestera_acl_rule_action { PRESTERA_ACL_RULE_ACTION_TRAP = 2, PRESTERA_ACL_RULE_ACTION_JUMP = 5, PRESTERA_ACL_RULE_ACTION_COUNT = 7, + PRESTERA_ACL_RULE_ACTION_POLICE = 8, PRESTERA_ACL_RULE_ACTION_MAX }; @@ -74,6 +75,10 @@ struct prestera_acl_action_jump { u32 index; }; +struct prestera_acl_action_police { + u32 id; +}; + struct prestera_acl_action_count { u32 id; }; @@ -86,6 +91,7 @@ struct prestera_acl_rule_entry_key { struct prestera_acl_hw_action_info { enum prestera_acl_rule_action id; union { + struct prestera_acl_action_police police; struct prestera_acl_action_count count; struct prestera_acl_action_jump jump; }; @@ -105,6 +111,12 @@ struct prestera_acl_rule_entry_arg { struct prestera_acl_action_jump i; u8 valid:1; } jump; + struct { + u8 valid:1; + u64 rate; + u64 burst; + bool ingress; + } police; struct { u8 valid:1; u32 client; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index c12b09ac6559..d43e503c644f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -108,6 +108,16 @@ static int prestera_flower_parse_actions(struct prestera_flow_block *block, rule->re_arg.trap.valid = 1; break; + case FLOW_ACTION_POLICE: + if (rule->re_arg.police.valid) + return -EEXIST; + + rule->re_arg.police.valid = 1; + rule->re_arg.police.rate = + act->police.rate_bytes_ps; + rule->re_arg.police.burst = act->police.burst; + rule->re_arg.police.ingress = true; + break; case FLOW_ACTION_GOTO: err = prestera_flower_parse_goto_action(block, rule, chain_index, diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index c66cc929c820..79fd3cac539d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -74,6 +74,10 @@ enum prestera_cmd_type_t { PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102, PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103, + PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500, + PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501, + PRESTERA_CMD_TYPE_POLICER_SET = 0x1502, + PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000, PRESTERA_CMD_TYPE_ACK = 0x10000, @@ -163,6 +167,10 @@ enum { PRESTERA_FC_SYMM_ASYMM, }; +enum { + PRESTERA_POLICER_MODE_SR_TCM +}; + enum { PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0, PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1, @@ -428,6 +436,9 @@ struct prestera_msg_acl_action { struct { __le32 index; } jump; + struct { + __le32 id; + } 
police; struct { __le32 id; } count; @@ -570,6 +581,26 @@ struct mvsw_msg_cpu_code_counter_ret { __le64 packet_count; }; +struct prestera_msg_policer_req { + struct prestera_msg_cmd cmd; + __le32 id; + union { + struct { + __le64 cir; + __le32 cbs; + } __packed sr_tcm; /* make sure always 12 bytes size */ + __le32 reserved[6]; + }; + u8 mode; + u8 type; + u8 pad[2]; +}; + +struct prestera_msg_policer_resp { + struct prestera_msg_ret ret; + __le32 id; +}; + struct prestera_msg_event { __le16 type; __le16 id; @@ -622,6 +653,7 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36); BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8); BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36); + BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36); /* structure that are part of req/resp fw messages */ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16); @@ -640,6 +672,7 @@ static void prestera_hw_build_tests(void) BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24); BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12); BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12); + BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12); /* check events */ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20); @@ -1192,6 +1225,9 @@ prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action, case PRESTERA_ACL_RULE_ACTION_JUMP: action->jump.index = __cpu_to_le32(info->jump.index); break; + case PRESTERA_ACL_RULE_ACTION_POLICE: + action->police.id = __cpu_to_le32(info->police.id); + break; case PRESTERA_ACL_RULE_ACTION_COUNT: action->count.id = __cpu_to_le32(info->count.id); break; @@ -2163,3 +2199,48 @@ int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id, return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR, &req.cmd, sizeof(req)); } + +int prestera_hw_policer_create(struct prestera_switch *sw, u8 type, + u32 *policer_id) +{ + struct prestera_msg_policer_resp resp; + struct prestera_msg_policer_req req = { + .type = type + }; + int err; + + err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE, + &req.cmd, sizeof(req), &resp.ret, sizeof(resp)); + if (err) + return err; + + *policer_id = __le32_to_cpu(resp.id); + return 0; +} + +int prestera_hw_policer_release(struct prestera_switch *sw, + u32 policer_id) +{ + struct prestera_msg_policer_req req = { + .id = __cpu_to_le32(policer_id) + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE, + &req.cmd, sizeof(req)); +} + +int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw, + u32 policer_id, u64 cir, u32 cbs) +{ + struct prestera_msg_policer_req req = { + .mode = PRESTERA_POLICER_MODE_SR_TCM, + .id = __cpu_to_le32(policer_id), + .sr_tcm = { + .cir = __cpu_to_le64(cir), + .cbs = __cpu_to_le32(cbs) + } + }; + + return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET, + &req.cmd, sizeof(req)); +} diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h index fd896a8838bb..579d9ba23ffc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h @@ -107,6 +107,11 @@ enum { PRESTERA_STP_FORWARD, }; +enum { + PRESTERA_POLICER_TYPE_INGRESS, + PRESTERA_POLICER_TYPE_EGRESS +}; + enum prestera_hw_cpu_code_cnt_t { PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0, PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1, @@ -288,4 +293,12 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code, enum 
prestera_hw_cpu_code_cnt_t counter_type, u64 *packet_count); +/* Policer API */ +int prestera_hw_policer_create(struct prestera_switch *sw, u8 type, + u32 *policer_id); +int prestera_hw_policer_release(struct prestera_switch *sw, + u32 policer_id); +int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw, + u32 policer_id, u64 cir, u32 cbs); + #endif /* _PRESTERA_HW_H_ */ -- cgit v1.2.3 From 5da66099d6e28c66d24c49d9e791f64318c136a9 Mon Sep 17 00:00:00 2001 From: Nathan Rossi Date: Wed, 27 Apr 2022 13:09:28 +0000 Subject: net: dsa: mv88e6xxx: Single chip mode detection for MV88E6*41 The mv88e6xxx driver expects switches that are configured in single chip addressing mode to have the MDIO address configured as 0. This is due to the switch ADDR pins representing the single chip addressing mode as 0. However depending on the device (e.g. MV88E6*41) the switch does not respond on address 0 or any other address below 16 (the first port address) in single chip addressing mode. This allows for other devices to be on the same shared MDIO bus despite the switch being in single chip addressing mode. When using a switch that works this way it is not possible to configure switch driver as single chip addressing via device tree, along with another MDIO device on the same bus with address 0, as both devices would have the same address of 0 resulting in mdiobus_register_device -EBUSY errors for one of the devices with address 0. In order to support this configuration the switch node can have its MDIO address configured as 16 (the first address that the device responds to). During initialization the driver will treat this address similar to how address 0 is, however because this address is also a valid multi-chip address (in certain switch models, but not all) the driver will configure the SMI in single chip addressing mode and attempt to detect the switch model. If the device is configured in single chip addressing mode this will succeed and the initialization process can continue. If it fails to detect a valid model this is because the switch model register is not a valid register when in multi-chip mode, it will then fall back to the existing SMI initialization process using the MDIO address as the multi-chip mode address. This detection method is safe if the device is in either mode because the single chip addressing mode read is a direct SMI/MDIO read operation and has no side effects compared to the SMI writes required for the multi-chip addressing mode. In order to implement this change, the reset gpio configuration is moved to occur before any SMI initialization. This ensures that the device has the same/correct reset gpio state for both mv88e6xxx_smi_init calls. 
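(In short, the probe path now tries single chip addressing first and only falls back to the address-specific SMI setup if that fails; abridged from the diff below.)

	/* Harmless even in multi-chip mode: the single chip probe is a plain
	 * SMI/MDIO read with no side effects, and it is only attempted when
	 * the MDIO address is 16 (the first port address).
	 */
	err = mv88e6xxx_single_chip_detect(chip, mdiodev);
	if (err) {
		err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
		if (err)
			goto out;

		err = mv88e6xxx_detect(chip);
		if (err)
			goto out;
	}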
Signed-off-by: Nathan Rossi Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20220427130928.540007-1-nathan@nathanrossi.com Signed-off-by: Jakub Kicinski --- drivers/net/dsa/mv88e6xxx/chip.c | 46 ++++++++++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 64f4fdd02902..b8f956eece9c 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -6276,6 +6276,32 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip) return 0; } +static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip, + struct mdio_device *mdiodev) +{ + int err; + + /* dual_chip takes precedence over single/multi-chip modes */ + if (chip->info->dual_chip) + return -EINVAL; + + /* If the mdio addr is 16 indicating the first port address of a switch + * (e.g. mv88e6*41) in single chip addressing mode the device may be + * configured in single chip addressing mode. Setup the smi access as + * single chip addressing mode and attempt to detect the model of the + * switch, if this fails the device is not configured in single chip + * addressing mode. + */ + if (mdiodev->addr != 16) + return -EINVAL; + + err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0); + if (err) + return err; + + return mv88e6xxx_detect(chip); +} + static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) { struct mv88e6xxx_chip *chip; @@ -6959,10 +6985,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) chip->info = compat_info; - err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); - if (err) - goto out; - chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(chip->reset)) { err = PTR_ERR(chip->reset); @@ -6971,9 +6993,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (chip->reset) usleep_range(1000, 2000); - err = mv88e6xxx_detect(chip); - if (err) - goto out; + /* Detect if the device is configured in single chip addressing mode, + * otherwise continue with address specific smi init/detection. 
+ */ + err = mv88e6xxx_single_chip_detect(chip, mdiodev); + if (err) { + err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); + if (err) + goto out; + + err = mv88e6xxx_detect(chip); + if (err) + goto out; + } if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED) chip->tag_protocol = DSA_TAG_PROTO_EDSA; -- cgit v1.2.3 From 11dc130b4ee010396b5374300e03f0b6c729ce8c Mon Sep 17 00:00:00 2001 From: Yang Li Date: Wed, 27 Apr 2022 08:31:42 +0800 Subject: rtw89: remove unneeded semicolon Eliminate the following coccicheck warning: ./drivers/net/wireless/realtek/rtw89/rtw8852c.c:2556:2-3: Unneeded semicolon Reported-by: Abaci Robot Signed-off-by: Yang Li Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220427003142.107916-1-yang.lee@linux.alibaba.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 858611c64e6b..275568468212 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2553,7 +2553,7 @@ do { \ default: val = arg.gnt_bt.data; break; - }; + } __write_ctrl(R_AX_PWR_COEXT_CTRL, B_AX_TXAGC_BT_MASK, val, B_AX_TXAGC_BT_EN, arg.ctrl_gnt_bt != 0xffff); -- cgit v1.2.3 From 2950833f10cfa601813262e1d9c8473f9415681b Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Wed, 27 Apr 2022 10:37:32 +0300 Subject: ath9k: hif_usb: simplify if-if to if-else MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use if and else instead of if(A) and if (!A). Signed-off-by: Wan Jiabing Acked-by: Toke Høiland-Jørgensen Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220424094441.104937-1-wanjiabing@vivo.com --- drivers/net/wireless/ath/ath9k/hif_usb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index f06eec99de68..518deb5098a2 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -368,10 +368,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) __skb_queue_head_init(&tx_buf->skb_queue); list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); hif_dev->tx.tx_buf_cnt++; - } - - if (!ret) + } else { TX_STAT_INC(buf_queued); + } return ret; } -- cgit v1.2.3 From b72a4aff947ba807177bdabb43debaf2c66bee05 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 27 Apr 2022 10:37:33 +0300 Subject: ath10k: skip ath10k_halt during suspend for driver state RESTARTING Double free crash is observed when FW recovery(caused by wmi timeout/crash) is followed by immediate suspend event. The FW recovery is triggered by ath10k_core_restart() which calls driver clean up via ath10k_halt(). When the suspend event occurs between the FW recovery, the restart worker thread is put into frozen state until suspend completes. The suspend event triggers ath10k_stop() which again triggers ath10k_halt() The double invocation of ath10k_halt() causes ath10k_htt_rx_free() to be called twice(Note: ath10k_htt_rx_alloc was not called by restart worker thread because of its frozen state), causing the crash. To fix this, during the suspend flow, skip call to ath10k_halt() in ath10k_stop() when the current driver state is ATH10K_STATE_RESTARTING. 
Also, for driver state ATH10K_STATE_RESTARTING, call ath10k_wait_for_suspend() in ath10k_stop(). This is because call to ath10k_wait_for_suspend() is skipped later in [ath10k_halt() > ath10k_core_stop()] for the driver state ATH10K_STATE_RESTARTING. The frozen restart worker thread will be cancelled during resume when the device comes out of suspend. Below is the crash stack for reference: [ 428.469167] ------------[ cut here ]------------ [ 428.469180] kernel BUG at mm/slub.c:4150! [ 428.469193] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI [ 428.469219] Workqueue: events_unbound async_run_entry_fn [ 428.469230] RIP: 0010:kfree+0x319/0x31b [ 428.469241] RSP: 0018:ffffa1fac015fc30 EFLAGS: 00010246 [ 428.469247] RAX: ffffedb10419d108 RBX: ffff8c05262b0000 [ 428.469252] RDX: ffff8c04a8c07000 RSI: 0000000000000000 [ 428.469256] RBP: ffffa1fac015fc78 R08: 0000000000000000 [ 428.469276] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 428.469285] Call Trace: [ 428.469295] ? dma_free_attrs+0x5f/0x7d [ 428.469320] ath10k_core_stop+0x5b/0x6f [ 428.469336] ath10k_halt+0x126/0x177 [ 428.469352] ath10k_stop+0x41/0x7e [ 428.469387] drv_stop+0x88/0x10e [ 428.469410] __ieee80211_suspend+0x297/0x411 [ 428.469441] rdev_suspend+0x6e/0xd0 [ 428.469462] wiphy_suspend+0xb1/0x105 [ 428.469483] ? name_show+0x2d/0x2d [ 428.469490] dpm_run_callback+0x8c/0x126 [ 428.469511] ? name_show+0x2d/0x2d [ 428.469517] __device_suspend+0x2e7/0x41b [ 428.469523] async_suspend+0x1f/0x93 [ 428.469529] async_run_entry_fn+0x3d/0xd1 [ 428.469535] process_one_work+0x1b1/0x329 [ 428.469541] worker_thread+0x213/0x372 [ 428.469547] kthread+0x150/0x15f [ 428.469552] ? pr_cont_work+0x58/0x58 [ 428.469558] ? kthread_blkcg+0x31/0x31 Tested-on: QCA6174 hw3.2 PCI WLAN.RM.4.4.1-00288-QCARMSWPZ-1 Co-developed-by: Wen Gong Signed-off-by: Wen Gong Signed-off-by: Abhishek Kumar Reviewed-by: Brian Norris Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220426221859.v2.1.I650b809482e1af8d0156ed88b5dc2677a0711d46@changeid --- drivers/net/wireless/ath/ath10k/mac.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a46f56a1efca..75d7a004a115 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5338,13 +5338,29 @@ err: static void ath10k_stop(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; + u32 opt; ath10k_drain_tx(ar); mutex_lock(&ar->conf_mutex); if (ar->state != ATH10K_STATE_OFF) { - if (!ar->hw_rfkill_on) - ath10k_halt(ar); + if (!ar->hw_rfkill_on) { + /* If the current driver state is RESTARTING but not yet + * fully RESTARTED because of incoming suspend event, + * then ath10k_halt() is already called via + * ath10k_core_restart() and should not be called here. + */ + if (ar->state != ATH10K_STATE_RESTARTING) { + ath10k_halt(ar); + } else { + /* Suspending here, because when in RESTARTING + * state, ath10k_core_stop() skips + * ath10k_wait_for_suspend(). + */ + opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR; + ath10k_wait_for_suspend(ar, opt); + } + } ar->state = ATH10K_STATE_OFF; } mutex_unlock(&ar->conf_mutex); -- cgit v1.2.3 From 5f012b40ef639343a976553bf3cc26dd0474756e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:09 -0700 Subject: eth: remove copies of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. 
Drop the special defines in a bunch of drivers where the removal is relatively simple so grouping into one patch does not impact reviewability. Signed-off-by: Jakub Kicinski Reviewed-by: Paul Durrant Signed-off-by: David S. Miller --- drivers/net/ethernet/cortina/gemini.c | 4 +--- drivers/net/ethernet/marvell/skge.c | 3 +-- drivers/net/ethernet/marvell/sky2.c | 3 +-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 3 +-- drivers/net/ethernet/ti/davinci_emac.c | 3 +-- drivers/net/ethernet/ti/netcp_core.c | 5 ++--- drivers/net/xen-netback/interface.c | 3 +-- 7 files changed, 8 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 8014eb33937c..9e6de2f968fa 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -68,7 +68,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define DEFAULT_GMAC_RXQ_ORDER 9 #define DEFAULT_GMAC_TXQ_ORDER 8 #define DEFAULT_RX_BUF_ORDER 11 -#define DEFAULT_NAPI_WEIGHT 64 #define TX_MAX_FRAGS 16 #define TX_QUEUE_NUM 1 /* max: 6 */ #define RX_MAX_ALLOC_ORDER 2 @@ -2472,8 +2471,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) netdev->max_mtu = 10236 - VLAN_ETH_HLEN; port->freeq_refill = 0; - netif_napi_add(netdev, &port->napi, gmac_napi_poll, - DEFAULT_NAPI_WEIGHT); + netif_napi_add(netdev, &port->napi, gmac_napi_poll, NAPI_POLL_WEIGHT); ret = of_get_mac_address(np, mac); if (!ret) { diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index cf03c67fbf40..c1e985416c0e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -50,7 +50,6 @@ #define PHY_RETRIES 1000 #define ETH_JUMBO_MTU 9000 #define TX_WATCHDOG (5 * HZ) -#define NAPI_WEIGHT 64 #define BLINK_MS 250 #define LINK_HZ HZ @@ -3833,7 +3832,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, dev->features |= NETIF_F_HIGHDMA; skge = netdev_priv(dev); - netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_POLL_WEIGHT); skge->netdev = dev; skge->hw = hw; skge->msg_enable = netif_msg_init(debug, default_msg); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index ea16b1dd6a98..a1e907c85217 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -63,7 +63,6 @@ #define TX_DEF_PENDING 63 #define TX_WATCHDOG (5 * HZ) -#define NAPI_WEIGHT 64 #define PHY_RETRIES 1000 #define SKY2_EEPROM_MAGIC 0x9955aabb @@ -4938,7 +4937,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_POLL_WEIGHT); err = register_netdev(dev); if (err) { diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 4cd0747edaff..95839fd84dab 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -30,7 +30,6 @@ #define MTK_STAR_WAIT_TIMEOUT 300 #define MTK_STAR_MAX_FRAME_SIZE 1514 #define MTK_STAR_SKB_ALIGNMENT 16 -#define MTK_STAR_NAPI_WEIGHT 64 #define MTK_STAR_HASHTABLE_MC_LIMIT 256 #define MTK_STAR_HASHTABLE_SIZE_MAX 512 @@ -1551,7 +1550,7 @@ static int mtk_star_probe(struct platform_device *pdev) ndev->netdev_ops = &mtk_star_netdev_ops; ndev->ethtool_ops = &mtk_star_ethtool_ops; - netif_napi_add(ndev, 
&priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT); + netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT); return devm_register_netdev(dev, ndev); } diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 9d1e98db308b..2a3e4e842fa5 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -113,7 +113,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DEF_RX_NUM_DESC (128) #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ -#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ /* Buffer descriptor parameters */ #define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */ @@ -1949,7 +1948,7 @@ static int davinci_emac_probe(struct platform_device *pdev) ndev->netdev_ops = &emac_netdev_ops; ndev->ethtool_ops = ðtool_ops; - netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); + netif_napi_add(ndev, &priv->napi, emac_poll, NAPI_POLL_WEIGHT); pm_runtime_enable(&pdev->dev); rc = pm_runtime_resume_and_get(&pdev->dev); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 16507bff652a..21b0e961eab5 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -24,7 +24,6 @@ #include "netcp.h" #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) -#define NETCP_NAPI_WEIGHT 64 #define NETCP_TX_TIMEOUT (5 * HZ) #define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) #define NETCP_MIN_PACKET_SIZE ETH_ZLEN @@ -2096,8 +2095,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device, } /* NAPI register */ - netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT); - netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); + netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NAPI_POLL_WEIGHT); + netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NAPI_POLL_WEIGHT); /* Register the network device */ ndev->dev_id = 0; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index fe8e21ad8ed9..8e035374a370 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -42,7 +42,6 @@ #include #define XENVIF_QUEUE_LENGTH 32 -#define XENVIF_NAPI_WEIGHT 64 /* Number of bytes allowed on the internal guest Rx queue. */ #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) @@ -739,7 +738,7 @@ int xenvif_connect_data(struct xenvif_queue *queue, atomic_set(&queue->inflight_packets, 0); netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, - XENVIF_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); queue->stalled = true; -- cgit v1.2.3 From e2a303295d28070cd602f3355d007b9222aa6683 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:10 -0700 Subject: eth: smsc: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/smsc/smsc9420.c | 2 +- drivers/net/ethernet/smsc/smsc9420.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index d937af18973e..0c68c7f8056d 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1585,7 +1585,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->netdev_ops = &smsc9420_netdev_ops; dev->ethtool_ops = &smsc9420_ethtool_ops; - netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); + netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_POLL_WEIGHT); result = register_netdev(dev); if (result) { diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h index 409e82b2018a..876410a256c6 100644 --- a/drivers/net/ethernet/smsc/smsc9420.h +++ b/drivers/net/ethernet/smsc/smsc9420.h @@ -15,7 +15,6 @@ /* interrupt deassertion in multiples of 10us */ #define INT_DEAS_TIME (50) -#define NAPI_WEIGHT (64) #define SMSC_BAR (3) #ifdef __BIG_ENDIAN -- cgit v1.2.3 From 055e13f31f283bb7f672ba95dd457f04deb77dcf Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:11 -0700 Subject: eth: cpsw: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 4 ++-- drivers/net/ethernet/ti/cpsw_new.c | 4 ++-- drivers/net/ethernet/ti/cpsw_priv.c | 12 ++++++------ drivers/net/ethernet/ti/cpsw_priv.h | 1 - 4 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e6ad2e53f1cd..662435e36805 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1639,10 +1639,10 @@ static int cpsw_probe(struct platform_device *pdev) ndev->ethtool_ops = &cpsw_ethtool_ops; netif_napi_add(ndev, &cpsw->napi_rx, cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, dev); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 0f31cb4168bb..b33781ed760e 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1416,11 +1416,11 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) netif_napi_add(ndev, &cpsw->napi_rx, cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw->quirk_irq ? 
cpsw_tx_poll : cpsw_tx_mq_poll, - CPSW_POLL_WEIGHT); + NAPI_POLL_WEIGHT); } napi_ndev = ndev; diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 887285c57db8..758295c898ac 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -364,7 +364,7 @@ void cpsw_split_res(struct cpsw_common *cpsw) if (cpsw->tx_ch_num == rlim_ch_num) { max_rate = consumed_rate; } else if (!rlim_ch_num) { - ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num; + ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num; bigest_rate = 0; max_rate = consumed_rate; } else { @@ -379,19 +379,19 @@ void cpsw_split_res(struct cpsw_common *cpsw) if (max_rate < consumed_rate) max_rate *= 10; - ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate; - ch_budget = (CPSW_POLL_WEIGHT - ch_budget) / + ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate; + ch_budget = (NAPI_POLL_WEIGHT - ch_budget) / (cpsw->tx_ch_num - rlim_ch_num); bigest_rate = (max_rate - consumed_rate) / (cpsw->tx_ch_num - rlim_ch_num); } /* split tx weight/budget */ - budget = CPSW_POLL_WEIGHT; + budget = NAPI_POLL_WEIGHT; for (i = 0; i < cpsw->tx_ch_num; i++) { ch_rate = cpdma_chan_get_rate(txv[i].ch); if (ch_rate) { - txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate; + txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate; if (!txv[i].budget) txv[i].budget++; if (ch_rate > bigest_rate) { @@ -417,7 +417,7 @@ void cpsw_split_res(struct cpsw_common *cpsw) txv[bigest_rate_ch].budget += budget; /* split rx budget */ - budget = CPSW_POLL_WEIGHT; + budget = NAPI_POLL_WEIGHT; ch_budget = budget / cpsw->rx_ch_num; for (i = 0; i < cpsw->rx_ch_num; i++) { cpsw->rxv[i].budget = ch_budget; diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index fc591f5ebe18..34230145ca0b 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -89,7 +89,6 @@ do { \ #define CPDMA_TXCP 0x40 #define CPDMA_RXCP 0x60 -#define CPSW_POLL_WEIGHT 64 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4 #define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN) #define CPSW_MIN_PACKET_SIZE (ETH_ZLEN) -- cgit v1.2.3 From feda771f1b9ec7a0f6d957d280a8ccf571351b46 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:12 -0700 Subject: eth: pch_gbe: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Reviewed-by: Andy Shevchenko Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 1dc40c537281..46da937ad27f 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -32,8 +32,6 @@ #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 -#define PCH_GBE_TX_WEIGHT 64 -#define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 /* Initialize the wake-on-LAN settings */ @@ -1469,7 +1467,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, tx_desc->gbec_status, tx_desc->dma_status); unused = PCH_GBE_DESC_UNUSED(tx_ring); - thresh = tx_ring->count - PCH_GBE_TX_WEIGHT; + thresh = tx_ring->count - NAPI_POLL_WEIGHT; if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) { /* current marked clean, tx queue filling up, do extra clean */ int j, k; @@ -1482,13 +1480,13 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, /* current marked clean, scan for more that need cleaning. */ k = i; - for (j = 0; j < PCH_GBE_TX_WEIGHT; j++) + for (j = 0; j < NAPI_POLL_WEIGHT; j++) { tx_desc = PCH_GBE_TX_DESC(*tx_ring, k); if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/ if (++k >= tx_ring->count) k = 0; /*increment, wrap*/ } - if (j < PCH_GBE_TX_WEIGHT) { + if (j < NAPI_POLL_WEIGHT) { netdev_dbg(adapter->netdev, "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", unused, j, i, k, tx_ring->next_to_use, @@ -1547,7 +1545,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); /* weight of a sort for tx, to avoid endless transmit cleanup */ - if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { + if (cleaned_count++ == NAPI_POLL_WEIGHT) { cleaned = false; break; } @@ -2519,7 +2517,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, netdev->netdev_ops = &pch_gbe_netdev_ops; netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; netif_napi_add(netdev, &adapter->napi, - pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT); + pch_gbe_napi_poll, NAPI_POLL_WEIGHT); netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->features = netdev->hw_features; -- cgit v1.2.3 From 889e3691b9d6573de133da1f5e78f590e52152cd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:13 -0700 Subject: eth: mtk_eth_soc: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 18eebcaa6a76..31c5da5d6b72 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3298,9 +3298,9 @@ static int mtk_probe(struct platform_device *pdev) */ init_dummy_netdev(ð->dummy_dev); netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, - MTK_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, - MTK_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); platform_set_drvdata(pdev, eth); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index c98c7ee42c6f..b04977fa84f6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -24,7 +24,6 @@ #define MTK_MAX_RX_LENGTH_2K 2048 #define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_DMA_SIZE 512 -#define MTK_NAPI_WEIGHT 64 #define MTK_MAC_COUNT 2 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) -- cgit v1.2.3 From f130683b1e24292cb99f46653b7f6799c318f7ec Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:14 -0700 Subject: usb: lan78xx: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 415f16662f88..94e571fb61da 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -92,8 +92,6 @@ WAKE_MCAST | WAKE_BCAST | \ WAKE_ARP | WAKE_MAGIC) -#define LAN78XX_NAPI_WEIGHT 64 - #define TX_URB_NUM 10 #define TX_SS_URB_NUM TX_URB_NUM #define TX_HS_URB_NUM TX_URB_NUM @@ -4376,7 +4374,7 @@ static int lan78xx_probe(struct usb_interface *intf, netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev)); - netif_napi_add(netdev, &dev->napi, lan78xx_poll, LAN78XX_NAPI_WEIGHT); + netif_napi_add(netdev, &dev->napi, lan78xx_poll, NAPI_POLL_WEIGHT); INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); init_usb_anchor(&dev->deferred); -- cgit v1.2.3 From b3c2b61ef621d3b31ea3ccc7811b0c8c07f28970 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:15 -0700 Subject: slic: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Acked-by: Lino Sanfilippo Signed-off-by: David S. 
Miller --- drivers/net/ethernet/alacritech/slic.h | 2 -- drivers/net/ethernet/alacritech/slicoss.c | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h index 3add305d34b4..4eecbdfff3ff 100644 --- a/drivers/net/ethernet/alacritech/slic.h +++ b/drivers/net/ethernet/alacritech/slic.h @@ -265,8 +265,6 @@ #define SLIC_NUM_STAT_DESC_ARRAYS 4 #define SLIC_INVALID_STAT_DESC_IDX 0xffffffff -#define SLIC_NAPI_WEIGHT 64 - #define SLIC_UPR_LSTAT 0 #define SLIC_UPR_CONFIG 1 diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 1fc9a1cd3ef8..ce353b0c02a3 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto unmap; } - netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT); + netif_napi_add(dev, &sdev->napi, slic_poll, NAPI_POLL_WEIGHT); netif_carrier_off(dev); err = register_netdev(dev); -- cgit v1.2.3 From 592df366378963669a7e561f58b23564193a230d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:16 -0700 Subject: net: bgmac: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Reviewed-by: Florian Fainelli Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bgmac.c | 2 +- drivers/net/ethernet/broadcom/bgmac.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 7b525c65bacb..2dfc1e32bbb3 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1527,7 +1527,7 @@ int bgmac_enet_probe(struct bgmac *bgmac) if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) bgmac->int_mask &= ~BGMAC_IS_TX_MASK; - netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); + netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, NAPI_POLL_WEIGHT); err = bgmac_phy_connect(bgmac); if (err) { diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 110088e662ea..e05ac92c0650 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -364,8 +364,6 @@ #define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040 #define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080 -#define BGMAC_WEIGHT 64 - #define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN) /* Feature Flags */ -- cgit v1.2.3 From 0258f5399f0cba31933be07cf222f084999fa94e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:17 -0700 Subject: eth: atlantic: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2 -- drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 2 +- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 8bcda2cb3a2e..7e9c74b141ef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -65,8 +65,6 @@ */ #define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2) -#define AQ_CFG_NAPI_WEIGHT 64U - /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ #define AQ_CFG_FC_MODE AQ_NIC_FC_FULL diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 06de19f63287..275324c9e51e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -1218,7 +1218,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec) atomic_set(&aq_ptp->offset_ingress, 0); netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, - aq_ptp_poll, AQ_CFG_NAPI_WEIGHT); + aq_ptp_poll, NAPI_POLL_WEIGHT); aq_ptp->idx_vector = idx_vec; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index e839e1002ec7..f0fdf20f01c1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -120,7 +120,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx, self->rx_rings = 0; netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, - aq_vec_poll, AQ_CFG_NAPI_WEIGHT); + aq_vec_poll, NAPI_POLL_WEIGHT); err_exit: return self; -- cgit v1.2.3 From e702def527ecdeabe0c8a302db0370161892d740 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:18 -0700 Subject: eth: benet: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 3 +-- drivers/net/ethernet/emulex/benet/be_main.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8689d4a51fe5..61fe9625bed1 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -101,8 +101,7 @@ #define MAX_ROCE_EQS 5 #define MAX_MSIX_VECTORS 32 #define MIN_MSIX_VECTORS 1 -#define BE_NAPI_WEIGHT 64 -#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ +#define MAX_RX_POST NAPI_POLL_WEIGHT /* Frags posted at a time */ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) #define MAX_NUM_POST_ERX_DB 255u diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d0c262f2695a..5939068a8f62 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2983,7 +2983,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) cpumask_set_cpu(cpumask_local_spread(i, numa_node), eqo->affinity_mask); netif_napi_add(adapter->netdev, &eqo->napi, be_poll, - BE_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); } return 0; } -- cgit v1.2.3 From bbbe6ecbc36d55e0ac49251f52f3ccffa51ec32a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:19 -0700 Subject: eth: gfar: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/gianfar.c | 2 +- drivers/net/ethernet/freescale/gianfar.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 206b7a35eaf5..f0b652a65043 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3232,7 +3232,7 @@ static int gfar_probe(struct platform_device *ofdev) /* Register for napi ...We are registering NAPI for each grp */ for (i = 0; i < priv->num_grps; i++) { netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + gfar_poll_rx_sq, NAPI_POLL_WEIGHT); netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, gfar_poll_tx_sq, 2); } diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index ca5e14f908fe..68b59d3202e3 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -52,9 +52,6 @@ struct ethtool_rx_list { unsigned int count; }; -/* The maximum number of packets to be handled in one call of gfar_poll */ -#define GFAR_DEV_WEIGHT 64 - /* Length for FCB */ #define GMAC_FCB_LEN 8 -- cgit v1.2.3 From 288696565f2d2f04ad7eddd599aa113422b96613 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:20 -0700 Subject: eth: vxge: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/neterion/vxge/vxge-main.c | 2 +- drivers/net/ethernet/neterion/vxge/vxge-main.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index aa7c093f1f91..db4dfae8c01d 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -4351,7 +4351,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) } ll_config->tx_steering_type = TX_MULTIQ_STEERING; ll_config->intr_type = MSI_X; - ll_config->napi_weight = NEW_NAPI_WEIGHT; + ll_config->napi_weight = NAPI_POLL_WEIGHT; ll_config->rth_steering = RTH_STEERING; /* get the default configuration parameters */ diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 63f65193dd49..da9d2c191828 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -167,8 +167,6 @@ struct macInfo { struct vxge_config { int tx_pause_enable; int rx_pause_enable; - -#define NEW_NAPI_WEIGHT 64 int napi_weight; int intr_type; #define INTA 0 -- cgit v1.2.3 From 26450aa7ca4266444e89d065766f8d8b98e277ff Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:21 -0700 Subject: eth: spider: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Acked-by: Geoff Levand Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/toshiba/spider_net.c | 2 +- drivers/net/ethernet/toshiba/spider_net.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index f47b8358669d..c09cd961edbb 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2270,7 +2270,7 @@ spider_net_setup_netdev(struct spider_net_card *card) timer_setup(&card->aneg_timer, spider_net_link_phy, 0); netif_napi_add(netdev, &card->napi, - spider_net_poll, SPIDER_NET_NAPI_WEIGHT); + spider_net_poll, NAPI_POLL_WEIGHT); spider_net_setup_netdev_ops(netdev); diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h index 05b1a0736835..51948e2b3a34 100644 --- a/drivers/net/ethernet/toshiba/spider_net.h +++ b/drivers/net/ethernet/toshiba/spider_net.h @@ -44,7 +44,6 @@ extern char spider_net_driver_name[]; #define SPIDER_NET_RX_CSUM_DEFAULT 1 #define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ -#define SPIDER_NET_NAPI_WEIGHT 64 #define SPIDER_NET_FIRMWARE_SEQS 6 #define SPIDER_NET_FIRMWARE_SEQWORDS 1024 -- cgit v1.2.3 From e9c6ec6510301285647d0a1357ce49ab07ccd4ef Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:22 -0700 Subject: eth: velocity: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/via/via-velocity.c | 3 +-- drivers/net/ethernet/via/via-velocity.h | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index be2b992f24d9..ff0c102cb578 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2846,8 +2846,7 @@ static int velocity_probe(struct device *dev, int irq, netdev->netdev_ops = &velocity_netdev_ops; netdev->ethtool_ops = &velocity_ethtool_ops; - netif_napi_add(netdev, &vptr->napi, velocity_poll, - VELOCITY_NAPI_WEIGHT); + netif_napi_add(netdev, &vptr->napi, velocity_poll, NAPI_POLL_WEIGHT); netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX; diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index d3f960cc7c6e..c02a9654dce6 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -23,7 +23,6 @@ #define VELOCITY_VERSION "1.15" #define VELOCITY_IO_SIZE 256 -#define VELOCITY_NAPI_WEIGHT 64 #define PKT_BUF_SZ 1540 -- cgit v1.2.3 From 4bb0c7f09a1962330e9edf5dd6cfe44113aeee45 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 28 Apr 2022 14:23:23 -0700 Subject: qeth: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Acked-by: Alexandra Winter Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 -- drivers/s390/net/qeth_core_main.c | 2 +- drivers/s390/net/qeth_l2_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index de25d7ac41da..1d195429753d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -801,8 +801,6 @@ struct qeth_priv { u32 brport_features; }; -#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT - struct qeth_card { enum qeth_card_states state; spinlock_t lock; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index d99c5b773e22..ae85179ca49a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -7100,7 +7100,7 @@ int qeth_open(struct net_device *dev) local_bh_disable(); qeth_for_each_output_queue(card, queue, i) { netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, - QETH_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); napi_enable(&queue->napi); napi_schedule(&queue->napi); } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 303461d70af3..92698f79a4e0 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1133,7 +1133,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); } - netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); + netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT); return register_netdev(card->dev); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d2f422a9a4f7..ea3b6b18aa6e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1910,7 +1910,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) netif_set_gso_max_size(card->dev, PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); - netif_napi_add(card->dev, &card->napi, qeth_poll, 
QETH_NAPI_WEIGHT); + netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT); return register_netdev(card->dev); } -- cgit v1.2.3 From eee645eccfc4edc9fb7dcef1b381e406c5dd4d14 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Fri, 29 Apr 2022 09:09:25 +0300 Subject: ath11k: Don't use GFP_KERNEL in atomic context We are seeing below warning: ... kernel: [ 5720.362941] BUG: sleeping function called from invalid context at include/linux/sched/mm.h:197 kernel: [ 5720.362943] in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 0, name: swapper/4 kernel: [ 5720.362947] CPU: 4 PID: 0 Comm: swapper/4 Tainted: G W 5.10.90 #18 4fa489e3e5c16043994f416310c2f60eff666320 kernel: [ 5720.362949] Hardware name: Google Nipperkin/Nipperkin, BIOS Google_Nipperkin.14316.0.0 10/30/2021 kernel: [ 5720.362950] Call Trace: kernel: [ 5720.362953] kernel: [ 5720.362959] dump_stack+0x9c/0xe7 kernel: [ 5720.362964] ___might_sleep+0x14a/0x160 kernel: [ 5720.362967] kmem_cache_alloc+0x46/0x226 kernel: [ 5720.362970] ? __alloc_skb+0x6c/0x19e kernel: [ 5720.362972] __alloc_skb+0x6c/0x19e kernel: [ 5720.362985] cfg80211_gtk_rekey_notify+0xa2/0x21d [cfg80211 2c8b5aee0416e7d010d70c332a47990fc843c1c5] kernel: [ 5720.362995] ath11k_wmi_gtk_offload_status_event+0x102/0x155 [ath11k 4c6bb5f7331c81199d56a7e37bdc10030f167838] kernel: [ 5720.363002] ath11k_wmi_tlv_op_rx+0x301/0x51b [ath11k 4c6bb5f7331c81199d56a7e37bdc10030f167838] kernel: [ 5720.363009] ath11k_htc_rx_completion_handler+0xee/0x3f5 [ath11k 4c6bb5f7331c81199d56a7e37bdc10030f167838] kernel: [ 5720.363017] ath11k_ce_per_engine_service+0x2aa/0x32c [ath11k 4c6bb5f7331c81199d56a7e37bdc10030f167838] kernel: [ 5720.363024] ath11k_pci_ce_tasklet+0x1a/0x30 [ath11k_pci 9acc399855ea172aa14a892c0bfdba0ce22d6f07] kernel: [ 5720.363028] tasklet_action_common+0x8d/0x9f kernel: [ 5720.363032] __do_softirq+0x163/0x29a kernel: [ 5720.363035] asm_call_irq_on_stack+0x12/0x20 kernel: [ 5720.363037] kernel: [ 5720.363041] do_softirq_own_stack+0x3c/0x48 kernel: [ 5720.363043] __irq_exit_rcu+0x9b/0x9d kernel: [ 5720.363046] common_interrupt+0xc9/0x14d kernel: [ 5720.363049] asm_common_interrupt+0x1e/0x40 kernel: [ 5720.363054] RIP: 0010:cpuidle_enter_state+0x1c5/0x2ac kernel: [ 5720.363056] Code: 84 f6 4c 8b 75 c0 74 1e 48 c7 45 c8 00 00 00 00 9c 8f 45 c8 0f ba 65 c8 09 0f 82 d1 00 00 00 31 ff e8 4a bb 6c ff fb 45 85 e4 <78> 47 44 89 e0 48 6b d0 68 49 8b 4c 16 48 48 2b 5d b8 49 89 5d 18 kernel: [ 5720.363058] RSP: 0018:ffffa7e640157e78 EFLAGS: 00000206 kernel: [ 5720.363060] RAX: ffff9807ddf29b40 RBX: 00000533e033584c RCX: 00000533e033584c kernel: [ 5720.363062] RDX: 0000000000000004 RSI: 0000000000000000 RDI: 0000000000000000 kernel: [ 5720.363063] RBP: ffffa7e640157ec0 R08: 0000000000000002 R09: 00000533e171bb7a kernel: [ 5720.363064] R10: 0000000000000900 R11: fffffffffffffffe R12: 0000000000000003 kernel: [ 5720.363065] R13: ffff9804c2ef6000 R14: ffffffffbe9a7bd0 R15: 0000000000000003 kernel: [ 5720.363069] ? cpuidle_enter_state+0x19a/0x2ac kernel: [ 5720.363072] cpuidle_enter+0x2e/0x3d kernel: [ 5720.363074] do_idle+0x163/0x1ee kernel: [ 5720.363076] cpu_startup_entry+0x1d/0x1f kernel: [ 5720.363078] secondary_startup_64_no_verify+0xb1/0xbb ... This is because GFP_KERNEL is used by ath11k_wmi_gtk_offload_status_event while in atomic context. Fix it by using GFP_ATOMIC instead. 
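For readers tracing the splat above: the handler runs from the CE tasklet, i.e. softirq (atomic) context, where GFP_KERNEL is not allowed because it may sleep during reclaim. A minimal sketch of that rule follows; the handler name and the payload copy are illustrative only, not the driver's code.

#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical event handler standing in for the WMI completion path.
 * It is called from a tasklet, so it must not sleep; any allocation it
 * makes therefore has to use GFP_ATOMIC.  Passing GFP_KERNEL here is
 * what produces the "sleeping function called from invalid context"
 * warning quoted in the log above.
 */
static void example_event_tasklet(const void *payload, size_t len)
{
	void *copy;

	copy = kmalloc(len, GFP_ATOMIC);	/* not GFP_KERNEL in atomic context */
	if (!copy)
		return;				/* atomic allocations may fail; tolerate it */

	memcpy(copy, payload, len);
	/* ... notify listeners, then release the copy ... */
	kfree(copy);
}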
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Fixes: a16d9b50cfba ("ath11k: support GTK rekey offload") Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220427120033.1046759-1-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 3c0ac1e29479..598ec061694f 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -7844,7 +7844,7 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab, replay_ctr_be = cpu_to_be64(replay_ctr); ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, - (void *)&replay_ctr_be, GFP_KERNEL); + (void *)&replay_ctr_be, GFP_ATOMIC); kfree(tb); } -- cgit v1.2.3 From 31d00ca4ce0e1abf5342854606bbe7d20e38c3f8 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Wed, 27 Apr 2022 23:44:05 +0200 Subject: net: phy: micrel: move the PHY timestamping check Both lan8814_ptp_init() and lan8814_ptp_probe_once() are only used if PTP and PHY timestamping is enabed. Up until now the probe function just returns early, if they are not needed. But we need the phy_package_init_once() functionality for the coma mode GPIO setup. Move the check into the functions itself. Signed-off-by: Michael Walle Signed-off-by: Jakub Kicinski --- drivers/net/phy/micrel.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 96840695debd..b981c5eaac33 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2729,6 +2729,10 @@ static void lan8814_ptp_init(struct phy_device *phydev) struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; u32 temp; + if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) + return; + lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_); temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD); @@ -2767,6 +2771,10 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) { struct lan8814_shared_priv *shared = phydev->shared->priv; + if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) + return 0; + /* Initialise shared lock for clock*/ mutex_init(&shared->shared_lock); @@ -2843,10 +2851,6 @@ static int lan8814_probe(struct phy_device *phydev) phydev->priv = priv; - if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || - !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) - return 0; - /* Strap-in value for PHY address, below register read gives starting * phy address value */ -- cgit v1.2.3 From 738871b09250ee9043c0e05101fcc254059f1492 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Wed, 27 Apr 2022 23:44:06 +0200 Subject: net: phy: micrel: add coma mode GPIO The LAN8814 has a coma mode pin which puts the PHY into isolate and power-dowm mode. Unfortunately, the mode cannot be disabled by a register. Usually, the input pin has a pull-up and connected to a GPIO which can then be used to disable the mode. Try to get the GPIO and deassert it. 
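The change relies on the kernel's optional-GPIO semantics: devm_gpiod_get_optional() returns NULL rather than an error when firmware describes no "coma-mode" GPIO, and the gpiod setters accept a NULL descriptor as a no-op, so boards without the pin need no special casing. A minimal sketch of that pattern (function and parameter names are illustrative, not the driver's) is:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Sketch of deasserting a strap-style pin through an optional GPIO. */
static int example_release_coma_mode(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "coma-mode",
					GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);		/* e.g. -EPROBE_DEFER */

	gpiod_set_value_cansleep(gpiod, 0);	/* deassert; no-op when gpiod is NULL */

	return 0;
}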
Signed-off-by: Michael Walle Signed-off-by: Jakub Kicinski --- drivers/net/phy/micrel.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index b981c5eaac33..685a0ab5453c 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -32,6 +32,7 @@ #include #include #include +#include /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@ -2837,6 +2838,21 @@ static int lan8814_config_init(struct phy_device *phydev) return 0; } +static int lan8814_release_coma_mode(struct phy_device *phydev) +{ + struct gpio_desc *gpiod; + + gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode", + GPIOD_OUT_HIGH_OPEN_DRAIN); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + + gpiod_set_consumer_name(gpiod, "LAN8814 coma mode"); + gpiod_set_value_cansleep(gpiod, 0); + + return 0; +} + static int lan8814_probe(struct phy_device *phydev) { struct kszphy_priv *priv; @@ -2859,6 +2875,10 @@ static int lan8814_probe(struct phy_device *phydev) addr, sizeof(struct lan8814_shared_priv)); if (phy_package_init_once(phydev)) { + err = lan8814_release_coma_mode(phydev); + if (err) + return err; + err = lan8814_ptp_probe_once(phydev); if (err) return err; -- cgit v1.2.3 From 78a9b3c47befde80a4896a8c5172990b60c58af4 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Thu, 28 Apr 2022 12:39:33 +0100 Subject: sfc: add EF100 VF support via a write to sriov_numvfs This patch extends the EF100 PF driver by adding .sriov_configure() which would allow users to enable and disable virtual functions using the sriov sysfs. Signed-off-by: Pieter Jansen van Vuuren Signed-off-by: Edward Cree Link: https://lore.kernel.org/r/75e74d9e-14ce-0524-9668-5ab735a7cf62@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/Makefile | 2 +- drivers/net/ethernet/sfc/ef100.c | 27 +++++++++++++++- drivers/net/ethernet/sfc/ef100_nic.c | 6 +++- drivers/net/ethernet/sfc/ef100_sriov.c | 56 ++++++++++++++++++++++++++++++++++ drivers/net/ethernet/sfc/ef100_sriov.h | 14 +++++++++ 5 files changed, 102 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/sfc/ef100_sriov.c create mode 100644 drivers/net/ethernet/sfc/ef100_sriov.h (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 8bd01c429f91..5ba98769b52b 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -8,7 +8,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \ ef100.o ef100_nic.o ef100_netdev.o \ ef100_ethtool.o ef100_rx.o ef100_tx.o sfc-$(CONFIG_SFC_MTD) += mtd.o -sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o +sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o ef100_sriov.o obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c index ffdb36715a49..173f0ecebc70 100644 --- a/drivers/net/ethernet/sfc/ef100.c +++ b/drivers/net/ethernet/sfc/ef100.c @@ -2,7 +2,7 @@ /**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2005-2018 Solarflare Communications Inc. - * Copyright 2019-2020 Xilinx Inc. + * Copyright 2019-2022 Xilinx Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -17,6 +17,7 @@ #include "io.h" #include "ef100_nic.h" #include "ef100_netdev.h" +#include "ef100_sriov.h" #include "ef100_regs.h" #include "ef100.h" @@ -436,6 +437,10 @@ static void ef100_pci_remove(struct pci_dev *pci_dev) * blocks, so we have to do it before PCI removal. */ unregister_netdevice_notifier(&efx->netdev_notifier); +#if defined(CONFIG_SFC_SRIOV) + if (!efx->type->is_vf) + efx_ef100_pci_sriov_disable(efx); +#endif ef100_remove(efx); efx_fini_io(efx); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); @@ -524,6 +529,23 @@ fail: return rc; } +#ifdef CONFIG_SFC_SRIOV +static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct efx_nic *efx = pci_get_drvdata(dev); + int rc; + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } + return -ENOENT; +} +#endif + /* PCI device ID table */ static const struct pci_device_id ef100_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */ @@ -538,6 +560,9 @@ struct pci_driver ef100_pci_driver = { .id_table = ef100_pci_table, .probe = ef100_pci_probe, .remove = ef100_pci_remove, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = ef100_pci_sriov_configure, +#endif .err_handler = &efx_err_handlers, }; diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index a07cbf45a326..b04911bc8c57 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -2,7 +2,7 @@ /**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2018 Solarflare Communications Inc. - * Copyright 2019-2020 Xilinx Inc. + * Copyright 2019-2022 Xilinx Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -22,6 +22,7 @@ #include "mcdi_filters.h" #include "ef100_rx.h" #include "ef100_tx.h" +#include "ef100_sriov.h" #include "ef100_netdev.h" #include "rx_common.h" @@ -787,6 +788,9 @@ const struct efx_nic_type ef100_pf_nic_type = { .update_stats = ef100_update_stats, .pull_stats = efx_mcdi_mac_pull_stats, .stop_stats = efx_mcdi_mac_stop_stats, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_ef100_sriov_configure, +#endif /* Per-type bar/size configuration not used on ef100. Location of * registers is defined by extended capabilities. diff --git a/drivers/net/ethernet/sfc/ef100_sriov.c b/drivers/net/ethernet/sfc/ef100_sriov.c new file mode 100644 index 000000000000..664578176bfe --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_sriov.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "ef100_sriov.h" +#include "ef100_nic.h" + +static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs) +{ + struct pci_dev *dev = efx->pci_dev; + int rc; + + efx->vf_count = num_vfs; + rc = pci_enable_sriov(dev, num_vfs); + if (rc) + goto fail; + + return 0; + +fail: + netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n"); + efx->vf_count = 0; + return rc; +} + +int efx_ef100_pci_sriov_disable(struct efx_nic *efx) +{ + struct pci_dev *dev = efx->pci_dev; + unsigned int vfs_assigned; + + vfs_assigned = pci_vfs_assigned(dev); + if (vfs_assigned) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } + + pci_disable_sriov(dev); + + return 0; +} + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + if (num_vfs == 0) + return efx_ef100_pci_sriov_disable(efx); + else + return efx_ef100_pci_sriov_enable(efx, num_vfs); +} diff --git a/drivers/net/ethernet/sfc/ef100_sriov.h b/drivers/net/ethernet/sfc/ef100_sriov.h new file mode 100644 index 000000000000..c48fccd46c57 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_sriov.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2020-2022 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "net_driver.h" + +int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_ef100_pci_sriov_disable(struct efx_nic *efx); -- cgit v1.2.3 From 7195464cf8f219c4d5e493f07efdd532926c2434 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Fri, 29 Apr 2022 09:51:24 +0200 Subject: nfp: flower: utilize the tuple iifidx in offloading ct flows The device info from which conntrack originates is stored in metadata field of the ct flow to offload now, driver can utilize it to reduce the number of offloaded flows. v2: Drop inline keyword from get_netdev_from_rule() signature. The compiler can decide. Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Simon Horman Link: https://lore.kernel.org/r/20220429075124.128589-1-simon.horman@corigine.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/netronome/nfp/flower/conntrack.c | 25 +++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index bfd7d1c35076..1edcd9f86c9c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -83,6 +83,10 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, entry2->rule->match.dissector->used_keys; bool out; + if (entry1->netdev && entry2->netdev && + entry1->netdev != entry2->netdev) + return -EINVAL; + /* check the overlapped fields one by one, the unmasked part * should not conflict with each other. */ @@ -914,7 +918,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt, /* Check that the two tc flows are also compatible with * the nft entry. No need to check the pre_ct and post_ct * entries as that was already done during pre_merge. 
- * The nft entry does not have a netdev or chain populated, so + * The nft entry does not have a chain populated, so * skip this check. */ err = nfp_ct_merge_check(pre_ct_entry, nft_entry); @@ -999,8 +1003,6 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt, pre_ct_entry = ct_entry2; } - if (post_ct_entry->netdev != pre_ct_entry->netdev) - return -EINVAL; /* Checks that the chain_index of the filter matches the * chain_index of the GOTO action. */ @@ -1114,6 +1116,20 @@ err_tc_merge_tb_init: return ERR_PTR(err); } +static struct net_device *get_netdev_from_rule(struct flow_rule *rule) +{ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); + if (match.key->ingress_ifindex & match.mask->ingress_ifindex) + return __dev_get_by_index(&init_net, + match.key->ingress_ifindex); + } + + return NULL; +} + static struct nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt, struct net_device *netdev, @@ -1154,6 +1170,9 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt, entry->rule->match.dissector = &nft_match->dissector; entry->rule->match.mask = &nft_match->mask; entry->rule->match.key = &nft_match->key; + + if (!netdev) + netdev = get_netdev_from_rule(entry->rule); } else { entry->rule->match.dissector = flow->rule->match.dissector; entry->rule->match.mask = flow->rule->match.mask; -- cgit v1.2.3 From 5b06ef86826a4636c4475b024c0fdc3c9b088843 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Thu, 28 Apr 2022 13:40:49 +0200 Subject: net: lan966x: remove PHY reset support The PHY subsystem as well as the MIIM mdio driver (in case of the integrated PHYs) will take care of the resets. A separate reset driver isn't needed. There is no in-tree user of this feature. Remove the support. Signed-off-by: Michael Walle Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 6579c7062aa5..138718f33dbd 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -937,7 +937,7 @@ static int lan966x_ram_init(struct lan966x *lan966x) static int lan966x_reset_switch(struct lan966x *lan966x) { - struct reset_control *switch_reset, *phy_reset; + struct reset_control *switch_reset; int val = 0; int ret; @@ -946,13 +946,7 @@ static int lan966x_reset_switch(struct lan966x *lan966x) return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset), "Could not obtain switch reset"); - phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy"); - if (IS_ERR(phy_reset)) - return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset), - "Could not obtain phy reset\n"); - reset_control_reset(switch_reset); - reset_control_reset(phy_reset); lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG); lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT); -- cgit v1.2.3 From 2f187bfa6f35cc8bf1626bfc09449a09a6063ea1 Mon Sep 17 00:00:00 2001 From: Colin Foster Date: Fri, 29 Apr 2022 14:30:36 -0700 Subject: net: ethernet: ocelot: remove the need for num_stats initializer There is a desire to share the oclot_stats_layout struct outside of the current vsc7514 driver. 
In order to do so, the length of the array needs to be known at compile time, and defined in the struct ocelot and struct felix_info. Since the array is defined in a .c file and would be declared in the header file via: extern struct ocelot_stat_layout[]; the size of the array will not be known at compile time to outside modules. To fix this, remove the need for defining the number of stats at compile time and allow this number to be determined at initialization. Signed-off-by: Colin Foster Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 1 - drivers/net/dsa/ocelot/felix.h | 1 - drivers/net/dsa/ocelot/felix_vsc9959.c | 2 +- drivers/net/dsa/ocelot/seville_vsc9953.c | 2 +- drivers/net/ethernet/mscc/ocelot.c | 5 +++++ drivers/net/ethernet/mscc/ocelot_vsc7514.c | 2 +- include/soc/mscc/ocelot.h | 10 ++++++++++ 7 files changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 9e28219b223d..33cb124ca912 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1197,7 +1197,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) ocelot->map = felix->info->map; ocelot->stats_layout = felix->info->stats_layout; - ocelot->num_stats = felix->info->num_stats; ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap = felix->info->vcap; ocelot->vcap_pol.base = felix->info->vcap_pol_base; diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index f083b06fdfe9..39faf1027965 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -24,7 +24,6 @@ struct felix_info { const u32 *port_modes; int num_mact_rows; const struct ocelot_stat_layout *stats_layout; - unsigned int num_stats; int num_ports; int num_tx_queues; struct vcap_props *vcap; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 52a8566071ed..081871824eaf 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -638,6 +638,7 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = { { .offset = 0x10F, .name = "drop_green_prio_5", }, { .offset = 0x110, .name = "drop_green_prio_6", }, { .offset = 0x111, .name = "drop_green_prio_7", }, + OCELOT_STAT_END }; static const struct vcap_field vsc9959_vcap_es0_keys[] = { @@ -2216,7 +2217,6 @@ static const struct felix_info felix_info_vsc9959 = { .map = vsc9959_regmap, .ops = &vsc9959_ops, .stats_layout = vsc9959_stats_layout, - .num_stats = ARRAY_SIZE(vsc9959_stats_layout), .vcap = vsc9959_vcap_props, .vcap_pol_base = VSC9959_VCAP_POLICER_BASE, .vcap_pol_max = VSC9959_VCAP_POLICER_MAX, diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 68ef8f111bbe..48fd43a93364 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -636,6 +636,7 @@ static const struct ocelot_stat_layout vsc9953_stats_layout[] = { { .offset = 0x8F, .name = "drop_green_prio_5", }, { .offset = 0x90, .name = "drop_green_prio_6", }, { .offset = 0x91, .name = "drop_green_prio_7", }, + OCELOT_STAT_END }; static const struct vcap_field vsc9953_vcap_es0_keys[] = { @@ -1086,7 +1087,6 @@ static const struct felix_info seville_info_vsc9953 = { .map = vsc9953_regmap, .ops = &vsc9953_ops, .stats_layout = vsc9953_stats_layout, - .num_stats = ARRAY_SIZE(vsc9953_stats_layout), .vcap = vsc9953_vcap_props, .vcap_pol_base = VSC9953_VCAP_POLICER_BASE, 
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index ca71b62a44dc..0825a92599a5 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -3228,6 +3228,7 @@ static void ocelot_detect_features(struct ocelot *ocelot) int ocelot_init(struct ocelot *ocelot) { + const struct ocelot_stat_layout *stat; char queue_name[32]; int i, ret; u32 port; @@ -3240,6 +3241,10 @@ int ocelot_init(struct ocelot *ocelot) } } + ocelot->num_stats = 0; + for_each_stat(ocelot, stat) + ocelot->num_stats++; + ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 4f4a495a60ad..961f803aca19 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -190,6 +190,7 @@ static const struct ocelot_stat_layout ocelot_stats_layout[] = { { .name = "drop_green_prio_5", .offset = 0x8F, }, { .name = "drop_green_prio_6", .offset = 0x90, }, { .name = "drop_green_prio_7", .offset = 0x91, }, + OCELOT_STAT_END }; static void ocelot_pll5_init(struct ocelot *ocelot) @@ -227,7 +228,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) ocelot->map = ocelot_regmap; ocelot->stats_layout = ocelot_stats_layout; - ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); ocelot->num_mact_rows = 1024; ocelot->ops = ops; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 9b4e6c78d0f4..5c4f57cfa785 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -105,6 +105,13 @@ #define REG_RESERVED_ADDR 0xffffffff #define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR) +#define OCELOT_STAT_FLAG_END BIT(0) + +#define for_each_stat(ocelot, stat) \ + for ((stat) = ocelot->stats_layout; \ + !((stat)->flags & OCELOT_STAT_FLAG_END); \ + (stat)++) + enum ocelot_target { ANA = 1, QS, @@ -535,9 +542,12 @@ enum ocelot_ptp_pins { struct ocelot_stat_layout { u32 offset; + u32 flags; char name[ETH_GSTRING_LEN]; }; +#define OCELOT_STAT_END { .flags = OCELOT_STAT_FLAG_END } + struct ocelot_stats_region { struct list_head node; u32 offset; -- cgit v1.2.3 From 059d9f413efe3ed954e2df37511a4c0e514662f7 Mon Sep 17 00:00:00 2001 From: Prabhakar Kushwaha Date: Sat, 30 Apr 2022 04:05:13 +0300 Subject: qede: Reduce verbosity of ptp tx timestamp Reduce verbosity of ptp tx timestamp error to reduce excessive log messages. Signed-off-by: Manish Chopra Signed-off-by: Alok Prasad Signed-off-by: Ariel Elior Signed-off-by: Prabhakar Kushwaha Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 39176e765767..c9c8225f04d6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -496,19 +496,19 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags)) { - DP_ERR(edev, "Timestamping in progress\n"); + DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n"); edev->ptp_skip_txts++; return; } if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) { - DP_ERR(edev, - "Tx timestamping was not enabled, this packet will not be timestamped\n"); + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Tx timestamping was not enabled, this pkt will not be timestamped\n"); clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); edev->ptp_skip_txts++; } else if (unlikely(ptp->tx_skb)) { - DP_ERR(edev, - "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Device supports a single outstanding pkt to ts, It will not be ts\n"); clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); edev->ptp_skip_txts++; } else { -- cgit v1.2.3 From ce7deda0d5cdbd84e31aa17d95a5896966f2ed83 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Fri, 29 Apr 2022 09:01:04 +0000 Subject: net/funeth: simplify the return expression of fun_dl_info_get() Simplify the return expression. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. Miller --- drivers/net/ethernet/fungible/funeth/funeth_devlink.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c index a849b3c6b01f..d50c222948b4 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c @@ -6,13 +6,7 @@ static int fun_dl_info_get(struct devlink *dl, struct devlink_info_req *req, struct netlink_ext_ack *extack) { - int err; - - err = devlink_info_driver_name_put(req, KBUILD_MODNAME); - if (err) - return err; - - return 0; + return devlink_info_driver_name_put(req, KBUILD_MODNAME); } static const struct devlink_ops fun_dl_ops = { -- cgit v1.2.3 From 36ffca1afea9b429d3e49aa0b6a68ecd93f3be11 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Apr 2022 10:43:30 -0700 Subject: eth: remove remaining copies of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. This patch covers three more drivers which I missed in commit 5f012b40ef63 ("eth: remove copies of the NAPI_POLL_WEIGHT define"). Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 4 +--- drivers/net/ethernet/brocade/bna/bnad.c | 3 +-- drivers/net/ethernet/nvidia/forcedeth.c | 6 +++--- 3 files changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 07444aead3fd..6a356a6cee15 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -31,8 +31,6 @@ MODULE_LICENSE("GPL"); #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus()) -#define ENA_NAPI_BUDGET 64 - #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) @@ -2270,7 +2268,7 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter, netif_napi_add(adapter->netdev, &napi->napi, ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll, - ENA_NAPI_BUDGET); + NAPI_POLL_WEIGHT); if (!ENA_IS_XDP_INDEX(adapter, i)) { napi->rx_ring = &adapter->rx_ring[i]; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index f1d2c4cd5da2..f6fe08df568b 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1881,7 +1881,6 @@ poll_exit: return rcvd; } -#define BNAD_NAPI_POLL_QUOTA 64 static void bnad_napi_add(struct bnad *bnad, u32 rx_id) { @@ -1892,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id) for (i = 0; i < bnad->num_rxp_per_rx; i++) { rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; netif_napi_add(bnad->netdev, &rx_ctrl->napi, - bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); + bnad_napi_poll_rx, NAPI_POLL_WEIGHT); } } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 660013f716d4..5116badaf091 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -56,8 +56,8 @@ #include -#define TX_WORK_PER_LOOP 64 -#define RX_WORK_PER_LOOP 64 +#define TX_WORK_PER_LOOP NAPI_POLL_WEIGHT +#define RX_WORK_PER_LOOP NAPI_POLL_WEIGHT /* * Hardware access: @@ -5876,7 +5876,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) else dev->netdev_ops = &nv_netdev_ops_optimized; - netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); + netif_napi_add(dev, &np->napi, nv_napi_poll, NAPI_POLL_WEIGHT); dev->ethtool_ops = &ops; dev->watchdog_timeo = NV_WATCHDOG_TIMEO; -- cgit v1.2.3 From 0ed99ecc95b9c1d3c46d3bc34459088e82caef32 Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Wed, 27 Apr 2022 13:39:28 -0600 Subject: net: phy: marvell: update abilities and advertising when switching to SGMII With some SFP modules, such as Finisar FCLF8522P2BTL, the PHY hardware strapping defaults to 1000BaseX mode, but the kernel prefers to set them for SGMII mode. When this happens and the PHY is soft reset, the BMSR status register is updated, but this happens after the kernel has already read the PHY abilities during probing. This results in support not being detected for, and the PHY not advertising support for, 10 and 100 Mbps modes, preventing the link from working with a non-gigabit link partner. When the PHY is being configured for SGMII mode, call genphy_read_abilities again in order to re-read the capabilities, and update the advertising field accordingly. Signed-off-by: Robert Hancock Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2702faf7b0f6..47e83c1e9051 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -961,7 +961,21 @@ static int m88e1111_config_init(struct phy_device *phydev) if (err < 0) return err; - return genphy_soft_reset(phydev); + err = genphy_soft_reset(phydev); + if (err < 0) + return err; + + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + /* If the HWCFG_MODE was changed from another mode (such as + * 1000BaseX) to SGMII, the state of the support bits may have + * also changed now that the PHY has been reset. + * Update the PHY abilities accordingly. + */ + err = genphy_read_abilities(phydev); + linkmode_or(phydev->advertising, phydev->advertising, + phydev->supported); + } + return err; } static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data) -- cgit v1.2.3 From 961c6136359eef38a8c023d02028fdcd123f02a6 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Fri, 29 Apr 2022 08:17:35 +0300 Subject: net: enable memcg accounting for veth queues veth netdevice defines own rx queues and allocates array containing up to 4095 ~750-bytes-long 'struct veth_rq' elements. Such allocation is quite huge and should be accounted to memcg. Signed-off-by: Vasily Averin Signed-off-by: David S. Miller --- drivers/net/veth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/veth.c b/drivers/net/veth.c index eb0121a64d6d..3592014e50cc 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1375,7 +1375,7 @@ static int veth_alloc_queues(struct net_device *dev) struct veth_priv *priv = netdev_priv(dev); int i; - priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL); + priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT); if (!priv->rq) return -ENOMEM; -- cgit v1.2.3 From 72a1a2edeb1cfb9d4400e19ff80ba5b0f93e9bd6 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 29 Apr 2022 15:54:59 +0800 Subject: plfxlc: Remove unused include Eliminate the follow versioncheck warning: ./drivers/net/wireless/purelifi/plfxlc/usb.c: 20 linux/version.h not needed. Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429075459.72029-1-jiapeng.chong@linux.alibaba.com --- drivers/net/wireless/purelifi/plfxlc/usb.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c index 6f338b1572a1..d0e98b2f1365 100644 --- a/drivers/net/wireless/purelifi/plfxlc/usb.c +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include "mac.h" -- cgit v1.2.3 From 411a1476ea417c7cce9792b31dbc83cedbad7a41 Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Fri, 29 Apr 2022 16:32:14 +0200 Subject: net: dsa: mv88e6xxx: Cosmetic change spaces to tabs in dsa_switch_ops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All but 5 methods in dsa_swith_ops use tabs for indentation. Change the 5 methods that break this rule. Signed-off-by: Marek Behún Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index b8f956eece9c..53fd12e7a21c 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -6856,11 +6856,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, .vlan_msti_set = mv88e6xxx_vlan_msti_set, - .port_fdb_add = mv88e6xxx_port_fdb_add, - .port_fdb_del = mv88e6xxx_port_fdb_del, - .port_fdb_dump = mv88e6xxx_port_fdb_dump, - .port_mdb_add = mv88e6xxx_port_mdb_add, - .port_mdb_del = mv88e6xxx_port_mdb_del, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, + .port_mdb_add = mv88e6xxx_port_mdb_add, + .port_mdb_del = mv88e6xxx_port_mdb_del, .port_mirror_add = mv88e6xxx_port_mirror_add, .port_mirror_del = mv88e6xxx_port_mirror_del, .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, -- cgit v1.2.3 From 1d6d131d504922327dbef7aded01430a251d765c Mon Sep 17 00:00:00 2001 From: Chih-Kang Chang Date: Thu, 28 Apr 2022 10:05:19 +0800 Subject: rtw88: add HT MPDU density value for each chip Each chip has an optimal ampdu density value; setting it correctly can improve throughput performance. Signed-off-by: Chih-Kang Chang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220428020521.8015-1-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/main.c | 3 ++- drivers/net/wireless/realtek/rtw88/main.h | 1 + drivers/net/wireless/realtek/rtw88/rtw8723d.c | 1 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 1 + drivers/net/wireless/realtek/rtw88/rtw8822b.c | 1 + drivers/net/wireless/realtek/rtw88/rtw8822c.c | 1 + 6 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 7431988b5985..14289f83feb5 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1461,6 +1461,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_ht_cap *ht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; ht_cap->ht_supported = true; ht_cap->cap = 0; @@ -1478,7 +1479,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_SGI_40; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + ht_cap->ampdu_density = chip->ampdu_density; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (efuse->hw_cap.nss > 1) { ht_cap->mcs.rx_mask[0] = 0xFF; diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 2743074a4256..de149a3b3ba1 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1179,6 +1179,7 @@ struct rtw_chip_info { bool rx_ldpc; bool tx_stbc; u8 max_power_index; + u8 ampdu_density; u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; const struct rtw_fwcd_segs *fwcd_segs; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index ad2b323a0423..93cce44df531 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -2747,6 +2747,7 @@ struct rtw_chip_info
rtw8723d_hw_spec = { .rx_ldpc = false, .pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl, .iqk_threshold = 8, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, .coex_para_ver = 0x2007022f, .bt_desired_ver = 0x2f, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index ec38a7c84951..ffee39ea5df6 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -1923,6 +1923,7 @@ struct rtw_chip_info rtw8821c_hw_spec = { .iqk_threshold = 8, .bfer_su_max_num = 2, .bfer_mu_max_num = 1, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2, .coex_para_ver = 0x19092746, .bt_desired_ver = 0x46, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index eee7bf035403..dccd722b8e62 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2548,6 +2548,7 @@ struct rtw_chip_info rtw8822b_hw_spec = { .edcca_th = rtw8822b_edcca_th, .l2h_th_ini_cs = 10 + EDCCA_IGI_BASE, .l2h_th_ini_ad = -14 + EDCCA_IGI_BASE, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2, .coex_para_ver = 0x20070206, .bt_desired_ver = 0x6, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index cd74607a61a2..c043b5c520b9 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -5368,6 +5368,7 @@ struct rtw_chip_info rtw8822c_hw_spec = { .edcca_th = rtw8822c_edcca_th, .l2h_th_ini_cs = 60, .l2h_th_ini_ad = 45, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2, #ifdef CONFIG_PM .wow_fw_name = "rtw88/rtw8822c_wow_fw.bin", -- cgit v1.2.3 From 02ee806843bd8ee868a0a2ff4364188807ce321d Mon Sep 17 00:00:00 2001 From: Chih-Kang Chang Date: Thu, 28 Apr 2022 10:05:20 +0800 Subject: rtw88: fix not disabling beacon filter after disconnection Correct the check so that the beacon filter is disabled after disconnection. Signed-off-by: Chih-Kang Chang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220428020521.8015-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 3545d51c6951..28cc8d680be3 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -649,7 +649,7 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; - if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si) + if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER)) return; if (!connect) { @@ -659,6 +659,10 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, return; } + + if (!si) + return; + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0); ether_addr_copy(&h2c_pkt[1], bss_conf->bssid); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); -- cgit v1.2.3 From 5b3fd8fd7ceb135d08edf52adf6e833ee92a351e Mon Sep 17 00:00:00 2001 From: Chih-Kang Chang Date: Thu, 28 Apr 2022 10:05:21 +0800 Subject: rtw88: fix hw scan may cause disconnect issue After a scan is aborted we still receive some hw scan c2h packets, and processing these c2h commands will change the current channel. If the device is already connected to another AP, the driver will set the wrong op channel because the current channel has changed.
The disconnection happens when the hw scan switches back to the wrong op channel, where the device cannot receive beacons from the AP. To fix this issue, we ignore late c2h packets if we are not scanning, and set the current channel back to the op channel after the scan to align with the FW behavior. Signed-off-by: Chih-Kang Chang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220428020521.8015-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 28cc8d680be3..e344e058f943 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -2056,7 +2056,10 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct cfg80211_scan_info info = { .aborted = aborted, }; + struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; + struct rtw_hal *hal = &rtwdev->hal; struct rtw_vif *rtwvif; + u8 chan = scan_info->op_chan; if (!vif) return; @@ -2066,10 +2069,14 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, rtw_core_scan_complete(rtwdev, vif, true); + rtwvif = (struct rtw_vif *)vif->drv_priv; + if (rtwvif->net_type == RTW_NET_MGD_LINKED) { + hal->current_channel = chan; + hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G; + } ieee80211_wake_queues(rtwdev->hw); ieee80211_scan_completed(rtwdev->hw, &info); - rtwvif = (struct rtw_vif *)vif->drv_priv; rtwvif->scan_req = NULL; rtwvif->scan_ies = NULL; rtwdev->scan_info.scanning_vif = NULL; @@ -2178,6 +2185,9 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb) enum rtw_scan_notify_id id; u8 chan, status; + if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + return; + c2h = get_c2h_from_skb(skb); chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload); id = GET_CHAN_SWITCH_ID(c2h->payload); -- cgit v1.2.3 From 3254e0b9eb5649ffaa48717ebc9c593adc4ee6a9 Mon Sep 17 00:00:00 2001 From: Alexandru Tachici Date: Fri, 29 Apr 2022 18:34:31 +0300 Subject: ethtool: Add 10base-T1L link mode entry Add entry for the 10base-T1L full duplex mode. Reviewed-by: Andrew Lunn Reviewed-by: Oleksij Rempel Signed-off-by: Alexandru Tachici Signed-off-by: David S. Miller --- drivers/net/phy/phy-core.c | 3 ++- drivers/net/phy/phy_device.c | 3 ++- drivers/net/phy/phylink.c | 4 +++- include/linux/phy.h | 2 +- include/uapi/linux/ethtool.h | 1 + net/ethtool/common.c | 3 +++ 6 files changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 2001f3329133..1f2531a1a876 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -13,7 +13,7 @@ */ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 93, "Enum ethtool_link_mode_bit_indices and phylib are out of sync.
" "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -176,6 +176,7 @@ static const struct phy_setting settings[] = { /* 10M */ PHY_SETTING( 10, FULL, 10baseT_Full ), PHY_SETTING( 10, HALF, 10baseT_Half ), + PHY_SETTING( 10, FULL, 10baseT1L_Full ), }; #undef PHY_SETTING diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index f867042b2eb4..1369daeded14 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -90,8 +90,9 @@ const int phy_10_100_features_array[4] = { }; EXPORT_SYMBOL_GPL(phy_10_100_features_array); -const int phy_basic_t1_features_array[2] = { +const int phy_basic_t1_features_array[3] = { ETHTOOL_LINK_MODE_TP_BIT, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, ETHTOOL_LINK_MODE_100baseT1_Full_BIT, }; EXPORT_SYMBOL_GPL(phy_basic_t1_features_array); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 33c285252584..d707604d1d5a 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -168,8 +168,10 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes, if (caps & MAC_10HD) __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes); - if (caps & MAC_10FD) + if (caps & MAC_10FD) { __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes); + __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes); + } if (caps & MAC_100HD) { __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes); diff --git a/include/linux/phy.h b/include/linux/phy.h index 36ca2b5c2253..b12af9e2f389 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -65,7 +65,7 @@ extern const int phy_basic_ports_array[3]; extern const int phy_fibre_port_array[1]; extern const int phy_all_ports_features_array[7]; extern const int phy_10_100_features_array[4]; -extern const int phy_basic_t1_features_array[2]; +extern const int phy_basic_t1_features_array[3]; extern const int phy_gbit_features_array[2]; extern const int phy_10gbit_features_array[1]; diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 7bc4b8def12c..e0f0ee9bc89e 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1691,6 +1691,7 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + ETHTOOL_LINK_MODE_10baseT1L_Full_BIT = 92, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS }; diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 0c5210015911..566adf85e658 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -201,6 +201,7 @@ const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(400000, CR4, Full), __DEFINE_LINK_MODE_NAME(100, FX, Half), __DEFINE_LINK_MODE_NAME(100, FX, Full), + __DEFINE_LINK_MODE_NAME(10, T1L, Full), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -236,6 +237,7 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); #define __LINK_MODE_LANES_T1 1 #define __LINK_MODE_LANES_X 1 #define __LINK_MODE_LANES_FX 1 +#define __LINK_MODE_LANES_T1L 1 #define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \ [ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \ @@ -349,6 +351,7 @@ const struct link_mode_info link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full), __DEFINE_LINK_MODE_PARAMS(100, FX, Half), __DEFINE_LINK_MODE_PARAMS(100, FX, Full), + __DEFINE_LINK_MODE_PARAMS(10, T1L, Full), }; 
static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS); -- cgit v1.2.3 From 3da8ffd8545f62fec85a48a3c637b2f427974f11 Mon Sep 17 00:00:00 2001 From: Alexandru Tachici Date: Fri, 29 Apr 2022 18:34:34 +0300 Subject: net: phy: Add 10BASE-T1L support in phy-c45 This patch is needed because the BASE-T1 uses different registers for status, control and advertisement to those already employed in the existing phy-c45 functions. Where required, genphy_c45 functions will now check whether the device supports BASE-T1 and use the specific registers instead: 45.2.7.19 BASE-T1 AN control register, 45.2.7.20 BASE-T1 AN status, 45.2.7.21 BASE-T1 AN advertisement register, 45.2.7.22 BASE-T1 AN LP Base Page ability register, 45.2.1.185 BASE-T1 PMA/PMD control register. Tested-by: Oleksij Rempel Signed-off-by: Alexandru Tachici Signed-off-by: David S. Miller --- drivers/net/phy/phy-c45.c | 257 ++++++++++++++++++++++++++++++++++++++++++- drivers/net/phy/phy_device.c | 1 + include/linux/mdio.h | 70 ++++++++++++ include/linux/phy.h | 3 + include/uapi/linux/mdio.h | 10 ++ 5 files changed, 336 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index db709d30bf84..eefdd67d5556 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -8,6 +8,25 @@ #include #include +/** + * genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities + * @phydev: target phy_device struct + */ +static bool genphy_c45_baset1_able(struct phy_device *phydev) +{ + int val; + + if (phydev->pma_extable == -ENODATA) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); + if (val < 0) + return false; + + phydev->pma_extable = val; + } + + return !!(phydev->pma_extable & MDIO_PMA_EXTABLE_BT1); +} + /** * genphy_c45_pma_can_sleep - checks if the PMA have sleep support * @phydev: target phy_device struct @@ -80,7 +99,10 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) switch (phydev->speed) { case SPEED_10: - ctrl2 |= MDIO_PMA_CTRL2_10BT; + if (genphy_c45_baset1_able(phydev)) + ctrl2 |= MDIO_PMA_CTRL2_BASET1; + else + ctrl2 |= MDIO_PMA_CTRL2_10BT; break; case SPEED_100: ctrl1 |= MDIO_PMA_CTRL1_SPEED100; @@ -118,10 +140,95 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) if (ret < 0) return ret; + if (genphy_c45_baset1_able(phydev)) { + int ctl = 0; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_MASTER_FORCE: + ctl = MDIO_PMA_PMD_BT1_CTRL_CFG_MST; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + break; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_CFG_MST, ctl); + if (ret < 0) + return ret; + } + return genphy_c45_an_disable_aneg(phydev); } EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); +/* Sets master/slave preference and supported technologies. + * The preference is set in the BIT(4) of BASE-T1 AN + * advertisement register 7.515 and whether the status + * is forced or not, it is set in the BIT(12) of BASE-T1 + * AN advertisement register 7.514. + * Sets 10BASE-T1L Ability BIT(14) in BASE-T1 autonegotiation + * advertisement register [31:16] if supported. 
+ */ +static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev) +{ + int changed = 0; + u16 adv_l = 0; + u16 adv_m = 0; + int ret; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_SLAVE_FORCE: + adv_l |= MDIO_AN_T1_ADV_L_FORCE_MS; + break; + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; + default: + break; + } + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_FORCE: + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + adv_m |= MDIO_AN_T1_ADV_M_MST; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; + default: + break; + } + + adv_l |= linkmode_adv_to_mii_t1_adv_l_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L, + (MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP + | MDIO_AN_T1_ADV_L_PAUSE_ASYM), adv_l); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + adv_m |= linkmode_adv_to_mii_t1_adv_m_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M, + MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L, adv_m); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + return changed; +} + /** * genphy_c45_an_config_aneg - configure advertisement registers * @phydev: target phy_device struct @@ -141,6 +248,9 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev) changed = genphy_config_eee_advert(phydev); + if (genphy_c45_baset1_able(phydev)) + return genphy_c45_baset1_an_config_aneg(phydev); + adv = linkmode_adv_to_mii_adv_t(phydev->advertising); ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, @@ -178,8 +288,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg); */ int genphy_c45_an_disable_aneg(struct phy_device *phydev) { + u16 reg = MDIO_CTRL1; - return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + + return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg, MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); @@ -194,7 +308,12 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); */ int genphy_c45_restart_aneg(struct phy_device *phydev) { - return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + u16 reg = MDIO_CTRL1; + + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, reg, MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); @@ -210,11 +329,15 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); */ int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart) { + u16 reg = MDIO_CTRL1; int ret; + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_CTRL; + if (!restart) { /* Configure and restart aneg if it wasn't set before */ - ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + ret = phy_read_mmd(phydev, MDIO_MMD_AN, reg); if (ret < 0) return ret; @@ -242,7 +365,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg); */ int genphy_c45_aneg_done(struct phy_device *phydev) { - int val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + int reg = MDIO_STAT1; + int val; + + if (genphy_c45_baset1_able(phydev)) + reg = MDIO_AN_T1_STAT; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, reg); return val < 0 ? val : val & MDIO_AN_STAT1_COMPLETE ? 
1 : 0; } @@ -307,6 +436,49 @@ int genphy_c45_read_link(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_link); +/* Read the Clause 45 defined BASE-T1 AN (7.513) status register to check + * if autoneg is complete. If so read the BASE-T1 Autonegotiation + * Advertisement registers filling in the link partner advertisement, + * pause and asym_pause members in phydev. + */ +static int genphy_c45_baset1_read_lpa(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); + if (val < 0) + return val; + + if (!(val & MDIO_AN_STAT1_COMPLETE)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising); + mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, 0); + mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, 0); + + phydev->pause = 0; + phydev->asym_pause = 0; + + return 0; + } + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising, 1); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_L); + if (val < 0) + return val; + + mii_t1_adv_l_mod_linkmode_t(phydev->lp_advertising, val); + phydev->pause = val & MDIO_AN_T1_ADV_L_PAUSE_CAP ? 1 : 0; + phydev->asym_pause = val & MDIO_AN_T1_ADV_L_PAUSE_ASYM ? 1 : 0; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_LP_M); + if (val < 0) + return val; + + mii_t1_adv_m_mod_linkmode_t(phydev->lp_advertising, val); + + return 0; +} + /** * genphy_c45_read_lpa - read the link partner advertisement and pause * @phydev: target phy_device struct @@ -321,6 +493,9 @@ int genphy_c45_read_lpa(struct phy_device *phydev) { int val; + if (genphy_c45_baset1_able(phydev)) + return genphy_c45_baset1_read_lpa(phydev); + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; @@ -399,6 +574,17 @@ int genphy_c45_read_pma(struct phy_device *phydev) phydev->duplex = DUPLEX_FULL; + if (genphy_c45_baset1_able(phydev)) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL); + if (val < 0) + return val; + + if (MDIO_PMA_PMD_BT1_CTRL_CFG_MST) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + else + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + } + return 0; } EXPORT_SYMBOL_GPL(genphy_c45_read_pma); @@ -530,12 +716,67 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev) phydev->supported, val & MDIO_PMA_NG_EXTABLE_5GBT); } + + if (val & MDIO_PMA_EXTABLE_BT1) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, + phydev->supported, + val & MDIO_PMA_PMD_BT1_B10L_ABLE); + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_STAT); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported, + val & MDIO_AN_STAT1_ABLE); + } } return 0; } EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities); +/* Read master/slave preference from registers. + * The preference is read from the BIT(4) of BASE-T1 AN + * advertisement register 7.515 and whether the preference + * is forced or not, it is read from BASE-T1 AN advertisement + * register 7.514. 
+ */ +static int genphy_c45_baset1_read_status(struct phy_device *phydev) +{ + int ret; + int cfg; + + phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_L); + if (ret < 0) + return ret; + + cfg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_M); + if (cfg < 0) + return cfg; + + if (ret & MDIO_AN_T1_ADV_L_FORCE_MS) { + if (cfg & MDIO_AN_T1_ADV_M_MST) + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE; + else + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE; + } else { + if (cfg & MDIO_AN_T1_ADV_M_MST) + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_PREFERRED; + else + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_PREFERRED; + } + + return 0; +} + /** * genphy_c45_read_status - read PHY status * @phydev: target phy_device struct @@ -560,6 +801,12 @@ int genphy_c45_read_status(struct phy_device *phydev) if (ret) return ret; + if (genphy_c45_baset1_able(phydev)) { + ret = genphy_c45_baset1_read_status(phydev); + if (ret < 0) + return ret; + } + phy_resolve_aneg_linkmode(phydev); } else { ret = genphy_c45_read_pma(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1369daeded14..431a8719c635 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -600,6 +600,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, dev->autoneg = AUTONEG_ENABLE; + dev->pma_extable = -ENODATA; dev->is_c45 = is_c45; dev->phy_id = phy_id; if (c45_ids) diff --git a/include/linux/mdio.h b/include/linux/mdio.h index ecac96d52e01..00177567cfef 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -340,6 +340,76 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising, advertising, lpa & MDIO_AN_10GBT_STAT_LP10G); } +/** + * mii_t1_adv_l_mod_linkmode_t + * @advertising: target the linkmode advertisement settings + * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register + * + * A small helper function that translates BASE-T1 Autonegotiation + * Advertisement [15:0] Register bits to linkmode advertisement settings. + * Other bits in advertising aren't changed. + */ +static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising, + lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising, + lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM); +} + +/** + * mii_t1_adv_m_mod_linkmode_t + * @advertising: target the linkmode advertisement settings + * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register + * + * A small helper function that translates BASE-T1 Autonegotiation + * Advertisement [31:16] Register bits to linkmode advertisement settings. + * Other bits in advertising aren't changed. + */ +static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, + advertising, lpa & MDIO_AN_T1_ADV_M_B10L); +} + +/** + * linkmode_adv_to_mii_t1_adv_l_t + * @advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * BASE-T1 Autonegotiation Advertisement [15:0] Register. 
+ */ +static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) + result |= MDIO_AN_T1_ADV_L_PAUSE_CAP; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) + result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM; + + return result; +} + +/** + * linkmode_adv_to_mii_t1_adv_m_t + * @advertising: the linkmode advertisement settings + * + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the + * BASE-T1 Autonegotiation Advertisement [31:16] Register. + */ +static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising)) + result |= MDIO_AN_T1_ADV_M_B10L; + + return result; +} + int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, diff --git a/include/linux/phy.h b/include/linux/phy.h index b12af9e2f389..2d12054932ba 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -570,6 +570,7 @@ struct macsec_ops; * @autoneg_complete: Flag auto negotiation of the link has completed * @mdix: Current crossover * @mdix_ctrl: User setting of crossover + * @pma_extable: Cached value of PMA/PMD Extended Abilities Register * @interrupts: Flag interrupts have been enabled * @interface: enum phy_interface_t value * @skb: Netlink message for cable diagnostics @@ -698,6 +699,8 @@ struct phy_device { u8 mdix; u8 mdix_ctrl; + int pma_extable; + void (*phy_link_change)(struct phy_device *phydev, bool up); void (*adjust_link)(struct net_device *dev); diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index fa3515257f54..75b7257a51e1 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -70,6 +70,7 @@ #define MDIO_B10L_PMA_CTRL 2294 /* 10BASE-T1L PMA control */ #define MDIO_PMA_10T1L_STAT 2295 /* 10BASE-T1L PMA status */ #define MDIO_PCS_10T1L_CTRL 2278 /* 10BASE-T1L PCS control */ +#define MDIO_PMA_PMD_BT1 18 /* BASE-T1 PMA/PMD extended ability */ #define MDIO_AN_T1_CTRL 512 /* BASE-T1 AN control */ #define MDIO_AN_T1_STAT 513 /* BASE-T1 AN status */ #define MDIO_AN_T1_ADV_L 514 /* BASE-T1 AN advertisement register [15:0] */ @@ -78,6 +79,7 @@ #define MDIO_AN_T1_LP_L 517 /* BASE-T1 AN LP Base Page ability register [15:0] */ #define MDIO_AN_T1_LP_M 518 /* BASE-T1 AN LP Base Page ability register [31:16] */ #define MDIO_AN_T1_LP_H 519 /* BASE-T1 AN LP Base Page ability register [47:32] */ +#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */ /* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. 
*/ #define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ @@ -170,6 +172,7 @@ #define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */ #define MDIO_PMA_CTRL2_2_5GBT 0x0030 /* 2.5GBaseT type */ #define MDIO_PMA_CTRL2_5GBT 0x0031 /* 5GBaseT type */ +#define MDIO_PMA_CTRL2_BASET1 0x003D /* BASE-T1 type */ #define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */ #define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */ #define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */ @@ -223,6 +226,7 @@ #define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */ #define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */ #define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */ +#define MDIO_PMA_EXTABLE_BT1 0x0800 /* BASE-T1 ability */ #define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */ /* PHY XGXS lane state register. */ @@ -301,6 +305,9 @@ #define MDIO_PCS_10T1L_CTRL_LB 0x4000 /* Enable PCS level loopback mode */ #define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */ +/* BASE-T1 PMA/PMD extended ability register. */ +#define MDIO_PMA_PMD_BT1_B10L_ABLE 0x0004 /* 10BASE-T1L Ability */ + /* BASE-T1 auto-negotiation advertisement register [15:0] */ #define MDIO_AN_T1_ADV_L_PAUSE_CAP ADVERTISE_PAUSE_CAP #define MDIO_AN_T1_ADV_L_PAUSE_ASYM ADVERTISE_PAUSE_ASYM @@ -333,6 +340,9 @@ #define MDIO_AN_T1_LP_H_10L_TX_HI_REQ 0x1000 /* 10BASE-T1L High Level LP Transmit Request */ #define MDIO_AN_T1_LP_H_10L_TX_HI 0x2000 /* 10BASE-T1L High Level LP Transmit Ability */ +/* BASE-T1 PMA/PMD control register */ +#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */ + /* EEE Supported/Advertisement/LP Advertisement registers. * * EEE capability Register (3.20), Advertisement (7.60) and -- cgit v1.2.3 From 7eaf9132996a34ba2643efe91a355385c6ff5114 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 29 Apr 2022 18:34:35 +0300 Subject: net: phy: adin1100: Add initial support for ADIN1100 industrial PHY The ADIN1100 is a low power single port 10BASE-T1L transceiver designed for industrial Ethernet applications and is compliant with the IEEE 802.3cg Ethernet standard for long reach 10 Mb/s Single Pair Ethernet. Signed-off-by: Alexandru Ardelean Signed-off-by: Alexandru Tachici Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 7 ++ drivers/net/phy/Makefile | 1 + drivers/net/phy/adin1100.c | 240 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 248 insertions(+) create mode 100644 drivers/net/phy/adin1100.c (limited to 'drivers') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index ea7571a2b39b..bbbf6c07ea53 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -83,6 +83,13 @@ config ADIN_PHY - ADIN1300 - Robust,Industrial, Low Latency 10/100/1000 Gigabit Ethernet PHY +config ADIN1100_PHY + tristate "Analog Devices Industrial Ethernet T1L PHYs" + help + Adds support for the Analog Devices Industrial T1L Ethernet PHYs. 
+ Currently supports the: + - ADIN1100 - Robust,Industrial, Low Power 10BASE-T1L Ethernet PHY + config AQUANTIA_PHY tristate "Aquantia PHYs" help diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index b2728d00fc9a..b82651b57043 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -31,6 +31,7 @@ sfp-obj-$(CONFIG_SFP) += sfp-bus.o obj-y += $(sfp-obj-y) $(sfp-obj-m) obj-$(CONFIG_ADIN_PHY) += adin.o +obj-$(CONFIG_ADIN1100_PHY) += adin1100.o obj-$(CONFIG_AMD_PHY) += amd.o aquantia-objs += aquantia_main.o ifdef CONFIG_HWMON diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c new file mode 100644 index 000000000000..f20bbef37239 --- /dev/null +++ b/drivers/net/phy/adin1100.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * Driver for Analog Devices Industrial Ethernet T1L PHYs + * + * Copyright 2020 Analog Devices Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PHY_ID_ADIN1100 0x0283bc81 + +#define ADIN_FORCED_MODE 0x8000 +#define ADIN_FORCED_MODE_EN BIT(0) + +#define ADIN_CRSM_SFT_RST 0x8810 +#define ADIN_CRSM_SFT_RST_EN BIT(0) + +#define ADIN_CRSM_SFT_PD_CNTRL 0x8812 +#define ADIN_CRSM_SFT_PD_CNTRL_EN BIT(0) + +#define ADIN_AN_PHY_INST_STATUS 0x8030 +#define ADIN_IS_CFG_SLV BIT(2) +#define ADIN_IS_CFG_MST BIT(3) + +#define ADIN_CRSM_STAT 0x8818 +#define ADIN_CRSM_SFT_PD_RDY BIT(1) +#define ADIN_CRSM_SYS_RDY BIT(0) + +/** + * struct adin_priv - ADIN PHY driver private data + * @tx_level_2v4_able: set if the PHY supports 2.4V TX levels (10BASE-T1L) + * @tx_level_2v4: set if the PHY requests 2.4V TX levels (10BASE-T1L) + * @tx_level_prop_present: set if the TX level is specified in DT + */ +struct adin_priv { + unsigned int tx_level_2v4_able:1; + unsigned int tx_level_2v4:1; + unsigned int tx_level_prop_present:1; +}; + +static int adin_read_status(struct phy_device *phydev) +{ + int ret; + + ret = genphy_c45_read_status(phydev); + if (ret) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_AN, ADIN_AN_PHY_INST_STATUS); + if (ret < 0) + return ret; + + if (ret & ADIN_IS_CFG_SLV) + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + + if (ret & ADIN_IS_CFG_MST) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + + return 0; +} + +static int adin_config_aneg(struct phy_device *phydev) +{ + struct adin_priv *priv = phydev->priv; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) { + ret = genphy_c45_pma_setup_forced(phydev); + if (ret < 0) + return ret; + + if (priv->tx_level_prop_present && priv->tx_level_2v4) + ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL, + MDIO_PMA_10T1L_CTRL_2V4_EN); + else + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_B10L_PMA_CTRL, + MDIO_PMA_10T1L_CTRL_2V4_EN); + if (ret < 0) + return ret; + + /* Force PHY to use above configurations */ + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN); + } + + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, ADIN_FORCED_MODE, ADIN_FORCED_MODE_EN); + if (ret < 0) + return ret; + + /* Request increased transmit level from LP. */ + if (priv->tx_level_prop_present && priv->tx_level_2v4) { + ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, + MDIO_AN_T1_ADV_H_10L_TX_HI | + MDIO_AN_T1_ADV_H_10L_TX_HI_REQ); + if (ret < 0) + return ret; + } + + /* Disable 2.4 Vpp transmit level. 
*/ + if ((priv->tx_level_prop_present && !priv->tx_level_2v4) || !priv->tx_level_2v4_able) { + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, + MDIO_AN_T1_ADV_H_10L_TX_HI | + MDIO_AN_T1_ADV_H_10L_TX_HI_REQ); + if (ret < 0) + return ret; + } + + return genphy_c45_config_aneg(phydev); +} + +static int adin_set_powerdown_mode(struct phy_device *phydev, bool en) +{ + int ret; + int val; + + val = en ? ADIN_CRSM_SFT_PD_CNTRL_EN : 0; + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, + ADIN_CRSM_SFT_PD_CNTRL, val); + if (ret < 0) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret, + (ret & ADIN_CRSM_SFT_PD_RDY) == val, + 1000, 30000, true); +} + +static int adin_suspend(struct phy_device *phydev) +{ + return adin_set_powerdown_mode(phydev, true); +} + +static int adin_resume(struct phy_device *phydev) +{ + return adin_set_powerdown_mode(phydev, false); +} + +static int adin_set_loopback(struct phy_device *phydev, bool enable) +{ + if (enable) + return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL, + BMCR_LOOPBACK); + + /* PCS loopback (according to 10BASE-T1L spec) */ + return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL, + BMCR_LOOPBACK); +} + +static int adin_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN_CRSM_SFT_RST, ADIN_CRSM_SFT_RST_EN); + if (ret < 0) + return ret; + + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, ADIN_CRSM_STAT, ret, + (ret & ADIN_CRSM_SYS_RDY), + 10000, 30000, true); +} + +static int adin_get_features(struct phy_device *phydev) +{ + struct adin_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + int ret; + u8 val; + + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT); + if (ret < 0) + return ret; + + /* This depends on the voltage level from the power source */ + priv->tx_level_2v4_able = !!(ret & MDIO_PMA_10T1L_STAT_2V4_ABLE); + + phydev_dbg(phydev, "PHY supports 2.4V TX level: %s\n", + priv->tx_level_2v4_able ? 
"yes" : "no"); + + priv->tx_level_prop_present = device_property_present(dev, "phy-10base-t1l-2.4vpp"); + if (priv->tx_level_prop_present) { + ret = device_property_read_u8(dev, "phy-10base-t1l-2.4vpp", &val); + if (ret < 0) + return ret; + + priv->tx_level_2v4 = val; + if (!priv->tx_level_2v4 && priv->tx_level_2v4_able) + phydev_info(phydev, + "PHY supports 2.4V TX level, but disabled via config\n"); + } + + linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), + phydev->supported); + + return genphy_c45_pma_read_abilities(phydev); +} + +static int adin_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct adin_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + return 0; +} + +static struct phy_driver adin_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100), + .name = "ADIN1100", + .get_features = adin_get_features, + .soft_reset = adin_soft_reset, + .probe = adin_probe, + .config_aneg = adin_config_aneg, + .read_status = adin_read_status, + .set_loopback = adin_set_loopback, + .suspend = adin_suspend, + .resume = adin_resume, + }, +}; + +module_phy_driver(adin_driver); + +static struct mdio_device_id __maybe_unused adin_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, adin_tbl); +MODULE_DESCRIPTION("Analog Devices Industrial Ethernet T1L PHY driver"); +MODULE_LICENSE("Dual BSD/GPL"); -- cgit v1.2.3 From 48f20f90211978140fc391c4c1f10016f020a0fa Mon Sep 17 00:00:00 2001 From: Alexandru Tachici Date: Fri, 29 Apr 2022 18:34:36 +0300 Subject: net: phy: adin1100: Add SQI support Determine the SQI from MSE using a predefined table for the 10BASE-T1L. Reviewed-by: Andrew Lunn Signed-off-by: Alexandru Tachici Signed-off-by: David S. 
Miller --- drivers/net/phy/adin1100.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c index f20bbef37239..b6d139501199 100644 --- a/drivers/net/phy/adin1100.c +++ b/drivers/net/phy/adin1100.c @@ -33,6 +33,26 @@ #define ADIN_CRSM_SFT_PD_RDY BIT(1) #define ADIN_CRSM_SYS_RDY BIT(0) +#define ADIN_MSE_VAL 0x830B + +#define ADIN_SQI_MAX 7 + +struct adin_mse_sqi_range { + u16 start; + u16 end; +}; + +static const struct adin_mse_sqi_range adin_mse_sqi_map[] = { + { 0x0A74, 0xFFFF }, + { 0x084E, 0x0A74 }, + { 0x0698, 0x084E }, + { 0x053D, 0x0698 }, + { 0x0429, 0x053D }, + { 0x034E, 0x0429 }, + { 0x02A0, 0x034E }, + { 0x0000, 0x02A0 }, +}; + /** * struct adin_priv - ADIN PHY driver private data * @tx_level_2v4_able: set if the PHY supports 2.4V TX levels (10BASE-T1L) @@ -199,6 +219,36 @@ static int adin_get_features(struct phy_device *phydev) return genphy_c45_pma_read_abilities(phydev); } +static int adin_get_sqi(struct phy_device *phydev) +{ + u16 mse_val; + int sqi; + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT1); + if (ret < 0) + return ret; + else if (!(ret & MDIO_STAT1_LSTATUS)) + return 0; + + ret = phy_read_mmd(phydev, MDIO_STAT1, ADIN_MSE_VAL); + if (ret < 0) + return ret; + + mse_val = 0xFFFF & ret; + for (sqi = 0; sqi < ARRAY_SIZE(adin_mse_sqi_map); sqi++) { + if (mse_val >= adin_mse_sqi_map[sqi].start && mse_val <= adin_mse_sqi_map[sqi].end) + return sqi; + } + + return -EINVAL; +} + +static int adin_get_sqi_max(struct phy_device *phydev) +{ + return ADIN_SQI_MAX; +} + static int adin_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; @@ -225,6 +275,8 @@ static struct phy_driver adin_driver[] = { .set_loopback = adin_set_loopback, .suspend = adin_suspend, .resume = adin_resume, + .get_sqi = adin_get_sqi, + .get_sqi_max = adin_get_sqi_max, }, }; -- cgit v1.2.3 From e1cf330fa28acc342e527baf12298a5fd8219bad Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Apr 2022 10:44:46 -0700 Subject: can: m_can: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Link: https://lore.kernel.org/all/20220429174446.196655-1-kuba@kernel.org Signed-off-by: Jakub Kicinski Signed-off-by: Marc Kleine-Budde --- drivers/net/can/m_can/m_can.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2779bba390f2..e6d2da4a9f41 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -77,9 +77,6 @@ enum m_can_reg { M_CAN_TXEFA = 0xf8, }; -/* napi related */ -#define M_CAN_NAPI_WEIGHT 64 - /* message ram configuration data length */ #define MRAM_CFG_LEN 8 @@ -951,7 +948,7 @@ static int m_can_rx_peripheral(struct net_device *dev) struct m_can_classdev *cdev = netdev_priv(dev); int work_done; - work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); + work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT); /* Don't re-enable interrupts if the driver had a fatal error * (e.g., FIFO read failure). 
@@ -1474,7 +1471,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) if (!cdev->is_peripheral) netif_napi_add(dev, &cdev->napi, - m_can_poll, M_CAN_NAPI_WEIGHT); + m_can_poll, NAPI_POLL_WEIGHT); /* Shared properties of all M_CAN versions */ cdev->version = m_can_version; @@ -1994,7 +1991,7 @@ int m_can_class_register(struct m_can_classdev *cdev) if (cdev->is_peripheral) { ret = can_rx_offload_add_manual(cdev->net, &cdev->offload, - M_CAN_NAPI_WEIGHT); + NAPI_POLL_WEIGHT); if (ret) goto clk_disable; } -- cgit v1.2.3 From 704fd176204577459beadb37d46e164d376fabc3 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 22 Apr 2022 04:28:52 +0800 Subject: can: ctucanfd: remove unused including Eliminate the follow versioncheck warning: | drivers/net/can/ctucanfd/ctucanfd_base.c: 34 linux/version.h not needed. Link: https://lore.kernel.org/all/20220421202852.2693-1-jiapeng.chong@linux.alibaba.com Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Acked-by: Pave Pisa Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/ctucanfd_base.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index 7a4550f60abb..be90136be442 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -31,7 +31,6 @@ #include #include #include -#include #include "ctucanfd.h" #include "ctucanfd_kregs.h" -- cgit v1.2.3 From e715d4459485377273c085d30992b8f9d70eede0 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 22 Apr 2022 04:32:42 +0800 Subject: can: ctucanfd: ctucan_platform_probe(): remove unnecessary print function dev_err() The print function dev_err() is redundant because platform_get_irq() already prints an error. Eliminate the follow coccicheck warnings: | drivers/net/can/ctucanfd/ctucanfd_platform.c:67:2-9: | line 67 is redundant because platform_get_irq() already prints an error. Link: https://lore.kernel.org/all/20220421203242.7335-1-jiapeng.chong@linux.alibaba.com Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Acked-by: Pave Pisa Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/ctucanfd_platform.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c index 5e4806068662..89d54c2151e1 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_platform.c +++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c @@ -64,7 +64,6 @@ static int ctucan_platform_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "Cannot find interrupt.\n"); ret = irq; goto err; } -- cgit v1.2.3 From a51491ac6ed2f55d0c4b65bc8370b3833dca6b7b Mon Sep 17 00:00:00 2001 From: Pavel Pisa Date: Sun, 24 Apr 2022 18:28:08 +0200 Subject: can: ctucanfd: remove inline keyword from local static functions This patch removes the inline keywords from the local static functions to make both checkpatch.pl and patchwork happy. 
Link: https://lore.kernel.org/all/1fd684bcf5ddb0346aad234072f54e976a5210fb.1650816929.git.pisa@cmp.felk.cvut.cz Signed-off-by: Pavel Pisa [mkl: split into separate patches] Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/ctucanfd_base.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index be90136be442..10c2517a395b 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -132,13 +132,12 @@ static u32 ctucan_read32_be(struct ctucan_priv *priv, return ioread32be(priv->mem_base + reg); } -static inline void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, - u32 val) +static void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val) { priv->write_reg(priv, reg, val); } -static inline u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) +static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) { return priv->read_reg(priv, reg); } @@ -485,7 +484,7 @@ static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode) * * Return: Status of TXT buffer */ -static inline enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf) +static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf) { u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS); enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7; -- cgit v1.2.3 From e391a0f7be617b16a3b4d1092596519bd96889a4 Mon Sep 17 00:00:00 2001 From: Pavel Pisa Date: Sun, 24 Apr 2022 18:28:08 +0200 Subject: can: ctucanfd: remove debug statements This patch removes the debug statements from the driver to make checkpatch.pl and patchwork happy. 
Link: https://lore.kernel.org/all/1fd684bcf5ddb0346aad234072f54e976a5210fb.1650816929.git.pisa@cmp.felk.cvut.cz Signed-off-by: Pavel Pisa [mkl: split into separate patches] Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/ctucanfd_base.c | 26 -------------------------- 1 file changed, 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index 10c2517a395b..2ada097d1ede 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -177,8 +177,6 @@ static int ctucan_reset(struct net_device *ndev) struct ctucan_priv *priv = netdev_priv(ndev); int i = 100; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST); clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); @@ -264,8 +262,6 @@ static int ctucan_set_bittiming(struct net_device *ndev) struct ctucan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - /* Note that bt may be modified here */ return ctucan_set_btr(ndev, bt, true); } @@ -281,8 +277,6 @@ static int ctucan_set_data_bittiming(struct net_device *ndev) struct ctucan_priv *priv = netdev_priv(ndev); struct can_bittiming *dbt = &priv->can.data_bittiming; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - /* Note that dbt may be modified here */ return ctucan_set_btr(ndev, dbt, false); } @@ -300,8 +294,6 @@ static int ctucan_set_secondary_sample_point(struct net_device *ndev) int ssp_offset = 0; u32 ssp_cfg = 0; /* No SSP by default */ - ctucan_netdev_dbg(ndev, "%s\n", __func__); - if (CTU_CAN_FD_ENABLED(priv)) { netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n"); return -EPERM; @@ -388,8 +380,6 @@ static int ctucan_chip_start(struct net_device *ndev) int err; struct can_ctrlmode mode; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - priv->txb_prio = 0x01234567; priv->txb_head = 0; priv->txb_tail = 0; @@ -455,8 +445,6 @@ static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode) { int ret; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - switch (mode) { case CAN_MODE_START: ret = ctucan_reset(ndev); @@ -1121,8 +1109,6 @@ static irqreturn_t ctucan_interrupt(int irq, void *dev_id) u32 imask; int irq_loops; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - for (irq_loops = 0; irq_loops < 10000; irq_loops++) { /* Get the interrupt status */ isr = ctucan_read32(priv, CTUCANFD_INT_STAT); @@ -1196,8 +1182,6 @@ static void ctucan_chip_stop(struct net_device *ndev) u32 mask = 0xffffffff; u32 mode; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - /* Disable interrupts and disable CAN */ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask); ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask); @@ -1220,8 +1204,6 @@ static int ctucan_open(struct net_device *ndev) struct ctucan_priv *priv = netdev_priv(ndev); int ret; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", @@ -1281,8 +1263,6 @@ static int ctucan_close(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); - ctucan_netdev_dbg(ndev, "%s\n", __func__); - netif_stop_queue(ndev); napi_disable(&priv->napi); ctucan_chip_stop(ndev); @@ -1308,8 +1288,6 @@ static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_ber struct ctucan_priv *priv = netdev_priv(ndev); int ret; - ctucan_netdev_dbg(ndev, "%s\n", __func__); - ret = 
pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); @@ -1335,8 +1313,6 @@ int ctucan_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct ctucan_priv *priv = netdev_priv(ndev); - ctucan_netdev_dbg(ndev, "%s\n", __func__); - if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); @@ -1353,8 +1329,6 @@ int ctucan_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct ctucan_priv *priv = netdev_priv(ndev); - ctucan_netdev_dbg(ndev, "%s\n", __func__); - priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { -- cgit v1.2.3 From 28b250e070e9a45a814d13c4ae756aab1298ff27 Mon Sep 17 00:00:00 2001 From: Pavel Pisa Date: Sun, 24 Apr 2022 18:28:08 +0200 Subject: can: ctucanfd: remove PCI module debug parameters This patch removes the PCI module debug parameters, which are not needed anymore, to make both checkpatch.pl and patchwork happy. Link: https://lore.kernel.org/all/1fd684bcf5ddb0346aad234072f54e976a5210fb.1650816929.git.pisa@cmp.felk.cvut.cz Signed-off-by: Pavel Pisa [mkl: split into separate patches] Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/ctucanfd_pci.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c index c37a42480533..8f2956a8ae43 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_pci.c +++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c @@ -45,14 +45,6 @@ #define CTUCAN_WITHOUT_CTUCAN_ID 0 #define CTUCAN_WITH_CTUCAN_ID 1 -static bool use_msi = true; -module_param(use_msi, bool, 0444); -MODULE_PARM_DESC(use_msi, "PCIe implementation use MSI interrupts. Default: 1 (yes)"); - -static bool pci_use_second = true; -module_param(pci_use_second, bool, 0444); -MODULE_PARM_DESC(pci_use_second, "Use the second CAN core on PCIe card. Default: 1 (yes)"); - struct ctucan_pci_board_data { void __iomem *bar0_base; void __iomem *cra_base; @@ -117,13 +109,11 @@ static int ctucan_pci_probe(struct pci_dev *pdev, goto err_disable_device; } - if (use_msi) { - ret = pci_enable_msi(pdev); - if (!ret) { - dev_info(dev, "MSI enabled\n"); - pci_set_master(pdev); - msi_ok = 1; - } + ret = pci_enable_msi(pdev); + if (!ret) { + dev_info(dev, "MSI enabled\n"); + pci_set_master(pdev); + msi_ok = 1; } dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n", @@ -184,7 +174,7 @@ static int ctucan_pci_probe(struct pci_dev *pdev, core_i++; - while (pci_use_second && (core_i < num_cores)) { + while (core_i < num_cores) { addr += 0x4000; ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000, 0, ctucan_pci_set_drvdata); -- cgit v1.2.3 From ae664d9d8559b123766b47af4dfbfa877a80156e Mon Sep 17 00:00:00 2001 From: Fei Qin Date: Sun, 1 May 2022 08:11:50 +0900 Subject: nfp: support VxLAN inner TSO with GSO_PARTIAL offload VxLAN belongs to UDP-based encapsulation protocol. Inner TSO for VxLAN packet with udpcsum requires offloading of outer header csum. The device doesn't support outer header csum offload. However, inner TSO for VxLAN with udpcsum can still work with GSO_PARTIAL offload, which means outer udp csum computed by stack and inner tcp segmentation finished by hardware. Thus, the patch enable features "NETIF_F_GSO_UDP_TUNNEL_CSUM" and "NETIF_F_GSO_PARTIAL" and set gso_partial_features. 
Signed-off-by: Fei Qin Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Simon Horman Link: https://lore.kernel.org/r/20220430231150.175270-1-simon.horman@corigine.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index b412670d89b2..5528d12d1f48 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2259,8 +2259,12 @@ static void nfp_net_netdev_init(struct nfp_net *nn) if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) netdev->hw_features |= NETIF_F_RXHASH; if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { - if (nn->cap & NFP_NET_CFG_CTRL_LSO) - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + if (nn->cap & NFP_NET_CFG_CTRL_LSO) { + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; + } netdev->udp_tunnel_nic_info = &nfp_udp_tunnels; nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; } -- cgit v1.2.3 From 70dcf3cdc342b8c3613614a24e587aa524a01c14 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 30 Apr 2022 19:30:33 +0200 Subject: net: phylink: Convert to mdiobus_c45_{read|write} Stop using the helpers to construct a special phy address which indicates C45. Instead use the C45 accessors, which will call the busses C45 specific read/write API. Signed-off-by: Andrew Lunn Signed-off-by: Paolo Abeni --- drivers/net/phy/phylink.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index d707604d1d5a..066684b80919 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -2303,8 +2303,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, if (mdio_phy_id_is_c45(phy_id)) { prtad = mdio_phy_id_prtad(phy_id); devad = mdio_phy_id_devad(phy_id); - devad = mdiobus_c45_addr(devad, reg); - } else if (phydev->is_c45) { + return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad, + reg); + } + + if (phydev->is_c45) { switch (reg) { case MII_BMCR: case MII_BMSR: @@ -2326,12 +2329,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, return -EINVAL; } prtad = phy_id; - devad = mdiobus_c45_addr(devad, reg); - } else { - prtad = phy_id; - devad = reg; + return mdiobus_c45_read(pl->phydev->mdio.bus, prtad, devad, + reg); } - return mdiobus_read(pl->phydev->mdio.bus, prtad, devad); + + return mdiobus_read(pl->phydev->mdio.bus, phy_id, reg); } static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, @@ -2343,8 +2345,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, if (mdio_phy_id_is_c45(phy_id)) { prtad = mdio_phy_id_prtad(phy_id); devad = mdio_phy_id_devad(phy_id); - devad = mdiobus_c45_addr(devad, reg); - } else if (phydev->is_c45) { + return mdiobus_c45_write(pl->phydev->mdio.bus, prtad, devad, + reg, val); + } + + if (phydev->is_c45) { switch (reg) { case MII_BMCR: case MII_BMSR: @@ -2365,14 +2370,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, default: return -EINVAL; } - prtad = phy_id; - devad = mdiobus_c45_addr(devad, reg); - } else { - prtad = phy_id; - devad = reg; + return mdiobus_c45_write(pl->phydev->mdio.bus, phy_id, devad, + reg, val); } - 
return mdiobus_write(phydev->mdio.bus, prtad, devad, val); + return mdiobus_write(phydev->mdio.bus, phy_id, reg, val); } static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, -- cgit v1.2.3 From 260bdfea873aec8fe8990154c722abe3a0a6d8d3 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 30 Apr 2022 19:30:34 +0200 Subject: net: phy: Convert to mdiobus_c45_{read|write} Stop using the helpers to construct a special phy address which indicates C45. Instead use the C45 accessors, which will call the busses C45 specific read/write API. Signed-off-by: Andrew Lunn Signed-off-by: Paolo Abeni --- drivers/net/phy/phy.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index beb2b66da132..9034c6a8e18f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -295,20 +295,20 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = mdiobus_c45_addr(devad, mii_data->reg_num); + mii_data->val_out = mdiobus_c45_read( + phydev->mdio.bus, prtad, devad, + mii_data->reg_num); } else { - prtad = mii_data->phy_id; - devad = mii_data->reg_num; + mii_data->val_out = mdiobus_read( + phydev->mdio.bus, mii_data->phy_id, + mii_data->reg_num); } - mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad, - devad); return 0; case SIOCSMIIREG: if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = mdiobus_c45_addr(devad, mii_data->reg_num); } else { prtad = mii_data->phy_id; devad = mii_data->reg_num; @@ -351,7 +351,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } } - mdiobus_write(phydev->mdio.bus, prtad, devad, val); + if (mdio_phy_id_is_c45(mii_data->phy_id)) + mdiobus_c45_write(phydev->mdio.bus, prtad, devad, + mii_data->reg_num, val); + else + mdiobus_write(phydev->mdio.bus, prtad, devad, val); if (prtad == phydev->mdio.addr && devad == MII_BMCR && -- cgit v1.2.3 From cad75717c71b49149fbb1ab136bf88a85274d01e Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 30 Apr 2022 19:30:35 +0200 Subject: net: phy: bcm87xx: Use mmd helpers Rather than construct special phy device addresses to access C45 registers, use the mmd helpers. These will directly call the C45 API of the MDIO bus driver. 
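The conversion is mechanical once the legacy encoding is spelled out: the old BCM87XX_* defines folded the MMD into bits 16 and up of a pseudo register number and set the MII_ADDR_C45 flag, while the mmd helpers take the MMD as a separate argument. A minimal before/after sketch for one register, using the LASI status define visible in the diff (MMD 3 is MDIO_MMD_PCS):

	/* Before: BCM87XX_LASI_STATUS == (MII_ADDR_C45 | 0x39005), i.e. the
	 * MMD (3, the PCS) packed into the upper bits of the register number.
	 */
	reg = phy_read(phydev, MII_ADDR_C45 | (MDIO_MMD_PCS << 16) | 0x9005);

	/* After: the MMD is an explicit argument and the MDIO bus driver's
	 * C45-specific accessors are used underneath.
	 */
	reg = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x9005);
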
Signed-off-by: Andrew Lunn Signed-off-by: Paolo Abeni --- drivers/net/phy/bcm87xx.c | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index 313563482690..cc2858107668 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -10,12 +10,12 @@ #define PHY_ID_BCM8706 0x0143bdc1 #define PHY_ID_BCM8727 0x0143bff0 -#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a) -#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020) -#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018) +#define BCM87XX_PMD_RX_SIGNAL_DETECT 0x000a +#define BCM87XX_10GBASER_PCS_STATUS 0x0020 +#define BCM87XX_XGXS_LANE_STATUS 0x0018 -#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002) -#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005) +#define BCM87XX_LASI_CONTROL 0x9002 +#define BCM87XX_LASI_STATUS 0x9005 #if IS_ENABLED(CONFIG_OF_MDIO) /* Set and/or override some configuration registers based on the @@ -54,11 +54,10 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) u16 reg = be32_to_cpup(paddr++); u16 mask = be32_to_cpup(paddr++); u16 val_bits = be32_to_cpup(paddr++); - u32 regnum = mdiobus_c45_addr(devid, reg); int val = 0; if (mask) { - val = phy_read(phydev, regnum); + val = phy_read_mmd(phydev, devid, reg); if (val < 0) { ret = val; goto err; @@ -67,7 +66,7 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) } val |= val_bits; - ret = phy_write(phydev, regnum, val); + ret = phy_write_mmd(phydev, devid, reg, val); if (ret < 0) goto err; } @@ -104,21 +103,24 @@ static int bcm87xx_read_status(struct phy_device *phydev) int pcs_status; int xgxs_lane_status; - rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT); + rx_signal_detect = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + BCM87XX_PMD_RX_SIGNAL_DETECT); if (rx_signal_detect < 0) return rx_signal_detect; if ((rx_signal_detect & 1) == 0) goto no_link; - pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS); + pcs_status = phy_read_mmd(phydev, MDIO_MMD_PCS, + BCM87XX_10GBASER_PCS_STATUS); if (pcs_status < 0) return pcs_status; if ((pcs_status & 1) == 0) goto no_link; - xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS); + xgxs_lane_status = phy_read_mmd(phydev, MDIO_MMD_PHYXS, + BCM87XX_XGXS_LANE_STATUS); if (xgxs_lane_status < 0) return xgxs_lane_status; @@ -139,25 +141,27 @@ static int bcm87xx_config_intr(struct phy_device *phydev) { int reg, err; - reg = phy_read(phydev, BCM87XX_LASI_CONTROL); + reg = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_CONTROL); if (reg < 0) return reg; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - err = phy_read(phydev, BCM87XX_LASI_STATUS); + err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS); if (err) return err; reg |= 1; - err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg); + err = phy_write_mmd(phydev, MDIO_MMD_PCS, + BCM87XX_LASI_CONTROL, reg); } else { reg &= ~1; - err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg); + err = phy_write_mmd(phydev, MDIO_MMD_PCS, + BCM87XX_LASI_CONTROL, reg); if (err) return err; - err = phy_read(phydev, BCM87XX_LASI_STATUS); + err = phy_read_mmd(phydev, MDIO_MMD_PCS, BCM87XX_LASI_STATUS); } return err; -- cgit v1.2.3 From 639e4b93ab68f5f5fc4734c766124ca96c167f14 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 30 Apr 2022 19:30:36 +0200 Subject: net: dsa: sja1105: Convert to mdiobus_c45_read Stop using the helpers to construct a special phy address which indicates 
C45. Instead use the C45 accessors, which will call the busses C45 specific read/write API. Reviewed-by: Vladimir Oltean Tested-by: Vladimir Oltean Signed-off-by: Andrew Lunn Signed-off-by: Paolo Abeni --- drivers/net/dsa/sja1105/sja1105_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index b33841c6507a..72b6fc1932b5 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2252,14 +2252,13 @@ int sja1105_static_config_reload(struct sja1105_private *priv, * change it through the dynamic interface later. */ for (i = 0; i < ds->num_ports; i++) { - u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1); - speed_mbps[i] = sja1105_port_speed_to_ethtool(priv, mac[i].speed); mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO]; if (priv->xpcs[i]) - bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr); + bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i, + MDIO_MMD_VEND2, MDIO_CTRL1); } /* No PTP operations can run right now */ -- cgit v1.2.3 From d18af067c98eb187b8d9fe1f4e2e59ba3ad0a1d8 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 30 Apr 2022 19:30:37 +0200 Subject: net: pcs: pcs-xpcs: Convert to mdiobus_c45_read Stop using the helpers to construct a special mdio address which indicates C45. Instead use the C45 accessors, which will call the busses C45 specific read/write API. Reviewed-by: Vladimir Oltean Tested-by: Vladimir Oltean Signed-off-by: Andrew Lunn Signed-off-by: Paolo Abeni --- drivers/net/pcs/pcs-xpcs.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 61418d4dc0cd..4cfd05c15aee 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -175,20 +175,18 @@ static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat, int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg) { - u32 reg_addr = mdiobus_c45_addr(dev, reg); struct mii_bus *bus = xpcs->mdiodev->bus; int addr = xpcs->mdiodev->addr; - return mdiobus_read(bus, addr, reg_addr); + return mdiobus_c45_read(bus, addr, dev, reg); } int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val) { - u32 reg_addr = mdiobus_c45_addr(dev, reg); struct mii_bus *bus = xpcs->mdiodev->bus; int addr = xpcs->mdiodev->addr; - return mdiobus_write(bus, addr, reg_addr, val); + return mdiobus_c45_write(bus, addr, dev, reg, val); } static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg) -- cgit v1.2.3 From 7330e1ec9748948177830c6e1a13379835d577f9 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Fri, 29 Apr 2022 16:15:02 +0300 Subject: ath11k: fix warning of not found station for bssid in message When test connect/disconnect to an AP frequently with WCN6855, sometimes it show below log. 
[ 277.040121] wls1: deauthenticating from 8c:21:0a:b3:5a:64 by local choice (Reason: 3=DEAUTH_LEAVING) [ 277.050906] ath11k_pci 0000:05:00.0: wmi stats vdev id 0 mac 00:03:7f:29:61:11 [ 277.050944] ath11k_pci 0000:05:00.0: wmi stats bssid 8c:21:0a:b3:5a:64 vif pK-error [ 277.050954] ath11k_pci 0000:05:00.0: not found station for bssid 8c:21:0a:b3:5a:64 [ 277.050961] ath11k_pci 0000:05:00.0: failed to parse rssi chain -71 [ 277.050967] ath11k_pci 0000:05:00.0: failed to pull fw stats: -71 [ 277.050976] ath11k_pci 0000:05:00.0: wmi stats vdev id 0 mac 00:03:7f:29:61:11 [ 277.050983] ath11k_pci 0000:05:00.0: wmi stats bssid 8c:21:0a:b3:5a:64 vif pK-error [ 277.050989] ath11k_pci 0000:05:00.0: not found station for bssid 8c:21:0a:b3:5a:64 [ 277.050995] ath11k_pci 0000:05:00.0: failed to parse rssi chain -71 [ 277.051000] ath11k_pci 0000:05:00.0: failed to pull fw stats: -71 [ 278.064050] ath11k_pci 0000:05:00.0: failed to request fw stats: -110 Reason is: When running disconnect operation, sta_info removed from local->sta_hash by __sta_info_destroy_part1() from __sta_info_flush(), after this, ieee80211_find_sta_by_ifaddr() which called by ath11k_wmi_tlv_fw_stats_data_parse() and ath11k_wmi_tlv_rssi_chain_parse() cannot find this station, then failed log printed. steps are like this: 1. when disconnect from AP, __sta_info_destroy() called __sta_info_destroy_part1() and __sta_info_destroy_part2(). 2. in __sta_info_destroy_part1(), it has "sta_info_hash_del(local, sta)" and "list_del_rcu(&sta->list)", it will remove the ieee80211_sta from the list of ieee80211_hw. 3. in __sta_info_destroy_part2(), it called drv_sta_state()->ath11k_mac_op_sta_state(), then peer->sta is clear at this moment. 4. in __sta_info_destroy_part2(), it then called sta_set_sinfo()->drv_sta_statistics() ->ath11k_mac_op_sta_statistics(), then WMI_REQUEST_STATS_CMDID sent to firmware. 5. WMI_UPDATE_STATS_EVENTID reported from firmware, at this moment, the ieee80211_sta can not be found again because it has remove from list in step2 and also peer->sta is clear in step3. 6. in __sta_info_destroy_part2(), it then called cleanup_single_sta()-> sta_info_free()->kfree(sta), at this moment, the ieee80211_sta is freed in memory, then the failed log will not happen because function ath11k_mac_op_sta_state() will not be called. Actually this print log is not a real error, it is only to skip parse the info, so change to skip print by default debug setting. 
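In practice the change only demotes the message: ath11k_warn() always prints, whereas ath11k_dbg() is gated on the matching bit (here ATH11K_DBG_WMI) in the driver's debug_mask module parameter, so this benign lookup failure stays quiet unless WMI debugging is explicitly enabled. The rssi-chain path, for example, becomes:

	/* Printed only when ATH11K_DBG_WMI is set in ath11k's debug_mask. */
	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "not found station of bssid %pM for rssi chain\n",
		   arvif->bssid);
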
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220428022426.2927-1-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/wmi.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 598ec061694f..1410114d1d5c 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -5794,9 +5794,9 @@ static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, arvif->bssid, NULL); if (!sta) { - ath11k_warn(ab, "not found station for bssid %pM\n", - arvif->bssid); - ret = -EPROTO; + ath11k_dbg(ab, ATH11K_DBG_WMI, + "not found station of bssid %pM for rssi chain\n", + arvif->bssid); goto exit; } @@ -5894,8 +5894,9 @@ static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, "wmi stats vdev id %d snr %d\n", src->vdev_id, src->beacon_snr); } else { - ath11k_warn(ab, "not found station for bssid %pM\n", - arvif->bssid); + ath11k_dbg(ab, ATH11K_DBG_WMI, + "not found station of bssid %pM for vdev stat\n", + arvif->bssid); } } -- cgit v1.2.3 From 3a597f0d425b2160ce4278c3e1c8384bec8ccfb4 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Fri, 29 Apr 2022 16:15:02 +0300 Subject: ath11k: change management tx queue to avoid connection timed out In the phase of wlan load, it has hw scan and 11d scan which sent to firmware by ath11k, then hw scan and 11d scan will use about 14 seconds, and meanwhile ath11k_reg_update_chan_list() is running in workqueue of ath11k_base, and wait for 11d scan/hw scan finished. When the hw scan finished, mac80211 will start to connect and send management packet, at this moment, ath11k_reg_update_chan_list() is still waiting for 11d scan finished, so wmi_mgmt_tx_work of ath11k will not run and thus the tx management packet also not send out and lead authentication timed out. log: INFO kernel: [ 187.885322] wlan0: authenticate with 72:6c:57:43:9f:90 INFO kernel: [ 187.937266] wlan0: send auth to 72:6c:57:43:9f:90 (try 1/3) INFO kernel: [ 188.626944] wlan0: send auth to 72:6c:57:43:9f:90 (try 2/3) INFO kernel: [ 189.650999] wlan0: send auth to 72:6c:57:43:9f:90 (try 3/3) INFO kernel: [ 190.651917] wlan0: authentication with 72:6c:57:43:9f:90 timed out Change wmi_mgmt_tx_work to another queue workqueue_aux of ath11k_base, then connection success. 
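The failure is head-of-line blocking: the regulatory update work sits on the ath11k_base workqueue waiting for the 11d/hw scans, and because works on that queue are serialized, the management-TX work queued behind it cannot run, so the authentication frames never leave the host. A rough sketch of the ordering (regd_update_work stands in for the work that runs ath11k_reg_update_chan_list; exact field names may differ):

	/* Old behaviour: both works end up on the same serialized queue. */
	queue_work(ar->ab->workqueue, &ar->regd_update_work);	/* waits ~14 s on scans */
	queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work);	/* stuck behind it */

	/* New behaviour: management TX runs from the auxiliary queue and is
	 * no longer serialized behind the pending regulatory update.
	 */
	queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work);
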
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220428023320.4007-1-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index c025bcb25c00..769319604340 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -5592,7 +5592,7 @@ static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb, skb_queue_tail(q, skb); atomic_inc(&ar->num_pending_mgmt_tx); - queue_work(ar->ab->workqueue, &ar->wmi_mgmt_tx_work); + queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work); return 0; } -- cgit v1.2.3 From 92c1858e4399edc093a41cbf8f74874bdab59da7 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:34:55 +0530 Subject: ath11k: Move parameters in bus_params to hw_params In ath11k, bus_params were added with an intention to hold parameters related to bus (AHB/PCI), but this is not true as some bus parameters being different between chipsets of the same bus. With the addition of WCN6750 to ath11k, bus parameters are going to be entirely different among AHB devices. Therefore, it is wise to move bus_params to hw_params and get rid of bus_params entirely. Also, mhi_support parameter is not used anywhere in the driver, remove it from bus_params. Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-3-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/ahb.c | 11 ++--------- drivers/net/wireless/ath/ath11k/core.c | 28 +++++++++++++++++++++++++--- drivers/net/wireless/ath/ath11k/core.h | 12 +----------- drivers/net/wireless/ath/ath11k/hw.h | 4 ++++ drivers/net/wireless/ath/ath11k/pci.c | 14 +++----------- drivers/net/wireless/ath/ath11k/pcic.c | 4 ++-- drivers/net/wireless/ath/ath11k/qmi.c | 23 ++++++++++++----------- 7 files changed, 49 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index f407d4af2074..49d79cfbf21c 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -28,13 +29,6 @@ static const struct of_device_id ath11k_ahb_of_match[] = { MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match); -static const struct ath11k_bus_params ath11k_ahb_bus_params = { - .mhi_support = false, - .m3_fw_support = false, - .fixed_bdf_addr = true, - .fixed_mem_region = true, -}; - #define ATH11K_IRQ_CE0_OFFSET 4 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { @@ -685,8 +679,7 @@ static int ath11k_ahb_probe(struct platform_device *pdev) } ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb), - ATH11K_BUS_AHB, - &ath11k_ahb_bus_params); + ATH11K_BUS_AHB); if (!ab) { dev_err(&pdev->dev, "failed to allocate ath11k base\n"); return -ENOMEM; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 7e074b7716e7..19b5bb06c7e8 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -102,6 +102,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, + .m3_fw_support = false, + .fixed_bdf_addr = true, + .fixed_mem_region = true, + .static_window_map = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -169,6 +173,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, + .m3_fw_support = false, + .fixed_bdf_addr = true, + .fixed_mem_region = true, + .static_window_map = false, }, { .name = "qca6390 hw2.0", @@ -235,6 +243,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = NULL, + .m3_fw_support = true, + .fixed_bdf_addr = false, + .fixed_mem_region = false, + .static_window_map = false, }, { .name = "qcn9074 hw1.0", @@ -301,6 +313,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, + .m3_fw_support = true, + .fixed_bdf_addr = false, + .fixed_mem_region = false, + .static_window_map = true, }, { .name = "wcn6855 hw2.0", @@ -367,6 +383,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, + .m3_fw_support = true, + .fixed_bdf_addr = false, + .fixed_mem_region = false, + .static_window_map = false, }, { .name = "wcn6855 hw2.1", @@ -432,6 +452,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, + .m3_fw_support = true, + .fixed_bdf_addr = false, + .fixed_mem_region = false, + .static_window_map = false, }, }; @@ -1730,8 +1754,7 @@ void ath11k_core_free(struct ath11k_base *ab) EXPORT_SYMBOL(ath11k_core_free); struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, - enum ath11k_bus bus, - const struct ath11k_bus_params *bus_params) + enum ath11k_bus bus) { struct ath11k_base *ab; @@ -1770,7 +1793,6 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, init_completion(&ab->wow.wakeup_completed); ab->dev = dev; - ab->bus_params = *bus_params; ab->hif.bus = bus; return ab; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 7a505531acf9..d8ab28413d95 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -748,14 +748,6 @@ struct ath11k_board_data { size_t len; }; -struct ath11k_bus_params { - bool mhi_support; 
- bool m3_fw_support; - bool fixed_bdf_addr; - bool fixed_mem_region; - bool static_window_map; -}; - struct ath11k_pci_ops { int (*wakeup)(struct ath11k_base *ab); void (*release)(struct ath11k_base *ab); @@ -887,7 +879,6 @@ struct ath11k_base { int bd_api; struct ath11k_hw_params hw_params; - struct ath11k_bus_params bus_params; const struct firmware *cal_file; @@ -1135,8 +1126,7 @@ int ath11k_core_pre_init(struct ath11k_base *ab); int ath11k_core_init(struct ath11k_base *ath11k); void ath11k_core_deinit(struct ath11k_base *ath11k); struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, - enum ath11k_bus bus, - const struct ath11k_bus_params *bus_params); + enum ath11k_bus bus); void ath11k_core_free(struct ath11k_base *ath11k); int ath11k_core_fetch_bdf(struct ath11k_base *ath11k, struct ath11k_board_data *bd); diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index b7ece3d5678c..09fa020bfee4 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -196,6 +196,10 @@ struct ath11k_hw_params { bool dbr_debug_support; bool global_reset; const struct cfg80211_sar_capa *bios_sar_capa; + bool m3_fw_support; + bool fixed_bdf_addr; + bool fixed_mem_region; + bool static_window_map; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index 024661a17008..dedf1b88ddf6 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -115,13 +115,6 @@ static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = { .window_read32 = ath11k_pci_window_read32, }; -static const struct ath11k_bus_params ath11k_pci_bus_params = { - .mhi_support = true, - .m3_fw_support = true, - .fixed_bdf_addr = false, - .fixed_mem_region = false, -}; - static const struct ath11k_msi_config msi_config_one_msi = { .total_vectors = 1, .total_users = 4, @@ -593,7 +586,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) return ret; } - if (ab->bus_params.static_window_map) + if (ab->hw_params.static_window_map) ath11k_pci_select_static_window(ab_pci); return 0; @@ -706,8 +699,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev, u32 soc_hw_version_major, soc_hw_version_minor, addr; int ret; - ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI, - &ath11k_pci_bus_params); + ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI); + if (!ab) { dev_err(&pdev->dev, "failed to allocate ath11k base\n"); return -ENOMEM; @@ -764,7 +757,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ab->pci.ops = &ath11k_pci_ops_qca6390; break; case QCN9074_DEVICE_ID: - ab->bus_params.static_window_map = true; ab->pci.ops = &ath11k_pci_ops_qcn9074; ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index 63c678aea29e..7a920d65023f 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -163,7 +163,7 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) if (offset < ATH11K_PCI_WINDOW_START) { iowrite32(value, ab->mem + offset); } else { - if (ab->bus_params.static_window_map) + if (ab->hw_params.static_window_map) window_start = ath11k_pcic_get_window_start(ab, offset); else window_start = ATH11K_PCI_WINDOW_START; @@ -198,7 +198,7 @@ u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) if (offset < ATH11K_PCI_WINDOW_START) { val = ioread32(ab->mem + offset); } 
else { - if (ab->bus_params.static_window_map) + if (ab->hw_params.static_window_map) window_start = ath11k_pcic_get_window_start(ab, offset); else window_start = ATH11K_PCI_WINDOW_START; diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 0442faa3b7af..c89e76108237 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1648,7 +1649,7 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) req.bdf_support_valid = 1; req.bdf_support = 1; - if (ab->bus_params.m3_fw_support) { + if (ab->hw_params.m3_fw_support) { req.m3_support_valid = 1; req.m3_support = 1; req.m3_cache_support_valid = 1; @@ -1803,7 +1804,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) * failure to FW and FW will then request mulitple blocks of small * chunk size memory. */ - if (!(ab->bus_params.fixed_mem_region || + if (!(ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && ab->qmi.target_mem_delayed) { delayed = true; @@ -1873,7 +1874,7 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab) int i; for (i = 0; i < ab->qmi.mem_seg_count; i++) { - if ((ab->bus_params.fixed_mem_region || + if ((ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && ab->qmi.target_mem[i].iaddr) iounmap(ab->qmi.target_mem[i].iaddr); @@ -2124,7 +2125,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, memset(&resp, 0, sizeof(resp)); - if (ab->bus_params.fixed_bdf_addr) { + if (ab->hw_params.fixed_bdf_addr) { bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size); if (!bdf_addr) { ath11k_warn(ab, "qmi ioremap error for bdf_addr\n"); @@ -2153,7 +2154,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, req->end = 1; } - if (ab->bus_params.fixed_bdf_addr || + if (ab->hw_params.fixed_bdf_addr || type == ATH11K_QMI_FILE_TYPE_EEPROM) { req->data_valid = 0; req->end = 1; @@ -2162,7 +2163,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, memcpy(req->data, temp, req->data_len); } - if (ab->bus_params.fixed_bdf_addr) { + if (ab->hw_params.fixed_bdf_addr) { if (type == ATH11K_QMI_FILE_TYPE_CALDATA) bdf_addr += ab->hw_params.fw.cal_offset; @@ -2201,7 +2202,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, goto err_iounmap; } - if (ab->bus_params.fixed_bdf_addr || + if (ab->hw_params.fixed_bdf_addr || type == ATH11K_QMI_FILE_TYPE_EEPROM) { remaining = 0; } else { @@ -2214,7 +2215,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, } err_iounmap: - if (ab->bus_params.fixed_bdf_addr) + if (ab->hw_params.fixed_bdf_addr) iounmap(bdf_addr); err_free_req: @@ -2353,7 +2354,7 @@ static void ath11k_qmi_m3_free(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; - if (!ab->bus_params.m3_fw_support || !m3_mem->vaddr) + if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr) return; dma_free_coherent(ab->dev, m3_mem->size, @@ -2373,7 +2374,7 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); - if (ab->bus_params.m3_fw_support) { + if (ab->hw_params.m3_fw_support) { ret = ath11k_qmi_m3_load(ab); if (ret) { ath11k_err(ab, "failed 
to load m3 firmware: %d", ret); @@ -2792,7 +2793,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, msg->mem_seg[i].type, msg->mem_seg[i].size); } - if (ab->bus_params.fixed_mem_region || + if (ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { ret = ath11k_qmi_assign_target_mem_chunk(ab); if (ret) { -- cgit v1.2.3 From d1e1edfde0352321af52599ad447b71c91bc6e76 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:34:56 +0530 Subject: ath11k: Add HW params for WCN6750 WCN6750 is a PCIe based solution that is attached to and enumerated by the WPSS (Wireless Processor SubSystem) Q6 processor. Though it is a PCIe device, since it is not attached to APSS processor (Application Processor SubSystem), APSS will be unaware of such a decice and hence it is registered to the APSS processor as a platform device(AHB). Because of this hybrid nature, it is called as a hybrid bus device. A new variable hybrid_bus_type is defined in hw_params to indicate the hybrid nature of the device. Add HW params for WCN6750. Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-4-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 73 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/core.h | 1 + drivers/net/wireless/ath/ath11k/hw.h | 1 + drivers/net/wireless/ath/ath11k/qmi.h | 2 + 4 files changed, 77 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 19b5bb06c7e8..1220514e19bf 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -106,6 +106,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, + .hybrid_bus_type = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -177,6 +178,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, + .hybrid_bus_type = false, }, { .name = "qca6390 hw2.0", @@ -247,6 +249,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, + .hybrid_bus_type = false, }, { .name = "qcn9074 hw1.0", @@ -317,6 +320,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = true, + .hybrid_bus_type = false, }, { .name = "wcn6855 hw2.0", @@ -387,6 +391,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, + .hybrid_bus_type = false, }, { .name = "wcn6855 hw2.1", @@ -456,6 +461,74 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, + .hybrid_bus_type = false, + }, + { + .name = "wcn6750 hw1.0", + .hw_rev = ATH11K_HW_WCN6750_HW10, + .fw = { + .dir = "WCN6750/hw1.0", + .board_size = 256 * 1024, + .cal_offset = 128 * 1024, + }, + .max_radios = 1, + .bdf_addr = 0x4B0C0000, + .ring_mask = 
&ath11k_hw_ring_mask_qca6390, + .internal_sleep_clock = false, + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750, + .host_ce_config = ath11k_host_ce_config_qca6390, + .ce_count = 9, + .target_ce_config = ath11k_target_ce_config_wlan_qca6390, + .target_ce_count = 9, + .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, + .svc_to_ce_map_len = 14, + .rfkill_pin = 0, + .rfkill_cfg = 0, + .rfkill_on_level = 0, + .single_pdev_only = true, + .rxdma1_enable = false, + .num_rxmda_per_pdev = 1, + .rx_mac_buf_ring = true, + .vdev_start_delay = true, + .htt_peer_map_v2 = false, + + .spectral = { + .fft_sz = 0, + .fft_pad_sz = 0, + .summary_pad_sz = 0, + .fft_hdr_len = 0, + .max_fft_bins = 0, + }, + + .interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP), + .supports_monitor = false, + .supports_shadow_regs = true, + .idle_ps = true, + .supports_sta_ps = true, + .cold_boot_calib = false, + .fw_mem_mode = 0, + .num_vdevs = 16 + 1, + .num_peers = 512, + .supports_suspend = false, + .supports_regdb = true, + .fix_l1ss = false, + .credit_flow = true, + .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, + .hal_params = &ath11k_hw_hal_params_qca6390, + .supports_dynamic_smps_6ghz = false, + .alloc_cacheable_memory = false, + .supports_rssi_stats = true, + .fw_wmi_diag_event = false, + .current_cc_support = true, + .dbr_debug_support = false, + .global_reset = false, + .bios_sar_capa = NULL, + .m3_fw_support = false, + .fixed_bdf_addr = false, + .fixed_mem_region = false, + .static_window_map = true, + .hybrid_bus_type = true, }, }; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index d8ab28413d95..fdea44627125 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -140,6 +140,7 @@ enum ath11k_hw_rev { ATH11K_HW_QCN9074_HW10, ATH11K_HW_WCN6855_HW20, ATH11K_HW_WCN6855_HW21, + ATH11K_HW_WCN6750_HW10, }; enum ath11k_firmware_mode { diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 09fa020bfee4..03eb5dfd4a5e 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -200,6 +200,7 @@ struct ath11k_hw_params { bool fixed_bdf_addr; bool fixed_mem_region; bool static_window_map; + bool hybrid_bus_type; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 61678de56ac7..e0201e184733 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_QMI_H @@ -20,6 +21,7 @@ #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01 #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02 #define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07 +#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750 0x03 #define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32 #define ATH11K_QMI_RESP_LEN_MAX 8192 #define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52 -- cgit v1.2.3 From 56c8ccf331bd2ebf8b85f70efb4844803ef3f768 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:34:57 +0530 Subject: ath11k: Add register access logic for WCN6750 WCN6750 uses static window mapping to access the HW registers. 
Unlike QCN9074 which uses 3rd window for UMAC and 2nd window for CE register access, WCN6750 uses 1st window for UMAC and 2nd window for CE registers. Also, refactor the code so that WCN6750 can use the existing ath11k_pci_read32/write32() APIs for accessing the registers. Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-5-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 14 +++++++++ drivers/net/wireless/ath/ath11k/hw.h | 2 ++ drivers/net/wireless/ath/ath11k/pcic.c | 54 ++++++++++++---------------------- 3 files changed, 35 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 1220514e19bf..3be4327b4d9c 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -107,6 +107,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, + .dp_window_idx = 0, + .ce_window_idx = 0, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -179,6 +181,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, + .dp_window_idx = 0, + .ce_window_idx = 0, }, { .name = "qca6390 hw2.0", @@ -250,6 +254,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, + .dp_window_idx = 0, + .ce_window_idx = 0, }, { .name = "qcn9074 hw1.0", @@ -321,6 +327,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_mem_region = false, .static_window_map = true, .hybrid_bus_type = false, + .dp_window_idx = 3, + .ce_window_idx = 2, }, { .name = "wcn6855 hw2.0", @@ -392,6 +400,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, + .dp_window_idx = 0, + .ce_window_idx = 0, }, { .name = "wcn6855 hw2.1", @@ -462,6 +472,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, + .dp_window_idx = 0, + .ce_window_idx = 0, }, { .name = "wcn6750 hw1.0", @@ -529,6 +541,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .fixed_mem_region = false, .static_window_map = true, .hybrid_bus_type = true, + .dp_window_idx = 1, + .ce_window_idx = 2, }, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 03eb5dfd4a5e..b63538084215 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -201,6 +201,8 @@ struct ath11k_hw_params { bool fixed_mem_region; bool static_window_map; bool hybrid_bus_type; + u8 dp_window_idx; + u8 ce_window_idx; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index 7a920d65023f..46cf96d3e1d2 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -134,16 +134,13 @@ EXPORT_SYMBOL(ath11k_pcic_init_msi_config); static inline u32 ath11k_pcic_get_window_start(struct 
ath11k_base *ab, u32 offset) { - u32 window_start; + u32 window_start = 0; - /* If offset lies within DP register range, use 3rd window */ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) - window_start = 3 * ATH11K_PCI_WINDOW_START; - /* If offset lies within CE register range, use 2nd window */ - else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < ATH11K_PCI_WINDOW_RANGE_MASK) - window_start = 2 * ATH11K_PCI_WINDOW_START; - else - window_start = ATH11K_PCI_WINDOW_START; + window_start = ab->hw_params.dp_window_idx * ATH11K_PCI_WINDOW_START; + else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) < + ATH11K_PCI_WINDOW_RANGE_MASK) + window_start = ab->hw_params.ce_window_idx * ATH11K_PCI_WINDOW_START; return window_start; } @@ -162,19 +159,12 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) if (offset < ATH11K_PCI_WINDOW_START) { iowrite32(value, ab->mem + offset); - } else { - if (ab->hw_params.static_window_map) - window_start = ath11k_pcic_get_window_start(ab, offset); - else - window_start = ATH11K_PCI_WINDOW_START; - - if (window_start == ATH11K_PCI_WINDOW_START && - ab->pci.ops->window_write32) { - ab->pci.ops->window_write32(ab, offset, value); - } else { - iowrite32(value, ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - } + } else if (ab->hw_params.static_window_map) { + window_start = ath11k_pcic_get_window_start(ab, offset); + iowrite32(value, ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + } else if (ab->pci.ops->window_write32) { + ab->pci.ops->window_write32(ab, offset, value); } if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && @@ -185,7 +175,8 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) { - u32 val, window_start; + u32 val = 0; + u32 window_start; int ret = 0; /* for offset beyond BAR + 4K - 32, may @@ -197,19 +188,12 @@ u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) if (offset < ATH11K_PCI_WINDOW_START) { val = ioread32(ab->mem + offset); - } else { - if (ab->hw_params.static_window_map) - window_start = ath11k_pcic_get_window_start(ab, offset); - else - window_start = ATH11K_PCI_WINDOW_START; - - if (window_start == ATH11K_PCI_WINDOW_START && - ab->pci.ops->window_read32) { - val = ab->pci.ops->window_read32(ab, offset); - } else { - val = ioread32(ab->mem + window_start + - (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); - } + } else if (ab->hw_params.static_window_map) { + window_start = ath11k_pcic_get_window_start(ab, offset); + val = ioread32(ab->mem + window_start + + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); + } else if (ab->pci.ops->window_read32) { + val = ab->pci.ops->window_read32(ab, offset); } if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && -- cgit v1.2.3 From 676f8905fff904ea3e4ee52086a18ab54912b410 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:34:58 +0530 Subject: ath11k: Fetch device information via QMI for WCN6750 Since WPPS Q6 does the PCIe enumeration of WCN6750, device information like BAR and BAR size is not known to the APPS processor (Application Processor SubSystem). In order to fetch these details, a QMI message called device info request will be sent to the target. Therefore, add logic to fetch BAR details from the target. 
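Since the host never enumerates WCN6750 over PCI, there is no BAR resource to claim locally; the address and size returned in the QMI response are mapped directly and used as the register window. The tail of the new handler (shown in full in the diff below) reduces to:

	/* Map the BAR reported by firmware over QMI and use it as ab->mem,
	 * replacing the mapping a PCI host would normally obtain during
	 * enumeration.
	 */
	bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size);
	if (!bar_addr_va)
		return -EIO;

	ab->mem = bar_addr_va;
	ab->mem_len = resp.bar_size;
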
Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-6-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/qmi.c | 144 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/qmi.h | 24 +++++- 2 files changed, 164 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index c89e76108237..ad422256f1c6 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 #define HOST_CSTATE_BIT 0x04 @@ -749,6 +751,68 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { }, }; +static struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, + bar_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, + bar_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, + bar_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, + bar_size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, @@ -2008,6 +2072,80 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) return 0; } +static int ath11k_qmi_request_device_info(struct ath11k_base *ab) +{ + struct qmi_wlanfw_device_info_req_msg_v01 req = {}; + struct qmi_wlanfw_device_info_resp_msg_v01 resp = {}; + struct qmi_txn txn; + void __iomem *bar_addr_va; + int ret; + + /* device info message req is only sent for hybrid bus devices */ + if (!ab->hw_params.hybrid_bus_type) + return 0; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlfw_device_info_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, + QMI_WLANFW_DEVICE_INFO_REQ_V01, + QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN, + qmi_wlanfw_device_info_req_msg_v01_ei, &req); + if (ret < 0) { + qmi_txn_cancel(&txn); + ath11k_warn(ab, "failed to send qmi target device info request: %d\n", + ret); + 
goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); + if (ret < 0) { + ath11k_warn(ab, "failed to wait qmi target device info request: %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ath11k_warn(ab, "qmi device info request failed: %d %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + + if (!resp.bar_addr_valid || !resp.bar_size_valid) { + ath11k_warn(ab, "qmi device info response invalid: %d %d\n", + resp.resp.result, resp.resp.error); + ret = -EINVAL; + goto out; + } + + if (!resp.bar_addr || + resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) { + ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n", + resp.bar_addr, resp.bar_size); + ret = -EINVAL; + goto out; + } + + bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size); + + if (!bar_addr_va) { + ath11k_warn(ab, "qmi device info ioremap failed\n"); + ab->mem_len = 0; + ret = -EIO; + goto out; + } + + ab->mem = bar_addr_va; + ab->mem_len = resp.bar_size; + + return 0; +out: + return ret; +} + static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) { struct qmi_wlanfw_cap_req_msg_v01 req; @@ -2749,6 +2887,12 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) return ret; } + ret = ath11k_qmi_request_device_info(ab); + if (ret < 0) { + ath11k_warn(ab, "failed to request qmi device info: %d\n", ret); + return ret; + } + if (ab->hw_params.supports_regdb) ath11k_qmi_load_bdf_qmi(ab, true); diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index e0201e184733..c24e6995cca3 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -38,6 +38,8 @@ #define ATH11K_FIRMWARE_MODE_OFF 4 #define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ) +#define ATH11K_QMI_DEVICE_BAR_SIZE 0x200000 + struct ath11k_base; enum ath11k_qmi_file_type { @@ -287,10 +289,12 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 { char placeholder; }; -#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0 -#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235 -#define QMI_WLANFW_CAP_REQ_V01 0x0024 -#define QMI_WLANFW_CAP_RESP_V01 0x0024 +#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0 +#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235 +#define QMI_WLANFW_CAP_REQ_V01 0x0024 +#define QMI_WLANFW_CAP_RESP_V01 0x0024 +#define QMI_WLANFW_DEVICE_INFO_REQ_V01 0x004C +#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN 0 enum qmi_wlanfw_pipedir_enum_v01 { QMI_WLFW_PIPEDIR_NONE_V01 = 0, @@ -383,6 +387,18 @@ struct qmi_wlanfw_cap_req_msg_v01 { char placeholder; }; +struct qmi_wlanfw_device_info_req_msg_v01 { + char placeholder; +}; + +struct qmi_wlanfw_device_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u64 bar_addr; + u32 bar_size; + u8 bar_addr_valid; + u8 bar_size_valid; +}; + #define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182 #define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7 #define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025 -- cgit v1.2.3 From 73d3e71306fe864d9667e8d37f731e93a91e2040 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:34:59 +0530 Subject: ath11k: Add QMI changes for WCN6750 In the case of WCN6750, FW doesn't request for DDR memory via QMI, instead it uses a fixed 12MB reserved Memory region in the DDR which is called as MSA region. As a result, QMI message sequence is not same as other ath11k supported devices. Also, M3 firmware will be bundled into the FW and will be downloaded to the target as part of Q6 boot. 
This is the QMI flow in the case of WCN6750, 1) QMI firmware indication REQ/RESP 2) QMI host capability REQ/RESP 3) QMI target capability REQ/RESP 4) QMI device info REQ/RESP 5) QMI BDF download 6) QMI FW ready Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-7-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 7 ++++ drivers/net/wireless/ath/ath11k/hw.h | 1 + drivers/net/wireless/ath/ath11k/qmi.c | 76 +++++++++++++++++++++------------- 3 files changed, 56 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 3be4327b4d9c..64cf87fd6a7f 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -109,6 +109,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hybrid_bus_type = false, .dp_window_idx = 0, .ce_window_idx = 0, + .fixed_fw_mem = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -183,6 +184,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hybrid_bus_type = false, .dp_window_idx = 0, .ce_window_idx = 0, + .fixed_fw_mem = false, }, { .name = "qca6390 hw2.0", @@ -256,6 +258,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hybrid_bus_type = false, .dp_window_idx = 0, .ce_window_idx = 0, + .fixed_fw_mem = false, }, { .name = "qcn9074 hw1.0", @@ -329,6 +332,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hybrid_bus_type = false, .dp_window_idx = 3, .ce_window_idx = 2, + .fixed_fw_mem = false, }, { .name = "wcn6855 hw2.0", @@ -402,6 +406,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hybrid_bus_type = false, .dp_window_idx = 0, .ce_window_idx = 0, + .fixed_fw_mem = false, }, { .name = "wcn6855 hw2.1", @@ -474,6 +479,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hybrid_bus_type = false, .dp_window_idx = 0, .ce_window_idx = 0, + .fixed_fw_mem = false, }, { .name = "wcn6750 hw1.0", @@ -543,6 +549,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hybrid_bus_type = true, .dp_window_idx = 1, .ce_window_idx = 2, + .fixed_fw_mem = true, }, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index b63538084215..b5a4758a6bc5 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -203,6 +203,7 @@ struct ath11k_hw_params { bool hybrid_bus_type; u8 dp_window_idx; u8 ce_window_idx; + bool fixed_fw_mem; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index ad422256f1c6..d1e945074bc1 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1799,10 +1799,6 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) req->client_id = QMI_WLANFW_CLIENT_ID; req->fw_ready_enable_valid = 1; req->fw_ready_enable = 1; - req->request_mem_enable_valid = 1; - req->request_mem_enable = 1; - req->fw_mem_ready_enable_valid = 1; - req->fw_mem_ready_enable = 1; req->cal_done_enable_valid = 1; req->cal_done_enable = 1; req->fw_init_done_enable_valid = 1; @@ -1811,6 +1807,17 @@ static int 
ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) req->pin_connect_result_enable_valid = 0; req->pin_connect_result_enable = 0; + /* WCN6750 doesn't request for DDR memory via QMI, + * instead it uses a fixed 12MB reserved memory + * region in DDR. + */ + if (!ab->hw_params.fixed_fw_mem) { + req->request_mem_enable_valid = 1; + req->request_mem_enable = 1; + req->fw_mem_ready_enable_valid = 1; + req->fw_mem_ready_enable = 1; + } + ret = qmi_txn_init(handle, &txn, qmi_wlanfw_ind_register_resp_msg_v01_ei, resp); if (ret < 0) @@ -2840,27 +2847,6 @@ ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi, return 0; } -static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) -{ - struct ath11k_base *ab = qmi->ab; - int ret; - - ret = ath11k_qmi_fw_ind_register_send(ab); - if (ret < 0) { - ath11k_warn(ab, "failed to send qmi firmware indication: %d\n", - ret); - return ret; - } - - ret = ath11k_qmi_host_cap_send(ab); - if (ret < 0) { - ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret); - return ret; - } - - return ret; -} - static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; @@ -2902,9 +2888,33 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) return ret; } - ret = ath11k_qmi_wlanfw_m3_info_send(ab); + return 0; +} + +static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) +{ + struct ath11k_base *ab = qmi->ab; + int ret; + + ret = ath11k_qmi_fw_ind_register_send(ab); + if (ret < 0) { + ath11k_warn(ab, "failed to send qmi firmware indication: %d\n", + ret); + return ret; + } + + ret = ath11k_qmi_host_cap_send(ab); if (ret < 0) { - ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret); + ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret); + return ret; + } + + if (!ab->hw_params.fixed_fw_mem) + return ret; + + ret = ath11k_qmi_event_load_bdf(qmi); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret); return ret; } @@ -3104,8 +3114,18 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work) break; case ATH11K_QMI_EVENT_FW_MEM_READY: ret = ath11k_qmi_event_load_bdf(qmi); - if (ret < 0) + if (ret < 0) { set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); + break; + } + + ret = ath11k_qmi_wlanfw_m3_info_send(ab); + if (ret < 0) { + ath11k_warn(ab, + "failed to send qmi m3 info req: %d\n", ret); + set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); + } + break; case ATH11K_QMI_EVENT_FW_READY: clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); -- cgit v1.2.3 From 49890d9c93d5abf21babda2b495c7d3014fb9c98 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:35:00 +0530 Subject: ath11k: HAL changes to support WCN6750 Add HAL changes required to support WCN6750. Offsets of some registers for WCN6750 are different from other supported devices; move such register offsets to platform specific ath11k_hw_regs. 
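The register move follows the pattern ath11k already uses for other per-SoC offsets: a fixed #define becomes an accessor that reads the hw_regs table referenced from hw_params, so wcn6750_regs can supply different values. A sketch for one of the affected registers (the field name behind the accessor is assumed; the call sites match the hal.c hunks below):

	/* Before: one offset shared by every supported chip. */
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;

	/* After: resolved per SoC through ab->hw_params.regs, e.g. roughly
	 * #define HAL_SW2REO_RING_BASE_LSB(ab) \
	 *	((ab)->hw_params.regs->hal_sw2reo_ring_base_lsb)
	 */
	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
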
Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-8-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 2 + drivers/net/wireless/ath/ath11k/hal.c | 15 ++-- drivers/net/wireless/ath/ath11k/hal.h | 15 ++-- drivers/net/wireless/ath/ath11k/hw.c | 134 +++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/hw.h | 10 +++ 5 files changed, 163 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 64cf87fd6a7f..7385e1c60ca1 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -491,8 +491,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { }, .max_radios = 1, .bdf_addr = 0x4B0C0000, + .hw_ops = &wcn6750_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = false, + .regs = &wcn6750_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 2ec09ae90080..1dba7b9e0bda 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include "hal_tx.h" @@ -1082,10 +1083,10 @@ static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab, srng = &hal->srng_list[ring_id]; if (srng_config->ring_dir == HAL_SRNG_DIR_DST) - srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) + + srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) + (unsigned long)ab->mem); else - srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) + + srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) + (unsigned long)ab->mem); } @@ -1120,7 +1121,7 @@ int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab, ath11k_dbg(ab, ATH11k_DBG_HAL, "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d", target_reg, - HAL_SHADOW_REG(shadow_cfg_idx), + HAL_SHADOW_REG(ab, shadow_cfg_idx), shadow_cfg_idx, ring_type, ring_num); @@ -1193,12 +1194,12 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab); s = &hal->srng_config[HAL_REO_REINJECT]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab); s = &hal->srng_config[HAL_REO_CMD]; - s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB; - s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP; + s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab); + s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab); s = &hal->srng_config[HAL_REO_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab); diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index a7d9b4c551ad..1aadb1566df8 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_HAL_H @@ -31,12 +32,12 @@ struct ath11k_base; #define HAL_DSCP_TID_TBL_SIZE 24 /* calculate the register address from bar0 of shadow register x */ -#define HAL_SHADOW_BASE_ADDR 0x000008fc +#define HAL_SHADOW_BASE_ADDR(ab) ab->hw_params.regs->hal_shadow_base_addr #define HAL_SHADOW_NUM_REGS 36 #define HAL_HP_OFFSET_IN_REG_START 1 #define HAL_OFFSET_FROM_HP_TO_TP 4 -#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x))) +#define HAL_SHADOW_REG(ab, x) (HAL_SHADOW_BASE_ADDR(ab) + (4 * (x))) /* WCSS Relative address */ #define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 @@ -180,16 +181,18 @@ struct ath11k_base; #define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp /* REO CMD R0 address */ -#define HAL_REO_CMD_RING_BASE_LSB 0x00000194 +#define HAL_REO_CMD_RING_BASE_LSB(ab) \ + ab->hw_params.regs->hal_reo_cmd_ring_base_lsb /* REO CMD R2 address */ -#define HAL_REO_CMD_HP 0x00003020 +#define HAL_REO_CMD_HP(ab) ab->hw_params.regs->hal_reo_cmd_ring_hp /* SW2REO R0 address */ -#define HAL_SW2REO_RING_BASE_LSB 0x000001ec +#define HAL_SW2REO_RING_BASE_LSB(ab) \ + ab->hw_params.regs->hal_sw2reo_ring_base_lsb /* SW2REO R2 address */ -#define HAL_SW2REO_RING_HP 0x00003028 +#define HAL_SW2REO_RING_HP(ab) ab->hw_params.regs->hal_sw2reo_ring_hp /* CE ring R0 address */ #define HAL_CE_DST_RING_BASE_LSB 0x00000000 diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index 46449a18b5cb..a9f5c4e63d62 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1014,6 +1015,13 @@ const struct ath11k_hw_ops wcn6855_ops = { .rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2, }; +const struct ath11k_hw_ops wcn6750_ops = { + .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, + .wmi_init_config = ath11k_init_wmi_config_qca6390, + .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390, + .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390, +}; + #define ATH11K_TX_RING_MASK_0 0x1 #define ATH11K_TX_RING_MASK_1 0x2 #define ATH11K_TX_RING_MASK_2 0x4 @@ -1908,10 +1916,18 @@ const struct ath11k_hw_regs ipq8074_regs = { .hal_reo_tcl_ring_base_lsb = 0x000003fc, .hal_reo_tcl_ring_hp = 0x00003058, + /* REO CMD ring address */ + .hal_reo_cmd_ring_base_lsb = 0x00000194, + .hal_reo_cmd_ring_hp = 0x00003020, + /* REO status address */ .hal_reo_status_ring_base_lsb = 0x00000504, .hal_reo_status_hp = 0x00003070, + /* SW2REO ring address */ + .hal_sw2reo_ring_base_lsb = 0x000001ec, + .hal_sw2reo_ring_hp = 0x00003028, + /* WCSS relative address */ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000, .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000, @@ -1932,6 +1948,9 @@ const struct ath11k_hw_regs ipq8074_regs = { /* PCIe base address */ .pcie_qserdes_sysclk_en_sel = 0x0, .pcie_pcs_osc_dtct_config_base = 0x0, + + /* Shadow register area */ + .hal_shadow_base_addr = 0x0, }; const struct ath11k_hw_regs qca6390_regs = { @@ -1979,10 +1998,18 @@ const struct ath11k_hw_regs qca6390_regs = { .hal_reo_tcl_ring_base_lsb = 0x000003a4, .hal_reo_tcl_ring_hp = 0x00003050, + /* REO CMD ring address */ + .hal_reo_cmd_ring_base_lsb = 0x00000194, + .hal_reo_cmd_ring_hp = 0x00003020, + /* REO status address */ .hal_reo_status_ring_base_lsb = 0x000004ac, .hal_reo_status_hp = 0x00003068, + /* SW2REO ring 
address */ + .hal_sw2reo_ring_base_lsb = 0x000001ec, + .hal_sw2reo_ring_hp = 0x00003028, + /* WCSS relative address */ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000, .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000, @@ -2003,6 +2030,9 @@ const struct ath11k_hw_regs qca6390_regs = { /* PCIe base address */ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac, .pcie_pcs_osc_dtct_config_base = 0x01e0c628, + + /* Shadow register area */ + .hal_shadow_base_addr = 0x000008fc, }; const struct ath11k_hw_regs qcn9074_regs = { @@ -2050,10 +2080,18 @@ const struct ath11k_hw_regs qcn9074_regs = { .hal_reo_tcl_ring_base_lsb = 0x000003fc, .hal_reo_tcl_ring_hp = 0x00003058, + /* REO CMD ring address */ + .hal_reo_cmd_ring_base_lsb = 0x00000194, + .hal_reo_cmd_ring_hp = 0x00003020, + /* REO status address */ .hal_reo_status_ring_base_lsb = 0x00000504, .hal_reo_status_hp = 0x00003070, + /* SW2REO ring address */ + .hal_sw2reo_ring_base_lsb = 0x000001ec, + .hal_sw2reo_ring_hp = 0x00003028, + /* WCSS relative address */ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000, .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000, @@ -2074,6 +2112,9 @@ const struct ath11k_hw_regs qcn9074_regs = { /* PCIe base address */ .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8, .pcie_pcs_osc_dtct_config_base = 0x01e0f45c, + + /* Shadow register area */ + .hal_shadow_base_addr = 0x0, }; const struct ath11k_hw_regs wcn6855_regs = { @@ -2121,10 +2162,18 @@ const struct ath11k_hw_regs wcn6855_regs = { .hal_reo_tcl_ring_base_lsb = 0x00000454, .hal_reo_tcl_ring_hp = 0x00003060, + /* REO CMD ring address */ + .hal_reo_cmd_ring_base_lsb = 0x00000194, + .hal_reo_cmd_ring_hp = 0x00003020, + /* REO status address */ .hal_reo_status_ring_base_lsb = 0x0000055c, .hal_reo_status_hp = 0x00003078, + /* SW2REO ring address */ + .hal_sw2reo_ring_base_lsb = 0x000001ec, + .hal_sw2reo_ring_hp = 0x00003028, + /* WCSS relative address */ .hal_seq_wcss_umac_ce0_src_reg = 0x1b80000, .hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000, @@ -2145,6 +2194,91 @@ const struct ath11k_hw_regs wcn6855_regs = { /* PCIe base address */ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac, .pcie_pcs_osc_dtct_config_base = 0x01e0c628, + + /* Shadow register area */ + .hal_shadow_base_addr = 0x000008fc, +}; + +const struct ath11k_hw_regs wcn6750_regs = { + /* SW2TCL(x) R0 ring configuration address */ + .hal_tcl1_ring_base_lsb = 0x00000694, + .hal_tcl1_ring_base_msb = 0x00000698, + .hal_tcl1_ring_id = 0x0000069c, + .hal_tcl1_ring_misc = 0x000006a4, + .hal_tcl1_ring_tp_addr_lsb = 0x000006b0, + .hal_tcl1_ring_tp_addr_msb = 0x000006b4, + .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4, + .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8, + .hal_tcl1_ring_msi1_base_lsb = 0x000006dc, + .hal_tcl1_ring_msi1_base_msb = 0x000006e0, + .hal_tcl1_ring_msi1_data = 0x000006e4, + .hal_tcl2_ring_base_lsb = 0x000006ec, + .hal_tcl_ring_base_lsb = 0x0000079c, + + /* TCL STATUS ring address */ + .hal_tcl_status_ring_base_lsb = 0x000008a4, + + /* REO2SW(x) R0 ring configuration address */ + .hal_reo1_ring_base_lsb = 0x000001ec, + .hal_reo1_ring_base_msb = 0x000001f0, + .hal_reo1_ring_id = 0x000001f4, + .hal_reo1_ring_misc = 0x000001fc, + .hal_reo1_ring_hp_addr_lsb = 0x00000200, + .hal_reo1_ring_hp_addr_msb = 0x00000204, + .hal_reo1_ring_producer_int_setup = 0x00000210, + .hal_reo1_ring_msi1_base_lsb = 0x00000234, + .hal_reo1_ring_msi1_base_msb = 0x00000238, + .hal_reo1_ring_msi1_data = 0x0000023c, + .hal_reo2_ring_base_lsb = 0x00000244, + .hal_reo1_aging_thresh_ix_0 = 0x00000564, + .hal_reo1_aging_thresh_ix_1 = 0x00000568, + 
.hal_reo1_aging_thresh_ix_2 = 0x0000056c, + .hal_reo1_aging_thresh_ix_3 = 0x00000570, + + /* REO2SW(x) R2 ring pointers (head/tail) address */ + .hal_reo1_ring_hp = 0x00003028, + .hal_reo1_ring_tp = 0x0000302c, + .hal_reo2_ring_hp = 0x00003030, + + /* REO2TCL R0 ring configuration address */ + .hal_reo_tcl_ring_base_lsb = 0x000003fc, + .hal_reo_tcl_ring_hp = 0x00003058, + + /* REO CMD ring address */ + .hal_reo_cmd_ring_base_lsb = 0x000000e4, + .hal_reo_cmd_ring_hp = 0x00003010, + + /* REO status address */ + .hal_reo_status_ring_base_lsb = 0x00000504, + .hal_reo_status_hp = 0x00003070, + + /* SW2REO ring address */ + .hal_sw2reo_ring_base_lsb = 0x0000013c, + .hal_sw2reo_ring_hp = 0x00003018, + + /* WCSS relative address */ + .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000, + .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000, + .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000, + .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000, + + /* WBM Idle address */ + .hal_wbm_idle_link_ring_base_lsb = 0x00000874, + .hal_wbm_idle_link_ring_misc = 0x00000884, + + /* SW2WBM release address */ + .hal_wbm_release_ring_base_lsb = 0x000001ec, + + /* WBM2SW release address */ + .hal_wbm0_release_ring_base_lsb = 0x00000924, + .hal_wbm1_release_ring_base_lsb = 0x0000097c, + + /* PCIe base address */ + .pcie_qserdes_sysclk_en_sel = 0x0, + .pcie_pcs_osc_dtct_config_base = 0x0, + + /* Shadow register area */ + .hal_shadow_base_addr = 0x00000504, }; const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = { diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index b5a4758a6bc5..6d588cd80093 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -253,6 +253,7 @@ extern const struct ath11k_hw_ops ipq6018_ops; extern const struct ath11k_hw_ops qca6390_ops; extern const struct ath11k_hw_ops qcn9074_ops; extern const struct ath11k_hw_ops wcn6855_ops; +extern const struct ath11k_hw_ops wcn6750_ops; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074; extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390; @@ -355,6 +356,12 @@ struct ath11k_hw_regs { u32 hal_reo_status_ring_base_lsb; u32 hal_reo_status_hp; + u32 hal_reo_cmd_ring_base_lsb; + u32 hal_reo_cmd_ring_hp; + + u32 hal_sw2reo_ring_base_lsb; + u32 hal_sw2reo_ring_hp; + u32 hal_seq_wcss_umac_ce0_src_reg; u32 hal_seq_wcss_umac_ce0_dst_reg; u32 hal_seq_wcss_umac_ce1_src_reg; @@ -370,12 +377,15 @@ struct ath11k_hw_regs { u32 pcie_qserdes_sysclk_en_sel; u32 pcie_pcs_osc_dtct_config_base; + + u32 hal_shadow_base_addr; }; extern const struct ath11k_hw_regs ipq8074_regs; extern const struct ath11k_hw_regs qca6390_regs; extern const struct ath11k_hw_regs qcn9074_regs; extern const struct ath11k_hw_regs wcn6855_regs; +extern const struct ath11k_hw_regs wcn6750_regs; static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type) { -- cgit v1.2.3 From e67ba19739177f95706c1d4a53c884c89973b843 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:35:01 +0530 Subject: ath11k: Datapath changes to support WCN6750 HAL RX descriptor for WCN6750 is same as QCN9074, this means that the size of the HAL RX decriptor and the DP APIs that WCN6750 requires to enable datapath should be initialized with that of QCN9074's RX descriptor size and the DP APIs respectively. There is one change wrt to REO configuration though, REO configuration for WCN6750 follows WCN6855, therefore use reo_setup() of WCN6855 for WCN6750. 
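In other words, enabling the datapath is largely a matter of composing an ops table out of existing per-family helpers. The following is a rough standalone sketch of that composition with stub functions and simplified types; it is not the real ath11k_hw_ops.

#include <stdio.h>

/* Cut-down stand-in for ath11k_hw_ops with just two of the callbacks. */
struct hw_ops {
	unsigned int (*rx_desc_get_msdu_len)(const unsigned char *desc);
	void (*reo_setup)(const char *name);
};

/* Stub modeling a QCN9074-layout descriptor parser... */
static unsigned int qcn9074_style_get_msdu_len(const unsigned char *desc)
{
	return desc[0];	/* pretend the length lives in the first byte */
}

/* ...and a stub modeling the WCN6855-style REO configuration. */
static void wcn6855_style_reo_setup(const char *name)
{
	printf("%s: WCN6855-style REO setup\n", name);
}

/* A WCN6750-like device simply mixes the two families' helpers. */
static const struct hw_ops wcn6750_like_ops = {
	.rx_desc_get_msdu_len = qcn9074_style_get_msdu_len,
	.reo_setup            = wcn6855_style_reo_setup,
};

int main(void)
{
	unsigned char desc[64] = { 42 };

	wcn6750_like_ops.reo_setup("wcn6750-like");
	printf("msdu len: %u\n", wcn6750_like_ops.rx_desc_get_msdu_len(desc));
	return 0;
}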
Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-9-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 1 + drivers/net/wireless/ath/ath11k/hw.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 7385e1c60ca1..01e1d494b527 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -531,6 +531,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .num_vdevs = 16 + 1, .num_peers = 512, .supports_suspend = false, + .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index a9f5c4e63d62..09ce357f0f0d 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -1020,6 +1020,38 @@ const struct ath11k_hw_ops wcn6750_ops = { .wmi_init_config = ath11k_init_wmi_config_qca6390, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390, + .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable, + .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu, + .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu, + .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes, + .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status, + .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid, + .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type, + .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type, + .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl, + .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support, + .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld, + .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid, + .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no, + .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len, + .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi, + .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs, + .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw, + .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq, + .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type, + .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss, + .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid, + .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id, + .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end, + .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag, + .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id, + .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len, + .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention, + .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload, + .reo_setup = 
ath11k_hw_wcn6855_reo_setup, + .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid, + .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid, + .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2, }; #define ATH11K_TX_RING_MASK_0 0x1 -- cgit v1.2.3 From 00402f49d26ffe991892bba76ccbdfed26538824 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 29 Apr 2022 22:35:02 +0530 Subject: ath11k: Add support for WCN6750 device WCN6750 is non-DBS 2x2 11AX chipset. Unlike QCA6390 which is a DBS (dual band simultaneous) solution (2 LMACs), WCN6750 has a single LMAC supporting 2G, 5G and 6G bands but will operate only on one band at any given point. WCN6750 is a PCIe based solution, but it is attached to the WPSS (Wireless Processor SubSystem) Q6 processor, hence it is enumerated by the Q6 processor. It is registered to the APSS processor (Application Processor SubSystem) as a platform device(AHB) and remoteproc APIs are used to boot up or shutdown the device like other AHB devices. Also, Device information like BAR and it's size is not known to the APSS processor as the chip is enumerated by WPSS Q6. These details are fetched over QMI. STA and AP modes are supported. Verified basic connectivity and ping in both the modes. An important point to note is that though WCN6750 is a PCIe device, it cannot be attached to any other platform except on Qualcomm Snapdragon SoCs due to the aforementioned reasons. Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429170502.20080-10-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/Makefile | 5 +- drivers/net/wireless/ath/ath11k/ahb.c | 142 ++++++++++++++++++++++++++++--- drivers/net/wireless/ath/ath11k/core.h | 1 + drivers/net/wireless/ath/ath11k/pcic.c | 23 +++++ 4 files changed, 158 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index 0ebfe41d6143..cc47e0114595 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -16,7 +16,8 @@ ath11k-y += core.o \ ce.o \ peer.o \ dbring.o \ - hw.o + hw.o \ + pcic.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o @@ -29,7 +30,7 @@ obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o ath11k_ahb-y += ahb.o obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o -ath11k_pci-y += mhi.o pci.o pcic.o +ath11k_pci-y += mhi.o pci.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 49d79cfbf21c..050bda828966 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -13,6 +13,7 @@ #include "debug.h" #include "hif.h" #include +#include "pcic.h" static const struct of_device_id ath11k_ahb_of_match[] = { /* TODO: Should we change the compatible string to something similar @@ -24,6 +25,9 @@ static const struct of_device_id ath11k_ahb_of_match[] = { { .compatible = "qcom,ipq6018-wifi", .data = (void *)ATH11K_HW_IPQ6018_HW10, }, + { .compatible = "qcom,wcn6750-wifi", + .data = (void 
*)ATH11K_HW_WCN6750_HW10, + }, { } }; @@ -128,6 +132,16 @@ enum ext_irq_num { tcl2host_status_ring, }; +static int +ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector) +{ + return ab->pci.msi.irqs[vector]; +} + +static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = { + .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750, +}; + static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset) { return ioread32(ab->mem + offset); @@ -395,6 +409,9 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab) int irq_idx; int i; + if (ab->hw_params.hybrid_bus_type) + return ath11k_pcic_free_irq(ab); + for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; @@ -549,6 +566,9 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab) int irq, irq_idx, i; int ret; + if (ab->hw_params.hybrid_bus_type) + return ath11k_pcic_config_irq(ab); + /* Configure CE irqs */ for (i = 0; i < ab->hw_params.ce_count; i++) { struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; @@ -618,7 +638,7 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id return 0; } -static const struct ath11k_hif_ops ath11k_ahb_hif_ops = { +static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = { .start = ath11k_ahb_start, .stop = ath11k_ahb_stop, .read32 = ath11k_ahb_read32, @@ -630,6 +650,20 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops = { .power_up = ath11k_ahb_power_up, }; +static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = { + .start = ath11k_pcic_start, + .stop = ath11k_pcic_stop, + .read32 = ath11k_pcic_read32, + .write32 = ath11k_pcic_write32, + .irq_enable = ath11k_pcic_ext_irq_enable, + .irq_disable = ath11k_pcic_ext_irq_disable, + .get_msi_address = ath11k_pcic_get_msi_address, + .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment, + .map_service_to_pipe = ath11k_pcic_map_service_to_pipe, + .power_down = ath11k_ahb_power_down, + .power_up = ath11k_ahb_power_up, +}; + static int ath11k_core_get_rproc(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); @@ -652,12 +686,84 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab) return 0; } +static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab) +{ + struct platform_device *pdev = ab->pdev; + phys_addr_t msi_addr_pa; + dma_addr_t msi_addr_iova; + struct resource *res; + int int_prop; + int ret; + int i; + + ret = ath11k_pcic_init_msi_config(ab); + if (ret) { + ath11k_err(ab, "failed to init msi config: %d\n", ret); + return ret; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ath11k_err(ab, "failed to fetch msi_addr\n"); + return -ENOENT; + } + + msi_addr_pa = res->start; + msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE, + DMA_FROM_DEVICE, 0); + if (dma_mapping_error(ab->dev, msi_addr_iova)) + return -ENOMEM; + + ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova); + ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova); + + ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop); + if (ret) + return ret; + + ab->pci.msi.ep_base_data = int_prop + 32; + + for (i = 0; i < ab->pci.msi.config->total_vectors; i++) { + res = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (!res) + return -ENODEV; + + ab->pci.msi.irqs[i] = res->start; + } + + set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); + + return 0; +} + +static int ath11k_ahb_setup_resources(struct ath11k_base *ab) +{ + struct platform_device *pdev = ab->pdev; 
+ struct resource *mem_res; + void __iomem *mem; + + if (ab->hw_params.hybrid_bus_type) + return ath11k_ahb_setup_msi_resources(ab); + + mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); + if (IS_ERR(mem)) { + dev_err(&pdev->dev, "ioremap error\n"); + return PTR_ERR(mem); + } + + ab->mem = mem; + ab->mem_len = resource_size(mem_res); + + return 0; +} + static int ath11k_ahb_probe(struct platform_device *pdev) { struct ath11k_base *ab; const struct of_device_id *of_id; - struct resource *mem_res; - void __iomem *mem; + const struct ath11k_hif_ops *hif_ops; + const struct ath11k_pci_ops *pci_ops; + enum ath11k_hw_rev hw_rev; int ret; of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev); @@ -666,10 +772,21 @@ static int ath11k_ahb_probe(struct platform_device *pdev) return -EINVAL; } - mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); - if (IS_ERR(mem)) { - dev_err(&pdev->dev, "ioremap error\n"); - return PTR_ERR(mem); + hw_rev = (enum ath11k_hw_rev)of_id->data; + + switch (hw_rev) { + case ATH11K_HW_IPQ8074: + case ATH11K_HW_IPQ6018_HW10: + hif_ops = &ath11k_ahb_hif_ops_ipq8074; + pci_ops = NULL; + break; + case ATH11K_HW_WCN6750_HW10: + hif_ops = &ath11k_ahb_hif_ops_wcn6750; + pci_ops = &ath11k_ahb_pci_ops_wcn6750; + break; + default: + dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev); + return -EOPNOTSUPP; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); @@ -685,13 +802,16 @@ static int ath11k_ahb_probe(struct platform_device *pdev) return -ENOMEM; } - ab->hif.ops = &ath11k_ahb_hif_ops; + ab->hif.ops = hif_ops; + ab->pci.ops = pci_ops; ab->pdev = pdev; - ab->hw_rev = (enum ath11k_hw_rev)of_id->data; - ab->mem = mem; - ab->mem_len = resource_size(mem_res); + ab->hw_rev = hw_rev; platform_set_drvdata(pdev, ab); + ret = ath11k_ahb_setup_resources(ab); + if (ret) + goto err_core_free; + ret = ath11k_core_pre_init(ab); if (ret) goto err_core_free; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index fdea44627125..95bca0b078b1 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -950,6 +950,7 @@ struct ath11k_base { struct { const struct ath11k_msi_config *config; u32 ep_base_data; + u32 irqs[32]; u32 addr_lo; u32 addr_hi; } msi; diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index 46cf96d3e1d2..cf12b98c480d 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -106,6 +106,15 @@ static const struct ath11k_msi_config ath11k_msi_config[] = { }, .hw_rev = ATH11K_HW_WCN6855_HW21, }, + { + .total_vectors = 28, + .total_users = 2, + .users = (struct ath11k_msi_user[]) { + { .name = "CE", .num_vectors = 10, .base_vector = 0 }, + { .name = "DP", .num_vectors = 18, .base_vector = 10 }, + }, + .hw_rev = ATH11K_HW_WCN6750_HW10, + }, }; int ath11k_pcic_init_msi_config(struct ath11k_base *ab) @@ -172,6 +181,7 @@ void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) !ret) ab->pci.ops->release(ab); } +EXPORT_SYMBOL(ath11k_pcic_write32); u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) { @@ -203,6 +213,7 @@ u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) return val; } +EXPORT_SYMBOL(ath11k_pcic_read32); void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, u32 *msi_addr_hi) @@ -210,6 +221,7 @@ void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, *msi_addr_lo = ab->pci.msi.addr_lo; 
*msi_addr_hi = ab->pci.msi.addr_hi; } +EXPORT_SYMBOL(ath11k_pcic_get_msi_address); int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, @@ -237,6 +249,7 @@ int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, return -EINVAL; } +EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment); void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) { @@ -253,6 +266,7 @@ void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) } *msi_idx = msi_data_idx; } +EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx); static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab) { @@ -281,6 +295,7 @@ void ath11k_pcic_free_irq(struct ath11k_base *ab) ath11k_pcic_free_ext_irq(ab); } +EXPORT_SYMBOL(ath11k_pcic_free_irq); static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { @@ -431,6 +446,7 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab) ath11k_pcic_ext_grp_enable(irq_grp); } } +EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable); static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab) { @@ -451,6 +467,7 @@ void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab) __ath11k_pcic_ext_irq_disable(ab); ath11k_pcic_sync_ext_irqs(ab); } +EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable); static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget) { @@ -630,6 +647,7 @@ int ath11k_pcic_config_irq(struct ath11k_base *ab) return 0; } +EXPORT_SYMBOL(ath11k_pcic_config_irq); void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab) { @@ -643,6 +661,7 @@ void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab) ath11k_pcic_ce_irq_enable(ab, i); } } +EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable); static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab) { @@ -664,12 +683,14 @@ void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab) ath11k_pcic_sync_ce_irqs(ab); ath11k_pcic_kill_tasklets(ab); } +EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync); void ath11k_pcic_stop(struct ath11k_base *ab) { ath11k_pcic_ce_irq_disable_sync(ab); ath11k_ce_cleanup_pipes(ab); } +EXPORT_SYMBOL(ath11k_pcic_stop); int ath11k_pcic_start(struct ath11k_base *ab) { @@ -680,6 +701,7 @@ int ath11k_pcic_start(struct ath11k_base *ab) return 0; } +EXPORT_SYMBOL(ath11k_pcic_start); int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) @@ -723,3 +745,4 @@ int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, return 0; } +EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe); -- cgit v1.2.3 From 52bcfd1b239b0a62d863b92abcc1a04528a41946 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Apr 2022 10:46:42 -0700 Subject: ath10k: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. 
Signed-off-by: Jakub Kicinski Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429174643.196994-3-kuba@kernel.org --- drivers/net/wireless/ath/ath10k/core.h | 3 --- drivers/net/wireless/ath/ath10k/pci.c | 2 +- drivers/net/wireless/ath/ath10k/sdio.c | 2 +- drivers/net/wireless/ath/ath10k/snoc.c | 2 +- drivers/net/wireless/ath/ath10k/usb.c | 2 +- 5 files changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 9f6680b3be0a..8bfabbcfdb14 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -59,9 +59,6 @@ #define ATH10K_KEEPALIVE_MAX_IDLE 3895 #define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900 -/* NAPI poll budget */ -#define ATH10K_NAPI_BUDGET 64 - /* SMBIOS type containing Board Data File Name Extension */ #define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 4d4e2f91e15c..bf1c938be7d0 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3216,7 +3216,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar) void ath10k_pci_init_napi(struct ath10k *ar) { netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll, - ATH10K_NAPI_BUDGET); + NAPI_POLL_WEIGHT); } static int ath10k_pci_init_irq(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 63e1c2d783c5..9162b02b7211 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -2532,7 +2532,7 @@ static int ath10k_sdio_probe(struct sdio_func *func, } netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll, - ATH10K_NAPI_BUDGET); + NAPI_POLL_WEIGHT); ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 8328966a0471..607e8164bf98 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1243,7 +1243,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) static void ath10k_snoc_init_napi(struct ath10k *ar) { netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, - ATH10K_NAPI_BUDGET); + NAPI_POLL_WEIGHT); } static int ath10k_snoc_request_irq(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index 10df6ca303a1..ad6471b21796 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -1015,7 +1015,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, } netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll, - ATH10K_NAPI_BUDGET); + NAPI_POLL_WEIGHT); usb_get_dev(dev); vendor_id = le16_to_cpu(dev->descriptor.idVendor); -- cgit v1.2.3 From 3b3299a1080e357c540e968b704f2eeb46a697f7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Apr 2022 10:46:43 -0700 Subject: wil6210: use NAPI_POLL_WEIGHT for napi budget The comment next to WIL6210_NAPI_BUDGET says "arbitrary". If we're picking arbitrary values let's pick the recommended default which is NAPI_POLL_WEIGHT. 
Signed-off-by: Jakub Kicinski Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429174643.196994-4-kuba@kernel.org --- drivers/net/wireless/ath/wil6210/netdev.c | 8 ++++---- drivers/net/wireless/ath/wil6210/wil6210.h | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 0913f0bf60e7..390648066382 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -457,17 +457,17 @@ int wil_if_add(struct wil6210_priv *wil) if (wil->use_enhanced_dma_hw) { netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx_edma, - WIL6210_NAPI_BUDGET); + NAPI_POLL_WEIGHT); netif_tx_napi_add(&wil->napi_ndev, &wil->napi_tx, wil6210_netdev_poll_tx_edma, - WIL6210_NAPI_BUDGET); + NAPI_POLL_WEIGHT); } else { netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx, - WIL6210_NAPI_BUDGET); + NAPI_POLL_WEIGHT); netif_tx_napi_add(&wil->napi_ndev, &wil->napi_tx, wil6210_netdev_poll_tx, - WIL6210_NAPI_BUDGET); + NAPI_POLL_WEIGHT); } wil_update_net_queues_bh(wil, vif, NULL, true); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 11946ecd0b99..22a6eb3e12b7 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -82,7 +82,6 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL6210_MAX_TX_RINGS (24) /* HW limit */ #define WIL6210_MAX_CID (20) /* max number of stations */ #define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */ -#define WIL6210_NAPI_BUDGET (16) /* arbitrary */ #define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */ #define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */ #define WIL_MAX_AMPDU_SIZE_128 (128 * 1024) /* FW/HW limit */ -- cgit v1.2.3 From 54a6f29522da3c914da30e50721dedf51046449a Mon Sep 17 00:00:00 2001 From: Xiaomeng Tong Date: Mon, 28 Mar 2022 20:28:20 +0800 Subject: carl9170: tx: fix an incorrect use of list iterator If the previous list_for_each_entry_continue_rcu() doesn't exit early (no goto hit inside the loop), the iterator 'cvif' after the loop will be a bogus pointer to an invalid structure object containing the HEAD (&ar->vif_list). As a result, the use of 'cvif' after that will lead to an invalid memory access (i.e., 'cvif->id': the invalid pointer dereference when returning back to/after the call site in carl9170_update_beacon()). The original intention should have been to return the valid 'cvif' when found in the list, or NULL otherwise. So just return NULL when no entry is found, to fix this bug.
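The pitfall is generic to container_of()-based list iteration: once the loop runs to completion, the cursor is computed back from the list head and points at no real element. A small standalone sketch of the bug class and the fix follows, using hand-rolled, simplified list macros and a hypothetical vif structure; it is not the carl9170 code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified: the kernel macro infers the type, here it is passed in. */
#define list_for_each_entry(pos, head, type, member)                 \
	for (pos = container_of((head)->next, type, member);         \
	     &pos->member != (head);                                  \
	     pos = container_of(pos->member.next, type, member))

struct vif_info {
	int id;
	int enable_beacon;
	struct list_head list;
};

/* Hypothetical lookup shaped like carl9170_pick_beaconing_vif(): return the
 * first beaconing vif, or NULL -- never the stale cursor.
 */
static struct vif_info *pick_beaconing_vif(struct list_head *vif_list)
{
	struct vif_info *cvif;

	list_for_each_entry(cvif, vif_list, struct vif_info, list) {
		if (cvif->enable_beacon)
			return cvif;	/* early exit: cvif is a real entry */
	}

	/* Loop completed: 'cvif' now aliases the list head and must not be
	 * dereferenced; the only correct "not found" answer is NULL.
	 */
	return NULL;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct vif_info vif = { .id = 1, .enable_beacon = 0 };

	/* hand-rolled list_add() of the single entry */
	vif.list.next = head.next;
	vif.list.prev = &head;
	head.next->prev = &vif.list;
	head.next = &vif.list;

	printf("no beacon: %p\n", (void *)pick_beaconing_vif(&head));
	vif.enable_beacon = 1;
	printf("beaconing: %p (entry at %p)\n",
	       (void *)pick_beaconing_vif(&head), (void *)&vif);
	return 0;
}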
Cc: stable@vger.kernel.org Fixes: 1f1d9654e183c ("carl9170: refactor carl9170_update_beacon") Signed-off-by: Xiaomeng Tong Acked-by: Christian Lamparter Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328122820.1004-1-xiam0nd.tong@gmail.com --- drivers/net/wireless/ath/carl9170/tx.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 1b76f4434c06..791f9f120af3 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -1558,6 +1558,9 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar) goto out; } } while (ar->beacon_enabled && i--); + + /* no entry found in list */ + return NULL; } out: -- cgit v1.2.3 From 0d3b26c4b97ae1a561070a973f0b2086e3644d1a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Apr 2022 10:46:41 -0700 Subject: rtw88: remove a copy of the NAPI_POLL_WEIGHT define Defining local versions of NAPI_POLL_WEIGHT with the same values in the drivers just makes refactoring harder. Signed-off-by: Jakub Kicinski Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220429174643.196994-2-kuba@kernel.org --- drivers/net/wireless/realtek/rtw88/main.h | 1 - drivers/net/wireless/realtek/rtw88/pci.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index de149a3b3ba1..0baaf5a32e82 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -17,7 +17,6 @@ #include "util.h" -#define RTW_NAPI_WEIGHT_NUM 64 #define RTW_MAX_MAC_ID_NUM 32 #define RTW_MAX_SEC_CAM_NUM 32 #define MAX_PG_CAM_BACKUP_NUM 8 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 3ef0de70af32..24d5695363d3 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1718,7 +1718,7 @@ static void rtw_pci_napi_init(struct rtw_dev *rtwdev) init_dummy_netdev(&rtwpci->netdev); netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll, - RTW_NAPI_WEIGHT_NUM); + NAPI_POLL_WEIGHT); } static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) -- cgit v1.2.3 From 0b9f1b265ee16cd991714fe310eb59b800c142c5 Mon Sep 17 00:00:00 2001 From: Niels Dossche Date: Sat, 30 Apr 2022 21:46:56 +0200 Subject: octeontx2-af: debugfs: fix error return of allocations Current memory failure code in the debugfs returns -ENOSPC. This is normally used for indicating that there is no space left on the device and is not applicable for memory allocation failures. Replace this with -ENOMEM. 
Signed-off-by: Niels Dossche Link: https://lore.kernel.org/r/20220430194656.44357-1-dossche.niels@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index d1eddb769a41..2ad73b180276 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -248,7 +248,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) - return -ENOSPC; + return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); @@ -407,7 +407,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) - return -ENOSPC; + return -ENOMEM; /* Get the maximum width of a column */ lf_str_size = get_max_column_width(rvu); -- cgit v1.2.3 From c389362096be8ee69ec3a163a0699a31e84b8451 Mon Sep 17 00:00:00 2001 From: Ziyang Xuan Date: Wed, 20 Apr 2022 18:36:17 +0800 Subject: net/mlx5: use kvfree() for kvzalloc() in mlx5_ct_fs_smfs_matcher_create The memory of spec is allocated with kvzalloc(), the corresponding release function should not be kfree(), use kvfree() instead. Generated by: scripts/coccinelle/api/kfree_mismatch.cocci Signed-off-by: Ziyang Xuan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c index 59988e24b704..b979826f3f6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c @@ -100,7 +100,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS; dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec); - kfree(spec); + kvfree(spec); if (!dr_matcher) return ERR_PTR(-EINVAL); -- cgit v1.2.3 From 7134c602812a69885de9be7b6bc15c7250c5e637 Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Fri, 22 Apr 2022 14:08:32 +0800 Subject: net/mlx5: Remove useless kfree After alloc fail, we do not need to kfree. Signed-off-by: Haowen Bai Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index e49f51124c74..89aa0208b5d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1812,7 +1812,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); if (!ct_flow) { - kfree(ct_flow); return ERR_PTR(-ENOMEM); } -- cgit v1.2.3 From b5235a9979f9be05e1ffde3546877d3b9a1172c4 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Wed, 3 Nov 2021 12:18:35 +0200 Subject: net/mlx5: Delete redundant default assignment of runtime devlink params Runtime devlink params always read their values from the get() callbacks. Also, it is an error to set driverinit_value for params which don't support driverinit cmode. Delete such assignments. 
In addition, move the set of default matching mode inside eswitch code. Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 20 -------------------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 3 +++ 2 files changed, 3 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 057dde6f4417..e8789e6d7e7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -584,14 +584,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) struct mlx5_core_dev *dev = devlink_priv(devlink); union devlink_param_value value; - if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS) - strcpy(value.vstr, "dmfs"); - else - strcpy(value.vstr, "smfs"); - devlink_param_driverinit_value_set(devlink, - MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE, - value); - value.vbool = MLX5_CAP_GEN(dev, roce); devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, @@ -602,18 +594,6 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM, value); - - if (MLX5_ESWITCH_MANAGER(dev)) { - if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) { - dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; - value.vbool = true; - } else { - value.vbool = false; - } - devlink_param_driverinit_value_set(devlink, - MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA, - value); - } #endif value.vu32 = MLX5_COMP_EQ_SIZE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 458ec0bca1b8..25f2d2717aaa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1582,6 +1582,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC; else esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; + if (MLX5_ESWITCH_MANAGER(dev) && + mlx5_esw_vport_match_metadata_supported(esw)) + esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA; dev->priv.eswitch = esw; BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head); -- cgit v1.2.3 From cdfc6ffbfb390185f7cb442be16d1d2c29a8762d Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Tue, 22 Mar 2022 16:55:58 +0200 Subject: net/mlx5: Print initializing field in case of timeout Print the initializing field in case of FW couldn't initialize before timeout. This will help to better understand the root cause in some cases. 
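The loop restructuring is the interesting part: the raw register word is read once per iteration, so the same value feeds both the "still initializing" test and the diagnostic print. Here is a standalone sketch of that shape, with a hypothetical register reader and plain millisecond bookkeeping standing in for ioread32be() and jiffies.

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

/* Hypothetical stand-in for reading the "initializing" register: bit 31 set
 * means the firmware is still booting; the low bits carry stage information.
 */
static unsigned int read_fw_init_reg(int polls_left)
{
	return polls_left > 0 ? (1u << 31) | 0x2a : 0x2a;
}

static int wait_fw_init(unsigned int max_wait_ms, unsigned int warn_ms)
{
	unsigned int waited = 0, next_warn = warn_ms;
	unsigned int fw_initializing;
	int polls_left = 12;		/* fake firmware readiness countdown */

	do {
		/* One read per iteration keeps the raw value in hand. */
		fw_initializing = read_fw_init_reg(polls_left--);
		if (!(fw_initializing >> 31))
			break;		/* firmware finished initializing */
		if (waited >= max_wait_ms)
			return -1;	/* the driver would return -EBUSY */
		if (warn_ms && waited >= next_warn) {
			/* The raw value can now be reported on each warning. */
			printf("still waiting, %u ms left (0x%x)\n",
			       max_wait_ms - waited, fw_initializing);
			next_warn += warn_ms;
		}
		usleep(10 * 1000);
		waited += 10;
	} while (true);

	return 0;
}

int main(void)
{
	printf("wait_fw_init: %d\n", wait_fw_init(500, 30));
	return 0;
}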
Signed-off-by: Shay Drory Reviewed-by: Maor Gottlieb Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d504c8cb8f96..95d7712c2d9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -177,30 +177,29 @@ static struct mlx5_profile profile[] = { }, }; -static int fw_initializing(struct mlx5_core_dev *dev) -{ - return ioread32be(&dev->iseg->initializing) >> 31; -} - static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, u32 warn_time_mili) { unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili); unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili); + u32 fw_initializing; int err = 0; - while (fw_initializing(dev)) { + do { + fw_initializing = ioread32be(&dev->iseg->initializing); + if (!(fw_initializing >> 31)) + break; if (time_after(jiffies, end)) { err = -EBUSY; break; } if (warn_time_mili && time_after(jiffies, warn)) { - mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n", - jiffies_to_msecs(end - warn) / 1000); + mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n", + jiffies_to_msecs(end - warn) / 1000, fw_initializing); warn = jiffies + msecs_to_jiffies(warn_time_mili); } msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT)); - } + } while (true); return err; } -- cgit v1.2.3 From 84a137f051a571f0c89e395a8fb2acb5cb60e6a7 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 20 Jan 2022 11:32:04 +0200 Subject: net/mlx5e: Drop error CQE handling from the XSK RX handler This commit removes the redundant check and removes the unused cqe parameter of skb_from_cqe handlers. 
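The check is redundant on the premise that the CQE is already validated before the handler is dispatched, which is also why the handlers can lose the cqe argument entirely. A toy standalone sketch of that division of labor follows; the names are illustrative only and this is not the mlx5e code.

#include <stdio.h>

/* Toy CQE and two "skb builders"; the builders take only what they need. */
struct cqe { int opcode; int byte_cnt; };

enum { CQE_RESP_SEND = 0, CQE_RESP_ERR = 13 };

typedef int (*skb_from_cqe_fn)(int byte_cnt);	/* no cqe parameter */

static int skb_from_cqe_linear(int byte_cnt)    { return byte_cnt; }
static int skb_from_cqe_nonlinear(int byte_cnt) { return 2 * byte_cnt; }

/* The dispatcher owns the error check, so the per-variant handlers do not
 * repeat it and no longer need the cqe at all.
 */
static int handle_rx_cqe(const struct cqe *cqe, skb_from_cqe_fn build)
{
	if (cqe->opcode != CQE_RESP_SEND) {
		fprintf(stderr, "wqe_err\n");
		return -1;
	}
	return build(cqe->byte_cnt);
}

int main(void)
{
	struct cqe ok  = { CQE_RESP_SEND, 64 };
	struct cqe bad = { CQE_RESP_ERR, 0 };

	printf("linear:    %d\n", handle_rx_cqe(&ok, skb_from_cqe_linear));
	printf("nonlinear: %d\n", handle_rx_cqe(&ok, skb_from_cqe_nonlinear));
	printf("error:     %d\n", handle_rx_cqe(&bad, skb_from_cqe_linear));
	return 0;
}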
Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 6 ------ drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16 ++++++++-------- 4 files changed, 10 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 50818081bdc0..b90902db7819 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -648,8 +648,8 @@ typedef struct sk_buff * (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); typedef struct sk_buff * -(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); +(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, + u32 cqe_bcnt); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index 021da085e603..9a1553598a7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -80,7 +80,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, } struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { @@ -99,11 +98,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool); net_prefetch(xdp->data); - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { - rq->stats->wqe_err++; - return NULL; - } - prog = rcu_dereference(rq->xdp_prog); if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) return NULL; /* page/packet was consumed by XDP */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h index 7f88ccf67fdd..a8cfab4a393c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h @@ -15,7 +15,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, u32 head_offset, u32 page_idx); struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index a5f6fd16b665..2dea9e4649a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1521,8 +1521,8 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, } static struct sk_buff * -mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, + u32 cqe_bcnt) { struct mlx5e_dma_info *di = wi->di; u16 rx_headroom = rq->buff.headroom; @@ -1565,8 +1565,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } static struct sk_buff * 
-mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, + u32 cqe_bcnt) { struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; struct mlx5e_wqe_frag_info *head_wi = wi; @@ -1709,7 +1709,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); + rq, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { @@ -1762,7 +1762,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); + rq, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { @@ -2361,7 +2361,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, mlx5e_skb_from_cqe_linear, mlx5e_skb_from_cqe_nonlinear, - rq, cqe, wi, cqe_bcnt); + rq, wi, cqe_bcnt); if (!skb) goto wq_free_wqe; @@ -2453,7 +2453,7 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe goto free_wqe; } - skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt); + skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt); if (!skb) goto free_wqe; -- cgit v1.2.3 From a90889b4e8bdf49fa9a9b12c6f31811930b92b94 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 29 Mar 2022 15:24:33 +0300 Subject: net/mlx5e: Remove unused mlx5e_dcbnl_build_rep_netdev function Commit 7a9fb35e8c3a ("net/mlx5e: Do not reload ethernet ports when changing eswitch mode") removed the usage of mlx5e_dcbnl_build_rep_netdev() from the driver, delete the function. 
Signed-off-by: Gal Pressman Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 9 --------- 2 files changed, 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h index 9976de8b9047..b59aee75de94 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h @@ -40,13 +40,11 @@ struct mlx5e_dcbx_dp { }; void mlx5e_dcbnl_build_netdev(struct net_device *netdev); -void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev); void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv); void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv); void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv); #else static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {} -static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {} static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {} static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {} static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index d659fe07d464..6435517c0bee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1026,15 +1026,6 @@ void mlx5e_dcbnl_build_netdev(struct net_device *netdev) netdev->dcbnl_ops = &mlx5e_dcbnl_ops; } -void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; - - if (MLX5_CAP_GEN(mdev, qos)) - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; -} - static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv, enum mlx5_dcbx_oper_mode *mode) { -- cgit v1.2.3 From c70c3336a63ef83c1ac057b3321f438ed2a068a3 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 22 Mar 2022 16:14:51 +0000 Subject: net/mlx5e: TC, set proper dest type Dest type isn't set, this works only because MLX5_FLOW_DESTINATION_TYPE_VPORT is zero. Set the proper type. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index fd4504518578..1cbd2eb9d04f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -93,6 +93,7 @@ sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample) act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; dest.vport.num = esw->manager_vport; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1); if (IS_ERR(tc_psample->termtbl_rule)) { err = PTR_ERR(tc_psample->termtbl_rule); -- cgit v1.2.3 From d639af621600dc52dd9ed0dcf62b22595f2f5b52 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 22 Mar 2022 09:16:39 +0000 Subject: net/mlx5: fs, split software and IFC flow destination definitions Separate flow destinations between software and IFC. 
Flow destination type passed by callers was used as the input in firmware commands and over the years software only types were added which resulted in mixing between the two. Create an IFC enum that contains only the flow destinations defined when talking to the firmware. Now that there is a proper software only enum for flow destinations the hardcoded values can be removed as the values are no longer used in firmware commands. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 13 ++++++++++--- .../net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 16 ++++++++++++---- include/linux/mlx5/fs.h | 11 +++++++++++ include/linux/mlx5/mlx5_ifc.h | 16 ++++++---------- 4 files changed, 39 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 33e9f86cf7d4..a5662cb46660 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -571,7 +571,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, int list_size = 0; list_for_each_entry(dst, &fte->node.children, node.list) { - unsigned int id, type = dst->dest_attr.type; + enum mlx5_flow_destination_type type = dst->dest_attr.type; + enum mlx5_ifc_flow_destination_type ifc_type; + unsigned int id; if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; @@ -579,10 +581,11 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, switch (type) { case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: id = dst->dest_attr.ft_num; - type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: id = dst->dest_attr.ft->id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; break; case MLX5_FLOW_DESTINATION_TYPE_UPLINK: case MLX5_FLOW_DESTINATION_TYPE_VPORT: @@ -596,8 +599,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) { /* destination_id is reserved */ id = 0; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK; break; } + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT; id = dst->dest_attr.vport.num; if (extended_dest && dst->dest_attr.vport.pkt_reformat) { @@ -612,13 +617,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: id = dst->dest_attr.sampler_id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; break; default: id = dst->dest_attr.tir_num; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR; } MLX5_SET(dest_format_struct, in_dests, destination_type, - type); + ifc_type); MLX5_SET(dest_format_struct, in_dests, destination_id, id); in_dests += dst_cnt_size; list_size++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 4dd619d238cc..728ccb950fec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -311,7 +311,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev, in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); MLX5_SET(dest_format_struct, in_dests, destination_type, - MLX5_FLOW_DESTINATION_TYPE_VPORT); + MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT); MLX5_SET(dest_format_struct, in_dests, destination_id, vport); err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); @@ -719,7 
+719,9 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, int list_size = 0; for (i = 0; i < fte->dests_size; i++) { - unsigned int id, type = fte->dest_arr[i].type; + enum mlx5_flow_destination_type type = fte->dest_arr[i].type; + enum mlx5_ifc_flow_destination_type ifc_type; + unsigned int id; if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) continue; @@ -727,10 +729,12 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, switch (type) { case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: id = fte->dest_arr[i].ft_num; - type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: id = fte->dest_arr[i].ft_id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; + break; case MLX5_FLOW_DESTINATION_TYPE_UPLINK: case MLX5_FLOW_DESTINATION_TYPE_VPORT: @@ -740,8 +744,10 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, destination_eswitch_owner_vhca_id_valid, !!(fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID)); + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT; } else { id = 0; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK; MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id_valid, 1); } @@ -761,13 +767,15 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: id = fte->dest_arr[i].sampler_id; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; break; default: id = fte->dest_arr[i].tir_num; + ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR; } MLX5_SET(dest_format_struct, in_dests, destination_type, - type); + ifc_type); MLX5_SET(dest_format_struct, in_dests, destination_id, id); in_dests += dst_cnt_size; list_size++; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index e3bfed68b08a..9da9df9ae751 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -40,6 +40,17 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) +enum mlx5_flow_destination_type { + MLX5_FLOW_DESTINATION_TYPE_VPORT, + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, + MLX5_FLOW_DESTINATION_TYPE_TIR, + MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER, + MLX5_FLOW_DESTINATION_TYPE_UPLINK, + MLX5_FLOW_DESTINATION_TYPE_PORT, + MLX5_FLOW_DESTINATION_TYPE_COUNTER, + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM, +}; + enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 7d2d0ba82144..7f4ec9faa180 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1806,16 +1806,12 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_c0[0x740]; }; -enum mlx5_flow_destination_type { - MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, - MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, - MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, - MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, - MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8, - - MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, - MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, - MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101, +enum mlx5_ifc_flow_destination_type { + MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0, + MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, + MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2, + MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, + MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8, }; enum mlx5_flow_table_miss_action { -- cgit v1.2.3 From d49d63075e0f56f7f05e3ddda59e0484d8a3aa7b Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 15 Mar 2022 
10:44:14 +0000 Subject: net/mlx5: fs, refactor software deletion rule When deleting a rule make sure that for every type dests_size is decreased only once and no other logic is executed. Without this dests_size might be decreased twice when dests_size == 1 so the if for that type won't be entered and if action has MLX5_FLOW_CONTEXT_ACTION_FWD_DEST set dests_size will be decreased again. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 297e6a468a3e..ae83962fc5fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -550,8 +550,8 @@ static void del_sw_hw_rule(struct fs_node *node) mutex_unlock(&rule->dest_attr.ft->lock); } - if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && - --fte->dests_size) { + if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) { + --fte->dests_size; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); @@ -559,15 +559,15 @@ static void del_sw_hw_rule(struct fs_node *node) goto out; } - if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT && - --fte->dests_size) { + if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) { + --fte->dests_size; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; goto out; } - if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && - --fte->dests_size) { + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + --fte->dests_size; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); } -- cgit v1.2.3 From c3ae3a9cfe2f3cfdea5a990fa96e23aee343fdb9 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 15 Mar 2022 10:45:00 +0000 Subject: net/mlx5: fs, jump to exit point and don't fall through For code clarity and to prevent future bugs make sure to jump to the exit point once done handling that specific type. This aligns the code with the rest logic in the function. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index ae83962fc5fc..e282d80f1fd2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -570,6 +570,7 @@ static void del_sw_hw_rule(struct fs_node *node) --fte->dests_size; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); + goto out; } out: kfree(rule); -- cgit v1.2.3 From 6510bc0d7cb4119ebe10f9ab43aca6fd0e5f3e73 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 15 Mar 2022 14:08:23 +0000 Subject: net/mlx5: fs, add unused destination type When the caller doesn't pass a destination fs_core will create a unused rule just so a context can be returned. This unused rule is zeroed out and its type is 0 which can be mixed up with MLX5_FLOW_DESTINATION_TYPE_VPORT. Create a dedicated type to differentiate between the two named MLX5_FLOW_DESTINATION_TYPE_NONE. 
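For illustration only (hypothetical names, not the mlx5 definitions): a minimal user-space sketch of the ambiguity this patch removes. A zero-initialized destination carries type == 0, so before a dedicated entry existed at value 0 it was indistinguishable from the first real destination type.

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical enum mirroring the idea of the patch: making value 0 an
 * explicit "none" entry means a zeroed struct no longer reads as "vport".
 */
enum example_dest_type {
        EXAMPLE_DEST_NONE,              /* 0: explicitly "no destination" */
        EXAMPLE_DEST_VPORT,             /* previously this was value 0 */
        EXAMPLE_DEST_FLOW_TABLE,
};

struct example_dest {
        enum example_dest_type type;
        unsigned int id;
};

static const char *example_dest_str(const struct example_dest *d)
{
        switch (d->type) {
        case EXAMPLE_DEST_NONE:
                return "none";
        case EXAMPLE_DEST_VPORT:
                return "vport";
        case EXAMPLE_DEST_FLOW_TABLE:
                return "flow table";
        }
        return "unknown";
}

int main(void)
{
        struct example_dest d;

        /* what a rule allocated without a destination effectively holds */
        memset(&d, 0, sizeof(d));
        printf("%s\n", example_dest_str(&d));   /* prints "none", not "vport" */
        return 0;
}

With the old numbering the same zeroed struct would have read back as a vport destination.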
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 5 ++++- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 5 ++++- include/linux/mlx5/fs.h | 1 + 5 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 7841ef6c193c..c5bb79a4fa57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -259,6 +259,9 @@ const char *parse_fs_dst(struct trace_seq *p, case MLX5_FLOW_DESTINATION_TYPE_PORT: trace_seq_printf(p, "port\n"); break; + case MLX5_FLOW_DESTINATION_TYPE_NONE: + trace_seq_printf(p, "none\n"); + break; } trace_seq_putc(p, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a5662cb46660..2ccf7bef9b05 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -455,7 +455,8 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, return 0; list_for_each_entry(dst, &fte->node.children, node.list) { - if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER || + dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE) continue; if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT || dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && @@ -579,6 +580,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, continue; switch (type) { + case MLX5_FLOW_DESTINATION_TYPE_NONE: + continue; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: id = dst->dest_attr.ft_num; ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e282d80f1fd2..f9d6ddd865e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1289,6 +1289,8 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) rule->node.type = FS_TYPE_FLOW_DEST; if (dest) memcpy(&rule->dest_attr, dest, sizeof(*dest)); + else + rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE; return rule; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 728ccb950fec..223c8741b7ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -604,7 +604,8 @@ static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev, if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return 0; for (i = 0; i < fte->dests_size; i++) { - if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER || + fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE) continue; if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT || fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) && @@ -727,6 +728,8 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, continue; switch (type) { + case MLX5_FLOW_DESTINATION_TYPE_NONE: + continue; case 
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM: id = fte->dest_arr[i].ft_num; ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 9da9df9ae751..8135713b0d2d 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -41,6 +41,7 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) enum mlx5_flow_destination_type { + MLX5_FLOW_DESTINATION_TYPE_NONE, MLX5_FLOW_DESTINATION_TYPE_VPORT, MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, MLX5_FLOW_DESTINATION_TYPE_TIR, -- cgit v1.2.3 From a30c8b9025dbc088c6041fccf97e0368c7db1c8d Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 15 Mar 2022 11:22:17 +0000 Subject: net/mlx5: fs, do proper bookkeeping for forward destinations Keep track after destinations that are forward destinations. When a forward destinations is removed from an FTE check if the actions bits need to be updated. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 20 +++++++++++++++++++- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1 + 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f9d6ddd865e0..ec91727eee2a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -424,6 +424,16 @@ static bool is_fwd_next_action(u32 action) MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS); } +static bool is_fwd_dest_type(enum mlx5_flow_destination_type type) +{ + return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM || + type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE || + type == MLX5_FLOW_DESTINATION_TYPE_UPLINK || + type == MLX5_FLOW_DESTINATION_TYPE_VPORT || + type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER || + type == MLX5_FLOW_DESTINATION_TYPE_TIR; +} + static bool check_valid_spec(const struct mlx5_flow_spec *spec) { int i; @@ -566,8 +576,13 @@ static void del_sw_hw_rule(struct fs_node *node) goto out; } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + if (is_fwd_dest_type(rule->dest_attr.type)) { --fte->dests_size; + --fte->fwd_dests; + + if (!fte->fwd_dests) + fte->action.action &= + ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); goto out; @@ -1367,6 +1382,9 @@ create_flow_handle(struct fs_fte *fte, if (dest) { fte->dests_size++; + if (is_fwd_dest_type(dest[i].type)) + fte->fwd_dests++; + type = dest[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER; *modify_mask |= type ? count : dst; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index c488a7c5b07e..67cad7a6d836 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -226,6 +226,7 @@ struct fs_fte { struct mlx5_fs_dr_rule fs_dr_rule; u32 val[MLX5_ST_SZ_DW_MATCH_PARAM]; u32 dests_size; + u32 fwd_dests; u32 index; struct mlx5_flow_context flow_context; struct mlx5_flow_act action; -- cgit v1.2.3 From 7b0c6338597613f465d131bd939a51844a00455a Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 15 Mar 2022 11:23:40 +0000 Subject: net/mlx5: fs, delete the FTE when there are no rules attached to it When an FTE has no children is means all the rules where removed and the FTE can be deleted regardless of the dests_size value. 
While dests_size should be 0 when there are no children be extra careful not to leak memory or get firmware syndrome if the proper bookkeeping of dests_size wasn't done. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index ec91727eee2a..572237f262e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2084,16 +2084,16 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) down_write_ref_node(&fte->node, false); for (i = handle->num_rules - 1; i >= 0; i--) tree_remove_node(&handle->rule[i]->node, true); - if (fte->dests_size) { - if (fte->modify_mask) - modify_fte(fte); - up_write_ref_node(&fte->node, false); - } else if (list_empty(&fte->node.children)) { + if (list_empty(&fte->node.children)) { del_hw_fte(&fte->node); /* Avoid double call to del_hw_fte */ fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else if (fte->dests_size) { + if (fte->modify_mask) + modify_fte(fte); + up_write_ref_node(&fte->node, false); } else { up_write_ref_node(&fte->node, false); } -- cgit v1.2.3 From 72191a4cd5250bc39e431ad1a2ad3b4fb3c52d2e Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 15 Mar 2022 11:51:55 +0000 Subject: net/mlx5: fs, call the deletion function of the node Don't call del_hw_fte() directly, instead use the hardware deletion function set. This is just a small cleanup and doesn't change anything as for an FTE the deletion function is already set to del_hw_fte(). Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 572237f262e0..d512445c7627 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2085,7 +2085,7 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) for (i = handle->num_rules - 1; i >= 0; i--) tree_remove_node(&handle->rule[i]->node, true); if (list_empty(&fte->node.children)) { - del_hw_fte(&fte->node); + fte->node.del_hw_func(&fte->node); /* Avoid double call to del_hw_fte */ fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); -- cgit v1.2.3 From 3a09fae035c879c7ae8e5e154d7b03ddf0de5f20 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 22 Mar 2022 12:58:02 +0000 Subject: net/mlx5: fs, an FTE should have no dests when deleted When deleting an FTE it should have no dests, which means fte->dests_size should be 0. Add a WARN_ON() to catch bugs where the proper tracking wasn't done. 
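As a side note, this is the usual "assert the invariant at the last exit point" pattern; a kernel-style sketch with made-up names (not the mlx5 code itself) looks roughly like:

#include <linux/bug.h>
#include <linux/types.h>

/* Hypothetical stand-in for the flow table entry bookkeeping. */
struct example_fte {
        unsigned int dests_size;        /* destinations still accounted for */
        bool active;                    /* still programmed in hardware */
};

static void example_del_hw_fte(struct example_fte *fte)
{
        /*
         * Every destination must have been unaccounted by the per-type
         * deletion paths before the entry leaves hardware; a non-zero
         * count here means a decrement was missed (or done twice and
         * wrapped), so warn loudly instead of failing silently later.
         */
        WARN_ON(fte->dests_size);

        if (fte->active) {
                /* ... issue the firmware delete command here ... */
                fte->active = false;
        }
}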
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index d512445c7627..fb8175672478 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -605,6 +605,7 @@ static void del_hw_fte(struct fs_node *node) fs_get_obj(ft, fg->node.parent); trace_mlx5_fs_del_fte(fte); + WARN_ON(fte->dests_size); dev = get_dev(&ft->node); root = find_root(&ft->node); if (node->active) { -- cgit v1.2.3 From ccc915e7dd7e79d2d810a62a73c57e25a120795d Mon Sep 17 00:00:00 2001 From: Srinivasan Raju Date: Mon, 2 May 2022 16:01:32 +0100 Subject: plfxlc: fix le16_to_cpu warning for beacon_interval Fix the following sparse warnings: drivers/net/wireless/purelifi/plfxlc/chip.c:36:31: sparse: expected unsigned short [usertype] beacon_interval drivers/net/wireless/purelifi/plfxlc/chip.c:36:31: sparse: got restricted __le16 [usertype] Reported-by: kernel test robot Signed-off-by: Srinivasan Raju Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502150133.6052-1-srini.raju@purelifi.com --- drivers/net/wireless/purelifi/plfxlc/chip.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/purelifi/plfxlc/chip.c b/drivers/net/wireless/purelifi/plfxlc/chip.c index a5ec10b66ed5..f4ef9ff97146 100644 --- a/drivers/net/wireless/purelifi/plfxlc/chip.c +++ b/drivers/net/wireless/purelifi/plfxlc/chip.c @@ -29,11 +29,10 @@ int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval, u8 dtim_period, int type) { if (!interval || - (chip->beacon_set && - le16_to_cpu(chip->beacon_interval) == interval)) + (chip->beacon_set && chip->beacon_interval == interval)) return 0; - chip->beacon_interval = cpu_to_le16(interval); + chip->beacon_interval = interval; chip->beacon_set = true; return plfxlc_usb_wreq(chip->usb.ez_usb, &chip->beacon_interval, -- cgit v1.2.3 From ec424639d41b82aee16282b47091f95ee42e1767 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:01 +0800 Subject: rtw89: 8852c: rfk: add RFK tables These tables are used by RFK (RF calibration) to set parameters. These parameters can trigger certain calibration, or configure/reset settings before and after RF calibrations. 
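For context on the table-driven format that follows: each RTW89_DECL_RFK_WM() entry pairs a register address, a bit mask and a field value, and a parser walks the array doing masked read-modify-write accesses (RTW89_DECL_RFK_DELAY() entries insert waits). The loop below is only an illustrative sketch of that idea; the struct layout and helper names are placeholders, not the rtw89 driver's actual ones.

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: how a masked-write calibration table is typically
 * consumed.  Names and layout are hypothetical, not rtw89's own.
 */
struct example_reg_def {
        u32 addr;       /* register offset */
        u32 mask;       /* bits to update (assumed non-zero) */
        u32 data;       /* value to place into the masked field */
};

struct example_dev;
u32 example_read32(struct example_dev *dev, u32 addr);
void example_write32(struct example_dev *dev, u32 addr, u32 val);

static void example_apply_rfk_table(struct example_dev *dev,
                                    const struct example_reg_def *defs,
                                    unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                u32 shift = __ffs(defs[i].mask);        /* lowest set bit */
                u32 val = example_read32(dev, defs[i].addr);

                /* read-modify-write: clear the field, insert the new data */
                val &= ~defs[i].mask;
                val |= (defs[i].data << shift) & defs[i].mask;
                example_write32(dev, defs[i].addr, val);
        }
}

Keeping the calibration sequences as data rather than open-coded register writes is what allows one parser to drive all of the per-band and per-path variants declared below.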
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-2-pkshih@realtek.com --- .../wireless/realtek/rtw89/rtw8852c_rfk_table.c | 781 +++++++++++++++++++++ .../wireless/realtek/rtw89/rtw8852c_rfk_table.h | 67 ++ 2 files changed, 848 insertions(+) create mode 100644 drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c create mode 100644 drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c new file mode 100644 index 000000000000..d727d528b365 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#include "rtw8852c_rfk_table.h" + +static const struct rtw89_reg5_def rtw8852c_dack_reload_defs[] = { + RTW89_DECL_RFK_WM(0xc004, BIT(17), 0x1), + RTW89_DECL_RFK_WM(0xc024, BIT(17), 0x1), + RTW89_DECL_RFK_WM(0xc104, BIT(17), 0x1), + RTW89_DECL_RFK_WM(0xc124, BIT(17), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reload_defs); + +static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_a[] = { + RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x0), + RTW89_DECL_RFK_WM(0xc000, BIT(17), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_a); + +static const struct rtw89_reg5_def rtw8852c_dack_reset_defs_b[] = { + RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x0), + RTW89_DECL_RFK_WM(0xc100, BIT(17), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_reset_defs_b); + +static const struct rtw89_reg5_def rtw8852c_dack_defs_s0[] = { + RTW89_DECL_RFK_WM(0x12b8, BIT(30), 0x1), + RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1), + RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0xc004, 0xfff00000, 0x30), + RTW89_DECL_RFK_WM(0xc024, 0xfff00000, 0x30), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s0); + +static const struct rtw89_reg5_def rtw8852c_dack_defs_s1[] = { + RTW89_DECL_RFK_WM(0x32b8, BIT(30), 0x1), + RTW89_DECL_RFK_WM(0x030c, BIT(28), 0x1), + RTW89_DECL_RFK_WM(0x032c, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0xc104, 0xfff00000, 0x30), + RTW89_DECL_RFK_WM(0xc124, 0xfff00000, 0x30), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dack_defs_s1); + +static const struct rtw89_reg5_def rtw8852c_drck_defs[] = { + RTW89_DECL_RFK_WM(0xc0c4, BIT(6), 0x0), + RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x1), + RTW89_DECL_RFK_DELAY(1), + RTW89_DECL_RFK_WM(0xc094, BIT(9), 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_drck_defs); + +static const struct rtw89_reg5_def rtw8852c_iqk_rxk_cfg_defs[] = { + RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0f), + RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x03), + RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0001), + RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0041), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_rxk_cfg_defs); + +static const struct rtw89_reg5_def rtw8852c_iqk_afebb_restore_defs_a[] = { + RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x00100000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x10000000, 0x0), + RTW89_DECL_RFK_WM(0x5670, MASKDWORD, 0x00000000), + RTW89_DECL_RFK_WM(0x12a0, 0x000ff000, 0x00), + RTW89_DECL_RFK_WM(0x20fc, 0x00010000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x01000000, 0x0), + RTW89_DECL_RFK_WRF(RF_PATH_A, 0x10005, 0x00001, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_a); + +static const struct rtw89_reg5_def 
rtw8852c_iqk_afebb_restore_defs_b[] = { + RTW89_DECL_RFK_WM(0x32b8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x00200000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x1), + RTW89_DECL_RFK_WM(0x20fc, 0x20000000, 0x0), + RTW89_DECL_RFK_WM(0x7670, MASKDWORD, 0x00000000), + RTW89_DECL_RFK_WM(0x32a0, 0x000ff000, 0x00), + RTW89_DECL_RFK_WM(0x20fc, 0x00020000, 0x0), + RTW89_DECL_RFK_WM(0x20fc, 0x02000000, 0x0), + RTW89_DECL_RFK_WRF(RF_PATH_B, 0x10005, 0x00001, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_iqk_afebb_restore_defs_b); + +static const struct rtw89_reg5_def rtw8852c_read_rxsram_pre_defs[] = { + RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x1), + RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x1), + RTW89_DECL_RFK_WM(0x80d4, MASKDWORD, 0x00020000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_pre_defs); + +static const struct rtw89_reg5_def rtw8852c_read_rxsram_post_defs[] = { + RTW89_DECL_RFK_WM(0x80e8, BIT(7), 0x0), + RTW89_DECL_RFK_WM(0x8074, BIT(31), 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_read_rxsram_post_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order0_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x0), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x2), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order0_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order1_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x1), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x1), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order1_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order2_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x2), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x0), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x0), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order2_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_mdpd_order3_defs[] = { + RTW89_DECL_RFK_WM(0x80a0, BIT(1) | BIT(0), 0x3), + RTW89_DECL_RFK_WM(0x809c, BIT(10) | BIT(9), 0x3), + RTW89_DECL_RFK_WM(0x80a0, 0x00001F00, 0x4), + RTW89_DECL_RFK_WM(0x8070, 0x70000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_mdpd_order3_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_on_defs[] = { + RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000080), + RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x807f030a), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_on_defs); + +static const struct rtw89_reg5_def rtw8852c_dpk_kip_pwr_clk_off_defs[] = { + RTW89_DECL_RFK_WM(0x8008, MASKDWORD, 0x00000000), + RTW89_DECL_RFK_WM(0x8088, MASKDWORD, 0x80000000), + RTW89_DECL_RFK_WM(0x80f4, BIT(18), 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_dpk_kip_pwr_clk_off_defs); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs[] = { + RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0xb5b5), + RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0xb5b5), + RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16), + RTW89_DECL_RFK_WM(0x0304, 0x0000ffff, 0x1f19), + RTW89_DECL_RFK_WM(0x0308, 0xff000000, 0x1c), + RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041), + RTW89_DECL_RFK_WM(0x0324, 0xffff0000, 0x2001), + RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e), + 
RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e), + RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0), + RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_2g_b[] = { + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_2g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_a[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_sys_defs_5g_b[] = { + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_sys_defs_5g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_a[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x026d000), + RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x3dc80280), + RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00000080), + RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03), + RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5854, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000), + RTW89_DECL_RFK_WM(0x58f8, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_a); + +static const struct 
rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_defs_b[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x026d000), + RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x3dc80280), + RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00000080), + RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03), + RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0x800), + RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000), + RTW89_DECL_RFK_WM(0x78f8, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = { + RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe), + RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = { + RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe), + RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_a[] = { + RTW89_DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x0), + RTW89_DECL_RFK_WM(0x58c8, 0x00000fff, 0x0), + RTW89_DECL_RFK_WM(0x58c8, 0x00fff000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_b[] = { + RTW89_DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x0), + RTW89_DECL_RFK_WM(0x78c8, 0x00000fff, 0x0), + RTW89_DECL_RFK_WM(0x78c8, 0x00fff000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x1af), + RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_2g_b[] = { + RTW89_DECL_RFK_WM(0x780c, 
0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x1af), + RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_2g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_a[] = { + RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x5814, 0x00001000, 0x1), + RTW89_DECL_RFK_WM(0x5814, 0x0003c000, 0xb), + RTW89_DECL_RFK_WM(0x5814, 0x00002000, 0x1), + RTW89_DECL_RFK_WM(0x5814, 0x003c0000, 0x6), + RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_dck_defs_5g_b[] = { + RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x7814, 0x00001000, 0x1), + RTW89_DECL_RFK_WM(0x7814, 0x0003c000, 0xb), + RTW89_DECL_RFK_WM(0x7814, 0x00002000, 0x1), + RTW89_DECL_RFK_WM(0x7814, 0x003c0000, 0x6), + RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_dck_defs_5g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_a[] = { + RTW89_DECL_RFK_WM(0x5818, 0x08000000, 0x1), + RTW89_DECL_RFK_WM(0x58d4, 0xf0000000, 0x7), + RTW89_DECL_RFK_WM(0x58f0, 0x000c0000, 0x1), + RTW89_DECL_RFK_WM(0x58f0, 0xfff00000, 0x400), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_bbgain_split_b[] = { + RTW89_DECL_RFK_WM(0x7818, 0x08000000, 0x1), + RTW89_DECL_RFK_WM(0x78d4, 0xf0000000, 0x7), + RTW89_DECL_RFK_WM(0x78f0, 0x000c0000, 0x1), + RTW89_DECL_RFK_WM(0x78f0, 0xfff00000, 0x400), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_bbgain_split_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201020), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x0808081e), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x081d), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_2g_b[] = { + RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0204020), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x020), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08081e21), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x1d23), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_2g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_a[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 
0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_cal_org_defs_5g_b[] = { + RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_cal_org_defs_5g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_a[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x2d2721), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x3b8), + RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3d2), + RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x042), + RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x06b), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x3bc), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x3d6), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x03e), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x06b), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_2g_b[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x2d2721), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x3c0), + RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3da), + RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x002), + RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x071), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x3c8), + RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x3e2), + RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x00c), + RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x071), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_2g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_a[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x07d), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 
0x000), + RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_5g_b[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x07d), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000), + RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_5g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_a[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x312600), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5634, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5634, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x5638, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x5638, 0x000ffc00, 0x080), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x5640, 0x3ff00000, 0x000), + RTW89_DECL_RFK_WM(0x5644, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x5644, 0x000ffc00, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_set_aligk_default_defs_6g_b[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x000000), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x312600), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7634, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7634, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7634, 0x3ff00000, 0x3e9), + RTW89_DECL_RFK_WM(0x7638, 0x000003ff, 0x039), + RTW89_DECL_RFK_WM(0x7638, 0x000ffc00, 0x080), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x000ffc00, 0x000), + RTW89_DECL_RFK_WM(0x7640, 0x3ff00000, 0x000), + RTW89_DECL_RFK_WM(0x7644, 0x000003ff, 0x000), + RTW89_DECL_RFK_WM(0x7644, 0x000ffc00, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_set_aligk_default_defs_6g_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_a[] = { + RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0), + RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x0f), + RTW89_DECL_RFK_WM(0x581c, 0x000003ff, 0x280), + RTW89_DECL_RFK_WM(0x581c, 0x000ffc00, 0x200), + RTW89_DECL_RFK_WM(0x58b8, 0x007f0000, 0x00), + RTW89_DECL_RFK_WM(0x58b8, 0x7f000000, 0x00), + RTW89_DECL_RFK_WM(0x58b4, 0x7f000000, 0x0a), + RTW89_DECL_RFK_WM(0x58b8, 0x0000007f, 0x28), + 
RTW89_DECL_RFK_WM(0x58b8, 0x00007f00, 0x76), + RTW89_DECL_RFK_WM(0x5810, 0x20000000, 0x0), + RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x5834, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x5834, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5838, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5838, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x5854, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x5854, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5858, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5858, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x5824, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x5824, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5828, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5828, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x582c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x582c, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5830, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5830, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x583c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x583c, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5840, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5840, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x5844, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x5844, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5848, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5848, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x584c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x584c, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5850, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5850, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x585c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x585c, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x5860, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x5860, 0x003ff000, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_slope_defs_b[] = { + RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0), + RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x0f), + RTW89_DECL_RFK_WM(0x781c, 0x000003ff, 0x280), + RTW89_DECL_RFK_WM(0x781c, 0x000ffc00, 0x200), + RTW89_DECL_RFK_WM(0x78b8, 0x007f0000, 0x00), + RTW89_DECL_RFK_WM(0x78b8, 0x7f000000, 0x00), + RTW89_DECL_RFK_WM(0x78b4, 0x7f000000, 0x0a), + RTW89_DECL_RFK_WM(0x78b8, 0x0000007f, 0x28), + RTW89_DECL_RFK_WM(0x78b8, 0x00007f00, 0x76), + RTW89_DECL_RFK_WM(0x7810, 0x20000000, 0x0), + RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x7834, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x7834, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x7838, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7838, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x7854, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x7854, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x7858, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7858, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x7824, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x7824, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x7828, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7828, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x782c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x782c, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x7830, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7830, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x783c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x783c, 0x3ffc0000, 0x000), + 
RTW89_DECL_RFK_WM(0x7840, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7840, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x7844, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x7844, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x7848, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7848, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x784c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x784c, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x7850, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7850, 0x003ff000, 0x000), + RTW89_DECL_RFK_WM(0x785c, 0x0003ffff, 0x115f2), + RTW89_DECL_RFK_WM(0x785c, 0x3ffc0000, 0x000), + RTW89_DECL_RFK_WM(0x7860, 0x00000fff, 0x121), + RTW89_DECL_RFK_WM(0x7860, 0x003ff000, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_slope_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_a[] = { + RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_run_slope_defs_b[] = { + RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_run_slope_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_a[] = { + RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x0), + RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x0), + RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x5864, 0x000003ff, 0x1ff), + RTW89_DECL_RFK_WM(0x5864, 0x000ffc00, 0x200), + RTW89_DECL_RFK_WM(0x5820, 0x00000fff, 0x080), + RTW89_DECL_RFK_WM(0x5814, 0x01000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_track_defs_b[] = { + RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x0), + RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x0), + RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x7864, 0x000003ff, 0x1ff), + RTW89_DECL_RFK_WM(0x7864, 0x000ffc00, 0x200), + RTW89_DECL_RFK_WM(0x7820, 0x00000fff, 0x080), + RTW89_DECL_RFK_WM(0x7814, 0x01000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_track_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_a[] = { + RTW89_DECL_RFK_WM(0x58e4, 0x00003800, 0x1), + RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0), + RTW89_DECL_RFK_WM(0x58e4, 0x00008000, 0x1), + RTW89_DECL_RFK_WM(0x58e4, 0x000f0000, 0x0), + RTW89_DECL_RFK_WM(0x58e8, 0x0000003f, 0x03), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_txagc_ofst_mv_avg_defs_b[] = { + RTW89_DECL_RFK_WM(0x78e4, 0x00003800, 0x1), + RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0), + RTW89_DECL_RFK_WM(0x78e4, 0x00008000, 0x1), + RTW89_DECL_RFK_WM(0x78e4, 0x000f0000, 0x0), + RTW89_DECL_RFK_WM(0x78e8, 0x0000003f, 0x03), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_txagc_ofst_mv_avg_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_a[] = { + RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x0), + RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x1), + RTW89_DECL_RFK_WRF(0x0, 0x10055, 0x00080, 0x1), + RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_enable_defs_b[] = { + RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x0), + RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x1), + 
RTW89_DECL_RFK_WRF(0x1, 0x10055, 0x00080, 0x1), + RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_enable_defs_b); + +static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_a[] = { + RTW89_DECL_RFK_WM(0x5820, 0x80000000, 0x00000000), + RTW89_DECL_RFK_WM(0x5818, 0x10000000, 0x00000000), + RTW89_DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_a); + +static const struct rtw89_reg5_def rtw8852c_tssi_disable_defs_b[] = { + RTW89_DECL_RFK_WM(0x7820, 0x80000000, 0x00000000), + RTW89_DECL_RFK_WM(0x7818, 0x10000000, 0x00000000), + RTW89_DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852c_tssi_disable_defs_b); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h new file mode 100644 index 000000000000..953a960ef1e8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2019-2022 Realtek Corporation + */ + +#ifndef __RTW89_8852C_RFK_TABLE_H__ +#define __RTW89_8852C_RFK_TABLE_H__ + +#include "phy.h" + +extern const struct rtw89_rfk_tbl rtw8852c_dack_reload_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dack_reset_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s0_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dack_defs_s1_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_drck_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_iqk_rxk_cfg_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_iqk_afebb_restore_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_pre_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_read_rxsram_post_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order0_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order1_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order2_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dpk_mdpd_order3_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_on_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_dpk_kip_pwr_clk_off_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_2g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_sys_defs_5g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_2g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_dck_defs_5g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_bbgain_split_a_tbl; +extern const struct rtw89_rfk_tbl 
rtw8852c_tssi_set_bbgain_split_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_slope_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_run_slope_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_track_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_enable_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852c_tssi_disable_defs_b_tbl; + +#endif -- cgit v1.2.3 From 76599a8d0b7d23b990d03dc5a0ceb450afd18391 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:02 +0800 Subject: rtw89: 8852c: rfk: add DACK DACK (digital-to-analog converters calibration) is used to calibrate DAC to output analog signals as expected. 
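As an illustration of the reload step, here is a stand-alone sketch (not driver code; the types and helper names are local stand-ins) of how the backed-up MSBK codes are repacked four per 32-bit word and where those words land for a given path and index, mirroring _dack_reload_by_path() in this patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DACK_MSBK_NR 16	/* 16 MSBK codes per path/index, as in the patch */

/* Pack codes [base .. base+3] into one word, code base+i into byte lane i. */
static uint32_t dack_pack_word(const uint8_t *msbk, unsigned int base)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		val |= (uint32_t)msbk[base + i] << (8 * i);
	return val;
}

/* Reload block starts at 0xc200; +0x14 for index 1, +0x28 for path B. */
static uint32_t dack_reload_addr(unsigned int path_b, unsigned int index,
				 unsigned int word)
{
	return 0xc200 + (index ? 0x14 : 0) + (path_b ? 0x28 : 0) + 4 * word;
}

int main(void)
{
	uint8_t msbk[DACK_MSBK_NR] = {
		0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
		0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	};
	unsigned int w;

	/* Path B, index 1: the four MSBK words land at 0xc23c..0xc248,
	 * high entries packed first (15/14/13/12, then 11/10/9/8, ...).
	 */
	for (w = 0; w < 4; w++)
		printf("0x%04" PRIx32 " = 0x%08" PRIx32 "\n",
		       dack_reload_addr(1, 1, w),
		       dack_pack_word(msbk, 12 - 4 * w));
	return 0;
}

The bias and DADCK codes follow in a fifth word at 0xc210 plus the same per-path/per-index offset.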
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 96 ++++- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 446 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 1 + 4 files changed, 538 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 83cb509d2fbe..ce472d3b1a66 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3484,9 +3484,12 @@ #define R_S0_HW_SI_DIS 0x1200 #define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28) #define R_P0_RXCK 0x12A0 -#define B_P0_RXCK_VAL GENMASK(18, 16) -#define B_P0_RXCK_ON BIT(19) #define B_P0_RXCK_BW3 BIT(30) +#define B_P0_TXCK_ALL GENMASK(19, 12) +#define B_P0_RXCK_ON BIT(19) +#define B_P0_RXCK_VAL GENMASK(18, 16) +#define B_P0_TXCK_ON BIT(15) +#define B_P0_TXCK_VAL GENMASK(14, 12) #define R_P0_NRBW 0x12B8 #define B_P0_NRBW_DBG BIT(30) #define R_S0_RXDC 0x12D4 @@ -4035,19 +4038,98 @@ #define R_IQKINF2 0x9FE8 #define B_IQKINF2_FCNT GENMASK(23, 16) #define B_IQKINF2_KCNT GENMASK(15, 8) -#define B_IQKINF2_NCTLV GENMAKS(7, 0) +#define B_IQKINF2_NCTLV GENMASK(7, 0) +#define R_DCOF0 0xC000 +#define B_DCOF0_V GENMASK(4, 1) +#define R_DCOF1 0xC004 +#define B_DCOF1_S BIT(0) +#define R_DCOF8 0xC020 +#define B_DCOF8_V GENMASK(4, 1) +#define R_DACK_S0P0 0xC040 +#define B_DACK_S0P0_OK BIT(31) +#define R_DACK_BIAS00 0xc048 +#define B_DACK_BIAS00 GENMASK(11, 2) +#define R_DACK_S0P2 0xC05C +#define B_DACK_S0M0 GENMASK(31, 24) +#define B_DACK_S0P2_OK BIT(2) +#define R_DACK_DADCK00 0xC060 +#define B_DACK_DADCK00 GENMASK(31, 24) +#define R_DACK_S0P1 0xC064 +#define B_DACK_S0P1_OK BIT(31) +#define R_DACK_BIAS01 0xC06C +#define B_DACK_BIAS01 GENMASK(11, 2) +#define R_DACK_S0P3 0xC080 +#define B_DACK_S0M1 GENMASK(31, 24) +#define B_DACK_S0P3_OK BIT(2) +#define R_DACK_DADCK01 0xC084 +#define B_DACK_DADCK01 GENMASK(31, 24) +#define R_DRCK 0xC0C4 +#define B_DRCK_IDLE BIT(9) +#define B_DRCK_EN BIT(6) +#define B_DRCK_VAL GENMASK(4, 0) +#define R_DRCK_RES 0xC0C8 +#define B_DRCK_RES GENMASK(19, 15) +#define B_DRCK_POL BIT(3) #define R_PATH0_SAMPL_DLY_T_V1 0xC0D4 #define B_PATH0_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26) +#define R_P0_CFCH_BW0 0xC0D4 +#define B_P0_CFCH_BW0 GENMASK(27, 26) +#define R_P0_CFCH_BW1 0xC0D8 +#define B_P0_CFCH_BW1 GENMASK(8, 5) +#define R_ADDCK0 0xC0F4 +#define B_ADDCK0 GENMASK(9, 8) +#define B_ADDCK0_EN BIT(4) +#define B_ADDCK0_RST BIT(2) +#define R_ADDCK0_RL 0xC0F8 +#define B_ADDCK0_RLS GENMASK(29, 28) +#define B_ADDCK0_RL1 GENMASK(27, 18) +#define B_ADDCK0_RL0 GENMASK(17, 8) +#define R_ADDCKR0 0xC0FC +#define B_ADDCKR0_A0 GENMASK(19, 10) +#define B_ADDCKR0_A1 GENMASK(9, 0) +#define R_DACK10 0xC100 +#define B_DACK10 GENMASK(4, 1) +#define R_DACK1_K 0xc104 +#define B_DACK1_EN BIT(0) +#define R_DACK11 0xC120 +#define B_DACK11 GENMASK(4, 1) +#define R_DACK_S1P0 0xC140 +#define B_DACK_S1P0_OK BIT(31) +#define R_DACK_BIAS10 0xC148 +#define B_DACK_BIAS10 GENMASK(11, 2) +#define R_DACK10S 0xC15C +#define B_DACK10S GENMASK(31, 24) +#define R_DACK_S1P2 0xC15C +#define B_DACK_S1P2_OK BIT(2) +#define R_DACK_DADCK10 0xC160 +#define B_DACK_DADCK10 GENMASK(31, 24) +#define R_DACK_S1P1 0xC164 +#define B_DACK_S1P1_OK BIT(31) +#define R_DACK_BIAS11 0xC16C +#define B_DACK_BIAS11 GENMASK(11, 2) +#define R_DACK11S 0xC180 +#define 
B_DACK11S GENMASK(31, 24) +#define R_DACK_S1P3 0xC180 +#define B_DACK_S1P3_OK BIT(2) +#define R_DACK_DADCK11 0xC184 +#define B_DACK_DADCK11 GENMASK(31, 24) #define R_PATH1_SAMPL_DLY_T_V1 0xC1D4 #define B_PATH1_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26) #define R_PATH0_BW_SEL_V1 0xC0D8 #define B_PATH0_BW_SEL_MSK_V1 GENMASK(8, 5) #define R_PATH1_BW_SEL_V1 0xC1D8 #define B_PATH1_BW_SEL_MSK_V1 GENMASK(8, 5) -#define R_P0_CFCH_BW0 0xC0D4 -#define B_P0_CFCH_BW0 GENMASK(27, 26) -#define R_P0_CFCH_BW1 0xC0D8 -#define B_P0_CFCH_BW1 GENMASK(8, 5) +#define R_ADDCK1 0xC1F4 +#define B_ADDCK1 GENMASK(9, 8) +#define B_ADDCK1_EN BIT(4) +#define B_ADDCK1_RST BIT(2) +#define R_ADDCK1_RL 0xC1F8 +#define B_ADDCK1_RLS GENMASK(29, 28) +#define B_ADDCK1_RL1 GENMASK(27, 18) +#define B_ADDCK1_RL0 GENMASK(17, 8) +#define R_ADDCKR1 0xC1fC +#define B_ADDCKR1_A0 GENMASK(19, 10) +#define B_ADDCKR1_A1 GENMASK(9, 0) /* WiFi CPU local domain */ #define R_AX_WDT_CTRL 0x0040 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 275568468212..502627d8141d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1771,6 +1771,8 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; memset(mcc_info, 0, sizeof(*mcc_info)); + + rtw8852c_dack(rtwdev); } static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 56f0876c03fb..4245a2c5f9d6 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -2,11 +2,13 @@ /* Copyright(c) 2019-2022 Realtek Corporation */ +#include "coex.h" #include "debug.h" #include "phy.h" #include "reg.h" #include "rtw8852c.h" #include "rtw8852c_rfk.h" +#include "rtw8852c_rfk_table.h" static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { @@ -22,6 +24,441 @@ static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return RF_B; } +static void _dack_dump(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + u8 t; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[0][0], dack->addck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[1][0], dack->addck_d[1][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[0][0], dack->dadck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[1][0], dack->dadck_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[0][0], dack->biask_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[1][0], dack->biask_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][0][i]; + rtw89_debug(rtwdev, 
RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } +} + +static void _addck_backup(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); + dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, + B_ADDCKR0_A0); + dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, + B_ADDCKR0_A1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0); + dack->addck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, + B_ADDCKR1_A0); + dack->addck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, + B_ADDCKR1_A1); +} + +static void _addck_reload(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, + dack->addck_d[0][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, + dack->addck_d[0][1]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, + dack->addck_d[1][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, + dack->addck_d[1][1]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3); +} + +static void _dack_backup_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); + dack->msbk_d[0][0][i] = rtw89_phy_read32_mask(rtwdev, + R_DACK_S0P2, + B_DACK_S0M0); + rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); + dack->msbk_d[0][1][i] = rtw89_phy_read32_mask(rtwdev, + R_DACK_S0P3, + B_DACK_S0M1); + } + dack->biask_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, + B_DACK_BIAS00); + dack->biask_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, + B_DACK_BIAS01); + dack->dadck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, + B_DACK_DADCK00); + dack->dadck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, + B_DACK_DADCK01); +} + +static void _dack_backup_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i); + dack->msbk_d[1][0][i] = rtw89_phy_read32_mask(rtwdev, + R_DACK10S, + B_DACK10S); + rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i); + dack->msbk_d[1][1][i] = rtw89_phy_read32_mask(rtwdev, + R_DACK11S, + B_DACK11S); + } + dack->biask_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, + B_DACK_BIAS10); + dack->biask_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, + B_DACK_BIAS11); + dack->dadck_d[1][0] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, + B_DACK_DADCK10); + dack->dadck_d[1][1] = rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, + B_DACK_DADCK11); +} + +static void _dack_reload_by_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 index) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 idx_offset, path_offset; + u32 val32, offset, addr; + u8 i; + + idx_offset = (index == 0 ? 0 : 0x14); + path_offset = (path == RF_PATH_A ? 
0 : 0x28); + offset = idx_offset + path_offset; + + rtw89_rfk_parser(rtwdev, &rtw8852c_dack_reload_defs_tbl); + + /* msbk_d: 15/14/13/12 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i + 12] << (i * 8); + addr = 0xc200 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* msbk_d: 11/10/9/8 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i + 8] << (i * 8); + addr = 0xc204 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* msbk_d: 7/6/5/4 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i + 4] << (i * 8); + addr = 0xc208 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* msbk_d: 3/2/1/0 */ + val32 = 0x0; + for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++) + val32 |= dack->msbk_d[path][index][i] << (i * 8); + addr = 0xc20c + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", addr, + rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD)); + + /* dadak_d/biask_d */ + val32 = (dack->biask_d[path][index] << 22) | + (dack->dadck_d[path][index] << 14); + addr = 0xc210 + offset; + rtw89_phy_write32(rtwdev, addr, val32); + rtw89_phy_write32_set(rtwdev, addr, BIT(1)); +} + +static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u8 i; + + for (i = 0; i < 2; i++) + _dack_reload_by_path(rtwdev, path, i); +} + +static void _addck(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 val; + int ret; + + /* S0 */ + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0); + fsleep(1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, rtwdev, 0xc0fc, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); + dack->addck_timeout[0] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0); + + /* S1 */ + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_EN, 0x0); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, rtwdev, 0xc1fc, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n"); + dack->addck_timeout[0] = true; + } + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_RST, 0x0); +} + +static void _dack_reset(struct rtw89_dev *rtwdev, u8 path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_dack_reset_defs_a_tbl, + &rtw8852c_dack_reset_defs_b_tbl); +} + +enum adc_ck { + ADC_NA = 0, + ADC_480M = 1, + ADC_960M = 2, + ADC_1920M = 3, +}; + +enum dac_ck { + DAC_40M = 0, + DAC_80M = 1, + DAC_120M = 2, + DAC_160M = 3, + DAC_240M = 4, + DAC_320M = 5, + DAC_480M = 6, + DAC_960M = 7, +}; + +enum rf_mode { + RF_SHUT_DOWN = 0x0, + RF_STANDBY = 0x1, + 
RF_TX = 0x2, + RF_RX = 0x3, + RF_TXIQK = 0x4, + RF_DPK = 0x5, + RF_RXK1 = 0x6, + RF_RXK2 = 0x7, +}; + +static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force, + enum dac_ck ck) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0); + + if (!force) + return; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1); +} + +static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force, + enum adc_ck ck) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0); + + if (!force) + return; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1); +} + +static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0) +{ + if (s0) { + if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) + return false; + } else { + if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0) + return false; + } + + return true; +} + +static void _dack_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + bool done; + int ret; + + rtw8852c_txck_force(rtwdev, RF_PATH_A, true, DAC_160M); + rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s0_tbl); + + _dack_reset(rtwdev, RF_PATH_A); + + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1); + ret = read_poll_timeout_atomic(_check_dack_done, done, done, + 1, 10000, false, rtwdev, true); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n"); + dack->msbk_timeout[0] = true; + } + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0); + rtw8852c_txck_force(rtwdev, RF_PATH_A, false, DAC_960M); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n"); + + _dack_backup_s0(rtwdev); + _dack_reload(rtwdev, RF_PATH_A); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); +} + +static void _dack_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + bool done; + int ret; + + rtw8852c_txck_force(rtwdev, RF_PATH_B, true, DAC_160M); + rtw89_rfk_parser(rtwdev, &rtw8852c_dack_defs_s1_tbl); + + _dack_reset(rtwdev, RF_PATH_B); + + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1); + ret = read_poll_timeout_atomic(_check_dack_done, done, done, + 1, 10000, false, rtwdev, false); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n"); + dack->msbk_timeout[0] = true; + } + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0); + rtw8852c_txck_force(rtwdev, RF_PATH_B, false, DAC_960M); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n"); + + _dack_backup_s1(rtwdev); + _dack_reload(rtwdev, RF_PATH_B); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); +} + +static void _dack(struct rtw89_dev *rtwdev) +{ + _dack_s0(rtwdev); + _dack_s1(rtwdev); +} + +static void _drck(struct rtw89_dev *rtwdev) +{ + u32 val; + int ret; + + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1); + ret = 
read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, rtwdev, 0xc0c8, BIT(3)); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); + + rtw89_rfk_parser(rtwdev, &rtw8852c_drck_defs_tbl); + + val = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, B_DRCK_RES); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, val); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n", + rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); +} + +static void _dac_cal(struct rtw89_dev *rtwdev, bool force) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 rf0_0, rf1_0; + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB); + + dack->dack_done = false; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n"); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); + rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK); + rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK); + _drck(rtwdev); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START); + _addck(rtwdev); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP); + + _addck_backup(rtwdev); + _addck_reload(rtwdev); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START); + _dack(rtwdev); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP); + + _dack_dump(rtwdev); + dack->dack_done = true; + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1); + dack->dack_cnt++; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); +} + static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, enum rtw89_bandwidth bw, bool is_dav) { @@ -212,3 +649,12 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, param->center_chan, param->band_type, param->bandwidth); } + +void rtw8852c_dack(struct rtw89_dev *rtwdev) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); + _dac_cal(rtwdev, false); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index 0e75555c1612..7323183e74d4 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -7,6 +7,7 @@ #include "core.h" +void rtw8852c_dack(struct rtw89_dev *rtwdev); void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, struct rtw89_channel_params *param, enum rtw89_phy_idx phy_idx); -- cgit v1.2.3 From fb8177d729f2c1f772128441adc80962d7e6ee85 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:03 +0800 Subject: rtw89: 8852c: rfk: add LCK LCK is short fro LC Tank calibration. Do this calibration once driver loads RF parameters table. 
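For reference, a minimal sketch of what that one-shot trigger amounts to, mirroring _lck() later in this patch (the register and accessor names are the ones the patch itself introduces or already uses):

/* Illustrative only: pulse the LCK trigger select bit around a rewrite
 * of the RF channel configuration word on each path; this is how _lck()
 * kicks the LC tank calibration.
 */
static void lck_trigger_sketch(struct rtw89_dev *rtwdev, int nr_paths)
{
	u32 cfgch;
	int i;

	for (i = 0; i < nr_paths; i++) {
		cfgch = rtw89_read_rf(rtwdev, i, RR_CFGCH, RFREG_MASK);
		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
		rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, cfgch);
		rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
	}
}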
Since the characteristic can be changed by temperature, we do this calibration again if difference of thermal value is over a threshold. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 5 ++ drivers/net/wireless/realtek/rtw89/reg.h | 2 + drivers/net/wireless/realtek/rtw89/rtw8852c.c | 7 +++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 63 +++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 2 + 5 files changed, 79 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index d544cf29e588..41d23711dc35 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2642,6 +2642,10 @@ struct rtw89_mcc_info { u8 table_idx; }; +struct rtw89_lck_info { + u8 thermal[RF_PATH_MAX]; +}; + struct rtw89_iqk_info { bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; @@ -3114,6 +3118,7 @@ struct rtw89_dev { struct rtw89_iqk_info iqk; struct rtw89_dpk_info dpk; struct rtw89_mcc_info mcc; + struct rtw89_lck_info lck; bool is_tssi_mode[RF_PATH_MAX]; bool is_bt_iqk_timeout; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index ce472d3b1a66..028c88130823 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3327,6 +3327,8 @@ #define RR_MIXER_GN GENMASK(4, 3) #define RR_XTALX2 0xb8 #define RR_MALSEL 0xbe +#define RR_LCK_TRG 0xd3 +#define RR_LCK_TRGSEL BIT(8) #define RR_RCKD 0xde #define RR_RCKD_POW GENMASK(19, 13) #define RR_RCKD_BW BIT(2) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 502627d8141d..14302ebed3d5 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1771,6 +1771,7 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; memset(mcc_info, 0, sizeof(*mcc_info)); + rtw8852c_lck_init(rtwdev); rtw8852c_dack(rtwdev); } @@ -1780,6 +1781,11 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } +static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev) +{ + rtw8852c_lck_track(rtwdev); +} + static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, s16 ref) { @@ -2708,6 +2714,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .read_phycap = rtw8852c_read_phycap, .rfk_init = rtw8852c_rfk_init, .rfk_channel = rtw8852c_rfk_channel, + .rfk_track = rtw8852c_rfk_track, .power_trim = rtw8852c_power_trim, .set_txpwr = rtw8852c_set_txpwr, .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 4245a2c5f9d6..ce08b2dcf545 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -632,6 +632,69 @@ static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, } } +static void _lck_keep_thermal(struct rtw89_dev *rtwdev) +{ + struct rtw89_lck_info *lck = &rtwdev->lck; + int path; + + for (path = 0; path < rtwdev->chip->rf_path_num; path++) { + lck->thermal[path] = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + rtw89_debug(rtwdev, 
RTW89_DBG_RFK_TRACK, + "[LCK] path=%d thermal=0x%x", path, lck->thermal[path]); + } +} + +static void _lck(struct rtw89_dev *rtwdev) +{ + u32 tmp18[2]; + int path = rtwdev->dbcc_en ? 2 : 1; + int i; + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "[LCK] DO LCK\n"); + + tmp18[0] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); + tmp18[1] = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); + + for (i = 0; i < path; i++) { + rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1); + rtw89_write_rf(rtwdev, i, RR_CFGCH, RFREG_MASK, tmp18[i]); + rtw89_write_rf(rtwdev, i, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0); + } + + _lck_keep_thermal(rtwdev); +} + +#define RTW8852C_LCK_TH 8 + +void rtw8852c_lck_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_lck_info *lck = &rtwdev->lck; + u8 cur_thermal; + int delta; + int path; + + for (path = 0; path < rtwdev->chip->rf_path_num; path++) { + cur_thermal = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + delta = abs((int)cur_thermal - lck->thermal[path]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[LCK] path=%d current thermal=0x%x delta=0x%x\n", + path, cur_thermal, delta); + + if (delta >= RTW8852C_LCK_TH) { + _lck(rtwdev); + return; + } + } +} + +void rtw8852c_lck_init(struct rtw89_dev *rtwdev) +{ + _lck_keep_thermal(rtwdev); +} + static void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 central_ch, enum rtw89_band band, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index 7323183e74d4..4ce76ef4c5e6 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -11,5 +11,7 @@ void rtw8852c_dack(struct rtw89_dev *rtwdev); void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, struct rtw89_channel_params *param, enum rtw89_phy_idx phy_idx); +void rtw8852c_lck_init(struct rtw89_dev *rtwdev); +void rtw8852c_lck_track(struct rtw89_dev *rtwdev); #endif -- cgit v1.2.3 From e5efc4d55c20beea57683b3c94f0e1c4e254c9c9 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:04 +0800 Subject: rtw89: 8852c: rfk: add TSSI TSSI is transmitter signal strength indication, which is a close-loop hardware circuit to feedback actual transmitting power as a reference for next transmission. When we setup channel to connect an AP, it does full calibration. When switching bands or channels, it needs to reset hardware status to prevent use wrong feedback of previous transmission. To do TX power compensation reflecting current temperature, it loads tables of compensation values into registers according to channel and band group. 
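As a concrete illustration of that last point, here is a stand-alone model (not driver code; names are local stand-ins) of how four signed thermal-compensation offsets are packed into each 32-bit table word before being written to the per-path offset registers, matching the RTW8852C_TSSI_GET_VAL packing used by _tssi_set_tmeter_tbl() in this patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Entry idx+i lands in byte lane i (bits 8*i+7 .. 8*i) of the word. */
static uint32_t tssi_pack_word(const int8_t *ofst, unsigned int idx)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		val |= (uint32_t)(uint8_t)ofst[idx + i] << (8 * i);
	return val;
}

int main(void)
{
	/* Offsets -1, -2, -3, -4 pack to 0xfcfdfeff. */
	int8_t ofst[4] = { -1, -2, -3, -4 };

	printf("0x%08" PRIx32 "\n", tssi_pack_word(ofst, 0));
	return 0;
}

The 64-entry offset table is then written as 16 such words starting at 0x5c00 (path A) or 0x7c00 (path B), as the debug prints in the patch show.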
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 19 + drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 1033 +++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 5 + 3 files changed, 1057 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 14302ebed3d5..36406488e60e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1754,6 +1754,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter, rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL); rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false); rtw8852c_dfs_en(rtwdev, false); + rtw8852c_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0); rtw8852c_adc_en(rtwdev, false); fsleep(40); rtw8852c_bb_reset_en(rtwdev, phy_idx, false); @@ -1761,6 +1762,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter, rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); rtw8852c_adc_en(rtwdev, true); rtw8852c_dfs_en(rtwdev, true); + rtw8852c_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0); rtw8852c_bb_reset_en(rtwdev, phy_idx, true); rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en); } @@ -1770,6 +1772,8 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) { struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + rtwdev->is_tssi_mode[RF_PATH_A] = false; + rtwdev->is_tssi_mode[RF_PATH_B] = false; memset(mcc_info, 0, sizeof(*mcc_info)); rtw8852c_lck_init(rtwdev); @@ -1778,9 +1782,22 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) { + enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + + rtw8852c_tssi(rtwdev, phy_idx); rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } +static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev) +{ + rtw8852c_tssi_scan(rtwdev, RTW89_PHY_0); +} + +static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start) +{ + rtw8852c_wifi_scan_notify(rtwdev, start, RTW89_PHY_0); +} + static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev) { rtw8852c_lck_track(rtwdev); @@ -2714,6 +2731,8 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .read_phycap = rtw8852c_read_phycap, .rfk_init = rtw8852c_rfk_init, .rfk_channel = rtw8852c_rfk_channel, + .rfk_band_changed = rtw8852c_rfk_band_changed, + .rfk_scan = rtw8852c_rfk_scan, .rfk_track = rtw8852c_rfk_track, .power_trim = rtw8852c_power_trim, .set_txpwr = rtw8852c_set_txpwr, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index ce08b2dcf545..974bb5794ad9 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -9,6 +9,17 @@ #include "rtw8852c.h" #include "rtw8852c_rfk.h" #include "rtw8852c_rfk_table.h" +#include "rtw8852c_table.h" + +#define _TSSI_DE_MASK GENMASK(21, 12) +static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858}; +static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860}; +static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852C] = {0x5838, 0x7838}; +static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852C] = {0x5840, 0x7840}; +static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852C] = {0x5848, 0x7848}; +static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850}; +static 
const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828}; +static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830}; static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { @@ -459,6 +470,901 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force) rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); } +static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl); + + if (path == RF_PATH_A) + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_sys_defs_2g_a_tbl, + &rtw8852c_tssi_sys_defs_5g_a_tbl); + else + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_sys_defs_2g_b_tbl, + &rtw8852c_tssi_sys_defs_5g_b_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_txpwr_ctrl_bb_defs_a_tbl, + &rtw8852c_tssi_txpwr_ctrl_bb_defs_b_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl, + &rtw8852c_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl); +} + +static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + if (path == RF_PATH_A) { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl); + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_dck_defs_2g_a_tbl, + &rtw8852c_tssi_dck_defs_5g_a_tbl); + } else { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_b_tbl); + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_dck_defs_2g_b_tbl, + &rtw8852c_tssi_dck_defs_5g_b_tbl); + } +} + +static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_set_bbgain_split_a_tbl, + &rtw8852c_tssi_set_bbgain_split_b_tbl); +} + +static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ +#define RTW8852C_TSSI_GET_VAL(ptr, idx) \ +({ \ + s8 *__ptr = (ptr); \ + u8 __idx = (idx), __i, __v; \ + u32 __val = 0; \ + for (__i = 0; __i < 4; __i++) { \ + __v = (__ptr[__idx + __i]); \ + __val |= (__v << (8 * __i)); \ + } \ + __val; \ +}) + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel; + u8 subband = rtwdev->hal.current_subband; + const s8 *thm_up_a = NULL; + const s8 *thm_down_a = NULL; + const s8 *thm_up_b = NULL; + const s8 *thm_down_b = NULL; + u8 thermal = 0xff; + s8 thm_ofst[64] = {0}; + u32 tmp = 0; + u8 i, j; + + switch (subband) { + default: + case RTW89_CH_2G: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n; + break; + case RTW89_CH_5G_BAND_1: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0]; + break; + case RTW89_CH_5G_BAND_3: + 
thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1]; + break; + case RTW89_CH_5G_BAND_4: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2]; + break; + case RTW89_CH_6G_BAND_IDX0: + case RTW89_CH_6G_BAND_IDX1: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0]; + break; + case RTW89_CH_6G_BAND_IDX2: + case RTW89_CH_6G_BAND_IDX3: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1]; + break; + case RTW89_CH_6G_BAND_IDX4: + case RTW89_CH_6G_BAND_IDX5: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2]; + break; + case RTW89_CH_6G_BAND_IDX6: + case RTW89_CH_6G_BAND_IDX7: + thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3]; + thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3]; + thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3]; + thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3]; + break; + } + + if (path == RF_PATH_A) { + thermal = tssi_info->thermal[RF_PATH_A]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); + + if (thermal == 0xff) { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_a[i++] : + -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 
+ thm_up_a[i++] : + thm_up_a[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); + + } else { + thermal = tssi_info->thermal[RF_PATH_B]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1); + + if (thermal == 0xff) { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_b[i++] : + -thm_down_b[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + thm_up_b[i++] : + thm_up_b[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852C_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0); + } +#undef RTW8852C_TSSI_GET_VAL +} + +static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + + if (path == RF_PATH_A) { + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_slope_cal_org_defs_2g_a_tbl, + &rtw8852c_tssi_slope_cal_org_defs_5g_a_tbl); + } else { + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852c_tssi_slope_cal_org_defs_2g_b_tbl, + &rtw8852c_tssi_slope_cal_org_defs_5g_b_tbl); + } +} + +static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + enum rtw89_band band = rtwdev->hal.current_band_type; + const struct rtw89_rfk_tbl *tbl; + + if (path == RF_PATH_A) { + if (band == RTW89_BAND_2G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_a_tbl; + else if (band == RTW89_BAND_6G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_a_tbl; + else + tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_a_tbl; + } else { + if (band == RTW89_BAND_2G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_2g_b_tbl; + else if (band == RTW89_BAND_6G) + tbl = &rtw8852c_tssi_set_aligk_default_defs_6g_b_tbl; + else + tbl = &rtw8852c_tssi_set_aligk_default_defs_5g_b_tbl; + } + + rtw89_rfk_parser(rtwdev, tbl); +} + +static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_slope_defs_a_tbl, + &rtw8852c_tssi_slope_defs_b_tbl); +} + +static void _tssi_run_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + 
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_run_slope_defs_a_tbl, + &rtw8852c_tssi_run_slope_defs_b_tbl); +} + +static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_track_defs_a_tbl, + &rtw8852c_tssi_track_defs_b_tbl); +} + +static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_tssi_txagc_ofst_mv_avg_defs_a_tbl, + &rtw8852c_tssi_txagc_ofst_mv_avg_defs_b_tbl); +} + +static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + for (i = path; i < path_max; i++) { + _tssi_set_track(rtwdev, phy, i); + _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i); + + rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A, + &rtw8852c_tssi_enable_defs_a_tbl, + &rtw8852c_tssi_enable_defs_b_tbl); + + tssi_info->base_thermal[i] = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]); + rtwdev->is_tssi_mode[i] = true; + } +} + +static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + for (i = path; i < path_max; i++) { + if (i == RF_PATH_A) { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_a_tbl); + rtwdev->is_tssi_mode[RF_PATH_A] = false; + } else if (i == RF_PATH_B) { + rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_disable_defs_b_tbl); + rtwdev->is_tssi_mode[RF_PATH_B] = false; + } + } +} + +static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 13: + return 4; + case 14: + return 5; + } + + return 0; +} + +#define TSSI_EXTRA_GROUP_BIT (BIT(31)) +#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) +#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) + +static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 14: + return 4; + case 36 ... 40: + return 5; + case 41 ... 43: + return TSSI_EXTRA_GROUP(5); + case 44 ... 48: + return 6; + case 49 ... 51: + return TSSI_EXTRA_GROUP(6); + case 52 ... 56: + return 7; + case 57 ... 59: + return TSSI_EXTRA_GROUP(7); + case 60 ... 64: + return 8; + case 100 ... 104: + return 9; + case 105 ... 107: + return TSSI_EXTRA_GROUP(9); + case 108 ... 112: + return 10; + case 113 ... 115: + return TSSI_EXTRA_GROUP(10); + case 116 ... 120: + return 11; + case 121 ... 123: + return TSSI_EXTRA_GROUP(11); + case 124 ... 128: + return 12; + case 129 ... 131: + return TSSI_EXTRA_GROUP(12); + case 132 ... 136: + return 13; + case 137 ... 
139: + return TSSI_EXTRA_GROUP(13); + case 140 ... 144: + return 14; + case 149 ... 153: + return 15; + case 154 ... 156: + return TSSI_EXTRA_GROUP(15); + case 157 ... 161: + return 16; + case 162 ... 164: + return TSSI_EXTRA_GROUP(16); + case 165 ... 169: + return 17; + case 170 ... 172: + return TSSI_EXTRA_GROUP(17); + case 173 ... 177: + return 18; + } + + return 0; +} + +static u32 _tssi_get_6g_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 5: + return 0; + case 6 ... 8: + return TSSI_EXTRA_GROUP(0); + case 9 ... 13: + return 1; + case 14 ... 16: + return TSSI_EXTRA_GROUP(1); + case 17 ... 21: + return 2; + case 22 ... 24: + return TSSI_EXTRA_GROUP(2); + case 25 ... 29: + return 3; + case 33 ... 37: + return 4; + case 38 ... 40: + return TSSI_EXTRA_GROUP(4); + case 41 ... 45: + return 5; + case 46 ... 48: + return TSSI_EXTRA_GROUP(5); + case 49 ... 53: + return 6; + case 54 ... 56: + return TSSI_EXTRA_GROUP(6); + case 57 ... 61: + return 7; + case 65 ... 69: + return 8; + case 70 ... 72: + return TSSI_EXTRA_GROUP(8); + case 73 ... 77: + return 9; + case 78 ... 80: + return TSSI_EXTRA_GROUP(9); + case 81 ... 85: + return 10; + case 86 ... 88: + return TSSI_EXTRA_GROUP(10); + case 89 ... 93: + return 11; + case 97 ... 101: + return 12; + case 102 ... 104: + return TSSI_EXTRA_GROUP(12); + case 105 ... 109: + return 13; + case 110 ... 112: + return TSSI_EXTRA_GROUP(13); + case 113 ... 117: + return 14; + case 118 ... 120: + return TSSI_EXTRA_GROUP(14); + case 121 ... 125: + return 15; + case 129 ... 133: + return 16; + case 134 ... 136: + return TSSI_EXTRA_GROUP(16); + case 137 ... 141: + return 17; + case 142 ... 144: + return TSSI_EXTRA_GROUP(17); + case 145 ... 149: + return 18; + case 150 ... 152: + return TSSI_EXTRA_GROUP(18); + case 153 ... 157: + return 19; + case 161 ... 165: + return 20; + case 166 ... 168: + return TSSI_EXTRA_GROUP(20); + case 169 ... 173: + return 21; + case 174 ... 176: + return TSSI_EXTRA_GROUP(21); + case 177 ... 181: + return 22; + case 182 ... 184: + return TSSI_EXTRA_GROUP(22); + case 185 ... 189: + return 23; + case 193 ... 197: + return 24; + case 198 ... 200: + return TSSI_EXTRA_GROUP(24); + case 201 ... 205: + return 25; + case 206 ... 208: + return TSSI_EXTRA_GROUP(25); + case 209 ... 213: + return 26; + case 214 ... 216: + return TSSI_EXTRA_GROUP(26); + case 217 ... 221: + return 27; + case 225 ... 229: + return 28; + case 230 ... 232: + return TSSI_EXTRA_GROUP(28); + case 233 ... 237: + return 29; + case 238 ... 240: + return TSSI_EXTRA_GROUP(29); + case 241 ... 245: + return 30; + case 246 ... 248: + return TSSI_EXTRA_GROUP(30); + case 249 ... 253: + return 31; + } + + return 0; +} + +static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 8: + return 0; + case 9 ... 14: + return 1; + case 36 ... 48: + return 2; + case 49 ... 51: + return TSSI_EXTRA_GROUP(2); + case 52 ... 64: + return 3; + case 100 ... 112: + return 4; + case 113 ... 115: + return TSSI_EXTRA_GROUP(4); + case 116 ... 128: + return 5; + case 132 ... 144: + return 6; + case 149 ... 177: + return 7; + } + + return 0; +} + +static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 13: + return 0; + case 14 ... 16: + return TSSI_EXTRA_GROUP(0); + case 17 ... 29: + return 1; + case 33 ... 45: + return 2; + case 46 ... 48: + return TSSI_EXTRA_GROUP(2); + case 49 ... 61: + return 3; + case 65 ... 77: + return 4; + case 78 ... 80: + return TSSI_EXTRA_GROUP(4); + case 81 ... 
93: + return 5; + case 97 ... 109: + return 6; + case 110 ... 112: + return TSSI_EXTRA_GROUP(6); + case 113 ... 125: + return 7; + case 129 ... 141: + return 8; + case 142 ... 144: + return TSSI_EXTRA_GROUP(8); + case 145 ... 157: + return 9; + case 161 ... 173: + return 10; + case 174 ... 176: + return TSSI_EXTRA_GROUP(10); + case 177 ... 189: + return 11; + case 193 ... 205: + return 12; + case 206 ... 208: + return TSSI_EXTRA_GROUP(12); + case 209 ... 221: + return 13; + case 225 ... 237: + return 14; + case 238 ... 240: + return TSSI_EXTRA_GROUP(14); + case 241 ... 253: + return 15; + } + + return 0; +} + +static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + enum rtw89_band band = rtwdev->hal.current_band_type; + u8 ch = rtwdev->hal.current_channel; + u32 gidx, gidx_1st, gidx_2nd; + s8 de_1st; + s8 de_2nd; + s8 val; + + if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) { + gidx = _tssi_get_ofdm_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", + path, gidx); + + if (IS_TSSI_EXTRA_GROUP(gidx)) { + gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); + gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); + de_1st = tssi_info->tssi_mcs[path][gidx_1st]; + de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; + val = (de_1st + de_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", + path, val, de_1st, de_2nd); + } else { + val = tssi_info->tssi_mcs[path][gidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); + } + } else { + gidx = _tssi_get_6g_ofdm_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", + path, gidx); + + if (IS_TSSI_EXTRA_GROUP(gidx)) { + gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); + gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); + de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st]; + de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd]; + val = (de_1st + de_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", + path, val, de_1st, de_2nd); + } else { + val = tssi_info->tssi_6g_mcs[path][gidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); + } + } + + return val; +} + +static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + enum rtw89_band band = rtwdev->hal.current_band_type; + u8 ch = rtwdev->hal.current_channel; + u32 tgidx, tgidx_1st, tgidx_2nd; + s8 tde_1st = 0; + s8 tde_2nd = 0; + s8 val; + + if (band == RTW89_BAND_2G || band == RTW89_BAND_5G) { + tgidx = _tssi_get_trim_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", + path, tgidx); + + if (IS_TSSI_EXTRA_GROUP(tgidx)) { + tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); + tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); + tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; + tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; + val = (tde_1st + tde_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", + path, val, tde_1st, tde_2nd); + } else { + val = tssi_info->tssi_trim[path][tgidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", + path, val); + } + } else { + tgidx = 
_tssi_get_6g_trim_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", + path, tgidx); + + if (IS_TSSI_EXTRA_GROUP(tgidx)) { + tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); + tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); + tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st]; + tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd]; + val = (tde_1st + tde_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", + path, val, tde_1st, tde_2nd); + } else { + val = tssi_info->tssi_trim_6g[path][tgidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", + path, val); + } + } + + return val; +} + +static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 ch = rtwdev->hal.current_channel; + u8 gidx; + s8 ofdm_de; + s8 trim_de; + s32 val; + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", + phy, ch); + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + for (i = path; i < path_max; i++) { + gidx = _tssi_get_cck_group(rtwdev, ch); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = tssi_info->tssi_cck[i][gidx] + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", + i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", + _tssi_de_cck_long[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], + _TSSI_DE_MASK)); + + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = ofdm_de + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", + i, ofdm_de, trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", + _tssi_de_mcs_20m[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], + _TSSI_DE_MASK)); + } +} + +static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en, + enum rtw89_rf_path path) +{ + static const u32 tssi_trk[2] = {0x5818, 0x7818}; + static const u32 tssi_en[2] = {0x5820, 0x7820}; + + if (en) { + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0); + rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0); + if (rtwdev->dbcc_en && path == RF_PATH_B) + _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1); + else + _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0); + } else { + rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1); + rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1); + } +} + +void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, 
u8 phy_idx) +{ + if (!rtwdev->dbcc_en) { + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); + } else { + if (phy_idx == RTW89_PHY_0) + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A); + else + rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B); + } +} + static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, enum rtw89_bandwidth bw, bool is_dav) { @@ -721,3 +1627,130 @@ void rtw8852c_dack(struct rtw89_dev *rtwdev) _dac_cal(rtwdev, false); rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); } + +void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + _tssi_disable(rtwdev, phy); + + for (i = path; i < path_max; i++) { + _tssi_set_sys(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i); + _tssi_set_bbgain_split(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_slope_cal_org(rtwdev, phy, i); + _tssi_set_aligk_default(rtwdev, phy, i); + _tssi_set_slope(rtwdev, phy, i); + _tssi_run_slope(rtwdev, phy, i); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); +} + +void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", + __func__, phy); + + if (!rtwdev->is_tssi_mode[RF_PATH_A]) + return; + if (!rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + if (rtwdev->dbcc_en) { + if (phy == RTW89_PHY_0) { + path = RF_PATH_A; + path_max = RF_PATH_B; + } else if (phy == RTW89_PHY_1) { + path = RF_PATH_B; + path_max = RF_PATH_NUM_8852C; + } + } + + _tssi_disable(rtwdev, phy); + + for (i = path; i < path_max; i++) { + _tssi_set_sys(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_slope_cal_org(rtwdev, phy, i); + _tssi_set_aligk_default(rtwdev, phy, i); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); +} + +static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, bool enable) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 i; + + if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B]) + return; + + if (enable) { + /* SCAN_START */ + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_A] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, + B_TXAGC_BB); + if (tssi_info->default_txagc_offset[RF_PATH_A]) + break; + } + } + + if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 && + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) { + for (i = 0; i < 6; i++) { + tssi_info->default_txagc_offset[RF_PATH_B] = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, + B_TXAGC_BB_S1); + if (tssi_info->default_txagc_offset[RF_PATH_B]) + break; + } + } + } else { + /* SCAN_END */ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, + tssi_info->default_txagc_offset[RF_PATH_A]); + rtw89_phy_write32_mask(rtwdev, 
R_P1_TSSI_TRK, B_P1_TSSI_OFT, + tssi_info->default_txagc_offset[RF_PATH_B]); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); + } +} + +void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, + bool scan_start, enum rtw89_phy_idx phy_idx) +{ + if (scan_start) + rtw8852c_tssi_default_txagc(rtwdev, phy_idx, true); + else + rtw8852c_tssi_default_txagc(rtwdev, phy_idx, false); +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index 4ce76ef4c5e6..a16ad9305ba4 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -8,6 +8,11 @@ #include "core.h" void rtw8852c_dack(struct rtw89_dev *rtwdev); +void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx); +void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, + enum rtw89_phy_idx phy_idx); void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, struct rtw89_channel_params *param, enum rtw89_phy_idx phy_idx); -- cgit v1.2.3 From 30052c5a1c997da1c2523336b59d9a5eeb1905c9 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:05 +0800 Subject: rtw89: 8852c: rfk: add RCK RCK is synchronize RC calibration. It needs to be triggered only once when interface is going to up. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 43 +++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 1 + 3 files changed, 45 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 36406488e60e..6db61e6df1ff 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1777,6 +1777,7 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) memset(mcc_info, 0, sizeof(*mcc_info)); rtw8852c_lck_init(rtwdev); + rtw8852c_rck(rtwdev); rtw8852c_dack(rtwdev); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 974bb5794ad9..4025245cd7e6 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -470,6 +470,41 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force) rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); } +static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u32 rf_reg5, rck_val = 0; + u32 val; + int ret; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); + + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); + + /* RCK trigger */ + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); + + ret = 
read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20, + false, rtwdev, path, 0x1c, BIT(3)); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n"); + + rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RCK] RF 0x1b / 0x1c = 0x%x / 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK)); +} + static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path) { @@ -1619,6 +1654,14 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, param->bandwidth); } +void rtw8852c_rck(struct rtw89_dev *rtwdev) +{ + u8 path; + + for (path = 0; path < 2; path++) + _rck(rtwdev, path); +} + void rtw8852c_dack(struct rtw89_dev *rtwdev) { u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index a16ad9305ba4..faacdf17a928 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -7,6 +7,7 @@ #include "core.h" +void rtw8852c_rck(struct rtw89_dev *rtwdev); void rtw8852c_dack(struct rtw89_dev *rtwdev); void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); -- cgit v1.2.3 From ac91be9756168c7834ffb2ab5692c1dfbf2e957a Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:06 +0800 Subject: rtw89: 8852c: rfk: add RX DCK RX DCK is receiver DC calibration. Do this calibration when bringing up interface and going to run on AP channel. 
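As a reading aid only (not part of the patch), here is a minimal sketch of the kick-and-poll idiom the new _rx_dck_toggle() below relies on: the calibration is started by pulsing RR_DCK_LV, then the done bit in RF register 0x93 is busy-polled. Register names and poll parameters are taken from the hunks in this patch; the helper name is invented for illustration and assumes the driver's usual headers are in scope.

static int rx_dck_wait_done_sketch(struct rtw89_dev *rtwdev, u8 path)
{
	u32 val;
	int ret;

	/* pulse the RX DCK trigger level to start the calibration */
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);

	/* poll the done bit (RF 0x93, BIT(5)) every 2us, up to 1ms */
	ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 1000,
				       false, rtwdev, path, 0x93, BIT(5));

	/* drop the trigger again, as the real _rx_dck_toggle() does */
	rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);

	return ret;
}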
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 9 ++- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 67 +++++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 1 + 4 files changed, 78 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 028c88130823..c65598a7af26 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3312,17 +3312,24 @@ #define RR_RXIQGEN_ATTL GENMASK(12, 8) #define RR_RXIQGEN_ATTH GENMASK(14, 13) #define RR_RXBB2 0x8f -#define RR_EN_TIA_IDA GENMASK(11, 10) #define RR_RXBB2_DAC_EN BIT(13) +#define RR_RXBB2_CKT BIT(12) +#define RR_EN_TIA_IDA GENMASK(11, 10) +#define RR_RXBB2_IDAC GENMASK(11, 9) +#define RR_RXBB2_EBW GENMASK(6, 5) #define RR_XALNA2 0x90 #define RR_XALNA2_SW GENMASK(1, 0) #define RR_DCK 0x92 +#define RR_DCK_DONE GENMASK(7, 5) #define RR_DCK_FINE BIT(1) #define RR_DCK_LV BIT(0) #define RR_DCK1 0x93 +#define RR_DCK1_CLR GENMASK(3, 0) #define RR_DCK1_SEL BIT(3) #define RR_DCK2 0x94 #define RR_DCK2_CYCLE GENMASK(7, 2) +#define RR_DCKC 0x95 +#define RR_DCKC_CHK BIT(3) #define RR_MIXER 0x9f #define RR_MIXER_GN GENMASK(4, 3) #define RR_XTALX2 0xb8 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 6db61e6df1ff..cbf69670eafe 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1779,12 +1779,14 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev) rtw8852c_rck(rtwdev); rtw8852c_dack(rtwdev); + rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false); } static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) { enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + rtw8852c_rx_dck(rtwdev, phy_idx, false); rtw8852c_tssi(rtwdev, phy_idx); rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 4025245cd7e6..197d177494da 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -505,6 +505,42 @@ static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK)); } +static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path) +{ + int ret; + u32 val; + + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); + + ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, + 2, 1000, false, rtwdev, path, 0x93, BIT(5)); + if (ret) + rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path); + else + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] S%d RXDCK finish\n", path); + + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); +} + +static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path, + bool is_afe) +{ + u8 res; + + rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0); + + _rx_dck_toggle(rtwdev, path); + if (rtw89_read_rf(rtwdev, path, RR_DCKC, RR_DCKC_CHK) == 0) + return; + res = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_DONE); + if (res > 1) { + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, res); + _rx_dck_toggle(rtwdev, path); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_IDAC, 0x1); + } +} + static void 
_tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path) { @@ -1671,6 +1707,37 @@ void rtw8852c_dack(struct rtw89_dev *rtwdev) rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); } +#define RXDCK_VER_8852C 0xe + +void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe) +{ + u8 path, kpath; + u32 rf_reg5; + + kpath = _kpath(rtwdev, phy); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n", + RXDCK_VER_8852C, rtwdev->hal.cv); + + for (path = 0; path < 2; path++) { + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + if (!(kpath & BIT(path))) + continue; + + if (rtwdev->is_tssi_mode[path]) + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, 0x1); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + _set_rx_dck(rtwdev, phy, path, is_afe); + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + if (rtwdev->is_tssi_mode[path]) + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, 0x0); + } +} + void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) { u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index faacdf17a928..fd07028a4964 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -9,6 +9,7 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev); void rtw8852c_dack(struct rtw89_dev *rtwdev); +void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe); void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx); -- cgit v1.2.3 From 2da8109d9885395b2439d8460cc7f616a9ccb3a8 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:07 +0800 Subject: rtw89: 8852c: rfk: add IQK IQ signal calibration is a very important calibration to yield good RF performance. We do this calibration only if we are going to run on AP channel. During scanning phase, without this calibration RF performance is still acceptable because it transmits with low data rate at this phase. 
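For orientation through the large diff below, this is a condensed sketch (not part of the patch) of the order of operations the new rtw8852c_iqk() entry point follows, per the hunks in this patch; the BTC RFK notifications are omitted for brevity and the function name is illustrative only.

static void rtw8852c_iqk_flow_sketch(struct rtw89_dev *rtwdev,
				     enum rtw89_phy_idx phy_idx)
{
	u32 tx_en;

	/* quiesce scheduled TX and wait for the paths to settle in RX mode */
	rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
	_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));

	_iqk_init(rtwdev);		/* one-time bookkeeping reset */
	_iqk(rtwdev, phy_idx, false);	/* per-path LOK + TX/RX IQK */

	rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
}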
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 3 + drivers/net/wireless/realtek/rtw89/reg.h | 55 +- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 1043 +++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 1 + 5 files changed, 1099 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 41d23711dc35..8fd719f93d6c 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2649,6 +2649,7 @@ struct rtw89_lck_info { struct rtw89_iqk_info { bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; + bool lok_fail[RTW89_IQK_PATH_NR]; bool iqk_tx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; bool iqk_rx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; u32 iqk_fail_cnt; @@ -2677,6 +2678,8 @@ struct rtw89_iqk_info { u32 syn1to2; u8 iqk_mcc_ch[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; u8 iqk_table_idx[RTW89_IQK_PATH_NR]; + u32 lok_idac[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; + u32 lok_vbuf[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR]; }; #define RTW89_DPK_RF_PATH 2 diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index c65598a7af26..120fc13520fc 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3202,6 +3202,7 @@ #define RR_MOD_V_DPK 0x5 #define RR_MOD_V_RXK1 0x6 #define RR_MOD_V_RXK2 0x7 +#define RR_MOD_NBW GENMASK(15, 14) #define RR_MOD_M_RXG GENMASK(13, 4) #define RR_MOD_M_RXBB GENMASK(9, 5) #define RR_MODOPT 0x01 @@ -3210,8 +3211,20 @@ #define RR_WLSEL_AG GENMASK(18, 16) #define RR_RSV1 0x05 #define RR_RSV1_RST BIT(0) +#define RR_BBDC 0x10005 +#define RR_BBDC_SEL BIT(0) #define RR_DTXLOK 0x08 #define RR_RSV2 0x09 +#define RR_LOKVB 0x0a +#define RR_LOKVB_COI GENMASK(19, 14) +#define RR_LOKVB_COQ GENMASK(9, 4) +#define RR_TXIG 0x11 +#define RR_TXIG_TG GENMASK(16, 12) +#define RR_TXIG_GR1 GENMASK(6, 4) +#define RR_TXIG_GR0 GENMASK(1, 0) +#define RR_CHTR 0x17 +#define RR_CHTR_MOD GENMASK(11, 10) +#define RR_CHTR_TXRX GENMASK(9, 0) #define RR_CFGCH 0x18 #define RR_CFGCH_V1 0x10018 #define RR_CFGCH_BAND1 GENMASK(17, 16) @@ -3242,6 +3255,8 @@ #define RR_RXKPLL_OFF GENMASK(5, 0) #define RR_RXKPLL_POW BIT(19) #define RR_RSV4 0x1f +#define RR_RSV4_AGH GENMASK(17, 16) +#define RR_RSV4_PLLCH GENMASK(9, 0) #define RR_RXK 0x20 #define RR_RXK_SEL2G BIT(8) #define RR_RXK_SEL5G BIT(7) @@ -3264,8 +3279,9 @@ #define RR_TXG2_ATT0 BIT(11) #define RR_BSPAD 0x54 #define RR_TXGA 0x55 -#define RR_TXGA_LOK_EN BIT(0) #define RR_TXGA_TRK_EN BIT(7) +#define RR_TXGA_LOK_EXT GENMASK(4, 0) +#define RR_TXGA_LOK_EN BIT(0) #define RR_GAINTX 0x56 #define RR_GAINTX_ALL GENMASK(15, 0) #define RR_GAINTX_PAD GENMASK(9, 5) @@ -3288,26 +3304,37 @@ #define RR_BIASA2 0x63 #define RR_BIASA2_LB GENMASK(4, 2) #define RR_TXATANK 0x64 +#define RR_TXATANK_LBSW2 GENMASK(17, 15) #define RR_TXATANK_LBSW GENMASK(16, 15) +#define RR_TXA2 0x65 +#define RR_TXA2_LDO GENMASK(19, 16) #define RR_TRXIQ 0x66 #define RR_RSV6 0x6d #define RR_TXPOW 0x7f -#define RR_TXPOW_TXG BIT(1) #define RR_TXPOW_TXA BIT(8) +#define RR_TXPOW_TXAS BIT(7) +#define RR_TXPOW_TXG BIT(1) #define RR_RXPOW 0x80 #define RR_RXPOW_IQK GENMASK(17, 16) #define RR_RXBB 0x83 +#define RR_RXBB_VOBUF GENMASK(15, 
12) #define RR_RXBB_C2G GENMASK(16, 10) #define RR_RXBB_C1G GENMASK(9, 8) #define RR_RXBB_ATTR GENMASK(7, 4) #define RR_RXBB_ATTC GENMASK(2, 0) +#define RR_RXG 0x84 +#define RR_RXG_IQKMOD GENMASK(19, 16) #define RR_XGLNA2 0x85 #define RR_XGLNA2_SW GENMASK(1, 0) +#define RR_RXAE 0x89 +#define RR_RXAE_IQKMOD GENMASK(3, 0) #define RR_RXA 0x8a #define RR_RXA_DPK GENMASK(9, 8) #define RR_RXA2 0x8c -#define RR_RXA2_C2 GENMASK(9, 3) #define RR_RXA2_C1 GENMASK(12, 10) +#define RR_RXA2_C2 GENMASK(9, 3) +#define RR_RXA2_IATT GENMASK(7, 4) +#define RR_RXA2_ATT GENMASK(3, 0) #define RR_RXIQGEN 0x8d #define RR_RXIQGEN_ATTL GENMASK(12, 8) #define RR_RXIQGEN_ATTH GENMASK(14, 13) @@ -3336,6 +3363,8 @@ #define RR_MALSEL 0xbe #define RR_LCK_TRG 0xd3 #define RR_LCK_TRGSEL BIT(8) +#define RR_IQKPLL 0xdc +#define RR_IQKPLL_MOD GENMASK(9, 8) #define RR_RCKD 0xde #define RR_RCKD_POW GENMASK(19, 13) #define RR_RCKD_BW BIT(2) @@ -3559,6 +3588,11 @@ #define B_S0_ADDCK_Q GENMASK(19, 10) #define R_ADC_FIFO 0x20fc #define B_ADC_FIFO_RST GENMASK(31, 24) +#define B_ADC_FIFO_RXK GENMASK(31, 16) +#define B_ADC_FIFO_A3 BIT(28) +#define B_ADC_FIFO_A2 BIT(24) +#define B_ADC_FIFO_A1 BIT(20) +#define B_ADC_FIFO_A0 BIT(16) #define R_TXFIR0 0x2300 #define B_TXFIR_C01 GENMASK(23, 0) #define R_TXFIR2 0x2304 @@ -3727,6 +3761,8 @@ #define B_P0_NBIIDX_NOTCH_EN_V1 BIT(12) #define R_P1_MODE 0x4718 #define B_P1_MODE_SEL GENMASK(31, 30) +#define R_P0_AGC_CTL 0x4730 +#define B_P0_AGC_EN BIT(31) #define R_PATH1_LNA_INIT 0x473C #define B_PATH1_LNA_INIT_IDX_MSK GENMASK(26, 24) #define R_PATH1_TIA_INIT 0x4748 @@ -3776,6 +3812,8 @@ #define B_BK_FC0_INV_MSK_V1 GENMASK(18, 0) #define R_CCK_FC0_INV_V1 0x4A20 #define B_CCK_FC0_INV_MSK_V1 GENMASK(18, 0) +#define R_P1_AGC_CTL 0x4A9C +#define B_P1_AGC_EN BIT(31) #define R_PATH0_RXBB_V1 0x4AD4 #define B_PATH0_RXBB_MSK_V1 GENMASK(31, 0) #define R_PATH1_RXBB_V1 0x4AE0 @@ -3822,6 +3860,12 @@ #define B_CFO_COMP_VALID_BIT BIT(29) #define B_CFO_COMP_WEIGHT_MSK GENMASK(27, 24) #define B_CFO_COMP_VAL_MSK GENMASK(11, 0) +#define R_UPD_CLK 0x5670 +#define B_DAC_VAL BIT(31) +#define B_ACK_VAL GENMASK(30, 29) +#define B_DPD_DIS BIT(14) +#define B_DPD_GDIS BIT(13) +#define B_IQK_RFC_ON BIT(1) #define R_DPD_OFT_EN 0x5800 #define B_DPD_OFT_EN BIT(28) #define R_DPD_OFT_ADDR 0x5804 @@ -3932,8 +3976,9 @@ #define R_IQK_DIF2 0x8024 #define B_IQK_DIF2_RXPI GENMASK(19, 0) #define R_IQK_DIF4 0x802C -#define B_IQK_DIF4_TXT GENMASK(11, 0) #define B_IQK_DIF4_RXT GENMASK(27, 16) +#define B_IQK_DIF4_TXT GENMASK(11, 0) +#define IQK_DF4_TXT_8_25MHZ 0x021 #define R_IQK_CFG 0x8034 #define B_IQK_CFG_SET GENMASK(5, 4) #define R_TPG_MOD 0x806C @@ -3995,8 +4040,10 @@ #define R_CFIR_MAP 0x8150 #define R_CFIR_LUT 0x8154 #define B_CFIR_LUT_SEL BIT(8) +#define B_CFIR_LUT_SET BIT(4) #define B_CFIR_LUT_G3 BIT(3) #define B_CFIR_LUT_G2 BIT(2) +#define B_CFIR_LUT_GP_V1 GENMASK(2, 0) #define B_CFIR_LUT_GP GENMASK(1, 0) #define R_DPD_V1 0x81a0 #define R_DPD_CH0 0x81AC diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index cbf69670eafe..0b722675aad4 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1787,6 +1787,7 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) enum rtw89_phy_idx phy_idx = RTW89_PHY_0; rtw8852c_rx_dck(rtwdev, phy_idx, false); + rtw8852c_iqk(rtwdev, phy_idx); rtw8852c_tssi(rtwdev, phy_idx); rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c 
b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 197d177494da..3108fa9cd5b2 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -21,6 +21,40 @@ static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852C] = {0x5850, 0x7850}; static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852C] = {0x5828, 0x7828}; static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852C] = {0x5830, 0x7830}; +static const u32 rtw8852c_backup_bb_regs[] = { + 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0e8, 0x823c, 0x8224, 0x8220, + 0xc1d4, 0xc1d8, 0xc1e8 +}; + +static const u32 rtw8852c_backup_rf_regs[] = { + 0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005 +}; + +#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs) +#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852c_backup_rf_regs) + +#define RXK_GROUP_NR 4 +static const u32 _rxk_a6_idxrxgain[RXK_GROUP_NR] = {0x190, 0x196, 0x290, 0x316}; +static const u32 _rxk_a6_idxattc2[RXK_GROUP_NR] = {0x00, 0x0, 0x00, 0x00}; +static const u32 _rxk_a_idxrxgain[RXK_GROUP_NR] = {0x190, 0x198, 0x310, 0x318}; +static const u32 _rxk_a_idxattc2[RXK_GROUP_NR] = {0x00, 0x00, 0x00, 0x00}; +static const u32 _rxk_g_idxrxgain[RXK_GROUP_NR] = {0x252, 0x26c, 0x350, 0x360}; +static const u32 _rxk_g_idxattc2[RXK_GROUP_NR] = {0x00, 0x07, 0x00, 0x3}; + +#define TXK_GROUP_NR 3 +static const u32 _txk_a6_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; +static const u32 _txk_a6_track_range[TXK_GROUP_NR] = {0x6, 0x7, 0x7}; +static const u32 _txk_a6_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e}; +static const u32 _txk_a6_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12}; +static const u32 _txk_a_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; +static const u32 _txk_a_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x7}; +static const u32 _txk_a_gain_bb[TXK_GROUP_NR] = {0x12, 0x09, 0x0e}; +static const u32 _txk_a_itqt[TXK_GROUP_NR] = {0x12, 0x12, 0x12}; +static const u32 _txk_g_power_range[TXK_GROUP_NR] = {0x0, 0x0, 0x0}; +static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6}; +static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e}; +static const u32 _txk_g_itqt[TXK_GROUP_NR] = { 0x12, 0x12, 0x12}; + static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n", @@ -35,6 +69,82 @@ static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) return RF_B; } +static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + backup_bb_reg_val[i] = + rtw89_phy_read32_mask(rtwdev, rtw8852c_backup_bb_regs[i], + MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]backup bb reg : %x, value =%x\n", + rtw8852c_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], + u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + backup_rf_reg_val[i] = + rtw89_read_rf(rtwdev, rf_path, + rtw8852c_backup_rf_regs[i], RFREG_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path, + rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + rtw89_phy_write32_mask(rtwdev, rtw8852c_backup_bb_regs[i], + MASKDWORD, backup_bb_reg_val[i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]restore bb reg : %x, value =%x\n", + rtw8852c_backup_bb_regs[i], 
backup_bb_reg_val[i]); + } +} + +static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], + u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + rtw89_write_rf(rtwdev, rf_path, rtw8852c_backup_rf_regs[i], + RFREG_MASK, backup_rf_reg_val[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path, + rtw8852c_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) +{ + u8 path; + u32 rf_mode; + int ret; + + for (path = 0; path < RF_PATH_MAX; path++) { + if (!(kpath & BIT(path))) + continue; + + ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2, + 2, 5000, false, rtwdev, path, 0x00, + RR_MOD_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", + path, ret); + } +} + static void _dack_dump(struct rtw89_dev *rtwdev) { struct rtw89_dack_info *dack = &rtwdev->dack; @@ -470,6 +580,843 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force) rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); } +#define RTW8852C_NCTL_VER 0xd +#define RTW8852C_IQK_VER 0x2a +#define RTW8852C_IQK_SS 2 +#define RTW8852C_IQK_THR_REK 8 +#define RTW8852C_IQK_CFIR_GROUP_NR 4 + +enum rtw8852c_iqk_type { + ID_TXAGC, + ID_G_FLOK_COARSE, + ID_A_FLOK_COARSE, + ID_G_FLOK_FINE, + ID_A_FLOK_FINE, + ID_FLOK_VBUFFER, + ID_TXK, + ID_RXAGC, + ID_RXK, + ID_NBTXK, + ID_NBRXK, +}; + +static void rtw8852c_disable_rxagc(struct rtw89_dev *rtwdev, u8 path, u8 en_rxgac) +{ + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_P0_AGC_CTL, B_P0_AGC_EN, en_rxgac); + else + rtw89_phy_write32_mask(rtwdev, R_P1_AGC_CTL, B_P1_AGC_EN, en_rxgac); +} + +static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101); + else + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0202); + + switch (iqk_info->iqk_bw[path]) { + case RTW89_CHANNEL_WIDTH_20: + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); + rtw8852c_rxck_force(rtwdev, path, true, ADC_480M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); + rtw8852c_rxck_force(rtwdev, path, true, ADC_960M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); + break; + case RTW89_CHANNEL_WIDTH_160: + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1); + rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1); + rtw89_phy_write32_mask(rtwdev, 
R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb); + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1); + break; + default: + break; + } + + rtw89_rfk_parser(rtwdev, &rtw8852c_iqk_rxk_cfg_defs_tbl); + + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1101); + else + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x2202); +} + +static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype) +{ + u32 tmp; + u32 val; + int ret; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 1, 8200, false, rtwdev, 0xbff8, MASKBYTE0); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n"); + + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret); + tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp); + + return false; +} + +static bool _iqk_one_shot(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path, u8 ktype) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 addr_rfc_ctl = R_UPD_CLK + (path << 13); + u32 iqk_cmd; + bool fail; + + switch (ktype) { + case ID_TXAGC: + iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1); + break; + case ID_A_FLOK_COARSE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x008 | (1 << (4 + path)); + break; + case ID_G_FLOK_COARSE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x108 | (1 << (4 + path)); + break; + case ID_A_FLOK_FINE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x508 | (1 << (4 + path)); + break; + case ID_G_FLOK_FINE: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x208 | (1 << (4 + path)); + break; + case ID_FLOK_VBUFFER: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x308 | (1 << (4 + path)); + break; + case ID_TXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); + iqk_cmd = 0x008 | (1 << (4 + path)) | ((0x8 + iqk_info->iqk_bw[path]) << 8); + break; + case ID_RXAGC: + iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1); + break; + case ID_RXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x008 | (1 << (4 + path)) | ((0xc + iqk_info->iqk_bw[path]) << 8); + break; + case ID_NBTXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); + iqk_cmd = 0x408 | (1 << (4 + path)); + break; + case ID_NBRXK: + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x1); + iqk_cmd = 0x608 | (1 << (4 + path)); + break; + default: + return false; + } + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); + fsleep(15); + fail = _iqk_check_cal(rtwdev, path, ktype); + rtw89_phy_write32_mask(rtwdev, addr_rfc_ctl, 0x00000002, 0x0); + + return fail; +} + +static bool _rxk_group_sel(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u32 tmp; + u32 bkrf0; + u8 gp; + + bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW); + if (path == RF_PATH_B) { + rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, 
RR_CHTR_TXRX); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp); + } + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9); + break; + } + + fsleep(10); + + for (gp = 0; gp < RXK_GROUP_NR; gp++) { + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, + _rxk_g_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, + _rxk_g_idxattc2[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, + _rxk_a_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, + _rxk_a_idxattc2[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, + _rxk_a6_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, + _rxk_a6_idxattc2[gp]); + break; + } + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SET, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_GP_V1, gp); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + } + + if (path == RF_PATH_B) + rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0); + + if (fail) { + iqk_info->nb_rxcfir[path] = 0x40000002; + iqk_info->is_wb_rxiqk[path] = false; + } else { + iqk_info->nb_rxcfir[path] = 0x40000000; + iqk_info->is_wb_rxiqk[path] = true; + } + + return false; +} + +static bool _iqk_nbrxk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u32 tmp; + u32 bkrf0; + u8 gp = 0x2; + + bkrf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_NBW); + if (path == RF_PATH_B) { + rtw89_write_rf(rtwdev, RF_PATH_B, RR_IQKPLL, RR_IQKPLL_MOD, 0x3); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_MOD); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_AGH, tmp); + tmp = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CHTR, RR_CHTR_TXRX); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV4, RR_RSV4_PLLCH, tmp); + } + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXG, RR_RXG_IQKMOD, 0x9); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x8); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXAE, RR_RXAE_IQKMOD, 0x9); + break; + } + + fsleep(10); + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, 
_rxk_g_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_VOBUF, _rxk_g_idxattc2[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a_idxattc2[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, _rxk_a6_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_IATT, _rxk_a6_idxattc2[gp]); + break; + } + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP_V1, gp); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + + if (path == RF_PATH_B) + rtw89_write_rf(rtwdev, path, RR_IQKPLL, RR_IQKPLL_MOD, 0x0); + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_NBW, bkrf0); + + if (fail) + iqk_info->nb_rxcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD) | 0x2; + else + iqk_info->nb_rxcfir[path] = 0x40000002; + + iqk_info->is_wb_rxiqk[path] = false; + return fail; +} + +static bool _txk_group_sel(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u8 gp; + + for (gp = 0; gp < TXK_GROUP_NR; gp++) { + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + _txk_g_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + _txk_g_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + _txk_g_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, + R_KIP_IQP + (path << 8), + MASKDWORD, _txk_g_itqt[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + _txk_a_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + _txk_a_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + _txk_a_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, + R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a_itqt[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + _txk_a6_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + _txk_a6_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + _txk_a6_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, + R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a6_itqt[gp]); + break; + default: + break; + } + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_SET, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_G2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + B_CFIR_LUT_GP, gp + 1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + } + + if (fail) { + iqk_info->nb_txcfir[path] = 0x40000002; + iqk_info->is_wb_txiqk[path] = false; + } else { + iqk_info->nb_txcfir[path] = 0x40000000; + iqk_info->is_wb_txiqk[path] = true; + } + + return fail; +} + +static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + u8 gp = 0x2; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 
_txk_g_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_g_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_g_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, _txk_g_itqt[gp]); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a_itqt[gp]); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, _txk_a6_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, _txk_a6_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, _txk_a6_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, _txk_a6_itqt[gp]); + break; + default: + break; + } + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SET, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp + 1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x00b); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + + if (!fail) + iqk_info->nb_txcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), + MASKDWORD) | 0x2; + else + iqk_info->nb_txcfir[path] = 0x40000002; + + iqk_info->is_wb_txiqk[path] = false; + + return fail; +} + +static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 idx = mcc_info->table_idx; + bool is_fail1, is_fail2; + u32 val; + u32 core_i; + u32 core_q; + u32 vbuff_i; + u32 vbuff_q; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + val = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK); + core_i = FIELD_GET(RR_TXMO_COI, val); + core_q = FIELD_GET(RR_TXMO_COQ, val); + + if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d) + is_fail1 = true; + else + is_fail1 = false; + + iqk_info->lok_idac[idx][path] = val; + + val = rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK); + vbuff_i = FIELD_GET(RR_LOKVB_COI, val); + vbuff_q = FIELD_GET(RR_LOKVB_COQ, val); + + if (vbuff_i < 0x2 || vbuff_i > 0x3d || vbuff_q < 0x2 || vbuff_q > 0x3d) + is_fail2 = true; + else + is_fail2 = false; + + iqk_info->lok_vbuf[idx][path] = val; + + return is_fail1 || is_fail2; +} + +static bool _iqk_lok(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 tmp_id = 0x0; + bool fail = false; + bool tmp = false; + + /* Step 0: Init RF gain & tone idx= 8.25Mhz */ + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, IQK_DF4_TXT_8_25MHZ); + + /* Step 1 START: _lok_coarse_fine_wi_swap */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_G_FLOK_COARSE; + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = 
ID_A_FLOK_COARSE; + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_A_FLOK_COARSE; + break; + default: + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id); + iqk_info->lok_cor_fail[0][path] = tmp; + + /* Step 2 */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + default: + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); + + /* Step 3 */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_G_FLOK_FINE; + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_A_FLOK_FINE; + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x6); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x9); + tmp_id = ID_A_FLOK_FINE; + break; + default: + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, tmp_id); + iqk_info->lok_fin_fail[0][path] = tmp; + + /* Step 4 large rf gain */ + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, 0x1b); + break; + } + tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_VBUFFER); + fail = _lok_finetune_check(rtwdev, path); + + return fail; +} + +static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + default: + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + fsleep(10); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 
0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + fsleep(10); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); + break; + case RTW89_BAND_6G: + rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXAS, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXA2, RR_TXA2_LDO, 0xf); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x403e0 | iqk_info->syn1to2); + fsleep(10); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x6); + break; + } +} + +static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 tmp; + bool flag; + + iqk_info->thermal[path] = + ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + iqk_info->thermal_rek_en = false; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path, + iqk_info->thermal[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path, + iqk_info->lok_cor_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path, + iqk_info->lok_fin_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path, + iqk_info->iqk_tx_fail[0][path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path, + iqk_info->iqk_rx_fail[0][path]); + + flag = iqk_info->lok_cor_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FCOR << (path * 4), flag); + flag = iqk_info->lok_fin_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FFIN << (path * 4), flag); + flag = iqk_info->iqk_tx_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_FTX << (path * 4), flag); + flag = iqk_info->iqk_rx_fail[0][path]; + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_F_RX << (path * 4), flag); + + tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD); + iqk_info->bp_iqkenable[path] = tmp; + tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + iqk_info->bp_txkresult[path] = tmp; + tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD); + iqk_info->bp_rxkresult[path] = tmp; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT, + iqk_info->iqk_times); + + tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, B_IQKINF_FAIL << (path * 4)); + if (tmp != 0x0) + iqk_info->iqk_fail_cnt++; + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_FCNT << (path * 4), + iqk_info->iqk_fail_cnt); +} + +static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + _iqk_txk_setting(rtwdev, path); + iqk_info->lok_fail[path] = _iqk_lok(rtwdev, phy_idx, path); + + if (iqk_info->is_nbiqk) + iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path); + else + iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path); + + _iqk_rxk_setting(rtwdev, path); + if (iqk_info->is_nbiqk) + iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path); + else + iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path); + + _iqk_info_iqk(rtwdev, phy_idx, path); +} + +static void _iqk_get_ch_info(struct rtw89_dev 
*rtwdev, + enum rtw89_phy_idx phy, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + struct rtw89_hal *hal = &rtwdev->hal; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + iqk_info->iqk_band[path] = hal->current_band_type; + iqk_info->iqk_bw[path] = hal->current_band_width; + iqk_info->iqk_ch[path] = hal->current_channel; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path, + iqk_info->iqk_band[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n", + path, iqk_info->iqk_bw[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n", + path, iqk_info->iqk_ch[path]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy, + rtwdev->dbcc_en ? "on" : "off", + iqk_info->iqk_band[path] == 0 ? "2G" : + iqk_info->iqk_band[path] == 1 ? "5G" : "6G", + iqk_info->iqk_ch[path], + iqk_info->iqk_bw[path] == 0 ? "20M" : + iqk_info->iqk_bw[path] == 1 ? "40M" : "80M"); + if (!rtwdev->dbcc_en) + iqk_info->syn1to2 = 0x1; + else + iqk_info->syn1to2 = 0x3; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852C_IQK_VER); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BAND << (path * 16), + iqk_info->iqk_band[path]); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_BW << (path * 16), + iqk_info->iqk_bw[path]); + rtw89_phy_write32_mask(rtwdev, R_IQKCH, B_IQKCH_CH << (path * 16), + iqk_info->iqk_ch[path]); + + rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_NCTLV, RTW8852C_NCTL_VER); +} + +static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path) +{ + _iqk_by_path(rtwdev, phy_idx, path); +} + +static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool fail; + + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, + iqk_info->nb_txcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, + iqk_info->nb_rxcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, + 0x00001219 + (path << 4)); + fsleep(200); + fail = _iqk_check_cal(rtwdev, path, 0x12); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail = %x\n", fail); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); +} + +static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852c_iqk_afebb_restore_defs_a_tbl, + &rtw8852c_iqk_afebb_restore_defs_b_tbl); + + rtw8852c_disable_rxagc(rtwdev, path, 0x1); +} + +static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + u8 idx = 0; + + idx = mcc_info->table_idx; + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); +} + +static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 
path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__); + + /* 01_BB_AFE_for DPK_S0_20210820 */ + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); + + /* disable rxgac */ + rtw8852c_disable_rxagc(rtwdev, path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), MASKDWORD, 0xf801fffd); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DAC_VAL, 0x1); + + rtw8852c_txck_force(rtwdev, path, true, DAC_960M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_DPD_GDIS, 0x1); + + rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2); + + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1); +} + static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) { u32 rf_reg5, rck_val = 0; @@ -505,6 +1452,86 @@ static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK)); } +static void _iqk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 ch, path; + + rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD); + if (iqk_info->is_iqk_init) + return; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + iqk_info->is_iqk_init = true; + iqk_info->is_nbiqk = false; + iqk_info->iqk_fft_en = false; + iqk_info->iqk_sram_en = false; + iqk_info->iqk_cfir_en = false; + iqk_info->iqk_xym_en = false; + iqk_info->thermal_rek_en = false; + iqk_info->iqk_times = 0x0; + + for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) { + iqk_info->iqk_channel[ch] = 0x0; + for (path = 0; path < RTW8852C_IQK_SS; path++) { + iqk_info->lok_cor_fail[ch][path] = false; + iqk_info->lok_fin_fail[ch][path] = false; + iqk_info->iqk_tx_fail[ch][path] = false; + iqk_info->iqk_rx_fail[ch][path] = false; + iqk_info->iqk_mcc_ch[ch][path] = 0x0; + iqk_info->iqk_table_idx[path] = 0x0; + } + } +} + +static void _doiqk(struct rtw89_dev *rtwdev, bool force, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 backup_bb_val[BACKUP_BB_REGS_NR]; + u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR]; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]==========IQK strat!!!!!==========\n"); + iqk_info->iqk_times++; + iqk_info->kcount = 0; + iqk_info->version = RTW8852C_IQK_VER; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); + 
_iqk_get_ch_info(rtwdev, phy_idx, path); + _rfk_backup_bb_reg(rtwdev, backup_bb_val); + _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); + _iqk_macbb_setting(rtwdev, phy_idx, path); + _iqk_preset(rtwdev, path); + _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_restore(rtwdev, path); + _iqk_afebb_restore(rtwdev, phy_idx, path); + _rfk_restore_bb_reg(rtwdev, backup_bb_val); + _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); +} + +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +{ + switch (_kpath(rtwdev, phy_idx)) { + case RF_A: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + break; + case RF_B: + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + case RF_AB: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + default: + break; + } +} + static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path) { int ret; @@ -1707,6 +2734,22 @@ void rtw8852c_dack(struct rtw89_dev *rtwdev) rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); } +void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u32 tx_en; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); + rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + _iqk_init(rtwdev); + _iqk(rtwdev, phy_idx, false); + + rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); +} + #define RXDCK_VER_8852C 0xe void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index fd07028a4964..5c0623f6af20 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -9,6 +9,7 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev); void rtw8852c_dack(struct rtw89_dev *rtwdev); +void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe); void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); -- cgit v1.2.3 From da4cea16cb1340576459e1a703cae4084f9e6514 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 07:54:08 +0800 Subject: rtw89: 8852c: rfk: add DPK DPK is short for digital pre-distortion calibration. It can adjusts digital waveform according to PA linear characteristics dynamically to enhance TX EVM. Do this calibration when we are going to run on AP channel. To prevent power offset out of boundary, it monitors thermal and set proper boundary to register. 8852c needs two backup buffers, so we enlarge the array. But, 8852a still needs only one, so it only uses first element (index zero). 
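As a minimal sketch of that layout (illustrative only, not part of the patch): the array shape and field names mirror the struct rtw89_dpk_info change in the diff below, while the local constants, the helper and the single_bkup flag are made up for the example.

#include <linux/types.h>

#define DPK_RF_PATH_NUM	2	/* stands in for RTW89_DPK_RF_PATH */
#define DPK_BKUP_NUM	2	/* stands in for RTW89_DPK_BKUP_NUM, assumed 2 */

struct dpk_dc_bkup {
	u16 dc_i[DPK_RF_PATH_NUM][DPK_BKUP_NUM];
	u16 dc_q[DPK_RF_PATH_NUM][DPK_BKUP_NUM];
};

static void dpk_store_dc(struct dpk_dc_bkup *dpk, u8 path, u8 kidx,
			 u16 dc_i, u16 dc_q, bool single_bkup)
{
	/* 8852a keeps one parameter set per path and always writes slot 0;
	 * 8852c passes the current backup index (kidx), so each path can
	 * hold parameters for two calibrated channels.
	 */
	u8 slot = single_bkup ? 0 : kidx;

	dpk->dc_i[path][slot] = dc_i;
	dpk->dc_q[path][slot] = dc_q;
}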
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220502235408.15052-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.h | 11 +- drivers/net/wireless/realtek/rtw89/reg.h | 49 + drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c | 8 +- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 2 + drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 1114 +++++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 2 + 6 files changed, 1178 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 8fd719f93d6c..2921814842ff 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -2690,6 +2690,7 @@ struct rtw89_dpk_bkup_para { enum rtw89_bandwidth bw; u8 ch; bool path_ok; + u8 mdpd_en; u8 txagc_dpk; u8 ther_dpk; u8 gs; @@ -2699,11 +2700,12 @@ struct rtw89_dpk_bkup_para { struct rtw89_dpk_info { bool is_dpk_enable; bool is_dpk_reload_en; - u16 dc_i[RTW89_DPK_RF_PATH]; - u16 dc_q[RTW89_DPK_RF_PATH]; - u8 corr_val[RTW89_DPK_RF_PATH]; - u8 corr_idx[RTW89_DPK_RF_PATH]; + u16 dc_i[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; + u16 dc_q[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; + u8 corr_val[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; + u8 corr_idx[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; u8 cur_idx[RTW89_DPK_RF_PATH]; + u8 cur_k_set; struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; }; @@ -2712,6 +2714,7 @@ struct rtw89_fem_info { bool elna_5g; bool epa_2g; bool epa_5g; + bool epa_6g; }; struct rtw89_phy_ch_info { diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 120fc13520fc..6f5d1012c90c 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3191,6 +3191,7 @@ #define B_AX_GNT_BT_TX_SW_CTRL BIT(0) #define RR_MOD 0x00 +#define RR_MOD_V1 0x10000 #define RR_MOD_IQK GENMASK(19, 4) #define RR_MOD_DPK GENMASK(19, 5) #define RR_MOD_MASK GENMASK(19, 16) @@ -3357,8 +3358,16 @@ #define RR_DCK2_CYCLE GENMASK(7, 2) #define RR_DCKC 0x95 #define RR_DCKC_CHK BIT(3) +#define RR_IQGEN 0x97 +#define RR_IQGEN_BIAS GENMASK(11, 8) +#define RR_TXIQK 0x98 +#define RR_TXIQK_ATT2 GENMASK(15, 12) +#define RR_TIA 0x9e +#define RR_TIA_N6 BIT(8) #define RR_MIXER 0x9f #define RR_MIXER_GN GENMASK(4, 3) +#define RR_LOGEN 0xa3 +#define RR_LOGEN_RPT GENMASK(19, 16) #define RR_XTALX2 0xb8 #define RR_MALSEL 0xbe #define RR_LCK_TRG 0xd3 @@ -3370,6 +3379,7 @@ #define RR_RCKD_BW BIT(2) #define RR_TXADBG 0xde #define RR_LUTDBG 0xdf +#define RR_LUTDBG_TIA BIT(12) #define RR_LUTDBG_LOK BIT(2) #define RR_LUTWE2 0xee #define RR_LUTWE2_RTXBW BIT(2) @@ -3580,6 +3590,8 @@ #define B_TXAGC_TP GENMASK(2, 0) #define R_TSSI_THER 0x1C10 #define B_TSSI_THER GENMASK(29, 24) +#define R_TXAGC_BTP 0x1CA0 +#define B_TXAGC_BTP GENMASK(31, 24) #define R_TXAGC_BB 0x1C60 #define B_TXAGC_BB_OFT GENMASK(31, 16) #define B_TXAGC_BB GENMASK(31, 24) @@ -3866,10 +3878,15 @@ #define B_DPD_DIS BIT(14) #define B_DPD_GDIS BIT(13) #define B_IQK_RFC_ON BIT(1) +#define R_TXPWRB 0x56CC +#define B_TXPWRB_ON BIT(28) +#define B_TXPWRB_VAL GENMASK(27, 19) #define R_DPD_OFT_EN 0x5800 #define B_DPD_OFT_EN BIT(28) #define R_DPD_OFT_ADDR 0x5804 #define B_DPD_OFT_ADDR GENMASK(31, 27) +#define R_TXPWRB_H 0x580c +#define B_TXPWRB_RDY BIT(15) #define R_P0_TMETER 0x5810 #define B_P0_TMETER GENMASK(15, 10) #define B_P0_TMETER_DIS BIT(16) @@ -3981,16 +3998,23 @@ #define 
IQK_DF4_TXT_8_25MHZ 0x021 #define R_IQK_CFG 0x8034 #define B_IQK_CFG_SET GENMASK(5, 4) +#define R_TPG_SEL 0x8068 #define R_TPG_MOD 0x806C #define B_TPG_MOD_F GENMASK(2, 1) #define R_MDPK_SYNC 0x8070 #define B_MDPK_SYNC_SEL BIT(31) #define B_MDPK_SYNC_MAN GENMASK(31, 28) #define R_MDPK_RX_DCK 0x8074 +#define B_MDPK_RX_DCK_EN BIT(31) +#define R_KIP_MOD 0x8078 +#define B_KIP_MOD GENMASK(19, 0) #define R_NCTL_RW 0x8080 #define R_KIP_SYSCFG 0x8088 #define R_KIP_CLK 0x808C +#define R_DPK_IDL 0x809C +#define B_DPK_IDL BIT(8) #define R_LDL_NORM 0x80A0 +#define B_LDL_NORM_MA BIT(16) #define B_LDL_NORM_PN GENMASK(12, 8) #define B_LDL_NORM_OP GENMASK(1, 0) #define R_DPK_CTL 0x80B0 @@ -4001,12 +4025,19 @@ #define B_DPK_CFG2_ST BIT(14) #define R_DPK_CFG3 0x80C0 #define R_KPATH_CFG 0x80D0 +#define B_KPATH_CFG_ED GENMASK(21, 20) #define R_KIP_RPT1 0x80D4 #define B_KIP_RPT1_SEL GENMASK(21, 16) #define R_SRAM_IQRX 0x80D8 #define R_GAPK 0x80E0 #define B_GAPK_ADR BIT(0) #define R_SRAM_IQRX2 0x80E8 +#define R_DPK_MPA 0x80EC +#define B_DPK_MPA_T0 BIT(10) +#define B_DPK_MPA_T1 BIT(9) +#define B_DPK_MPA_T2 BIT(8) +#define R_DPK_WR 0x80F4 +#define B_DPK_WR_ST BIT(29) #define R_DPK_TRK 0x80f0 #define B_DPK_TRK_DIS BIT(31) #define R_RPT_COM 0x80FC @@ -4014,8 +4045,11 @@ #define B_PRT_COM_DCI GENMASK(27, 16) #define B_PRT_COM_CORV GENMASK(15, 8) #define B_PRT_COM_DCQ GENMASK(11, 0) +#define B_PRT_COM_RXOV BIT(8) #define B_PRT_COM_GL GENMASK(7, 4) #define B_PRT_COM_CORI GENMASK(7, 0) +#define B_PRT_COM_RXBB GENMASK(5, 0) +#define B_PRT_COM_DONE BIT(0) #define R_COEF_SEL 0x8104 #define B_COEF_SEL_IQC BIT(0) #define B_COEF_SEL_MDPD BIT(8) @@ -4045,14 +4079,22 @@ #define B_CFIR_LUT_G2 BIT(2) #define B_CFIR_LUT_GP_V1 GENMASK(2, 0) #define B_CFIR_LUT_GP GENMASK(1, 0) +#define R_DPK_GN 0x819C +#define B_DPK_GN_EN GENMASK(17, 16) +#define B_DPK_GN_AG GENMASK(9, 0) #define R_DPD_V1 0x81a0 +#define B_DPD_LBK BIT(7) #define R_DPD_CH0 0x81AC #define R_DPD_BND 0x81B4 #define R_DPD_CH0A 0x81BC +#define B_DPD_MEN GENMASK(31, 28) +#define B_DPD_ORDER GENMASK(26, 24) +#define B_DPD_SEL GENMASK(13, 8) #define R_TXAGC_RFK 0x81C4 #define B_TXAGC_RFK_CH0 GENMASK(5, 0) #define R_DPD_COM 0x81C8 #define R_KIP_IQP 0x81CC +#define B_KIP_IQP_SW GENMASK(13, 12) #define B_KIP_IQP_IQSW GENMASK(5, 0) #define R_KIP_RPT 0x81D4 #define B_KIP_RPT_SEL GENMASK(21, 16) @@ -4060,8 +4102,15 @@ #define R_LOAD_COEF 0x81DC #define B_LOAD_COEF_MDPD BIT(16) #define B_LOAD_COEF_CFIR GENMASK(1, 0) +#define B_LOAD_COEF_DI BIT(1) #define B_LOAD_COEF_AUTO BIT(0) +#define R_DPK_GL 0x81F0 +#define B_DPK_GL_A0 GENMASK(31, 28) +#define B_DPK_GL_A1 GENMASK(17, 0) #define R_RPT_PER 0x81FC +#define B_RPT_PER_TSSI GENMASK(28, 16) +#define B_RPT_PER_OF GENMASK(15, 8) +#define B_RPT_PER_TH GENMASK(5, 0) #define R_RXCFIR_P0C0 0x8D40 #define R_RXCFIR_P0C1 0x8D84 #define R_RXCFIR_P0C2 0x8DC8 diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c index aa782534e76b..e3c2fce32651 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c @@ -2189,8 +2189,8 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev, "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx, corr_val); - dpk->corr_idx[path] = corr_idx; - dpk->corr_val[path] = corr_val; + dpk->corr_idx[path][0] = corr_idx; + dpk->corr_val[path][0] = corr_val; rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); @@ -2203,8 +2203,8 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev, 
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n", path, dc_i, dc_q); - dpk->dc_i[path] = dc_i; - dpk->dc_q[path] = dc_q; + dpk->dc_i[path][0] = dc_i; + dpk->dc_q[path][0] = dc_q; if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || corr_val < DPK_SYNC_TH_CORR) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 0b722675aad4..4fb3de71d032 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1789,6 +1789,7 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) rtw8852c_rx_dck(rtwdev, phy_idx, false); rtw8852c_iqk(rtwdev, phy_idx); rtw8852c_tssi(rtwdev, phy_idx); + rtw8852c_dpk(rtwdev, phy_idx); rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } @@ -1804,6 +1805,7 @@ static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start) static void rtw8852c_rfk_track(struct rtw89_dev *rtwdev) { + rtw8852c_dpk_track(rtwdev); rtw8852c_lck_track(rtwdev); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 3108fa9cd5b2..ffc71ad24927 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -55,6 +55,11 @@ static const u32 _txk_g_track_range[TXK_GROUP_NR] = {0x5, 0x6, 0x6}; static const u32 _txk_g_gain_bb[TXK_GROUP_NR] = {0x0e, 0x0a, 0x0e}; static const u32 _txk_g_itqt[TXK_GROUP_NR] = { 0x12, 0x12, 0x12}; +static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = { + {0x8190, 0x8194, 0x8198, 0x81a4}, + {0x81a8, 0x81c4, 0x81c8, 0x81e8}, +}; + static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n", @@ -1568,6 +1573,1093 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 pat } } +#define RTW8852C_RF_REL_VERSION 34 +#define RTW8852C_DPK_VER 0x10 +#define RTW8852C_DPK_TH_AVG_NUM 4 +#define RTW8852C_DPK_RF_PATH 2 +#define RTW8852C_DPK_KIP_REG_NUM 5 +#define RTW8852C_DPK_RXSRAM_DBG 0 + +enum rtw8852c_dpk_id { + LBK_RXIQK = 0x06, + SYNC = 0x10, + MDPK_IDL = 0x11, + MDPK_MPA = 0x12, + GAIN_LOSS = 0x13, + GAIN_CAL = 0x14, + DPK_RXAGC = 0x15, + KIP_PRESET = 0x16, + KIP_RESTORE = 0x17, + DPK_TXAGC = 0x19, + D_KIP_PRESET = 0x28, + D_TXAGC = 0x29, + D_RXAGC = 0x2a, + D_SYNC = 0x2b, + D_GAIN_LOSS = 0x2c, + D_MDPK_IDL = 0x2d, + D_GAIN_NORM = 0x2f, + D_KIP_THERMAL = 0x30, + D_KIP_RESTORE = 0x31 +}; + +#define DPK_TXAGC_LOWER 0x2e +#define DPK_TXAGC_UPPER 0x3f +#define DPK_TXAGC_INVAL 0xff + +enum dpk_agc_step { + DPK_AGC_STEP_SYNC_DGAIN, + DPK_AGC_STEP_GAIN_LOSS_IDX, + DPK_AGC_STEP_GL_GT_CRITERION, + DPK_AGC_STEP_GL_LT_CRITERION, + DPK_AGC_STEP_SET_TX_GAIN, +}; + +static void _rf_direct_cntrl(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool off); + +static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[], + u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path) +{ + u8 i; + + for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) { + reg_bkup[path][i] = + rtw89_phy_read32_mask(rtwdev, reg[i] + (path << 8), MASKDWORD); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", + reg[i] + (path << 8), reg_bkup[path][i]); + } +} + +static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 
reg[], + u32 reg_bkup[][RTW8852C_DPK_KIP_REG_NUM], u8 path) +{ + u8 i; + + for (i = 0; i < RTW8852C_DPK_KIP_REG_NUM; i++) { + rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8), + MASKDWORD, reg_bkup[path][i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n", + reg[i] + (path << 8), reg_bkup[path][i]); + } +} + +static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, enum rtw8852c_dpk_id id) +{ + u16 dpk_cmd; + u32 val; + int ret; + + dpk_cmd = (u16)((id << 8) | (0x19 + path * 0x12)); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0); + mdelay(10); + rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot for %s = 0x%x (ret=%d)\n", + id == 0x06 ? "LBK_RXIQK" : + id == 0x10 ? "SYNC" : + id == 0x11 ? "MDPK_IDL" : + id == 0x12 ? "MDPK_MPA" : + id == 0x13 ? "GAIN_LOSS" : "PWR_CAL", + dpk_cmd, ret); + + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot over 20ms!!!!\n"); + return 1; + } + + return 0; +} + +static void _dpk_information(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + struct rtw89_hal *hal = &rtwdev->hal; + + u8 kidx = dpk->cur_idx[path]; + + dpk->bp[path][kidx].band = hal->current_band_type; + dpk->bp[path][kidx].ch = hal->current_channel; + dpk->bp[path][kidx].bw = hal->current_band_width; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", + path, dpk->cur_idx[path], phy, + rtwdev->is_tssi_mode[path] ? "on" : "off", + rtwdev->dbcc_en ? "on" : "off", + dpk->bp[path][kidx].band == 0 ? "2G" : + dpk->bp[path][kidx].band == 1 ? "5G" : "6G", + dpk->bp[path][kidx].ch, + dpk->bp[path][kidx].bw == 0 ? "20M" : + dpk->bp[path][kidx].bw == 1 ? "40M" : "80M"); +} + +static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + /*1. Keep ADC_fifo reset*/ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); + + /*2. BB for IQK DBG mode*/ + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd); + + /*3.Set DAC clk*/ + rtw8852c_txck_force(rtwdev, path, true, DAC_960M); + + /*4. Set ADC clk*/ + rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), + B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041); + + /*5. 
ADDA fifo rst*/ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE setting\n", path); +} + +static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev, u8 path) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), + B_P0_NRBW_DBG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A1 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A3 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13), B_P0_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A0 << path, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_A2 << path, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d BB/AFE restore\n", path); +} + +static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_pause) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, is_pause); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, + is_pause ? "pause" : "resume"); +} + +static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev, u8 path, bool ctrl_by_kip) +{ + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_IQK_RFC_ON, ctrl_by_kip); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n", + ctrl_by_kip ? "KIP" : "BB"); +} + +static void _dpk_txpwr_bb_force(struct rtw89_dev *rtwdev, u8 path, bool force) +{ + rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_ON, force); + rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H + (path << 13), B_TXPWRB_RDY, force); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d txpwr_bb_force %s\n", + path, force ? 
"on" : "off"); +} + +static void _dpk_kip_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + _dpk_one_shot(rtwdev, phy, path, D_KIP_RESTORE); + _dpk_kip_control_rfc(rtwdev, path, false); + _dpk_txpwr_bb_force(rtwdev, path, false); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); +} + +static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ +#define RX_TONE_IDX 0x00250025 /* Q.2 9.25MHz */ + u8 cur_rxbb; + u32 rf_11, reg_81cc; + + rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); + + _dpk_kip_control_rfc(rtwdev, path, false); + + cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); + rf_11 = rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK); + reg_81cc = rtw89_phy_read32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_SW); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x3); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0xd); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, 0x1f); + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x12); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, 0x3); + + _dpk_kip_control_rfc(rtwdev, path, true); + + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, MASKDWORD, RX_TONE_IDX); + + _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path, + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD)); + + _dpk_kip_control_rfc(rtwdev, path, false); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RFREG_MASK, rf_11); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, cur_rxbb); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_SW, reg_81cc); + + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); + + _dpk_kip_control_rfc(rtwdev, path, true); +} + +static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, + enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x50121 | BIT(rtwdev->dbcc_en)); + rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x2); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] RF 0x0/0x83/0x9e/0x1a/0xdf/0x1001a = 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x/ 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_RXBB, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_TIA, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK), + rtw89_read_rf(rtwdev, path, RR_LUTDBG, RFREG_MASK), + rtw89_read_rf(rtwdev, path, 0x1001a, RFREG_MASK)); + } else { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, + 0x50101 | BIT(rtwdev->dbcc_en)); + rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK); + + if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) { + rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8); + rtw89_write_rf(rtwdev, 
path, RR_LOGEN, RR_LOGEN_RPT, 0xd); + } else { + rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd); + } + + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + + if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) + rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_EBW, 0x0); + } +} + +static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160) { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x3); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0x0180ff30); + } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xffe0fa00); + } else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xff4009e0); + } else { + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); + rtw89_phy_write32_mask(rtwdev, R_TPG_SEL, MASKDWORD, 0xf9f007d0); + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_160 ? "160M" : + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M"); +} + +static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ +#define DPK_SYNC_TH_DC_I 200 +#define DPK_SYNC_TH_DC_Q 200 +#define DPK_SYNC_TH_CORR 170 + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u16 dc_i, dc_q; + u8 corr_val, corr_idx, rxbb; + u8 rxbb_ov; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + + corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI); + corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV); + + dpk->corr_idx[path][kidx] = corr_idx; + dpk->corr_val[path][kidx] = corr_val; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); + + dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ); + + dc_i = abs(sign_extend32(dc_i, 11)); + dc_q = abs(sign_extend32(dc_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d Corr_idx/ Corr_val /DC I/Q, = %d / %d / %d / %d\n", + path, corr_idx, corr_val, dc_i, dc_q); + + dpk->dc_i[path][kidx] = dc_i; + dpk->dc_q[path][kidx] = dc_q; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x8); + rxbb = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB); + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x31); + rxbb_ov = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXOV); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d RXBB/ RXAGC_done /RXBB_ovlmt = %d / %d / %d\n", + path, rxbb, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DONE), + rxbb_ov); + + if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || + corr_val < DPK_SYNC_TH_CORR) + return true; + else + return false; +} + +static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) +{ + u16 dgain = 0x0; + + rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL); + + dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x 
(%d)\n", dgain, dgain); + + return dgain; +} + +static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) +{ + u8 result; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); + + result = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp GL = %d\n", result); + + return result; +} + +static void _dpk_kset_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0x10); + dpk->cur_k_set = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), 0xE0000000) - 1; +} + +static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 dbm, bool set_from_bb) +{ + if (set_from_bb) { + dbm = clamp_t(u8, dbm, 7, 24); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set S%d txagc to %ddBm\n", path, dbm); + rtw89_phy_write32_mask(rtwdev, R_TXPWRB + (path << 13), B_TXPWRB_VAL, dbm << 2); + } + _dpk_one_shot(rtwdev, phy, path, D_TXAGC); + _dpk_kset_query(rtwdev, path); +} + +static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, D_GAIN_LOSS); + _dpk_kip_set_txagc(rtwdev, phy, path, 0xff, false); + + rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPK_GL + (path << 8), B_DPK_GL_A0, 0x0); + + return _dpk_gainloss_read(rtwdev); +} + +static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check) +{ + u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); + + if (is_check) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); + val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val1_i = abs(sign_extend32(val1_i, 11)); + val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val1_q = abs(sign_extend32(val1_q, 11)); + + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); + val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val2_i = abs(sign_extend32(val2_i, 11)); + val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val2_q = abs(sign_extend32(val2_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", + phy_div(val1_i * val1_i + val1_q * val1_q, + val2_i * val2_i + val2_q * val2_q)); + } else { + for (i = 0; i < 32; i++) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_Read[%02d]= 0x%08x\n", i, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); + } + } + + if (val1_i * val1_i + val1_q * val1_q >= (val2_i * val2_i + val2_q * val2_q) * 8 / 5) + return true; + else + return false; +} + +static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, D_RXAGC); + + return _dpk_sync_check(rtwdev, path, kidx); +} + +static void _dpk_read_rxsram(struct rtw89_dev *rtwdev) +{ + u32 addr; + + rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_pre_defs_tbl); + + for (addr = 0; addr < 0x200; addr++) { + rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 | addr); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RXSRAM[%03d] = 
0x%07x\n", addr, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); + } + + rtw89_rfk_parser(rtwdev, &rtw8852c_read_rxsram_post_defs_tbl); +} + +static void _dpk_bypass_rxiqc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), B_DPD_LBK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000002); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Bypass RXIQC\n"); +} + +static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 init_xdbm, u8 loss_only) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 step = DPK_AGC_STEP_SYNC_DGAIN; + u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0; + u8 tmp_rxbb; + u8 goout = 0, agc_cnt = 0; + u16 dgain = 0; + bool is_fail = false; + int limit = 200; + + do { + switch (step) { + case DPK_AGC_STEP_SYNC_DGAIN: + is_fail = _dpk_kip_set_rxagc(rtwdev, phy, path, kidx); + + if (RTW8852C_DPK_RXSRAM_DBG) + _dpk_read_rxsram(rtwdev); + + if (is_fail) { + goout = 1; + break; + } + + dgain = _dpk_dgain_read(rtwdev); + + if (dgain > 0x5fc || dgain < 0x556) { + _dpk_one_shot(rtwdev, phy, path, D_SYNC); + dgain = _dpk_dgain_read(rtwdev); + } + + if (agc_cnt == 0) { + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) + _dpk_bypass_rxiqc(rtwdev, path); + else + _dpk_lbk_rxiqk(rtwdev, phy, path); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + break; + + case DPK_AGC_STEP_GAIN_LOSS_IDX: + tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx); + + if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) || + tmp_gl_idx >= 7) + step = DPK_AGC_STEP_GL_GT_CRITERION; + else if (tmp_gl_idx == 0) + step = DPK_AGC_STEP_GL_LT_CRITERION; + else + step = DPK_AGC_STEP_SET_TX_GAIN; + break; + + case DPK_AGC_STEP_GL_GT_CRITERION: + if (tmp_dbm <= 7) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@lower bound!!\n"); + } else { + tmp_dbm = max_t(u8, tmp_dbm - 3, 7); + _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); + } + step = DPK_AGC_STEP_SYNC_DGAIN; + agc_cnt++; + break; + + case DPK_AGC_STEP_GL_LT_CRITERION: + if (tmp_dbm >= 24) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Txagc@upper bound!!\n"); + } else { + tmp_dbm = min_t(u8, tmp_dbm + 2, 24); + _dpk_kip_set_txagc(rtwdev, phy, path, tmp_dbm, true); + } + step = DPK_AGC_STEP_SYNC_DGAIN; + agc_cnt++; + break; + + case DPK_AGC_STEP_SET_TX_GAIN: + _dpk_kip_control_rfc(rtwdev, path, false); + tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB); + if (tmp_rxbb + tmp_gl_idx > 0x1f) + tmp_rxbb = 0x1f; + else + tmp_rxbb = tmp_rxbb + tmp_gl_idx; + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust RXBB (%+d) = 0x%x\n", + tmp_gl_idx, tmp_rxbb); + _dpk_kip_control_rfc(rtwdev, path, true); + goout = 1; + break; + default: + goout = 1; + break; + } + } while (!goout && agc_cnt < 6 && --limit > 0); + + if (limit <= 0) + rtw89_warn(rtwdev, "[DPK] exceed loop limit\n"); + + return is_fail; +} + +static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order) +{ + static const struct rtw89_rfk_tbl *order_tbls[] = { + &rtw8852c_dpk_mdpd_order0_defs_tbl, + &rtw8852c_dpk_mdpd_order1_defs_tbl, + &rtw8852c_dpk_mdpd_order2_defs_tbl, + &rtw8852c_dpk_mdpd_order3_defs_tbl, + }; + + if (order >= ARRAY_SIZE(order_tbls)) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Wrong MDPD order!!(0x%x)\n", order); + return; + } + + rtw89_rfk_parser(rtwdev, order_tbls[order]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", + order 
== 0x0 ? "(5,3,1)" : + order == 0x1 ? "(5,3,0)" : + order == 0x2 ? "(5,0,0)" : "(7,3,1)"); +} + +static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 cnt; + u8 ov_flag; + u32 dpk_sync; + + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1); + + if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T2) == 0x1) + _dpk_set_mdpd_para(rtwdev, 0x2); + else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T1) == 0x1) + _dpk_set_mdpd_para(rtwdev, 0x1); + else if (rtw89_phy_read32_mask(rtwdev, R_DPK_MPA, B_DPK_MPA_T0) == 0x1) + _dpk_set_mdpd_para(rtwdev, 0x0); + else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_5 || + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_10 || + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_20) + _dpk_set_mdpd_para(rtwdev, 0x2); + else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 || + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) + _dpk_set_mdpd_para(rtwdev, 0x1); + else + _dpk_set_mdpd_para(rtwdev, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL, 0x0); + fsleep(1000); + + _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + dpk_sync = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] dpk_sync = 0x%x\n", dpk_sync); + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf); + ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); + for (cnt = 0; cnt < 5 && ov_flag == 0x1; cnt++) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] ReK due to MDPK ov!!!\n"); + _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0xf); + ov_flag = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR); + } + + if (ov_flag) { + _dpk_set_mdpd_para(rtwdev, 0x2); + _dpk_one_shot(rtwdev, phy, path, D_MDPK_IDL); + } +} + +static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + bool is_reload = false; + u8 idx, cur_band, cur_ch; + + cur_band = rtwdev->hal.current_band_type; + cur_ch = rtwdev->hal.current_channel; + + for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { + if (cur_band != dpk->bp[path][idx].band || + cur_ch != dpk->bp[path][idx].ch) + continue; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), + B_COEF_SEL_MDPD, idx); + dpk->cur_idx[path] = idx; + is_reload = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] reload S%d[%d] success\n", path, idx); + } + + return is_reload; +} + +static void _dpk_kip_pwr_clk_onoff(struct rtw89_dev *rtwdev, bool turn_on) +{ + rtw89_rfk_parser(rtwdev, turn_on ? 
&rtw8852c_dpk_kip_pwr_clk_on_defs_tbl : + &rtw8852c_dpk_kip_pwr_clk_off_defs_tbl); +} + +static void _dpk_kip_preset_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); + + if (rtwdev->hal.cv == CHIP_CAV) + rtw89_phy_write32_mask(rtwdev, + R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_SEL, 0x01); + else + rtw89_phy_write32_mask(rtwdev, + R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_SEL, 0x0c); + + _dpk_kip_control_rfc(rtwdev, path, true); + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx); + + _dpk_one_shot(rtwdev, phy, path, D_KIP_PRESET); +} + +static void _dpk_para_query(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ +#define _DPK_PARA_TXAGC GENMASK(15, 10) +#define _DPK_PARA_THER GENMASK(31, 26) + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u32 para; + + para = rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8), + MASKDWORD); + + dpk->bp[path][kidx].txagc_dpk = FIELD_GET(_DPK_PARA_TXAGC, para); + dpk->bp[path][kidx].ther_dpk = FIELD_GET(_DPK_PARA_THER, para); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal/ txagc_RF (K%d) = 0x%x/ 0x%x\n", + dpk->cur_k_set, dpk->bp[path][kidx].ther_dpk, dpk->bp[path][kidx].txagc_dpk); +} + +static void _dpk_gain_normalize_8852c(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, bool is_execute) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (is_execute) { + rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_AG, 0x200); + rtw89_phy_write32_mask(rtwdev, R_DPK_GN + (path << 8), B_DPK_GN_EN, 0x3); + + _dpk_one_shot(rtwdev, phy, path, D_GAIN_NORM); + } else { + rtw89_phy_write32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8), + 0x0000007F, 0x5b); + } + dpk->bp[path][kidx].gs = + rtw89_phy_read32_mask(rtwdev, dpk_par_regs[kidx][dpk->cur_k_set] + (path << 8), + 0x0000007F); +} + +static u8 _dpk_order_convert(struct rtw89_dev *rtwdev) +{ + u32 val32 = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP); + u8 val; + + switch (val32) { + case 0: + val = 0x6; + break; + case 1: + val = 0x2; + break; + case 2: + val = 0x0; + break; + case 3: + val = 0x7; + break; + default: + val = 0xff; + break; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val); + + return val; +} + +static void _dpk_on(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_ORDER, _dpk_order_convert(rtwdev)); + + dpk->bp[path][kidx].mdpd_en = BIT(dpk->cur_k_set); + dpk->bp[path][kidx].path_ok = true; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] path_ok = 0x%x\n", + path, kidx, dpk->bp[path][kidx].mdpd_en); + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_MEN, dpk->bp[path][kidx].mdpd_en); + + _dpk_gain_normalize_8852c(rtwdev, phy, path, kidx, false); +} + +static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 gain) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 kidx = dpk->cur_idx[path]; + u8 init_xdbm = 15; + bool is_fail; + + 
rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx); + _dpk_kip_control_rfc(rtwdev, path, false); + _rf_direct_cntrl(rtwdev, path, false); + rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd); + _dpk_rf_setting(rtwdev, gain, path, kidx); + _set_rx_dck(rtwdev, phy, path, false); + _dpk_kip_pwr_clk_onoff(rtwdev, true); + _dpk_kip_preset_8852c(rtwdev, phy, path, kidx); + _dpk_txpwr_bb_force(rtwdev, path, true); + _dpk_kip_set_txagc(rtwdev, phy, path, init_xdbm, true); + _dpk_tpg_sel(rtwdev, path, kidx); + + is_fail = _dpk_agc(rtwdev, phy, path, kidx, init_xdbm, false); + if (is_fail) + goto _error; + + _dpk_idl_mpa(rtwdev, phy, path, kidx); + _dpk_para_query(rtwdev, path, kidx); + _dpk_on(rtwdev, phy, path, kidx); + +_error: + _dpk_kip_control_rfc(rtwdev, path, false); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RF_RX); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d]_K%d %s\n", path, kidx, + dpk->cur_k_set, is_fail ? "need Check" : "is Success"); + + return is_fail; +} + +static void _dpk_init(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 kidx = dpk->cur_idx[path]; + + dpk->bp[path][kidx].path_ok = false; +} + +static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); +} + +static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force, + enum rtw89_phy_idx phy, u8 kpath) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8}; + u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR]; + u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {}; + u8 path; + bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false}; + + if (dpk->is_dpk_reload_en) { + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + if (!(kpath & BIT(path))) + continue; + + reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + if (!reloaded[path] && dpk->bp[path][0].ch != 0) + dpk->cur_idx[path] = !dpk->cur_idx[path]; + else + _dpk_onoff(rtwdev, path, false); + } + } else { + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) + dpk->cur_idx[path] = 0; + } + + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ========= S%d[%d] DPK Init =========\n", + path, dpk->cur_idx[path]); + _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path); + _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); + _dpk_information(rtwdev, phy, path); + _dpk_init(rtwdev, path); + if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, true); + } + + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ========= S%d[%d] DPK Start =========\n", + path, dpk->cur_idx[path]); + rtw8852c_disable_rxagc(rtwdev, path, 0x0); + _dpk_drf_direct_cntrl(rtwdev, path, false); + _dpk_bb_afe_setting(rtwdev, phy, path, kpath); + is_fail = _dpk_main(rtwdev, phy, path, 1); + _dpk_onoff(rtwdev, path, is_fail); + } + + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ========= S%d[%d] DPK Restore =========\n", + path, dpk->cur_idx[path]); + _dpk_kip_restore(rtwdev, phy, path); + _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path); + _rfk_restore_rf_reg(rtwdev, backup_rf_val[path], path); + _dpk_bb_afe_restore(rtwdev, path); + rtw8852c_disable_rxagc(rtwdev, path, 0x1); + 
if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, false); + } + + _dpk_kip_pwr_clk_onoff(rtwdev, false); +} + +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_fem_info *fem = &rtwdev->fem; + + if (rtwdev->hal.cv == CHIP_CAV && rtwdev->hal.current_band_type != RTW89_BAND_2G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n"); + return true; + } else if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_6g && rtwdev->hal.current_band_type == RTW89_BAND_6G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n"); + return true; + } + + return false; +} + +static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 path, kpath; + + kpath = _kpath(rtwdev, phy); + + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + if (kpath & BIT(path)) + _dpk_onoff(rtwdev, path, true); + } +} + +static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n", + RTW8852C_DPK_VER, rtwdev->hal.cv, + RTW8852C_RF_REL_VERSION); + + if (_dpk_bypass_check(rtwdev, phy)) + _dpk_force_bypass(rtwdev, phy); + else + _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy)); + + if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1) + rtw8852c_rx_dck(rtwdev, phy, false); +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool off) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 val, kidx = dpk->cur_idx[path]; + + val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok ? + dpk->bp[path][kidx].mdpd_en : 0; + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_MEN, val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, + kidx, dpk->is_dpk_enable && !off ? 
"enable" : "disable"); +} + +static void _dpk_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 path, kidx; + u8 txagc_rf = 0; + s8 txagc_bb = 0, txagc_bb_tp = 0, txagc_ofst = 0; + u8 cur_ther; + s8 delta_ther = 0; + s16 pwsf_tssi_ofst; + + for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) { + kidx = dpk->cur_idx[path]; + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", + path, kidx, dpk->bp[path][kidx].ch); + + txagc_rf = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), 0x0000003f); + txagc_bb = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), MASKBYTE2); + txagc_bb_tp = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BTP + (path << 13), B_TXAGC_BTP); + + /* report from KIP */ + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xf); + cur_ther = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TH); + txagc_ofst = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_OF); + pwsf_tssi_ofst = + rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), B_RPT_PER_TSSI); + pwsf_tssi_ofst = sign_extend32(pwsf_tssi_ofst, 12); + + cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] thermal now = %d\n", cur_ther); + + if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0) + delta_ther = dpk->bp[path][kidx].ther_dpk - cur_ther; + + delta_ther = delta_ther * 1 / 2; + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] extra delta_ther = %d (0x%x / 0x%x@k)\n", + delta_ther, cur_ther, dpk->bp[path][kidx].ther_dpk); + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] delta_txagc = %d (0x%x / 0x%x@k)\n", + txagc_rf - dpk->bp[path][kidx].txagc_dpk, txagc_rf, + dpk->bp[path][kidx].txagc_dpk); + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_offset / pwsf_tssi_ofst = 0x%x / %+d\n", + txagc_ofst, pwsf_tssi_ofst); + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", + txagc_bb_tp, txagc_bb); + + if (rtw89_phy_read32_mask(rtwdev, R_DPK_WR, B_DPK_WR_ST) == 0x0 && + txagc_rf != 0 && rtwdev->hal.cv == CHIP_CAV) { + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] New pwsf = 0x%x\n", 0x78 - delta_ther); + + rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), + 0x07FC0000, 0x78 - delta_ther); + } + } +} + static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, enum rtw89_rf_path path) { @@ -2781,6 +3873,28 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a } } +void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u32 tx_en; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START); + rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + rtwdev->dpk.is_dpk_enable = true; + rtwdev->dpk.is_dpk_reload_en = false; + _dpk(rtwdev, phy_idx, false); + + rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP); +} + +void rtw8852c_dpk_track(struct rtw89_dev *rtwdev) +{ + _dpk_track(rtwdev); +} + void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) { u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index 
5c0623f6af20..e42fb1a4965e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -11,6 +11,8 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev); void rtw8852c_dack(struct rtw89_dev *rtwdev); void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe); +void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852c_dpk_track(struct rtw89_dev *rtwdev); void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx); -- cgit v1.2.3 From 135433b30a53f5424c1137fb7be55bf8412b03ac Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 2 May 2022 11:49:23 +0300 Subject: mlxsw: reg: Add "desc" field to SBPR SBPR, or Shared Buffer Pools Register, configures and retrieves the shared buffer pools and configuration. The desc field determines whether the configuration relates to the byte pool or the descriptor pool. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: Paolo Abeni --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 04c4d7a4bd83..078e3aa04383 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -12641,6 +12641,12 @@ static inline void mlxsw_reg_tidem_pack(char *payload, u8 underlay_ecn, MLXSW_REG_DEFINE(sbpr, MLXSW_REG_SBPR_ID, MLXSW_REG_SBPR_LEN); +/* reg_sbpr_desc + * When set, configures descriptor buffer. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpr, desc, 0x00, 31, 1); + /* shared direstion enum for SBPR, SBCM, SBPM */ enum mlxsw_reg_sbxx_dir { MLXSW_REG_SBXX_DIR_INGRESS, -- cgit v1.2.3 From c864769add96f568ac10a6173e5d2c55057d64c2 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 2 May 2022 11:49:24 +0300 Subject: mlxsw: Configure descriptor buffers Spectrum machines have two resources related to keeping packets in an internal buffer: bytes (allocated in cell-sized units) for packet payload, and descriptors, for keeping metadata. Currently, mlxsw only configures the bytes part of the resource management. Spectrum switches permit a full parallel configuration for the descriptor resources, including port-pool and port-TC-pool quotas. By default, these are all configured to use pool 14, with an infinite quota. The ingress pool 14 is then infinite in size. However, egress pool 14 has finite size by default. The size is chip dependent, but always much lower than what the chip actually permits. As a result, we can easily construct workloads that exhaust the configured descriptor limit. Fix the issue by configuring the egress descriptor pool to be infinite in size as well. This will maintain the configuration philosophy of the default configuration, but will unlock all chip resources to be usable. In the code, include both the configuration of ingress and egress, mostly for clarity.
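The write itself boils down to one SBPR access per direction; a minimal sketch of the egress case, specialized from the mlxsw_sp_sb_pr_desc_write() helper added in the diff below (the pool number, mode and the size-0-with-infi_size encoding are taken from the patch; error handling and caller context are trimmed, and the function name here is illustrative):

static int sb_pr_desc_egress_infinite(struct mlxsw_sp *mlxsw_sp)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];

	/* Descriptor pool 14, egress direction, dynamic mode, infinite
	 * quota (size 0 with infi_size set).
	 */
	mlxsw_reg_sbpr_pack(sbpr_pl, 14, MLXSW_REG_SBXX_DIR_EGRESS,
			    MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
	mlxsw_reg_sbpr_desc_set(sbpr_pl, true);	/* descriptor pool, not bytes */
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
}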
Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: Paolo Abeni --- .../net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 98f26f596e30..c68fc8f7ca99 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -202,6 +202,21 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, return 0; } +static int mlxsw_sp_sb_pr_desc_write(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_sbxx_dir dir, + enum mlxsw_reg_sbpr_mode mode, + u32 size, bool infi_size) +{ + char sbpr_pl[MLXSW_REG_SBPR_LEN]; + + /* The FW default descriptor buffer configuration uses only pool 14 for + * descriptors. + */ + mlxsw_reg_sbpr_pack(sbpr_pl, 14, dir, mode, size, infi_size); + mlxsw_reg_sbpr_desc_set(sbpr_pl, true); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); +} + static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u16 local_port, u8 pg_buff, u32 min_buff, u32 max_buff, bool infi_max, u16 pool_index) @@ -775,6 +790,17 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; } + + err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, + MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true); + if (err) + return err; + + err = mlxsw_sp_sb_pr_desc_write(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, + MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true); + if (err) + return err; + return 0; } -- cgit v1.2.3 From eeff214dbfcb96bafbf83607925f35795d62a7aa Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 2 May 2022 17:16:06 +0900 Subject: wfx: avoid flush_workqueue(system_highpri_wq) usage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flushing system-wide workqueues is dangerous and will be forbidden. Replace system_highpri_wq with per "struct wfx_dev" bh_wq. 
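The shape of that conversion, as a minimal sketch (the queue name and WQ_HIGHPRI flag are the ones the patch uses below; the structure and function names here are simplified stand-ins, not the real driver symbols):

#include <linux/workqueue.h>

struct wfx_dev_sketch {
	struct workqueue_struct *bh_wq;
	struct work_struct bh;
};

static int wfx_probe_sketch(struct wfx_dev_sketch *wdev)
{
	/* one dedicated high-priority queue per device, instead of the
	 * shared system_highpri_wq
	 */
	wdev->bh_wq = alloc_workqueue("wfx_bh_wq", WQ_HIGHPRI, 0);
	if (!wdev->bh_wq)
		return -ENOMEM;
	return 0;
}

static void wfx_request_tx_sketch(struct wfx_dev_sketch *wdev)
{
	queue_work(wdev->bh_wq, &wdev->bh);	/* was queue_work(system_highpri_wq, ...) */
}

static void wfx_release_sketch(struct wfx_dev_sketch *wdev)
{
	flush_workqueue(wdev->bh_wq);	/* only waits for this device's work */
	destroy_workqueue(wdev->bh_wq);
}

Flushing or destroying the private queue then affects only this device's bottom-half work, which is what makes the system-wide flush unnecessary.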
Signed-off-by: Tetsuo Handa Acked-by: Jérôme Pouiller Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/f15574a6-aba4-72bc-73af-26fdcdf9fb63@I-love.SAKURA.ne.jp --- drivers/net/wireless/silabs/wfx/bh.c | 6 +++--- drivers/net/wireless/silabs/wfx/hif_tx.c | 2 +- drivers/net/wireless/silabs/wfx/main.c | 6 ++++++ drivers/net/wireless/silabs/wfx/wfx.h | 1 + 4 files changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/silabs/wfx/bh.c b/drivers/net/wireless/silabs/wfx/bh.c index bcea9d5b119c..21dfdcf9cc27 100644 --- a/drivers/net/wireless/silabs/wfx/bh.c +++ b/drivers/net/wireless/silabs/wfx/bh.c @@ -267,7 +267,7 @@ void wfx_bh_request_rx(struct wfx_dev *wdev) wfx_control_reg_read(wdev, &cur); prev = atomic_xchg(&wdev->hif.ctrl_reg, cur); complete(&wdev->hif.ctrl_ready); - queue_work(system_highpri_wq, &wdev->hif.bh); + queue_work(wdev->bh_wq, &wdev->hif.bh); if (!(cur & CTRL_NEXT_LEN_MASK)) dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n", @@ -280,7 +280,7 @@ void wfx_bh_request_rx(struct wfx_dev *wdev) /* Driver want to send data */ void wfx_bh_request_tx(struct wfx_dev *wdev) { - queue_work(system_highpri_wq, &wdev->hif.bh); + queue_work(wdev->bh_wq, &wdev->hif.bh); } /* If IRQ is not available, this function allow to manually poll the control register and simulate @@ -295,7 +295,7 @@ void wfx_bh_poll_irq(struct wfx_dev *wdev) u32 reg; WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ"); - flush_workqueue(system_highpri_wq); + flush_workqueue(wdev->bh_wq); start = ktime_get(); for (;;) { wfx_control_reg_read(wdev, ®); diff --git a/drivers/net/wireless/silabs/wfx/hif_tx.c b/drivers/net/wireless/silabs/wfx/hif_tx.c index ae3cc5919dcd..2b92c227efbc 100644 --- a/drivers/net/wireless/silabs/wfx/hif_tx.c +++ b/drivers/net/wireless/silabs/wfx/hif_tx.c @@ -73,7 +73,7 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct wfx_hif_msg *request, if (no_reply) { /* Chip won't reply. Ensure the wq has send the buffer before to continue. 
*/ - flush_workqueue(system_highpri_wq); + flush_workqueue(wdev->bh_wq); ret = 0; goto end; } diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c index b93b16b900c8..bbfd3fa51921 100644 --- a/drivers/net/wireless/silabs/wfx/main.c +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -345,6 +345,10 @@ int wfx_probe(struct wfx_dev *wdev) wdev->pdata.gpio_wakeup = NULL; wdev->poll_irq = true; + wdev->bh_wq = alloc_workqueue("wfx_bh_wq", WQ_HIGHPRI, 0); + if (!wdev->bh_wq) + return -ENOMEM; + wfx_bh_register(wdev); err = wfx_init_device(wdev); @@ -458,6 +462,7 @@ irq_unsubscribe: wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); bh_unregister: wfx_bh_unregister(wdev); + destroy_workqueue(wdev->bh_wq); return err; } @@ -467,6 +472,7 @@ void wfx_release(struct wfx_dev *wdev) wfx_hif_shutdown(wdev); wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv); wfx_bh_unregister(wdev); + destroy_workqueue(wdev->bh_wq); } static int __init wfx_core_init(void) diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h index 6594cc647c2f..6f5e95dae21f 100644 --- a/drivers/net/wireless/silabs/wfx/wfx.h +++ b/drivers/net/wireless/silabs/wfx/wfx.h @@ -57,6 +57,7 @@ struct wfx_dev { struct mutex rx_stats_lock; struct wfx_hif_tx_power_loop_info tx_power_loop_info; struct mutex tx_power_loop_info_lock; + struct workqueue_struct *bh_wq; }; struct wfx_vif { -- cgit v1.2.3 From 5309cd5ec9b46c893ac65618ce0507e53ca68347 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 19:59:54 +0800 Subject: rtw89: 8852c: rfk: get calibrated channels to notify firmware The commit 16b44ed0ffd3 ("rtw89: add RF H2C to notify firmware") is to add firmware command, and this commit is to prepare the channels. Then, firmware can get proper channels. 
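The bookkeeping is a small first-free-slot-or-wrap update over the MCC table; a minimal standalone sketch (the channel-0-means-unused convention and the update rule follow the diff below; the struct is reduced to the relevant fields and the table size is assumed to be 2):

#include <linux/types.h>

#define IQK_CHS_NR 2	/* stands in for RTW89_IQK_CHS_NR */

struct mcc_info_sketch {
	u8 ch[IQK_CHS_NR];
	u8 band[IQK_CHS_NR];
	u8 table_idx;
};

static void mcc_record_channel(struct mcc_info_sketch *mcc, u8 ch, u8 band)
{
	u8 idx = mcc->table_idx;
	int i;

	/* Stop at the first unused slot (ch == 0).  If every slot is in
	 * use, the index wraps back to where it started and that slot is
	 * reused.
	 */
	for (i = 0; i < IQK_CHS_NR; i++) {
		if (mcc->ch[idx] == 0)
			break;
		if (++idx >= IQK_CHS_NR)
			idx = 0;
	}

	mcc->table_idx = idx;
	mcc->ch[idx] = ch;
	mcc->band[idx] = band;
}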
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 1 + drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 18 ++++++++++++++++++ drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 1 + 3 files changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 4fb3de71d032..44b694ccada9 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1786,6 +1786,7 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev) { enum rtw89_phy_idx phy_idx = RTW89_PHY_0; + rtw8852c_mcc_get_ch_info(rtwdev, phy_idx); rtw8852c_rx_dck(rtwdev, phy_idx, false); rtw8852c_iqk(rtwdev, phy_idx); rtw8852c_tssi(rtwdev, phy_idx); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index ffc71ad24927..dfb9caba9bc4 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -3809,6 +3809,24 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, param->bandwidth); } +void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + struct rtw89_mcc_info *mcc_info = &rtwdev->mcc; + u8 idx = mcc_info->table_idx; + int i; + + for (i = 0; i < RTW89_IQK_CHS_NR; i++) { + if (mcc_info->ch[idx] == 0) + break; + if (++idx >= RTW89_IQK_CHS_NR) + idx = 0; + } + + mcc_info->table_idx = idx; + mcc_info->ch[idx] = rtwdev->hal.current_channel; + mcc_info->band[idx] = rtwdev->hal.current_band_type; +} + void rtw8852c_rck(struct rtw89_dev *rtwdev) { u8 path; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h index e42fb1a4965e..c32756f0c01a 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h @@ -7,6 +7,7 @@ #include "core.h" +void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); void rtw8852c_rck(struct rtw89_dev *rtwdev); void rtw8852c_dack(struct rtw89_dev *rtwdev); void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); -- cgit v1.2.3 From e212d5d48d8593bdff076e5b9412c0152a58196e Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 19:59:55 +0800 Subject: rtw89: 8852c: add chip_ops::bb_ctrl_btc_preagc Add to configure BT share RX path and related settings. 
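The new chip op added below applies one of two fixed register presets depending on whether BT shares the RX path. Purely as an illustration of that structure (this table-driven form is not what the patch uses; the single entry shown is taken from the diff, the remaining entries are elided):

struct btc_preagc_reg {
	u32 addr;
	u32 mask;
	u32 val_bt_on;	/* BT sharing the RX path */
	u32 val_bt_off;	/* WiFi-only operation */
};

static const struct btc_preagc_reg preagc_regs[] = {
	{ R_PATH0_RXBB_V1, B_PATH0_RXBB_MSK_V1, 0xf, 0x60 },
	/* ... FIR/LNA/TIA/backoff entries elided ... */
};

static void bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(preagc_regs); i++)
		rtw89_phy_write32_mask(rtwdev, preagc_regs[i].addr,
				       preagc_regs[i].mask,
				       bt_en ? preagc_regs[i].val_bt_on :
					       preagc_regs[i].val_bt_off);
}

The actual implementation below writes each register explicitly in two branches.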
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 64 +++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 44b694ccada9..2a8271aa6cec 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2338,6 +2338,69 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, } } +static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) +{ + if (bt_en) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1, + B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1, + B_PATH1_FRC_FIR_TYPE_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH0_RXBB_V1, + B_PATH0_RXBB_MSK_V1, 0xf); + rtw89_phy_write32_mask(rtwdev, R_PATH1_RXBB_V1, + B_PATH1_RXBB_MSK_V1, 0xf); + rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1, + B_PATH0_G_LNA6_OP1DB_V1, 0x80); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x80); + rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA0_LNA6_OP1DB_V1, + B_PATH0_G_TIA0_LNA6_OP1DB_V1, 0x80); + rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA1_LNA6_OP1DB_V1, + B_PATH0_G_TIA1_LNA6_OP1DB_V1, 0x80); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x80); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA1_LNA6_OP1DB_V1, + B_PATH1_G_TIA1_LNA6_OP1DB_V1, 0x80); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_BACKOFF_V1, + B_PATH0_BT_BACKOFF_V1, 0x780D1E); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_BACKOFF_V1, + B_PATH1_BT_BACKOFF_V1, 0x780D1E); + rtw89_phy_write32_mask(rtwdev, R_P0_BACKOFF_IBADC_V1, + B_P0_BACKOFF_IBADC_V1, 0x34); + rtw89_phy_write32_mask(rtwdev, R_P1_BACKOFF_IBADC_V1, + B_P1_BACKOFF_IBADC_V1, 0x34); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_FRC_FIR_TYPE_V1, + B_PATH0_FRC_FIR_TYPE_MSK_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_FRC_FIR_TYPE_V1, + B_PATH1_FRC_FIR_TYPE_MSK_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_RXBB_V1, + B_PATH0_RXBB_MSK_V1, 0x60); + rtw89_phy_write32_mask(rtwdev, R_PATH1_RXBB_V1, + B_PATH1_RXBB_MSK_V1, 0x60); + rtw89_phy_write32_mask(rtwdev, R_PATH0_G_LNA6_OP1DB_V1, + B_PATH0_G_LNA6_OP1DB_V1, 0x1a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x1a); + rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA0_LNA6_OP1DB_V1, + B_PATH0_G_TIA0_LNA6_OP1DB_V1, 0x2a); + rtw89_phy_write32_mask(rtwdev, R_PATH0_G_TIA1_LNA6_OP1DB_V1, + B_PATH0_G_TIA1_LNA6_OP1DB_V1, 0x2a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA1_LNA6_OP1DB_V1, + B_PATH1_G_TIA1_LNA6_OP1DB_V1, 0x2a); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_BACKOFF_V1, + B_PATH0_BT_BACKOFF_V1, 0x79E99E); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_BACKOFF_V1, + B_PATH1_BT_BACKOFF_V1, 0x79E99E); + rtw89_phy_write32_mask(rtwdev, R_P0_BACKOFF_IBADC_V1, + B_P0_BACKOFF_IBADC_V1, 0x26); + rtw89_phy_write32_mask(rtwdev, R_P1_BACKOFF_IBADC_V1, + B_P1_BACKOFF_IBADC_V1, 0x26); + } +} + static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; @@ -2747,6 +2810,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .init_txpwr_unit = 
rtw8852c_init_txpwr_unit, .get_thermal = rtw8852c_get_thermal, .query_ppdu = rtw8852c_query_ppdu, + .bb_ctrl_btc_preagc = rtw8852c_bb_ctrl_btc_preagc, .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, -- cgit v1.2.3 From 78af3cc673567c0978a8d4d610e5b8e88b01717c Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 19:59:56 +0800 Subject: rtw89: 8852c: add basic and remaining chip_info The chip_info include BT coexistence tables, size and number of hardware components, and supported functions. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 80 ++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 2a8271aa6cec..a6dd78ee30f0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2662,6 +2662,48 @@ s8 rtw8852c_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) return clamp_t(s8, val, -100, 0) + 100; } +static const struct rtw89_btc_rf_trx_para rtw89_btc_8852c_rf_ul[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* the below id is for non-shared-antenna free-run */ + {6, 1, 0, 7}, + {13, 1, 0, 7}, + {13, 1, 0, 7} +}; + +static const struct rtw89_btc_rf_trx_para rtw89_btc_8852c_rf_dl[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* the below id is for non-shared-antenna free-run */ + {255, 1, 0, 7}, + {255, 1, 0, 7}, + {255, 1, 0, 7} +}; + +static const u8 rtw89_btc_8852c_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30}; +static const u8 rtw89_btc_8852c_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {40, 36, 31, 28}; + +static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852c_mon_reg[] = { + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda00), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda04), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda38), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda44), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda48), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd200), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xd220), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980), +}; + static void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev) { @@ -2795,10 +2837,13 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .disable_bb_rf = rtw8852c_mac_disable_bb_rf, .bb_reset = rtw8852c_bb_reset, .bb_sethw = rtw8852c_bb_sethw, + .read_rf = rtw89_phy_read_rf_v1, + .write_rf = rtw89_phy_write_rf_v1, .set_channel = rtw8852c_set_channel, .set_channel_help = rtw8852c_set_channel_help, .read_efuse = rtw8852c_read_efuse, .read_phycap = rtw8852c_read_phycap, + .fem_setup = NULL, .rfk_init = rtw8852c_rfk_init, .rfk_channel = rtw8852c_rfk_channel, .rfk_band_changed = 
rtw8852c_rfk_band_changed, @@ -2809,12 +2854,11 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl, .init_txpwr_unit = rtw8852c_init_txpwr_unit, .get_thermal = rtw8852c_get_thermal, + .ctrl_btg = rtw8852c_ctrl_btg, .query_ppdu = rtw8852c_query_ppdu, .bb_ctrl_btc_preagc = rtw8852c_bb_ctrl_btc_preagc, - .read_rf = rtw89_phy_read_rf_v1, - .write_rf = rtw89_phy_write_rf_v1, - .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path, + .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852c_pwr_on_func, .pwr_off_func = rtw8852c_pwr_off_func, .fill_txdesc = rtw89_core_fill_txdesc_v1, @@ -2839,6 +2883,10 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .chip_id = RTL8852C, .ops = &rtw8852c_chip_ops, .fw_name = "rtw89/rtw8852c_fw.bin", + .fifo_size = 458752, + .max_amsdu_limit = 8000, + .dis_2g_40m_ul_ofdma = false, + .rsvd_ple_ofst = 0x6f800, .hfc_param_ini = rtw8852c_hfc_param_ini_pcie, .dle_mem = rtw8852c_dle_mem_pcie, .rf_base_addr = {0xe000, 0xf000}, @@ -2860,7 +2908,17 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .txpwr_factor_mac = 1, .dig_table = NULL, .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table, + .support_bands = BIT(NL80211_BAND_2GHZ) | + BIT(NL80211_BAND_5GHZ) | + BIT(NL80211_BAND_6GHZ), + .support_bw160 = true, .hw_sec_hdr = true, + .rf_path_num = 2, + .tx_nss = 2, + .rx_nss = 2, + .acam_num = 128, + .bcam_num = 20, + .scam_num = 128, .sec_ctrl_efuse_size = 4, .physical_efuse_size = 1216, .logical_efuse_size = 2048, @@ -2869,6 +2927,22 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .dav_log_efuse_size = 16, .phycap_addr = 0x590, .phycap_size = 0x60, + .para_ver = 0x05050764, + .wlcx_desired = 0x05050000, + .btcx_desired = 0x5, + .scbd = 0x1, + .mailbox = 0x1, + .afh_guard_ch = 6, + .wl_rssi_thres = rtw89_btc_8852c_wl_rssi_thres, + .bt_rssi_thres = rtw89_btc_8852c_bt_rssi_thres, + .rssi_tol = 2, + .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852c_mon_reg), + .mon_reg = rtw89_btc_8852c_mon_reg, + .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_ul), + .rf_para_ulink = rtw89_btc_8852c_rf_ul, + .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_dl), + .rf_para_dlink = rtw89_btc_8852c_rf_dl, + .ps_mode_supported = 0, .low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) | BIT(RTW89_PS_MODE_PWR_GATED), .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1, -- cgit v1.2.3 From 39a7652103ff5f052b5eff944a1ed3d607f747f7 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 19:59:57 +0800 Subject: rtw89: ps: fine tune polling interval while changing low power mode By experiments, it spends ~45/1090~2480us to enter/leave low power mode, so the old polling interval 1000us can waste time. Use smaller polling interval depends on experimental results to reduce the time to transition state. 
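For readability of the bare numbers in the diff below, a usage sketch of read_poll_timeout_atomic() from <linux/iopoll.h> (everything except the macro itself is a placeholder): its 4th and 5th arguments are the poll interval and the timeout in microseconds. With the old fixed 1000 us interval, a ~45 us entry into low power is only observed at the first 1000 us check, so the measured latency was dominated by polling rather than by the firmware; per-direction intervals of 10 us and 150 us bound that overshoot while keeping the 15 ms timeout.

static int wait_power_state(struct my_dev *dev, bool enter, int target)
{
	unsigned long delay_us = enter ? 10 : 150;	/* poll interval */
	int ret;

	/* read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us,
	 *			    delay_before_read, args...)
	 */
	return read_poll_timeout_atomic(check_power_state, ret, !ret,
					delay_us, 15000, false, dev, target);
}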
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 05b94842fe66..07f6634d56a0 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1050,6 +1050,7 @@ static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter) { enum rtw89_rpwm_req_pwr_state state; + unsigned long delay = enter ? 10 : 150; int ret; if (enter) @@ -1059,7 +1060,7 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter) rtw89_mac_send_rpwm(rtwdev, state, false); ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret, - 1000, 15000, false, rtwdev, state); + delay, 15000, false, rtwdev, state); if (ret) rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n", enter ? "entering" : "leaving"); -- cgit v1.2.3 From 62440fbefad1e2a412a99b1a662d50daec422296 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 19:59:58 +0800 Subject: rtw89: correct AID settings of beamformee Without this fix, it would cause IOT issue due to AID mismatch. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 4 ++++ drivers/net/wireless/realtek/rtw89/reg.h | 5 +++++ 2 files changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 07f6634d56a0..a06ca65b339f 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -4240,6 +4240,10 @@ static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); + reg = rtw89_mac_reg_by_idx(R_AX_CSIRPT_OPTION, mac_idx); + rtw89_write32_set(rtwdev, reg, + B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN); + return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 6f5d1012c90c..dff7992659dc 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2842,6 +2842,11 @@ #define R_AX_RX_SR_CTRL_C1 0xEE4A #define B_AX_SR_EN BIT(0) +#define R_AX_CSIRPT_OPTION 0xCE64 +#define R_AX_CSIRPT_OPTION_C1 0xEE64 +#define B_AX_CSIPRT_HESU_AID_EN BIT(25) +#define B_AX_CSIPRT_VHTSU_AID_EN BIT(24) + #define R_AX_RX_STATE_MONITOR 0xCEF0 #define R_AX_RX_STATE_MONITOR_C1 0xEEF0 #define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0) -- cgit v1.2.3 From 55cf5b7e2d970d2252df67141b203758f0f16ed8 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 19:59:59 +0800 Subject: rtw89: 8852c: correct register definitions used by 8852c First one could affect SER because of false alarm event. Second one can affect spur elimination. 
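On the second fix (the B_SEG0CSI_IDX width in the diff below): masked register writes drop any value bits outside the mask, so a field defined one bit too narrow silently loses the top bit of the CSI segment index. A standalone sketch of the effect (plain C, not driver code):

#define SEG0CSI_IDX_OLD	0x7ffU	/* GENMASK(10, 0) */
#define SEG0CSI_IDX_NEW	0xfffU	/* GENMASK(11, 0) */

unsigned int idx = 0x800;				/* any index with bit 11 set */
unsigned int old_write = idx & SEG0CSI_IDX_OLD;		/* 0x000 - index lost */
unsigned int new_write = idx & SEG0CSI_IDX_NEW;		/* 0x800 - index preserved */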
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index dff7992659dc..5c4de043845b 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -2605,7 +2605,6 @@ B_AX_TMAC_HWSIGB_GEN | \ B_AX_TMAC_RXTB | \ B_AX_TMAC_MIMO_CTRL | \ - B_AX_RMAC_CSI | \ B_AX_RMAC_FTM) #define R_AX_WMAC_TX_TF_INFO_0 0xCCD0 @@ -3667,7 +3666,7 @@ #define R_DCFO 0x4264 #define B_DCFO GENMASK(1, 0) #define R_SEG0CSI 0x42AC -#define B_SEG0CSI_IDX GENMASK(10, 0) +#define B_SEG0CSI_IDX GENMASK(11, 0) #define R_SEG0CSI_EN 0x42C4 #define B_SEG0CSI_EN BIT(23) #define R_BSS_CLR_MAP 0x43ac -- cgit v1.2.3 From 68bf56e3b020e913c20276576317f9df9fa02fa7 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 20:00:00 +0800 Subject: rtw89: 8852c: fix warning of FIELD_PREP() mask type To fix the compiler warning of clang with i386 config, but not complain by gcc: __write_ctrl(R_AX_PWR_RATE_CTRL, B_AX_FORCE_PWR_BY_RATE_VALUE_MASK, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/net/wireless/realtek/rtw89/rtw8852c.c:2621:13: note: expanded from macro '__write_ctrl' u32 _wrt = FIELD_PREP(__msk, _val); \ ^~~~~~~~~~~~~~~~~~~~~~~ include/linux/bitfield.h:114:3: note: expanded from macro 'FIELD_PREP' __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/bitfield.h:71:53: note: expanded from macro '__BF_FIELD_CHECK' BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~ note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all) include/linux/compiler_types.h:352:22: note: expanded from macro 'compiletime_assert' _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) ~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/compiler_types.h:340:23: note: expanded from macro '_compiletime_assert' __compiletime_assert(condition, msg, prefix, suffix) ~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/compiler_types.h:332:9: note: expanded from macro '__compiletime_assert' if (!(condition)) \ ^~~~~~~~~ Reported-by: kernel test robot Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-8-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index a6dd78ee30f0..295b824dbecc 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2616,16 +2616,14 @@ rtw8852c_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val) #define __write_ctrl(_reg, _msk, _val, _en, _cond) \ do { \ - const typeof(_msk) __msk = _msk; \ - const typeof(_en) __en = _en; \ - u32 _wrt = FIELD_PREP(__msk, _val); \ - BUILD_BUG_ON((__msk & __en) != 0); \ + u32 _wrt = FIELD_PREP(_msk, _val); \ + BUILD_BUG_ON((_msk & _en) != 0); \ if (_cond) \ - _wrt |= __en; \ + _wrt |= _en; \ else \ - _wrt &= ~__en; \ + _wrt &= ~_en; \ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, _reg, \ 
- __msk | __en, _wrt); \ + _msk | _en, _wrt); \ } while (0) switch (arg.ctrl_all_time) { -- cgit v1.2.3 From 7ba49f4c6896d83b3841c0b046a0a7b1e97cc0dd Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Tue, 3 May 2022 20:00:01 +0800 Subject: rtw89: 8852c: add 8852ce to Makefile and Kconfig This initial vesion is usable now. It can support STA, AP and monitor modes, so we can add 8852ce to Kconfig and Makefile. We are still working on some features, such as deep power save, and BT coexistence. But, this version still can have a good WiFi-only performance already, and will continue to fine tune power consumption. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503120001.79272-9-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/Kconfig | 18 ++++++++++++++++-- drivers/net/wireless/realtek/rtw89/Makefile | 9 +++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig index dd02b6a6790e..93e09400aac4 100644 --- a/drivers/net/wireless/realtek/rtw89/Kconfig +++ b/drivers/net/wireless/realtek/rtw89/Kconfig @@ -19,8 +19,11 @@ config RTW89_PCI config RTW89_8852A tristate +config RTW89_8852C + tristate + config RTW89_8852AE - tristate "Realtek 8852AE PCI wireless network adapter" + tristate "Realtek 8852AE PCI wireless network (Wi-Fi 6) adapter" depends on PCI select RTW89_CORE select RTW89_PCI @@ -28,7 +31,18 @@ config RTW89_8852AE help Select this option will enable support for 8852AE chipset - 802.11ax PCIe wireless network adapter + 802.11ax PCIe wireless network (Wi-Fi 6) adapter + +config RTW89_8852CE + tristate "Realtek 8852CE PCI wireless network (Wi-Fi 6E) adapter" + depends on PCI + select RTW89_CORE + select RTW89_PCI + select RTW89_8852C + help + Select this option will enable support for 8852CE chipset + + 802.11ax PCIe wireless network (Wi-Fi 6E) adapter config RTW89_DEBUG bool diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile index 012ae60c0b81..3006482d25c7 100644 --- a/drivers/net/wireless/realtek/rtw89/Makefile +++ b/drivers/net/wireless/realtek/rtw89/Makefile @@ -23,6 +23,15 @@ rtw89_8852a-objs := rtw8852a.o \ obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o rtw89_8852ae-objs := rtw8852ae.o +obj-$(CONFIG_RTW89_8852C) += rtw89_8852c.o +rtw89_8852c-objs := rtw8852c.o \ + rtw8852c_table.o \ + rtw8852c_rfk.o \ + rtw8852c_rfk_table.o + +obj-$(CONFIG_RTW89_8852CE) += rtw89_8852ce.o +rtw89_8852ce-objs := rtw8852ce.o + rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o -- cgit v1.2.3 From f9eec4947add999e1251bf14365a48a655b786a4 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Wed, 4 May 2022 07:34:15 +0300 Subject: ath11k: Add support for targets without trustzone Add the support to attach WCN6750 and map iommu domain for targets which do not have the support of TrustZone. 
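As a condensed illustration of the new probe path (not the driver code; the full version with devicetree parsing, the "wifi-firmware" platform device and complete error unwinding is ath11k_ahb_fw_resources_init() in the diff below), the firmware carve-outs are identity mapped into a dedicated IOMMU domain when no TrustZone is present, using the <linux/iommu.h> APIs:

static int map_fw_regions(struct device *fw_dev,
			  phys_addr_t msa, size_t msa_sz,
			  phys_addr_t ce, size_t ce_sz)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&platform_bus_type);
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, fw_dev);
	if (ret)
		goto free_dom;

	/* iova == paddr, as in the patch */
	ret = iommu_map(dom, msa, msa, msa_sz, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	ret = iommu_map(dom, ce, ce, ce_sz, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto unmap_msa;

	return 0;

unmap_msa:
	iommu_unmap(dom, msa, msa_sz);
detach:
	iommu_detach_device(dom, fw_dev);
free_dom:
	iommu_domain_free(dom);
	return ret;
}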
Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00573-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220328062032.28881-1-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/ahb.c | 178 +++++++++++++++++++++++++++++++++- drivers/net/wireless/ath/ath11k/ahb.h | 9 ++ 2 files changed, 186 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 050bda828966..fa11807f48a9 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include "ahb.h" #include "debug.h" #include "hif.h" @@ -757,6 +759,172 @@ static int ath11k_ahb_setup_resources(struct ath11k_base *ab) return 0; } +static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab) +{ + struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); + struct device *dev = ab->dev; + struct device_node *node; + struct resource r; + int ret; + + node = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!node) + return -ENOENT; + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) { + dev_err(dev, "failed to resolve msa fixed region\n"); + return ret; + } + + ab_ahb->fw.msa_paddr = r.start; + ab_ahb->fw.msa_size = resource_size(&r); + + node = of_parse_phandle(dev->of_node, "memory-region", 1); + if (!node) + return -ENOENT; + + ret = of_address_to_resource(node, 0, &r); + of_node_put(node); + if (ret) { + dev_err(dev, "failed to resolve ce fixed region\n"); + return ret; + } + + ab_ahb->fw.ce_paddr = r.start; + ab_ahb->fw.ce_size = resource_size(&r); + + return 0; +} + +static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab) +{ + struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); + struct device *host_dev = ab->dev; + struct platform_device_info info = {0}; + struct iommu_domain *iommu_dom; + struct platform_device *pdev; + struct device_node *node; + int ret; + + /* Chipsets not requiring MSA need not initialize + * MSA resources, return success in such cases. 
+ */ + if (!ab->hw_params.fixed_fw_mem) + return 0; + + ret = ath11k_ahb_setup_msa_resources(ab); + if (ret) { + ath11k_err(ab, "failed to setup msa resources\n"); + return ret; + } + + node = of_get_child_by_name(host_dev->of_node, "wifi-firmware"); + if (!node) { + ab_ahb->fw.use_tz = true; + return 0; + } + + info.fwnode = &node->fwnode; + info.parent = host_dev; + info.name = node->name; + info.dma_mask = DMA_BIT_MASK(32); + + pdev = platform_device_register_full(&info); + if (IS_ERR(pdev)) { + of_node_put(node); + return PTR_ERR(pdev); + } + + ret = of_dma_configure(&pdev->dev, node, true); + if (ret) { + ath11k_err(ab, "dma configure fail: %d\n", ret); + goto err_unregister; + } + + ab_ahb->fw.dev = &pdev->dev; + + iommu_dom = iommu_domain_alloc(&platform_bus_type); + if (!iommu_dom) { + ath11k_err(ab, "failed to allocate iommu domain\n"); + ret = -ENOMEM; + goto err_unregister; + } + + ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev); + if (ret) { + ath11k_err(ab, "could not attach device: %d\n", ret); + goto err_iommu_free; + } + + ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr, + ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size, + IOMMU_READ | IOMMU_WRITE); + if (ret) { + ath11k_err(ab, "failed to map firmware region: %d\n", ret); + goto err_iommu_detach; + } + + ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr, + ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size, + IOMMU_READ | IOMMU_WRITE); + if (ret) { + ath11k_err(ab, "failed to map firmware CE region: %d\n", ret); + goto err_iommu_unmap; + } + + ab_ahb->fw.use_tz = false; + ab_ahb->fw.iommu_domain = iommu_dom; + of_node_put(node); + + return 0; + +err_iommu_unmap: + iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); + +err_iommu_detach: + iommu_detach_device(iommu_dom, ab_ahb->fw.dev); + +err_iommu_free: + iommu_domain_free(iommu_dom); + +err_unregister: + platform_device_unregister(pdev); + of_node_put(node); + + return ret; +} + +static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab) +{ + struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); + struct iommu_domain *iommu; + size_t unmapped_size; + + if (ab_ahb->fw.use_tz) + return 0; + + iommu = ab_ahb->fw.iommu_domain; + + unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); + if (unmapped_size != ab_ahb->fw.msa_size) + ath11k_err(ab, "failed to unmap firmware: %zu\n", + unmapped_size); + + unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size); + if (unmapped_size != ab_ahb->fw.ce_size) + ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n", + unmapped_size); + + iommu_detach_device(iommu, ab_ahb->fw.dev); + iommu_domain_free(iommu); + + platform_device_unregister(to_platform_device(ab_ahb->fw.dev)); + + return 0; +} + static int ath11k_ahb_probe(struct platform_device *pdev) { struct ath11k_base *ab; @@ -816,10 +984,14 @@ static int ath11k_ahb_probe(struct platform_device *pdev) if (ret) goto err_core_free; - ret = ath11k_hal_srng_init(ab); + ret = ath11k_ahb_fw_resources_init(ab); if (ret) goto err_core_free; + ret = ath11k_hal_srng_init(ab); + if (ret) + goto err_fw_deinit; + ret = ath11k_ce_alloc_pipes(ab); if (ret) { ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); @@ -856,6 +1028,9 @@ err_ce_free: err_hal_srng_deinit: ath11k_hal_srng_deinit(ab); +err_fw_deinit: + ath11k_ahb_fw_resource_deinit(ab); + err_core_free: ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); @@ -891,6 +1066,7 @@ static int ath11k_ahb_remove(struct platform_device *pdev) qmi_fail: ath11k_ahb_free_irq(ab); 
ath11k_hal_srng_deinit(ab); + ath11k_ahb_fw_resource_deinit(ab); ath11k_ce_free_pipes(ab); ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h index 51e6e4a5f686..58a945411c5b 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.h +++ b/drivers/net/wireless/ath/ath11k/ahb.h @@ -12,6 +12,15 @@ struct ath11k_base; struct ath11k_ahb { struct rproc *tgt_rproc; + struct { + struct device *dev; + struct iommu_domain *iommu_domain; + dma_addr_t msa_paddr; + u32 msa_size; + dma_addr_t ce_paddr; + u32 ce_size; + bool use_tz; + } fw; }; static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab) -- cgit v1.2.3 From 301e0be800be425c5fa4f384f9f0e9b23109f809 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 6 Mar 2022 14:41:38 +0200 Subject: net/mlx5: Simplify IPsec flow steering init/cleanup functions Remove multiple function wrappers to make sure that IPsec FS initialization and cleanup functions present in one place to help with code readability. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 4 +- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 73 +++++++--------------- .../mellanox/mlx5/core/en_accel/ipsec_fs.h | 4 +- 3 files changed, 27 insertions(+), 54 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index c280a18ff002..b6e430d53fae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -424,7 +424,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) } priv->ipsec = ipsec; - mlx5e_accel_ipsec_fs_init(priv); + mlx5e_accel_ipsec_fs_init(ipsec); netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return 0; } @@ -436,7 +436,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) if (!ipsec) return; - mlx5e_accel_ipsec_fs_cleanup(priv); + mlx5e_accel_ipsec_fs_cleanup(ipsec); destroy_workqueue(ipsec->wq); kfree(ipsec); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 66b529e36ea1..029a9a70ba0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -632,81 +632,54 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, tx_del_rule(priv, ipsec_rule); } -static void fs_cleanup_tx(struct mlx5e_priv *priv) -{ - mutex_destroy(&priv->ipsec->tx_fs->mutex); - WARN_ON(priv->ipsec->tx_fs->refcnt); - kfree(priv->ipsec->tx_fs); - priv->ipsec->tx_fs = NULL; -} - -static void fs_cleanup_rx(struct mlx5e_priv *priv) +void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) { struct mlx5e_accel_fs_esp_prot *fs_prot; struct mlx5e_accel_fs_esp *accel_esp; enum accel_fs_esp_type i; - accel_esp = priv->ipsec->rx_fs; + if (!ipsec->rx_fs) + return; + + mutex_destroy(&ipsec->tx_fs->mutex); + WARN_ON(ipsec->tx_fs->refcnt); + kfree(ipsec->tx_fs); + + accel_esp = ipsec->rx_fs; for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) { fs_prot = &accel_esp->fs_prot[i]; mutex_destroy(&fs_prot->prot_mutex); WARN_ON(fs_prot->refcnt); } - kfree(priv->ipsec->rx_fs); - priv->ipsec->rx_fs = NULL; -} - -static int fs_init_tx(struct mlx5e_priv *priv) -{ - priv->ipsec->tx_fs = - kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL); - if (!priv->ipsec->tx_fs) - return 
-ENOMEM; - - mutex_init(&priv->ipsec->tx_fs->mutex); - return 0; + kfree(ipsec->rx_fs); } -static int fs_init_rx(struct mlx5e_priv *priv) +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) { struct mlx5e_accel_fs_esp_prot *fs_prot; struct mlx5e_accel_fs_esp *accel_esp; enum accel_fs_esp_type i; + int err = -ENOMEM; - priv->ipsec->rx_fs = - kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL); - if (!priv->ipsec->rx_fs) + ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL); + if (!ipsec->tx_fs) return -ENOMEM; - accel_esp = priv->ipsec->rx_fs; + ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL); + if (!ipsec->rx_fs) + goto err_rx; + + mutex_init(&ipsec->tx_fs->mutex); + + accel_esp = ipsec->rx_fs; for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) { fs_prot = &accel_esp->fs_prot[i]; mutex_init(&fs_prot->prot_mutex); } return 0; -} - -void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) -{ - if (!priv->ipsec->rx_fs) - return; - - fs_cleanup_tx(priv); - fs_cleanup_rx(priv); -} - -int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) -{ - int err; - - err = fs_init_tx(priv); - if (err) - return err; - - err = fs_init_rx(priv); - if (err) - fs_cleanup_tx(priv); +err_rx: + kfree(ipsec->tx_fs); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h index b70953979709..e4eeb2ba21c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h @@ -9,8 +9,8 @@ #include "ipsec_offload.h" #include "en/fs.h" -void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv); -int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv); +void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, struct mlx5_accel_esp_xfrm_attrs *attrs, u32 ipsec_obj_id, -- cgit v1.2.3 From 9af1968ee13b33b0bff7def9c4861f1730389a7b Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 6 Mar 2022 15:21:22 +0200 Subject: net/mlx5: Check IPsec TX flow steering namespace in advance Ensure that flow steering is usable as early as possible, to understand if crypto IPsec is supported or not. 
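A minimal sketch of the pattern applied here (names other than mlx5_get_flow_namespace() and the namespace enum are placeholders, not the mlx5 structures): resolve the egress-kernel steering namespace once at init time and cache it, so a missing capability is reported as -EOPNOTSUPP before any offload state exists, instead of surfacing later when the first TX rule is created.

struct my_ipsec_tx {
	struct mlx5_flow_namespace *ns;	/* cached at init, used on TX rule add */
};

static int my_ipsec_tx_init(struct mlx5_core_dev *mdev, struct my_ipsec_tx *tx)
{
	tx->ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
	if (!tx->ns)
		return -EOPNOTSUPP;	/* no egress-kernel steering on this device */

	return 0;
}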
Reviewed-by: Raed Salem Reviewed-by: Saeed Mahameed Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 1 + .../net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 16 +++++++++------- 4 files changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 678ffbb48a25..4130a871de61 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -164,7 +164,6 @@ struct mlx5e_ptp_fs; struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; - struct mlx5_flow_namespace *egress_ns; #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_steering ethtool; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index b6e430d53fae..40700bf61924 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -415,6 +415,7 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) hash_init(ipsec->sadb_rx); spin_lock_init(&ipsec->sadb_rx_lock); + ipsec->mdev = priv->mdev; ipsec->en_priv = priv; ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, priv->netdev->name); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index a0e9dade09e9..bbf48d4616f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -61,6 +61,7 @@ struct mlx5e_accel_fs_esp; struct mlx5e_ipsec_tx; struct mlx5e_ipsec { + struct mlx5_core_dev *mdev; struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); spinlock_t sadb_rx_lock; /* Protects sadb_rx */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 029a9a70ba0e..55fb6d4cf4ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -35,6 +35,7 @@ struct mlx5e_accel_fs_esp { }; struct mlx5e_ipsec_tx { + struct mlx5_flow_namespace *ns; struct mlx5_flow_table *ft; struct mutex mutex; /* Protect IPsec TX steering */ u32 refcnt; @@ -338,15 +339,9 @@ static int tx_create(struct mlx5e_priv *priv) struct mlx5_flow_table *ft; int err; - priv->fs.egress_ns = - mlx5_get_flow_namespace(priv->mdev, - MLX5_FLOW_NAMESPACE_EGRESS_KERNEL); - if (!priv->fs.egress_ns) - return -EOPNOTSUPP; - ft_attr.max_fte = NUM_IPSEC_FTE; ft_attr.autogroup.max_num_groups = 1; - ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr); + ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr); if (IS_ERR(ft)) { err = PTR_ERR(ft); netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err); @@ -658,9 +653,15 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) { struct mlx5e_accel_fs_esp_prot *fs_prot; struct mlx5e_accel_fs_esp *accel_esp; + struct mlx5_flow_namespace *ns; enum accel_fs_esp_type i; int err = -ENOMEM; + ns = mlx5_get_flow_namespace(ipsec->mdev, + MLX5_FLOW_NAMESPACE_EGRESS_KERNEL); + if (!ns) + return -EOPNOTSUPP; + ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL); if (!ipsec->tx_fs) return 
-ENOMEM; @@ -670,6 +671,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) goto err_rx; mutex_init(&ipsec->tx_fs->mutex); + ipsec->tx_fs->ns = ns; accel_esp = ipsec->rx_fs; for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) { -- cgit v1.2.3 From 021a429bdbde93be504adcea79a81a3f19349483 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 1 Mar 2022 11:55:25 +0200 Subject: net/mlx5: Don't hide fallback to software IPsec in FS code The XFRM code performs fallback to software IPsec if .xdo_dev_state_add() returns -EOPNOTSUPP. This is what mlx5 did very deep in its stack trace, despite have all the knowledge that IPsec is not going to work in very early stage. This is achieved by making sure that priv->ipsec pointer is valid for fully working and supported hardware crypto IPsec engine. In case, the hardware IPsec is not supported, the XFRM code will set NULL to xso->dev and it will prevent from calls to various .xdo_dev_state_*() callbacks. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 41 +++++++++------------- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 6 ---- 2 files changed, 17 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 40700bf61924..b04d5de91d87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -43,17 +43,7 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) { - struct mlx5e_ipsec_sa_entry *sa; - - if (!x) - return NULL; - - sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; - if (!sa) - return NULL; - - WARN_ON(sa->x != x); - return sa; + return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle; } struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec, @@ -306,6 +296,8 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) int err; priv = netdev_priv(netdev); + if (!priv->ipsec) + return -EOPNOTSUPP; err = mlx5e_xfrm_validate_state(x); if (err) @@ -375,9 +367,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - if (!sa_entry) - return; - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) mlx5e_ipsec_sadb_rx_del(sa_entry); } @@ -387,9 +376,6 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_priv *priv = netdev_priv(x->xso.dev); - if (!sa_entry) - return; - if (sa_entry->hw_context) { flush_workqueue(sa_entry->ipsec->wq); mlx5e_xfrm_fs_del_rule(priv, sa_entry); @@ -402,7 +388,8 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) int mlx5e_ipsec_init(struct mlx5e_priv *priv) { - struct mlx5e_ipsec *ipsec = NULL; + struct mlx5e_ipsec *ipsec; + int ret; if (!mlx5_ipsec_device_caps(priv->mdev)) { netdev_dbg(priv->netdev, "Not an IPSec offload device\n"); @@ -420,14 +407,23 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, priv->netdev->name); if (!ipsec->wq) { - kfree(ipsec); - return -ENOMEM; + ret = -ENOMEM; + goto err_wq; } + ret = mlx5e_accel_ipsec_fs_init(ipsec); + if (ret) + goto err_fs_init; + priv->ipsec = ipsec; - mlx5e_accel_ipsec_fs_init(ipsec); netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return 0; + +err_fs_init: + destroy_workqueue(ipsec->wq); +err_wq: + kfree(ipsec); + 
return (ret != -EOPNOTSUPP) ? ret : 0; } void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) @@ -487,9 +483,6 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) struct mlx5e_ipsec_modify_state_work *modify_work; bool need_update; - if (!sa_entry) - return; - need_update = mlx5e_ipsec_update_esn_state(sa_entry); if (!need_update) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 55fb6d4cf4ae..f733a6e61196 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -605,9 +605,6 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, u32 ipsec_obj_id, struct mlx5e_ipsec_rule *ipsec_rule) { - if (!priv->ipsec->rx_fs) - return -EOPNOTSUPP; - if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT) return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule); else @@ -618,9 +615,6 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, struct mlx5_accel_esp_xfrm_attrs *attrs, struct mlx5e_ipsec_rule *ipsec_rule) { - if (!priv->ipsec->rx_fs) - return; - if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT) rx_del_rule(priv, attrs, ipsec_rule); else -- cgit v1.2.3 From a05a54694e409e8b1a3f0219a24d919cd9e445f3 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 2 Mar 2022 11:42:52 +0200 Subject: net/mlx5: Reduce useless indirection in IPsec FS add/delete flows There is no need in one-liners wrappers to call internal functions. Let's remove them. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 25 ++++++---------------- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 4 ++-- 2 files changed, 9 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index b04d5de91d87..251178e6e82b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -271,21 +271,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return 0; } -static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) -{ - return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs, - sa_entry->ipsec_obj_id, - &sa_entry->ipsec_rule); -} - -static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) -{ - mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, - &sa_entry->ipsec_rule); -} - static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; @@ -334,7 +319,9 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) } sa_entry->ipsec_obj_id = sa_handle; - err = mlx5e_xfrm_fs_add_rule(priv, sa_entry); + err = mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs, + sa_entry->ipsec_obj_id, + &sa_entry->ipsec_rule); if (err) goto err_hw_ctx; @@ -351,7 +338,8 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) goto out; err_add_rule: - mlx5e_xfrm_fs_del_rule(priv, sa_entry); + mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, + &sa_entry->ipsec_rule); err_hw_ctx: mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context); err_xfrm: @@ -378,7 +366,8 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) if (sa_entry->hw_context) { flush_workqueue(sa_entry->ipsec->wq); - 
mlx5e_xfrm_fs_del_rule(priv, sa_entry); + mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, + &sa_entry->ipsec_rule); mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context); mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index f733a6e61196..bffad18a59d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -612,8 +612,8 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, } void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5e_ipsec_rule *ipsec_rule) { if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT) rx_del_rule(priv, attrs, ipsec_rule); -- cgit v1.2.3 From c674df973ad8af2074c834788e167332d81309fa Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 8 Mar 2022 20:36:15 +0200 Subject: net/mlx5: Store IPsec ESN update work in XFRM state mlx5 IPsec code updated ESN through workqueue with allocation calls in the data path, which can be saved easily if the work is created during XFRM state initialization routine. The locking used later in the work didn't protect from anything because change of HW context is possible during XFRM state add or delete only, which can cancel work and make sure that it is not running. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 49 +++++++--------------- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 7 +++- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 39 +++-------------- include/linux/mlx5/accel.h | 10 +++-- 4 files changed, 33 insertions(+), 72 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 251178e6e82b..8283cf273a63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -271,6 +271,16 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return 0; } +static void _update_xfrm_state(struct work_struct *work) +{ + struct mlx5e_ipsec_modify_state_work *modify_work = + container_of(work, struct mlx5e_ipsec_modify_state_work, work); + struct mlx5e_ipsec_sa_entry *sa_entry = container_of( + modify_work, struct mlx5e_ipsec_sa_entry, modify_work); + + mlx5_accel_esp_modify_xfrm(sa_entry->xfrm, &modify_work->attrs); +} + static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; @@ -334,6 +344,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv; } + INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state); x->xso.offload_handle = (unsigned long)sa_entry; goto out; @@ -365,7 +376,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) struct mlx5e_priv *priv = netdev_priv(x->xso.dev); if (sa_entry->hw_context) { - flush_workqueue(sa_entry->ipsec->wq); + cancel_work_sync(&sa_entry->modify_work.work); mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, &sa_entry->ipsec_rule); mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context); @@ -392,7 +403,6 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) hash_init(ipsec->sadb_rx); 
spin_lock_init(&ipsec->sadb_rx_lock); ipsec->mdev = priv->mdev; - ipsec->en_priv = priv; ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, priv->netdev->name); if (!ipsec->wq) { @@ -424,7 +434,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) mlx5e_accel_ipsec_fs_cleanup(ipsec); destroy_workqueue(ipsec->wq); - kfree(ipsec); priv->ipsec = NULL; } @@ -444,47 +453,19 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) return true; } -struct mlx5e_ipsec_modify_state_work { - struct work_struct work; - struct mlx5_accel_esp_xfrm_attrs attrs; - struct mlx5e_ipsec_sa_entry *sa_entry; -}; - -static void _update_xfrm_state(struct work_struct *work) -{ - int ret; - struct mlx5e_ipsec_modify_state_work *modify_work = - container_of(work, struct mlx5e_ipsec_modify_state_work, work); - struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry; - - ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm, - &modify_work->attrs); - if (ret) - netdev_warn(sa_entry->ipsec->en_priv->netdev, - "Not an IPSec offload device\n"); - - kfree(modify_work); -} - static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - struct mlx5e_ipsec_modify_state_work *modify_work; + struct mlx5e_ipsec_modify_state_work *modify_work = + &sa_entry->modify_work; bool need_update; need_update = mlx5e_ipsec_update_esn_state(sa_entry); if (!need_update) return; - modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC); - if (!modify_work) - return; - mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs); - modify_work->sa_entry = sa_entry; - - INIT_WORK(&modify_work->work, _update_xfrm_state); - WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work)); + queue_work(sa_entry->ipsec->wq, &modify_work->work); } static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index bbf48d4616f9..35a751faeb33 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -62,7 +62,6 @@ struct mlx5e_ipsec_tx; struct mlx5e_ipsec { struct mlx5_core_dev *mdev; - struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); spinlock_t sadb_rx_lock; /* Protects sadb_rx */ struct mlx5e_ipsec_sw_stats sw_stats; @@ -82,6 +81,11 @@ struct mlx5e_ipsec_rule { struct mlx5_modify_hdr *set_modify_hdr; }; +struct mlx5e_ipsec_modify_state_work { + struct work_struct work; + struct mlx5_accel_esp_xfrm_attrs attrs; +}; + struct mlx5e_ipsec_sa_entry { struct hlist_node hlist; /* Item in SADB_RX hashtable */ struct mlx5e_ipsec_esn_state esn_state; @@ -94,6 +98,7 @@ struct mlx5e_ipsec_sa_entry { struct xfrm_offload *xo); u32 ipsec_obj_id; struct mlx5e_ipsec_rule ipsec_rule; + struct mlx5e_ipsec_modify_state_work modify_work; }; int mlx5e_ipsec_init(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 37c9880719cf..bbfb6643ed80 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -18,7 +18,6 @@ struct mlx5_ipsec_sa_ctx { struct mlx5_ipsec_esp_xfrm { /* reference counter of SA ctx */ struct mlx5_ipsec_sa_ctx *sa_ctx; - struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */ struct mlx5_accel_esp_xfrm 
accel_xfrm; }; @@ -117,7 +116,6 @@ mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, if (!mxfrm) return ERR_PTR(-ENOMEM); - mutex_init(&mxfrm->lock); memcpy(&mxfrm->accel_xfrm.attrs, attrs, sizeof(mxfrm->accel_xfrm.attrs)); @@ -129,8 +127,6 @@ static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); - /* assuming no sa_ctx are connected to this xfrm_ctx */ - WARN_ON(mxfrm->sa_ctx); kfree(mxfrm); } @@ -232,7 +228,6 @@ static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev, sa_ctx->dev = mdev; mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); - mutex_lock(&mxfrm->lock); sa_ctx->mxfrm = mxfrm; /* key */ @@ -258,14 +253,12 @@ static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev, *hw_handle = sa_ctx->ipsec_obj_id; mxfrm->sa_ctx = sa_ctx; - mutex_unlock(&mxfrm->lock); return sa_ctx; err_enc_key: mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id); err_sa_ctx: - mutex_unlock(&mxfrm->lock); kfree(sa_ctx); return ERR_PTR(err); } @@ -273,14 +266,10 @@ err_sa_ctx: static void mlx5_ipsec_offload_delete_sa_ctx(void *context) { struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context; - struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm; - mutex_lock(&mxfrm->lock); mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id); mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id); kfree(sa_ctx); - mxfrm->sa_ctx = NULL; - mutex_unlock(&mxfrm->lock); } static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, @@ -331,29 +320,17 @@ static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) +static void mlx5_ipsec_offload_esp_modify_xfrm( + struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; struct mlx5_core_dev *mdev = xfrm->mdev; struct mlx5_ipsec_esp_xfrm *mxfrm; - int err = 0; - if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) - return 0; - - if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs)) - return -EOPNOTSUPP; - mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); - mutex_lock(&mxfrm->lock); - - if (!mxfrm->sa_ctx) - /* Not bound xfrm, change only sw attrs */ - goto change_sw_xfrm_attrs; - /* need to add find and replace in ipsec_rhash_sa the sa_ctx */ /* modify device with new hw_sa */ ipsec_attrs.accel_flags = attrs->flags; @@ -362,12 +339,8 @@ static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, &ipsec_attrs, mxfrm->sa_ctx->ipsec_obj_id); -change_sw_xfrm_attrs: if (!err) memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); - - mutex_unlock(&mxfrm->lock); - return err; } void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, @@ -413,8 +386,8 @@ void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) mlx5_ipsec_offload_esp_destroy_xfrm(xfrm); } -int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) +void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { - return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs); + mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs); } diff --git a/include/linux/mlx5/accel.h 
b/include/linux/mlx5/accel.h index 0f2596297f6a..a2720ebbb9fd 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -127,8 +127,8 @@ struct mlx5_accel_esp_xfrm * mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs); void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); -int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); +void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); #else @@ -145,9 +145,11 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, } static inline void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} -static inline int +static inline void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ +} #endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5_ACCEL_H__ */ -- cgit v1.2.3 From 2ea36e2e4ad2b77bd5d45142a529b72eb5ca9156 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 8 Mar 2022 20:55:00 +0200 Subject: net/mlx5: Remove useless validity check All callers build xfrm attributes with help of mlx5e_ipsec_build_accel_xfrm_attrs() function that ensure validity of attributes. There is no need to recheck them again. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 44 ---------------------- include/linux/mlx5/accel.h | 10 ----- 2 files changed, 54 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index bbfb6643ed80..9d2932cf12f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -62,55 +62,11 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) } EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); -static int -mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { - mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n", - attrs->replay_type); - return -EOPNOTSUPP; - } - - if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) { - mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n", - attrs->keymat_type); - return -EOPNOTSUPP; - } - - if (attrs->keymat.aes_gcm.iv_algo != - MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) { - mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n", - attrs->keymat.aes_gcm.iv_algo); - return -EOPNOTSUPP; - } - - if (attrs->keymat.aes_gcm.key_len != 128 && - attrs->keymat.aes_gcm.key_len != 256) { - mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n", - attrs->keymat.aes_gcm.key_len); - return -EOPNOTSUPP; - } - - if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) && - !MLX5_CAP_IPSEC(mdev, ipsec_esn)) { - mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n"); - return -EOPNOTSUPP; - } - - return 0; -} - static struct mlx5_accel_esp_xfrm * mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_ipsec_esp_xfrm *mxfrm; - int err = 0; - - err = 
mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs); - if (err) - return ERR_PTR(err); mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL); if (!mxfrm) diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index a2720ebbb9fd..9c511d466e55 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -36,10 +36,6 @@ #include -enum mlx5_accel_esp_aes_gcm_keymat_iv_algo { - MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ, -}; - enum mlx5_accel_esp_flags { MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, @@ -57,14 +53,9 @@ enum mlx5_accel_esp_keymats { MLX5_ACCEL_ESP_KEYMAT_AES_GCM, }; -enum mlx5_accel_esp_replay { - MLX5_ACCEL_ESP_REPLAY_NONE, - MLX5_ACCEL_ESP_REPLAY_BMP, -}; struct aes_gcm_keymat { u64 seq_iv; - enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo; u32 salt; u32 icv_len; @@ -81,7 +72,6 @@ struct mlx5_accel_esp_xfrm_attrs { u32 tfc_pad; u32 flags; u32 sa_handle; - enum mlx5_accel_esp_replay replay_type; union { struct { u32 size; -- cgit v1.2.3 From c6e3b421c7079af67201351c9faff62613e06f40 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 9 Mar 2022 10:35:05 +0200 Subject: net/mlx5: Merge various control path IPsec headers into one file The mlx5 IPsec code has logical separation between code that operates with XFRM objects (ipsec.c), HW objects (ipsec_offload.c), flow steering logic (ipsec_fs.c) and data path (ipsec_rxtx.c). Such separation makes sense for C-files, but isn't needed at all for H-files as they are included in batch anyway. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/params.c | 2 +- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 5 +- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 101 +++++++++++++- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 17 +-- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 3 +- .../mellanox/mlx5/core/en_accel/ipsec_offload.h | 14 -- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 5 +- .../mellanox/mlx5/core/en_accel/ipsec_stats.c | 4 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- include/linux/mlx5/accel.h | 145 --------------------- 12 files changed, 117 insertions(+), 184 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h delete mode 100644 include/linux/mlx5/accel.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 1e8700957280..3c1edfa33aa7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -5,7 +5,7 @@ #include "en/txrx.h" #include "en/port.h" #include "en_accel/en_accel.h" -#include "en_accel/ipsec_offload.h" +#include "en_accel/ipsec.h" static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 8283cf273a63..0daf9350471f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -37,9 +37,8 @@ #include #include "en.h" -#include "en_accel/ipsec.h" -#include "en_accel/ipsec_rxtx.h" -#include "en_accel/ipsec_fs.h" +#include "ipsec.h" +#include "ipsec_rxtx.h" static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct 
xfrm_state *x) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 35a751faeb33..b438b0358c36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -40,11 +40,81 @@ #include #include -#include "ipsec_offload.h" - #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L +enum mlx5_accel_esp_flags { + MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ + MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, + MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, + MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, +}; + +enum mlx5_accel_esp_action { + MLX5_ACCEL_ESP_ACTION_DECRYPT, + MLX5_ACCEL_ESP_ACTION_ENCRYPT, +}; + +enum mlx5_accel_esp_keymats { + MLX5_ACCEL_ESP_KEYMAT_AES_NONE, + MLX5_ACCEL_ESP_KEYMAT_AES_GCM, +}; + +struct aes_gcm_keymat { + u64 seq_iv; + + u32 salt; + u32 icv_len; + + u32 key_len; + u32 aes_key[256 / 32]; +}; + +struct mlx5_accel_esp_xfrm_attrs { + enum mlx5_accel_esp_action action; + u32 esn; + __be32 spi; + u32 seq; + u32 tfc_pad; + u32 flags; + u32 sa_handle; + union { + struct { + u32 size; + + } bmp; + } replay; + enum mlx5_accel_esp_keymats keymat_type; + union { + struct aes_gcm_keymat aes_gcm; + } keymat; + + union { + __be32 a4; + __be32 a6[4]; + } saddr; + + union { + __be32 a4; + __be32 a6[4]; + } daddr; + + u8 is_ipv6; +}; + +struct mlx5_accel_esp_xfrm { + struct mlx5_core_dev *mdev; + struct mlx5_accel_esp_xfrm_attrs attrs; +}; + +enum mlx5_accel_ipsec_cap { + MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, + MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1, + MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 2, + MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 3, + MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 4, +}; + struct mlx5e_priv; struct mlx5e_ipsec_sw_stats { @@ -108,6 +178,29 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev, unsigned int handle); +void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); +int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 ipsec_obj_id, + struct mlx5e_ipsec_rule *ipsec_rule); +void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5e_ipsec_rule *ipsec_rule); + +void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + u32 *sa_handle); +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); + +u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev); + +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs); +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); +void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); #else static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv) { @@ -122,6 +215,10 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) { } +static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) +{ + return 0; +} #endif #endif /* __MLX5E_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index bffad18a59d6..96ab2e9d6f9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -2,8 +2,9 @@ /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ #include -#include "ipsec_offload.h" -#include "ipsec_fs.h" +#include "en.h" +#include "en/fs.h" +#include "ipsec.h" #include "fs_core.h" #define NUM_IPSEC_FTE BIT(15) @@ -565,7 +566,7 @@ static int tx_add_rule(struct mlx5e_priv *priv, if (IS_ERR(rule)) { err = PTR_ERR(rule); netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n", - attrs->action, err); + attrs->action, err); goto out; } @@ -579,8 +580,8 @@ out: } static void rx_del_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5e_ipsec_rule *ipsec_rule) { mlx5_del_flow_rules(ipsec_rule->rule); ipsec_rule->rule = NULL; @@ -592,7 +593,7 @@ static void rx_del_rule(struct mlx5e_priv *priv, } static void tx_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5e_ipsec_rule *ipsec_rule) { mlx5_del_flow_rules(ipsec_rule->rule); ipsec_rule->rule = NULL; @@ -612,8 +613,8 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, } void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5e_ipsec_rule *ipsec_rule) { if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT) rx_del_rule(priv, attrs, ipsec_rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 9d2932cf12f1..6c03ce8aba92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -2,9 +2,8 @@ /* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */ #include "mlx5_core.h" -#include "ipsec_offload.h" +#include "ipsec.h" #include "lib/mlx5.h" -#include "en_accel/ipsec_fs.h" struct mlx5_ipsec_sa_ctx { struct rhash_head hash; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h deleted file mode 100644 index 7dac104e6ef1..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ - -#ifndef __MLX5_IPSEC_OFFLOAD_H__ -#define __MLX5_IPSEC_OFFLOAD_H__ - -#include -#include - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle); -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); -#endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 9b65c765cbd9..d30922e1b60f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -34,9 +34,8 @@ #include #include #include -#include "ipsec_offload.h" -#include "en_accel/ipsec_rxtx.h" -#include "en_accel/ipsec.h" +#include "ipsec.h" +#include "ipsec_rxtx.h" #include "en.h" enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 3aace1c2a763..9de84821dafb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -35,9 +35,7 @@ #include #include "en.h" -#include "ipsec_offload.h" -#include "fpga/sdk.h" -#include "en_accel/ipsec.h" +#include "ipsec.h" static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 12b72a0bcb1a..d27986869b8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -48,7 +48,6 @@ #include "en_accel/ipsec.h" #include "en_accel/en_accel.h" #include "en_accel/ktls.h" -#include "en_accel/ipsec_offload.h" #include "lib/vxlan.h" #include "lib/clock.h" #include "en/port.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 2dea9e4649a6..fb11081001a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -48,7 +48,7 @@ #include "en_rep.h" #include "en/rep/tc.h" #include "ipoib/ipoib.h" -#include "en_accel/ipsec_offload.h" +#include "en_accel/ipsec.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/ktls_txrx.h" #include "en/xdp.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 95d7712c2d9a..35e48ef04845 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -62,7 +62,7 @@ #include "lib/mlx5.h" #include "lib/tout.h" #include "fpga/core.h" -#include "en_accel/ipsec_offload.h" +#include "en_accel/ipsec.h" #include "lib/clock.h" #include "lib/vxlan.h" #include "lib/geneve.h" diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h deleted file mode 100644 index 9c511d466e55..000000000000 --- a/include/linux/mlx5/accel.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright (c) 2018 Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#ifndef __MLX5_ACCEL_H__ -#define __MLX5_ACCEL_H__ - -#include - -enum mlx5_accel_esp_flags { - MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */ - MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0, - MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1, - MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2, -}; - -enum mlx5_accel_esp_action { - MLX5_ACCEL_ESP_ACTION_DECRYPT, - MLX5_ACCEL_ESP_ACTION_ENCRYPT, -}; - -enum mlx5_accel_esp_keymats { - MLX5_ACCEL_ESP_KEYMAT_AES_NONE, - MLX5_ACCEL_ESP_KEYMAT_AES_GCM, -}; - - -struct aes_gcm_keymat { - u64 seq_iv; - - u32 salt; - u32 icv_len; - - u32 key_len; - u32 aes_key[256 / 32]; -}; - -struct mlx5_accel_esp_xfrm_attrs { - enum mlx5_accel_esp_action action; - u32 esn; - __be32 spi; - u32 seq; - u32 tfc_pad; - u32 flags; - u32 sa_handle; - union { - struct { - u32 size; - - } bmp; - } replay; - enum mlx5_accel_esp_keymats keymat_type; - union { - struct aes_gcm_keymat aes_gcm; - } keymat; - - union { - __be32 a4; - __be32 a6[4]; - } saddr; - - union { - __be32 a4; - __be32 a6[4]; - } daddr; - - u8 is_ipv6; -}; - -struct mlx5_accel_esp_xfrm { - struct mlx5_core_dev *mdev; - struct mlx5_accel_esp_xfrm_attrs attrs; -}; - -enum mlx5_accel_ipsec_cap { - MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, - MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1, - MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 2, - MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 3, - MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 4, -}; - -#ifdef CONFIG_MLX5_EN_IPSEC - -u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev); - -struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs); -void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); -void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); - -#else - -static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - return 0; -} - -static inline struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - return ERR_PTR(-EOPNOTSUPP); -} -static inline void -mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {} -static inline void -mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs 
*attrs) -{ -} - -#endif /* CONFIG_MLX5_EN_IPSEC */ -#endif /* __MLX5_ACCEL_H__ */ -- cgit v1.2.3 From a534e24d720f02395367e65af1285dc1ee3cf406 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 9 Mar 2022 11:11:31 +0200 Subject: net/mlx5: Remove indirections from esp functions This change cleanups the mlx5 esp interface. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 47 ++++++---------------- 1 file changed, 12 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 6c03ce8aba92..a7bd31d10bd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -61,9 +61,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) } EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); -static struct mlx5_accel_esp_xfrm * -mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) +struct mlx5_accel_esp_xfrm * +mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_ipsec_esp_xfrm *mxfrm; @@ -74,10 +74,11 @@ mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, memcpy(&mxfrm->accel_xfrm.attrs, attrs, sizeof(mxfrm->accel_xfrm.attrs)); + mxfrm->accel_xfrm.mdev = mdev; return &mxfrm->accel_xfrm; } -static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) { struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); @@ -275,14 +276,13 @@ static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -static void mlx5_ipsec_offload_esp_modify_xfrm( - struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) +void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; struct mlx5_core_dev *mdev = xfrm->mdev; struct mlx5_ipsec_esp_xfrm *mxfrm; - int err = 0; + int err; mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); @@ -294,8 +294,10 @@ static void mlx5_ipsec_offload_esp_modify_xfrm( &ipsec_attrs, mxfrm->sa_ctx->ipsec_obj_id); - if (!err) - memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); + if (err) + return; + + memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); } void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, @@ -321,28 +323,3 @@ void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) { mlx5_ipsec_offload_delete_sa_ctx(context); } - -struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - struct mlx5_accel_esp_xfrm *xfrm; - - xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs); - if (IS_ERR(xfrm)) - return xfrm; - - xfrm->mdev = mdev; - return xfrm; -} - -void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) -{ - mlx5_ipsec_offload_esp_destroy_xfrm(xfrm); -} - -void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs); -} -- cgit v1.2.3 From b73e67287b80519bef0217637028d1a49456baac 
Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 9 Mar 2022 22:14:26 +0200 Subject: net/mlx5: Simplify HW context interfaces by using SA entry SA context logic used multiple structures to store same data over and over. By simplifying the SA context interfaces, we can remove extra structs. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 50 ++---- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 27 ++- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 182 +++++---------------- 3 files changed, 62 insertions(+), 197 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 0daf9350471f..537311a74bfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -63,9 +63,9 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec, return ret; } -static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry, - unsigned int handle) +static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry) { + unsigned int handle = sa_entry->ipsec_obj_id; struct mlx5e_ipsec *ipsec = sa_entry->ipsec; struct mlx5e_ipsec_sa_entry *_sa_entry; unsigned long flags; @@ -277,16 +277,14 @@ static void _update_xfrm_state(struct work_struct *work) struct mlx5e_ipsec_sa_entry *sa_entry = container_of( modify_work, struct mlx5e_ipsec_sa_entry, modify_work); - mlx5_accel_esp_modify_xfrm(sa_entry->xfrm, &modify_work->attrs); + mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs); } static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; struct net_device *netdev = x->xso.real_dev; - struct mlx5_accel_esp_xfrm_attrs attrs; struct mlx5e_priv *priv; - unsigned int sa_handle; int err; priv = netdev_priv(netdev); @@ -309,33 +307,20 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) /* check esn */ mlx5e_ipsec_update_esn_state(sa_entry); - /* create xfrm */ - mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); - sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs); - if (IS_ERR(sa_entry->xfrm)) { - err = PTR_ERR(sa_entry->xfrm); - goto err_sa_entry; - } - + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs); /* create hw context */ - sa_entry->hw_context = - mlx5_accel_esp_create_hw_context(priv->mdev, - sa_entry->xfrm, - &sa_handle); - if (IS_ERR(sa_entry->hw_context)) { - err = PTR_ERR(sa_entry->hw_context); + err = mlx5_ipsec_create_sa_ctx(sa_entry); + if (err) goto err_xfrm; - } - sa_entry->ipsec_obj_id = sa_handle; - err = mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs, + err = mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->attrs, sa_entry->ipsec_obj_id, &sa_entry->ipsec_rule); if (err) goto err_hw_ctx; if (x->xso.flags & XFRM_OFFLOAD_INBOUND) { - err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle); + err = mlx5e_ipsec_sadb_rx_add(sa_entry); if (err) goto err_add_rule; } else { @@ -348,15 +333,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) goto out; err_add_rule: - mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, + mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->attrs, &sa_entry->ipsec_rule); err_hw_ctx: - mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context); + mlx5_ipsec_free_sa_ctx(sa_entry); err_xfrm: - mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); -err_sa_entry: 
kfree(sa_entry); - out: return err; } @@ -374,14 +356,10 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_priv *priv = netdev_priv(x->xso.dev); - if (sa_entry->hw_context) { - cancel_work_sync(&sa_entry->modify_work.work); - mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, - &sa_entry->ipsec_rule); - mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context); - mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); - } - + cancel_work_sync(&sa_entry->modify_work.work); + mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->attrs, + &sa_entry->ipsec_rule); + mlx5_ipsec_free_sa_ctx(sa_entry); kfree(sa_entry); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index b438b0358c36..cdcb95f90623 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -102,11 +102,6 @@ struct mlx5_accel_esp_xfrm_attrs { u8 is_ipv6; }; -struct mlx5_accel_esp_xfrm { - struct mlx5_core_dev *mdev; - struct mlx5_accel_esp_xfrm_attrs attrs; -}; - enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1, @@ -162,11 +157,11 @@ struct mlx5e_ipsec_sa_entry { unsigned int handle; /* Handle in SADB_RX */ struct xfrm_state *x; struct mlx5e_ipsec *ipsec; - struct mlx5_accel_esp_xfrm *xfrm; - void *hw_context; + struct mlx5_accel_esp_xfrm_attrs attrs; void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_offload *xo); u32 ipsec_obj_id; + u32 enc_key_id; struct mlx5e_ipsec_rule ipsec_rule; struct mlx5e_ipsec_modify_state_work modify_work; }; @@ -188,19 +183,19 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, struct mlx5_accel_esp_xfrm_attrs *attrs, struct mlx5e_ipsec_rule *ipsec_rule); -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle); -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); +int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry); +void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry); u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev); -struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs); -void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); -void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, +void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry, const struct mlx5_accel_esp_xfrm_attrs *attrs); + +static inline struct mlx5_core_dev * +mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) +{ + return sa_entry->ipsec->mdev; +} #else static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index a7bd31d10bd4..817747d5229e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -5,21 +5,6 @@ #include "ipsec.h" #include "lib/mlx5.h" -struct mlx5_ipsec_sa_ctx { - struct rhash_head hash; - u32 enc_key_id; - u32 ipsec_obj_id; - /* hw ctx */ - struct mlx5_core_dev *dev; - struct mlx5_ipsec_esp_xfrm *mxfrm; -}; - -struct mlx5_ipsec_esp_xfrm { - /* reference counter of SA ctx */ - struct 
mlx5_ipsec_sa_ctx *sa_ctx; - struct mlx5_accel_esp_xfrm accel_xfrm; -}; - u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) { u32 caps; @@ -61,43 +46,11 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) } EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps); -struct mlx5_accel_esp_xfrm * -mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs) -{ - struct mlx5_ipsec_esp_xfrm *mxfrm; - - mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL); - if (!mxfrm) - return ERR_PTR(-ENOMEM); - - memcpy(&mxfrm->accel_xfrm.attrs, attrs, - sizeof(mxfrm->accel_xfrm.attrs)); - - mxfrm->accel_xfrm.mdev = mdev; - return &mxfrm->accel_xfrm; -} - -void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) { - struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, - accel_xfrm); - - kfree(mxfrm); -} - -struct mlx5_ipsec_obj_attrs { - const struct aes_gcm_keymat *aes_gcm; - u32 accel_flags; - u32 esn_msb; - u32 enc_key_id; -}; - -static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev, - struct mlx5_ipsec_obj_attrs *attrs, - u32 *ipsec_id) -{ - const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm; + struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); + struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm; u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {}; void *obj, *salt_p, *salt_iv_p; @@ -128,14 +81,14 @@ static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev, salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv); memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv)); /* esn */ - if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { + if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { MLX5_SET(ipsec_obj, obj, esn_en, 1); - MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); - if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) + MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn); + if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) MLX5_SET(ipsec_obj, obj, esn_overlap, 1); } - MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id); + MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id); /* general object fields set */ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, @@ -145,13 +98,15 @@ static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev, err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) - *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + sa_entry->ipsec_obj_id = + MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); return err; } -static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id) +static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) { + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; @@ -159,79 +114,52 @@ static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id) MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id); mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *accel_xfrm, - const __be32 
saddr[4], const __be32 daddr[4], - const __be32 spi, bool is_ipv6, u32 *hw_handle) +int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry) { - struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs; - struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; - struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; - struct mlx5_ipsec_esp_xfrm *mxfrm; - struct mlx5_ipsec_sa_ctx *sa_ctx; + struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.keymat.aes_gcm; + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); int err; - /* alloc SA context */ - sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); - if (!sa_ctx) - return ERR_PTR(-ENOMEM); - - sa_ctx->dev = mdev; - - mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); - sa_ctx->mxfrm = mxfrm; - /* key */ err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key, aes_gcm->key_len / BITS_PER_BYTE, MLX5_ACCEL_OBJ_IPSEC_KEY, - &sa_ctx->enc_key_id); + &sa_entry->enc_key_id); if (err) { mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err); - goto err_sa_ctx; + return err; } - ipsec_attrs.aes_gcm = aes_gcm; - ipsec_attrs.accel_flags = accel_xfrm->attrs.flags; - ipsec_attrs.esn_msb = accel_xfrm->attrs.esn; - ipsec_attrs.enc_key_id = sa_ctx->enc_key_id; - err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs, - &sa_ctx->ipsec_obj_id); + err = mlx5_create_ipsec_obj(sa_entry); if (err) { mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err); goto err_enc_key; } - *hw_handle = sa_ctx->ipsec_obj_id; - mxfrm->sa_ctx = sa_ctx; - - return sa_ctx; + return 0; err_enc_key: - mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id); -err_sa_ctx: - kfree(sa_ctx); - return ERR_PTR(err); + mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id); + return err; } -static void mlx5_ipsec_offload_delete_sa_ctx(void *context) +void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry) { - struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context; + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); - mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id); - mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id); - kfree(sa_ctx); + mlx5_destroy_ipsec_obj(sa_entry); + mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id); } -static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, - struct mlx5_ipsec_obj_attrs *attrs, - u32 ipsec_id) +static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {}; u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)]; u64 modify_field_select = 0; @@ -239,7 +167,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, void *obj; int err; - if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED)) + if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED)) return 0; general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types); @@ -249,11 +177,11 @@ static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, /* general object fields set */ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC); - MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id); err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (err) { mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = 
%d\n", - ipsec_id, err); + sa_entry->ipsec_obj_id, err); return err; } @@ -266,8 +194,8 @@ static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, return -EOPNOTSUPP; obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object); - MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); - if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) + MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn); + if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) MLX5_SET(ipsec_obj, obj, esn_overlap, 1); /* general object fields set */ @@ -276,50 +204,14 @@ static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } -void mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, +void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry, const struct mlx5_accel_esp_xfrm_attrs *attrs) { - struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; - struct mlx5_core_dev *mdev = xfrm->mdev; - struct mlx5_ipsec_esp_xfrm *mxfrm; int err; - mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); - - /* need to add find and replace in ipsec_rhash_sa the sa_ctx */ - /* modify device with new hw_sa */ - ipsec_attrs.accel_flags = attrs->flags; - ipsec_attrs.esn_msb = attrs->esn; - err = mlx5_modify_ipsec_obj(mdev, - &ipsec_attrs, - mxfrm->sa_ctx->ipsec_obj_id); - + err = mlx5_modify_ipsec_obj(sa_entry, attrs); if (err) return; - memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); -} - -void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *xfrm, - u32 *sa_handle) -{ - __be32 saddr[4] = {}, daddr[4] = {}; - - if (!xfrm->attrs.is_ipv6) { - saddr[3] = xfrm->attrs.saddr.a4; - daddr[3] = xfrm->attrs.daddr.a4; - } else { - memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr)); - memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); - } - - return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr, - xfrm->attrs.spi, - xfrm->attrs.is_ipv6, sa_handle); -} - -void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) -{ - mlx5_ipsec_offload_delete_sa_ctx(context); + memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs)); } -- cgit v1.2.3 From 82f7bdba377578e09935bff59157f7361ec6919c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 10 Mar 2022 16:07:58 +0200 Subject: net/mlx5: Clean IPsec FS add/delete rules Reuse existing struct to pass parameters instead of open code them. 
Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 10 ++-- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 7 +-- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 55 ++++++++++++---------- 3 files changed, 34 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 537311a74bfb..81c9831ad286 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -313,9 +313,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) if (err) goto err_xfrm; - err = mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->attrs, - sa_entry->ipsec_obj_id, - &sa_entry->ipsec_rule); + err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry); if (err) goto err_hw_ctx; @@ -333,8 +331,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) goto out; err_add_rule: - mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->attrs, - &sa_entry->ipsec_rule); + mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry); err_hw_ctx: mlx5_ipsec_free_sa_ctx(sa_entry); err_xfrm: @@ -357,8 +354,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) struct mlx5e_priv *priv = netdev_priv(x->xso.dev); cancel_work_sync(&sa_entry->modify_work.work); - mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->attrs, - &sa_entry->ipsec_rule); + mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry); mlx5_ipsec_free_sa_ctx(sa_entry); kfree(sa_entry); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index cdcb95f90623..af1467cbb7c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -176,12 +176,9 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev, void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 ipsec_obj_id, - struct mlx5e_ipsec_rule *ipsec_rule); + struct mlx5e_ipsec_sa_entry *sa_entry); void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - struct mlx5e_ipsec_rule *ipsec_rule); + struct mlx5e_ipsec_sa_entry *sa_entry); int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry); void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 96ab2e9d6f9a..342828351254 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -454,11 +454,12 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs, } static int rx_add_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 ipsec_obj_id, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5e_ipsec_sa_entry *sa_entry) { u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; + struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; + u32 ipsec_obj_id = sa_entry->ipsec_obj_id; struct mlx5_modify_hdr *modify_hdr = NULL; struct mlx5e_accel_fs_esp_prot *fs_prot; struct mlx5_flow_destination 
dest = {}; @@ -532,9 +533,7 @@ out: } static int tx_add_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 ipsec_obj_id, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5_flow_act flow_act = {}; struct mlx5_flow_handle *rule; @@ -551,7 +550,8 @@ static int tx_add_rule(struct mlx5e_priv *priv, goto out; } - setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act); + setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec, + &flow_act); /* Add IPsec indicator in metadata_reg_a */ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; @@ -566,11 +566,11 @@ static int tx_add_rule(struct mlx5e_priv *priv, if (IS_ERR(rule)) { err = PTR_ERR(rule); netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n", - attrs->action, err); + sa_entry->attrs.action, err); goto out; } - ipsec_rule->rule = rule; + sa_entry->ipsec_rule.rule = rule; out: kvfree(spec); @@ -580,21 +580,25 @@ out: } static void rx_del_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5e_ipsec_sa_entry *sa_entry) { + struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; + mlx5_del_flow_rules(ipsec_rule->rule); ipsec_rule->rule = NULL; mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr); ipsec_rule->set_modify_hdr = NULL; - rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4); + rx_ft_put(priv, + sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4); } static void tx_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5e_ipsec_sa_entry *sa_entry) { + struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; + mlx5_del_flow_rules(ipsec_rule->rule); ipsec_rule->rule = NULL; @@ -602,24 +606,23 @@ static void tx_del_rule(struct mlx5e_priv *priv, } int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 ipsec_obj_id, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5e_ipsec_sa_entry *sa_entry) { - if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT) - return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule); - else - return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule); + if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) + return tx_add_rule(priv, sa_entry); + + return rx_add_rule(priv, sa_entry); } void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, - struct mlx5_accel_esp_xfrm_attrs *attrs, - struct mlx5e_ipsec_rule *ipsec_rule) + struct mlx5e_ipsec_sa_entry *sa_entry) { - if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT) - rx_del_rule(priv, attrs, ipsec_rule); - else - tx_del_rule(priv, ipsec_rule); + if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) { + tx_del_rule(priv, sa_entry); + return; + } + + rx_del_rule(priv, sa_entry); } void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) -- cgit v1.2.3 From b7242ffc562ccf26121c85beb58e9ad40157b6fe Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 10 Mar 2022 20:48:03 +0200 Subject: net/mlx5: Make sure that no dangling IPsec FS pointers exist The IPsec FS code was implemented with anti-pattern there failures in create functions left the system with dangling pointers that were cleaned in global routines. The less error prone approach is to make sure that failed function cleans everything internally. As part of this change, we remove the batch of one liners and rewrite get/put functions to remove ambiguity. 
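The shape of the reworked get/put helpers is worth spelling out: the reference count is only incremented once the resource has actually been created, so a failed create leaves nothing behind for a global cleanup routine to find. A minimal userspace sketch of that pattern follows (build with cc -pthread); struct res, create_table() and destroy_table() are invented stand-ins, not mlx5 driver symbols.

#include <pthread.h>
#include <stdlib.h>

struct res {
	pthread_mutex_t lock;
	int refcnt;
	void *table;
};

static int create_table(struct res *r)
{
	r->table = malloc(1);
	return r->table ? 0 : -1;
}

static void destroy_table(struct res *r)
{
	free(r->table);
	r->table = NULL;
}

static int res_get(struct res *r)
{
	int err = 0;

	pthread_mutex_lock(&r->lock);
	if (r->refcnt)
		goto skip;	/* already created, just take a reference */

	err = create_table(r);
	if (err)
		goto out;	/* nothing was published, nothing to undo */
skip:
	r->refcnt++;
out:
	pthread_mutex_unlock(&r->lock);
	return err;
}

static void res_put(struct res *r)
{
	pthread_mutex_lock(&r->lock);
	r->refcnt--;
	if (!r->refcnt)
		destroy_table(r);	/* last user tears everything down */
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	static struct res r = { .lock = PTHREAD_MUTEX_INITIALIZER };

	if (res_get(&r))
		return 1;
	res_put(&r);
	return 0;
}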
Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 229 +++++++-------------- 1 file changed, 73 insertions(+), 156 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 342828351254..9d95a0025fd6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -60,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv, struct mlx5_modify_hdr *modify_hdr; struct mlx5_flow_handle *fte; struct mlx5_flow_spec *spec; - int err = 0; + int err; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) @@ -96,101 +96,27 @@ static int rx_err_add_rule(struct mlx5e_priv *priv, goto out; } + kvfree(spec); rx_err->rule = fte; rx_err->copy_modify_hdr = modify_hdr; + return 0; out: - if (err) - mlx5_modify_header_dealloc(mdev, modify_hdr); + mlx5_modify_header_dealloc(mdev, modify_hdr); out_spec: kvfree(spec); return err; } -static void rx_err_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_rx_err *rx_err) -{ - if (rx_err->rule) { - mlx5_del_flow_rules(rx_err->rule); - rx_err->rule = NULL; - } - - if (rx_err->copy_modify_hdr) { - mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr); - rx_err->copy_modify_hdr = NULL; - } -} - -static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err) -{ - rx_err_del_rule(priv, rx_err); - - if (rx_err->ft) { - mlx5_destroy_flow_table(rx_err->ft); - rx_err->ft = NULL; - } -} - -static int rx_err_create_ft(struct mlx5e_priv *priv, - struct mlx5e_accel_fs_esp_prot *fs_prot, - struct mlx5e_ipsec_rx_err *rx_err) -{ - struct mlx5_flow_table_attr ft_attr = {}; - struct mlx5_flow_table *ft; - int err; - - ft_attr.max_fte = 1; - ft_attr.autogroup.max_num_groups = 1; - ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; - ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); - if (IS_ERR(ft)) { - err = PTR_ERR(ft); - netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err); - return err; - } - - rx_err->ft = ft; - err = rx_err_add_rule(priv, fs_prot, rx_err); - if (err) - goto out_err; - - return 0; - -out_err: - mlx5_destroy_flow_table(ft); - rx_err->ft = NULL; - return err; -} - -static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot) -{ - if (fs_prot->miss_rule) { - mlx5_del_flow_rules(fs_prot->miss_rule); - fs_prot->miss_rule = NULL; - } - - if (fs_prot->miss_group) { - mlx5_destroy_flow_group(fs_prot->miss_group); - fs_prot->miss_group = NULL; - } - - if (fs_prot->ft) { - mlx5_destroy_flow_table(fs_prot->ft); - fs_prot->ft = NULL; - } -} - static int rx_fs_create(struct mlx5e_priv *priv, struct mlx5e_accel_fs_esp_prot *fs_prot) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *ft = fs_prot->ft; struct mlx5_flow_group *miss_group; struct mlx5_flow_handle *miss_rule; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; - struct mlx5_flow_table *ft; u32 *flow_group_in; int err = 0; @@ -201,20 +127,6 @@ static int rx_fs_create(struct mlx5e_priv *priv, goto out; } - /* Create FT */ - ft_attr.max_fte = NUM_IPSEC_FTE; - ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; - ft_attr.autogroup.num_reserved_entries = 1; - ft_attr.autogroup.max_num_groups = 1; - ft = 
mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); - if (IS_ERR(ft)) { - err = PTR_ERR(ft); - netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err); - goto out; - } - fs_prot->ft = ft; - /* Create miss_group */ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1); @@ -229,19 +141,19 @@ static int rx_fs_create(struct mlx5e_priv *priv, /* Create miss rule */ miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1); if (IS_ERR(miss_rule)) { + mlx5_destroy_flow_group(fs_prot->miss_group); err = PTR_ERR(miss_rule); netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err); goto out; } fs_prot->miss_rule = miss_rule; - out: kvfree(flow_group_in); kvfree(spec); return err; } -static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type) { struct mlx5e_accel_fs_esp_prot *fs_prot; struct mlx5e_accel_fs_esp *accel_esp; @@ -251,17 +163,21 @@ static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type) /* The netdev unreg already happened, so all offloaded rule are already removed */ fs_prot = &accel_esp->fs_prot[type]; - rx_fs_destroy(fs_prot); - - rx_err_destroy_ft(priv, &fs_prot->rx_err); + mlx5_del_flow_rules(fs_prot->miss_rule); + mlx5_destroy_flow_group(fs_prot->miss_group); + mlx5_destroy_flow_table(fs_prot->ft); - return 0; + mlx5_del_flow_rules(fs_prot->rx_err.rule); + mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr); + mlx5_destroy_flow_table(fs_prot->rx_err.ft); } static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) { + struct mlx5_flow_table_attr ft_attr = {}; struct mlx5e_accel_fs_esp_prot *fs_prot; struct mlx5e_accel_fs_esp *accel_esp; + struct mlx5_flow_table *ft; int err; accel_esp = priv->ipsec->rx_fs; @@ -270,14 +186,45 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) fs_prot->default_dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type)); - err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err); + ft_attr.max_fte = 1; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft)) + return PTR_ERR(ft); + + fs_prot->rx_err.ft = ft; + err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err); if (err) - return err; + goto err_add; + + /* Create FT */ + ft_attr.max_fte = NUM_IPSEC_FTE; + ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + ft_attr.autogroup.num_reserved_entries = 1; + ft_attr.autogroup.max_num_groups = 1; + ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + goto err_fs_ft; + } + fs_prot->ft = ft; err = rx_fs_create(priv, fs_prot); if (err) - rx_destroy(priv, type); + goto err_fs; + + return 0; +err_fs: + mlx5_destroy_flow_table(fs_prot->ft); +err_fs_ft: + mlx5_del_flow_rules(fs_prot->rx_err.rule); + mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr); +err_add: + mlx5_destroy_flow_table(fs_prot->rx_err.ft); return err; } @@ -291,21 +238,21 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type) accel_esp = priv->ipsec->rx_fs; fs_prot = &accel_esp->fs_prot[type]; mutex_lock(&fs_prot->prot_mutex); - if (fs_prot->refcnt++) - goto out; + if 
(fs_prot->refcnt) + goto skip; /* create FT */ err = rx_create(priv, type); - if (err) { - fs_prot->refcnt--; + if (err) goto out; - } /* connect */ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = fs_prot->ft; mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest); +skip: + fs_prot->refcnt++; out: mutex_unlock(&fs_prot->prot_mutex); return err; @@ -319,7 +266,8 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type) accel_esp = priv->ipsec->rx_fs; fs_prot = &accel_esp->fs_prot[type]; mutex_lock(&fs_prot->prot_mutex); - if (--fs_prot->refcnt) + fs_prot->refcnt--; + if (fs_prot->refcnt) goto out; /* disconnect */ @@ -352,32 +300,20 @@ static int tx_create(struct mlx5e_priv *priv) return 0; } -static void tx_destroy(struct mlx5e_priv *priv) -{ - struct mlx5e_ipsec *ipsec = priv->ipsec; - - if (IS_ERR_OR_NULL(ipsec->tx_fs->ft)) - return; - - mlx5_destroy_flow_table(ipsec->tx_fs->ft); - ipsec->tx_fs->ft = NULL; -} - static int tx_ft_get(struct mlx5e_priv *priv) { struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs; int err = 0; mutex_lock(&tx_fs->mutex); - if (tx_fs->refcnt++) - goto out; + if (tx_fs->refcnt) + goto skip; err = tx_create(priv); - if (err) { - tx_fs->refcnt--; + if (err) goto out; - } - +skip: + tx_fs->refcnt++; out: mutex_unlock(&tx_fs->mutex); return err; @@ -388,11 +324,11 @@ static void tx_ft_put(struct mlx5e_priv *priv) struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs; mutex_lock(&tx_fs->mutex); - if (--tx_fs->refcnt) + tx_fs->refcnt--; + if (tx_fs->refcnt) goto out; - tx_destroy(priv); - + mlx5_destroy_flow_table(tx_fs->ft); out: mutex_unlock(&tx_fs->mutex); } @@ -579,32 +515,6 @@ out: return err; } -static void rx_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) -{ - struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; - - mlx5_del_flow_rules(ipsec_rule->rule); - ipsec_rule->rule = NULL; - - mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr); - ipsec_rule->set_modify_hdr = NULL; - - rx_ft_put(priv, - sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4); -} - -static void tx_del_rule(struct mlx5e_priv *priv, - struct mlx5e_ipsec_sa_entry *sa_entry) -{ - struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; - - mlx5_del_flow_rules(ipsec_rule->rule); - ipsec_rule->rule = NULL; - - tx_ft_put(priv); -} - int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, struct mlx5e_ipsec_sa_entry *sa_entry) { @@ -617,12 +527,19 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, struct mlx5e_ipsec_sa_entry *sa_entry) { + struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; + struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); + + mlx5_del_flow_rules(ipsec_rule->rule); + if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) { - tx_del_rule(priv, sa_entry); + tx_ft_put(priv); return; } - rx_del_rule(priv, sa_entry); + mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr); + rx_ft_put(priv, + sa_entry->attrs.is_ipv6 ? 
ACCEL_FS_ESP6 : ACCEL_FS_ESP4); } void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) -- cgit v1.2.3 From a8444b0bdd1ae9c437fe6d3ef44f1ba4721c4329 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 20 Mar 2022 15:50:16 +0200 Subject: net/mlx5: Don't advertise IPsec netdev support for non-IPsec device Device that lacks proper IPsec capabilities won't pass mlx5e_ipsec_init() later, so no need to advertise HW netdev offload support for something that isn't going to work anyway. Fixes: 8ad893e516a7 ("net/mlx5e: Remove dependency in IPsec initialization flows") Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 81c9831ad286..28729b1cc6e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -454,6 +454,9 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; struct net_device *netdev = priv->netdev; + if (!mlx5_ipsec_device_caps(mdev)) + return; + if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || !MLX5_CAP_ETH(mdev, swp)) { mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n"); -- cgit v1.2.3 From effbe2675165515e47e45aa26d70c3caedc9f6bc Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 20 Mar 2022 16:07:14 +0200 Subject: net/mlx5: Simplify IPsec capabilities logic Reduce number of hard-coded IPsec capabilities by making sure that mlx5_ipsec_device_caps() sets only supported bits. As part of this change, remove _ACCEL_ notations from the capabilities names as they represent IPsec-capable device, so it is aligned with MLX5_CAP_IPSEC() macro. And prepare the code to IPsec full offload mode. 
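The capability helper described above follows a simple probe-once, expose-bits shape: each bit is set only when every hardware prerequisite for it is present, and callers merely test bits. A small userspace sketch of that shape follows; the CAP_* names and the has_*() probes are invented for the example, only the structure mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

enum {
	CAP_CRYPTO = 1 << 0,
	CAP_ESN    = 1 << 1,
};

/* Stand-ins for the hardware capability probes. */
static bool has_basic_offload(void) { return true; }
static bool has_crypto(void)        { return true; }
static bool has_esn(void)           { return false; }

static unsigned int device_caps(void)
{
	unsigned int caps = 0;

	if (!has_basic_offload())
		return 0;		/* no baseline support: advertise nothing */

	if (has_crypto())
		caps |= CAP_CRYPTO;
	if (!caps)
		return 0;		/* extra features are useless without crypto */

	if (has_esn())
		caps |= CAP_ESN;
	return caps;
}

int main(void)
{
	unsigned int caps = device_caps();

	printf("crypto:%d esn:%d\n", !!(caps & CAP_CRYPTO), !!(caps & CAP_ESN));
	return 0;
}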
Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 16 ++-------------- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 9 +++------ .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 22 +++++++++++----------- 3 files changed, 16 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 28729b1cc6e6..be7650d2cfd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -215,7 +215,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return -EINVAL; } if (x->props.flags & XFRM_STATE_ESN && - !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) { + !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) { netdev_info(netdev, "Cannot offload ESN xfrm states\n"); return -EINVAL; } @@ -262,11 +262,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n"); return -EINVAL; } - if (x->props.family == AF_INET6 && - !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) { - netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n"); - return -EINVAL; - } return 0; } @@ -457,12 +452,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) if (!mlx5_ipsec_device_caps(mdev)) return; - if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) || - !MLX5_CAP_ETH(mdev, swp)) { - mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n"); - return; - } - mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n"); netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops; netdev->features |= NETIF_F_HW_ESP; @@ -476,8 +465,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv) netdev->features |= NETIF_F_HW_ESP_TX_CSUM; netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM; - if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) || - !MLX5_CAP_ETH(mdev, swp_lso)) { + if (!MLX5_CAP_ETH(mdev, swp_lso)) { mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n"); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index af1467cbb7c7..97c55620089d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -102,12 +102,9 @@ struct mlx5_accel_esp_xfrm_attrs { u8 is_ipv6; }; -enum mlx5_accel_ipsec_cap { - MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0, - MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1, - MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 2, - MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 3, - MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 4, +enum mlx5_ipsec_cap { + MLX5_IPSEC_CAP_CRYPTO = 1 << 0, + MLX5_IPSEC_CAP_ESN = 1 << 1, }; struct mlx5e_priv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 817747d5229e..b44bce3f4ef1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -7,7 +7,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) { - u32 caps; + u32 caps = 0; if (!MLX5_CAP_GEN(mdev, ipsec_offload)) return 0; @@ -19,23 +19,23 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) return 
0; - if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) || - !MLX5_CAP_ETH(mdev, insert_trailer)) - return 0; - if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) || !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt)) return 0; - caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | - MLX5_ACCEL_IPSEC_CAP_LSO; + if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) || + !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) + return 0; - if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && - MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) - caps |= MLX5_ACCEL_IPSEC_CAP_ESP; + if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) && + MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp)) + caps |= MLX5_IPSEC_CAP_CRYPTO; + + if (!caps) + return 0; if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) - caps |= MLX5_ACCEL_IPSEC_CAP_ESN; + caps |= MLX5_IPSEC_CAP_ESN; /* We can accommodate up to 2^24 different IPsec objects * because we use up to 24 bit in flow table metadata -- cgit v1.2.3 From 1c4a59b9fa98c222bd6a8fa82bff942312165c1a Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 27 Mar 2022 19:23:19 +0300 Subject: net/mlx5: Remove not-supported ICV length mlx5 does not allow configuring any AEAD ICV length other than 128 bits, so remove the logic that configures the other, unsupported values. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 17 +---------------- include/linux/mlx5/mlx5_ifc.h | 2 -- 2 files changed, 1 insertion(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index b44bce3f4ef1..91ec8b8bf1ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -62,22 +62,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt); memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt)); - switch (aes_gcm->icv_len) { - case 64: - MLX5_SET(ipsec_obj, obj, icv_length, - MLX5_IPSEC_OBJECT_ICV_LEN_8B); - break; - case 96: - MLX5_SET(ipsec_obj, obj, icv_length, - MLX5_IPSEC_OBJECT_ICV_LEN_12B); - break; - case 128: - MLX5_SET(ipsec_obj, obj, icv_length, - MLX5_IPSEC_OBJECT_ICV_LEN_16B); - break; - default: - return -EINVAL; - } + MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B); salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv); memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv)); /* esn */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 7f4ec9faa180..7bab3e51c61e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -11379,8 +11379,6 @@ enum { enum { MLX5_IPSEC_OBJECT_ICV_LEN_16B, - MLX5_IPSEC_OBJECT_ICV_LEN_12B, - MLX5_IPSEC_OBJECT_ICV_LEN_8B, }; struct mlx5_ifc_ipsec_obj_bits { -- cgit v1.2.3 From 6cd2126ac602305f52e43666400b135b20b46b07 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 28 Mar 2022 12:06:21 +0300 Subject: net/mlx5: Cleanup XFRM attributes struct Remove everything that is not used from mlx5_accel_esp_xfrm_attrs, and change the type of spi so that it stores the proper type from the beginning.
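As a minimal illustration of the idea behind the spi change (plain userspace C with made-up names, not the driver code): convert the big-endian SPI exactly once, where the attributes are built, so every later consumer such as the flow-table match setup can use a plain host-order u32 without repeating the conversion.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for the kernel's be32_to_cpu()/cpu_to_be32() */

/* Hypothetical names for this sketch only. */
struct esp_attrs_sketch {
	uint32_t spi;			/* stored in CPU byte order from the beginning */
};

static void build_attrs(struct esp_attrs_sketch *attrs, uint32_t spi_be)
{
	attrs->spi = ntohl(spi_be);	/* single conversion point */
}

int main(void)
{
	struct esp_attrs_sketch attrs;

	build_attrs(&attrs, htonl(0x1234abcd));		/* pretend wire-order SPI */
	printf("spi = 0x%08x\n", (unsigned)attrs.spi);	/* consumers just read it */
	return 0;
}

With this pattern the byte-order conversion cannot be forgotten or duplicated at the use sites, which is the effect the patch achieves by storing spi as a u32.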
Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 10 ++-------- .../ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 21 ++------------------- .../ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 4 ++-- .../mellanox/mlx5/core/en_accel/ipsec_offload.c | 4 ++-- 4 files changed, 8 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index be7650d2cfd3..35e2bb301c26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -137,7 +137,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs) { struct xfrm_state *x = sa_entry->x; - struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm; + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; struct aead_geniv_ctx *geniv_ctx; struct crypto_aead *aead; unsigned int crypto_data_len, key_len; @@ -171,12 +171,6 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP; } - /* rx handle */ - attrs->sa_handle = sa_entry->handle; - - /* algo type */ - attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM; - /* action */ attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ? MLX5_ACCEL_ESP_ACTION_ENCRYPT : @@ -187,7 +181,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, MLX5_ACCEL_ESP_FLAGS_TUNNEL; /* spi */ - attrs->spi = x->id.spi; + attrs->spi = be32_to_cpu(x->id.spi); /* source , destination ips */ memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 97c55620089d..16bcceec16c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -55,11 +55,6 @@ enum mlx5_accel_esp_action { MLX5_ACCEL_ESP_ACTION_ENCRYPT, }; -enum mlx5_accel_esp_keymats { - MLX5_ACCEL_ESP_KEYMAT_AES_NONE, - MLX5_ACCEL_ESP_KEYMAT_AES_GCM, -}; - struct aes_gcm_keymat { u64 seq_iv; @@ -73,21 +68,9 @@ struct aes_gcm_keymat { struct mlx5_accel_esp_xfrm_attrs { enum mlx5_accel_esp_action action; u32 esn; - __be32 spi; - u32 seq; - u32 tfc_pad; + u32 spi; u32 flags; - u32 sa_handle; - union { - struct { - u32 size; - - } bmp; - } replay; - enum mlx5_accel_esp_keymats keymat_type; - union { - struct aes_gcm_keymat aes_gcm; - } keymat; + struct aes_gcm_keymat aes_gcm; union { __be32 a4; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 9d95a0025fd6..8315e8f603d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -356,8 +356,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs, /* SPI number */ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, - be32_to_cpu(attrs->spi)); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters.outer_esp_spi, attrs->spi); if (ip_version == 4) { memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 91ec8b8bf1ec..b13e152fe9fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -50,7 +50,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); - struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm; + struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm; u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {}; void *obj, *salt_p, *salt_iv_p; @@ -106,7 +106,7 @@ static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry) int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry) { - struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.keymat.aes_gcm; + struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm; struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry); int err; -- cgit v1.2.3 From bd24d1ffb445fc74679c3d9725c8b891f0739231 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 6 Apr 2022 14:53:13 +0300 Subject: net/mlx5: Don't perform lookup after already known sec_path There is no need to perform an extra lookup in order to get the already known sec_path that was set a couple of lines above. Simply reuse it. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index d30922e1b60f..6859f1c1a831 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -332,7 +332,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, return; } - sp = skb_sec_path(skb); sp->xvec[sp->len++] = xs; sp->olen++; -- cgit v1.2.3 From 656d33890732978919f79bdbc96921dfca6f28bb Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 5 Apr 2022 19:33:39 +0300 Subject: net/mlx5: Allow future addition of IPsec object modifiers Currently, all released FW versions support only two IPsec object modifiers, and modify_field_select gets and sets the same value with the proper bits. However, this is not future compatible: newer FW can have more modifiers, and the "default" would cause unchanged fields to be overwritten. Fix it by explicitly setting the fields that need to be overwritten.
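A minimal sketch of the pattern, in plain userspace C with hypothetical names (this is not the mlx5 firmware interface): a modify bitmask names exactly which fields the write applies to, so fields a newer firmware might add stay untouched instead of being clobbered by a blanket "modify everything" default.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit numbers and struct layout, for illustration only. */
#define MODIFY_ESN_MSB		(1ULL << 0)
#define MODIFY_ESN_OVERLAP	(1ULL << 1)

struct ipsec_obj_sketch {
	uint64_t modify_field_select;	/* which fields this write may overwrite */
	uint32_t esn_msb;
	uint32_t esn_overlap;
	uint32_t future_field;		/* imagine newer firmware added this */
};

static void modify_esn(struct ipsec_obj_sketch *obj, uint32_t esn_msb, int overlap)
{
	/* state explicitly which fields are being modified ... */
	obj->modify_field_select = MODIFY_ESN_MSB | MODIFY_ESN_OVERLAP;
	/* ... and only touch those fields */
	obj->esn_msb = esn_msb;
	obj->esn_overlap = overlap ? 1 : 0;
}

int main(void)
{
	struct ipsec_obj_sketch obj = { .future_field = 42 };

	modify_esn(&obj, 7, 1);
	printf("esn_msb=%u overlap=%u future_field=%u\n",
	       (unsigned)obj.esn_msb, (unsigned)obj.esn_overlap,
	       (unsigned)obj.future_field);	/* future_field is untouched */
	return 0;
}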
Fixes: 7ed92f97a1ad ("net/mlx5e: IPsec: Add Connect-X IPsec ESN update offload support") Signed-off-by: Huy Nguyen Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index b13e152fe9fc..792724ce7336 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -179,6 +179,9 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry, return -EOPNOTSUPP; obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object); + MLX5_SET64(ipsec_obj, obj, modify_field_select, + MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP | + MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB); MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn); if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) MLX5_SET(ipsec_obj, obj, esn_overlap, 1); -- cgit v1.2.3 From 0c38a5bd60ebce1172e27cfaf37e7b855f5392a3 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 4 May 2022 08:49:53 +0100 Subject: sfc: Disable Siena support Disable the build of Siena code until later in this patch series. Prevent sfc.ko from binding to Siena NICs. efx_init_sriov/efx_fini_sriov is only used for Siena. Remove calls to those. Signed-off-by: Martin Habets Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/Kconfig | 8 ++++---- drivers/net/ethernet/sfc/Makefile | 4 ++-- drivers/net/ethernet/sfc/efx.c | 17 ----------------- drivers/net/ethernet/sfc/nic.h | 4 ---- 4 files changed, 6 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 97ce64079855..846fff16fa48 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -17,14 +17,14 @@ config NET_VENDOR_SOLARFLARE if NET_VENDOR_SOLARFLARE config SFC - tristate "Solarflare SFC9000/SFC9100/EF100-family support" + tristate "Solarflare SFC9100/EF100-family support" depends on PCI depends on PTP_1588_CLOCK_OPTIONAL select MDIO select CRC32 help This driver supports 10/40-gigabit Ethernet cards based on - the Solarflare SFC9000-family and SFC9100-family controllers. + the Solarflare SFC9100-family controllers. It also supports 10/25/40/100-gigabit Ethernet cards based on the Solarflare EF100 networking IP in Xilinx FPGAs. @@ -47,11 +47,11 @@ config SFC_MCDI_MON This exposes the on-board firmware-managed sensors as a hardware monitor device. config SFC_SRIOV - bool "Solarflare SFC9000-family SR-IOV support" + bool "Solarflare SFC9000/SFC9100-family SR-IOV support" depends on SFC && PCI_IOV default y help - This enables support for the SFC9000 I/O Virtualization + This enables support for the Single Root I/O Virtualization features, allowing accelerated network performance in virtualized environments. 
config SFC_MCDI_LOGGING diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 5ba98769b52b..9b3374cf7937 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 sfc-y += efx.o efx_common.o efx_channels.o nic.o \ - farch.o siena.o ef10.o \ + ef10.o \ tx.o tx_common.o tx_tso.o rx.o rx_common.o \ selftest.o ethtool.o ethtool_common.o ptp.o \ mcdi.o mcdi_port.o mcdi_port_common.o \ @@ -8,7 +8,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \ ef100.o ef100_nic.o ef100_netdev.o \ ef100_ethtool.o ef100_rx.o ef100_tx.o sfc-$(CONFIG_SFC_MTD) += mtd.o -sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o ef100_sriov.o +sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 302dc835ac3d..5e7fe75cb1d4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -795,10 +795,6 @@ static void efx_unregister_netdev(struct efx_nic *efx) /* PCI device ID table */ static const struct pci_device_id efx_pci_table[] = { - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ - .driver_data = (unsigned long) &siena_a0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ - .driver_data = (unsigned long) &siena_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ @@ -1294,12 +1290,6 @@ static int __init efx_init_module(void) if (rc) goto err_notifier; -#ifdef CONFIG_SFC_SRIOV - rc = efx_init_sriov(); - if (rc) - goto err_sriov; -#endif - rc = efx_create_reset_workqueue(); if (rc) goto err_reset; @@ -1319,10 +1309,6 @@ static int __init efx_init_module(void) err_pci: efx_destroy_reset_workqueue(); err_reset: -#ifdef CONFIG_SFC_SRIOV - efx_fini_sriov(); - err_sriov: -#endif unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: return rc; @@ -1335,9 +1321,6 @@ static void __exit efx_exit_module(void) pci_unregister_driver(&ef100_pci_driver); pci_unregister_driver(&efx_pci_driver); efx_destroy_reset_workqueue(); -#ifdef CONFIG_SFC_SRIOV - efx_fini_sriov(); -#endif unregister_netdevice_notifier(&efx_netdev_notifier); } diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 5c2fe3ce3f4d..251868235ae4 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -301,10 +301,6 @@ struct efx_ef10_nic_data { int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, bool *data_mapped); -int efx_init_sriov(void); -void efx_fini_sriov(void); - -extern const struct efx_nic_type siena_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; -- cgit v1.2.3 From 6b73f20ab6c401a1a7860f02734ab11bf748e69b Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 4 May 2022 08:50:44 +0100 Subject: sfc: Copy a subset of mcdi_pcol.h to siena For Siena we do not need new messages that were defined for the EF100 architecture. Several debug messages have also been removed. Signed-off-by: Martin Habets Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/siena/mcdi_pcol.h | 17204 +++++++++++++++++++++++++++ 1 file changed, 17204 insertions(+) create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_pcol.h (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h new file mode 100644 index 000000000000..89a7fd47b057 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h @@ -0,0 +1,17204 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2009-2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + */ + + +#ifndef MCDI_PCOL_H +#define MCDI_PCOL_H + +/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */ +/* Power-on reset state */ +#define MC_FW_STATE_POR (1) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. */ +#define MC_FW_WARM_BOOT_OK (2) +/* The MC main image has started to boot. */ +#define MC_FW_STATE_BOOTING (4) +/* The Scheduler has started. */ +#define MC_FW_STATE_SCHED (8) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. + * Unlike a warm boot, assume DMEM has been reloaded, so that + * the MC persistent data must be reinitialised. */ +#define MC_FW_TEPID_BOOT_OK (16) +/* We have entered the main firmware via recovery mode. This + * means that MC persistent data must be reinitialised, but that + * we shouldn't touch PCIe config. */ +#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32) +/* BIST state has been initialized */ +#define MC_FW_BIST_INIT_OK (128) + +/* Siena MC shared memmory offsets */ +/* The 'doorbell' addresses are hard-wired to alert the MC when written */ +#define MC_SMEM_P0_DOORBELL_OFST 0x000 +#define MC_SMEM_P1_DOORBELL_OFST 0x004 +/* The rest of these are firmware-defined */ +#define MC_SMEM_P0_PDU_OFST 0x008 +#define MC_SMEM_P1_PDU_OFST 0x108 +#define MC_SMEM_PDU_LEN 0x100 +#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0 +#define MC_SMEM_P0_STATUS_OFST 0x7f8 +#define MC_SMEM_P1_STATUS_OFST 0x7fc + +/* Values to be written to the per-port status dword in shared + * memory on reboot and assert */ +#define MC_STATUS_DWORD_REBOOT (0xb007b007) +#define MC_STATUS_DWORD_ASSERT (0xdeaddead) + +/* Check whether an mcfw version (in host order) belongs to a bootloader */ +#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007) + +/* The current version of the MCDI protocol. + * + * Note that the ROM burnt into the card only talks V0, so at the very + * least every driver must support version 0 and MCDI_PCOL_VERSION + */ +#define MCDI_PCOL_VERSION 2 + +/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ + +/* MCDI version 1 + * + * Each MCDI request starts with an MCDI_HEADER, which is a 32bit + * structure, filled in by the client. + * + * 0 7 8 16 20 22 23 24 31 + * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS | + * | | | + * | | \--- Response + * | \------- Error + * \------------------------------ Resync (always set) + * + * The client writes it's request into MC shared memory, and rings the + * doorbell. Each request is completed by either by the MC writing + * back into shared memory, or by writing out an event. + * + * All MCDI commands support completion by shared memory response. 
Each + * request may also contain additional data (accounted for by HEADER.LEN), + * and some response's may also contain additional data (again, accounted + * for by HEADER.LEN). + * + * Some MCDI commands support completion by event, in which any associated + * response data is included in the event. + * + * The protocol requires one response to be delivered for every request, a + * request should not be sent unless the response for the previous request + * has been received (either by polling shared memory, or by receiving + * an event). + */ + +/** Request/Response structure */ +#define MCDI_HEADER_OFST 0 +#define MCDI_HEADER_CODE_LBN 0 +#define MCDI_HEADER_CODE_WIDTH 7 +#define MCDI_HEADER_RESYNC_LBN 7 +#define MCDI_HEADER_RESYNC_WIDTH 1 +#define MCDI_HEADER_DATALEN_LBN 8 +#define MCDI_HEADER_DATALEN_WIDTH 8 +#define MCDI_HEADER_SEQ_LBN 16 +#define MCDI_HEADER_SEQ_WIDTH 4 +#define MCDI_HEADER_RSVD_LBN 20 +#define MCDI_HEADER_RSVD_WIDTH 1 +#define MCDI_HEADER_NOT_EPOCH_LBN 21 +#define MCDI_HEADER_NOT_EPOCH_WIDTH 1 +#define MCDI_HEADER_ERROR_LBN 22 +#define MCDI_HEADER_ERROR_WIDTH 1 +#define MCDI_HEADER_RESPONSE_LBN 23 +#define MCDI_HEADER_RESPONSE_WIDTH 1 +#define MCDI_HEADER_XFLAGS_LBN 24 +#define MCDI_HEADER_XFLAGS_WIDTH 8 +/* Request response using event */ +#define MCDI_HEADER_XFLAGS_EVREQ 0x01 +/* Request (and signal) early doorbell return */ +#define MCDI_HEADER_XFLAGS_DBRET 0x02 + +/* Maximum number of payload bytes */ +#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V2 0x400 + +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2 + + +/* The MC can generate events for two reasons: + * - To advance a shared memory request if XFLAGS_EVREQ was set + * - As a notification (link state, i2c event), controlled + * via MC_CMD_LOG_CTRL + * + * Both events share a common structure: + * + * 0 32 33 36 44 52 60 + * | Data | Cont | Level | Src | Code | Rsvd | + * | + * \ There is another event pending in this notification + * + * If Code==CMDDONE, then the fields are further interpreted as: + * + * - LEVEL==INFO Command succeeded + * - LEVEL==ERR Command failed + * + * 0 8 16 24 32 + * | Seq | Datalen | Errno | Rsvd | + * + * These fields are taken directly out of the standard MCDI header, i.e., + * LEVEL==ERR, Datalen == 0 => Reboot + * + * Events can be squirted out of the UART (using LOG_CTRL) without a + * MCDI header. An event can be distinguished from a MCDI response by + * examining the first byte which is 0xc0. This corresponds to the + * non-existent MCDI command MC_CMD_DEBUG_LOG. + * + * 0 7 8 + * | command | Resync | = 0xc0 + * + * Since the event is written in big-endian byte order, this works + * providing bits 56-63 of the event are 0xc0. + * + * 56 60 63 + * | Rsvd | Code | = 0xc0 + * + * Which means for convenience the event code is 0xc for all MC + * generated events. + */ +#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc + + +/* Operation not permitted. */ +#define MC_CMD_ERR_EPERM 1 +/* Non-existent command target */ +#define MC_CMD_ERR_ENOENT 2 +/* assert() has killed the MC */ +#define MC_CMD_ERR_EINTR 4 +/* I/O failure */ +#define MC_CMD_ERR_EIO 5 +/* Already exists */ +#define MC_CMD_ERR_EEXIST 6 +/* Try again */ +#define MC_CMD_ERR_EAGAIN 11 +/* Out of memory */ +#define MC_CMD_ERR_ENOMEM 12 +/* Caller does not hold required locks */ +#define MC_CMD_ERR_EACCES 13 +/* Resource is currently unavailable (e.g. 
lock contention) */ +#define MC_CMD_ERR_EBUSY 16 +/* No such device */ +#define MC_CMD_ERR_ENODEV 19 +/* Invalid argument to target */ +#define MC_CMD_ERR_EINVAL 22 +/* Broken pipe */ +#define MC_CMD_ERR_EPIPE 32 +/* Read-only */ +#define MC_CMD_ERR_EROFS 30 +/* Out of range */ +#define MC_CMD_ERR_ERANGE 34 +/* Non-recursive resource is already acquired */ +#define MC_CMD_ERR_EDEADLK 35 +/* Operation not implemented */ +#define MC_CMD_ERR_ENOSYS 38 +/* Operation timed out */ +#define MC_CMD_ERR_ETIME 62 +/* Link has been severed */ +#define MC_CMD_ERR_ENOLINK 67 +/* Protocol error */ +#define MC_CMD_ERR_EPROTO 71 +/* Operation not supported */ +#define MC_CMD_ERR_ENOTSUP 95 +/* Address not available */ +#define MC_CMD_ERR_EADDRNOTAVAIL 99 +/* Not connected */ +#define MC_CMD_ERR_ENOTCONN 107 +/* Operation already in progress */ +#define MC_CMD_ERR_EALREADY 114 + +/* Resource allocation failed. */ +#define MC_CMD_ERR_ALLOC_FAIL 0x1000 +/* V-adaptor not found. */ +#define MC_CMD_ERR_NO_VADAPTOR 0x1001 +/* EVB port not found. */ +#define MC_CMD_ERR_NO_EVB_PORT 0x1002 +/* V-switch not found. */ +#define MC_CMD_ERR_NO_VSWITCH 0x1003 +/* Too many VLAN tags. */ +#define MC_CMD_ERR_VLAN_LIMIT 0x1004 +/* Bad PCI function number. */ +#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005 +/* Invalid VLAN mode. */ +#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006 +/* Invalid v-switch type. */ +#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007 +/* Invalid v-port type. */ +#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008 +/* MAC address exists. */ +#define MC_CMD_ERR_MAC_EXIST 0x1009 +/* Slave core not present */ +#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a +/* The datapath is disabled. */ +#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b +/* The requesting client is not a function */ +#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c +/* The requested operation might require the + command to be passed between MCs, and the + transport doesn't support that. Should + only ever been seen over the UART. */ +#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d +/* VLAN tag(s) exists */ +#define MC_CMD_ERR_VLAN_EXIST 0x100e +/* No MAC address assigned to an EVB port */ +#define MC_CMD_ERR_NO_MAC_ADDR 0x100f +/* Notifies the driver that the request has been relayed + * to an admin function for authorization. The driver should + * wait for a PROXY_RESPONSE event and then resend its request. + * This error code is followed by a 32-bit handle that + * helps matching it with the respective PROXY_RESPONSE event. */ +#define MC_CMD_ERR_PROXY_PENDING 0x1010 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 +/* The request cannot be passed for authorization because + * another request from the same function is currently being + * authorized. The drvier should try again later. */ +#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011 +/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function + * that has enabled proxying or BLOCK_INDEX points to a function that + * doesn't await an authorization. */ +#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012 +/* This code is currently only used internally in FW. Its meaning is that + * an operation failed due to lack of SR-IOV privilege. + * Normally it is translated to EPERM by send_cmd_err(), + * but it may also be used to trigger some special mechanism + * for handling such case, e.g. to relay the failed request + * to a designated admin function for authorization. */ +#define MC_CMD_ERR_NO_PRIVILEGE 0x1013 +/* Workaround 26807 could not be turned on/off because some functions + * have already installed filters. 
See the comment at + * MC_CMD_WORKAROUND_BUG26807. + * May also returned for other operations such as sub-variant switching. */ +#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 +/* The clock whose frequency you've attempted to set set + * doesn't exist on this NIC */ +#define MC_CMD_ERR_NO_CLOCK 0x1015 +/* Returned by MC_CMD_TESTASSERT if the action that should + * have caused an assertion failed to do so. */ +#define MC_CMD_ERR_UNREACHABLE 0x1016 +/* This command needs to be processed in the background but there were no + * resources to do so. Send it again after a command has completed. */ +#define MC_CMD_ERR_QUEUE_FULL 0x1017 +/* The operation could not be completed because the PCIe link has gone + * away. This error code is never expected to be returned over the TLP + * transport. */ +#define MC_CMD_ERR_NO_PCIE 0x1018 +/* The operation could not be completed because the datapath has gone + * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the + * datapath absence may be temporary*/ +#define MC_CMD_ERR_NO_DATAPATH 0x1019 +/* The operation could not complete because some VIs are allocated */ +#define MC_CMD_ERR_VIS_PRESENT 0x101a +/* The operation could not complete because some PIO buffers are allocated */ +#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b + +#define MC_CMD_ERR_CODE_OFST 0 + +/* We define 8 "escape" commands to allow + for command number space extension */ + +#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78 +#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79 +#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A +#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B +#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C +#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D +#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E +#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F + +/* Vectors in the boot ROM */ +/* Point to the copycode entry point. */ +#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) +#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) +#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4) +/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */ +#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4) +/* Points to the recovery mode entry point. Same as above, but the right name. */ +#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4) + +/* Points to noflash mode entry point. 
*/ +#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4) + +/* The command set exported by the boot ROM (MCDI v0) */ +#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ + (1 << MC_CMD_READ32) | \ + (1 << MC_CMD_WRITE32) | \ + (1 << MC_CMD_COPYCODE) | \ + (1 << MC_CMD_GET_VERSION), \ + 0, 0, 0 } + +#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \ + (MC_CMD_SENSOR_ENTRY_OFST + (_x)) + +#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + + +/* Version 2 adds an optional argument to error returns: the errno value + * may be followed by the (0-based) number of the first argument that + * could not be processed. + */ +#define MC_CMD_ERR_ARG_OFST 4 + +/* No space */ +#define MC_CMD_ERR_ENOSPC 28 + +/* MCDI_EVENT structuredef */ +#define MCDI_EVENT_LEN 8 +#define MCDI_EVENT_CONT_LBN 32 +#define MCDI_EVENT_CONT_WIDTH 1 +#define MCDI_EVENT_LEVEL_LBN 33 +#define MCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MCDI_EVENT_LEVEL_FATAL 0x3 +#define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_SEQ_OFST 0 +#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 +#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 +#define MCDI_EVENT_CMDDONE_DATALEN_OFST 0 +#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 +#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 +#define MCDI_EVENT_CMDDONE_ERRNO_OFST 0 +#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 +#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_OFST 0 +#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: Link is down or link speed could not be determined */ +#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0 +/* enum: 100Mbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +/* enum: 1Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +/* enum: 10Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +/* enum: 40Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +/* enum: 25Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5 +/* enum: 50Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 +/* enum: 100Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 +#define MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0 +#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 +#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_STATE_OFST 0 +#define MCDI_EVENT_SENSOREVT_STATE_LBN 8 +#define 
MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_VALUE_OFST 0 +#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 +#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 +#define MCDI_EVENT_FWALERT_DATA_OFST 0 +#define MCDI_EVENT_FWALERT_DATA_LBN 8 +#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 +#define MCDI_EVENT_FWALERT_REASON_OFST 0 +#define MCDI_EVENT_FWALERT_REASON_LBN 0 +#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 +/* enum: SRAM Access. */ +#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 +#define MCDI_EVENT_FLR_VF_OFST 0 +#define MCDI_EVENT_FLR_VF_LBN 0 +#define MCDI_EVENT_FLR_VF_WIDTH 8 +#define MCDI_EVENT_TX_ERR_TXQ_OFST 0 +#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 +#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 +#define MCDI_EVENT_TX_ERR_TYPE_OFST 0 +#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 +/* enum: Descriptor loader reported failure */ +#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 +/* enum: Descriptor ring empty and no EOP seen for packet */ +#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 +/* enum: Overlength packet */ +#define MCDI_EVENT_TX_ERR_2BIG 0x3 +/* enum: Malformed option descriptor */ +#define MCDI_EVENT_TX_BAD_OPTDESC 0x5 +/* enum: Option descriptor part way through a packet */ +#define MCDI_EVENT_TX_OPT_IN_PKT 0x8 +/* enum: DMA or PIO data access error */ +#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 +#define MCDI_EVENT_TX_ERR_INFO_OFST 0 +#define MCDI_EVENT_TX_ERR_INFO_LBN 16 +#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_TX_FLUSH_TXQ_OFST 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 +#define MCDI_EVENT_PTP_ERR_TYPE_OFST 0 +#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 +#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 +/* enum: PLL lost lock */ +#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 +/* enum: Filter overflow (PDMA) */ +#define MCDI_EVENT_PTP_ERR_FILTER 0x2 +/* enum: FIFO overflow (FPGA) */ +#define MCDI_EVENT_PTP_ERR_FIFO 0x3 +/* enum: Merge queue overflow */ +#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_OFST 0 +#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 +#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 +/* enum: AOE failed to load - no valid image? */ +#define MCDI_EVENT_AOE_NO_LOAD 0x1 +/* enum: AOE FC reported an exception */ +#define MCDI_EVENT_AOE_FC_ASSERT 0x2 +/* enum: AOE FC watchdogged */ +#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3 +/* enum: AOE FC failed to start */ +#define MCDI_EVENT_AOE_FC_NO_START 0x4 +/* enum: Generic AOE fault - likely to have been reported via other means too + * but intended for use by aoex driver. 
+ */ +#define MCDI_EVENT_AOE_FAULT 0x5 +/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6 +/* enum: AOE loaded successfully */ +#define MCDI_EVENT_AOE_LOAD 0x7 +/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_DMA 0x8 +/* enum: AOE byteblaster connected/disconnected (Connection status in + * AOE_ERR_DATA) + */ +#define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +/* enum: DDR ECC status update */ +#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb +/* enum: FPGA header incorrect */ +#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc +/* enum: FPGA Powered Off due to error in powering up FPGA */ +#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd +/* enum: AOE FPGA load failed due to MC to MUM communication failure */ +#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe +/* enum: Notify that invalid flash type detected */ +#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf +/* enum: Notify that the attempt to run FPGA Controller firmware timedout */ +#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 +/* enum: Failure to probe one or more FPGA boot flash chips */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 +/* enum: FPGA boot-flash contains an invalid image header */ +#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12 +/* enum: Failed to program clocks required by the FPGA */ +#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 +/* enum: Notify that FPGA Controller is alive to serve MCDI requests */ +#define MCDI_EVENT_AOE_FC_RUNNING 0x14 +#define MCDI_EVENT_AOE_ERR_DATA_OFST 0 +#define MCDI_EVENT_AOE_ERR_DATA_LBN 8 +#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 +/* enum: FC Assert happened, but the register information is not available */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 +/* enum: The register information for FC Assert is ready for readinng by driver + */ +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 +/* enum: Reading from NV failed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0 +/* enum: Invalid Magic Number if FPGA header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1 +/* enum: Invalid Silicon type detected in header */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2 +/* enum: Unsupported VRatio */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3 +/* enum: Unsupported DDR Type */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4 +/* enum: DDR Voltage out of supported range */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5 +/* enum: Unsupported DDR speed */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6 +/* enum: Unsupported DDR size */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7 +/* enum: Unsupported DDR rank */ +#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8 +/* enum: Primary boot flash */ +#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0 +/* enum: Secondary boot flash */ +#define 
MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_OFST 0 +#define MCDI_EVENT_RX_ERR_RXQ_LBN 0 +#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_OFST 0 +#define MCDI_EVENT_RX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_OFST 0 +#define MCDI_EVENT_RX_ERR_INFO_LBN 16 +#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 0 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_OFST 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define MCDI_EVENT_MC_REBOOT_COUNT_OFST 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_OFST 0 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_OFST 0 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DBRET_SEQ_OFST 0 +#define MCDI_EVENT_DBRET_SEQ_LBN 0 +#define MCDI_EVENT_DBRET_SEQ_WIDTH 8 +#define MCDI_EVENT_SUC_ERR_TYPE_OFST 0 +#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0 +#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8 +/* enum: Corrupted or bad SUC application. */ +#define MCDI_EVENT_SUC_BAD_APP 0x1 +/* enum: SUC application reported an assert. */ +#define MCDI_EVENT_SUC_ASSERT 0x2 +/* enum: SUC application reported an exception. */ +#define MCDI_EVENT_SUC_EXCEPTION 0x3 +/* enum: SUC watchdog timer expired. */ +#define MCDI_EVENT_SUC_WATCHDOG 0x4 +#define MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0 +#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8 +#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24 +#define MCDI_EVENT_SUC_ERR_DATA_OFST 0 +#define MCDI_EVENT_SUC_ERR_DATA_LBN 8 +#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4 +/* Enum values, see field(s): */ +/* MCDI_EVENT/LINKCHANGE_SPEED */ +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0 +#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2 +#define MCDI_EVENT_DATA_LBN 0 +#define MCDI_EVENT_DATA_WIDTH 32 +/* Alias for PTP_DATA. 
*/ +#define MCDI_EVENT_SRC_LBN 36 +#define MCDI_EVENT_SRC_WIDTH 8 +/* Data associated with PTP events which doesn't fit into the main DATA field + */ +#define MCDI_EVENT_PTP_DATA_LBN 36 +#define MCDI_EVENT_PTP_DATA_WIDTH 8 +/* EF100 specific. Defined by QDMA. The phase bit, changes each time round the + * event ring + */ +#define MCDI_EVENT_EV_EVQ_PHASE_LBN 59 +#define MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1 +#define MCDI_EVENT_EV_CODE_LBN 60 +#define MCDI_EVENT_EV_CODE_WIDTH 4 +#define MCDI_EVENT_CODE_LBN 44 +#define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 +/* enum: Bad assert. */ +#define MCDI_EVENT_CODE_BADSSERT 0x1 +/* enum: PM Notice. */ +#define MCDI_EVENT_CODE_PMNOTICE 0x2 +/* enum: Command done. */ +#define MCDI_EVENT_CODE_CMDDONE 0x3 +/* enum: Link change. */ +#define MCDI_EVENT_CODE_LINKCHANGE 0x4 +/* enum: Sensor Event. */ +#define MCDI_EVENT_CODE_SENSOREVT 0x5 +/* enum: Schedule error. */ +#define MCDI_EVENT_CODE_SCHEDERR 0x6 +/* enum: Reboot. */ +#define MCDI_EVENT_CODE_REBOOT 0x7 +/* enum: Mac stats DMA. */ +#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 +/* enum: Firmware alert. */ +#define MCDI_EVENT_CODE_FWALERT 0x9 +/* enum: Function level reset. */ +#define MCDI_EVENT_CODE_FLR 0xa +/* enum: Transmit error */ +#define MCDI_EVENT_CODE_TX_ERR 0xb +/* enum: Tx flush has completed */ +#define MCDI_EVENT_CODE_TX_FLUSH 0xc +/* enum: PTP packet received timestamp */ +#define MCDI_EVENT_CODE_PTP_RX 0xd +/* enum: PTP NIC failure */ +#define MCDI_EVENT_CODE_PTP_FAULT 0xe +/* enum: PTP PPS event */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf +/* enum: Rx flush has completed */ +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +/* enum: Receive error */ +#define MCDI_EVENT_CODE_RX_ERR 0x11 +/* enum: AOE fault */ +#define MCDI_EVENT_CODE_AOE 0x12 +/* enum: Network port calibration failed (VCAL). */ +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +/* enum: HW PPS event */ +#define MCDI_EVENT_CODE_HW_PPS 0x14 +/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and + * a different format) + */ +#define MCDI_EVENT_CODE_MC_REBOOT 0x15 +/* enum: the MC has detected a parity error */ +#define MCDI_EVENT_CODE_PAR_ERR 0x16 +/* enum: the MC has detected a correctable error */ +#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 +/* enum: the MC has detected an uncorrectable error */ +#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: The MC has entered offline BIST mode */ +#define MCDI_EVENT_CODE_MC_BIST 0x19 +/* enum: PTP tick event providing current NIC time */ +#define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d +/* enum: MCDI command accepted. New commands can be issued but this command is + * not done yet. + */ +#define MCDI_EVENT_CODE_DBRET 0x1e +/* enum: The MC has detected a fault on the SUC */ +#define MCDI_EVENT_CODE_SUC 0x1f +/* enum: Link change. This event is sent instead of LINKCHANGE if + * WANT_V2_LINKCHANGES was set on driver attach. + */ +#define MCDI_EVENT_CODE_LINKCHANGE_V2 0x20 +/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach + * when the local device capabilities changes. This will usually correspond to + * a module change. 
+ */ +#define MCDI_EVENT_CODE_MODULECHANGE 0x21 +/* enum: Notification that the sensors have been added and/or removed from the + * sensor table. This event includes the new sensor table generation count, if + * this does not match the driver's local copy it is expected to call + * DYNAMIC_SENSORS_LIST to refresh it. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22 +/* enum: Notification that a sensor has changed state as a result of a reading + * crossing a threshold. This is sent as two events, the first event contains + * the handle and the sensor's state (in the SRC field), and the second + * contains the value. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23 +/* enum: Notification that a descriptor proxy function configuration has been + * pushed to "live" status (visible to host). SRC field contains the handle of + * the affected descriptor proxy function. DATA field contains the generation + * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET / + * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24 +/* enum: Notification that a descriptor proxy function has been reset. SRC + * field contains the handle of the affected descriptor proxy function. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25 +/* enum: Notification that a driver attached to a descriptor proxy function. + * SRC field contains the handle of the affected descriptor proxy function. For + * Virtio proxy functions this message consists of two MCDI events, where the + * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0 + * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy + * functions event length and meaning of DATA field is not yet defined. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26 +/* enum: Artificial event generated by host and posted via MC for test + * purposes. 
+ */ +#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_DATA_LBN 0 +#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LEN 4 +#define MCDI_EVENT_SENSOREVT_DATA_LBN 0 +#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 +#define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LEN 4 +#define MCDI_EVENT_TX_ERR_DATA_LBN 0 +#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of + * timestamp + */ +#define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LEN 4 +#define MCDI_EVENT_PTP_SECONDS_LBN 0 +#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of + * timestamp + */ +#define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_MAJOR_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field + * of timestamp + */ +#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4 +#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 +#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of + * timestamp + */ +#define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LEN 4 +#define MCDI_EVENT_PTP_MINOR_LBN 0 +#define MCDI_EVENT_PTP_MINOR_WIDTH 32 +/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet + */ +#define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LEN 4 +#define MCDI_EVENT_PTP_UUID_LBN 0 +#define MCDI_EVENT_PTP_UUID_WIDTH 32 +#define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LEN 4 +#define MCDI_EVENT_RX_ERR_DATA_LBN 0 +#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LEN 4 +#define MCDI_EVENT_PAR_ERR_DATA_LBN 0 +#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 +/* For CODE_PTP_TIME events, the major value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4 +#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 +/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19. 
+ */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +/* For CODE_PTP_TIME events, most significant bits of the minor value of the + * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21. + */ +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. + */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 +#define MCDI_EVENT_DBRET_DATA_OFST 0 +#define MCDI_EVENT_DBRET_DATA_LEN 4 +#define MCDI_EVENT_DBRET_DATA_LBN 0 +#define MCDI_EVENT_DBRET_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32 +#define MCDI_EVENT_MODULECHANGE_DATA_OFST 0 +#define MCDI_EVENT_MODULECHANGE_DATA_LEN 4 +#define MCDI_EVENT_MODULECHANGE_DATA_LBN 0 +#define MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32 +/* The new generation count after a sensor has been added or deleted. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32 +/* The handle of a dynamic sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32 +/* The current values of a sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32 +/* The current state of a sensor. 
*/ +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36 +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8 +#define MCDI_EVENT_DESC_PROXY_DATA_OFST 0 +#define MCDI_EVENT_DESC_PROXY_DATA_LEN 4 +#define MCDI_EVENT_DESC_PROXY_DATA_LBN 0 +#define MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32 +/* Generation count of applied configuration set */ +#define MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32 +/* Virtio features negotiated with the host driver. First event (CONT=1) + * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63. + */ +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32 + +/* FCDI_EVENT structuredef */ +#define FCDI_EVENT_LEN 8 +#define FCDI_EVENT_CONT_LBN 32 +#define FCDI_EVENT_CONT_WIDTH 1 +#define FCDI_EVENT_LEVEL_LBN 33 +#define FCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define FCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define FCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define FCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define FCDI_EVENT_LEVEL_FATAL 0x3 +#define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_DATA_LEN 4 +#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0 +#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 +#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 +#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ +#define FCDI_EVENT_LINK_UP 0x1 /* enum */ +#define FCDI_EVENT_DATA_LBN 0 +#define FCDI_EVENT_DATA_WIDTH 32 +#define FCDI_EVENT_SRC_LBN 36 +#define FCDI_EVENT_SRC_WIDTH 8 +#define FCDI_EVENT_EV_CODE_LBN 60 +#define FCDI_EVENT_EV_CODE_WIDTH 4 +#define FCDI_EVENT_CODE_LBN 44 +#define FCDI_EVENT_CODE_WIDTH 8 +/* enum: The FC was rebooted. */ +#define FCDI_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define FCDI_EVENT_CODE_ASSERT 0x2 +/* enum: DDR3 test result. */ +#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3 +/* enum: Link status. */ +#define FCDI_EVENT_CODE_LINK_STATE 0x4 +/* enum: A timed read is ready to be serviced. 
*/ +#define FCDI_EVENT_CODE_TIMED_READ 0x5 +/* enum: One or more PPS IN events */ +#define FCDI_EVENT_CODE_PPS_IN 0x6 +/* enum: Tick event from PTP clock */ +#define FCDI_EVENT_CODE_PTP_TICK 0x7 +/* enum: ECC error counters */ +#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 +/* enum: Current status of PTP */ +#define FCDI_EVENT_CODE_PTP_STATUS 0x9 +/* enum: Port id config to map MC-FC port idx */ +#define FCDI_EVENT_CODE_PORT_CONFIG 0xa +/* enum: Boot result or error code */ +#define FCDI_EVENT_CODE_BOOT_RESULT 0xb +#define FCDI_EVENT_REBOOT_SRC_LBN 36 +#define FCDI_EVENT_REBOOT_SRC_WIDTH 8 +#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ +#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 +#define FCDI_EVENT_ASSERT_TYPE_LBN 36 +#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 +#define FCDI_EVENT_LINK_STATE_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_DATA_LEN 4 +#define FCDI_EVENT_LINK_STATE_DATA_LBN 0 +#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 +#define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_STATE_LEN 4 +#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ +#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ +#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ +#define FCDI_EVENT_PTP_STATE_LBN 0 +#define FCDI_EVENT_PTP_STATE_WIDTH 32 +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 +/* Index of MC port being referred to */ +#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36 +#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 +/* FC Port index that matches the MC port index in SRC */ +#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4 +#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 +#define FCDI_EVENT_BOOT_RESULT_OFST 0 +#define FCDI_EVENT_BOOT_RESULT_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ +#define FCDI_EVENT_BOOT_RESULT_LBN 0 +#define FCDI_EVENT_BOOT_RESULT_WIDTH 32 + +/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events + * to the MC. Note that this structure | is overlayed over a normal FCDI event + * such that bits 32-63 containing | event code, level, source etc remain the + * same. 
In this case the data | field of the header is defined to be the + * number of timestamps + */ +#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16 +#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248 +#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016 +#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8) +/* Number of timestamps following */ +#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 +/* Seconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 +/* Nanoseconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 +/* Timestamp records comprising the event */ +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 + +/* MUM_EVENT structuredef */ +#define MUM_EVENT_LEN 8 +#define MUM_EVENT_CONT_LBN 32 +#define MUM_EVENT_CONT_WIDTH 1 +#define MUM_EVENT_LEVEL_LBN 33 +#define MUM_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MUM_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MUM_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MUM_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MUM_EVENT_LEVEL_FATAL 0x3 +#define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_DATA_LEN 4 +#define MUM_EVENT_SENSOR_ID_OFST 0 +#define MUM_EVENT_SENSOR_ID_LBN 0 +#define MUM_EVENT_SENSOR_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_OFST 0 +#define MUM_EVENT_SENSOR_STATE_LBN 8 +#define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_OFST 0 +#define MUM_EVENT_PORT_PHY_READY_LBN 0 +#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0 +#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0 +#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 +#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0 +#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 +#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0 +#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 +#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0 +#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 +#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0 +#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 +#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 +#define MUM_EVENT_DATA_LBN 0 +#define MUM_EVENT_DATA_WIDTH 32 +#define MUM_EVENT_SRC_LBN 36 +#define MUM_EVENT_SRC_WIDTH 8 +#define MUM_EVENT_EV_CODE_LBN 60 +#define MUM_EVENT_EV_CODE_WIDTH 4 +#define MUM_EVENT_CODE_LBN 44 +#define MUM_EVENT_CODE_WIDTH 8 +/* enum: The MUM was rebooted. 
*/ +#define MUM_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define MUM_EVENT_CODE_ASSERT 0x2 +/* enum: Sensor failure. */ +#define MUM_EVENT_CODE_SENSOR 0x3 +/* enum: Link fault has been asserted, or has cleared. */ +#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 +#define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LEN 4 +#define MUM_EVENT_SENSOR_DATA_LBN 0 +#define MUM_EVENT_SENSOR_DATA_WIDTH 32 +#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4 +#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 +#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 +#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LEN 4 +#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 +#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_TECH_LEN 4 +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ +#define MUM_EVENT_PORT_PHY_TECH_LBN 0 +#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 + + +/***********************************/ +/* MC_CMD_READ32 + * Read multiple 32byte words from MC memory. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. + */ +#define MC_CMD_READ32 0x1 +#undef MC_CMD_0x1_PRIVILEGE_CTG + +#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_READ32_IN msgrequest */ +#define MC_CMD_READ32_IN_LEN 8 +#define MC_CMD_READ32_IN_ADDR_OFST 0 +#define MC_CMD_READ32_IN_ADDR_LEN 4 +#define MC_CMD_READ32_IN_NUMWORDS_OFST 4 +#define MC_CMD_READ32_IN_NUMWORDS_LEN 4 + +/* MC_CMD_READ32_OUT msgresponse */ +#define MC_CMD_READ32_OUT_LENMIN 4 +#define MC_CMD_READ32_OUT_LENMAX 252 +#define MC_CMD_READ32_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_WRITE32 + * Write multiple 32byte words to MC memory. 
+ */ +#define MC_CMD_WRITE32 0x2 +#undef MC_CMD_0x2_PRIVILEGE_CTG + +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_WRITE32_IN msgrequest */ +#define MC_CMD_WRITE32_IN_LENMIN 8 +#define MC_CMD_WRITE32_IN_LENMAX 252 +#define MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020 +#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) +#define MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4) +#define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_ADDR_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_OFST 4 +#define MC_CMD_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254 + +/* MC_CMD_WRITE32_OUT msgresponse */ +#define MC_CMD_WRITE32_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_COPYCODE + * Copy MC code between two locations and jump. Note - this command really + * belongs to INSECURE category but is required by shmboot. The command handler + * has additional checks to reject insecure calls. + */ +#define MC_CMD_COPYCODE 0x3 +#undef MC_CMD_0x3_PRIVILEGE_CTG + +#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_COPYCODE_IN msgrequest */ +#define MC_CMD_COPYCODE_IN_LEN 16 +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. + */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ +#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) + */ +#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) + */ +#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 +/* Destination address */ +#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 +#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 +/* Address of where to jump after copy. 
*/ +#define MC_CMD_COPYCODE_IN_JUMP_OFST 12 +#define MC_CMD_COPYCODE_IN_JUMP_LEN 4 +/* enum: Control should return to the caller rather than jumping */ +#define MC_CMD_COPYCODE_JUMP_NONE 0x1 + +/* MC_CMD_COPYCODE_OUT msgresponse */ +#define MC_CMD_COPYCODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_FUNC + * Select function for function-specific commands. + */ +#define MC_CMD_SET_FUNC 0x4 +#undef MC_CMD_0x4_PRIVILEGE_CTG + +#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_FUNC_IN msgrequest */ +#define MC_CMD_SET_FUNC_IN_LEN 4 +/* Set function */ +#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 +#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4 + +/* MC_CMD_SET_FUNC_OUT msgresponse */ +#define MC_CMD_SET_FUNC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_BOOT_STATUS + * Get the instruction address from which the MC booted. + */ +#define MC_CMD_GET_BOOT_STATUS 0x5 +#undef MC_CMD_0x5_PRIVILEGE_CTG + +#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ +#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 + +/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */ +#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 +/* ?? */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4 +/* enum: indicates that the MC wasn't flash booted */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_ASSERTS + * Get (and optionally clear) the current assertion status. Only + * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other + * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS + */ +#define MC_CMD_GET_ASSERTS 0x6 +#undef MC_CMD_0x6_PRIVILEGE_CTG + +#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_ASSERTS_IN msgrequest */ +#define MC_CMD_GET_ASSERTS_IN_LEN 4 +/* Set to clear assertion */ +#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 +#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4 + +/* MC_CMD_GET_ASSERTS_OUT msgresponse */ +#define MC_CMD_GET_ASSERTS_OUT_LEN 140 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 +/* enum: A system-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 +/* enum: A thread-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 +/* enum: The system was reset by the watchdog. 
*/ +#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 +/* enum: An illegal address trap stopped the system (huntington and later) */ +#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 + +/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs + * found on Riverhead designs + */ +#define MC_CMD_GET_ASSERTS_OUT_V2_LEN 240 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. */ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26 + +/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted + * firmware version information + */ +#define MC_CMD_GET_ASSERTS_OUT_V3_LEN 360 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. 
*/ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20 +/* MC firmware build date (as Unix timestamp) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264 +/* MC firmware version number */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272 +/* MC firmware security level */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4 +/* MC firmware extra version info (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64 + + +/***********************************/ +/* MC_CMD_LOG_CTRL + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions + */ +#define MC_CMD_LOG_CTRL 0x7 +#undef MC_CMD_0x7_PRIVILEGE_CTG + +#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LOG_CTRL_IN msgrequest */ +#define MC_CMD_LOG_CTRL_IN_LEN 8 +/* Log destination */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 +/* enum: UART. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 +/* enum: Event queue. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4 + +/* MC_CMD_LOG_CTRL_OUT msgresponse */ +#define MC_CMD_LOG_CTRL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VERSION + * Get version information about adapter components. 
+ */ +#define MC_CMD_GET_VERSION 0x8 +#undef MC_CMD_0x8_PRIVILEGE_CTG + +#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VERSION_IN msgrequest */ +#define MC_CMD_GET_VERSION_IN_LEN 0 + +/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */ +#define MC_CMD_GET_VERSION_EXT_IN_LEN 4 +/* placeholder, set to 0 */ +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4 + +/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ +#define MC_CMD_GET_VERSION_V0_OUT_LEN 4 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 +/* enum: Reserved version number to indicate "any" version. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff +/* enum: Bootrom version value for Siena. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 +/* enum: Bootrom version value for Huntington. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 +/* enum: Bootrom version value for Medford2. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002 + +/* MC_CMD_GET_VERSION_OUT msgresponse */ +#define MC_CMD_GET_VERSION_OUT_LEN 32 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 + +/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ +#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 + +/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version + * information for all adapter components. For Riverhead based designs, base MC + * firmware version fields refer to NMC firmware, while CMC firmware data is in + * dedicated CMC fields. 
Flags indicate which data is present in the response + * (depending on which components exist on a particular adapter) + */ +#define MC_CMD_GET_VERSION_V2_OUT_LEN 304 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. 
+ */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +/* FPGA version as three numbers. On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64 + + +/***********************************/ +/* MC_CMD_PTP + * Perform PTP operation + */ +#define MC_CMD_PTP 0xb +#undef MC_CMD_0xb_PRIVILEGE_CTG + +#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PTP_IN msgrequest */ +#define MC_CMD_PTP_IN_LEN 1 +/* PTP operation code */ +#define MC_CMD_PTP_IN_OP_OFST 0 +#define MC_CMD_PTP_IN_OP_LEN 1 +/* enum: Enable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_ENABLE 0x1 +/* enum: Disable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_DISABLE 0x2 +/* enum: Send a PTP packet. This operation is used on Siena and Huntington. + * From Medford onwards it is not supported: on those platforms PTP transmit + * timestamping is done using the fast path. + */ +#define MC_CMD_PTP_OP_TRANSMIT 0x3 +/* enum: Read the current NIC time. */ +#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 +/* enum: Get the current PTP status. Note that the clock frequency returned (in + * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666). + */ +#define MC_CMD_PTP_OP_STATUS 0x5 +/* enum: Adjust the PTP NIC's time. */ +#define MC_CMD_PTP_OP_ADJUST 0x6 +/* enum: Synchronize host and NIC time. */ +#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 +/* enum: Basic manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 +/* enum: Packet based manufacturing tests. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 +/* enum: Reset some of the PTP related statistics */ +#define MC_CMD_PTP_OP_RESET_STATS 0xa +/* enum: Debug operations to MC. */ +#define MC_CMD_PTP_OP_DEBUG 0xb +/* enum: Read an FPGA register. Siena PTP adapters only. */ +#define MC_CMD_PTP_OP_FPGAREAD 0xc +/* enum: Write an FPGA register. Siena PTP adapters only. 
*/ +#define MC_CMD_PTP_OP_FPGAWRITE 0xd +/* enum: Apply an offset to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe +/* enum: Change the frequency correction applied to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf +/* enum: Set the MC packet filter VLAN tags for received PTP packets. + * Deprecated for Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10 +/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for + * Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11 +/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated + * for Huntington onwards. + */ +#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12 +/* enum: Set the clock source. Required for snapper tests on Huntington and + * Medford. Not implemented for Siena or Medford2. + */ +#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13 +/* enum: Reset value of Timer Reg. Not implemented. */ +#define MC_CMD_PTP_OP_RST_CLK 0x14 +/* enum: Enable the forwarding of PPS events to the host */ +#define MC_CMD_PTP_OP_PPS_ENABLE 0x15 +/* enum: Get the time format used by this NIC for PTP operations */ +#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16 +/* enum: Get the clock attributes. NOTE- extended version of + * MC_CMD_PTP_OP_GET_TIME_FORMAT + */ +#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16 +/* enum: Get corrections that should be applied to the various different + * timestamps + */ +#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17 +/* enum: Subscribe to receive periodic time events indicating the current NIC + * time + */ +#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18 +/* enum: Unsubscribe to stop receiving time events */ +#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19 +/* enum: PPS based manfacturing tests. Requires PPS output to be looped to PPS + * input on the same NIC. Siena PTP adapters only. + */ +#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Set the PTP sync status. Status is used by firmware to report to event + * subscribers. + */ +#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b +/* enum: Above this for future use. */ +#define MC_CMD_PTP_OP_MAX 0x1c + +/* MC_CMD_PTP_IN_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_ENABLE_LEN 16 +#define MC_CMD_PTP_IN_CMD_OFST 0 +#define MC_CMD_PTP_IN_CMD_LEN 4 +#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 +#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4 +/* Not used. Events are always sent to function relative queue 0. */ +#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4 +/* PTP timestamping mode. Not used from Huntington onwards. 
*/ +#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 +#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4 +/* enum: PTP, version 1 */ +#define MC_CMD_PTP_MODE_V1 0x0 +/* enum: PTP, version 1, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V1_VLAN 0x1 +/* enum: PTP, version 2 */ +#define MC_CMD_PTP_MODE_V2 0x2 +/* enum: PTP, version 2, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V2_VLAN 0x3 +/* enum: PTP, version 2, with improved UUID filtering */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 +/* enum: FCoE (seconds and microseconds) */ +#define MC_CMD_PTP_MODE_FCOE 0x5 + +/* MC_CMD_PTP_IN_DISABLE msgrequest */ +#define MC_CMD_PTP_IN_DISABLE_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TRANSMIT msgrequest */ +#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Transmit packet length */ +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4 +/* Transmit packet data */ +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_STATUS msgrequest */ +#define MC_CMD_PTP_IN_STATUS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. 
+ */ +#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ +/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ + * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES + * field. + */ +/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20 +#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24 +#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of time readings to capture */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4 +/* Host address in which to write "synchronization started" indication (64 + * bits) + */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16 + +/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Enable or disable packet testing */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */ +#define MC_CMD_PTP_IN_RESET_STATS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + 
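/*
 * Editorial illustration, not part of the generated header or of this patch:
 * a minimal sketch of how the _OFST/_LEN macros above describe the wire
 * layout of an MCDI request, packing an MC_CMD_PTP / MC_CMD_PTP_OP_ADJUST
 * payload into a flat byte buffer.  Assumptions: MCDI dwords are
 * little-endian on the wire, PERIPH_ID 0 addresses the default clock, the
 * caller has already converted the frequency adjustment into the 40-bit
 * fixed point format (MC_CMD_PTP_IN_ADJUST_BITS fractional bits), and the
 * helper names below are invented for the example.  The transport that
 * actually submits the buffer to the MC is out of scope.
 */
#include <stdint.h>
#include <string.h>

static inline void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	/* store one 32-bit MCDI field, least significant byte first */
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

static void ptp_build_adjust(uint8_t buf[MC_CMD_PTP_IN_ADJUST_LEN],
			     int64_t freq_fp40, uint32_t seconds,
			     uint32_t nanoseconds)
{
	memset(buf, 0, MC_CMD_PTP_IN_ADJUST_LEN);
	mcdi_put_dword(buf, MC_CMD_PTP_IN_CMD_OFST, MC_CMD_PTP_OP_ADJUST);
	mcdi_put_dword(buf, MC_CMD_PTP_IN_PERIPH_ID_OFST, 0);
	/* the 64-bit FREQ field is split into LO/HI dwords at offsets 8 and 12 */
	mcdi_put_dword(buf, MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST,
		       (uint32_t)((uint64_t)freq_fp40 & 0xffffffff));
	mcdi_put_dword(buf, MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST,
		       (uint32_t)((uint64_t)freq_fp40 >> 32));
	/* SECONDS/NANOSECONDS share offsets with the MAJOR/MINOR aliases */
	mcdi_put_dword(buf, MC_CMD_PTP_IN_ADJUST_SECONDS_OFST, seconds);
	mcdi_put_dword(buf, MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST, nanoseconds);
}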
+/* MC_CMD_PTP_IN_DEBUG msgrequest */ +#define MC_CMD_PTP_IN_DEBUG_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Debug operations */ +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4 + +/* MC_CMD_PTP_IN_FPGAREAD msgrequest */ +#define MC_CMD_PTP_IN_FPGAREAD_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4 + +/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ +#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4 +/* Upper 32bits of major time offset adjustment */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16 +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* 
MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 +/* Enum values, see field(s): */ +/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */ + +/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Number of VLAN tags, 0 if not VLAN */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4 +/* Set of VLAN tags to filter against */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3 + +/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable UUID filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4 +/* UUID to filter against */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16 + +/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable Domain filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4 +/* Domain number to filter against */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4 + +/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Set the clock source. */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4 +/* enum: Internal. */ +#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 + +/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */ +#define MC_CMD_PTP_IN_RST_CLK_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* Enable or disable */ +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4 +/* enum: Enable */ +#define MC_CMD_PTP_ENABLE_PPS 0x0 +/* enum: Disable */ +#define MC_CMD_PTP_DISABLE_PPS 0x1 +/* Not used. Events are always sent to function relative queue 0. 
*/ +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4 + +/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ +#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ +#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ +#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + +/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Original field containing queue ID. Now extended to include flags. */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1 + +/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* Unsubscribe options */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4 +/* enum: Unsubscribe a single queue */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 +/* enum: Unsubscribe all queues */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 +/* Event queue ID */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4 + +/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* 1 to enable PPS test mode, 0 to disable and return result. */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4 + +/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ +/* NIC - Host System Clock Synchronization status */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4 +/* enum: Host System clock and NIC clock are not in sync */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 +/* enum: Host System clock and NIC clock are synchronized */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1 +/* If synchronized, number of seconds until clocks should be considered to be + * no longer in sync. 
+ */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 + +/* MC_CMD_PTP_OUT msgresponse */ +#define MC_CMD_PTP_OUT_LEN 0 + +/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ +#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4 +/* Upper 32bits of major timestamp value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8 +#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4 + +/* MC_CMD_PTP_OUT_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_STATUS_LEN 64 +/* Frequency of NIC's hardware clock */ +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4 +/* Number of packets transmitted and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4 +/* Number of packets received and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4 +/* Number of packets timestamped by the FPGA */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4 +/* Number of packets filter matched */ +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4 +/* Number of packets not filter matched */ +#define 
MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4 +/* Number of PPS overflows (noise on input?) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4 +/* Number of PPS bad periods */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4 +/* Minimum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4 +/* Maximum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4 +/* Last period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4 +/* Mean period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4 +/* Minimum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4 +/* Maximum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4 +/* Last offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4 +/* Mean offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4 + +/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20) +/* A set of host and NIC times */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51 +/* Host time immediately before NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4 +/* Host time immediately after NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4 +/* Number of nanoseconds waited after reading NIC's hardware clock */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4 +/* enum: Successful test */ +#define MC_CMD_PTP_MANF_SUCCESS 0x0 +/* enum: FPGA load failed */ +#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 +/* enum: FPGA version invalid */ +#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 +/* enum: FPGA registers incorrect */ +#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 +/* enum: Oscillator possibly not working? */ +#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 +/* enum: Timestamps not increasing */ +#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 +/* enum: Mismatched packet count */ +#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 +/* enum: Mismatched packet count (Siena filter and FPGA) */ +#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 +/* enum: Not enough packets to perform timestamp check */ +#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 +/* enum: Timestamp trigger GPIO not working */ +#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 +/* enum: Insufficient PPS events to perform checks */ +#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa +/* enum: PPS time event period not sufficiently close to 1s. */ +#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb +/* enum: PPS time event nS reading not sufficiently close to zero. */ +#define MC_CMD_PTP_MANF_PPS_NS 0xc +/* enum: PTP peripheral registers incorrect */ +#define MC_CMD_PTP_MANF_REGISTERS 0xd +/* enum: Failed to read time from PTP peripheral */ +#define MC_CMD_PTP_MANF_CLOCK_READ 0xe +/* Presence of external oscillator */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4 +/* Number of packets received by FPGA */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4 +/* Number of packets received by Siena filters */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4 + +/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ +#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020 +#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num)) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020 + +/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. Note this enum is deprecated. Do not add to it- use the + * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead. 
+ */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2 + +/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2 +/* enum: Major register units are seconds, minor units are quarter nanoseconds + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3 +/* Minimum acceptable value for a corrected synchronization timeset. When + * comparing host and NIC clock times, the MC returns a set of samples that + * contain the host start and end time, the MC time when the host start was + * detected and the time the MC waited between reading the time and detecting + * the host end. The corrected sync window is the difference between the host + * end and start times minus the time that the MC waited for host end. 
+ */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4 +/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16 +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4 +/* Uncorrected error on non-PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 +#define 
MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4 + +/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0 +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ + +/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 + + +/***********************************/ +/* MC_CMD_CSR_READ32 + * Read 32bit words from the indirect memory map. + */ +#define MC_CMD_CSR_READ32 0xc +#undef MC_CMD_0xc_PRIVILEGE_CTG + +#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_CSR_READ32_IN msgrequest */ +#define MC_CMD_CSR_READ32_IN_LEN 12 +/* Address */ +#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4 +#define MC_CMD_CSR_READ32_IN_STEP_OFST 4 +#define MC_CMD_CSR_READ32_IN_STEP_LEN 4 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4 + +/* MC_CMD_CSR_READ32_OUT msgresponse */ +#define MC_CMD_CSR_READ32_OUT_LENMIN 4 +#define MC_CMD_CSR_READ32_OUT_LENMAX 252 +#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) +/* The last dword is the status, not a value read */ +#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_CSR_WRITE32 + * Write 32bit dwords to the indirect memory map. + */ +#define MC_CMD_CSR_WRITE32 0xd +#undef MC_CMD_0xd_PRIVILEGE_CTG + +#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_CSR_WRITE32_IN msgrequest */ +#define MC_CMD_CSR_WRITE32_IN_LENMIN 12 +#define MC_CMD_CSR_WRITE32_IN_LENMAX 252 +#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020 +#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) +#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4) +/* Address */ +#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 +#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253 + +/* MC_CMD_CSR_WRITE32_OUT msgresponse */ +#define MC_CMD_CSR_WRITE32_OUT_LEN 4 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_HP + * These commands are used for HP related features. They are grouped under one + * MCDI command to avoid creating too many MCDI commands. + */ +#define MC_CMD_HP 0x54 +#undef MC_CMD_0x54_PRIVILEGE_CTG + +#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_HP_IN msgrequest */ +#define MC_CMD_HP_IN_LEN 16 +/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at + * the specified address with the specified interval.When address is NULL, + * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current + * state / 2: (debug) Show temperature reported by one of the supported + * sensors. 
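 * For illustration only (not part of the interface definition): with SUBCMD
 * set to MC_CMD_HP_IN_OCSD_SUBCMD, OCSD_ADDR = 0 (NULL) and
 * OCSD_INTERVAL = 1, the request just reports the current OCSD state in
 * MC_CMD_HP_OUT_OCSD_STATUS; with a non-NULL OCSD_ADDR and
 * OCSD_INTERVAL = 10, it requests sensor updates every 10 seconds.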
+ */ +#define MC_CMD_HP_IN_SUBCMD_OFST 0 +#define MC_CMD_HP_IN_SUBCMD_LEN 4 +/* enum: OCSD (Option Card Sensor Data) sub-command. */ +#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 +/* enum: Last known valid HP sub-command. */ +#define MC_CMD_HP_IN_LAST_SUBCMD 0x0 +/* The address to the array of sensor fields. (Or NULL to use a sub-command.) + */ +#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8 +/* The requested update interval, in seconds. (Or the sub-command if ADDR is + * NULL.) + */ +#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 +#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4 + +/* MC_CMD_HP_OUT msgresponse */ +#define MC_CMD_HP_OUT_LEN 4 +#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4 +/* enum: OCSD stopped for this card. */ +#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 +/* enum: OCSD was successfully started with the address provided. */ +#define MC_CMD_HP_OUT_OCSD_STARTED 0x2 +/* enum: OCSD was already started for this card. */ +#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3 + + +/***********************************/ +/* MC_CMD_STACKINFO + * Get stack information. + */ +#define MC_CMD_STACKINFO 0xf +#undef MC_CMD_0xf_PRIVILEGE_CTG + +#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_STACKINFO_IN msgrequest */ +#define MC_CMD_STACKINFO_IN_LEN 0 + +/* MC_CMD_STACKINFO_OUT msgresponse */ +#define MC_CMD_STACKINFO_OUT_LENMIN 12 +#define MC_CMD_STACKINFO_OUT_LENMAX 252 +#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12) +/* (thread ptr, stack size, free space) for each thread in system */ +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85 + + +/***********************************/ +/* MC_CMD_MDIO_READ + * MDIO register read. + */ +#define MC_CMD_MDIO_READ 0x10 +#undef MC_CMD_0x10_PRIVILEGE_CTG + +#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MDIO_READ_IN msgrequest */ +#define MC_CMD_MDIO_READ_IN_LEN 16 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_READ_IN_BUS_OFST 0 +#define MC_CMD_MDIO_READ_IN_BUS_LEN 4 +/* enum: Internal. */ +#define MC_CMD_MDIO_BUS_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 +/* Port address */ +#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +#define MC_CMD_MDIO_CLAUSE22 0x20 +/* Address */ +#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4 + +/* MC_CMD_MDIO_READ_OUT msgresponse */ +#define MC_CMD_MDIO_READ_OUT_LEN 8 +/* Value */ +#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4 +/* Status the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. 
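 * For illustration only (a sketch, not part of the original header, assuming
 * the sfc driver's existing MCDI accessors), a clause-45 read might look
 * roughly like
 *
 *     MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
 *     MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
 *     MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, MC_CMD_MDIO_BUS_EXTERNAL);
 *     MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
 *     MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
 *     MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
 *     rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
 *                       outbuf, sizeof(outbuf), &outlen);
 *     if (!rc && MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) ==
 *                MC_CMD_MDIO_STATUS_GOOD)
 *             value = MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
 *
 * where prtad/devad/addr are caller-supplied, and a clause-22 read simply
 * sets DEVAD to MC_CMD_MDIO_CLAUSE22.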
+ */ +#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 +/* enum: Good. */ +#define MC_CMD_MDIO_STATUS_GOOD 0x8 + + +/***********************************/ +/* MC_CMD_MDIO_WRITE + * MDIO register write. + */ +#define MC_CMD_MDIO_WRITE 0x11 +#undef MC_CMD_0x11_PRIVILEGE_CTG + +#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MDIO_WRITE_IN msgrequest */ +#define MC_CMD_MDIO_WRITE_IN_LEN 20 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 +/* enum: Internal. */ +/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ +/* enum: External. */ +/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ +/* Port address */ +#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +/* MC_CMD_MDIO_CLAUSE22 0x20 */ +/* Address */ +#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 +#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 + +/* MC_CMD_MDIO_WRITE_OUT msgresponse */ +#define MC_CMD_MDIO_WRITE_OUT_LEN 4 +/* Status; the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 +/* enum: Good. */ +/* MC_CMD_MDIO_STATUS_GOOD 0x8 */ + + +/***********************************/ +/* MC_CMD_DBI_WRITE + * Write DBI register(s). + */ +#define MC_CMD_DBI_WRITE 0x12 +#undef MC_CMD_0x12_PRIVILEGE_CTG + +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DBI_WRITE_IN msgrequest */ +#define MC_CMD_DBI_WRITE_IN_LENMIN 12 +#define MC_CMD_DBI_WRITE_IN_LENMAX 252 +#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) +#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12) +/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset + * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF. 
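 * For illustration only (not part of the interface definition): per the
 * MC_CMD_DBIWROP_TYPEDEF bit positions below, the second dword of each op
 * could be packed as
 *
 *     parms = (vf_num << 16) | (vf_active << 15) | (cs2 << 14);
 *
 * with the 32-bit write value in the third dword; byte-enable bits are not
 * broken out here.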
+ */ +#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85 + +/* MC_CMD_DBI_WRITE_OUT msgresponse */ +#define MC_CMD_DBI_WRITE_OUT_LEN 0 + +/* MC_CMD_DBIWROP_TYPEDEF structuredef */ +#define MC_CMD_DBIWROP_TYPEDEF_LEN 12 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PORT_READ32 + * Read a 32-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_READ32 0x14 + +/* MC_CMD_PORT_READ32_IN msgrequest */ +#define MC_CMD_PORT_READ32_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 + +/* MC_CMD_PORT_READ32_OUT msgresponse */ +#define MC_CMD_PORT_READ32_OUT_LEN 8 +/* Value */ +#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 +/* Status */ +#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 +#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE32 + * Write a 32-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE32 0x15 + +/* MC_CMD_PORT_WRITE32_IN msgrequest */ +#define MC_CMD_PORT_WRITE32_IN_LEN 8 +/* Address */ +#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 + +/* MC_CMD_PORT_WRITE32_OUT msgresponse */ +#define MC_CMD_PORT_WRITE32_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_READ128 + * Read a 128-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. 
+ */ +#define MC_CMD_PORT_READ128 0x16 + +/* MC_CMD_PORT_READ128_IN msgrequest */ +#define MC_CMD_PORT_READ128_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4 + +/* MC_CMD_PORT_READ128_OUT msgresponse */ +#define MC_CMD_PORT_READ128_OUT_LEN 20 +/* Value */ +#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 +/* Status */ +#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 +#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE128 + * Write a 128-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE128 0x17 + +/* MC_CMD_PORT_WRITE128_IN msgrequest */ +#define MC_CMD_PORT_WRITE128_IN_LEN 20 +/* Address */ +#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4 +/* Value */ +#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 + +/* MC_CMD_PORT_WRITE128_OUT msgresponse */ +#define MC_CMD_PORT_WRITE128_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 +#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4 + +/* MC_CMD_CAPABILITIES structuredef */ +#define MC_CMD_CAPABILITIES_LEN 4 +/* Small buf table. */ +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0 +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1 +/* Turbo mode (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_LBN 1 +#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1 +/* Turbo mode active (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2 +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1 +/* PTP offload. */ +#define MC_CMD_CAPABILITIES_PTP_LBN 3 +#define MC_CMD_CAPABILITIES_PTP_WIDTH 1 +/* AOE mode. */ +#define MC_CMD_CAPABILITIES_AOE_LBN 4 +#define MC_CMD_CAPABILITIES_AOE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5 +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6 +#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1 +#define MC_CMD_CAPABILITIES_RESERVED_LBN 7 +#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25 + + +/***********************************/ +/* MC_CMD_GET_BOARD_CFG + * Returns the MC firmware configuration structure. + */ +#define MC_CMD_GET_BOARD_CFG 0x18 +#undef MC_CMD_0x18_PRIVILEGE_CTG + +#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_BOARD_CFG_IN msgrequest */ +#define MC_CMD_GET_BOARD_CFG_IN_LEN 0 + +/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */ +#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2) +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 +/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4 +/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on + * EF10 and later (use MC_CMD_GET_CAPABILITIES). 
+ */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4 +/* Base MAC address for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +/* Base MAC address for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4 +/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use + * MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port0. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4 +/* Increment between addresses in MAC address pool for Siena Port1. Unused on + * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES). + */ +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4 +/* Siena only. This field contains a 16-bit value for each of the types of + * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a + * specific board type, but otherwise have no meaning to the MC; they are used + * by the driver to manage selection of appropriate firmware updates. Unused on + * EF10 and later (use MC_CMD_NVRAM_METADATA). 
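 * Worked example (for illustration only): the FW_SUBTYPE_LIST_NUM(len)
 * macro below evaluates to (len - 72) / 2, so the minimum 96-byte response
 * carries (96 - 72) / 2 = 12 subtype words, matching
 * MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM.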
+ */ +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32 + + +/***********************************/ +/* MC_CMD_DBI_READX + * Read DBI register(s) -- extended functionality + */ +#define MC_CMD_DBI_READX 0x19 +#undef MC_CMD_0x19_PRIVILEGE_CTG + +#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_DBI_READX_IN msgrequest */ +#define MC_CMD_DBI_READX_IN_LENMIN 8 +#define MC_CMD_DBI_READX_IN_LENMAX 248 +#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016 +#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) +#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8) +/* Each Read op consists of an address (offset 0), VF/CS2) */ +#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127 + +/* MC_CMD_DBI_READX_OUT msgresponse */ +#define MC_CMD_DBI_READX_OUT_LENMIN 4 +#define MC_CMD_DBI_READX_OUT_LENMAX 252 +#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4) +/* Value */ +#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 +#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 +#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255 + +/* MC_CMD_DBIRDOP_TYPEDEF structuredef */ +#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32 + + +/***********************************/ +/* MC_CMD_SET_RAND_SEED + * Set the 16byte seed for the MC pseudo-random generator. + */ +#define MC_CMD_SET_RAND_SEED 0x1a +#undef MC_CMD_0x1a_PRIVILEGE_CTG + +#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_RAND_SEED_IN msgrequest */ +#define MC_CMD_SET_RAND_SEED_IN_LEN 16 +/* Seed value. */ +#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 +#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16 + +/* MC_CMD_SET_RAND_SEED_OUT msgresponse */ +#define MC_CMD_SET_RAND_SEED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LTSSM_HIST + * Retrieve the history of the LTSSM, if the build supports it. 
+ */ +#define MC_CMD_LTSSM_HIST 0x1b + +/* MC_CMD_LTSSM_HIST_IN msgrequest */ +#define MC_CMD_LTSSM_HIST_IN_LEN 0 + +/* MC_CMD_LTSSM_HIST_OUT msgresponse */ +#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4) +/* variable number of LTSSM values, as bytes. The history is read-to-clear. */ +#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_DRV_ATTACH + * Inform MCPU that this port is managed on the host (i.e. driver active). For + * Huntington, also request the preferred datapath firmware to use if possible + * (it may not be possible for this request to be fulfilled; the driver must + * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which + * features are actually available). The FIRMWARE_ID field is ignored by older + * platforms. + */ +#define MC_CMD_DRV_ATTACH 0x1c +#undef MC_CMD_0x1c_PRIVILEGE_CTG + +#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DRV_ATTACH_IN msgrequest */ +#define MC_CMD_DRV_ATTACH_IN_LEN 12 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 +#define MC_CMD_DRV_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_OFST 0 +#define MC_CMD_DRV_PREBOOT_LBN 1 +#define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +#define MC_CMD_FW_FULL_FEATURED 0x0 +/* enum: Prefer to use firmware with fewer features but lower latency */ +#define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler 
TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 +/* enum: Prefer to use firmware with additional "rules engine" filtering + * support + */ +#define MC_CMD_FW_RULES_ENGINE 0x5 +/* enum: Prefer to use firmware with additional DPDK support */ +#define MC_CMD_FW_DPDK 0x6 +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +#define MC_CMD_FW_L3XUDP 0x7 +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe +/* enum: Only this option is allowed for non-admin functions */ +#define MC_CMD_FW_DONT_CARE 0xffffffff + +/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver + * version + */ +#define MC_CMD_DRV_ATTACH_IN_V2_LEN 32 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4 +/* MC_CMD_DRV_ATTACH_OFST 0 */ +/* MC_CMD_DRV_ATTACH_LBN 0 */ +/* MC_CMD_DRV_ATTACH_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1 +/* MC_CMD_DRV_PREBOOT_OFST 0 */ +/* MC_CMD_DRV_PREBOOT_LBN 1 */ +/* MC_CMD_DRV_PREBOOT_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +/* MC_CMD_FW_FULL_FEATURED 0x0 */ +/* enum: Prefer to use firmware with fewer features but lower latency */ +/* MC_CMD_FW_LOW_LATENCY 0x1 */ +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +/* MC_CMD_FW_PACKED_STREAM 0x2 */ +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +/* MC_CMD_FW_HIGH_TX_RATE 0x3 */ +/* enum: Reserved value */ +/* MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */ +/* enum: Prefer to use firmware with additional "rules engine" filtering + * 
support + */ +/* MC_CMD_FW_RULES_ENGINE 0x5 */ +/* enum: Prefer to use firmware with additional DPDK support */ +/* MC_CMD_FW_DPDK 0x6 */ +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +/* MC_CMD_FW_L3XUDP 0x7 */ +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +/* MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */ +/* enum: Only this option is allowed for non-admin functions */ +/* MC_CMD_FW_DONT_CARE 0xffffffff */ +/* Version of the driver to be reported by management protocols (e.g. NC-SI) + * handled by the NIC. This is a zero-terminated ASCII string. + */ +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12 +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20 + +/* MC_CMD_DRV_ATTACH_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_OUT_LEN 4 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4 + +/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 +/* previous or existing state, see the bitmask at NEW_STATE */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4 +/* Flags associated with this function */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 +/* enum: Labels the lowest-numbered function visible to the OS */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 +/* enum: The function can control the link state of the physical port it is + * bound to. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 +/* enum: The function can perform privileged operations */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 +/* enum: The function does not have an active port associated with it. The port + * refers to the Sorrento external FPGA port. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 +/* enum: If set, indicates that VI spreading is currently enabled. Will always + * indicate the current state, regardless of the value in the WANT_VI_SPREADING + * input. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 +/* enum: Used during development only. Should no longer be used. */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5 +/* enum: If set, indicates that TX only spreading is enabled. Even-numbered + * TXQs will use one engine, and odd-numbered TXQs will use the other. This + * also has the effect that only even-numbered RXQs will receive traffic. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5 + + +/***********************************/ +/* MC_CMD_SHMUART + * Route UART output to circular buffer in shared memory instead. + */ +#define MC_CMD_SHMUART 0x1f + +/* MC_CMD_SHMUART_IN msgrequest */ +#define MC_CMD_SHMUART_IN_LEN 4 +/* ??? */ +#define MC_CMD_SHMUART_IN_FLAG_OFST 0 +#define MC_CMD_SHMUART_IN_FLAG_LEN 4 + +/* MC_CMD_SHMUART_OUT msgresponse */ +#define MC_CMD_SHMUART_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PORT_RESET + * Generic per-port reset. There is no equivalent for per-board reset. Locks + * required: None; Return code: 0, ETIME. NOTE: This command is deprecated - + * use MC_CMD_ENTITY_RESET instead. 
+ */ +#define MC_CMD_PORT_RESET 0x20 +#undef MC_CMD_0x20_PRIVILEGE_CTG + +#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PORT_RESET_IN msgrequest */ +#define MC_CMD_PORT_RESET_IN_LEN 0 + +/* MC_CMD_PORT_RESET_OUT msgresponse */ +#define MC_CMD_PORT_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ENTITY_RESET + * Generic per-resource reset. There is no equivalent for per-board reset. + * Locks required: None; Return code: 0, ETIME. NOTE: This command is an + * extended version of the deprecated MC_CMD_PORT_RESET with added fields. + */ +#define MC_CMD_ENTITY_RESET 0x20 +/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */ + +/* MC_CMD_ENTITY_RESET_IN msgrequest */ +#define MC_CMD_ENTITY_RESET_IN_LEN 4 +/* Optional flags field. Omitting this will perform a "legacy" reset action + * (TBD). + */ +#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 + +/* MC_CMD_ENTITY_RESET_OUT msgresponse */ +#define MC_CMD_ENTITY_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PCIE_CREDITS + * Read instantaneous and minimum flow control thresholds. + */ +#define MC_CMD_PCIE_CREDITS 0x21 + +/* MC_CMD_PCIE_CREDITS_IN msgrequest */ +#define MC_CMD_PCIE_CREDITS_IN_LEN 8 +/* poll period. 0 is disabled */ +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 +/* wipe statistics */ +#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 +#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 + +/* MC_CMD_PCIE_CREDITS_OUT msgresponse */ +#define MC_CMD_PCIE_CREDITS_OUT_LEN 16 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2 + + +/***********************************/ +/* MC_CMD_RXD_MONITOR + * Get histogram of RX queue fill level. 
+ */ +#define MC_CMD_RXD_MONITOR 0x22 + +/* MC_CMD_RXD_MONITOR_IN msgrequest */ +#define MC_CMD_RXD_MONITOR_IN_LEN 12 +#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 +#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 +#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 + +/* MC_CMD_RXD_MONITOR_OUT msgresponse */ +#define MC_CMD_RXD_MONITOR_OUT_LEN 80 +#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 + + +/***********************************/ +/* MC_CMD_PUTS + * Copy the given ASCII string out onto UART and/or out of the network port. 
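 * Worked example (for illustration only): per the length macros below, the
 * request is a 12-byte fixed part (the DEST flags and the 6-byte DHOST
 * destination) followed by one byte per string character, so a 21-byte
 * string gives MC_CMD_PUTS_IN_LEN(21) = 12 + 21 = 33 bytes, and the
 * 13-byte LENMIN corresponds to a single string byte.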
+ */ +#define MC_CMD_PUTS 0x23 +#undef MC_CMD_0x23_PRIVILEGE_CTG + +#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_PUTS_IN msgrequest */ +#define MC_CMD_PUTS_IN_LENMIN 13 +#define MC_CMD_PUTS_IN_LENMAX 252 +#define MC_CMD_PUTS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) +#define MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1) +#define MC_CMD_PUTS_IN_DEST_OFST 0 +#define MC_CMD_PUTS_IN_DEST_LEN 4 +#define MC_CMD_PUTS_IN_UART_OFST 0 +#define MC_CMD_PUTS_IN_UART_LBN 0 +#define MC_CMD_PUTS_IN_UART_WIDTH 1 +#define MC_CMD_PUTS_IN_PORT_OFST 0 +#define MC_CMD_PUTS_IN_PORT_LBN 1 +#define MC_CMD_PUTS_IN_PORT_WIDTH 1 +#define MC_CMD_PUTS_IN_DHOST_OFST 4 +#define MC_CMD_PUTS_IN_DHOST_LEN 6 +#define MC_CMD_PUTS_IN_STRING_OFST 12 +#define MC_CMD_PUTS_IN_STRING_LEN 1 +#define MC_CMD_PUTS_IN_STRING_MINNUM 1 +#define MC_CMD_PUTS_IN_STRING_MAXNUM 240 +#define MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008 + +/* MC_CMD_PUTS_OUT msgresponse */ +#define MC_CMD_PUTS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PHY_CFG + * Report PHY configuration. This guarantees to succeed even if the PHY is in a + * 'zombie' state. Locks required: None + */ +#define MC_CMD_GET_PHY_CFG 0x24 +#undef MC_CMD_0x24_PRIVILEGE_CTG + +#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PHY_CFG_IN msgrequest */ +#define MC_CMD_GET_PHY_CFG_IN_LEN 0 + +/* MC_CMD_GET_PHY_CFG_OUT msgresponse */ +#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 +/* flags */ +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 +/* ?? 
*/ +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4 +/* Bitmask of supported capabilities */ +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4 +#define MC_CMD_PHY_CAP_10HDX_OFST 8 +#define MC_CMD_PHY_CAP_10HDX_LBN 1 +#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10FDX_OFST 8 +#define MC_CMD_PHY_CAP_10FDX_LBN 2 +#define MC_CMD_PHY_CAP_10FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100HDX_OFST 8 +#define MC_CMD_PHY_CAP_100HDX_LBN 3 +#define MC_CMD_PHY_CAP_100HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100FDX_OFST 8 +#define MC_CMD_PHY_CAP_100FDX_LBN 4 +#define MC_CMD_PHY_CAP_100FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000HDX_OFST 8 +#define MC_CMD_PHY_CAP_1000HDX_LBN 5 +#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000FDX_OFST 8 +#define MC_CMD_PHY_CAP_1000FDX_LBN 6 +#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10000FDX_OFST 8 +#define MC_CMD_PHY_CAP_10000FDX_LBN 7 +#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_PAUSE_OFST 8 +#define MC_CMD_PHY_CAP_PAUSE_LBN 8 +#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 +#define MC_CMD_PHY_CAP_ASYM_OFST 8 +#define MC_CMD_PHY_CAP_ASYM_LBN 9 +#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 +#define MC_CMD_PHY_CAP_AN_OFST 8 +#define MC_CMD_PHY_CAP_AN_LBN 10 +#define MC_CMD_PHY_CAP_AN_WIDTH 1 +#define MC_CMD_PHY_CAP_40000FDX_OFST 8 +#define MC_CMD_PHY_CAP_40000FDX_LBN 11 +#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_DDM_OFST 8 +#define MC_CMD_PHY_CAP_DDM_LBN 12 +#define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_OFST 8 +#define MC_CMD_PHY_CAP_100000FDX_LBN 13 +#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_OFST 8 +#define MC_CMD_PHY_CAP_25000FDX_LBN 14 +#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_OFST 8 +#define MC_CMD_PHY_CAP_50000FDX_LBN 15 +#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_OFST 8 +#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 +#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_OFST 8 +#define MC_CMD_PHY_CAP_RS_FEC_LBN 18 +#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19 +#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 +#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4 +/* enum: Xaui. */ +#define MC_CMD_MEDIA_XAUI 0x1 +/* enum: CX4. */ +#define MC_CMD_MEDIA_CX4 0x2 +/* enum: KX4. */ +#define MC_CMD_MEDIA_KX4 0x3 +/* enum: XFP Far. */ +#define MC_CMD_MEDIA_XFP 0x4 +/* enum: SFP+. 
*/ +#define MC_CMD_MEDIA_SFP_PLUS 0x5 +/* enum: 10GBaseT. */ +#define MC_CMD_MEDIA_BASE_T 0x6 +/* enum: QSFP+. */ +#define MC_CMD_MEDIA_QSFP_PLUS 0x7 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4 +/* enum: Native clause 22 */ +#define MC_CMD_MMD_CLAUSE22 0x0 +#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ +#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */ +#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */ +#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */ +#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */ +/* enum: Clause22 proxied over clause45 by PHY. */ +#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d +#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */ +#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */ +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 + + +/***********************************/ +/* MC_CMD_START_BIST + * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST + * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held) + */ +#define MC_CMD_START_BIST 0x25 +#undef MC_CMD_0x25_PRIVILEGE_CTG + +#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_START_BIST_IN msgrequest */ +#define MC_CMD_START_BIST_IN_LEN 4 +/* Type of test. */ +#define MC_CMD_START_BIST_IN_TYPE_OFST 0 +#define MC_CMD_START_BIST_IN_TYPE_LEN 4 +/* enum: Run the PHY's short cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 +/* enum: Run the PHY's long cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 +/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */ +#define MC_CMD_BPX_SERDES_BIST 0x3 +/* enum: Run the MC loopback tests. */ +#define MC_CMD_MC_LOOPBACK_BIST 0x4 +/* enum: Run the PHY's standard BIST. */ +#define MC_CMD_PHY_BIST 0x5 +/* enum: Run MC RAM test. */ +#define MC_CMD_MC_MEM_BIST 0x6 +/* enum: Run Port RAM test. */ +#define MC_CMD_PORT_MEM_BIST 0x7 +/* enum: Run register test. */ +#define MC_CMD_REG_BIST 0x8 + +/* MC_CMD_START_BIST_OUT msgresponse */ +#define MC_CMD_START_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_POLL_BIST + * Poll for BIST completion. Returns a single status code, and optionally some + * PHY specific bist output. The driver should only consume the BIST output + * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't + * successfully parse the BIST output, it should still respect the pass/Fail in + * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0, + * EACCES (if PHY_LOCK is not held). + */ +#define MC_CMD_POLL_BIST 0x26 +#undef MC_CMD_0x26_PRIVILEGE_CTG + +#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_POLL_BIST_IN msgrequest */ +#define MC_CMD_POLL_BIST_IN_LEN 0 + +/* MC_CMD_POLL_BIST_OUT msgresponse */ +#define MC_CMD_POLL_BIST_OUT_LEN 8 +/* result */ +#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 +#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 +/* enum: Running. */ +#define MC_CMD_POLL_BIST_RUNNING 0x1 +/* enum: Passed. */ +#define MC_CMD_POLL_BIST_PASSED 0x2 +/* enum: Failed. */ +#define MC_CMD_POLL_BIST_FAILED 0x3 +/* enum: Timed-out. 
*/ +#define MC_CMD_POLL_BIST_TIMEOUT 0x4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4 + +/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4 +/* Status of each channel A */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4 +/* enum: Ok. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 +/* enum: Open. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 +/* enum: Intra-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 +/* enum: Inter-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 +/* enum: Busy. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 +/* Status of each channel B */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel C */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel D */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ + +/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4 +/* enum: Complete. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 +/* enum: Bus switch off I2C write. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 +/* enum: Bus switch off I2C no access IO exp. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 +/* enum: Bus switch off I2C no access module. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 +/* enum: IO exp I2C configure. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 +/* enum: Bus switch I2C no cross talk. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 +/* enum: Module presence. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 +/* enum: Module ID I2C access. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 +/* enum: Module ID sane value. 
*/ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 + +/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4 +/* enum: Test has completed. */ +#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 +/* enum: RAM test - walk ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1 +/* enum: RAM test - walk zeros. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2 +/* enum: RAM test - walking inversions zeros/ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3 +/* enum: RAM test - walking inversions checkerboard. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4 +/* enum: Register test - set / clear individual bits. */ +#define MC_CMD_POLL_BIST_MEM_REG 0x5 +/* enum: ECC error detected. */ +#define MC_CMD_POLL_BIST_MEM_ECC 0x6 +/* Failure address, only valid if result is POLL_BIST_FAILED */ +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4 +/* Bus or address space to which the failure address corresponds */ +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4 +/* enum: MC MIPS bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 +/* enum: CSR IREG bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1 +/* enum: RX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2 +/* enum: TX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3 +/* enum: TX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4 +/* enum: RX0 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5 +/* enum: TX DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6 +/* enum: RX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7 +/* enum: RX1 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 +/* Pattern written to RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4 +/* Actual value read from RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4 +/* ECC error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4 +/* ECC parity error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4 +/* ECC fatal error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4 + + +/***********************************/ +/* MC_CMD_FLUSH_RX_QUEUES + * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ + * flushes should be initiated via this MCDI operation, rather than via + * directly writing FLUSH_CMD. + * + * The flush is completed (either done/fail) asynchronously (after this command + * returns). The driver must still wait for flush done/failure events as usual. 
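 * For illustration only (not part of the interface definition): the request
 * body is simply an array of 4-byte QID_OFST entries, one per queue to
 * flush, so flushing three queues uses
 * MC_CMD_FLUSH_RX_QUEUES_IN_LEN(3) = 12 bytes.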
+ */ +#define MC_CMD_FLUSH_RX_QUEUES 0x27 + +/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */ +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num)) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255 + +/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */ +#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_LOOPBACK_MODES + * Returns a bitmask of loopback modes available at each speed. + */ +#define MC_CMD_GET_LOOPBACK_MODES 0x28 +#undef MC_CMD_0x28_PRIVILEGE_CTG + +#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 + +/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 +/* enum: None. */ +#define MC_CMD_LOOPBACK_NONE 0x0 +/* enum: Data. */ +#define MC_CMD_LOOPBACK_DATA 0x1 +/* enum: GMAC. */ +#define MC_CMD_LOOPBACK_GMAC 0x2 +/* enum: XGMII. */ +#define MC_CMD_LOOPBACK_XGMII 0x3 +/* enum: XGXS. */ +#define MC_CMD_LOOPBACK_XGXS 0x4 +/* enum: XAUI. */ +#define MC_CMD_LOOPBACK_XAUI 0x5 +/* enum: GMII. */ +#define MC_CMD_LOOPBACK_GMII 0x6 +/* enum: SGMII. */ +#define MC_CMD_LOOPBACK_SGMII 0x7 +/* enum: XGBR. */ +#define MC_CMD_LOOPBACK_XGBR 0x8 +/* enum: XFI. */ +#define MC_CMD_LOOPBACK_XFI 0x9 +/* enum: XAUI Far. */ +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +/* enum: GMII Far. */ +#define MC_CMD_LOOPBACK_GMII_FAR 0xb +/* enum: SGMII Far. */ +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +/* enum: XFI Far. */ +#define MC_CMD_LOOPBACK_XFI_FAR 0xd +/* enum: GPhy. */ +#define MC_CMD_LOOPBACK_GPHY 0xe +/* enum: PhyXS. */ +#define MC_CMD_LOOPBACK_PHYXS 0xf +/* enum: PCS. */ +#define MC_CMD_LOOPBACK_PCS 0x10 +/* enum: PMA-PMD. */ +#define MC_CMD_LOOPBACK_PMAPMD 0x11 +/* enum: Cross-Port. */ +#define MC_CMD_LOOPBACK_XPORT 0x12 +/* enum: XGMII-Wireside. */ +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +/* enum: XAUI Wireside. */ +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +/* enum: XAUI Wireside Far. */ +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +/* enum: XAUI Wireside near. */ +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +/* enum: GMII Wireside. */ +#define MC_CMD_LOOPBACK_GMII_WS 0x17 +/* enum: XFI Wireside. */ +#define MC_CMD_LOOPBACK_XFI_WS 0x18 +/* enum: XFI Wireside Far. */ +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +/* enum: PhyXS Wireside. */ +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +/* enum: PMA lanes MAC-Serdes. */ +#define MC_CMD_LOOPBACK_PMA_INT 0x1b +/* enum: KR Serdes Parallel (Encoder). */ +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +/* enum: KR Serdes Serial. */ +#define MC_CMD_LOOPBACK_SD_FAR 0x1d +/* enum: PMA lanes MAC-Serdes Wireside. */ +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). 
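MC_CMD_FLUSH_RX_QUEUES_IN is a variable-length request, one dword queue ID per entry, so its length is MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) = 4 * num, bounded by QID_OFST_MINNUM/MAXNUM. A minimal sketch of sizing and filling such a request; the buffer handling and function name are assumptions, not the driver's MCDI helpers.

#include <stddef.h>
#include <stdint.h>

#define FLUSH_RXQ_IN_LEN(num)	(4 * (num))	/* MC_CMD_FLUSH_RX_QUEUES_IN_LEN  */
#define FLUSH_RXQ_MAXNUM	63		/* QID_OFST_MAXNUM (non-MCDI2)    */

/* Fill buf with n little-endian queue IDs; returns the request length or 0. */
static size_t flush_rxq_build(uint8_t *buf, size_t bufsz,
			      const uint32_t *qids, unsigned int n)
{
	size_t len = FLUSH_RXQ_IN_LEN(n);
	unsigned int i;

	if (n < 1 || n > FLUSH_RXQ_MAXNUM || len > bufsz)
		return 0;
	for (i = 0; i < n; i++) {
		buf[4 * i + 0] = qids[i] & 0xff;
		buf[4 * i + 1] = (qids[i] >> 8) & 0xff;
		buf[4 * i + 2] = (qids[i] >> 16) & 0xff;
		buf[4 * i + 3] = (qids[i] >> 24) & 0xff;
	}
	return len;
}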
*/ +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +/* enum: KR Serdes Serial Wireside. */ +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +/* enum: Near side of AOE Siena side port */ +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* enum: Medford Wireside datapath loopback */ +#define MC_CMD_LOOPBACK_DATA_WS 0x24 +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ + +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for + * newer NICs with 25G/50G/100G support + */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4 +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. */ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. 
*/ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. */ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* enum: PMA lanes MAC-Serdes. */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* enum: KR Serdes Parallel (Encoder). */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* enum: KR Serdes Serial. */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* enum: PMA lanes MAC-Serdes Wireside. */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* enum: KR Serdes Serial Wireside. */ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* enum: Near side of AOE Siena side port */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* enum: Medford Wireside datapath loopback */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 25G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 50 loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 100G loopbacks. 
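Each per-speed field in MC_CMD_GET_LOOPBACK_MODES_OUT (and the _V2 variant) is a 64-bit mask split into _LO/_HI dwords and indexed by the MC_CMD_LOOPBACK_* values. A small sketch of testing one mode; the helper name is illustrative.

#include <stdbool.h>
#include <stdint.h>

/* lo/hi are the _LO/_HI dwords of one speed's mask, e.g. offsets 16/20 for 10G. */
static bool loopback_supported(uint32_t lo, uint32_t hi, unsigned int mode)
{
	uint64_t mask = (uint64_t)hi << 32 | lo;

	return mode < 64 && (mask >> mode) & 1;
}

/* e.g. PMA-PMD loopback at 10G: loopback_supported(lo, hi, 0x11 /* MC_CMD_LOOPBACK_PMAPMD */) */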
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60 +/* Enum values, see field(s): */ +/* 100M */ + +/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ +#define AN_TYPE_LEN 4 +#define AN_TYPE_TYPE_OFST 0 +#define AN_TYPE_TYPE_LEN 4 +/* enum: None, AN disabled or not supported */ +#define MC_CMD_AN_NONE 0x0 +/* enum: Clause 28 - BASE-T */ +#define MC_CMD_AN_CLAUSE28 0x1 +/* enum: Clause 37 - BASE-X */ +#define MC_CMD_AN_CLAUSE37 0x2 +/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable + * assemblies. Includes Clause 72/Clause 92 link-training. + */ +#define MC_CMD_AN_CLAUSE73 0x3 +#define AN_TYPE_TYPE_LBN 0 +#define AN_TYPE_TYPE_WIDTH 32 + +/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 + */ +#define FEC_TYPE_LEN 4 +#define FEC_TYPE_TYPE_OFST 0 +#define FEC_TYPE_TYPE_LEN 4 +/* enum: No FEC */ +#define MC_CMD_FEC_NONE 0x0 +/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */ +#define MC_CMD_FEC_BASER 0x1 +/* enum: Clause 91/Clause 108 Reed-Solomon FEC */ +#define MC_CMD_FEC_RS 0x2 +#define FEC_TYPE_TYPE_LBN 0 +#define FEC_TYPE_TYPE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_GET_LINK + * Read the unified MAC/PHY link state. Locks required: None Return code: 0, + * ETIME. + */ +#define MC_CMD_GET_LINK 0x29 +#undef MC_CMD_0x29_PRIVILEGE_CTG + +#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LINK_IN msgrequest */ +#define MC_CMD_GET_LINK_IN_LEN 0 + +/* MC_CMD_GET_LINK_OUT msgresponse */ +#define MC_CMD_GET_LINK_OUT_LEN 28 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 +/* Current loopback setting. 
*/ +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 + +/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ +#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +/* Current loopback setting. 
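The FLAGS word of MC_CMD_GET_LINK_OUT packs single-bit fields described by _LBN (bit position) and _WIDTH pairs. A minimal sketch of a generic extractor over that dword; mcdi_field() is an illustrative helper, not the driver's accessor.

#include <stdint.h>

/* Extract a field from a flags dword given its LBN (bit position) and WIDTH. */
static uint32_t mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	uint32_t mask = width < 32 ? (1u << width) - 1 : ~0u;

	return (dword >> lbn) & mask;
}

/*
 * Usage on a MC_CMD_GET_LINK_OUT response (FLAGS is the dword at offset 16):
 *   link_up  = mcdi_field(flags, 0, 1);   LINK_UP
 *   fault_rx = mcdi_field(flags, 6, 1);   LINK_FAULT_RX
 */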
*/ +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1 +/* This returns the negotiated flow control value. */ +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4 +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */ +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */ +/* True local device capabilities (taking into account currently used PMD/MDI, + * e.g. plugged-in module). In general, subset of + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST + * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal + * to SUPPORTED_CAP for non-pluggable PMDs. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. 
+ */ +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28 +#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4 +/* Auto-negotiation type used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32 +#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* AN_TYPE/TYPE */ +/* Forward error correction used on the link */ +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36 +#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1 + + +/***********************************/ +/* MC_CMD_SET_LINK + * Write the unified MAC/PHY link configuration. Locks required: None. Return + * code: 0, EINVAL, ETIME, EAGAIN + */ +#define MC_CMD_SET_LINK 0x2a +#undef MC_CMD_0x2a_PRIVILEGE_CTG + +#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_SET_LINK_IN msgrequest */ +#define MC_CMD_SET_LINK_IN_LEN 16 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4 +#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_OFST 4 +#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_TXDIS_OFST 4 +#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1 +/* Loopback mode. 
*/ +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4 + +/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence + * number to ensure this SET_LINK command corresponds to the latest + * MODULECHANGE event. + */ +#define MC_CMD_SET_LINK_IN_V2_LEN 17 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_V2_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_V2_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1 +/* Loopback mode. */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1 + +/* MC_CMD_SET_LINK_OUT msgresponse */ +#define MC_CMD_SET_LINK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_ID_LED + * Set identification LED state. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_ID_LED 0x2b +#undef MC_CMD_0x2b_PRIVILEGE_CTG + +#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_SET_ID_LED_IN msgrequest */ +#define MC_CMD_SET_ID_LED_IN_LEN 4 +/* Set LED state. */ +#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 +#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4 +#define MC_CMD_LED_OFF 0x0 /* enum */ +#define MC_CMD_LED_ON 0x1 /* enum */ +#define MC_CMD_LED_DEFAULT 0x2 /* enum */ + +/* MC_CMD_SET_ID_LED_OUT msgresponse */ +#define MC_CMD_SET_ID_LED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MAC + * Set MAC configuration. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_MAC 0x2c +#undef MC_CMD_0x2c_PRIVILEGE_CTG + +#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_MAC_IN msgrequest */ +#define MC_CMD_SET_MAC_IN_LEN 28 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). 
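MC_CMD_SET_LINK_IN_V2 appends a one-byte MODULE_SEQ field: bits 0..6 carry the sequence number from the most recent MODULECHANGE event and bit 7 asks the MC to ignore the sequence check. A sketch of packing that byte; the function name is an assumption.

#include <stdbool.h>
#include <stdint.h>

static uint8_t set_link_v2_module_seq(uint8_t seq_number, bool ignore)
{
	return (uint8_t)((seq_number & 0x7f) |	/* MODULE_SEQ_NUMBER, 7 bits */
			 (ignore ? 0x80 : 0));	/* MODULE_SEQ_IGNORE, bit 7  */
}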
+ */ +#define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_MTU_LEN 4 +#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4 +#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4 +/* enum: Flow control is off. */ +#define MC_CMD_FCNTL_OFF 0x0 +/* enum: Respond to flow control. */ +#define MC_CMD_FCNTL_RESPOND 0x1 +/* enum: Respond to and Issue flow control. */ +#define MC_CMD_FCNTL_BIDIR 0x2 +/* enum: Auto neg flow control. */ +#define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. */ +#define MC_CMD_FCNTL_GENERATE 0x5 +#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_EXT_IN msgrequest */ +#define MC_CMD_SET_MAC_EXT_IN_LEN 32 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. */ +/* MC_CMD_FCNTL_AUTO 0x3 */ +/* enum: Priority flow control (eftest builds only). */ +/* MC_CMD_FCNTL_QBB 0x4 */ +/* enum: Issue flow control. */ +/* MC_CMD_FCNTL_GENERATE 0x5 */ +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in + * capabilities then this field is ignored (and all flags are assumed to be + * set). 
+ */ +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1 + +/* MC_CMD_SET_MAC_OUT msgresponse */ +#define MC_CMD_SET_MAC_OUT_LEN 0 + +/* MC_CMD_SET_MAC_V2_OUT msgresponse */ +#define MC_CMD_SET_MAC_V2_OUT_LEN 4 +/* MTU as configured after processing the request. See comment at + * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL + * to 0. + */ +#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 +#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4 + + +/***********************************/ +/* MC_CMD_PHY_STATS + * Get generic PHY statistics. This call returns the statistics for a generic + * PHY in a sparse array (indexed by the enumerate). Each value is represented + * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the + * statistics may be read from the message response. If DMA_ADDR != 0, then the + * statistics are dmad to that (page-aligned location). Locks required: None. + * Returns: 0, ETIME + */ +#define MC_CMD_PHY_STATS 0x2d +#undef MC_CMD_0x2d_PRIVILEGE_CTG + +#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_PHY_STATS_IN msgrequest */ +#define MC_CMD_PHY_STATS_IN_LEN 8 +/* ??? */ +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 + +/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3) +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS +/* enum: OUI. */ +#define MC_CMD_OUI 0x0 +/* enum: PMA-PMD Link Up. */ +#define MC_CMD_PMA_PMD_LINK_UP 0x1 +/* enum: PMA-PMD RX Fault. */ +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +/* enum: PMA-PMD TX Fault. */ +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +/* enum: PMA-PMD Signal */ +#define MC_CMD_PMA_PMD_SIGNAL 0x4 +/* enum: PMA-PMD SNR A. */ +#define MC_CMD_PMA_PMD_SNR_A 0x5 +/* enum: PMA-PMD SNR B. */ +#define MC_CMD_PMA_PMD_SNR_B 0x6 +/* enum: PMA-PMD SNR C. */ +#define MC_CMD_PMA_PMD_SNR_C 0x7 +/* enum: PMA-PMD SNR D. */ +#define MC_CMD_PMA_PMD_SNR_D 0x8 +/* enum: PCS Link Up. */ +#define MC_CMD_PCS_LINK_UP 0x9 +/* enum: PCS RX Fault. */ +#define MC_CMD_PCS_RX_FAULT 0xa +/* enum: PCS TX Fault. */ +#define MC_CMD_PCS_TX_FAULT 0xb +/* enum: PCS BER. */ +#define MC_CMD_PCS_BER 0xc +/* enum: PCS Block Errors. */ +#define MC_CMD_PCS_BLOCK_ERRORS 0xd +/* enum: PhyXS Link Up. */ +#define MC_CMD_PHYXS_LINK_UP 0xe +/* enum: PhyXS RX Fault. */ +#define MC_CMD_PHYXS_RX_FAULT 0xf +/* enum: PhyXS TX Fault. */ +#define MC_CMD_PHYXS_TX_FAULT 0x10 +/* enum: PhyXS Align. 
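With SET_MAC_ENHANCED, the CONTROL word of MC_CMD_SET_MAC_EXT_IN selects which parameters the command actually applies; clearing every CFG_* bit turns the call into a pure query answered by MC_CMD_SET_MAC_V2_OUT. A sketch of assembling that dword (written little-endian at offset 28); the helper name is illustrative.

#include <stdbool.h>
#include <stdint.h>

static uint32_t set_mac_ext_control(bool cfg_mtu, bool cfg_drain, bool cfg_reject,
				    bool cfg_fcntl, bool cfg_fcs)
{
	return (cfg_mtu    ? 1u << 0 : 0) |	/* CFG_MTU    */
	       (cfg_drain  ? 1u << 1 : 0) |	/* CFG_DRAIN  */
	       (cfg_reject ? 1u << 2 : 0) |	/* CFG_REJECT */
	       (cfg_fcntl  ? 1u << 3 : 0) |	/* CFG_FCNTL  */
	       (cfg_fcs    ? 1u << 4 : 0);	/* CFG_FCS    */
}

/* CONTROL = 0 changes nothing; the current MTU then comes back in SET_MAC_V2_OUT. */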
*/ +#define MC_CMD_PHYXS_ALIGN 0x11 +/* enum: PhyXS Sync. */ +#define MC_CMD_PHYXS_SYNC 0x12 +/* enum: AN link-up. */ +#define MC_CMD_AN_LINK_UP 0x13 +/* enum: AN Complete. */ +#define MC_CMD_AN_COMPLETE 0x14 +/* enum: AN 10GBaseT Status. */ +#define MC_CMD_AN_10GBT_STATUS 0x15 +/* enum: Clause 22 Link-Up. */ +#define MC_CMD_CL22_LINK_UP 0x16 +/* enum: (Last entry) */ +#define MC_CMD_PHY_NSTATS 0x17 + + +/***********************************/ +/* MC_CMD_MAC_STATS + * Get generic MAC statistics. This call returns unified statistics maintained + * by the MC as it switches between the GMAC and XMAC. The MC will write out + * all supported stats. The driver should zero initialise the buffer to + * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is + * performed, and the statistics may be read from the message response. If + * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location). + * Locks required: None. The PERIODIC_CLEAR option is not used and now has no + * effect. Returns: 0, ETIME + */ +#define MC_CMD_MAC_STATS 0x2e +#undef MC_CMD_0x2e_PRIVILEGE_CTG + +#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MAC_STATS_IN msgrequest */ +#define MC_CMD_MAC_STATS_IN_LEN 20 +/* ??? */ +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 +#define MC_CMD_MAC_STATS_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_IN_CMD_LEN 4 +#define MC_CMD_MAC_STATS_IN_DMA_OFST 8 +#define MC_CMD_MAC_STATS_IN_DMA_LBN 0 +#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_OFST 8 +#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 +/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as + * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. 
For legacy firmware not + * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to + * MC_CMD_MAC_NSTATS * sizeof(uint64_t) + */ +#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4 +/* port id so vadapter stats can be provided */ +#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 +#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4 + +/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS +#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ +#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ +#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ +#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ +#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ +#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ +#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ +#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ +#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ +#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ +#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ +#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ +#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ +#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ +#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ +#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ +#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ +#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ +#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ +#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ +#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ +#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ +#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ +#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ +#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ +#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ +#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ +#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ +#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ +#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ +#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ +#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ +#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ +#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ +#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ +#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ +#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ +#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ +#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ +#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ +#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ +#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ +#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ +#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ +#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ +#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ +#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ +#define 
MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ +#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ +#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ +#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ +#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ +#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ +#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ +#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ +#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ +#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ +#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ +/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +/* enum: PM discard_bb_overflow counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +/* enum: PM discard_vfifo_full counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +/* enum: RXDP counter: Number of packets dropped due to the queue being + * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 + * with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. + * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 +/* enum: RXDP counter: Number of times the DPCPU waited for an existing + * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 
+ */ +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ +/* enum: Start of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_START 0x40 +/* enum: End of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_END 0x5f +/* enum: GENERATION_END value, used together with GENERATION_START to verify + * consistency of DMAd data. For legacy firmware / drivers without extended + * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise, + * this value is invalid/ reserved and GENERATION_END is written as the last + * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that + * this is consistent with the legacy behaviour, in the sense that entry 96 is + * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS * + * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details. 
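The GENERATION_START and GENERATION_END words let a reader detect a torn snapshot of the DMA'd statistics buffer: copy the stats, then retry if the two generation words do not agree. A rough sketch under that assumption; memory barriers, timeouts and the exact update ordering used by real firmware and drivers are omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Copy n_stats 64-bit words out of the DMA buffer; accept the copy only when
 * the word at index 0 (MC_CMD_MAC_GENERATION_START) read before the copy
 * matches the final 64-bit word (the GENERATION_END position for extended
 * stats) read after it.  n_stats must be at least 1.
 */
static bool mac_stats_snapshot(const volatile uint64_t *dma_buf,
			       size_t n_stats, uint64_t *out)
{
	unsigned int retry;

	for (retry = 0; retry < 100; retry++) {
		uint64_t gen_start = dma_buf[0];
		size_t i;

		for (i = 0; i < n_stats; i++)
			out[i] = dma_buf[i];
		if (dma_buf[n_stats - 1] == gen_start)
			return true;
	}
	return false;
}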
+ */ +#define MC_CMD_MAC_GENERATION_END 0x60 +#define MC_CMD_MAC_NSTATS 0x61 /* enum */ + +/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3) +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 +/* enum: Start of FEC stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_FEC_DMABUF_START 0x61 +/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61 +/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2) + */ +#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62 +/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63 +/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64 +/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65 +/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ +#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66 +/* enum: This includes the space at offset 103 which is the final + * GENERATION_END in a MAC_STATS_V2 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V2 0x68 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3) +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 +/* enum: Start of CTPIO stats buffer space, Medford2 and up */ +#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 +/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the + * target VI + */ +#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68 +/* enum: Number of times a CTPIO send wrote beyond frame end (informational + * only) + */ +#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69 +/* enum: Number of CTPIO failures because the TX doorbell was written before + * the end of the frame data + */ +#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a +/* enum: Number of CTPIO failures because the internal FIFO overflowed */ +#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b +/* enum: Number of CTPIO failures because the host did not deliver data fast + * enough to avoid MAC underflow + */ +#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c +/* enum: Number of CTPIO failures because the host did not deliver all the + * frame data within the timeout + */ +#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d +/* enum: Number of CTPIO failures because the frame data arrived out of order + * or with gaps + */ +#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e +/* enum: Number of CTPIO failures because the host started a new frame before + * completing the 
previous one + */ +#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f +/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits + * or not 32-bit aligned + */ +#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70 +/* enum: Number of CTPIO fallbacks because another VI on the same port was + * sending a CTPIO frame + */ +#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71 +/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled + */ +#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72 +/* enum: Number of CTPIO fallbacks because length in header was less than 29 + * bytes + */ +#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73 +/* enum: Total number of successful CTPIO sends on this port */ +#define MC_CMD_MAC_CTPIO_SUCCESS 0x74 +/* enum: Total number of CTPIO fallbacks on this port */ +#define MC_CMD_MAC_CTPIO_FALLBACK 0x75 +/* enum: Total number of CTPIO poisoned frames on this port, whether erased or + * not + */ +#define MC_CMD_MAC_CTPIO_POISON 0x76 +/* enum: Total number of CTPIO erased frames on this port */ +#define MC_CMD_MAC_CTPIO_ERASE 0x77 +/* enum: This includes the space at offset 120 which is the final + * GENERATION_END in a MAC_STATS_V3 response and otherwise unused. + */ +#define MC_CMD_MAC_NSTATS_V3 0x79 +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ + +/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3) +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum: Start of V4 stats buffer space */ +#define MC_CMD_MAC_V4_DMABUF_START 0x79 +/* enum: RXDP counter: Number of packets truncated because scattering was + * disabled. + */ +#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79 +/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting + * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a +/* enum: RXDP counter: Number of times the RXDP timed out while head of line + * blocking. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b +/* enum: This includes the space at offset 124 which is the final + * GENERATION_END in a MAC_STATS_V4 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V4 0x7d +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */ + + +/***********************************/ +/* MC_CMD_SRIOV + * to be documented + */ +#define MC_CMD_SRIOV 0x30 + +/* MC_CMD_SRIOV_IN msgrequest */ +#define MC_CMD_SRIOV_IN_LEN 12 +#define MC_CMD_SRIOV_IN_ENABLE_OFST 0 +#define MC_CMD_SRIOV_IN_ENABLE_LEN 4 +#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 +#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4 +#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 +#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4 + +/* MC_CMD_SRIOV_OUT msgresponse */ +#define MC_CMD_SRIOV_OUT_LEN 8 +#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 +#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4 + +/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 +/* this is only used for the first record */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 + + +/***********************************/ +/* MC_CMD_MEMCPY + * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data + * embedded directly in the command. + * + * A common pattern is for a client to use generation counts to signal a dma + * update of a datastructure. To facilitate this, this MCDI operation can + * contain multiple requests which are executed in strict order. Requests take + * the form of duplicating the entire MCDI request continuously (including the + * requests record, which is ignored in all but the first structure) + * + * The source data can either come from a DMA from the host, or it can be + * embedded within the request directly, thereby eliminating a DMA read. To + * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and + * ADDR_LO=offset, and inserts the data at %offset from the start of the + * payload. It's the callers responsibility to ensure that the embedded data + * doesn't overlap the records. 
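A sketch of building a single-record MC_CMD_MEMCPY request with the source data embedded inline, per the record layout above: FROM_RID set to RID_INLINE (0x100), FROM_ADDR_HI zero and FROM_ADDR_LO holding the offset of the data from the start of the payload, here placed straight after the 32-byte record. The mcdi_put_dword() helper and the overall buffer handling are assumptions, not driver API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MEMCPY_RECORD_LEN	32	/* MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN        */
#define MEMCPY_RID_INLINE	0x100	/* MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE */

/* Write one little-endian MCDI dword at a byte offset (hypothetical helper). */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build a one-record request with the source data appended after the record. */
static size_t memcpy_inline_build(uint8_t *req, size_t bufsz,
				  uint32_t to_rid, uint64_t to_addr,
				  const void *data, uint32_t data_len)
{
	size_t total = MEMCPY_RECORD_LEN + data_len;

	if (total > bufsz)
		return 0;
	mcdi_put_dword(req, 0, 1);				/* NUM_RECORDS  */
	mcdi_put_dword(req, 4, to_rid);				/* TO_RID       */
	mcdi_put_dword(req, 8, (uint32_t)to_addr);		/* TO_ADDR_LO   */
	mcdi_put_dword(req, 12, (uint32_t)(to_addr >> 32));	/* TO_ADDR_HI   */
	mcdi_put_dword(req, 16, MEMCPY_RID_INLINE);		/* FROM_RID     */
	mcdi_put_dword(req, 20, MEMCPY_RECORD_LEN);		/* FROM_ADDR_LO = data offset */
	mcdi_put_dword(req, 24, 0);				/* FROM_ADDR_HI */
	mcdi_put_dword(req, 28, data_len);			/* LENGTH       */
	memcpy(req + MEMCPY_RECORD_LEN, data, data_len);
	return total;
}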
+ * + * Returns: 0, EINVAL (invalid RID) + */ +#define MC_CMD_MEMCPY 0x31 + +/* MC_CMD_MEMCPY_IN msgrequest */ +#define MC_CMD_MEMCPY_IN_LENMIN 32 +#define MC_CMD_MEMCPY_IN_LENMAX 224 +#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992 +#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) +#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32) +/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ +#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 +#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 +#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31 + +/* MC_CMD_MEMCPY_OUT msgresponse */ +#define MC_CMD_MEMCPY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_SET + * Set a WoL filter. + */ +#define MC_CMD_WOL_FILTER_SET 0x32 +#undef MC_CMD_0x32_PRIVILEGE_CTG + +#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_SET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ +/* A type value of 1 is unused. */ +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 +/* enum: Magic */ +#define MC_CMD_WOL_TYPE_MAGIC 0x0 +/* enum: MS Windows Magic */ +#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 +/* enum: IPv4 Syn */ +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +/* enum: IPv6 Syn */ +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +/* enum: Bitmap */ +#define MC_CMD_WOL_TYPE_BITMAP 0x5 +/* enum: Link */ +#define MC_CMD_WOL_TYPE_LINK 0x6 +/* enum: (Above this for future use) */ +#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 + +/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 
16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1 + +/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 + +/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_REMOVE + * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS + */ +#define MC_CMD_WOL_FILTER_REMOVE 0x33 +#undef MC_CMD_0x33_PRIVILEGE_CTG + +#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ +#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4 + +/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_RESET + * Reset (i.e. remove all) WoL filters. Locks required: None. 
Returns: 0, + * ENOSYS + */ +#define MC_CMD_WOL_FILTER_RESET 0x34 +#undef MC_CMD_0x34_PRIVILEGE_CTG + +#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ +#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ + +/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MCAST_HASH + * Set the MCAST hash value without otherwise reconfiguring the MAC + */ +#define MC_CMD_SET_MCAST_HASH 0x35 + +/* MC_CMD_SET_MCAST_HASH_IN msgrequest */ +#define MC_CMD_SET_MCAST_HASH_IN_LEN 32 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16 + +/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */ +#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_TYPES + * Return bitfield indicating available types of virtual NVRAM partitions. + * Locks required: none. Returns: 0 + */ +#define MC_CMD_NVRAM_TYPES 0x36 +#undef MC_CMD_0x36_PRIVILEGE_CTG + +#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_TYPES_IN msgrequest */ +#define MC_CMD_NVRAM_TYPES_IN_LEN 0 + +/* MC_CMD_NVRAM_TYPES_OUT msgresponse */ +#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 +/* Bit mask of supported types. */ +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 +/* enum: Disabled callisto. */ +#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 +/* enum: MC firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 +/* enum: MC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 +/* enum: Static configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 +/* enum: Static configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 +/* enum: Dynamic configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 +/* enum: Dynamic configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 +/* enum: Expansion Rom. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 +/* enum: Expansion Rom Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 +/* enum: Expansion Rom Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 +/* enum: Phy Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa +/* enum: Phy Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb +/* enum: Log. */ +#define MC_CMD_NVRAM_TYPE_LOG 0xc +/* enum: FPGA image. */ +#define MC_CMD_NVRAM_TYPE_FPGA 0xd +/* enum: FPGA backup image */ +#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe +/* enum: FC firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW 0xf +/* enum: FC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10 +/* enum: CPLD image. */ +#define MC_CMD_NVRAM_TYPE_CPLD 0x11 +/* enum: Licensing information. */ +#define MC_CMD_NVRAM_TYPE_LICENSE 0x12 +/* enum: FC Log. */ +#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 +/* enum: Additional flash on FPGA. */ +#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14 + + +/***********************************/ +/* MC_CMD_NVRAM_INFO + * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0, + * EINVAL (bad type). 
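/*
 * Usage sketch (illustrative only): interpreting the MC_CMD_NVRAM_TYPES
 * response.  The TYPES word at MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST is a bit
 * mask in which bit N corresponds to partition type code N (e.g. bit
 * MC_CMD_NVRAM_TYPE_MC_FW for the MC firmware partition).
 */
#include <stdbool.h>
#include <stdint.h>

static bool nvram_type_supported(uint32_t types_mask, unsigned int type)
{
	return type < 32 && (types_mask & (1u << type)) != 0;
}

/* Example:
 *	if (nvram_type_supported(types, MC_CMD_NVRAM_TYPE_MC_FW))
 *		... the partition can then be queried with MC_CMD_NVRAM_INFO ...
 */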
+ */ +#define MC_CMD_NVRAM_INFO 0x37 +#undef MC_CMD_0x37_PRIVILEGE_CTG + +#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_INFO_IN msgrequest */ +#define MC_CMD_NVRAM_INFO_IN_LEN 4 +#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_INFO_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_OUT_LEN 24 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3 +#define MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4 + +/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +#define 
MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4 +/* Writes must be multiples of this size. Added to support the MUM on Sorrento. + */ +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_START + * Start a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if + * PHY_LOCK required and not held). In an adapter bound to a TSA controller, + * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types + * i.e. static config, dynamic config and expansion ROM config. Attempting to + * perform this operation on a restricted partition will return the error + * EPERM. + */ +#define MC_CMD_NVRAM_UPDATE_START 0x38 +#undef MC_CMD_0x38_PRIVILEGE_CTG + +#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request. + * Use NVRAM_UPDATE_START_V2_IN in new code + */ +#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START + * request with additional flags indicating version of command in use. See + * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use + * paired up with NVRAM_UPDATE_FINISH_V2_IN. + */ +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 + +/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */ +#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_READ + * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. 
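/*
 * Usage sketch (illustrative only): composing the 8-byte
 * MC_CMD_NVRAM_UPDATE_START_V2_IN request with REPORT_VERIFY_RESULT set,
 * so that the paired NVRAM_UPDATE_FINISH_V2 call can report signed-image
 * verification results.  put_le32() is a local little-endian helper, not
 * part of the header.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

static void build_nvram_update_start_v2(uint8_t *buf, uint32_t partition_type)
{
	memset(buf, 0, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
	put_le32(buf, MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST, partition_type);
	put_le32(buf, MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST,
		 1u << MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN);
}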
Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_READ 0x39 +#undef MC_CMD_0x39_PRIVILEGE_CTG + +#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_READ_IN msgrequest */ +#define MC_CMD_NVRAM_READ_IN_LEN 12 +#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4 + +/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ +#define MC_CMD_NVRAM_READ_IN_V2_LEN 16 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4 +/* Optional control info. If a partition is stored with an A/B versioning + * scheme (i.e. in more than one physical partition in NVRAM) the host can set + * this to control which underlying physical partition is used to read data + * from. This allows it to perform a read-modify-write-verify with the write + * lock continuously held by calling NVRAM_UPDATE_START, reading the old + * contents using MODE=TARGET_CURRENT, overwriting the old partition and then + * verifying by reading with MODE=TARGET_BACKUP. + */ +#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4 +/* enum: Same as omitting MODE: caller sees data in current partition unless it + * holds the write lock in which case it sees data in the partition it is + * updating. + */ +#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0 +/* enum: Read from the current partition of an A/B pair, even if holding the + * write lock. + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1 +/* enum: Read from the non-current (i.e. to be updated) partition of an A/B + * pair + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2 + +/* MC_CMD_NVRAM_READ_OUT msgresponse */ +#define MC_CMD_NVRAM_READ_OUT_LENMIN 1 +#define MC_CMD_NVRAM_READ_OUT_LENMAX 252 +#define MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_NVRAM_WRITE + * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. 
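/*
 * Usage sketch (illustrative only): the read-modify-write-verify cycle
 * described above for A/B partitions uses the MODE field of
 * MC_CMD_NVRAM_READ_IN_V2.  While the write lock from NVRAM_UPDATE_START is
 * held, the old contents are read with TARGET_CURRENT and the freshly
 * written image is verified with TARGET_BACKUP.  put_le32() is a local
 * helper; the MCDI transport itself is left to the surrounding driver.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

static void build_nvram_read_v2(uint8_t *buf, uint32_t type, uint32_t offset,
				uint32_t length, uint32_t mode)
{
	memset(buf, 0, MC_CMD_NVRAM_READ_IN_V2_LEN);
	put_le32(buf, MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST, type);
	put_le32(buf, MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST, offset);
	put_le32(buf, MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST, length);
	/* mode = MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP to verify the image
	 * just written, MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT to capture
	 * the previous contents. */
	put_le32(buf, MC_CMD_NVRAM_READ_IN_V2_MODE_OFST, mode);
}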
Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_WRITE 0x3a +#undef MC_CMD_0x3a_PRIVILEGE_CTG + +#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_WRITE_IN msgrequest */ +#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1) +#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008 + +/* MC_CMD_NVRAM_WRITE_OUT msgresponse */ +#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_ERASE + * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_ERASE 0x3b +#undef MC_CMD_0x3b_PRIVILEGE_CTG + +#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_ERASE_IN msgrequest */ +#define MC_CMD_NVRAM_ERASE_IN_LEN 12 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4 + +/* MC_CMD_NVRAM_ERASE_OUT msgresponse */ +#define MC_CMD_NVRAM_ERASE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_FINISH + * Finish a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/ + * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to + * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of + * partition types i.e. static config, dynamic config and expansion ROM config. + * Attempting to perform this operation on a restricted partition will return + * the error EPERM. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c +#undef MC_CMD_0x3c_PRIVILEGE_CTG + +#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH + * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4 + +/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH + * request with additional flags indicating version of NVRAM_UPDATE commands in + * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended + * functionality. 
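/*
 * Usage sketch (illustrative only): writing an image in chunks with
 * MC_CMD_NVRAM_WRITE.  Each request carries at most
 * MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM payload bytes after the 12-byte
 * header.  put_le32() is a local helper and the transport is left abstract.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

/* Fill one write request; returns the total request length in bytes. */
static size_t build_nvram_write_chunk(uint8_t *req, uint32_t type,
				      uint32_t offset, const uint8_t *data,
				      size_t len)
{
	if (len > MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM)
		len = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
	put_le32(req, MC_CMD_NVRAM_WRITE_IN_TYPE_OFST, type);
	put_le32(req, MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST, offset);
	put_le32(req, MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST, (uint32_t)len);
	memcpy(req + MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST, data, len);
	return MC_CMD_NVRAM_WRITE_IN_LEN(len);
}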
Use paired up with NVRAM_UPDATE_START_V2_IN. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1 + +/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH + * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0 + +/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse: + * + * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure + * firmware validation where applicable back to the host. + * + * Medford only: For signed firmware images, such as those for medford, the MC + * firmware verifies the signature before marking the firmware image as valid. + * This process takes a few seconds to complete. So is likely to take more than + * the MCDI timeout. Hence signature verification is initiated when + * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the + * MCDI command is run in a background MCDI processing thread. This response + * payload includes the results of the signature verification. Note that the + * per-partition nvram lock in firmware is only released after the verification + * has completed. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4 +/* Result of nvram update completion processing. Result codes that indicate an + * internal build failure and therefore not expected to be seen by customers in + * the field are marked with a prefix 'Internal-error'. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4 +/* enum: Invalid return code; only non-zero values are defined. Defined as + * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT. + */ +#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 +/* enum: Verify succeeded without any errors. */ +#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 +/* enum: CMS format verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 +/* enum: Invalid CMS format in image metadata. */ +#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 +/* enum: Message digest verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 +/* enum: Error in message digest calculated over the reflash-header, payload + * and reflash-trailer. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 +/* enum: Signature verification failed due to an internal error. 
*/ +#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 +/* enum: There are no valid signatures in the image. */ +#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 +/* enum: Trusted approvers verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 +/* enum: The Trusted approver's list is empty. */ +#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 +/* enum: Signature chain verification failed due to an internal error. */ +#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa +/* enum: The signers of the signatures in the image are not listed in the + * Trusted approver's list. + */ +#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb +/* enum: The image contains a test-signed certificate, but the adapter accepts + * only production signed images. + */ +#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc +/* enum: The image has a lower security level than the current firmware. */ +#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd +/* enum: Internal-error. The signed image is missing the 'contents' section, + * where the 'contents' section holds the actual image payload to be applied. + */ +#define MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe +/* enum: Internal-error. The bundle header is invalid. */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf +/* enum: Internal-error. The bundle does not have a valid reflash image layout. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 +/* enum: Internal-error. The bundle has an inconsistent layout of components or + * incorrect checksum. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 +/* enum: Internal-error. The bundle manifest is inconsistent with components in + * the bundle. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 +/* enum: Internal-error. The number of components in a bundle do not match the + * number of components advertised by the bundle manifest. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 +/* enum: Internal-error. The bundle contains too many components for the MC + * firmware to process + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 +/* enum: Internal-error. The bundle manifest has an invalid/inconsistent + * component. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 +/* enum: Internal-error. The hash of a component does not match the hash stored + * in the bundle manifest. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 +/* enum: Internal-error. Component hash calculation failed. */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 +/* enum: Internal-error. The component does not have a valid reflash image + * layout. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 +/* enum: The bundle processing code failed to copy a component to its target + * partition. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 +/* enum: The update operation is in-progress. */ +#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a + + +/***********************************/ +/* MC_CMD_REBOOT + * Reboot the MC. + * + * The AFTER_ASSERTION flag is intended to be used when the driver notices an + * assertion failure (at which point it is expected to perform a complete tear + * down and reinitialise), to allow both ports to reset the MC once in an + * atomic fashion. 
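/*
 * Usage sketch (illustrative only): interpreting the RESULT_CODE returned
 * in MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT.  Signed-image verification runs in
 * a background MCDI thread, so RC_PENDING means "poll again" (e.g. with the
 * POLL_VERIFY_RESULT flag) rather than failure.  The status enum and the
 * treatment of RC_UNKNOWN are local policy choices of this sketch.
 */
#include <stdint.h>

enum nvram_finish_status { NVRAM_FINISH_OK, NVRAM_FINISH_RETRY, NVRAM_FINISH_FAILED };

static enum nvram_finish_status classify_verify_result(uint32_t result_code)
{
	switch (result_code) {
	case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
		return NVRAM_FINISH_OK;
	case MC_CMD_NVRAM_VERIFY_RC_PENDING:
		return NVRAM_FINISH_RETRY;	/* verification still running */
	case MC_CMD_NVRAM_VERIFY_RC_UNKNOWN:
		return NVRAM_FINISH_OK;		/* no result reported (legacy firmware) */
	default:
		return NVRAM_FINISH_FAILED;	/* a verification error code */
	}
}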
+ * + * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1, + * which means that they will automatically reboot out of the assertion + * handler, so this is in practise an optional operation. It is still + * recommended that drivers execute this to support custom firmwares with + * REBOOT_ON_ASSERT=0. + * + * Locks required: NONE Returns: Nothing. You get back a response with ERR=1, + * DATALEN=0 + */ +#define MC_CMD_REBOOT 0x3d +#undef MC_CMD_0x3d_PRIVILEGE_CTG + +#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_REBOOT_IN msgrequest */ +#define MC_CMD_REBOOT_IN_LEN 4 +#define MC_CMD_REBOOT_IN_FLAGS_OFST 0 +#define MC_CMD_REBOOT_IN_FLAGS_LEN 4 +#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */ + +/* MC_CMD_REBOOT_OUT msgresponse */ +#define MC_CMD_REBOOT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SCHEDINFO + * Request scheduler info. Locks required: NONE. Returns: An array of + * (timeslice,maximum overrun), one for each thread, in ascending order of + * thread address. + */ +#define MC_CMD_SCHEDINFO 0x3e +#undef MC_CMD_0x3e_PRIVILEGE_CTG + +#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SCHEDINFO_IN msgrequest */ +#define MC_CMD_SCHEDINFO_IN_LEN 0 + +/* MC_CMD_SCHEDINFO_OUT msgresponse */ +#define MC_CMD_SCHEDINFO_OUT_LENMIN 4 +#define MC_CMD_SCHEDINFO_OUT_LENMAX 252 +#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4) +#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0 +#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4 +#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1 +#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63 +#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_REBOOT_MODE + * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot + * mode to the specified value. Returns the old mode. + */ +#define MC_CMD_REBOOT_MODE 0x3f +#undef MC_CMD_0x3f_PRIVILEGE_CTG + +#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_REBOOT_MODE_IN msgrequest */ +#define MC_CMD_REBOOT_MODE_IN_LEN 4 +#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4 +/* enum: Normal. */ +#define MC_CMD_REBOOT_MODE_NORMAL 0x0 +/* enum: Power-on Reset. */ +#define MC_CMD_REBOOT_MODE_POR 0x2 +/* enum: Snapper. */ +#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 +/* enum: snapper fake POR */ +#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4 +#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0 +#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7 +#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1 + +/* MC_CMD_REBOOT_MODE_OUT msgresponse */ +#define MC_CMD_REBOOT_MODE_OUT_LEN 4 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4 + + +/***********************************/ +/* MC_CMD_SENSOR_INFO + * Returns information about every available sensor. + * + * Each sensor has a single (16bit) value, and a corresponding state. The + * mapping between value and state is nominally determined by the MC, but may + * be implemented using up to 2 ranges per sensor. + * + * This call returns a mask (32bit) of the sensors that are supported by this + * platform, then an array of sensor information structures, in order of sensor + * type (but without gaps for unimplemented sensors). Each structure defines + * the ranges for the corresponding sensor. An unused range is indicated by + * equal limit values. If one range is used, a value outside that range results + * in STATE_FATAL. 
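/*
 * Usage sketch (illustrative only): issuing MC_CMD_REBOOT after an
 * assertion has been detected.  The 4-byte request carries only the FLAGS
 * word; because the MC reboots, the caller gets back an error-flagged,
 * zero-length response and should not treat that as a command failure.
 * put_le32() is a local helper.
 */
#include <stdint.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

static void build_reboot_after_assertion(uint8_t buf[MC_CMD_REBOOT_IN_LEN])
{
	put_le32(buf, MC_CMD_REBOOT_IN_FLAGS_OFST,
		 MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
}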
If two ranges are used, a value outside the second range + * results in STATE_FATAL while a value outside the first and inside the second + * range results in STATE_WARNING. + * + * Sensor masks and sensor information arrays are organised into pages. For + * backward compatibility, older host software can only use sensors in page 0. + * Bit 32 in the sensor mask was previously unused, and is no reserved for use + * as the next page flag. + * + * If the request does not contain a PAGE value then firmware will only return + * page 0 of sensor information, with bit 31 in the sensor mask cleared. + * + * If the request contains a PAGE value then firmware responds with the sensor + * mask and sensor information array for that page of sensors. In this case bit + * 31 in the mask is set if another page exists. + * + * Locks required: None Returns: 0 + */ +#define MC_CMD_SENSOR_INFO 0x41 +#undef MC_CMD_0x41_PRIVILEGE_CTG + +#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SENSOR_INFO_IN msgrequest */ +#define MC_CMD_SENSOR_INFO_IN_LEN 0 + +/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */ +#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4 +/* Which page of sensors to report. + * + * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit). + * + * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. + */ +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4 + +/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8 +/* Which page of sensors to report. + * + * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit). + * + * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. + */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1 + +/* MC_CMD_SENSOR_INFO_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) +#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 +/* enum: Controller temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +/* enum: Phy common temperature: degC */ +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +/* enum: Controller cooling: bool */ +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +/* enum: Phy 0 temperature: degC */ +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +/* enum: Phy 0 cooling: bool */ +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +/* enum: Phy 1 temperature: degC */ +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +/* enum: Phy 1 cooling: bool */ +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +/* enum: 1.0v power: mV */ +#define MC_CMD_SENSOR_IN_1V0 0x7 +/* enum: 1.2v power: mV */ +#define MC_CMD_SENSOR_IN_1V2 0x8 +/* enum: 1.8v power: mV */ +#define MC_CMD_SENSOR_IN_1V8 0x9 +/* enum: 2.5v power: mV */ +#define MC_CMD_SENSOR_IN_2V5 0xa +/* enum: 3.3v power: mV */ +#define MC_CMD_SENSOR_IN_3V3 0xb +/* enum: 12v power: mV */ +#define MC_CMD_SENSOR_IN_12V0 0xc +/* enum: 1.2v analogue power: mV */ +#define MC_CMD_SENSOR_IN_1V2A 0xd +/* enum: reference 
voltage: mV */ +#define MC_CMD_SENSOR_IN_VREF 0xe +/* enum: AOE FPGA power: mV */ +#define MC_CMD_SENSOR_OUT_VAOE 0xf +/* enum: AOE FPGA temperature: degC */ +#define MC_CMD_SENSOR_AOE_TEMP 0x10 +/* enum: AOE FPGA PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +/* enum: AOE PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_TEMP 0x12 +/* enum: Fan 0 speed: RPM */ +#define MC_CMD_SENSOR_FAN_0 0x13 +/* enum: Fan 1 speed: RPM */ +#define MC_CMD_SENSOR_FAN_1 0x14 +/* enum: Fan 2 speed: RPM */ +#define MC_CMD_SENSOR_FAN_2 0x15 +/* enum: Fan 3 speed: RPM */ +#define MC_CMD_SENSOR_FAN_3 0x16 +/* enum: Fan 4 speed: RPM */ +#define MC_CMD_SENSOR_FAN_4 0x17 +/* enum: AOE FPGA input power: mV */ +#define MC_CMD_SENSOR_IN_VAOE 0x18 +/* enum: AOE FPGA current: mA */ +#define MC_CMD_SENSOR_OUT_IAOE 0x19 +/* enum: AOE FPGA input current: mA */ +#define MC_CMD_SENSOR_IN_IAOE 0x1a +/* enum: NIC power consumption: W */ +#define MC_CMD_SENSOR_NIC_POWER 0x1b +/* enum: 0.9v power voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9 0x1c +/* enum: 0.9v power current: mA */ +#define MC_CMD_SENSOR_IN_I0V9 0x1d +/* enum: 1.2v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V2 0x1e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f +/* enum: 0.9v power voltage (at ADC): mV */ +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +/* enum: Controller temperature 2: degC */ +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21 +/* enum: Voltage regulator internal temperature: degC */ +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +/* enum: 0.9V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +/* enum: 1.2V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 +/* enum: controller internal temperature sensor voltage (internal ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 +/* enum: controller internal temperature (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 +/* enum: controller internal temperature sensor voltage (external ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 +/* enum: controller internal temperature (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 +/* enum: ambient temperature: degC */ +#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 +/* enum: air flow: bool */ +#define MC_CMD_SENSOR_AIRFLOW 0x2a +/* enum: voltage between VSS08D and VSS08D at CSR: mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b +/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +/* enum: Hotpoint temperature: degC */ +#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +/* enum: Port 0 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e +/* enum: Port 1 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f +/* enum: Mop-up microcontroller reference voltage: mV */ +#define MC_CMD_SENSOR_MUM_VCC 0x30 +/* enum: 0.9v power phase A voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_A 0x31 +/* enum: 0.9v power phase A current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_A 0x32 +/* enum: 0.9V voltage regulator phase A temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 +/* enum: 0.9v power phase B voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_B 0x34 +/* enum: 0.9v power phase B current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_B 0x35 +/* enum: 0.9V voltage regulator phase B temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 +/* enum: 
CCOM AVREG 1v2 supply (interval ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 +/* enum: CCOM AVREG 1v2 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 +/* enum: CCOM AVREG 1v8 supply (interval ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 +/* enum: CCOM AVREG 1v8 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +/* enum: CCOM RTS temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f +/* enum: controller internal temperature sensor voltage on master core + * (internal ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +/* enum: controller internal temperature on master core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +/* enum: controller internal temperature sensor voltage on master core + * (external ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +/* enum: controller internal temperature on master core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +/* enum: controller internal temperature on slave core sensor voltage (internal + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +/* enum: controller internal temperature on slave core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +/* enum: controller internal temperature on slave core sensor voltage (external + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +/* enum: controller internal temperature on slave core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +/* enum: Voltage supplied to the SODIMMs from their power supply: mV */ +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +/* enum: Temperature of SODIMM 0 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +/* enum: Temperature of SODIMM 1 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY0_VCC 0x4c +/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY1_VCC 0x4d +/* enum: Controller die temperature (TDIODE): degC */ +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +/* enum: Board temperature (front): degC */ +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +/* enum: Board temperature (back): degC */ +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 +/* enum: 1.8v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V8 0x51 +/* enum: 2.5v power current: mA */ +#define MC_CMD_SENSOR_IN_I2V5 0x52 +/* enum: 3.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I3V3 0x53 +/* enum: 12v power current: mA */ +#define MC_CMD_SENSOR_IN_I12V0 0x54 +/* enum: 1.3v power: mV */ +#define MC_CMD_SENSOR_IN_1V3 0x55 +/* enum: 1.3v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V3 0x56 +/* enum: Engineering sensor 1 */ +#define MC_CMD_SENSOR_ENGINEERING_1 0x57 +/* enum: Engineering sensor 2 */ +#define MC_CMD_SENSOR_ENGINEERING_2 0x58 +/* enum: Engineering sensor 3 */ +#define MC_CMD_SENSOR_ENGINEERING_3 0x59 +/* enum: Engineering sensor 4 */ +#define MC_CMD_SENSOR_ENGINEERING_4 0x5a +/* enum: Engineering sensor 5 */ +#define MC_CMD_SENSOR_ENGINEERING_5 0x5b +/* enum: Engineering sensor 6 */ +#define MC_CMD_SENSOR_ENGINEERING_6 0x5c +/* enum: Engineering sensor 7 */ +#define 
MC_CMD_SENSOR_ENGINEERING_7 0x5d +/* enum: Engineering sensor 8 */ +#define MC_CMD_SENSOR_ENGINEERING_8 0x5e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +#define MC_CMD_SENSOR_ENTRY_OFST 4 +#define MC_CMD_SENSOR_ENTRY_LEN 8 +#define MC_CMD_SENSOR_ENTRY_LO_OFST 4 +#define MC_CMD_SENSOR_ENTRY_HI_OFST 8 +#define MC_CMD_SENSOR_ENTRY_MINNUM 0 +#define MC_CMD_SENSOR_ENTRY_MAXNUM 31 +#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 + +/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO_OUT */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1 +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +/* MC_CMD_SENSOR_ENTRY_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_LEN 8 */ +/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */ +/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */ +/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */ +/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */ + +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16 + + +/***********************************/ +/* MC_CMD_READ_SENSORS + * Returns the current reading from each sensor. DMAs an array of sensor + * readings, in order of sensor type (but without gaps for unimplemented + * sensors), into host memory. Each array element is a + * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword. + * + * If the request does not contain the LENGTH field then only sensors 0 to 30 + * are reported, to avoid DMA buffer overflow in older host software. If the + * sensor reading require more space than the LENGTH allows, then return + * EINVAL. + * + * The MC will send a SENSOREVT event every time any sensor changes state. The + * driver is responsible for ensuring that it doesn't miss any events. The + * board will function normally if all sensors are in STATE_OK or + * STATE_WARNING. Otherwise the board should not be expected to function. 
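/*
 * Usage sketch (illustrative only): mapping a sensor type code onto the
 * paged masks returned by MC_CMD_SENSOR_INFO_EXT_OUT.  Each page advertises
 * 31 sensors plus a "next page" flag in bit 31 of the mask, so the host
 * keeps requesting PAGE = 0, 1, ... until that flag is clear.  Fetching the
 * masks themselves is left to the surrounding driver.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sensor_present(uint32_t page_mask, unsigned int type, unsigned int page)
{
	unsigned int bit = type % 32;

	/* bit 31 of every page is the next-page flag, never a real sensor */
	if (type / 32 != page || bit == 31)
		return false;
	return (page_mask & (1u << bit)) != 0;
}

static bool more_sensor_pages(uint32_t page_mask)
{
	return (page_mask >> MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN) & 1;
}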
+ */ +#define MC_CMD_READ_SENSORS 0x42 +#undef MC_CMD_0x42_PRIVILEGE_CTG + +#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_READ_SENSORS_IN msgrequest */ +#define MC_CMD_READ_SENSORS_IN_LEN 8 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 + +/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. */ +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4 + +/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1 + +/* MC_CMD_READ_SENSORS_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_OUT_LEN 0 + +/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0 + +/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 +/* enum: Ok. */ +#define MC_CMD_SENSOR_STATE_OK 0x0 +/* enum: Breached warning threshold. */ +#define MC_CMD_SENSOR_STATE_WARNING 0x1 +/* enum: Breached fatal threshold. */ +#define MC_CMD_SENSOR_STATE_FATAL 0x2 +/* enum: Fault with sensor. */ +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +/* enum: Sensor is working but does not currently have a reading. */ +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 +/* enum: Sensor initialisation failed. 
*/ +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8 + + +/***********************************/ +/* MC_CMD_GET_PHY_STATE + * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot + * (e.g. due to missing or corrupted firmware). Locks required: None. Return + * code: 0 + */ +#define MC_CMD_GET_PHY_STATE 0x43 +#undef MC_CMD_0x43_PRIVILEGE_CTG + +#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PHY_STATE_IN msgrequest */ +#define MC_CMD_GET_PHY_STATE_IN_LEN 0 + +/* MC_CMD_GET_PHY_STATE_OUT msgresponse */ +#define MC_CMD_GET_PHY_STATE_OUT_LEN 4 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4 +/* enum: Ok. */ +#define MC_CMD_PHY_STATE_OK 0x1 +/* enum: Faulty. */ +#define MC_CMD_PHY_STATE_ZOMBIE 0x2 + + +/***********************************/ +/* MC_CMD_SETUP_8021QBB + * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to + * disable 802.Qbb for a given priority. + */ +#define MC_CMD_SETUP_8021QBB 0x44 + +/* MC_CMD_SETUP_8021QBB_IN msgrequest */ +#define MC_CMD_SETUP_8021QBB_IN_LEN 32 +#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0 +#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32 + +/* MC_CMD_SETUP_8021QBB_OUT msgresponse */ +#define MC_CMD_SETUP_8021QBB_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_GET + * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS + */ +#define MC_CMD_WOL_FILTER_GET 0x45 +#undef MC_CMD_0x45_PRIVILEGE_CTG + +#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_WOL_FILTER_GET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_GET_IN_LEN 0 + +/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD + * Add a protocol offload to NIC for lights-out state. Locks required: None. 
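/*
 * Usage sketch (illustrative only): unpacking one
 * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword DMAed by MC_CMD_READ_SENSORS.
 * Each entry carries a 16-bit reading, an 8-bit state and an 8-bit type,
 * extracted here with the LBN macros above.  The struct layout is local to
 * this sketch.
 */
#include <stdint.h>

struct sensor_reading {
	uint16_t value;
	uint8_t state;	/* MC_CMD_SENSOR_STATE_OK, _WARNING, _FATAL, ... */
	uint8_t type;	/* MC_CMD_SENSOR_* type code */
};

static struct sensor_reading decode_sensor_entry(uint32_t entry)
{
	struct sensor_reading r;

	r.value = (entry >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN) & 0xffff;
	r.state = (entry >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN) & 0xff;
	r.type  = (entry >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN) & 0xff;
	return r;
}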
+ * Returns: 0, ENOSYS + */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 +#undef MC_CMD_0x46_PRIVILEGE_CTG + +#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD + * Remove a protocol offload from NIC for lights-out state. Locks required: + * None. Returns: 0, ENOSYS + */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 +#undef MC_CMD_0x47_PRIVILEGE_CTG + +#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4 + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAC_RESET_RESTORE + * Restore MAC after block reset. Locks required: None. Returns: 0. + */ +#define MC_CMD_MAC_RESET_RESTORE 0x48 + +/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */ +#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 + +/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */ +#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TESTASSERT + * Deliberately trigger an assert-detonation in the firmware for testing + * purposes (i.e. 
to allow tests that the driver copes gracefully). Locks + * required: None Returns: 0 + */ +#define MC_CMD_TESTASSERT 0x49 +#undef MC_CMD_0x49_PRIVILEGE_CTG + +#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_TESTASSERT_IN msgrequest */ +#define MC_CMD_TESTASSERT_IN_LEN 0 + +/* MC_CMD_TESTASSERT_OUT msgresponse */ +#define MC_CMD_TESTASSERT_OUT_LEN 0 + +/* MC_CMD_TESTASSERT_V2_IN msgrequest */ +#define MC_CMD_TESTASSERT_V2_IN_LEN 4 +/* How to provoke the assertion */ +#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0 +#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4 +/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless + * you're testing firmware, this is what you want. + */ +#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 +/* enum: Assert using assert(0); */ +#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 +/* enum: Deliberately trigger a watchdog */ +#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 +/* enum: Deliberately trigger a trap by loading from an invalid address */ +#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 +/* enum: Deliberately trigger a trap by storing to an invalid address */ +#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 +/* enum: Jump to an invalid address */ +#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 + +/* MC_CMD_TESTASSERT_V2_OUT msgresponse */ +#define MC_CMD_TESTASSERT_V2_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WORKAROUND + * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't + * understand the given workaround number - which should not be treated as a + * hard error by client code. This op does not imply any semantics about each + * workaround, that's between the driver and the mcfw on a per-workaround + * basis. Locks required: None. Returns: 0, EINVAL . + */ +#define MC_CMD_WORKAROUND 0x4a +#undef MC_CMD_0x4a_PRIVILEGE_CTG + +#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_WORKAROUND_IN msgrequest */ +#define MC_CMD_WORKAROUND_IN_LEN 8 +/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ +#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 +#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_WORKAROUND_BUG17230 0x1 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_WORKAROUND_BUG35388 0x2 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_WORKAROUND_BUG35017 0x3 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_WORKAROUND_BUG41750 0x4 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_WORKAROUND_BUG42008 0x5 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) + * This feature cannot be turned on/off while there are any filters already + * present. The behaviour in such case depends on the acting client's privilege + * level. If the client has the admin privilege, then all functions that have + * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise + * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. + */ +#define MC_CMD_WORKAROUND_BUG26807 0x6 +/* enum: Bug 61265 work around (broken EVQ TMR writes). 
*/ +#define MC_CMD_WORKAROUND_BUG61265 0x7 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ +#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 +#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4 + +/* MC_CMD_WORKAROUND_OUT msgresponse */ +#define MC_CMD_WORKAROUND_OUT_LEN 0 + +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_PHY_MEDIA_INFO + * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for + * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG + * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the + * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1 + * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80. + * Anything else: currently undefined. Locks required: None. Return code: 0. + */ +#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b +#undef MC_CMD_0x4b_PRIVILEGE_CTG + +#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4 + +/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_NVRAM_TEST + * Test a particular NVRAM partition for valid contents (where "valid" depends + * on the type of partition). + */ +#define MC_CMD_NVRAM_TEST 0x4c +#undef MC_CMD_0x4c_PRIVILEGE_CTG + +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_TEST_IN msgrequest */ +#define MC_CMD_NVRAM_TEST_IN_LEN 4 +#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_TEST_OUT msgresponse */ +#define MC_CMD_NVRAM_TEST_OUT_LEN 4 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4 +/* enum: Passed. */ +#define MC_CMD_NVRAM_TEST_PASS 0x0 +/* enum: Failed. */ +#define MC_CMD_NVRAM_TEST_FAIL 0x1 +/* enum: Not supported. */ +#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 + + +/***********************************/ +/* MC_CMD_MRSFP_TWEAK + * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds. + * I2C I/O expander bits are always read; if equaliser parameters are supplied, + * they are configured first. Locks required: None. Return code: 0, EINVAL. 
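/*
 * Usage sketch (illustrative only): probing a workaround.  The MC answers
 * EINVAL for workaround numbers it does not recognise, and per the comment
 * above that must not be treated as a hard error.  mcdi_rpc() is a
 * hypothetical transport stand-in returning 0 or a negative errno;
 * put_le32() is a local helper.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen);	/* hypothetical */

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

static int try_enable_workaround(uint32_t type)
{
	uint8_t inbuf[MC_CMD_WORKAROUND_IN_LEN];
	int rc;

	memset(inbuf, 0, sizeof(inbuf));
	put_le32(inbuf, MC_CMD_WORKAROUND_IN_TYPE_OFST, type);
	put_le32(inbuf, MC_CMD_WORKAROUND_IN_ENABLED_OFST, 1);	/* non-zero = enable */
	rc = mcdi_rpc(MC_CMD_WORKAROUND, inbuf, sizeof(inbuf));
	return rc == -EINVAL ? 0 : rc;	/* unknown workaround: not fatal */
}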
+ */ +#define MC_CMD_MRSFP_TWEAK 0x4d + +/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 +/* 0-6 low->high de-emph. */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4 +/* 0-8 low->high ref.V */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4 +/* 0-8 0-8 low->high boost */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4 +/* 0-8 low->high ref.V */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4 + +/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ +#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0 + +/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */ +#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 +/* input bits */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4 +/* output bits */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4 +/* direction */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4 +/* enum: Out. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 +/* enum: In. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 + + +/***********************************/ +/* MC_CMD_SENSOR_SET_LIMS + * Adjusts the sensor limits. This is a warranty-voiding operation. Returns: + * ENOENT if the sensor specified does not exist, EINVAL if the limits are out + * of range. + */ +#define MC_CMD_SENSOR_SET_LIMS 0x4e +#undef MC_CMD_0x4e_PRIVILEGE_CTG + +#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +/* interpretation is is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4 +/* interpretation is is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4 +/* interpretation is is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4 +/* interpretation is is sensor-specific. 
*/ +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4 + +/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ +#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_RESOURCE_LIMITS + */ +#define MC_CMD_GET_RESOURCE_LIMITS 0x4f + +/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */ +#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0 + +/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4 + + +/***********************************/ +/* MC_CMD_NVRAM_PARTITIONS + * Reads the list of available virtual NVRAM partition types. Locks required: + * none. Returns: 0, EINVAL (bad type). + */ +#define MC_CMD_NVRAM_PARTITIONS 0x51 +#undef MC_CMD_0x51_PRIVILEGE_CTG + +#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */ +#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0 + +/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4) +/* total number of partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4 +/* type ID code for each of NUM_PARTITIONS partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254 + + +/***********************************/ +/* MC_CMD_NVRAM_METADATA + * Reads soft metadata for a virtual NVRAM partition type. Locks required: + * none. Returns: 0, EINVAL (bad type). 
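A hedged sketch of how a caller might unpack the variable-length MC_CMD_NVRAM_METADATA_OUT response defined below: the flags dword is tested with the *_VALID_LBN bit positions, the four 16-bit W.X.Y.Z version components sit at fixed offsets, and the length of the trailing description string follows from the overall response length via MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len). The mcdi_dword()/mcdi_word() helpers and the printf() reporting are illustrative assumptions, not part of the MCDI definitions.

#include <stdint.h>
#include <stdio.h>

// Little-endian loads of 32-bit and 16-bit MCDI fields.
static uint32_t mcdi_dword(const uint8_t *p, unsigned int ofst)
{
	return (uint32_t)p[ofst] | ((uint32_t)p[ofst + 1] << 8) |
	       ((uint32_t)p[ofst + 2] << 16) | ((uint32_t)p[ofst + 3] << 24);
}

static uint16_t mcdi_word(const uint8_t *p, unsigned int ofst)
{
	return (uint16_t)(p[ofst] | (p[ofst + 1] << 8));
}

static void show_nvram_metadata(const uint8_t *resp, size_t len)
{
	uint32_t flags = mcdi_dword(resp, MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST);

	if ((flags >> MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN) & 1)
		printf("version %u.%u.%u.%u\n",
		       mcdi_word(resp, MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST),
		       mcdi_word(resp, MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST),
		       mcdi_word(resp, MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST),
		       mcdi_word(resp, MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST));

	if (((flags >> MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN) & 1) &&
	    MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) > 0)
		// Zero-terminated string starting at byte 20 of the response.
		printf("description: %s\n",
		       (const char *)resp + MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST);
}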
+ */ +#define MC_CMD_NVRAM_METADATA 0x52 +#undef MC_CMD_0x52_PRIVILEGE_CTG + +#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_NVRAM_METADATA_IN msgrequest */ +#define MC_CMD_NVRAM_METADATA_IN_LEN 4 +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4 + +/* MC_CMD_NVRAM_METADATA_OUT msgresponse */ +#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1) +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 +/* Subtype ID code for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4 +/* 1st component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2 +/* 2nd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2 +/* 3rd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2 +/* 4th component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2 +/* Zero-terminated string describing the content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000 + + +/***********************************/ +/* MC_CMD_GET_MAC_ADDRESSES + * Returns the base MAC, count and stride for the requesting function + */ +#define MC_CMD_GET_MAC_ADDRESSES 0x55 +#undef MC_CMD_0x55_PRIVILEGE_CTG + +#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */ +#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0 + +/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16 +/* Base MAC address */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6 +/* Padding */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 +/* Number of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4 +/* Spacing of allocated MAC addresses */ +#define 
MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4 + + +/***********************************/ +/* MC_CMD_CLP + * Perform a CLP related operation, see SF-110495-PS for details of CLP + * processing. This command has been extended to accommodate the requirements of + * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC, + * SF-120509-TC and SF-117282-PS. + */ +#define MC_CMD_CLP 0x56 +#undef MC_CMD_0x56_PRIVILEGE_CTG + +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_CLP_IN msgrequest */ +#define MC_CMD_CLP_IN_LEN 4 +/* Sub operation */ +#define MC_CMD_CLP_IN_OP_OFST 0 +#define MC_CMD_CLP_IN_OP_LEN 4 +/* enum: Return to factory default settings */ +#define MC_CMD_CLP_OP_DEFAULT 0x1 +/* enum: Set MAC address */ +#define MC_CMD_CLP_OP_SET_MAC 0x2 +/* enum: Get MAC address */ +#define MC_CMD_CLP_OP_GET_MAC 0x3 +/* enum: Set UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_SET_BOOT 0x4 +/* enum: Get UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_GET_BOOT 0x5 + +/* MC_CMD_CLP_OUT msgresponse */ +#define MC_CMD_CLP_OUT_LEN 0 + +/* MC_CMD_CLP_IN_DEFAULT msgrequest */ +#define MC_CMD_CLP_IN_DEFAULT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_OUT_DEFAULT msgresponse */ +#define MC_CMD_CLP_OUT_DEFAULT_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_LEN 12 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 + * restores the permanent (factory-programmed) MAC address associated with the + * port. A non-zero MAC address persists until a PCIe reset or a power cycle. + */ +#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_OUT_SET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_SET_MAC_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 + * restores the permanent (factory-programmed) MAC address associated with the + * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
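As an illustrative sketch (reusing the mcdi_put_dword() little-endian helper shown earlier under MC_CMD_MRSFP_TWEAK; <string.h> assumed), a SET_MAC_V2 request can be assembled from the defines that follow: the sub-operation dword selects MC_CMD_CLP_OP_SET_MAC, the six MAC bytes land at offset 4, the two reserved padding bytes stay zero, and the VIRTUAL flag occupies bit 0 of the FLAGS dword at offset 12.

static void build_clp_set_mac_v2(uint8_t *req, const uint8_t mac[6], int virtual_mac)
{
	memset(req, 0, MC_CMD_CLP_IN_SET_MAC_V2_LEN);   // also clears the RESERVED padding
	mcdi_put_dword(req, MC_CMD_CLP_IN_OP_OFST, MC_CMD_CLP_OP_SET_MAC);
	memcpy(req + MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST, mac,
	       MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN);
	if (virtual_mac)
		mcdi_put_dword(req, MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST,
			       1u << MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN);
	// An all-zero MAC instead restores the factory-programmed address, as noted above.
}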
+ */ +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1 + +/* MC_CMD_CLP_IN_GET_MAC msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1 + +/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 +/* MAC address assigned to port */ +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* Boot flag */ +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 + +/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 + +/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ + +/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 +/* Boot flag */ +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 + + +/***********************************/ +/* MC_CMD_MUM + * Perform a MUM operation + */ +#define MC_CMD_MUM 0x57 +#undef MC_CMD_0x57_PRIVILEGE_CTG + +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_MUM_IN msgrequest */ +#define MC_CMD_MUM_IN_LEN 4 +#define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_OP_OFST 0 +#define MC_CMD_MUM_IN_OP_LBN 0 +#define MC_CMD_MUM_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to MUM */ +#define MC_CMD_MUM_OP_NULL 0x1 +/* enum: Get MUM version */ +#define MC_CMD_MUM_OP_GET_VERSION 0x2 +/* enum: Issue raw I2C command to MUM */ +#define MC_CMD_MUM_OP_RAW_CMD 0x3 +/* enum: Read from registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_READ 0x4 +/* enum: Write to registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_WRITE 0x5 +/* enum: Control UART logging. 
*/ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc +/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage + * level) from MUM + */ +#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 +#define MC_CMD_MUM_IN_CMD_LEN 4 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 +/* Number of words to read. 
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 +#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_LEN 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* 
MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ 
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ 
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 + +/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ +#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_CMD_LEN 4 */ + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define 
MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 +/* The second 32-bit word read from the GPIO OUT register. 
*/ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define 
MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1) +/* in bytes */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016 + +/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4 + +/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4 + +/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8) +/* Discrete (soldered) DDR resistor strap info */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 +/* Number of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4 +/* Array of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8 +/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0 +/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1 +/* enum: Total number of SODIMM banks */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2 +#define
MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */ +/* enum: Values 5-15 are reserved for future usage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4 +/* enum: No module present */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0 +/* enum: Module present supported and powered on */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1 +/* enum: Module present but bad type */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2 +/* enum: Module present but incompatible voltage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3 +/* enum: Module present but unknown SPD */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4 +/* enum: Module present but slot cannot support it */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5 +/* enum: Modules may or may not be present, but cannot establish contact by I2C + */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12 + +/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This + * should match the equivalent structure in the sensor_query SPHINX service. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24 +/* A value below this will trigger a warning event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32 +/* A value below this will trigger a critical event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32 +/* A value below this will shut down the card. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32 +/* A value above this will trigger a warning event. 
*/ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32 +/* A value above this will trigger a critical event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32 +/* A value above this will shut down the card. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32 + +/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor. + * This should match the equivalent structure in the sensor_query SPHINX + * service. + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64 +/* The handle used to identify the sensor in calls to + * MC_CMD_DYNAMIC_SENSORS_GET_VALUES + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32 +/* A human-readable name for the sensor (zero terminated string, max 32 bytes) + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256 +/* The type of the sensor device, and by implication the unit that the + * values will be reported in + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4 +/* enum: A voltage sensor. Unit is mV */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0 +/* enum: A current sensor. Unit is mA */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1 +/* enum: A power sensor. Unit is mW */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2 +/* enum: A temperature sensor. Unit is Celsius */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3 +/* enum: A cooling fan sensor. Unit is RPM */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32 +/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192 + +/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor. + * This should match the equivalent structure in the sensor_query SPHINX + * service.
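A minimal sketch of decoding one 12-byte MC_CMD_DYNAMIC_SENSORS_READING structure using the field offsets defined below (mcdi_dword() is the little-endian helper sketched earlier; <stdint.h> and <stdio.h> assumed). The state enum determines whether the value can be trusted.

static void show_reading(const uint8_t *r /* one READING structure */)
{
	uint32_t handle = mcdi_dword(r, MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST);
	uint32_t value  = mcdi_dword(r, MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST);
	uint32_t state  = mcdi_dword(r, MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST);

	// BROKEN, NO_READING and INIT_FAILED all mean the value should be ignored.
	if (state == MC_CMD_DYNAMIC_SENSORS_READING_BROKEN ||
	    state == MC_CMD_DYNAMIC_SENSORS_READING_NO_READING ||
	    state == MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED)
		printf("sensor %u: no usable reading (state %u)\n", handle, state);
	else
		printf("sensor %u: value %u (state %u)\n", handle, value, state);
}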
+ */ +#define MC_CMD_DYNAMIC_SENSORS_READING_LEN 12 +/* The handle used to identify the sensor */ +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32 +/* The current value of the sensor */ +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32 +/* The sensor's condition, e.g. good, broken or removed */ +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4 +/* enum: Sensor working normally within limits */ +#define MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0 +/* enum: Warning threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1 +/* enum: Critical threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2 +/* enum: Fatal threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3 +/* enum: Sensor not working */ +#define MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4 +/* enum: Sensor working but no reading available */ +#define MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5 +/* enum: Sensor initialization failed */ +#define MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_LIST + * Return a complete list of handles for sensors currently managed by the MC, + * and a generation count for this version of the sensor table. On systems + * advertising the DYNAMIC_SENSORS capability bit, this replaces the + * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors + * added by the NMC. + * + * Sensor handles are persistent for the lifetime of the sensor and are used to + * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and + * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. + * + * The generation count is maintained by the MC, is persistent across reboots + * and will be incremented each time the sensor table is modified. When the + * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated + * containing the new generation count. The driver should compare this against + * the current generation count, and if it is different, call + * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table. + * + * The sensor count is provided to allow a future path to supporting more than + * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e. + * the maximum number that will fit in a single response. As this is a fairly + * large number (253) it is not anticipated that this will be needed in the + * near future, so can currently be ignored. + * + * On Riverhead this command is implemented as a wrapper for `list` in the + * sensor_query SPHINX service.
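A sketch of consuming the MC_CMD_DYNAMIC_SENSORS_LIST_OUT layout defined below, caching the generation count so it can later be compared against the value carried by a CODE_DYNAMIC_SENSORS_CHANGE event. The struct, the mcdi_dword() helper and the fixed-size handle array are illustrative assumptions; how the command itself is issued is left to the caller's MCDI transport.

struct sensor_table {
	uint32_t generation;
	uint32_t count;        // may exceed the number of handles actually returned
	uint32_t handles[MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2];
	size_t nhandles;
};

static void parse_sensor_list(const uint8_t *resp, size_t len, struct sensor_table *t)
{
	size_t i;

	t->generation = mcdi_dword(resp, MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST);
	t->count = mcdi_dword(resp, MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST);
	t->nhandles = MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len);
	for (i = 0; i < t->nhandles; i++)
		t->handles[i] = mcdi_dword(resp,
			MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST + 4 * i);
	// A changed generation count means this table is stale and LIST must be re-issued.
}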
+ */ +#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66 +#undef MC_CMD_0x66_PRIVILEGE_CTG + +#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0 + +/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4) +/* Generation count, which will be updated each time a sensor is added to or + * removed from the MC sensor table. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4 +/* Number of sensors managed by the MC. Note that in principle, this can be + * larger than the size of the HANDLES array. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4 +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS + * Get descriptions for a set of sensors, specified as an array of sensor + * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST + * + * Any handles which do not correspond to a sensor currently managed by the MC + * will be dropped from the response. This may happen when a sensor table + * update is in progress, and effectively means the set of usable sensors is + * the intersection between the sets of sensors known to the driver and the MC. + * + * On Riverhead this command is implemented as a wrapper for + * `get_descriptions` in the sensor_query SPHINX service.
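A sketch of walking the MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT response defined below: the number of 64-byte MC_CMD_DYNAMIC_SENSORS_DESCRIPTION records follows from the response length, and because the MC may have dropped handles, records are matched back to the request by their HANDLE field rather than by position. mcdi_dword() and printf() are illustrative, as before.

static void show_descriptions(const uint8_t *resp, size_t len)
{
	size_t n = MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len);
	size_t i;

	for (i = 0; i < n; i++) {
		const uint8_t *d = resp +
			MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST +
			i * MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN;
		uint32_t handle = mcdi_dword(d, MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST);
		uint32_t type = mcdi_dword(d, MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST);
		const char *name = (const char *)d + MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST;

		printf("sensor %u: %.32s (type %u)\n", handle, name, type);
	}
}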
+ */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67 +#undef MC_CMD_0x67_PRIVILEGE_CTG + +#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4) +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255 + +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64) +/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS + * Read the state and value for a set of sensors, specified as an array of + * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. + * + * In the case of a broken sensor, the state of the response's + * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value + * provided should be treated as erroneous. + * + * Any handles which do not correspond to a sensor currently managed by the MC + * will be dropped from the response. This may happen when a sensor table + * update is in progress, and effectively means the set of usable sensors is + * the intersection between the sets of sensors known to the driver and the MC. + * + * On Riverhead this command is implemented as a wrapper for `get_readings` + * in the sensor_query SPHINX service.
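A sketch of the request side: packing a set of sensor handles into MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN and returning the resulting payload length. The corresponding response is an array of the MC_CMD_DYNAMIC_SENSORS_READING structures decoded in the structuredef sketch further up; mcdi_put_dword() remains the illustrative little-endian helper, and the caller is assumed to cap nhandles at the relevant *_HANDLES_MAXNUM limit for its MCDI version.

static size_t build_get_readings_in(uint8_t *req, const uint32_t *handles, size_t nhandles)
{
	size_t i;

	for (i = 0; i < nhandles; i++)
		mcdi_put_dword(req,
			       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST + 4 * i,
			       handles[i]);
	return MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(nhandles); // request length in bytes
}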
+ */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68 +#undef MC_CMD_0x68_PRIVILEGE_CTG + +#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4) +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255 + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12) +/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85 + + +/***********************************/ +/* MC_CMD_EVENT_CTRL + * Configure which categories of unsolicited events the driver expects to + * receive (Riverhead). + */ +#define MC_CMD_EVENT_CTRL 0x69 +#undef MC_CMD_0x69_PRIVILEGE_CTG + +#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVENT_CTRL_IN msgrequest */ +#define MC_CMD_EVENT_CTRL_IN_LENMIN 0 +#define MC_CMD_EVENT_CTRL_IN_LENMAX 252 +#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020 +#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num)) +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4) +/* Array of event categories for which the driver wishes to receive events. */ +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255 +/* enum: Driver wishes to receive LINKCHANGE events. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0 +/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events. + */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1 +/* enum: Driver wishes to receive receive errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2 +/* enum: Driver wishes to receive transmit errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3 +/* enum: Driver wishes to receive firmware alerts. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4 +/* enum: Driver wishes to receive reboot events. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5 + +/* MC_CMD_EVENT_CTRL_OUT msgrequest */ +#define MC_CMD_EVENT_CTRL_OUT_LEN 0 + +/* EVB_PORT_ID structuredef */ +#define EVB_PORT_ID_LEN 4 +#define EVB_PORT_ID_PORT_ID_OFST 0 +#define EVB_PORT_ID_PORT_ID_LEN 4 +/* enum: An invalid port handle. 
*/ +#define EVB_PORT_ID_NULL 0x0 +/* enum: The port assigned to this function.. */ +#define EVB_PORT_ID_ASSIGNED 0x1000000 +/* enum: External network port 0 */ +#define EVB_PORT_ID_MAC0 0x2000000 +/* enum: External network port 1 */ +#define EVB_PORT_ID_MAC1 0x2000001 +/* enum: External network port 2 */ +#define EVB_PORT_ID_MAC2 0x2000002 +/* enum: External network port 3 */ +#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_PORT_ID_LBN 0 +#define EVB_PORT_ID_PORT_ID_WIDTH 32 + +/* EVB_VLAN_TAG structuredef */ +#define EVB_VLAN_TAG_LEN 2 +/* The VLAN tag value */ +#define EVB_VLAN_TAG_VLAN_ID_LBN 0 +#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12 +#define EVB_VLAN_TAG_MODE_LBN 12 +#define EVB_VLAN_TAG_MODE_WIDTH 4 +/* enum: Insert the VLAN. */ +#define EVB_VLAN_TAG_INSERT 0x0 +/* enum: Replace the VLAN if already present. */ +#define EVB_VLAN_TAG_REPLACE 0x1 + +/* BUFTBL_ENTRY structuredef */ +#define BUFTBL_ENTRY_LEN 12 +/* the owner ID */ +#define BUFTBL_ENTRY_OID_OFST 0 +#define BUFTBL_ENTRY_OID_LEN 2 +#define BUFTBL_ENTRY_OID_LBN 0 +#define BUFTBL_ENTRY_OID_WIDTH 16 +/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */ +#define BUFTBL_ENTRY_PGSZ_OFST 2 +#define BUFTBL_ENTRY_PGSZ_LEN 2 +#define BUFTBL_ENTRY_PGSZ_LBN 16 +#define BUFTBL_ENTRY_PGSZ_WIDTH 16 +/* the raw 64-bit address field from the SMC, not adjusted for page size */ +#define BUFTBL_ENTRY_RAWADDR_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_LEN 8 +#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8 +#define BUFTBL_ENTRY_RAWADDR_LBN 32 +#define BUFTBL_ENTRY_RAWADDR_WIDTH 64 + +/* NVRAM_PARTITION_TYPE structuredef */ +#define NVRAM_PARTITION_TYPE_LEN 2 +#define NVRAM_PARTITION_TYPE_ID_OFST 0 +#define NVRAM_PARTITION_TYPE_ID_LEN 2 +/* enum: Primary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 +/* enum: Secondary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 +/* enum: Expansion ROM partition */ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 +/* enum: Static configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 +/* enum: Dynamic configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 +/* enum: Expansion ROM configuration data for port 0 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 +/* enum: Expansion ROM configuration data for port 1 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 +/* enum: Expansion ROM configuration data for port 2 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 +/* enum: Expansion ROM configuration data for port 3 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 +/* enum: Non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_LOG 0x700 +/* enum: Non-volatile log output of second core on dual-core device */ +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 +/* enum: Device state dump output partition */ +#define NVRAM_PARTITION_TYPE_DUMP 0x800 +/* enum: Application license key storage partition */ +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Primary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +/* enum: Secondary FPGA partition */ +#define 
NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: SUC firmware partition (this is intentionally an alias of + * MUM_FIRMWARE) + */ +#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. */ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +/* enum: UEFI expansion ROM if separate from PXE */ +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Used by the expansion ROM for logging */ +#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000 +/* enum: Used for XIP code of shmbooted images */ +#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100 +/* enum: Spare partition 2 */ +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +/* enum: Manufacturing partition. Used during manufacture to pass information + * between XJTAG and Manftest. + */ +#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300 +/* enum: Spare partition 4 */ +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +/* enum: Spare partition 5 */ +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 +/* enum: Partition for reporting MC status. See mc_flash_layout.h + * medford_mc_status_hdr_t for layout on Medford. + */ +#define NVRAM_PARTITION_TYPE_STATUS 0x1600 +/* enum: Spare partition 13 */ +#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700 +/* enum: Spare partition 14 */ +#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800 +/* enum: Spare partition 15 */ +#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900 +/* enum: Spare partition 16 */ +#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00 +/* enum: Factory defaults for dynamic configuration */ +#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00 +/* enum: Factory defaults for expansion ROM configuration */ +#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00 +/* enum: Field Replaceable Unit inventory information for use on IPMI + * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a + * subset of the information stored in this partition. + */ +#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 +/* enum: Bundle image partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00 +/* enum: Bundle metadata partition that holds additional information related to + * a bundle update in TLV format + */ +#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01 +/* enum: Bundle update non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02 +/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. 
*/ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03 +/* enum: Start of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +/* enum: End of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +/* enum: Recovery partition map (provided if real map is missing or corrupt) */ +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +/* enum: Partition map (real map as stored in flash) */ +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_ID_LBN 0 +#define NVRAM_PARTITION_TYPE_ID_WIDTH 16 + +/* LICENSED_APP_ID structuredef */ +#define LICENSED_APP_ID_LEN 4 +#define LICENSED_APP_ID_ID_OFST 0 +#define LICENSED_APP_ID_ID_LEN 4 +/* enum: OpenOnload */ +#define LICENSED_APP_ID_ONLOAD 0x1 +/* enum: PTP timestamping */ +#define LICENSED_APP_ID_PTP 0x2 +/* enum: SolarCapture Pro */ +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 +/* enum: TCP Direct */ +#define LICENSED_APP_ID_TCP_DIRECT 0x100 +/* enum: Low Latency */ +#define LICENSED_APP_ID_LOW_LATENCY 0x200 +/* enum: SolarCapture Tap */ +#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400 +/* enum: Capture SolarSystem 40G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800 +/* enum: Capture SolarSystem 1G */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000 +/* enum: ScaleOut Onload */ +#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000 +/* enum: SCS Network Analytics Dashboard */ +#define LICENSED_APP_ID_DSHBRD 0x4000 +/* enum: SolarCapture Trading Analytics */ +#define LICENSED_APP_ID_SCATRD 0x8000 +#define LICENSED_APP_ID_ID_LBN 0 +#define LICENSED_APP_ID_ID_WIDTH 32 + +/* LICENSED_FEATURES structuredef */ +#define LICENSED_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_FEATURES_MASK_OFST 0 +#define LICENSED_FEATURES_MASK_LEN 8 +#define LICENSED_FEATURES_MASK_LO_OFST 0 +#define LICENSED_FEATURES_MASK_HI_OFST 4 +#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_PIO_OFST 0 +#define LICENSED_FEATURES_PIO_LBN 1 +#define LICENSED_FEATURES_PIO_WIDTH 1 +#define LICENSED_FEATURES_EVQ_TIMER_OFST 0 +#define LICENSED_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_FEATURES_CLOCK_OFST 0 +#define LICENSED_FEATURES_CLOCK_LBN 3 +#define LICENSED_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0 +#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0 +#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_RX_SNIFF_OFST 0 +#define LICENSED_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_TX_SNIFF_OFST 0 +#define LICENSED_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define 
LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_MASK_LBN 0 +#define LICENSED_FEATURES_MASK_WIDTH 64 + +/* LICENSED_V3_APPS structuredef */ +#define LICENSED_V3_APPS_LEN 8 +/* Bitmask of licensed applications */ +#define LICENSED_V3_APPS_MASK_OFST 0 +#define LICENSED_V3_APPS_MASK_LEN 8 +#define LICENSED_V3_APPS_MASK_LO_OFST 0 +#define LICENSED_V3_APPS_MASK_HI_OFST 4 +#define LICENSED_V3_APPS_ONLOAD_OFST 0 +#define LICENSED_V3_APPS_ONLOAD_LBN 0 +#define LICENSED_V3_APPS_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_PTP_OFST 0 +#define LICENSED_V3_APPS_PTP_LBN 1 +#define LICENSED_V3_APPS_PTP_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 +#define LICENSED_V3_APPS_SOLARSECURE_OFST 0 +#define LICENSED_V3_APPS_SOLARSECURE_LBN 3 +#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 +#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0 +#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 +#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 +#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0 +#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8 +#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1 +#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0 +#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9 +#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_OFST 0 +#define LICENSED_V3_APPS_DSHBRD_LBN 14 +#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_OFST 0 +#define LICENSED_V3_APPS_SCATRD_LBN 15 +#define LICENSED_V3_APPS_SCATRD_WIDTH 1 +#define LICENSED_V3_APPS_MASK_LBN 0 +#define LICENSED_V3_APPS_MASK_WIDTH 64 + +/* LICENSED_V3_FEATURES structuredef */ +#define LICENSED_V3_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_V3_FEATURES_MASK_OFST 0 +#define LICENSED_V3_FEATURES_MASK_LEN 8 +#define LICENSED_V3_FEATURES_MASK_LO_OFST 0 +#define LICENSED_V3_FEATURES_MASK_HI_OFST 4 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_PIO_OFST 0 +#define LICENSED_V3_FEATURES_PIO_LBN 1 +#define LICENSED_V3_FEATURES_PIO_WIDTH 1 +#define LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0 +#define 
LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_V3_FEATURES_CLOCK_OFST 0 +#define LICENSED_V3_FEATURES_CLOCK_LBN 3 +#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_SNIFF_OFST 0 +#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_SNIFF_OFST 0 +#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_MASK_LBN 0 +#define LICENSED_V3_FEATURES_MASK_WIDTH 64 + +/* TX_TIMESTAMP_EVENT structuredef */ +#define TX_TIMESTAMP_EVENT_LEN 6 +/* lower 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16 +/* Type of TX event, ordinary TX completion, low or high part of TX timestamp + */ +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 +/* enum: This is a TX completion event, not a timestamp */ +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is a TX completion event for a CTPIO transmit. The event format + * is the same as for TX_EV_COMPLETION. + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11 +/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_LO + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12 +/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The + * event format is the same as for TX_EV_TSTAMP_HI + */ +#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13 +/* enum: This is the low part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +/* enum: This is the high part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 +/* upper 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16 + +/* RSS_MODE structuredef */ +#define RSS_MODE_LEN 1 +/* The RSS mode for a particular packet type is a value from 0 - 15 which can + * be considered as 4 bits selecting which fields are included in the hash. (A + * value 0 effectively disables RSS spreading for the packet type.) The YAML + * generation tools require this structure to be a whole number of bytes wide, + * but only 4 bits are relevant. 
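To make the 4-bit selector described above concrete: it is simply an OR of the single-bit fields whose positions (HASH_*_LBN) are defined just below. A minimal sketch, not driver code:

#include <stdint.h>

/* Sketch only: compose an RSS_MODE hash selector from the bit positions
 * defined just below. 0 disables RSS spreading for that packet type;
 * 0xf hashes on the full source/destination address and port 4-tuple.
 * Each argument is 0 or 1.
 */
static inline uint8_t rss_mode_selector(unsigned int src_addr, unsigned int dst_addr,
                                        unsigned int src_port, unsigned int dst_port)
{
        return (src_addr << RSS_MODE_HASH_SRC_ADDR_LBN) |
               (dst_addr << RSS_MODE_HASH_DST_ADDR_LBN) |
               (src_port << RSS_MODE_HASH_SRC_PORT_LBN) |
               (dst_port << RSS_MODE_HASH_DST_PORT_LBN);
}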
+ */ +#define RSS_MODE_HASH_SELECTOR_OFST 0 +#define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_OFST 0 +#define RSS_MODE_HASH_SRC_ADDR_LBN 0 +#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_OFST 0 +#define RSS_MODE_HASH_DST_ADDR_LBN 1 +#define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_OFST 0 +#define RSS_MODE_HASH_SRC_PORT_LBN 2 +#define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_OFST 0 +#define RSS_MODE_HASH_DST_PORT_LBN 3 +#define RSS_MODE_HASH_DST_PORT_WIDTH 1 +#define RSS_MODE_HASH_SELECTOR_LBN 0 +#define RSS_MODE_HASH_SELECTOR_WIDTH 8 + +/* CTPIO_STATS_MAP structuredef */ +#define CTPIO_STATS_MAP_LEN 4 +/* The (function relative) VI number */ +#define CTPIO_STATS_MAP_VI_OFST 0 +#define CTPIO_STATS_MAP_VI_LEN 2 +#define CTPIO_STATS_MAP_VI_LBN 0 +#define CTPIO_STATS_MAP_VI_WIDTH 16 +/* The target bucket for the VI */ +#define CTPIO_STATS_MAP_BUCKET_OFST 2 +#define CTPIO_STATS_MAP_BUCKET_LEN 2 +#define CTPIO_STATS_MAP_BUCKET_LBN 16 +#define CTPIO_STATS_MAP_BUCKET_WIDTH 16 + + +/***********************************/ +/* MC_CMD_READ_REGS + * Get a dump of the MCPU registers + */ +#define MC_CMD_READ_REGS 0x50 +#undef MC_CMD_0x50_PRIVILEGE_CTG + +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_REGS_IN msgrequest */ +#define MC_CMD_READ_REGS_IN_LEN 0 + +/* MC_CMD_READ_REGS_OUT msgresponse */ +#define MC_CMD_READ_REGS_OUT_LEN 308 +/* Whether the corresponding register entry contains a valid value */ +#define MC_CMD_READ_REGS_OUT_MASK_OFST 0 +#define MC_CMD_READ_REGS_OUT_MASK_LEN 16 +/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr, + * fir, fp) + */ +#define MC_CMD_READ_REGS_OUT_REGS_OFST 16 +#define MC_CMD_READ_REGS_OUT_REGS_LEN 4 +#define MC_CMD_READ_REGS_OUT_REGS_NUM 73 + + +/***********************************/ +/* MC_CMD_INIT_EVQ + * Set up an event queue according to the supplied parameters. The IN arguments + * end with an address for each 4k of host memory required to back the EVQ. + */ +#define MC_CMD_INIT_EVQ 0x80 +#undef MC_CMD_0x80_PRIVILEGE_CTG + +#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_EVQ_IN msgrequest */ +#define MC_CMD_INIT_EVQ_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
+ */ +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4 +/* tbd */ +#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. 
*/ +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_OUT_LEN 4 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4 + +/* MC_CMD_INIT_EVQ_V2_IN msgrequest */ +#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548 +#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. + */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4 +/* tbd */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4 +/* enum: All initialisation flags specified by host. */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the lowest latency achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1 +/* enum: MEDFORD only. 
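To make the MC_CMD_INIT_EVQ_IN layout defined above concrete: the request is a fixed 36-byte header followed by one 8-byte DMA address per 4 KiB of event-queue memory, so MC_CMD_INIT_EVQ_IN_LEN(n) = 36 + 8*n. The following is a minimal sketch (not the driver's code; put_dword() and pack_init_evq() are hypothetical helpers) of filling the fixed fields and the address array; MCDI fields are little-endian dwords.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: write a little-endian dword at a byte offset. */
static void put_dword(uint8_t *buf, size_t ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Sketch: build a minimal interrupting MC_CMD_INIT_EVQ request for an
 * event queue of 'entries' entries backed by 'n_pages' 4 KiB pages whose
 * DMA addresses are in dma_addr[]. Returns the request length, 0 on error.
 */
static size_t pack_init_evq(uint8_t *buf, uint32_t entries, uint32_t instance,
                            const uint64_t *dma_addr, unsigned int n_pages)
{
        uint32_t flags = 1u << MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN;
        unsigned int i;

        if (n_pages < MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM ||
            n_pages > MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM)
                return 0;

        put_dword(buf, MC_CMD_INIT_EVQ_IN_SIZE_OFST, entries);
        put_dword(buf, MC_CMD_INIT_EVQ_IN_INSTANCE_OFST, instance);
        put_dword(buf, MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST, 0);
        put_dword(buf, MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST, 0);
        put_dword(buf, MC_CMD_INIT_EVQ_IN_FLAGS_OFST, flags);
        put_dword(buf, MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
        /* Per the field comment above, a real caller would normally pass
         * MC_CMD_RESOURCE_INSTANCE_ANY here rather than a fixed IRQ.
         */
        put_dword(buf, MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST, 0);
        put_dword(buf, MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST, MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
        put_dword(buf, MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST, 0);

        for (i = 0; i < n_pages; i++) {
                put_dword(buf, MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST + 8 * i,
                          (uint32_t)dma_addr[i]);
                put_dword(buf, MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST + 8 * i,
                          (uint32_t)(dma_addr[i] >> 32));
        }
        return MC_CMD_INIT_EVQ_IN_LEN(n_pages);
}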
Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the best throughput achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2 +/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by + * firmware based on licenses and firmware variant. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24 +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. 
*/ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64 + +/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4 +/* Actual configuration applied on the card */ +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 + +/* QUEUE_CRC_MODE structuredef */ +#define QUEUE_CRC_MODE_LEN 1 +#define QUEUE_CRC_MODE_MODE_LBN 0 +#define QUEUE_CRC_MODE_MODE_WIDTH 4 +/* enum: No CRC. */ +#define QUEUE_CRC_MODE_NONE 0x0 +/* enum: CRC Fiber channel over ethernet. */ +#define QUEUE_CRC_MODE_FCOE 0x1 +/* enum: CRC (digest) iSCSI header only. */ +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +/* enum: CRC (digest) iSCSI header and payload. */ +#define QUEUE_CRC_MODE_ISCSI 0x3 +/* enum: CRC Fiber channel over IP over ethernet. */ +#define QUEUE_CRC_MODE_FCOIPOE 0x4 +/* enum: CRC MPA. */ +#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_SPARE_LBN 4 +#define QUEUE_CRC_MODE_SPARE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_INIT_RXQ + * set up a receive queue according to the supplied parameters. The IN + * arguments end with an address for each 4k of host memory required to back + * the RXQ. + */ +#define MC_CMD_INIT_RXQ 0x81 +#undef MC_CMD_0x81_PRIVILEGE_CTG + +#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_RXQ_IN_LENMIN 36 +#define MC_CMD_INIT_RXQ_IN_LENMAX 252 +#define MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020 +#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
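For the MC_CMD_INIT_EVQ_V2_OUT response defined above, the second dword reports the configuration the firmware actually applied, which may differ from what was requested when FLAG_TYPE was not MANUAL. A minimal sketch of decoding it (the function name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: decode the "actual configuration applied on the card" word that
 * MC_CMD_INIT_EVQ_V2 returns at MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST (fields
 * defined above). 'flags' is that dword, already in host byte order.
 * CUT_THRU, TX_MERGE and RXQ_FORCE_EV_MERGING are tested the same way
 * with their respective _LBN values.
 */
static bool evq_v2_rx_event_merging_enabled(uint32_t flags)
{
        return flags & (1u << MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN);
}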
*/ +#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16 +#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10 +#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 + +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4 + +/* MC_CMD_INIT_RXQ_V3_IN msgrequest */ +#define MC_CMD_INIT_RXQ_V3_IN_LEN 560 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
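The ES_* fields above only apply when DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. From their descriptions, the writable area of each packet buffer (ES_MAX_DMA_LEN) cannot exceed the buffer stride (ES_PACKET_STRIDE). A minimal sanity-check sketch follows; these constraints are only what the field comments imply, and the firmware may impose further ones not captured here.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: plausibility check for the EQUAL_STRIDE_SUPER_BUFFER
 * parameters carried at offsets 544..556 of an MC_CMD_INIT_RXQ_V3
 * request (ES_PACKET_BUFFERS_PER_BUCKET, ES_MAX_DMA_LEN,
 * ES_PACKET_STRIDE as defined above).
 */
static bool es_params_plausible(uint32_t buffers_per_bucket,
                                uint32_t max_dma_len,
                                uint32_t packet_stride)
{
        return buffers_per_bucket > 0 &&
               packet_stride > 0 &&
               max_dma_len > 0 &&
               max_dma_len <= packet_stride;
}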
+ */ +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 + +/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required + * for systems with a QDMA (currently, Riverhead) + */ +#define MC_CMD_INIT_RXQ_V4_IN_LEN 564 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
+ */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* V4 message data */ +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4 +/* Size in bytes of buffers attached to descriptors posted to this queue. Set + * to zero if using this message on non-QDMA based platforms. Currently in + * Riverhead there is a global limit of eight different buffer sizes across all + * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a + * request for a different buffer size will fail if there are already eight + * other buffer sizes in use. In future Riverhead this limit will go away and + * any size will be accepted. + */ +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4 + +/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a + * different RX packet prefix + */ +#define MC_CMD_INIT_RXQ_V5_IN_LEN 568 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* V4 message data */ +#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560 +#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4 +/* Size in bytes of buffers attached to descriptors posted to this queue. Set + * to zero if using this message on non-QDMA based platforms. Currently in + * Riverhead there is a global limit of eight different buffer sizes across all + * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a + * request for a different buffer size will fail if there are already eight + * other buffer sizes in use. In future Riverhead this limit will go away and + * any size will be accepted. + */ +#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560 +#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4 +/* Prefix id for the RX prefix format to use on packets delivered this queue. + * Zero is always a valid prefix id and means the default prefix format + * documented for the platform. Other prefix ids can be obtained by calling + * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields. 
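A short sketch of the V5-specific tail of the request (the function name is hypothetical and a little-endian host is assumed, as in the earlier sketches): BUFFER_SIZE_BYTES at offset 560 is defined above, and RX_PREFIX_ID at offset 564 is defined just below; zero selects the platform's default prefix format, while other ids would come from MC_CMD_GET_RX_PREFIX_ID.

#include <stdint.h>
#include <string.h>

/* Sketch only: fill the V5-specific tail of an MC_CMD_INIT_RXQ_V5
 * request. 2048 bytes is stated above to always be an available buffer
 * size on Riverhead; pass 0 on non-QDMA platforms.
 */
static void pack_init_rxq_v5_tail(uint8_t *buf)
{
        uint32_t bufsize = 2048;
        uint32_t prefix_id = 0; /* 0 = default RX prefix format */

        memcpy(buf + MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST,
               &bufsize, sizeof(bufsize));
        memcpy(buf + MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST,
               &prefix_id, sizeof(prefix_id));
}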
+ */ +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564 +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4 + +/* MC_CMD_INIT_RXQ_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V4_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V5_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_INIT_TXQ + */ +#define MC_CMD_INIT_TXQ 0x82 +#undef MC_CMD_0x82_PRIVILEGE_CTG + +#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. + */ +#define MC_CMD_INIT_TXQ_IN_LENMIN 36 +#define MC_CMD_INIT_TXQ_IN_LENMAX 252 +#define MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020 +#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 + +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15 +#define 
MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64 +/* Flags related to Qbb flow control mode. */ +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 + +/* MC_CMD_INIT_TXQ_OUT msgresponse */ +#define MC_CMD_INIT_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_EVQ + * Teardown an EVQ. + * + * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first + * or the operation will fail with EBUSY + */ +#define MC_CMD_FINI_EVQ 0x83 +#undef MC_CMD_0x83_PRIVILEGE_CTG + +#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_EVQ_IN msgrequest */ +#define MC_CMD_FINI_EVQ_IN_LEN 4 +/* Instance of EVQ to destroy. Should be the same instance as that previously + * passed to INIT_EVQ + */ +#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4 + +/* MC_CMD_FINI_EVQ_OUT msgresponse */ +#define MC_CMD_FINI_EVQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_RXQ + * Teardown a RXQ. + */ +#define MC_CMD_FINI_RXQ 0x84 +#undef MC_CMD_0x84_PRIVILEGE_CTG + +#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_RXQ_IN msgrequest */ +#define MC_CMD_FINI_RXQ_IN_LEN 4 +/* Instance of RXQ to destroy */ +#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4 + +/* MC_CMD_FINI_RXQ_OUT msgresponse */ +#define MC_CMD_FINI_RXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_TXQ + * Teardown a TXQ. + */ +#define MC_CMD_FINI_TXQ 0x85 +#undef MC_CMD_0x85_PRIVILEGE_CTG + +#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FINI_TXQ_IN msgrequest */ +#define MC_CMD_FINI_TXQ_IN_LEN 4 +/* Instance of TXQ to destroy */ +#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0 +#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4 + +/* MC_CMD_FINI_TXQ_OUT msgresponse */ +#define MC_CMD_FINI_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DRIVER_EVENT + * Generate an event on an EVQ belonging to the function issuing the command. 
+ */ +#define MC_CMD_DRIVER_EVENT 0x86 +#undef MC_CMD_0x86_PRIVILEGE_CTG + +#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DRIVER_EVENT_IN msgrequest */ +#define MC_CMD_DRIVER_EVENT_IN_LEN 12 +/* Handle of target EVQ */ +#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0 +#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4 +/* Bits 0 - 63 of event */ +#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4 +#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8 +#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4 +#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8 + +/* MC_CMD_DRIVER_EVENT_OUT msgresponse */ +#define MC_CMD_DRIVER_EVENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_BUFTBL_CHUNK + * Allocate a set of buffer table entries using the specified owner ID. This + * operation allocates the required buffer table entries (and fails if it + * cannot do so). The buffer table entries will initially be zeroed. + */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87 +#undef MC_CMD_0x87_PRIVILEGE_CTG + +#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 +/* Owner ID to use */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4 +/* Size of buffer table pages to use, in bytes (note that only a few values are + * legal on any specific hardware). + */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4 + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4 +/* Buffer table IDs for use in DMA descriptors. */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES + * Reprogram a set of buffer table entries in the specified chunk. 
+ */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 +#undef MC_CMD_0x88_PRIVILEGE_CTG + +#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 +/* ID */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 +/* Num entries */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 +/* Buffer table entry address */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32 + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FREE_BUFTBL_CHUNK + */ +#define MC_CMD_FREE_BUFTBL_CHUNK 0x89 +#undef MC_CMD_0x89_PRIVILEGE_CTG + +#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4 + +/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FILTER_OP + * Multiplexed MCDI call for filter operations + */ +#define MC_CMD_FILTER_OP 0x8a +#undef MC_CMD_0x8a_PRIVILEGE_CTG + +#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FILTER_OP_IN msgrequest */ +#define MC_CMD_FILTER_OP_IN_LEN 108 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_IN_OP_LEN 4 +/* enum: single-recipient filter insert */ +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +/* enum: single-recipient filter remove */ +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +/* enum: multi-recipient filter subscribe */ +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +/* enum: multi-recipient filter unsubscribe */ +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +/* enum: replace one recipient with another (warning - the filter handle may + * change) + */ +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
+ */ +#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that 
will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. + */ +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4 +/* Firmware defined register 1 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS 
context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72 +#define 
MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional + * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via + * its rte_flow API. 
This extension is only useful with the sfc_efx driver + * included as part of DPDK, used in conjunction with the dpdk datapath + * firmware variant. + */ +#define MC_CMD_FILTER_OP_V3_IN_LEN 180 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0 +#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12 +#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16 +#define 
MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20 +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24 +#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4 +/* receive mode */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28 +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define 
MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. + */ +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32 +#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36 +#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68 +#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72 +#define 
MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16 +/* Set an action for all packets matching this filter. The DPDK driver and dpdk + * f/w variant use their own specific delivery structures, which are documented + * in the DPDK Firmware Driver Interface (SF-119419-TC). 
Requesting anything + * other than MATCH_ACTION_NONE when the NIC is running another f/w variant + * will cause the filter insertion to fail with ENOTSUP. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4 +/* enum: do nothing extra */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0 +/* enum: Set the match flag in the packet prefix for packets matching the + * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "FLAG" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1 +/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching + * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to + * support the DPDK rte_flow "MARK" action. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2 +/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the + * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX) + * will cause the filter insertion to fail with EINVAL. + */ +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ +#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_INFO + * Get information related to the parser-dispatcher subsystem + */ +#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 +#undef MC_CMD_0xe4_PRIVILEGE_CTG + +#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4 +/* enum: read the list of supported RX filter matches */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 +/* enum: read properties relating to security rules (Medford-only; for use by + * SolarSecure apps, not directly by drivers. See SF-114946-SW.) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3 +/* enum: read the list of supported RX filter matches for VXLAN/NVGRE + * encapsulated frames, which follow a different match sequence to normal + * frames (Medford only) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 +/* enum: read the list of supported matches for the encapsulation detection + * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5 + +/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4) +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4 +/* array of supported match types (valid MATCH_FIELDS values for + * MC_CMD_FILTER_OP) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253 + +/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* bitfield of filter insertion restrictions */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define 
MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 + +/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is + * returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value + * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the + * supported match types that can be used in the encapsulation detection rules + * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. + */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4) +/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4 +/* array of supported match types (valid MATCH_FLAGS values for + * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253 + + +/***********************************/ +/* MC_CMD_GET_PORT_ASSIGNMENT + * Get port assignment for current PCI function. + */ +#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 +#undef MC_CMD_0xb8_PRIVILEGE_CTG + +#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0 + +/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_PORT_ASSIGNMENT + * Set port assignment for current PCI function. + */ +#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 +#undef MC_CMD_0xb9_PRIVILEGE_CTG + +#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4 + +/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_VIS + * Allocate VIs for current PCI function. 
+ */ +#define MC_CMD_ALLOC_VIS 0x8b +#undef MC_CMD_0x8b_PRIVILEGE_CTG + +#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOC_VIS_IN msgrequest */ +#define MC_CMD_ALLOC_VIS_IN_LEN 8 +/* The minimum number of VIs that is acceptable */ +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4 +/* The maximum number of VIs that would be useful */ +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4 + +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ +#define MC_CMD_ALLOC_VIS_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4 + +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4 + + +/***********************************/ +/* MC_CMD_FREE_VIS + * Free VIs for current PCI function. Any linked PIO buffers will be unlinked, + * but not freed. + */ +#define MC_CMD_FREE_VIS 0x8c +#undef MC_CMD_0x8c_PRIVILEGE_CTG + +#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FREE_VIS_IN msgrequest */ +#define MC_CMD_FREE_VIS_IN_LEN 0 + +/* MC_CMD_FREE_VIS_OUT msgresponse */ +#define MC_CMD_FREE_VIS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_SRIOV_CFG + * Get SRIOV config for this PF. + */ +#define MC_CMD_GET_SRIOV_CFG 0xba +#undef MC_CMD_0xba_PRIVILEGE_CTG + +#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0 + +/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 +/* Number of VFs currently enabled. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4 +/* RID offset of each subsequent VF from the previous. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4 + + +/***********************************/ +/* MC_CMD_SET_SRIOV_CFG + * Set SRIOV config for this PF. 
+ */ +#define MC_CMD_SET_SRIOV_CFG 0xbb +#undef MC_CMD_0xbb_PRIVILEGE_CTG + +#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 +/* Number of VFs currently enabled. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF, or 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4 +/* RID offset of each subsequent VF from the previous, 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4 + +/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VI_ALLOC_INFO + * Get information about number of VI's and base VI number allocated to this + * function. + */ +#define MC_CMD_GET_VI_ALLOC_INFO 0x8d +#undef MC_CMD_0x8d_PRIVILEGE_CTG + +#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */ +#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 + +/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4 + + +/***********************************/ +/* MC_CMD_DUMP_VI_STATE + * For CmdClient use. Dump pertinent information on a specific absolute VI. + */ +#define MC_CMD_DUMP_VI_STATE 0x8e +#undef MC_CMD_0x8e_PRIVILEGE_CTG + +#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DUMP_VI_STATE_IN msgrequest */ +#define MC_CMD_DUMP_VI_STATE_IN_LEN 4 +/* The VI number to query. */ +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4 + +/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ +#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96 +/* The PF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2 +/* The VF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2 +/* Base of VIs allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2 +/* Count of VIs allocated to the owner function. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2 +/* Base interrupt vector allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2 +/* Number of interrupt vectors allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2 +/* Raw evq ptr table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16 +/* Raw evq timer table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52 +/* Combined metadata field. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76 +/* Reserved, currently 0. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8 + + +/***********************************/ +/* MC_CMD_ALLOC_PIOBUF + * Allocate a push I/O buffer for later use with a tx queue. + */ +#define MC_CMD_ALLOC_PIOBUF 0x8f +#undef MC_CMD_0x8f_PRIVILEGE_CTG + +#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */ +#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0 + +/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */ +#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 +/* Handle for allocated push I/O buffer. 
*/ +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4 + + +/***********************************/ +/* MC_CMD_FREE_PIOBUF + * Free a push I/O buffer. + */ +#define MC_CMD_FREE_PIOBUF 0x90 +#undef MC_CMD_0x90_PRIVILEGE_CTG + +#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_FREE_PIOBUF_IN msgrequest */ +#define MC_CMD_FREE_PIOBUF_IN_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 + +/* MC_CMD_FREE_PIOBUF_OUT msgresponse */ +#define MC_CMD_FREE_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_CAPABILITIES + * Get device capabilities. + * + * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to + * reference inherent device capabilities as opposed to current NVRAM config. + */ +#define MC_CMD_GET_CAPABILITIES 0xbe +#undef MC_CMD_0xbe_PRIVILEGE_CTG + +#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CAPABILITIES_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define 
MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD 
firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 
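
As an illustration of how the V3 capability fields above might be consumed, here is a minimal sketch (not taken from the sfc driver and not part of this patch): it decodes the VI window mode and RX descriptor cache size from a raw little-endian response buffer, using the offsets and enum values defined above. The efx_example_* names, the EXAMPLE_* macros and the plain byte-buffer interface are assumptions made purely for illustration.

/* Minimal sketch: decode a few MC_CMD_GET_CAPABILITIES_V3 response fields.
 * Offsets mirror MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST (66)
 * and MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST (72); both fields
 * are a single byte.
 */
#include <stdint.h>
#include <stddef.h>

#define EXAMPLE_RX_DESC_CACHE_OFST  66
#define EXAMPLE_VI_WINDOW_MODE_OFST 72

static int efx_example_parse_caps_v3(const uint8_t *resp, size_t len,
                                     uint32_t *vi_window_bytes,
                                     uint32_t *rx_desc_cache_entries)
{
        if (len < EXAMPLE_VI_WINDOW_MODE_OFST + 1)
                return -1;      /* response too short for these fields */

        /* RX_DESC_CACHE_SIZE is the binary logarithm of the cache size,
         * so the actual entry count is 2 ^ RX_DESC_CACHE_SIZE.
         */
        *rx_desc_cache_entries = 1u << resp[EXAMPLE_RX_DESC_CACHE_OFST];

        /* VI_WINDOW_MODE selects how much address space each VI occupies:
         * 0x0 -> 8k, 0x1 -> 16k, 0x2 -> 64k (CTPIO is unavailable with
         * 8k windows).
         */
        switch (resp[EXAMPLE_VI_WINDOW_MODE_OFST]) {
        case 0x0: *vi_window_bytes = 8 * 1024;  break;
        case 0x1: *vi_window_bytes = 16 * 1024; break;
        case 0x2: *vi_window_bytes = 64 * 1024; break;
        default:  return -1;    /* unknown window mode */
        }
        return 0;
}

The V4 response defined below keeps these two fields at the same offsets, so the same decoding applies there as well.
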
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 + +/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 + +/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156 + +/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
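+ *
+ * Usage sketch (illustrative only; "outlen" stands for the actual response
+ * length reported by the MCDI transport): since older firmware returns a
+ * shorter response, check the length before reading this word, e.g.
+ *
+ *   bool have_flags3 = outlen >= MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST +
+ *                                MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN;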
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156 +/* The minimum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum number of queues that can be used by an RSS context in exclusive + * mode. In exclusive mode the context has a configurable indirection table and + * a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +/* The maximum number of queues that can be used by an RSS context in even- + * spreading mode. In even-spreading mode the context has no indirection table + * but it does have a configurable RSS key. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +/* The total number of RSS contexts supported. Note that the number of + * available contexts using indirection tables is also limited by the + * availability of indirection table space allocated from a common pool. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4 +/* The total amount of indirection table space that can be shared between RSS + * contexts. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4 + + +/***********************************/ +/* MC_CMD_V2_EXTN + * Encapsulation for a v2 extended command + */ +#define MC_CMD_V2_EXTN 0x7f + +/* MC_CMD_V2_EXTN_IN msgrequest */ +#define MC_CMD_V2_EXTN_IN_LEN 4 +/* the extended command number */ +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0 +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1 +/* the actual length of the encapsulated command (which is not in the v1 + * header) + */ +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 +#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2 +/* Type of command/response */ +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28 +#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4 +/* enum: MCDI command directed to or response originating from the MC. */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0 +/* enum: MCDI command directed to a TSA controller. MCDI responses of this type + * are not defined. + */ +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 + + +/***********************************/ +/* MC_CMD_LINK_PIOBUF + * Link a push I/O buffer to a TxQ + */ +#define MC_CMD_LINK_PIOBUF 0x92 +#undef MC_CMD_0x92_PRIVILEGE_CTG + +#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_LINK_PIOBUF_IN msgrequest */ +#define MC_CMD_LINK_PIOBUF_IN_LEN 8 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_LINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_LINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UNLINK_PIOBUF + * Unlink a push I/O buffer from a TxQ + */ +#define MC_CMD_UNLINK_PIOBUF 0x93 +#undef MC_CMD_0x93_PRIVILEGE_CTG + +#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */ +#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4 + +/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_ALLOC + * allocate and initialise a v-switch. + */ +#define MC_CMD_VSWITCH_ALLOC 0x94 +#undef MC_CMD_0x94_PRIVILEGE_CTG + +#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */ +#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 +/* The port to connect to the v-switch's upstream port. 
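+ *
+ * Request-building sketch (illustrative only; "inbuf" and "port_id" are
+ * arbitrary local names, all fields little-endian): a minimal VEB v-switch
+ * allocation fills the first two dwords and leaves FLAGS and NUM_VLAN_TAGS
+ * at zero, e.g.
+ *
+ *   u8 inbuf[MC_CMD_VSWITCH_ALLOC_IN_LEN] = { 0 };
+ *   *(__le32 *)(inbuf + MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST) =
+ *           cpu_to_le32(port_id);
+ *   *(__le32 *)(inbuf + MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST) =
+ *           cpu_to_le32(MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);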
*/ +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The type of v-switch to create. */ +#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4 +/* enum: VLAN */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 +/* enum: VEB */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 +/* enum: VEPA (obsolete) */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +/* enum: MUX */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 +/* enum: Snapper specific; semantics TBD */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 +/* Flags controlling v-port creation */ +#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 +/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators, + * this must be one or greater, and the attached v-ports must have exactly this + * number of tags. For other v-switch types, this must be zero or greater, and + * is an upper limit on the number of VLAN tags for attached v-ports. An error + * will be returned if existing configuration means we can't support attached + * v-ports with this number of tags. + */ +#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 + +/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */ +#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_FREE + * de-allocate a v-switch. + */ +#define MC_CMD_VSWITCH_FREE 0x95 +#undef MC_CMD_0x95_PRIVILEGE_CTG + +#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VSWITCH_FREE_IN msgrequest */ +#define MC_CMD_VSWITCH_FREE_IN_LEN 4 +/* The port to which the v-switch is connected. */ +#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VSWITCH_FREE_OUT msgresponse */ +#define MC_CMD_VSWITCH_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_QUERY + * read some config of v-switch. For now this command is an empty placeholder. + * It may be used to check if a v-switch is connected to a given EVB port (if + * not, then the command returns ENOENT). + */ +#define MC_CMD_VSWITCH_QUERY 0x63 +#undef MC_CMD_0x63_PRIVILEGE_CTG + +#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VSWITCH_QUERY_IN msgrequest */ +#define MC_CMD_VSWITCH_QUERY_IN_LEN 4 +/* The port to which the v-switch is connected. */ +#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */ +#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VPORT_ALLOC + * allocate a v-port. + */ +#define MC_CMD_VPORT_ALLOC 0x96 +#undef MC_CMD_0x96_PRIVILEGE_CTG + +#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_ALLOC_IN msgrequest */ +#define MC_CMD_VPORT_ALLOC_IN_LEN 20 +/* The port to which the v-switch is connected. */ +#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The type of the new v-port.
*/ +#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4 +/* enum: VLAN (obsolete) */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 +/* enum: VEB (obsolete) */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 +/* enum: VEPA (obsolete) */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 +/* enum: A normal v-port receives packets which match a specified MAC and/or + * VLAN. + */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 +/* enum: An expansion v-port receives traffic which does not match any other + * v-port. + */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 +/* enum: A test v-port receives packets which match any filters installed by + * its downstream components. + */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 +/* Flags controlling v-port creation */ +#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1 +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch. + */ +#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16 + +/* MC_CMD_VPORT_ALLOC_OUT msgresponse */ +#define MC_CMD_VPORT_ALLOC_OUT_LEN 4 +/* The handle of the new v-port */ +#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_VPORT_FREE + * de-allocate a v-port. + */ +#define MC_CMD_VPORT_FREE 0x97 +#undef MC_CMD_0x97_PRIVILEGE_CTG + +#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_FREE_IN msgrequest */ +#define MC_CMD_VPORT_FREE_IN_LEN 4 +/* The handle of the v-port */ +#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4 + +/* MC_CMD_VPORT_FREE_OUT msgresponse */ +#define MC_CMD_VPORT_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_ALLOC + * allocate a v-adaptor. + */ +#define MC_CMD_VADAPTOR_ALLOC 0x98 +#undef MC_CMD_0x98_PRIVILEGE_CTG + +#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */ +#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30 +/* The port to connect to the v-adaptor's port.
*/ +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* Flags controlling v-adaptor creation */ +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +/* The number of VLAN tags to strip on receive */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4 +/* The number of VLAN tags to transparently insert/remove. */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16 +/* The MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 +/* enum: Derive the MAC address from the upstream port */ +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 + +/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_FREE + * de-allocate a v-adaptor. + */ +#define MC_CMD_VADAPTOR_FREE 0x99 +#undef MC_CMD_0x99_PRIVILEGE_CTG + +#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_FREE_IN msgrequest */ +#define MC_CMD_VADAPTOR_FREE_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ +#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_SET_MAC + * assign a new MAC address to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_SET_MAC 0x5d +#undef MC_CMD_0x5d_PRIVILEGE_CTG + +#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The new MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 + +/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_GET_MAC + * read the MAC address assigned to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_GET_MAC 0x5e +#undef MC_CMD_0x5e_PRIVILEGE_CTG + +#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 +/* The port to which the v-adaptor is connected. 
*/ +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 +/* The MAC address assigned to this v-adaptor */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6 + + +/***********************************/ +/* MC_CMD_VADAPTOR_QUERY + * read some config of v-adaptor. + */ +#define MC_CMD_VADAPTOR_QUERY 0x61 +#undef MC_CMD_0x61_PRIVILEGE_CTG + +#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */ +#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */ +#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12 +/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4 +/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */ +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4 +#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4 +/* The number of VLAN tags that may still be added */ +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8 +#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 + + +/***********************************/ +/* MC_CMD_EVB_PORT_ASSIGN + * assign a port to a PCI function. + */ +#define MC_CMD_EVB_PORT_ASSIGN 0x9a +#undef MC_CMD_0x9a_PRIVILEGE_CTG + +#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 +/* The port to assign. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4 +/* The target function to modify. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16 + +/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */ +#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RDWR_A64_REGIONS + * Assign the 64 bit region addresses. + */ +#define MC_CMD_RDWR_A64_REGIONS 0x9b +#undef MC_CMD_0x9b_PRIVILEGE_CTG + +#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ +#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4 +/* Write enable bits 0-3, set to write, clear to read. 
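+ *
+ * Usage sketch (illustrative only; "inbuf" is an arbitrary local name): to
+ * write REGION0 and REGION2 while reading back the other two regions, set
+ * bits 0 and 2 in the mask byte at offset 16, e.g.
+ *
+ *   inbuf[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST] =
+ *           (1U << 0) | (1U << 2);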
*/ +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1 + +/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included + * regardless of state of write bits in the request. + */ +#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4 + + +/***********************************/ +/* MC_CMD_ONLOAD_STACK_ALLOC + * Allocate an Onload stack ID. + */ +#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c +#undef MC_CMD_0x9c_PRIVILEGE_CTG + +#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */ +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4 +/* The handle of the owning upstream port */ +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 + +/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */ +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4 +/* The handle of the new Onload stack */ +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4 + + +/***********************************/ +/* MC_CMD_ONLOAD_STACK_FREE + * Free an Onload stack ID. + */ +#define MC_CMD_ONLOAD_STACK_FREE 0x9d +#undef MC_CMD_0x9d_PRIVILEGE_CTG + +#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + +/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */ +#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4 +/* The handle of the Onload stack */ +#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0 +#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4 + +/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */ +#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_ALLOC + * Allocate an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e +#undef MC_CMD_0x9e_PRIVILEGE_CTG + +#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12 +/* The handle of the owning upstream port */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 +/* The type of context to allocate */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4 +/* enum: Allocate a context for exclusive use. The key and indirection table + * must be explicitly configured. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 +/* enum: Allocate a context for shared use; this will spread across a range of + * queues, but the key and indirection table are pre-configured and may not be + * changed. For this mode, NUM_QUEUES must 2, 4, 8, 16, 32 or 64. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 +/* enum: Allocate a context to spread evenly across an arbitrary number of + * queues. No indirection table space is allocated for this context. (EF100 and + * later) + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2 +/* Number of queues spanned by this context. 
For exclusive contexts this must + * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where + * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if + * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in + * the indirection table will be in the range 0 to NUM_QUEUES-1. For even- + * spreading contexts this must be in the range 1 to + * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note + * that specifying NUM_QUEUES = 1 will not perform any spreading but may still + * be useful as a way of obtaining the Toeplitz hash. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8 +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4 + +/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16 +/* The handle of the owning upstream port */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4 +/* The type of context to allocate */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4 +/* enum: Allocate a context for exclusive use. The key and indirection table + * must be explicitly configured. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0 +/* enum: Allocate a context for shared use; this will spread across a range of + * queues, but the key and indirection table are pre-configured and may not be + * changed. For this mode, NUM_QUEUES must 2, 4, 8, 16, 32 or 64. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1 +/* enum: Allocate a context to spread evenly across an arbitrary number of + * queues. No indirection table space is allocated for this context. (EF100 and + * later) + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2 +/* Number of queues spanned by this context. For exclusive contexts this must + * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where + * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if + * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in + * the indirection table will be in the range 0 to NUM_QUEUES-1. For even- + * spreading contexts this must be in the range 1 to + * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note + * that specifying NUM_QUEUES = 1 will not perform any spreading but may still + * be useful as a way of obtaining the Toeplitz hash. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4 +/* Size of indirection table to be allocated to this context from the pool. + * Must be a power of 2. The minimum and maximum table size can be queried + * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in + * the common pool to allocate the requested table size, due to allocating + * table space to other RSS contexts, then the command will fail with + * MC_CMD_ERR_ENOSPC. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4 + +/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 +/* The handle of the new RSS context. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. 
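+ *
+ * Usage sketch (illustrative only; "outbuf" is an arbitrary local name):
+ *
+ *   u32 ctx = le32_to_cpu(*(const __le32 *)(outbuf +
+ *             MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST));
+ *   if (ctx == MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID)
+ *           return -EIO;
+ *
+ * Since firmware never hands out the INVALID value, it can also serve as a
+ * driver-side "no context allocated" sentinel.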
+ */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_FREE + * Free an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_FREE 0x9f +#undef MC_CMD_0x9f_PRIVILEGE_CTG + +#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_KEY + * Set the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0 +#undef MC_CMD_0xa0_PRIVILEGE_CTG + +#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 + +/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_KEY + * Get the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1 +#undef MC_CMD_0xa1_PRIVILEGE_CTG + +#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_TABLE + * Set the indirection table for an RSS context. This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. + */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 +#undef MC_CMD_0xa2_PRIVILEGE_CTG + +#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_TABLE + * Get the indirection table for an RSS context. 
This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. + */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 +#undef MC_CMD_0xa3_PRIVILEGE_CTG + +#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE + * Write a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. + */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e +#undef MC_CMD_0x13e_PRIVILEGE_CTG + +#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array of index-value pairs to be written to the table. Structure is + * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY. + */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4 +/* The index of the table entry to be written. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16 +/* The value to write into the table entry. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_READ_TABLE + * Read a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. 
+ */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f +#undef MC_CMD_0x13f_PRIVILEGE_CTG + +#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array containing the indices of the entries to be read. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508 + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2) +/* A buffer containing the requested entries read from the table. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_FLAGS + * Set various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 +#undef MC_CMD_0xe1_PRIVILEGE_CTG + +#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 +/* Hash control flags. The _EN bits are always supported, but new modes are + * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES: + * in this case, the MODE fields may be set to non-zero values, and will take + * effect regardless of the settings of the _EN flags. See the RSS_MODE + * structure for the meaning of the mode bits. Drivers must check the + * capability before trying to set any _MODE fields, as older firmware will + * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In + * the case where all the _MODE flags are zero, the _EN flags take effect, + * providing backward compatibility for existing drivers. (Setting all _MODE + * *and* all _EN flags to zero is valid, to disable RSS spreading for that + * particular packet type.) 
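+ *
+ * Usage sketch (illustrative only, legacy _EN-style configuration): a driver
+ * that does not use the newer _MODE fields could request Toeplitz hashing on
+ * IPv4/IPv6 addresses plus TCP ports by setting only the _EN bits, e.g.
+ *
+ *   u32 flags = (1U << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN) |
+ *               (1U << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN) |
+ *               (1U << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN) |
+ *               (1U << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN);
+ *
+ * With every _MODE field left at zero this is interpreted identically by old
+ * and new firmware, as described above.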
+ */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_FLAGS + * Get various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2 +#undef MC_CMD_0xe2_PRIVILEGE_CTG + +#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4 + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 +/* Hash control flags. If all _MODE bits are zero (which will always be true + * for older firmware which does not report the ADDITIONAL_RSS_MODES + * capability), the _EN bits report the state. If any _MODE bits are non-zero + * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES) + * then the _EN bits should be disregarded, although the _MODE flags are + * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS + * context and in the case where the _EN flags were used in the SET. 
This + * provides backward compatibility: old drivers will not be attempting to + * derive any meaning from the _MODE bits (and can never set them to any value + * not representable by the _EN bits); new drivers can always determine the + * mode by looking only at the _MODE bits; the value returned by a GET can + * always be used for a SET regardless of old/new driver vs. old/new firmware. + */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_VPORT_ADD_MAC_ADDRESS + * Add a MAC address to a v-port + */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8 +#undef MC_CMD_0xa8_PRIVILEGE_CTG + +#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10 +/* The handle of the v-port */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4 +/* MAC address to add */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4 +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6 + +/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */ +#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VPORT_DEL_MAC_ADDRESS + * Delete a MAC address from a v-port + */ 
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9 +#undef MC_CMD_0xa9_PRIVILEGE_CTG + +#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */ +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10 +/* The handle of the v-port */ +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4 +/* MAC address to delete */ +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4 +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6 + +/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */ +#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VPORT_GET_MAC_ADDRESSES + * Get the MAC addresses assigned to a v-port + */ +#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa +#undef MC_CMD_0xaa_PRIVILEGE_CTG + +#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */ +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4 +/* The handle of the v-port */ +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4 + +/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */ +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num)) +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6) +/* The number of MAC addresses returned */ +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4 +/* Array of MAC addresses */ +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169 + + +/***********************************/ +/* MC_CMD_VPORT_RECONFIGURE + * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port + * has already been passed to another function (v-port's user), then that + * function will be reset before applying the changes. + */ +#define MC_CMD_VPORT_RECONFIGURE 0xeb +#undef MC_CMD_0xeb_PRIVILEGE_CTG + +#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */ +#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44 +/* The handle of the v-port */ +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4 +/* Flags requesting what should be changed. */ +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1 +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch.
+ */ +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16 +/* The number of MAC addresses to add */ +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4 +/* MAC addresses to add */ +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20 +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6 +#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4 + +/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */ +#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 + + +/***********************************/ +/* MC_CMD_EVB_PORT_QUERY + * read some config of v-port. + */ +#define MC_CMD_EVB_PORT_QUERY 0x62 +#undef MC_CMD_0x62_PRIVILEGE_CTG + +#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */ +#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4 +/* The handle of the v-port */ +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4 + +/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */ +#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8 +/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0 +#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4 +/* The number of VLAN tags that may be used on a v-adaptor connected to this + * EVB port. + */ +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4 +#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_CLOCK + * Return the system and PDCPU clock frequencies. + */ +#define MC_CMD_GET_CLOCK 0xac +#undef MC_CMD_0xac_PRIVILEGE_CTG + +#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CLOCK_IN msgrequest */ +#define MC_CMD_GET_CLOCK_IN_LEN 0 + +/* MC_CMD_GET_CLOCK_OUT msgresponse */ +#define MC_CMD_GET_CLOCK_OUT_LEN 8 +/* System frequency, MHz */ +#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0 +#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4 +/* DPCPU frequency, MHz */ +#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4 +#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4 + + +/***********************************/ +/* MC_CMD_TRIGGER_INTERRUPT + * Trigger an interrupt by prodding the BIU. + */ +#define MC_CMD_TRIGGER_INTERRUPT 0xe3 +#undef MC_CMD_0xe3_PRIVILEGE_CTG + +#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 +/* Interrupt level relative to base for function. 
*/ +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4 + +/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ +#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SHMBOOT_OP + * Special operations to support (for now) shmboot. + */ +#define MC_CMD_SHMBOOT_OP 0xe6 +#undef MC_CMD_0xe6_PRIVILEGE_CTG + +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SHMBOOT_OP_IN msgrequest */ +#define MC_CMD_SHMBOOT_OP_IN_LEN 4 +/* Identifies the operation to perform */ +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 +/* enum: Copy slave_data section to the slave core. (Greenport only) */ +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 + +/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ +#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_PSU + * Adjusts power supply parameters. This is a warranty-voiding operation. + * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if + * the parameter is out of range. + */ +#define MC_CMD_SET_PSU 0xea +#undef MC_CMD_0xea_PRIVILEGE_CTG + +#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_SET_PSU_IN msgrequest */ +#define MC_CMD_SET_PSU_IN_LEN 12 +#define MC_CMD_SET_PSU_IN_PARAM_OFST 0 +#define MC_CMD_SET_PSU_IN_PARAM_LEN 4 +#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_OFST 4 +#define MC_CMD_SET_PSU_IN_RAIL_LEN 4 +#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ +/* desired value, eg voltage in mV */ +#define MC_CMD_SET_PSU_IN_VALUE_OFST 8 +#define MC_CMD_SET_PSU_IN_VALUE_LEN 4 + +/* MC_CMD_SET_PSU_OUT msgresponse */ +#define MC_CMD_SET_PSU_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_FUNCTION_INFO + * Get function information. PF and VF number. + */ +#define MC_CMD_GET_FUNCTION_INFO 0xec +#undef MC_CMD_0xec_PRIVILEGE_CTG + +#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */ +#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0 + +/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */ +#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4 + + +/***********************************/ +/* MC_CMD_ENABLE_OFFLINE_BIST + * Enters offline BIST mode. All queues are torn down, chip enters quiescent + * mode, calling function gets exclusive MCDI ownership. The only way out is + * reboot. 
+ */ +#define MC_CMD_ENABLE_OFFLINE_BIST 0xed +#undef MC_CMD_0xed_PRIVILEGE_CTG + +#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ +#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 + +/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */ +#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_READ_FUSES + * Read data programmed into the device One-Time-Programmable (OTP) Fuses + */ +#define MC_CMD_READ_FUSES 0xf0 +#undef MC_CMD_0xf0_PRIVILEGE_CTG + +#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_READ_FUSES_IN msgrequest */ +#define MC_CMD_READ_FUSES_IN_LEN 8 +/* Offset in OTP to read */ +#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 +#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4 +/* Length of data to read in bytes */ +#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 +#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4 + +/* MC_CMD_READ_FUSES_OUT msgresponse */ +#define MC_CMD_READ_FUSES_OUT_LENMIN 4 +#define MC_CMD_READ_FUSES_OUT_LENMAX 252 +#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1) +/* Length of returned OTP data in bytes */ +#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 +#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4 +/* Returned data */ +#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 +#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 +#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0 +#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248 +#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_LICENSING + * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - not used for V3 licensing + */ +#define MC_CMD_LICENSING 0xf3 +#undef MC_CMD_0xf3_PRIVILEGE_CTG + +#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_IN msgrequest */ +#define MC_CMD_LICENSING_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_LICENSING_IN_OP_OFST 0 +#define MC_CMD_LICENSING_IN_OP_LEN 4 +/* enum: re-read and apply licenses after a license key partition update; note + * that this operation returns a zero-length response + */ +#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0 +/* enum: report counts of installed licenses */ +#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1 + +/* MC_CMD_LICENSING_OUT msgresponse */ +#define MC_CMD_LICENSING_OUT_LEN 28 +/* count of application keys which are valid */ +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0 +#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4 +/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with + * MC_CMD_FC_OP_LICENSE) + */ +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4 +#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4 +/* count of application keys which are invalid due to being blacklisted */ +#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8 +#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4 +/* count of application keys which are invalid due to being unverifiable */ +#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12 +#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4 +/* count of application keys which are invalid due to being for the wrong node + */ +#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16 +#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4 +/* licensing state (for diagnostics; the exact meaning of the bits in this + * field are private to the firmware) + */ +#define 
MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20 +#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4 +/* licensing subsystem self-test report (for manftest) */ +#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24 +#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4 +/* enum: licensing subsystem self-test failed */ +#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0 +/* enum: licensing subsystem self-test passed */ +#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1 + + +/***********************************/ +/* MC_CMD_LICENSING_V3 + * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - V3 licensing (Medford) + */ +#define MC_CMD_LICENSING_V3 0xd0 +#undef MC_CMD_0xd0_PRIVILEGE_CTG + +#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_V3_IN msgrequest */ +#define MC_CMD_LICENSING_V3_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_LICENSING_V3_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_IN_OP_LEN 4 +/* enum: re-read and apply licenses after a license key partition update; note + * that this operation returns a zero-length response + */ +#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 +/* enum: report counts of installed licenses Returns EAGAIN if license + * processing (updating) has been started but not yet completed. + */ +#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 + +/* MC_CMD_LICENSING_V3_OUT msgresponse */ +#define MC_CMD_LICENSING_V3_OUT_LEN 88 +/* count of keys which are valid */ +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0 +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4 +/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with + * MC_CMD_FC_OP_LICENSE) + */ +#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4 +#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4 +/* count of keys which are invalid due to being unverifiable */ +#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8 +#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4 +/* count of keys which are invalid due to being for the wrong node */ +#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12 +#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4 +/* licensing state (for diagnostics; the exact meaning of the bits in this + * field are private to the firmware) + */ +#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4 +/* licensing subsystem self-test report (for manftest) */ +#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20 +#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4 +/* enum: licensing subsystem self-test failed */ +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0 +/* enum: licensing subsystem self-test passed */ +#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1 +/* bitmask of licensed applications */ +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28 +/* reserved for future use */ +#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32 +#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24 +/* bitmask of licensed features */ +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56 +#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60 +/* reserved for future use */ +#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64 +#define 
MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24 + + +/***********************************/ +/* MC_CMD_LICENSING_GET_ID_V3 + * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license + * partition - V3 licensing (Medford) + */ +#define MC_CMD_LICENSING_GET_ID_V3 0xd1 +#undef MC_CMD_0xd1_PRIVILEGE_CTG + +#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */ +#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0 + +/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num)) +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1) +/* type of license (eg 3) */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4 +/* length of the license ID (in bytes) */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4 +/* the unique license ID of the adapter */ +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_APP_STATE + * Query the state of an individual licensed application. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation + * or a reboot of the MC.) Not used for V3 licensing + */ +#define MC_CMD_GET_LICENSED_APP_STATE 0xf5 +#undef MC_CMD_0xf5_PRIVILEGE_CTG + +#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4 +/* application ID to query (LICENSED_APP_ID_xxx) */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4 + +/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4 +/* state of this application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4 +/* enum: no (or invalid) license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 +/* enum: a valid license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_V3_APP_STATE + * Query the state of an individual licensed application. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE + * operation or a reboot of the MC.) 
Used for V3 licensing (Medford) + */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2 +#undef MC_CMD_0xd2_PRIVILEGE_CTG + +#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8 +/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit + * mask + */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4 + +/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4 +/* state of this application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0 +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4 +/* enum: no (or invalid) license is present for the application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 +/* enum: a valid license is present for the application */ +#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 + + +/***********************************/ +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES + * Query the state of an one or more licensed features. (Note that the actual + * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE + * operation or a reboot of the MC.) Used for V3 licensing (Medford) + */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3 +#undef MC_CMD_0xd3_PRIVILEGE_CTG + +#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8 +/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or + * more bits set + */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4 + +/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8 +/* states of these features - bit set for licensed, clear for not licensed */ +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0 +#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4 + + +/***********************************/ +/* MC_CMD_LICENSED_APP_OP + * Perform an action for an individual licensed application - not used for V3 + * licensing. 
+ */ +#define MC_CMD_LICENSED_APP_OP 0xf6 +#undef MC_CMD_0xf6_PRIVILEGE_CTG + +#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_APP_OP_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8 +#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020 +#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4) +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4 +/* enum: validate application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* enum: mask application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 +/* arguments specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253 + +/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0 +#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4) +/* result specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4 +/* validation challenge */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68 +/* feature expiry (time_t) */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4 +/* validation response */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 + +/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4 +/* flag */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4 + +/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LICENSED_V3_VALIDATE_APP + * Perform validation for an individual licensed application - V3 licensing + * (Medford) + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4 +#undef 
MC_CMD_0xd4_PRIVILEGE_CTG + +#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56 +/* challenge for validation (384 bits) */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48 +/* application ID expressed as a single bit mask */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52 + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116 +/* validation response to challenge in the form of ECDSA signature consisting + * of two 384-bit integers, r and s, in big-endian order. The signature signs a + * SHA-384 digest of a message constructed from the concatenation of the input + * message and the remaining fields of this output message, e.g. challenge[48 + * bytes] ... expiry_time[4 bytes] ... + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96 +/* application expiry time */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4 +/* application expiry units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4 +/* enum: expiry units are accounting units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 +/* enum: expiry units are calendar days */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 +/* base MAC address of the NIC stored in NVRAM (note that this is a constant + * value for a given NIC regardless which function is calling, effectively this + * is PF0 base MAC address) + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6 +/* MAC address of v-adaptor associated with the client. If no such v-adapator + * exists, then the field is filled with 0xFF. 
+ */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6 + + +/***********************************/ +/* MC_CMD_LICENSED_V3_MASK_FEATURES + * Mask features - V3 licensing (Medford) + */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 +#undef MC_CMD_0xd5_PRIVILEGE_CTG + +#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 +/* mask to be applied to features to be changed */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 +/* whether to turn on or turn off the masked features */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4 +/* enum: turn the features off */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 +/* enum: turn the features back on */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LICENSING_V3_TEMPORARY + * Perform operations to support installation of a single temporary license in + * the adapter, in addition to those found in the licensing partition. See + * SF-116124-SW for an overview of how this could be used. The license is + * stored in MC persistent data and so will survive a MC reboot, but will be + * erased when the adapter is power cycled + */ +#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6 +#undef MC_CMD_0xd6_PRIVILEGE_CTG + +#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4 +/* operation code */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4 +/* enum: install a new license, overwriting any existing temporary license. 
+ * This is an asynchronous operation owing to the time taken to validate an + * ECDSA license + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 +/* enum: clear the license immediately rather than waiting for the next power + * cycle + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 +/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET + * operation + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4 +/* ECDSA license and signature */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4 + +/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */ +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4 + +/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12 +/* status code */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4 +/* enum: finished validating and installing license */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 +/* enum: license validation and installation in progress */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 +/* enum: licensing error. More specific error messages are not provided to + * avoid exposing details of the licensing system to the client + */ +#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 +/* bitmask of licensed features */ +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4 +#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8 + + +/***********************************/ +/* MC_CMD_SET_PARSER_DISP_CONFIG + * Change configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 +#undef MC_CMD_0xf9_PRIVILEGE_CTG + +#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4) +/* the type of configuration setting to change */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 +/* enum: Per-TXQ enable for multicast UDP destination lookup for possible + * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +/* enum: Per-v-adaptor enable for suppression of self-transmissions on the + * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single + * boolean.) 
+ */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +/* handle for the entity to update: queue handle, EVB port ID, etc. depending + * on the type of configuration setting being changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 +/* new value: the details depend on the type of configuration setting being + * changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253 + +/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_CONFIG + * Read configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa +#undef MC_CMD_0xfa_PRIVILEGE_CTG + +#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 +/* the type of configuration setting to read */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ +/* handle for the entity to query: queue handle, EVB port ID, etc. depending on + * the type of configuration setting being read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 + +/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4) +/* current value: the details depend on the type of configuration setting being + * read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255 + + +/***********************************/ +/* MC_CMD_GET_PORT_MODES + * Find out about available port modes + */ +#define MC_CMD_GET_PORT_MODES 0xff +#undef MC_CMD_0xff_PRIVILEGE_CTG + +#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_MODES_IN msgrequest */ +#define MC_CMD_GET_PORT_MODES_IN_LEN 0 + +/* MC_CMD_GET_PORT_MODES_OUT msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_LEN 12 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) + * that are supported for customer use in production firmware. 
+ */ +#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 +#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4 + +/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) + * that are supported for customer use in production firmware. + */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0 +#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4 +#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8 +#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4 +/* Bitmask of engineering port modes available on the board (indexed by + * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that + * contains all modes implemented in firmware for a particular board. Modes + * listed in MODES are considered production modes and should be exposed in + * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES + * should be considered hidden (not to be exposed in userland tools) and for + * engineering use only. There are no other semantic differences and any mode + * listed in either MODES or ENGINEERING_MODES can be set on the board. + */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12 +#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4 + + +/***********************************/ +/* MC_CMD_OVERRIDE_PORT_MODE + * Override flash config port mode for subsequent MC reboot(s). Override data + * is stored in the presistent data section of DMEM and activated on next MC + * warm reboot. A cold reboot resets the override. It is assumed that a + * sufficient number of PFs are available and that port mapping is valid for + * the new port mode, as the override does not affect PF configuration. + */ +#define MC_CMD_OVERRIDE_PORT_MODE 0x137 +#undef MC_CMD_0x137_PRIVILEGE_CTG + +#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1 +/* New mode (TLV_PORT_MODE_*) to set, if override enabled */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4 + +/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */ +#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_WORKAROUNDS + * Read the list of all implemented and all currently enabled workarounds. The + * enums here must correspond with those in MC_CMD_WORKAROUND. + */ +#define MC_CMD_GET_WORKAROUNDS 0x59 +#undef MC_CMD_0x59_PRIVILEGE_CTG + +#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ +#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 +/* Each workaround is represented by a single bit according to the enums below. 
+ */ +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 +/* enum: Bug 61265 work around (broken EVQ TMR writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MASK + * Read/set privileges of an arbitrary PCIe function + */ +#define MC_CMD_PRIVILEGE_MASK 0x5a +#undef MC_CMD_0x5a_PRIVILEGE_CTG + +#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8 +/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF + * 1,3 = 0x00030001 + */ +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ +/* New privilege mask to be set. The mask will only be changed if the MSB is + * set to 1. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ +/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +/* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC + * adress. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800 +/* enum: Privilege that allows a Function to change the MAC address configured + * in its associated vAdapter/vPort. 
+ */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000 +/* enum: Privilege that allows a Function to install filters that specify VLANs + * that are not in the permit list for the associated vPort. This privilege is + * primarily to support ESX where vPorts are created that restrict traffic to + * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000 +/* enum: Privilege for insecure commands. Commands that belong to this group + * are not permitted on secure adapters regardless of the privilege mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000 +/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for + * administrator-level operations that are not allowed from the local host once + * an adapter has Bound to a remote ServerLock Controller (see doxbox + * SF-117064-DG for background). + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000 +/* enum: Set this bit to indicate that a new privilege mask is to be set, + * otherwise the command will only read the existing mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 + +/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 +/* For an admin function, always all the privileges are reported. */ +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4 + + +/***********************************/ +/* MC_CMD_LINK_STATE_MODE + * Read/set link state mode of a VF + */ +#define MC_CMD_LINK_STATE_MODE 0x5c +#undef MC_CMD_0x5c_PRIVILEGE_CTG + +#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LINK_STATE_MODE_IN msgrequest */ +#define MC_CMD_LINK_STATE_MODE_IN_LEN 8 +/* The target function to have its link state mode read or set, must be a VF + * e.g. VF 1,3 = 0x00030001 + */ +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 +/* New link state mode to be set */ +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4 +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ +/* enum: Use this value to just read the existing setting without modifying it. 
+ */ +#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff + +/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ +#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4 + + +/***********************************/ +/* MC_CMD_FUSE_DIAGS + * Additional fuse diagnostics + */ +#define MC_CMD_FUSE_DIAGS 0x102 +#undef MC_CMD_0x102_PRIVILEGE_CTG + +#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_FUSE_DIAGS_IN msgrequest */ +#define MC_CMD_FUSE_DIAGS_IN_LEN 0 + +/* MC_CMD_FUSE_DIAGS_OUT msgresponse */ +#define MC_CMD_FUSE_DIAGS_OUT_LEN 48 +/* Total number of mismatched bits between pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4 +/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4 +/* Checksum of data after logical OR of pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4 +/* Total number of mismatched bits between pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4 +/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4 +/* Checksum of data after logical OR of pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4 +/* Total number of mismatched bits between pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4 +/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4 +/* Checksum of data after logical OR of pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MODIFY + * Modify the privileges of a set of PCIe functions. Note that this operation + * only effects non-admin functions unless the admin privilege itself is + * included in one of the masks provided. + */ +#define MC_CMD_PRIVILEGE_MODIFY 0x60 +#undef MC_CMD_0x60_PRIVILEGE_CTG + +#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 +/* The groups of functions to have their privilege masks modified. 
*/ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +/* For VFS_OF_PF specify the PF, for ONE specify the target function */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 +/* Privileges to be added to the target functions. For privilege definitions + * refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4 +/* Privileges to be removed from the target functions. For privilege + * definitions refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4 + +/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 + + +/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4 +/* UDP port (the standard ports are named below but any port may be used) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2 +/* enum: the IANA allocated UDP port for VXLAN */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5 +/* enum: the IANA allocated UDP port for Geneve */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16 +/* tunnel encapsulation protocol (only those named below are supported) */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2 +/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0 +/* enum: This port will be used for Geneve on both IPv4 and IPv6 */ +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16 +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16 + + +/***********************************/ +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS + * Configure UDP ports for tunnel encapsulation hardware acceleration. The + * parser-dispatcher will attempt to parse traffic on these ports as tunnel + * encapsulation PDUs and filter them using the tunnel encapsulation filter + * chain rather than the standard filter chain. Note that this command can + * cause all functions to see a reset. (Available on Medford only.) 
+ */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117 +#undef MC_CMD_0x117_PRIVILEGE_CTG + +#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num)) +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4) +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1 +/* The number of entries in the ENTRIES array */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2 +/* Entries defining the UDP port to protocol mapping, each laid out as a + * TUNNEL_ENCAP_UDP_PORT_ENTRY + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16 + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2 +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 + + +/***********************************/ +/* MC_CMD_VNIC_ENCAP_RULE_ADD + * Add a rule for detecting encapsulations in the VNIC stage. Currently this only affects checksum validation in VNIC RX - on TX the send descriptor explicitly specifies encapsulation. These rules are per-VNIC, i.e. only apply to the current driver. If a rule matches, then the packet is considered to have the corresponding encapsulation type, and the inner packet is parsed. It is up to the driver to ensure that overlapping rules are not inserted. (If a packet would match multiple rules, a random one of them will be used.) A rule with the exact same match criteria may not be inserted twice (EALREADY). Only a limited number MATCH_FLAGS values are supported, use MC_CMD_GET_PARSER_DISP_INFO with OP OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported combinations. Each driver may only have a limited set of active rules - returns ENOSPC if the caller's table is full. + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d +#undef MC_CMD_0x16d_PRIVILEGE_CTG + +#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36 +/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4 +/* Any non-zero bits other than the ones named below or an unsupported + * combination will cause the NIC to return EOPNOTSUPP. In the future more + * flags may be added. 
+ */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1 +/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order. + * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used. + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2 +/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order. + * (Deprecated) + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12 +/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12 +/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the + * case of IPv4, the IP should be in the first 4 bytes and all other bytes + * should be zero. + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16 +/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1 +/* Actions that should be applied to packets match the rule. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1 +/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2 +/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4 + +/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4 +/* Handle to inserted rule. Used for removing the rule. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4 + + +/***********************************/ +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE + * Remove a VNIC encapsulation rule. 
Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule. + */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e +#undef MC_CMD_0x16e_PRIVILEGE_CTG + +#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4 +/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0 +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4 + +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0 + +/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are + * defined in SF-120734-TC with more information in SF-122717-TC. + */ +#define FUNCTION_PERSONALITY_LEN 4 +#define FUNCTION_PERSONALITY_ID_OFST 0 +#define FUNCTION_PERSONALITY_ID_LEN 4 +/* enum: Function has no assigned personality */ +#define FUNCTION_PERSONALITY_NULL 0x0 +/* enum: Function has an EF100-style function control window and VI windows + * with both EF100 and vDPA doorbells. + */ +#define FUNCTION_PERSONALITY_EF100 0x1 +/* enum: Function has virtio net device configuration registers and doorbells + * for virtio queue pairs. + */ +#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2 +/* enum: Function has virtio block device configuration registers and a + * doorbell for a single virtqueue. + */ +#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3 +/* enum: Function is a Xilinx acceleration device - management function */ +#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4 +/* enum: Function is a Xilinx acceleration device - user function */ +#define FUNCTION_PERSONALITY_ACCEL_USR 0x5 +#define FUNCTION_PERSONALITY_ID_LBN 0 +#define FUNCTION_PERSONALITY_ID_WIDTH 32 + +/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID + * (interface/PF/VF tuple) + */ +#define PCIE_FUNCTION_LEN 8 +/* PCIe PF function number */ +#define PCIE_FUNCTION_PF_OFST 0 +#define PCIE_FUNCTION_PF_LEN 2 +/* enum: Wildcard value representing any available function (e.g in resource + * allocation requests) + */ +#define PCIE_FUNCTION_PF_ANY 0xfffe +/* enum: Value representing invalid (null) function */ +#define PCIE_FUNCTION_PF_NULL 0xffff +#define PCIE_FUNCTION_PF_LBN 0 +#define PCIE_FUNCTION_PF_WIDTH 16 +/* PCIe VF Function number (PF relative) */ +#define PCIE_FUNCTION_VF_OFST 2 +#define PCIE_FUNCTION_VF_LEN 2 +/* enum: Wildcard value representing any available function (e.g in resource + * allocation requests) + */ +#define PCIE_FUNCTION_VF_ANY 0xfffe +/* enum: Function is a PF (when PF != PF_NULL) or invalid function (when PF == + * PF_NULL) + */ +#define PCIE_FUNCTION_VF_NULL 0xffff +#define PCIE_FUNCTION_VF_LBN 16 +#define PCIE_FUNCTION_VF_WIDTH 16 +/* PCIe interface of the function */ +#define PCIE_FUNCTION_INTF_OFST 4 +#define PCIE_FUNCTION_INTF_LEN 4 +/* enum: Host PCIe interface */ +#define PCIE_FUNCTION_INTF_HOST 0x0 +/* enum: Application Processor interface */ +#define PCIE_FUNCTION_INTF_AP 0x1 +#define PCIE_FUNCTION_INTF_LBN 32 +#define PCIE_FUNCTION_INTF_WIDTH 32 + +#endif /* MCDI_PCOL_H */ -- cgit v1.2.3 From b6b584562cbe7dc357083459d6dd5b171e12cadb Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 4 May 2022 09:29:05 +0300 Subject: mlxsw: spectrum_dcb: Do not warn about priority changes The idea behind the warnings is that the user would get warned in case when more than one priority is configured for a given DSCP value on a 
netdevice. The warning is currently wrong, because dcb_ieee_getapp_mask() returns the first matching entry, not all of them, and the warning will then claim that some priority is "current", when in fact it is not. But more importantly, the warning is misleading in general. Consider the following commands: # dcb app flush dev swp19 dscp-prio # dcb app add dev swp19 dscp-prio 24:3 # dcb app replace dev swp19 dscp-prio 24:2 The last command will issue the following warning: mlxsw_spectrum3 0000:07:00.0 swp19: Ignoring new priority 2 for DSCP 24 in favor of current value of 3 The reason is that the "replace" command works by first adding the new value, and then removing all old values. This is the only way to make the replacement without causing the traffic to be prioritized to whatever the chip defaults to. The warning is issued in response to adding the new priority, and then no warning is shown when the old priority is removed. The upshot is that the canonical way to change traffic prioritization always produces a warning about ignoring the new priority, but what gets configured is in fact what the user intended. An option to just emit warning every time that the prioritization changes just to make it clear that it happened is obviously unsatisfactory. Therefore, in this patch, remove the warnings. Reported-by: Maksym Yaremchuk Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 5f92b1691360..aff6d4f35cd2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -168,8 +168,6 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev, static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev, struct dcb_app *app) { - int prio; - if (app->priority >= IEEE_8021QAZ_MAX_TCS) { netdev_err(dev, "APP entry with priority value %u is invalid\n", app->priority); @@ -183,17 +181,6 @@ static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev, app->protocol); return -EINVAL; } - - /* Warn about any DSCP APP entries with the same PID. */ - prio = fls(dcb_ieee_getapp_mask(dev, app)); - if (prio--) { - if (prio < app->priority) - netdev_warn(dev, "Choosing priority %d for DSCP %d in favor of previously-active value of %d\n", - app->priority, app->protocol, prio); - else if (prio > app->priority) - netdev_warn(dev, "Ignoring new priority %d for DSCP %d in favor of current value of %d\n", - app->priority, app->protocol, prio); - } break; case IEEE_8021QAZ_APP_SEL_ETHERTYPE: -- cgit v1.2.3 From 0106668cd2f91bf913fb78972840dedfba80a3c3 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 4 May 2022 09:29:06 +0300 Subject: mlxsw: Treat LLDP packets as control When trapping packets for on-CPU processing, Spectrum machines differentiate between control and non-control traps. Traffic trapped through non-control traps is treated as data and kept in shared buffer in pools 0-4. Traffic trapped through control traps is kept in the dedicated control buffer 9. The advantage of marking traps as control is that pressure in the data plane does not prevent the control traffic to be processed. When the LLDP trap was introduced, it was marked as a control trap. 
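For illustration (this sketch is not part of the patch text): in the driver source, "marked as a control trap" comes down to the boolean argument of the MLXSW_RXL() listener macro in the trap table, which is set independently of the MLXSW_SP_TRAP_CONTROL() definition itself. A condensed sketch of the LLDP entry as this patch leaves it, reconstructed from the hunk further down:

	/* Sketch of the LLDP entry in mlxsw_sp_trap_items_arr[]. */
	.trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP),
	.listeners_arr = {
		/* The boolean marks the listener as control; per the text
		 * above, control-trapped packets use the dedicated control
		 * buffer, so data-plane pressure cannot starve them.
		 */
		MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU,
			  true, SP_LLDP, DISCARD),
	},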
But then in commit aed4b5721143 ("mlxsw: spectrum: PTP: Hook into packet receive path"), PTP traps were introduced. Because Ethernet-encapsulated PTP packets look to the Spectrum-1 ASIC as LLDP traffic and are trapped under the LLDP trap, this trap was reconfigured as non-control, in sync with the PTP traps. There is however no requirement that PTP traffic be handled as data. Besides, the usual encapsulation for PTP traffic is UDP, not bare Ethernet, and that is in deployments that even need PTP, which is far less common than LLDP. This is reflected by the default policer, which was not bumped up to the 19Kpps / 24Kpps that is the expected load of a PTP-enabled Spectrum-1 switch. Marking of LLDP trap as non-control was therefore probably misguided. In this patch, change it back to control. Reported-by: Maksym Yaremchuk Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 47b061b99160..ed4d0d3448f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -864,7 +864,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { .trap = MLXSW_SP_TRAP_CONTROL(LLDP, LLDP, TRAP), .listeners_arr = { MLXSW_RXL(mlxsw_sp_rx_ptp_listener, LLDP, TRAP_TO_CPU, - false, SP_LLDP, DISCARD), + true, SP_LLDP, DISCARD), }, }, { -- cgit v1.2.3 From d1314096fbe9601ea6606b925ec4563c027936d9 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 4 May 2022 09:29:07 +0300 Subject: mlxsw: spectrum_acl: Do not report activity for multicast routes The driver periodically queries the device for activity of ACL rules in order to report it to tc upon 'FLOW_CLS_STATS'. In Spectrum-2 and later ASICs, multicast routes are programmed as ACL rules, but unlike rules installed by tc, their activity is of no interest. Avoid unnecessary activity query for such rules by always reporting them as inactive. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 31f7f4c3acc3..3b9ba8fa247a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1827,10 +1827,9 @@ static int mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp, void *rule_priv, bool *activity) { - struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; + *activity = false; - return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry, - activity); + return 0; } static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = { -- cgit v1.2.3 From b8950003849de2a78f7aed9e348233e2b678b755 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 4 May 2022 09:29:08 +0300 Subject: mlxsw: spectrum_switchdev: Only query FDB notifications when necessary The driver periodically queries the device for FDB notifications (e.g., learned, aged-out) in order to update the bridge driver. These notifications can only be generated when bridges are offloaded to the device. 
Avoid unnecessary queries by starting to query upon installation of the first bridge and stop querying upon removal of the last bridge. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 31 ++++++++++++++-------- 1 file changed, 20 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 3bf12092a8a2..a6d2e806cba9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -207,6 +207,16 @@ static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge, } } +static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp, + bool no_delay) +{ + struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; + unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval; + + mlxsw_core_schedule_dw(&bridge->fdb_notify.dw, + msecs_to_jiffies(interval)); +} + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, struct net_device *br_dev, @@ -245,6 +255,8 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, bridge_device->ops = bridge->bridge_8021d_ops; } INIT_LIST_HEAD(&bridge_device->mids_list); + if (list_empty(&bridge->bridges_list)) + mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false); list_add(&bridge_device->list, &bridge->bridges_list); /* It is possible we already have VXLAN devices enslaved to the bridge. @@ -273,6 +285,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp, bridge_device->dev); list_del(&bridge_device->list); + if (list_empty(&bridge->bridges_list)) + cancel_delayed_work(&bridge->fdb_notify.dw); if (bridge_device->vlan_enabled) bridge->vlan_enabled_exists = false; WARN_ON(!list_empty(&bridge_device->ports_list)); @@ -2886,22 +2900,13 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, } } -static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp, - bool no_delay) -{ - struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; - unsigned int interval = no_delay ? 
0 : bridge->fdb_notify.interval; - - mlxsw_core_schedule_dw(&bridge->fdb_notify.dw, - msecs_to_jiffies(interval)); -} - #define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10 static void mlxsw_sp_fdb_notify_work(struct work_struct *work) { struct mlxsw_sp_bridge *bridge; struct mlxsw_sp *mlxsw_sp; + bool reschedule = false; char *sfn_pl; int queries; u8 num_rec; @@ -2916,6 +2921,9 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = bridge->mlxsw_sp; rtnl_lock(); + if (list_empty(&bridge->bridges_list)) + goto out; + reschedule = true; queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION; while (queries > 0) { mlxsw_reg_sfn_pack(sfn_pl); @@ -2935,6 +2943,8 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) out: rtnl_unlock(); kfree(sfn_pl); + if (!reschedule) + return; mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries); } @@ -3665,7 +3675,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work); bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; - mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, false); return 0; err_register_switchdev_blocking_notifier: -- cgit v1.2.3 From cff9437605d5c282c1fe00c63ea5f312a7465646 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 4 May 2022 09:29:09 +0300 Subject: mlxsw: spectrum_router: Only query neighbour activity when necessary The driver periodically queries the device for activity of neighbour entries in order to report it to the kernel's neighbour table. Avoid unnecessary activity query when no neighbours are installed. Use an atomic variable to track the number of neighbours, as it is read without any locks. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6 ++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1 + 2 files changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index dc820d9f2696..9ac4f3c00349 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2360,6 +2360,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) goto err_neigh_entry_insert; mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry); + atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count); list_add(&neigh_entry->rif_list_node, &rif->neigh_list); return neigh_entry; @@ -2374,6 +2375,7 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry) { list_del(&neigh_entry->rif_list_node); + atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count); mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry); mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); mlxsw_sp_neigh_entry_free(neigh_entry); @@ -2571,6 +2573,9 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) char *rauhtd_pl; int err; + if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count)) + return 0; + rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); if (!rauhtd_pl) return -ENOMEM; @@ -2950,6 +2955,7 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_router_neighs_update_work); INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw, mlxsw_sp_router_probe_unresolved_nexthops); + atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0); 
mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0); mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index fa829658a11b..6e704d807a78 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -56,6 +56,7 @@ struct mlxsw_sp_router { struct { struct delayed_work dw; unsigned long interval; /* ms */ + atomic_t neigh_count; } neighs_update; struct delayed_work nexthop_probe_dw; #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */ -- cgit v1.2.3 From 0a448bba50090a6147c144f452f49301025111ec Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 3 May 2022 15:01:46 +0300 Subject: net: mscc: ocelot: use list_add_tail in ocelot_vcap_filter_add_to_block() list_add(..., pos->prev) and list_add_tail(..., pos) are equivalent; use the latter form so that it can later be unified with the case where the list is empty. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mscc/ocelot_vcap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 1e74bdb215ec..5b6f22bd0e5f 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -1013,7 +1013,7 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, if (filter->prio < tmp->prio) break; } - list_add(&filter->list, pos->prev); + list_add_tail(&filter->list, pos); return 0; } -- cgit v1.2.3 From 3825a0d02748b4eb355b1d3926ff750fd3846178 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 3 May 2022 15:01:47 +0300 Subject: net: mscc: ocelot: add to tail of empty list in ocelot_vcap_filter_add_to_block This makes no functional difference but helps in minimizing the delta for a future change. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mscc/ocelot_vcap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 5b6f22bd0e5f..4d8ce4b425b2 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -1004,7 +1004,7 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, block->count++; if (list_empty(&block->rules)) { - list_add(&filter->list, &block->rules); + list_add_tail(&filter->list, &block->rules); return 0; } -- cgit v1.2.3 From 09fd1e0d14815fd2c80577f4116c8976d8d080f8 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 3 May 2022 15:01:48 +0300 Subject: net: mscc: ocelot: use list_for_each_entry in ocelot_vcap_filter_add_to_block Unify the code paths for adding to an empty list and to a list with elements by keeping a "pos" list_head element that indicates where to insert. Initialize "pos" with the list head itself in case list_for_each_entry() doesn't iterate over any element. Note that list_for_each_safe() isn't needed because no element is removed from the list while iterating.
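As a reading aid, a minimal sketch of the insertion pattern this series converges on (illustrative only, not part of the patch; the names example_filter and example_insert_by_prio are made up, and the snippet assumes the kernel's <linux/list.h>):

#include <linux/list.h>

struct example_filter {
	int prio;
	struct list_head list;
};

/* Insert "filter" before the first entry with a higher prio. If no such
 * entry exists, "pos" stays at the list head and the filter is appended
 * at the end of the list.
 */
static void example_insert_by_prio(struct list_head *rules,
				   struct example_filter *filter)
{
	struct list_head *pos = rules;	/* default: insert at the tail */
	struct example_filter *tmp;

	list_for_each_entry(tmp, rules, list) {
		if (filter->prio < tmp->prio) {
			pos = &tmp->list;	/* insert right before tmp */
			break;
		}
	}

	/* list_add_tail(new, pos) links "new" just before "pos" */
	list_add_tail(&filter->list, pos);
}

This is why the empty-list special case can be dropped: with an empty list the loop body never runs, "pos" remains the list head, and list_add_tail() naturally appends to the (empty) list.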
Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mscc/ocelot_vcap.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 4d8ce4b425b2..627165a5414a 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -993,8 +993,8 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, struct ocelot_vcap_filter *filter, struct netlink_ext_ack *extack) { + struct list_head *pos = &block->rules; struct ocelot_vcap_filter *tmp; - struct list_head *pos, *n; int ret; ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack); @@ -1003,15 +1003,11 @@ static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, block->count++; - if (list_empty(&block->rules)) { - list_add_tail(&filter->list, &block->rules); - return 0; - } - - list_for_each_safe(pos, n, &block->rules) { - tmp = list_entry(pos, struct ocelot_vcap_filter, list); - if (filter->prio < tmp->prio) + list_for_each_entry(tmp, &block->rules, list) { + if (filter->prio < tmp->prio) { + pos = &tmp->list; break; + } } list_add_tail(&filter->list, pos); -- cgit v1.2.3 From 8e90c499bd6816f6dc4127f071179e7bd190e5aa Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 3 May 2022 15:01:49 +0300 Subject: net: mscc: ocelot: drop port argument from qos_policer_conf_set The "port" argument is used for nothing else except printing on the error path. Print errors on behalf of the policer index, which is less confusing anyway. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mscc/ocelot_police.c | 26 +++++++++++++++----------- drivers/net/ethernet/mscc/ocelot_police.h | 2 +- drivers/net/ethernet/mscc/ocelot_vcap.c | 4 ++-- 3 files changed, 18 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c index a65606bb84a0..7e1f67be38f5 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.c +++ b/drivers/net/ethernet/mscc/ocelot_police.c @@ -20,7 +20,7 @@ /* Default policer order */ #define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */ -int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, +int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix, struct qos_policer_conf *conf) { u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE; @@ -102,26 +102,30 @@ int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, /* Check limits */ if (pir > GENMASK(15, 0)) { - dev_err(ocelot->dev, "Invalid pir for port %d: %u (max %lu)\n", - port, pir, GENMASK(15, 0)); + dev_err(ocelot->dev, + "Invalid pir for policer %u: %u (max %lu)\n", + pol_ix, pir, GENMASK(15, 0)); return -EINVAL; } if (cir > GENMASK(15, 0)) { - dev_err(ocelot->dev, "Invalid cir for port %d: %u (max %lu)\n", - port, cir, GENMASK(15, 0)); + dev_err(ocelot->dev, + "Invalid cir for policer %u: %u (max %lu)\n", + pol_ix, cir, GENMASK(15, 0)); return -EINVAL; } if (pbs > pbs_max) { - dev_err(ocelot->dev, "Invalid pbs for port %d: %u (max %u)\n", - port, pbs, pbs_max); + dev_err(ocelot->dev, + "Invalid pbs for policer %u: %u (max %u)\n", + pol_ix, pbs, pbs_max); return -EINVAL; } if (cbs > cbs_max) { - dev_err(ocelot->dev, "Invalid cbs for port %d: %u (max %u)\n", - port, cbs, cbs_max); + dev_err(ocelot->dev, + "Invalid cbs for policer %u: %u (max %u)\n", + pol_ix, cbs, cbs_max); return 
-EINVAL; } @@ -211,7 +215,7 @@ int ocelot_port_policer_add(struct ocelot *ocelot, int port, dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n", __func__, port, pp.pir, pp.pbs); - err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp); + err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp); if (err) return err; @@ -235,7 +239,7 @@ int ocelot_port_policer_del(struct ocelot *ocelot, int port) pp.mode = MSCC_QOS_RATE_MODE_DISABLED; - err = qos_policer_conf_set(ocelot, port, POL_IX_PORT + port, &pp); + err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp); if (err) return err; diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index 7552995f8b17..0749f23684f2 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -31,7 +31,7 @@ struct qos_policer_conf { u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */ }; -int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, +int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix, struct qos_policer_conf *conf); int ocelot_policer_validate(const struct flow_action *action, diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 627165a5414a..d8a5e0d3e99c 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -914,7 +914,7 @@ int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, if (!tmp) return -ENOMEM; - ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + ret = qos_policer_conf_set(ocelot, pol_ix, &pp); if (ret) { kfree(tmp); return ret; @@ -945,7 +945,7 @@ int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix) if (z) { pp.mode = MSCC_QOS_RATE_MODE_DISABLED; - return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + return qos_policer_conf_set(ocelot, pol_ix, &pp); } return 0; -- cgit v1.2.3 From 91d350d661bf9c7d4afeb585a81ad694280cc0fb Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 3 May 2022 15:01:50 +0300 Subject: net: mscc: ocelot: don't use magic numbers for OCELOT_POLICER_DISCARD OCELOT_POLICER_DISCARD helps "kill dropped packets dead" since a PERMIT/DENY mask mode with a port mask of 0 isn't enough to stop the CPU port from receiving packets removed from the forwarding path. The hardcoded initialization done for it in ocelot_vcap_init() is confusing. All we need from it is to have a rate and a burst size of 0. Reuse qos_policer_conf_set() for that purpose. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mscc/ocelot_vcap.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index d8a5e0d3e99c..cdbe29f2ddc7 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -1394,22 +1394,18 @@ static void ocelot_vcap_detect_constants(struct ocelot *ocelot, int ocelot_vcap_init(struct ocelot *ocelot) { - int i; + struct qos_policer_conf cpu_drop = { + .mode = MSCC_QOS_RATE_MODE_DATA, + }; + int ret, i; /* Create a policer that will drop the frames for the cpu. * This policer will be used as action in the acl rules to drop * frames. 
*/ - ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE, - OCELOT_POLICER_DISCARD); + ret = qos_policer_conf_set(ocelot, OCELOT_POLICER_DISCARD, &cpu_drop); + if (ret) + return ret; for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) { struct ocelot_vcap_block *block = &ocelot->block[i]; -- cgit v1.2.3 From 1c1ed5a48411e1686997157c21633653fbe045c6 Mon Sep 17 00:00:00 2001 From: Casper Andersson Date: Tue, 3 May 2022 11:39:22 +0200 Subject: net: sparx5: Add handling of host MDB entries Handle adding and removing MDB entries for host Signed-off-by: Casper Andersson Link: https://lore.kernel.org/r/20220503093922.1630804-1-casper.casan@gmail.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index 5389fffc694a..3429660cd2e5 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -396,6 +396,11 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev, u32 mact_entry; int res, err; + if (netif_is_bridge_master(v->obj.orig_dev)) { + sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid); + return 0; + } + /* When VLAN unaware the vlan value is not parsed and we receive vid 0. * Fall back to bridge vid 1. */ @@ -461,6 +466,11 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev, u32 mact_entry, res, pgid_entry[3]; int err; + if (netif_is_bridge_master(v->obj.orig_dev)) { + sparx5_mact_forget(spx5, v->addr, v->vid); + return 0; + } + if (!br_vlan_enabled(spx5->hw_bridge_dev)) vid = 1; else @@ -500,6 +510,7 @@ static int sparx5_handle_port_obj_add(struct net_device *dev, SWITCHDEV_OBJ_PORT_VLAN(obj)); break; case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: err = sparx5_handle_port_mdb_add(dev, nb, SWITCHDEV_OBJ_PORT_MDB(obj)); break; @@ -552,6 +563,7 @@ static int sparx5_handle_port_obj_del(struct net_device *dev, SWITCHDEV_OBJ_PORT_VLAN(obj)->vid); break; case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: err = sparx5_handle_port_mdb_del(dev, nb, SWITCHDEV_OBJ_PORT_MDB(obj)); break; -- cgit v1.2.3 From 833fbbbbfc8b4b1effb95e1487b196efdaf842ea Mon Sep 17 00:00:00 2001 From: Jeff Daly Date: Thu, 14 Apr 2022 16:21:04 -0400 Subject: ixgbe: Fix module_param allow_unsupported_sfp type The module_param allow_unsupported_sfp should be a boolean to match the type in the ixgbe_hw struct. Signed-off-by: Jeff Daly Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4a4954aa317..e5732a058bec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -151,8 +151,8 @@ MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. 
(Deprecated)"); #endif /* CONFIG_PCI_IOV */ -static unsigned int allow_unsupported_sfp; -module_param(allow_unsupported_sfp, uint, 0); +static bool allow_unsupported_sfp; +module_param(allow_unsupported_sfp, bool, 0); MODULE_PARM_DESC(allow_unsupported_sfp, "Allow unsupported and untested SFP+ modules on 82599-based adapters"); -- cgit v1.2.3 From b35413f415c6469c79b430c52a718de17c62db2f Mon Sep 17 00:00:00 2001 From: Alaa Mohamed Date: Wed, 20 Apr 2022 01:43:13 +0200 Subject: igb: Convert kmap() to kmap_local_page() kmap() is being deprecated and these usages are all local to the thread so there is no reason kmap_local_page() can't be used. Replace kmap() calls with kmap_local_page(). Signed-off-by: Alaa Mohamed Reviewed-by: Ira Weiny Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 2a5782063f4c..c14fc871dd41 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1798,14 +1798,14 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, frame_size >>= 1; - data = kmap(rx_buffer->page); + data = kmap_local_page(rx_buffer->page); if (data[3] != 0xFF || data[frame_size + 10] != 0xBE || data[frame_size + 12] != 0xAF) match = false; - kunmap(rx_buffer->page); + kunmap_local(data); return match; } -- cgit v1.2.3 From 187dbc15d8a7c4437c7757eba8542594d7eab22e Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Mon, 21 Mar 2022 21:59:47 +0800 Subject: ice: use min_t() to make code cleaner in ice_gnss Fix the following coccicheck warning: ./drivers/net/ethernet/intel/ice/ice_gnss.c:79:26-27: WARNING opportunity for min() Signed-off-by: Wan Jiabing Tested-by: Gurucharan (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_gnss.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index 35579cf4283f..57586a2e6dec 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -76,8 +76,7 @@ static void ice_gnss_read(struct kthread_work *work) for (i = 0; i < data_len; i += bytes_read) { u16 bytes_left = data_len - i; - bytes_read = bytes_left < ICE_MAX_I2C_DATA_SIZE ? bytes_left : - ICE_MAX_I2C_DATA_SIZE; + bytes_read = min_t(typeof(bytes_left), bytes_left, ICE_MAX_I2C_DATA_SIZE); err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA), -- cgit v1.2.3 From 295819b562fa13e48d0de72eebae7e75dc7c2cc4 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 24 Mar 2022 12:49:07 +0100 Subject: ice: introduce common helper for retrieving VSI by vsi_num Both ice_idc.c and ice_virtchnl.c carry their own implementation of a helper function that is looking for a given VSI based on provided vsi_num. Their functionality is the same, so let's introduce the common function in ice.h that both of the mentioned sites will use. This is a strictly cleanup thing, no functionality is changed. 
Reviewed-by: Alexander Lobakin Signed-off-by: Maciej Fijalkowski Tested-by: Konrad Jankowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 15 +++++++++++++++ drivers/net/ethernet/intel/ice/ice_idc.c | 15 --------------- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 22 ++-------------------- 3 files changed, 17 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 8ed3c9ab7ff7..2fbb3d737dfd 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -757,6 +757,21 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf) return pf->vsi[pf->ctrl_vsi_idx]; } +/** + * ice_find_vsi - Find the VSI from VSI ID + * @pf: The PF pointer to search in + * @vsi_num: The VSI ID to search for + */ +static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) +{ + int i; + + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) + return pf->vsi[i]; + return NULL; +} + /** * ice_is_switchdev_running - check if switchdev is configured * @pf: pointer to PF structure diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 25a436d342c2..7e941264ae21 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -47,21 +47,6 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) device_unlock(&pf->adev->dev); } -/** - * ice_find_vsi - Find the VSI from VSI ID - * @pf: The PF pointer to search in - * @vsi_num: The VSI ID to search for - */ -static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) -{ - int i; - - ice_for_each_vsi(pf, i) - if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) - return pf->vsi[i]; - return NULL; -} - /** * ice_add_rdma_qset - Add Leaf Node for RDMA Qset * @pf: PF struct diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index b72606c9e6d0..83583ea33a1d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -514,24 +514,6 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) ice_reset_vf(vf, 0); } -/** - * ice_find_vsi_from_id - * @pf: the PF structure to search for the VSI - * @id: ID of the VSI it is searching for - * - * searches for the VSI with the given ID - */ -static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) -{ - int i; - - ice_for_each_vsi(pf, i) - if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) - return pf->vsi[i]; - - return NULL; -} - /** * ice_vc_isvalid_vsi_id * @vf: pointer to the VF info @@ -544,7 +526,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - vsi = ice_find_vsi_from_id(pf, vsi_id); + vsi = ice_find_vsi(pf, vsi_id); return (vsi && (vsi->vf == vf)); } @@ -559,7 +541,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) */ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) { - struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id); /* allocated Tx and Rx queues should be always equal for VF VSI */ return (vsi && (qid < vsi->alloc_txq)); } -- cgit v1.2.3 From bd1ffe8e5df4e24fc637d85b01f40e282c29856d Mon Sep 17 00:00:00 2001 From: Wojciech Drewek Date: Wed, 20 Apr 2022 12:55:41 +0200 Subject: ice: return ENOSPC when exceeding ICE_MAX_CHAIN_WORDS When number of words 
exceeds ICE_MAX_CHAIN_WORDS, -ENOSPC should be returned, not -EINVAL. Do not overwrite this error code in ice_add_tc_flower_adv_fltr. Signed-off-by: Wojciech Drewek Suggested-by: Marcin Szycik Acked-by: Maciej Fijalkowski Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 5 ++++- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 1 - 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 496250f9f8fc..9f0a4dfb4818 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -5992,9 +5992,12 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, word_cnt++; } - if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS) + if (!word_cnt) return -EINVAL; + if (word_cnt > ICE_MAX_CHAIN_WORDS) + return -ENOSPC; + /* locate a dummy packet */ profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type); diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 3acd9f921c44..0a0c55fb8699 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -622,7 +622,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, } else if (ret) { NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter due to error"); - ret = -EIO; goto exit; } -- cgit v1.2.3 From 4b889474adc628326df35bb2196cf36b506c7706 Mon Sep 17 00:00:00 2001 From: Michal Swiatkowski Date: Wed, 20 Apr 2022 13:22:43 +0200 Subject: ice: get switch id on switchdev devices Switch id should be the same for each netdevice on a driver. The id must be unique between devices on the same system, but does not need to be unique between devices on different systems. The switch id is used to locate ports on a switch and to know if aggregated ports belong to the same switch. To meet these requirements, use pci_get_dsn as the switch id value, as this is a unique value for each device on the same system. Implementing switch id is needed by automatic tools for Kubernetes. Set the switch id by setting devlink port attributes and calling devlink_port_attrs_set while creating the pf (for uplink) and vf (for representor) devlink ports.
To get switch id (in switchdev mode): cat /sys/class/net/$PF0/phys_switch_id Signed-off-by: Michal Swiatkowski Signed-off-by: Marcin Szycik Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_devlink.c | 22 ++++++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_main.c | 15 +++++++++++++++ 2 files changed, 37 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index a230edb38466..d12852d698af 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -647,6 +647,23 @@ void ice_devlink_unregister(struct ice_pf *pf) devlink_unregister(priv_to_devlink(pf)); } +/** + * ice_devlink_set_switch_id - Set unique switch id based on pci dsn + * @pf: the PF to create a devlink port for + * @ppid: struct with switch id information + */ +static void +ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = pf->pdev; + u64 id; + + id = pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + int ice_devlink_register_params(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); @@ -704,6 +721,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf) attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = pf->hw.bus.func; + + ice_devlink_set_switch_id(pf, &attrs.switch_id); + devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); @@ -760,6 +780,8 @@ int ice_devlink_create_vf_port(struct ice_vf *vf) attrs.pci_vf.pf = pf->hw.bus.func; attrs.pci_vf.vf = vf->vf_id; + ice_devlink_set_switch_id(pf, &attrs.switch_id); + devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 6d8beb84d852..049a3f48caff 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -296,6 +296,20 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m) return status; } +/** + * ice_get_devlink_port - Get devlink port from netdev + * @netdev: the netdevice structure + */ +static struct devlink_port *ice_get_devlink_port(struct net_device *netdev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if (!ice_is_switchdev_running(pf)) + return NULL; + + return &pf->devlink_port; +} + /** * ice_vsi_sync_fltr - Update the VSI filter list to the HW * @vsi: ptr to the VSI @@ -8926,4 +8940,5 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_bpf = ice_xdp, .ndo_xdp_xmit = ice_xdp_xmit, .ndo_xsk_wakeup = ice_xsk_wakeup, + .ndo_get_devlink_port = ice_get_devlink_port, }; -- cgit v1.2.3 From 9880d3d6f9e3ab264d58368306dd83218ea6a3c1 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 11 Apr 2022 16:29:02 -0700 Subject: ice: add newline to dev_dbg in ice_vf_fdir_dump_info The debug print in ice_vf_fdir_dump_info does not end in newlines. This can look confusing when reading the kernel log, as the next print will immediately continue on the same line. Fix this by adding the forgotten newline. 
Signed-off-by: Jacob Keller Reviewed-by: Paul Menzel Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 8e38ee2faf58..dbc1965c0609 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1349,7 +1349,7 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf) fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num)); - dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x", + dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n", vf->vf_id, (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S, (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S, -- cgit v1.2.3 From baeb705fd6a7245cc1fa69ed991a9cffdf44a174 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 11 Apr 2022 16:29:03 -0700 Subject: ice: always check VF VSI pointer values The ice_get_vf_vsi function can return NULL in some cases, such as if handling messages during a reset where the VSI is being removed and recreated. Several places throughout the driver do not bother to check whether this VSI pointer is valid. Static analysis tools maybe report issues because they detect paths where a potentially NULL pointer could be dereferenced. Fix this by checking the return value of ice_get_vf_vsi everywhere. Signed-off-by: Jacob Keller Reviewed-by: Paul Menzel Tested-by: Konrad Jankowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_devlink.c | 5 +++- drivers/net/ethernet/intel/ice/ice_repr.c | 7 ++++- drivers/net/ethernet/intel/ice/ice_sriov.c | 32 ++++++++++++++++++++-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 28 ++++++++++++++++++- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 5 ++++ drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 7 ++++- 6 files changed, 77 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index d12852d698af..3991d62473bf 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -773,9 +773,12 @@ int ice_devlink_create_vf_port(struct ice_vf *vf) pf = vf->pf; dev = ice_pf_to_dev(pf); - vsi = ice_get_vf_vsi(vf); devlink_port = &vf->devlink_port; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; attrs.pci_vf.pf = pf->hw.bus.func; attrs.pci_vf.vf = vf->vf_id; diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 848f2adea563..a91b81c3088b 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -293,8 +293,13 @@ static int ice_repr_add(struct ice_vf *vf) struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; + struct ice_vsi *vsi; int err; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + repr = kzalloc(sizeof(*repr), GFP_KERNEL); if (!repr) return -ENOMEM; @@ -313,7 +318,7 @@ static int ice_repr_add(struct ice_vf *vf) goto err_alloc; } - repr->src_vsi = ice_get_vf_vsi(vf); + repr->src_vsi = vsi; repr->vf = vf; vf->repr = repr; np = netdev_priv(repr->netdev); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c 
b/drivers/net/ethernet/intel/ice/ice_sriov.c index 0c438219f7a3..bb1721f1321d 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -46,7 +46,12 @@ static void ice_free_vf_entries(struct ice_pf *pf) */ static void ice_vf_vsi_release(struct ice_vf *vf) { - ice_vsi_release(ice_get_vf_vsi(vf)); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + return; + + ice_vsi_release(vsi); ice_vf_invalidate_vsi(vf); } @@ -104,6 +109,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) hw = &pf->hw; vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return; dev = ice_pf_to_dev(pf); wr32(hw, VPINT_ALLOC(vf->vf_id), 0); @@ -341,6 +348,9 @@ static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq) struct ice_hw *hw = &vf->pf->hw; u32 reg; + if (WARN_ON(!vsi)) + return; + /* set regardless of mapping mode */ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M); @@ -386,6 +396,9 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return; + ice_ena_vf_msix_mappings(vf); ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq); } @@ -1128,6 +1141,8 @@ static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq) u16 rxq_idx; vsi = ice_get_vf_vsi(vf); + if (!vsi) + continue; ice_for_each_rxq(vsi, rxq_idx) if (vsi->rxq_map[rxq_idx] == pfq) { @@ -1521,8 +1536,15 @@ static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf) static bool ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate) { - int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf)); - int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + int all_vfs_min_tx_rate; + int link_speed_mbps; + + if (WARN_ON(!vsi)) + return false; + + link_speed_mbps = ice_get_link_speed_mbps(vsi); + all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf); /* this VF's previous rate is being overwritten */ all_vfs_min_tx_rate -= vf->min_tx_rate; @@ -1566,6 +1588,10 @@ ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, goto out_put_vf; vsi = ice_get_vf_vsi(vf); + if (!vsi) { + ret = -EINVAL; + goto out_put_vf; + } /* when max_tx_rate is zero that means no max Tx rate limiting, so only * check if max_tx_rate is non-zero diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 6578059d9479..aefd66a4db80 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -220,8 +220,10 @@ static void ice_vf_clear_counters(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); + if (vsi) + vsi->num_vlan = 0; + vf->num_mac = 0; - vsi->num_vlan = 0; memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); } @@ -251,6 +253,9 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf) struct ice_vsi *vsi = ice_get_vf_vsi(vf); struct ice_pf *pf = vf->pf; + if (WARN_ON(!vsi)) + return -EINVAL; + if (ice_vsi_rebuild(vsi, true)) { dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n", vf->vf_id); @@ -514,6 +519,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) { + err = -EIO; + goto out_unlock; + } ice_dis_vf_qs(vf); @@ -572,6 +581,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) vf->vf_ops->post_vsi_rebuild(vf); vsi = ice_get_vf_vsi(vf); + 
if (WARN_ON(!vsi)) { + err = -EINVAL; + goto out_unlock; + } + ice_eswitch_update_repr(vsi); ice_eswitch_replay_vf_mac_rule(vf); @@ -610,6 +624,9 @@ void ice_dis_vf_qs(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return; + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); ice_vsi_stop_all_rx_rings(vsi); ice_set_vf_state_qs_dis(vf); @@ -790,6 +807,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf) u8 broadcast[ETH_ALEN]; int status; + if (WARN_ON(!vsi)) + return -EINVAL; + if (ice_is_eswitch_mode_switchdev(vf->pf)) return 0; @@ -875,6 +895,9 @@ static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf) struct ice_vsi *vsi = ice_get_vf_vsi(vf); int err; + if (WARN_ON(!vsi)) + return -EINVAL; + if (vf->min_tx_rate) { err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000); if (err) { @@ -938,6 +961,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf) struct device *dev = ice_pf_to_dev(vf->pf); struct ice_vsi *vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return; + ice_vf_set_host_trust_cfg(vf); if (ice_vf_rebuild_host_mac_cfg(vf)) diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 83583ea33a1d..b47577a2841a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -2342,6 +2342,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) } vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index dbc1965c0609..c6a58343d81d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1344,7 +1344,12 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf) pf = vf->pf; hw = &pf->hw; dev = ice_pf_to_dev(pf); - vf_vsi = pf->vsi[vf->lan_vsi_idx]; + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id); + return; + } + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num)); -- cgit v1.2.3 From 00be8197c9741b91700289a33c25b7b6de951cc9 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 11 Apr 2022 16:29:04 -0700 Subject: ice: remove return value comment for ice_reset_all_vfs Since commit fe99d1c06c16 ("ice: make ice_reset_all_vfs void"), the ice_reset_all_vfs function has not returned anything. The function comment still indicated it did. Fix this. While here, also add a line to clarify the function resets all VFs at once in response to hardware resets such as a PF reset. Signed-off-by: Jacob Keller Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index aefd66a4db80..24cf6a5b49fe 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -359,12 +359,12 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * + * Reset all VFs at once, in response to a PF or other device reset. 
+ * * First, tell the hardware to reset each VF, then do all the waiting in one * chunk, and finally finish restoring each VF after the wait. This is useful * during PF routines which need to reset all VFs, as otherwise it must perform * these resets in a serialized fashion. - * - * Returns true if any VFs were reset, and false otherwise. */ void ice_reset_all_vfs(struct ice_pf *pf) { -- cgit v1.2.3 From 19c3e1ede517bf2e4c9ead30316866aa9ac6e331 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 11 Apr 2022 16:29:05 -0700 Subject: ice: fix wording in comment for ice_reset_vf The comment explaining ice_reset_vf has an extraneous "the" with the "if the resets are disabled". Remove it. Signed-off-by: Jacob Keller Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 24cf6a5b49fe..8f875a17755f 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -477,8 +477,8 @@ static void ice_notify_vf_reset(struct ice_vf *vf) * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting * - * Returns 0 if the VF is currently in reset, if the resets are disabled, or - * if the VF resets successfully. Returns an error code if the VF fails to + * Returns 0 if the VF is currently in reset, if resets are disabled, or if + * the VF resets successfully. Returns an error code if the VF fails to * rebuild. */ int ice_reset_vf(struct ice_vf *vf, u32 flags) -- cgit v1.2.3 From 71c114e8753978c0728edf639a17756cf90ae629 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 11 Apr 2022 16:29:06 -0700 Subject: ice: add a function comment for ice_cfg_mac_antispoof This function definition was missing a comment describing its implementation. Add one. Signed-off-by: Jacob Keller Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 8f875a17755f..cd8e6b50968c 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -657,6 +657,13 @@ struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf) return vf->pf->hw.port_info; } +/** + * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior + * @vsi: the VSI to configure + * @enable: whether to enable or disable the spoof checking + * + * Configure a VSI to enable (or disable) spoof checking behavior. + */ static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable) { struct ice_vsi_ctx *ctx; -- cgit v1.2.3 From 4eaf1797bca19ed766dc3d7b607f0b0617214e7e Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 11 Apr 2022 16:29:07 -0700 Subject: ice: remove period on argument description in ice_for_each_vf The ice_for_each_vf macros have comments describing the implementation. One of the arguments has a period on the end, which is not our typical style. Remove the unnecessary period. 
Signed-off-by: Jacob Keller Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 831b667dc5b2..1b4380d6d949 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -176,7 +176,7 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) * ice_for_each_vf - Iterate over each VF entry * @pf: pointer to the PF private structure * @bkt: bucket index used for iteration - * @vf: pointer to the VF entry currently being processed in the loop. + * @vf: pointer to the VF entry currently being processed in the loop * * The bkt variable is an unsigned integer iterator used to traverse the VF * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is. @@ -192,7 +192,7 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU * @pf: pointer to the PF private structure * @bkt: bucket index used for iteration - * @vf: pointer to the VF entry currently being processed in the loop. + * @vf: pointer to the VF entry currently being processed in the loop * * The bkt variable is an unsigned integer iterator used to traverse the VF * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is. -- cgit v1.2.3 From c4a67a21a6d255ddcbaa076c0412aad73c7e0c02 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 2022 08:40:37 -0700 Subject: Revert "Merge branch 'mlxsw-line-card-model'" This reverts commit 5e927a9f4b9f29d78a7c7d66ea717bb5c8bbad8e, reversing changes made to cfc1d91a7d78cf9de25b043d81efcc16966d55b3. The discussion is still ongoing so let's remove the uAPI until the discussion settles. Link: https://lore.kernel.org/all/20220425090021.32e9a98f@kernel.org/ Reviewed-by: Ido Schimmel Link: https://lore.kernel.org/r/20220504154037.539442-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- .../networking/devlink/devlink-linecard.rst | 4 - Documentation/networking/devlink/mlxsw.rst | 33 --- drivers/net/ethernet/mellanox/mlxsw/core.h | 1 - .../net/ethernet/mellanox/mlxsw/core_linecards.c | 237 +--------------- drivers/net/ethernet/mellanox/mlxsw/reg.h | 87 +----- include/net/devlink.h | 18 +- include/uapi/linux/devlink.h | 5 - net/core/devlink.c | 303 +-------------------- .../drivers/net/mlxsw/devlink_linecard.sh | 61 ----- 9 files changed, 10 insertions(+), 739 deletions(-) (limited to 'drivers') diff --git a/Documentation/networking/devlink/devlink-linecard.rst b/Documentation/networking/devlink/devlink-linecard.rst index a98b468ad479..6c0b8928bc13 100644 --- a/Documentation/networking/devlink/devlink-linecard.rst +++ b/Documentation/networking/devlink/devlink-linecard.rst @@ -14,7 +14,6 @@ system. Following operations are provided: * Get a list of supported line card types. * Provision of a slot with specific line card type. * Get and monitor of line card state and its change. - * Get information about line card versions and devices. 
Line card according to the type may contain one or more gearboxes to mux the lanes with certain speed to multiple ports with lanes @@ -121,6 +120,3 @@ Example usage # Set slot 8 to be unprovisioned: $ devlink lc set pci/0000:01:00.0 lc 8 notype - - # Set info for slot 8: - $ devlink lc info pci/0000:01:00.0 lc 8 diff --git a/Documentation/networking/devlink/mlxsw.rst b/Documentation/networking/devlink/mlxsw.rst index 0af345680510..cf857cb4ba8f 100644 --- a/Documentation/networking/devlink/mlxsw.rst +++ b/Documentation/networking/devlink/mlxsw.rst @@ -58,39 +58,6 @@ The ``mlxsw`` driver reports the following versions - running - Three digit firmware version -Line card info versions -======================= - -The ``mlxsw`` driver reports the following versions for line cards - -.. list-table:: devlink line card info versions implemented - :widths: 5 5 90 - - * - Name - - Type - - Description - * - ``hw.revision`` - - fixed - - The hardware revision for this line card - * - ``ini.version`` - - running - - Version of line card INI loaded - -Line card device info versions -============================== - -The ``mlxsw`` driver reports the following versions for line card devices - -.. list-table:: devlink line card device info versions implemented - :widths: 5 5 90 - - * - Name - - Type - - Description - * - ``fw.version`` - - running - - Three digit firmware version - Driver-specific Traps ===================== diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index d008282d7f2e..c2a891287047 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -581,7 +581,6 @@ struct mlxsw_linecard { active:1; u16 hw_revision; u16 ini_version; - struct list_head device_list; }; struct mlxsw_linecard_types_info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 2abd31a62776..5c9869dcf674 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -87,191 +87,11 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard) return linecard->name; } -struct mlxsw_linecard_device_info { - u16 fw_major; - u16 fw_minor; - u16 fw_sub_minor; -}; - -struct mlxsw_linecard_device { - struct list_head list; - u8 index; - struct mlxsw_linecard *linecard; - struct devlink_linecard_device *devlink_device; - struct mlxsw_linecard_device_info info; -}; - -static struct mlxsw_linecard_device * -mlxsw_linecard_device_lookup(struct mlxsw_linecard *linecard, u8 index) -{ - struct mlxsw_linecard_device *device; - - list_for_each_entry(device, &linecard->device_list, list) - if (device->index == index) - return device; - return NULL; -} - -static int mlxsw_linecard_device_attach(struct mlxsw_core *mlxsw_core, - struct mlxsw_linecard *linecard, - u8 device_index, bool flash_owner) -{ - struct mlxsw_linecard_device *device; - int err; - - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) - return -ENOMEM; - device->index = device_index; - device->linecard = linecard; - - device->devlink_device = devlink_linecard_device_create(linecard->devlink_linecard, - device_index, device); - if (IS_ERR(device->devlink_device)) { - err = PTR_ERR(device->devlink_device); - goto err_devlink_linecard_device_attach; - } - - list_add_tail(&device->list, &linecard->device_list); - return 0; - -err_devlink_linecard_device_attach: - kfree(device); - return err; -} - -static void 
mlxsw_linecard_device_detach(struct mlxsw_core *mlxsw_core, - struct mlxsw_linecard *linecard, - struct mlxsw_linecard_device *device) -{ - list_del(&device->list); - devlink_linecard_device_destroy(linecard->devlink_linecard, - device->devlink_device); - kfree(device); -} - -static void mlxsw_linecard_devices_detach(struct mlxsw_linecard *linecard) -{ - struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; - struct mlxsw_linecard_device *device, *tmp; - - list_for_each_entry_safe(device, tmp, &linecard->device_list, list) - mlxsw_linecard_device_detach(mlxsw_core, linecard, device); -} - -static int mlxsw_linecard_devices_attach(struct mlxsw_linecard *linecard) -{ - struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; - u8 msg_seq = 0; - int err; - - do { - char mddq_pl[MLXSW_REG_MDDQ_LEN]; - bool flash_owner; - bool data_valid; - u8 device_index; - - mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, - msg_seq); - err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); - if (err) - return err; - mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, - &data_valid, &flash_owner, - &device_index, NULL, - NULL, NULL); - if (!data_valid) - break; - err = mlxsw_linecard_device_attach(mlxsw_core, linecard, - device_index, flash_owner); - if (err) - goto rollback; - } while (msg_seq); - - return 0; - -rollback: - mlxsw_linecard_devices_detach(linecard); - return err; -} - -static void mlxsw_linecard_device_update(struct mlxsw_linecard *linecard, - u8 device_index, - struct mlxsw_linecard_device_info *info) -{ - struct mlxsw_linecard_device *device; - - device = mlxsw_linecard_device_lookup(linecard, device_index); - if (!device) - return; - device->info = *info; -} - -static int mlxsw_linecard_devices_update(struct mlxsw_linecard *linecard) -{ - struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core; - u8 msg_seq = 0; - - do { - struct mlxsw_linecard_device_info info; - char mddq_pl[MLXSW_REG_MDDQ_LEN]; - bool data_valid; - u8 device_index; - int err; - - mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index, - msg_seq); - err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl); - if (err) - return err; - mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq, - &data_valid, NULL, - &device_index, - &info.fw_major, - &info.fw_minor, - &info.fw_sub_minor); - if (!data_valid) - break; - mlxsw_linecard_device_update(linecard, device_index, &info); - } while (msg_seq); - - return 0; -} - -static int -mlxsw_linecard_device_info_get(struct devlink_linecard_device *devlink_linecard_device, - void *priv, struct devlink_info_req *req, - struct netlink_ext_ack *extack) -{ - struct mlxsw_linecard_device *device = priv; - struct mlxsw_linecard_device_info *info; - struct mlxsw_linecard *linecard; - char buf[32]; - - linecard = device->linecard; - mutex_lock(&linecard->lock); - if (!linecard->active) { - mutex_unlock(&linecard->lock); - return 0; - } - - info = &device->info; - - sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor, - info->fw_sub_minor); - mutex_unlock(&linecard->lock); - - return devlink_info_version_running_put(req, - DEVLINK_INFO_VERSION_GENERIC_FW, - buf); -} - static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard) { linecard->provisioned = false; linecard->ready = false; linecard->active = false; - mlxsw_linecard_devices_detach(linecard); devlink_linecard_provision_fail(linecard->devlink_linecard); } @@ -412,7 +232,6 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, { struct 
mlxsw_linecards *linecards = linecard->linecards; const char *type; - int err; type = mlxsw_linecard_types_lookup(linecards, card_type); mlxsw_linecard_status_event_done(linecard, @@ -430,11 +249,6 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type, return PTR_ERR(type); } } - err = mlxsw_linecard_devices_attach(linecard); - if (err) { - mlxsw_linecard_provision_fail(linecard); - return err; - } linecard->provisioned = true; linecard->hw_revision = hw_revision; linecard->ini_version = ini_version; @@ -447,7 +261,6 @@ static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard) mlxsw_linecard_status_event_done(linecard, MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION); linecard->provisioned = false; - mlxsw_linecard_devices_detach(linecard); devlink_linecard_provision_clear(linecard->devlink_linecard); } @@ -479,18 +292,11 @@ static int mlxsw_linecard_ready_clear(struct mlxsw_linecard *linecard) return 0; } -static int mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) +static void mlxsw_linecard_active_set(struct mlxsw_linecard *linecard) { - int err; - - err = mlxsw_linecard_devices_update(linecard); - if (err) - return err; - mlxsw_linecard_active_ops_call(linecard); linecard->active = true; devlink_linecard_activate(linecard->devlink_linecard); - return 0; } static void mlxsw_linecard_active_clear(struct mlxsw_linecard *linecard) @@ -539,11 +345,8 @@ static int mlxsw_linecard_status_process(struct mlxsw_linecards *linecards, goto out; } - if (active && linecard->active != active) { - err = mlxsw_linecard_active_set(linecard); - if (err) - goto out; - } + if (active && linecard->active != active) + mlxsw_linecard_active_set(linecard); if (!active && linecard->active != active) mlxsw_linecard_active_clear(linecard); @@ -934,44 +737,12 @@ static void mlxsw_linecard_types_get(struct devlink_linecard *devlink_linecard, *type_priv = ini_file; } -static int -mlxsw_linecard_info_get(struct devlink_linecard *devlink_linecard, void *priv, - struct devlink_info_req *req, - struct netlink_ext_ack *extack) -{ - struct mlxsw_linecard *linecard = priv; - char buf[32]; - int err; - - mutex_lock(&linecard->lock); - if (!linecard->provisioned) { - err = 0; - goto unlock; - } - - sprintf(buf, "%d", linecard->hw_revision); - err = devlink_info_version_fixed_put(req, "hw.revision", buf); - if (err) - goto unlock; - - sprintf(buf, "%d", linecard->ini_version); - err = devlink_info_version_running_put(req, "ini.version", buf); - if (err) - goto unlock; - -unlock: - mutex_unlock(&linecard->lock); - return err; -} - static const struct devlink_linecard_ops mlxsw_linecard_ops = { .provision = mlxsw_linecard_provision, .unprovision = mlxsw_linecard_unprovision, .same_provision = mlxsw_linecard_same_provision, .types_count = mlxsw_linecard_types_count, .types_get = mlxsw_linecard_types_get, - .info_get = mlxsw_linecard_info_get, - .device_info_get = mlxsw_linecard_device_info_get, }; struct mlxsw_linecard_status_event { @@ -1069,7 +840,6 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core, linecard->slot_index = slot_index; linecard->linecards = linecards; mutex_init(&linecard->lock); - INIT_LIST_HEAD(&linecard->device_list); devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core), slot_index, &mlxsw_linecard_ops, @@ -1115,7 +885,6 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core, mlxsw_core_flush_owq(); if (linecard->active) mlxsw_linecard_active_clear(linecard); - mlxsw_linecard_devices_detach(linecard); 
devlink_linecard_destroy(linecard->devlink_linecard); mutex_destroy(&linecard->lock); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 078e3aa04383..93af6c974ece 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -11643,11 +11643,7 @@ MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1); enum mlxsw_reg_mddq_query_type { MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1, - MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices - * on the slot, data_valid - * will be '0'. - */ - MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME, + MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME = 3, }; /* reg_mddq_query_type @@ -11661,28 +11657,6 @@ MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8); */ MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4); -/* reg_mddq_response_msg_seq - * Response message sequential number. For a specific request, the response - * message sequential number is the following one. In addition, the last - * message should be 0. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8); - -/* reg_mddq_request_msg_seq - * Request message sequential number. - * The first message number should be 0. - * Access: Index - */ -MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8); - -/* reg_mddq_data_valid - * If set, the data in the data field is valid and contain the information - * for the queried index. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1); - /* reg_mddq_slot_info_provisioned * If set, the INI file is applied and the card is provisioned. * Access: RO @@ -11769,65 +11743,6 @@ mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index, *p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload); } -/* reg_mddq_device_info_flash_owner - * If set, the device is the flash owner. Otherwise, a shared flash - * is used by this device (another device is the flash owner). - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1); - -/* reg_mddq_device_info_device_index - * Device index. The first device should number 0. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8); - -/* reg_mddq_device_info_fw_major - * Major FW version number. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16); - -/* reg_mddq_device_info_fw_minor - * Minor FW version number. - * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16); - -/* reg_mddq_device_info_fw_sub_minor - * Sub-minor FW version number. 
- * Access: RO - */ -MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16); - -static inline void -mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index, - u8 request_msg_seq) -{ - __mlxsw_reg_mddq_pack(payload, slot_index, - MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO); - mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq); -} - -static inline void -mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq, - bool *p_data_valid, bool *p_flash_owner, - u8 *p_device_index, u16 *p_fw_major, - u16 *p_fw_minor, u16 *p_fw_sub_minor) -{ - *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload); - *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload); - if (p_flash_owner) - *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload); - *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload); - if (p_fw_major) - *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload); - if (p_fw_minor) - *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload); - if (p_fw_sub_minor) - *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload); -} - #define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20 /* reg_mddq_slot_ascii_name diff --git a/include/net/devlink.h b/include/net/devlink.h index 062895973656..2a2a2a0c93f7 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -150,9 +150,6 @@ struct devlink_port_new_attrs { sfnum_valid:1; }; -struct devlink_info_req; -struct devlink_linecard_device; - /** * struct devlink_linecard_ops - Linecard operations * @provision: callback to provision the linecard slot with certain @@ -171,8 +168,6 @@ struct devlink_linecard_device; * provisioned. * @types_count: callback to get number of supported types * @types_get: callback to get next type in list - * @info_get: callback to get linecard info - * @device_info_get: callback to get linecard device info */ struct devlink_linecard_ops { int (*provision)(struct devlink_linecard *linecard, void *priv, @@ -187,12 +182,6 @@ struct devlink_linecard_ops { void (*types_get)(struct devlink_linecard *linecard, void *priv, unsigned int index, const char **type, const void **type_priv); - int (*info_get)(struct devlink_linecard *linecard, void *priv, - struct devlink_info_req *req, - struct netlink_ext_ack *extack); - int (*device_info_get)(struct devlink_linecard_device *device, - void *priv, struct devlink_info_req *req, - struct netlink_ext_ack *extack); }; struct devlink_sb_pool_info { @@ -639,6 +628,7 @@ struct devlink_flash_update_params { #define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(1) struct devlink_region; +struct devlink_info_req; /** * struct devlink_region_ops - Region operations @@ -1588,12 +1578,6 @@ struct devlink_linecard * devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, const struct devlink_linecard_ops *ops, void *priv); void devlink_linecard_destroy(struct devlink_linecard *linecard); -struct devlink_linecard_device * -devlink_linecard_device_create(struct devlink_linecard *linecard, - unsigned int device_index, void *priv); -void -devlink_linecard_device_destroy(struct devlink_linecard *linecard, - struct devlink_linecard_device *linecard_device); void devlink_linecard_provision_set(struct devlink_linecard *linecard, const char *type); void devlink_linecard_provision_clear(struct devlink_linecard *linecard); diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index fb8c3864457f..b3d40a5d72ff 100644 --- a/include/uapi/linux/devlink.h +++ 
b/include/uapi/linux/devlink.h @@ -136,8 +136,6 @@ enum devlink_command { DEVLINK_CMD_LINECARD_NEW, DEVLINK_CMD_LINECARD_DEL, - DEVLINK_CMD_LINECARD_INFO_GET, /* can dump */ - /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -577,9 +575,6 @@ enum devlink_attr { DEVLINK_ATTR_LINECARD_STATE, /* u8 */ DEVLINK_ATTR_LINECARD_TYPE, /* string */ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */ - DEVLINK_ATTR_LINECARD_DEVICE_LIST, /* nested */ - DEVLINK_ATTR_LINECARD_DEVICE, /* nested */ - DEVLINK_ATTR_LINECARD_DEVICE_INDEX, /* u32 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 5f441a0e34f4..5cc88490f18f 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -83,11 +83,10 @@ struct devlink_linecard { const struct devlink_linecard_ops *ops; void *priv; enum devlink_linecard_state state; - struct mutex state_lock; /* Protects state and device_list */ + struct mutex state_lock; /* Protects state */ const char *type; struct devlink_linecard_type *types; unsigned int types_count; - struct list_head device_list; }; /** @@ -2059,56 +2058,6 @@ struct devlink_linecard_type { const void *priv; }; -struct devlink_linecard_device { - struct list_head list; - unsigned int index; - void *priv; -}; - -static int -devlink_nl_linecard_device_fill(struct sk_buff *msg, - struct devlink_linecard_device *linecard_device) -{ - struct nlattr *attr; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE); - if (!attr) - return -EMSGSIZE; - if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_DEVICE_INDEX, - linecard_device->index)) { - nla_nest_cancel(msg, attr); - return -EMSGSIZE; - } - nla_nest_end(msg, attr); - - return 0; -} - -static int devlink_nl_linecard_devices_fill(struct sk_buff *msg, - struct devlink_linecard *linecard) -{ - struct devlink_linecard_device *linecard_device; - struct nlattr *attr; - int err; - - if (list_empty(&linecard->device_list)) - return 0; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE_LIST); - if (!attr) - return -EMSGSIZE; - list_for_each_entry(linecard_device, &linecard->device_list, list) { - err = devlink_nl_linecard_device_fill(msg, linecard_device); - if (err) { - nla_nest_cancel(msg, attr); - return err; - } - } - nla_nest_end(msg, attr); - - return 0; -} - static int devlink_nl_linecard_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_linecard *linecard, @@ -2119,7 +2068,6 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, struct devlink_linecard_type *linecard_type; struct nlattr *attr; void *hdr; - int err; int i; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); @@ -2152,10 +2100,6 @@ static int devlink_nl_linecard_fill(struct sk_buff *msg, nla_nest_end(msg, attr); } - err = devlink_nl_linecard_devices_fill(msg, linecard); - if (err) - goto nla_put_failure; - genlmsg_end(msg, hdr); return 0; @@ -2425,191 +2369,6 @@ static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb, return 0; } -struct devlink_info_req { - struct sk_buff *msg; -}; - -static int -devlink_nl_linecard_device_info_fill(struct sk_buff *msg, - struct devlink_linecard *linecard, - struct devlink_linecard_device *linecard_device, - struct netlink_ext_ack *extack) -{ - struct nlattr *attr; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE); - if (!attr) - return -EMSGSIZE; - if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_DEVICE_INDEX, - linecard_device->index)) { - nla_nest_cancel(msg, attr); - return 
-EMSGSIZE; - } - if (linecard->ops->device_info_get) { - struct devlink_info_req req; - int err; - - req.msg = msg; - err = linecard->ops->device_info_get(linecard_device, - linecard_device->priv, - &req, extack); - if (err) { - nla_nest_cancel(msg, attr); - return err; - } - } - nla_nest_end(msg, attr); - - return 0; -} - -static int devlink_nl_linecard_devices_info_fill(struct sk_buff *msg, - struct devlink_linecard *linecard, - struct netlink_ext_ack *extack) -{ - struct devlink_linecard_device *linecard_device; - struct nlattr *attr; - int err; - - if (list_empty(&linecard->device_list)) - return 0; - - attr = nla_nest_start(msg, DEVLINK_ATTR_LINECARD_DEVICE_LIST); - if (!attr) - return -EMSGSIZE; - list_for_each_entry(linecard_device, &linecard->device_list, list) { - err = devlink_nl_linecard_device_info_fill(msg, linecard, - linecard_device, - extack); - if (err) { - nla_nest_cancel(msg, attr); - return err; - } - } - nla_nest_end(msg, attr); - - return 0; -} - -static int -devlink_nl_linecard_info_fill(struct sk_buff *msg, struct devlink *devlink, - struct devlink_linecard *linecard, - enum devlink_command cmd, u32 portid, - u32 seq, int flags, struct netlink_ext_ack *extack) -{ - struct devlink_info_req req; - void *hdr; - int err; - - hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); - if (!hdr) - return -EMSGSIZE; - - err = -EMSGSIZE; - if (devlink_nl_put_handle(msg, devlink)) - goto nla_put_failure; - if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index)) - goto nla_put_failure; - - req.msg = msg; - err = linecard->ops->info_get(linecard, linecard->priv, &req, extack); - if (err) - goto nla_put_failure; - - err = devlink_nl_linecard_devices_info_fill(msg, linecard, extack); - if (err) - goto nla_put_failure; - - genlmsg_end(msg, hdr); - return 0; - -nla_put_failure: - genlmsg_cancel(msg, hdr); - return err; -} - -static int devlink_nl_cmd_linecard_info_get_doit(struct sk_buff *skb, - struct genl_info *info) -{ - struct devlink_linecard *linecard = info->user_ptr[1]; - struct devlink *devlink = linecard->devlink; - struct sk_buff *msg; - int err; - - if (!linecard->ops->info_get) - return -EOPNOTSUPP; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - mutex_lock(&linecard->state_lock); - err = devlink_nl_linecard_info_fill(msg, devlink, linecard, - DEVLINK_CMD_LINECARD_INFO_GET, - info->snd_portid, info->snd_seq, 0, - info->extack); - mutex_unlock(&linecard->state_lock); - if (err) { - nlmsg_free(msg); - return err; - } - - return genlmsg_reply(msg, info); -} - -static int devlink_nl_cmd_linecard_info_get_dumpit(struct sk_buff *msg, - struct netlink_callback *cb) -{ - struct devlink_linecard *linecard; - struct devlink *devlink; - int start = cb->args[0]; - unsigned long index; - int idx = 0; - int err = 0; - - mutex_lock(&devlink_mutex); - xa_for_each_marked(&devlinks, index, devlink, DEVLINK_REGISTERED) { - if (!devlink_try_get(devlink)) - continue; - - if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) - goto retry; - - mutex_lock(&devlink->linecards_lock); - list_for_each_entry(linecard, &devlink->linecard_list, list) { - if (idx < start || !linecard->ops->info_get) { - idx++; - continue; - } - mutex_lock(&linecard->state_lock); - err = devlink_nl_linecard_info_fill(msg, devlink, linecard, - DEVLINK_CMD_LINECARD_INFO_GET, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - NLM_F_MULTI, - cb->extack); - mutex_unlock(&linecard->state_lock); - if (err) { - mutex_unlock(&devlink->linecards_lock); - 
devlink_put(devlink); - goto out; - } - idx++; - } - mutex_unlock(&devlink->linecards_lock); -retry: - devlink_put(devlink); - } -out: - mutex_unlock(&devlink_mutex); - - if (err != -EMSGSIZE) - return err; - - cb->args[0] = idx; - return msg->len; -} - static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, enum devlink_command cmd, u32 portid, @@ -6602,6 +6361,10 @@ out_dev: return err; } +struct devlink_info_req { + struct sk_buff *msg; +}; + int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name) { return nla_put_string(req->msg, DEVLINK_ATTR_INFO_DRIVER_NAME, name); @@ -9321,13 +9084,6 @@ static const struct genl_small_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, }, - { - .cmd = DEVLINK_CMD_LINECARD_INFO_GET, - .doit = devlink_nl_cmd_linecard_info_get_doit, - .dumpit = devlink_nl_cmd_linecard_info_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD, - /* can be retrieved by unprivileged users */ - }, { .cmd = DEVLINK_CMD_SB_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, @@ -10508,7 +10264,6 @@ devlink_linecard_create(struct devlink *devlink, unsigned int linecard_index, linecard->priv = priv; linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; mutex_init(&linecard->state_lock); - INIT_LIST_HEAD(&linecard->device_list); err = devlink_linecard_types_init(linecard); if (err) { @@ -10536,7 +10291,6 @@ void devlink_linecard_destroy(struct devlink_linecard *linecard) struct devlink *devlink = linecard->devlink; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); - WARN_ON(!list_empty(&linecard->device_list)); mutex_lock(&devlink->linecards_lock); list_del(&linecard->list); devlink_linecard_types_fini(linecard); @@ -10545,52 +10299,6 @@ void devlink_linecard_destroy(struct devlink_linecard *linecard) } EXPORT_SYMBOL_GPL(devlink_linecard_destroy); -/** - * devlink_linecard_device_create - Create a device on linecard - * - * @linecard: devlink linecard - * @device_index: index of the linecard device - * @priv: user priv pointer - * - * Return: Line card device structure or an ERR_PTR() encoded error code. 
- */ -struct devlink_linecard_device * -devlink_linecard_device_create(struct devlink_linecard *linecard, - unsigned int device_index, void *priv) -{ - struct devlink_linecard_device *linecard_device; - - linecard_device = kzalloc(sizeof(*linecard_device), GFP_KERNEL); - if (!linecard_device) - return ERR_PTR(-ENOMEM); - linecard_device->index = device_index; - linecard_device->priv = priv; - mutex_lock(&linecard->state_lock); - list_add_tail(&linecard_device->list, &linecard->device_list); - devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); - mutex_unlock(&linecard->state_lock); - return linecard_device; -} -EXPORT_SYMBOL_GPL(devlink_linecard_device_create); - -/** - * devlink_linecard_device_destroy - Destroy device on linecard - * - * @linecard: devlink linecard - * @linecard_device: devlink linecard device - */ -void -devlink_linecard_device_destroy(struct devlink_linecard *linecard, - struct devlink_linecard_device *linecard_device) -{ - mutex_lock(&linecard->state_lock); - list_del(&linecard_device->list); - devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); - mutex_unlock(&linecard->state_lock); - kfree(linecard_device); -} -EXPORT_SYMBOL_GPL(devlink_linecard_device_destroy); - /** * devlink_linecard_provision_set - Set provisioning on linecard * @@ -10623,7 +10331,6 @@ EXPORT_SYMBOL_GPL(devlink_linecard_provision_set); void devlink_linecard_provision_clear(struct devlink_linecard *linecard) { mutex_lock(&linecard->state_lock); - WARN_ON(!list_empty(&linecard->device_list)); linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED; linecard->type = NULL; devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW); diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh index 53a65f416770..08a922d8b86a 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh @@ -152,7 +152,6 @@ unprovision_test() LC_16X100G_TYPE="16x100G" LC_16X100G_PORT_COUNT=16 -LC_16X100G_DEVICE_COUNT=4 supported_types_check() { @@ -178,42 +177,6 @@ supported_types_check() check_err $? "16X100G not found between supported types of linecard $lc" } -lc_info_check() -{ - local lc=$1 - local fixed_hw_revision - local running_ini_version - - fixed_hw_revision=$(devlink lc -v info $DEVLINK_DEV lc $lc -j | \ - jq -e -r '.[][][].versions.fixed."hw.revision"') - check_err $? "Failed to get linecard $lc fixed.hw.revision" - log_info "Linecard $lc fixed.hw.revision: \"$fixed_hw_revision\"" - running_ini_version=$(devlink lc -v info $DEVLINK_DEV lc $lc -j | \ - jq -e -r '.[][][].versions.running."ini.version"') - check_err $? "Failed to get linecard $lc running.ini.version" - log_info "Linecard $lc running.ini.version: \"$running_ini_version\"" -} - -lc_devices_check() -{ - local lc=$1 - local expected_device_count=$2 - local device_count - local device - - device_count=$(devlink lc show $DEVLINK_DEV lc $lc -j | \ - jq -e -r ".[][][].devices |length") - check_err $? "Failed to get linecard $lc device count" - [ $device_count != 0 ] - check_err $? "No device found on linecard $lc" - [ $device_count == $expected_device_count ] - check_err $? "Unexpected device count on linecard $lc (got $expected_device_count, expected $device_count)" - for (( device=0; device Date: Wed, 4 May 2022 13:16:09 +0200 Subject: sungem: Prepare cleanup of powerpc's asm/prom.h powerpc's includes some headers that it doesn't need itself. 
In order to clean powerpc's asm/prom.h up in a further step, first clean all files that include asm/prom.h. sungem_phy.c doesn't use any object provided by asm/prom.h. But removing inclusion of asm/prom.h leads to the following errors: CC drivers/net/sungem_phy.o drivers/net/sungem_phy.c: In function 'bcm5421_init': drivers/net/sungem_phy.c:448:42: error: implicit declaration of function 'of_get_parent'; did you mean 'dget_parent'? [-Werror=implicit-function-declaration] 448 | struct device_node *np = of_get_parent(phy->platform_data); | ^~~~~~~~~~~~~ | dget_parent drivers/net/sungem_phy.c:448:42: warning: initialization of 'struct device_node *' from 'int' makes pointer from integer without a cast [-Wint-conversion] drivers/net/sungem_phy.c:450:35: error: implicit declaration of function 'of_get_property' [-Werror=implicit-function-declaration] 450 | if (np == NULL || of_get_property(np, "no-autolowpower", NULL)) | ^~~~~~~~~~~~~~~ Remove asm/prom.h from included headers but add linux/of.h to handle the above. Signed-off-by: Christophe Leroy Link: https://lore.kernel.org/r/f7a7fab3ec5edf803d934fca04df22631c2b449d.1651662885.git.christophe.leroy@csgroup.eu Signed-off-by: Jakub Kicinski --- drivers/net/sungem_phy.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index 4daac5fda073..ff22b6b1c686 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -29,11 +29,7 @@ #include #include #include - -#ifdef CONFIG_PPC_PMAC -#include <asm/prom.h> -#endif - +#include <linux/of.h> #include /* Link modes of the BCM5400 PHY */ -- cgit v1.2.3 From 6bff3ffcf6eed522cfc302e2d934b9a875d20cd2 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 4 May 2022 13:32:17 +0200 Subject: net: ethernet: Prepare cleanup of powerpc's asm/prom.h powerpc's asm/prom.h includes some headers that it doesn't need itself. In order to clean powerpc's asm/prom.h up in a further step, first clean all files that include asm/prom.h. Some files don't need asm/prom.h at all. For those ones, just remove inclusion of asm/prom.h. Some files don't need any of the items provided by asm/prom.h, but need some of the headers included by asm/prom.h. For those ones, add the needed headers that are brought by asm/prom.h at the moment and remove asm/prom.h. Some files really need asm/prom.h but also need some of the headers included by asm/prom.h. For those ones, leave asm/prom.h but also add the needed headers so that they can be removed from asm/prom.h in a later step.
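As an aside, not taken from the series: a minimal sketch of the second case, where a file only needs the OF helpers that asm/prom.h happened to pull in. The driver, function and property names below are hypothetical; the point is that of_get_parent(), of_get_property() and of_node_put() are declared in linux/of.h, so that header can be included directly instead of relying on asm/prom.h.

#include <linux/of.h>

/* Hypothetical helper that only uses OF accessors. */
static bool example_no_autolowpower(struct device_node *node)
{
        struct device_node *parent = of_get_parent(node);
        bool ret;

        ret = parent && of_get_property(parent, "no-autolowpower", NULL);
        of_node_put(parent);    /* of_get_parent() takes a reference */
        return ret;
}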
Signed-off-by: Christophe Leroy Link: https://lore.kernel.org/r/09a13d592d628de95d30943e59b2170af5b48110.1651663857.git.christophe.leroy@csgroup.eu Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/apple/bmac.c | 1 - drivers/net/ethernet/apple/mace.c | 1 - drivers/net/ethernet/freescale/fec_mpc52xx.c | 2 ++ drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 1 + drivers/net/ethernet/ibm/ehea/ehea.h | 1 + drivers/net/ethernet/ibm/ehea/ehea_main.c | 2 ++ drivers/net/ethernet/ibm/ibmvnic.c | 1 + drivers/net/ethernet/sun/sungem.c | 1 - drivers/net/ethernet/toshiba/spider_net.c | 1 + 9 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 4d2ba30c2fbd..334de0d93c89 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 6f8c91eb1263..d0a771b65e88 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index be0bd4b44926..5ddb769bdfb4 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -29,7 +29,9 @@ #include #include #include +#include #include +#include #include #include #include diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index b5497e308302..f85b5e81dfc1 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h index b140835d4c23..208c440a602b 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea.h +++ b/drivers/net/ethernet/ibm/ehea/ehea.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index bad94e4d50f4..8ce3348edf08 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3699c5435396..7e7fe5bdf1f8 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 036856102c50..45bd89153de2 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -52,7 +52,6 @@ #endif #ifdef CONFIG_PPC_PMAC -#include #include #include #endif diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index c09cd961edbb..bc4914c758ad 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include "spider_net.h" -- cgit v1.2.3 From fd49f8e61cd3a62251838e47507ee6a423e88214 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 2022 
09:39:39 -0700 Subject: jme: remove an unnecessary indirection Remove a define which looks like a OS abstraction layer and makes spatch conversions on this driver problematic. Link: https://lore.kernel.org/r/20220504163939.551231-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/jme.c | 2 +- drivers/net/ethernet/jme.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b6c5122da995..f43d6616bc0d 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3009,7 +3009,7 @@ jme_init_one(struct pci_dev *pdev, jwrite32(jme, JME_APMC, apmc); } - NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) + netif_napi_add(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT); spin_lock_init(&jme->phy_lock); spin_lock_init(&jme->macaddr_lock); diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 2af76329b4a2..860494ff3714 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -379,8 +379,6 @@ struct jme_ring { #define DECLARE_NET_DEVICE_STATS #define DECLARE_NAPI_STRUCT struct napi_struct napi; -#define NETIF_NAPI_SET(dev, napis, pollfn, q) \ - netif_napi_add(dev, napis, pollfn, q); #define JME_NAPI_HOLDER(holder) struct napi_struct *holder #define JME_NAPI_WEIGHT(w) int w #define JME_NAPI_WEIGHT_VAL(w) w -- cgit v1.2.3 From 16d083e28f1a4f6deef82be92d6a0f5aa2fe7e08 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 2022 09:37:24 -0700 Subject: net: switch to netif_napi_add_tx() Switch net callers to the new API not requiring the NAPI_POLL_WEIGHT argument. Acked-by: Florian Fainelli Reviewed-by: Alex Elder Acked-by: Mat Martineau Acked-by: Alexandra Winter Link: https://lore.kernel.org/r/20220504163725.550782-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bcm4908_enet.c | 2 +- drivers/net/ethernet/broadcom/bcmsysport.c | 2 +- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 3 +-- drivers/net/ethernet/fungible/funeth/funeth_main.c | 3 +-- drivers/net/ethernet/lantiq_xrx200.c | 4 ++-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 3 +-- drivers/net/ethernet/microsoft/mana/mana_en.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 9 ++++----- drivers/net/ethernet/rocker/rocker_main.c | 3 +-- drivers/net/ethernet/socionext/sni_ave.c | 3 +-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 ++--- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 4 ++-- drivers/net/ethernet/ti/cpsw.c | 5 ++--- drivers/net/ethernet/ti/cpsw_new.c | 5 ++--- drivers/net/ethernet/ti/netcp_core.c | 2 +- drivers/net/ipa/gsi.c | 4 ++-- drivers/net/tun.c | 3 +-- drivers/s390/net/qeth_core_main.c | 3 +-- net/mptcp/protocol.c | 4 ++-- 19 files changed, 29 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 4a2622b05ee1..c131d8118489 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -722,7 +722,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev) netdev->min_mtu = ETH_ZLEN; netdev->mtu = ETH_DATA_LEN; netdev->max_mtu = ENET_MTU_MAX; - netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx); netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT); err = register_netdev(netdev); diff --git 
a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 60dde29974bf..a4ce96bb3903 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1517,7 +1517,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* Initialize SW view of the ring */ spin_lock_init(&ring->lock); ring->priv = priv; - netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); + netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll); ring->index = index; ring->size = size; ring->clean_index = 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index bf1ec8fdc2ad..65606351634e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2671,8 +2671,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, DMA_END_ADDR); /* Initialize Tx NAPI */ - netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll); } /* Initialize a RDMA ring */ diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c index 67dd02ed1fa3..9485cf699c5d 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_main.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c @@ -330,8 +330,7 @@ static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx, return PTR_ERR(irq); fp->num_tx_irqs++; - netif_tx_napi_add(dev, &irq->napi, fun_txq_napi_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll); } for (i = fp->num_rx_irqs; i < nrx; i++) { diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 5712c3e94be8..5edb68a8aab1 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -615,8 +615,8 @@ static int xrx200_probe(struct platform_device *pdev) /* setup NAPI */ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, NAPI_POLL_WEIGHT); - netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(net_dev, &priv->chan_tx.napi, + xrx200_tx_housekeeping); platform_set_drvdata(pdev, priv); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index d5fc72b1a36f..6affbd241264 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -147,8 +147,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, switch (cq->type) { case TX: cq->mcq.comp = mlx4_en_tx_irq; - netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq); napi_enable(&cq->napi); break; case RX: diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index b7d3ba1b4d17..06f853c5c141 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1371,7 +1371,7 @@ static int mana_create_txq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT); + netif_napi_add_tx(net, &cq->napi, mana_poll); napi_enable(&cq->napi); mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index bcf3746220df..8d43ca282956 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1608,8 +1608,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; - netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(netdev, &tx_ring->napi, + qlcnic_tx_poll); } } @@ -2138,9 +2138,8 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { for (ring = 0; ring < adapter->drv_tx_rings; ring++) { tx_ring = &adapter->tx_ring[ring]; - netif_tx_napi_add(netdev, &tx_ring->napi, - qlcnic_83xx_msix_tx_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(netdev, &tx_ring->napi, + qlcnic_83xx_msix_tx_poll); } } diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 3fcea211716c..fc83ec23bd1d 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2573,8 +2573,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port_dev_addr_init(rocker_port); dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; - netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(dev, &rocker_port->napi_tx, rocker_port_poll_tx); netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, NAPI_POLL_WEIGHT); rocker_carrier_init(rocker_port); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 2c48f8b8ab71..f0c8de2c6075 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1689,8 +1689,7 @@ static int ave_probe(struct platform_device *pdev) /* Register as a NAPI supported driver */ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, NAPI_POLL_WEIGHT); - netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx); platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 65ae3ae9582f..3b81d4e9dc83 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -6759,9 +6759,8 @@ static void stmmac_napi_add(struct net_device *dev) NAPI_POLL_WEIGHT); } if (queue < priv->plat->tx_queues_to_use) { - netif_tx_napi_add(dev, &ch->tx_napi, - stmmac_napi_poll_tx, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(dev, &ch->tx_napi, + stmmac_napi_poll_tx); } if (queue < priv->plat->rx_queues_to_use && queue < priv->plat->tx_queues_to_use) { diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index b7ebd741f284..34197c67f8d9 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2032,8 +2032,8 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) for (i = 0; i < common->tx_ch_num; i++) { struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx, - am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT); + netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, + am65_cpsw_nuss_tx_poll); ret = 
devm_request_irq(dev, tx_chn->irq, am65_cpsw_nuss_tx_irq, diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 662435e36805..ed66c4d4d830 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1640,9 +1640,8 @@ static int cpsw_probe(struct platform_device *pdev) netif_napi_add(ndev, &cpsw->napi_rx, cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, NAPI_POLL_WEIGHT); - netif_tx_napi_add(ndev, &cpsw->napi_tx, - cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(ndev, &cpsw->napi_tx, + cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll); /* register the network device */ SET_NETDEV_DEV(ndev, dev); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 299319320830..353e58b22c51 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1420,10 +1420,9 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, NAPI_POLL_WEIGHT); - netif_tx_napi_add(ndev, &cpsw->napi_tx, + netif_napi_add_tx(ndev, &cpsw->napi_tx, cpsw->quirk_irq ? - cpsw_tx_poll : cpsw_tx_mq_poll, - NAPI_POLL_WEIGHT); + cpsw_tx_poll : cpsw_tx_mq_poll); } napi_ndev = ndev; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 21b0e961eab5..b15d44261e76 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2096,7 +2096,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, /* NAPI register */ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NAPI_POLL_WEIGHT); - netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NAPI_POLL_WEIGHT); + netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll); /* Register the network device */ ndev->dev_id = 0; diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index bc981043cc80..db4cb2de218c 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1614,8 +1614,8 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) gsi_channel_program(channel, true); if (channel->toward_ipa) - netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, - gsi_channel_poll, NAPI_POLL_WEIGHT); + netif_napi_add_tx(&gsi->dummy_dev, &channel->napi, + gsi_channel_poll); else netif_napi_add(&gsi->dummy_dev, &channel->napi, gsi_channel_poll, NAPI_POLL_WEIGHT); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index dbe4c0a4be2c..87a635aac008 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -268,8 +268,7 @@ static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, tfile->napi_enabled = napi_en; tfile->napi_frags_enabled = napi_en && napi_frags; if (napi_en) { - netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll); napi_enable(&tfile->napi); } } diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ae85179ca49a..9e54fe76a9b2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -7099,8 +7099,7 @@ int qeth_open(struct net_device *dev) local_bh_disable(); qeth_for_each_output_queue(card, queue, i) { - netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll); napi_enable(&queue->napi); napi_schedule(&queue->napi); } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 52ed2c0ac901..7a9e2545884f 100644 --- a/net/mptcp/protocol.c +++ 
b/net/mptcp/protocol.c @@ -3786,8 +3786,8 @@ void __init mptcp_proto_init(void) for_each_possible_cpu(cpu) { delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu); INIT_LIST_HEAD(&delegated->head); - netif_tx_napi_add(&mptcp_napi_dev, &delegated->napi, mptcp_napi_poll, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(&mptcp_napi_dev, &delegated->napi, + mptcp_napi_poll); napi_enable(&delegated->napi); } -- cgit v1.2.3 From 8d602e1a132e91eb7bc1a2cbe96019371eb8da64 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 2022 09:37:25 -0700 Subject: net: move snowflake callers to netif_napi_add_tx_weight() Make the drivers with custom tx napi weight call netif_napi_add_tx_weight(). Reviewed-by: Xuan Zhuo Link: https://lore.kernel.org/r/20220504163725.550782-2-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/freescale/gianfar.c | 4 ++-- drivers/net/ethernet/microchip/lan743x_main.c | 6 +++--- drivers/net/virtio_net.c | 5 +++-- 3 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index f0b652a65043..3dc9369a33f7 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3233,8 +3233,8 @@ static int gfar_probe(struct platform_device *ofdev) for (i = 0; i < priv->num_grps; i++) { netif_napi_add(dev, &priv->gfargrp[i].napi_rx, gfar_poll_rx_sq, NAPI_POLL_WEIGHT); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx_sq, 2); + netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 9ac0c2b96a15..efbddf24ba31 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2044,9 +2044,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx) tx->vector_flags = lan743x_intr_get_vector_flags(adapter, INT_BIT_DMA_TX_ (tx->channel_number)); - netif_tx_napi_add(adapter->netdev, - &tx->napi, lan743x_tx_napi_poll, - tx->ring_size - 1); + netif_napi_add_tx_weight(adapter->netdev, + &tx->napi, lan743x_tx_napi_poll, + tx->ring_size - 1); napi_enable(&tx->napi); data = 0; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index cbba9d2e8f32..ebb98b796352 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3315,8 +3315,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) vi->rq[i].pages = NULL; netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight); - netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, - napi_tx ? napi_weight : 0); + netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, + virtnet_poll_tx, + napi_tx ? napi_weight : 0); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); -- cgit v1.2.3 From 10b4a11fe70f295426fb4ebbb8b20370885c3806 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 4 May 2022 12:14:07 +0300 Subject: firmware: tee_bnxt: Use UUID API for exporting the UUID There is export_uuid() function which exports uuid_t to the u8 array. Use it instead of open coding variant. This allows to hide the uuid_t internals. 
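A hedged aside, not from the patch: both variants below copy the same 16 raw bytes, but export_uuid() keeps knowledge of the uuid_t layout inside linux/uuid.h instead of open coding it at the call site. The two function names are made up for the example; export_uuid(), UUID_SIZE and uuid_t are the existing kernel API.

#include <linux/string.h>
#include <linux/uuid.h>

/* Open-coded variant: reaches into the uuid_t internals (the .b array). */
static void copy_uuid_open_coded(u8 dst[UUID_SIZE], const uuid_t *src)
{
        memcpy(dst, src->b, UUID_SIZE);
}

/* Helper variant: the layout stays hidden behind export_uuid(). */
static void copy_uuid_exported(u8 dst[UUID_SIZE], const uuid_t *src)
{
        export_uuid(dst, src);
}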
Signed-off-by: Andy Shevchenko Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220504091407.70661-1-andriy.shevchenko@linux.intel.com Signed-off-by: Jakub Kicinski --- drivers/firmware/broadcom/tee_bnxt_fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/firmware/broadcom/tee_bnxt_fw.c b/drivers/firmware/broadcom/tee_bnxt_fw.c index a5bf4c3f6dc7..40e3183a3d11 100644 --- a/drivers/firmware/broadcom/tee_bnxt_fw.c +++ b/drivers/firmware/broadcom/tee_bnxt_fw.c @@ -197,7 +197,7 @@ static int tee_bnxt_fw_probe(struct device *dev) return -ENODEV; /* Open session with Bnxt load Trusted App */ - memcpy(sess_arg.uuid, bnxt_device->id.uuid.b, TEE_IOCTL_UUID_LEN); + export_uuid(sess_arg.uuid, &bnxt_device->id.uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; -- cgit v1.2.3 From f1c5d4ded782800d58321a9f10d0caff2af56fc3 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 2022 09:33:14 -0700 Subject: wil6210: switch to netif_napi_add_tx() Switch to the new API not requiring passing in NAPI_POLL_WEIGHT. Signed-off-by: Jakub Kicinski Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504163316.549648-2-kuba@kernel.org --- drivers/net/wireless/ath/wil6210/netdev.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 390648066382..87a88f26233e 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -458,16 +458,14 @@ int wil_if_add(struct wil6210_priv *wil) netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx_edma, NAPI_POLL_WEIGHT); - netif_tx_napi_add(&wil->napi_ndev, - &wil->napi_tx, wil6210_netdev_poll_tx_edma, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(&wil->napi_ndev, + &wil->napi_tx, wil6210_netdev_poll_tx_edma); } else { netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx, NAPI_POLL_WEIGHT); - netif_tx_napi_add(&wil->napi_ndev, - &wil->napi_tx, wil6210_netdev_poll_tx, - NAPI_POLL_WEIGHT); + netif_napi_add_tx(&wil->napi_ndev, + &wil->napi_tx, wil6210_netdev_poll_tx); } wil_update_net_queues_bh(wil, vif, NULL, true); -- cgit v1.2.3 From 3ed27b602cc3e13e37a411980083bb850aff0a01 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 2022 09:33:15 -0700 Subject: mt76: switch to netif_napi_add_tx() Switch to the new API not requiring passing in NAPI_POLL_WEIGHT. 
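For orientation only (the example identifiers are hypothetical, not mt76 code): the conversion made throughout this series just drops the explicit default weight, which the Tx helper now supplies on its own.

#include <linux/netdevice.h>

static int example_tx_poll(struct napi_struct *napi, int budget);

static void example_register_tx_napi(struct net_device *dev,
                                     struct napi_struct *napi)
{
        /* Old style: every caller spelled out the default weight:
         * netif_tx_napi_add(dev, napi, example_tx_poll, NAPI_POLL_WEIGHT);
         */

        /* New style: NAPI_POLL_WEIGHT is implied by the Tx helper. */
        netif_napi_add_tx(dev, napi, example_tx_poll);
}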
Signed-off-by: Jakub Kicinski Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504163316.549648-3-kuba@kernel.org --- drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 4 ++-- drivers/net/wireless/mediatek/mt76/mt7615/dma.c | 4 ++-- drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 4 ++-- drivers/net/wireless/mediatek/mt76/mt7915/dma.c | 4 ++-- drivers/net/wireless/mediatek/mt76/mt7921/dma.c | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 37b092e3ea51..2c1c79dc7fda 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -223,8 +223,8 @@ int mt7603_dma_init(struct mt7603_dev *dev) if (ret) return ret; - netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, - mt7603_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt7603_poll_tx); napi_enable(&dev->mt76.tx_napi); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 00aefea1bf61..95dbb413678a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -284,8 +284,8 @@ int mt7615_dma_init(struct mt7615_dev *dev) if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, - mt7615_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt7615_poll_tx); napi_enable(&dev->mt76.tx_napi); mt76_poll(dev, MT_WPDMA_GLO_CFG, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 8bcd8afa0d3a..23d2864d37fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -230,8 +230,8 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) if (ret) return ret; - netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, - mt76x02_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt76x02_poll_tx); napi_enable(&dev->mt76.tx_napi); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index 49b4d8ade16b..75678aabff32 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -434,8 +434,8 @@ int mt7915_dma_init(struct mt7915_dev *dev) if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, - mt7915_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt7915_poll_tx); napi_enable(&dev->mt76.tx_napi); mt7915_dma_enable(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c index ca7e20fb5fc0..34b3effe14f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c @@ -296,8 +296,8 @@ int mt7921_dma_init(struct mt7921_dev *dev) if (ret < 0) return ret; - netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, - mt7921_poll_tx, NAPI_POLL_WEIGHT); + netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + mt7921_poll_tx); napi_enable(&dev->mt76.tx_napi); return mt7921_dma_enable(dev); -- cgit v1.2.3 From 193eb523d27c4225aa6cf6fe7debac4d3eac942f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 May 
2022 09:33:16 -0700 Subject: qtnfmac: switch to netif_napi_add_weight() qtnfmac chooses its own magic NAPI weight so switch to the new API created for those who don't use NAPI_POLL_WEIGHT. Signed-off-by: Jakub Kicinski Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504163316.549648-4-kuba@kernel.org --- drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 4 ++-- drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c index 840728ed57b2..8c23a77d1671 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -1146,8 +1146,8 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size, } tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn); - netif_napi_add(&bus->mux_dev, &bus->mux_napi, - qtnf_pcie_pearl_rx_poll, 10); + netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi, + qtnf_pcie_pearl_rx_poll, 10); ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int; ipc_int.arg = ps; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c index 9534e1b33780..d83362578374 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -1159,8 +1159,8 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, } tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn); - netif_napi_add(&bus->mux_dev, &bus->mux_napi, - qtnf_topaz_rx_poll, 10); + netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi, + qtnf_topaz_rx_poll, 10); ipc_int.fn = qtnf_topaz_ipc_gen_ep_int; ipc_int.arg = ts; -- cgit v1.2.3 From 454744754cbf2c21b3fc7344e46e10bee2768094 Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Mon, 2 May 2022 14:38:32 +0200 Subject: wl1251: dynamically allocate memory used for DMA With introduction of vmap'ed stacks, stack parameters can no longer be used for DMA and now leads to kernel panic. It happens at several places for the wl1251 (e.g. when accessed through SDIO) making it unuseable on e.g. the OpenPandora. We solve this by allocating temporary buffers or use wl1251_read32(). Tested on v5.18-rc5 with OpenPandora. Fixes: a1c510d0adc6 ("ARM: implement support for vmap'ed stacks") Signed-off-by: H. 
Nikolaus Schaller Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1676021ae8b6d7aada0b1806fed99b1b8359bdc4.1651495112.git.hns@goldelico.com --- drivers/net/wireless/ti/wl1251/event.c | 22 ++++++++++++++-------- drivers/net/wireless/ti/wl1251/io.c | 20 ++++++++++++++------ drivers/net/wireless/ti/wl1251/tx.c | 15 +++++++++++---- 3 files changed, 39 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index e6d426edab56..e945aafd88ee 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -169,11 +169,9 @@ int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms) msleep(1); /* read from both event fields */ - wl1251_mem_read(wl, wl->mbox_ptr[0], &events_vector, - sizeof(events_vector)); + events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[0]); event = events_vector & mask; - wl1251_mem_read(wl, wl->mbox_ptr[1], &events_vector, - sizeof(events_vector)); + events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[1]); event |= events_vector & mask; } while (!event); @@ -202,7 +200,7 @@ void wl1251_event_mbox_config(struct wl1251 *wl) int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num) { - struct event_mailbox mbox; + struct event_mailbox *mbox; int ret; wl1251_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); @@ -210,12 +208,20 @@ int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num) if (mbox_num > 1) return -EINVAL; + mbox = kmalloc(sizeof(*mbox), GFP_KERNEL); + if (!mbox) { + wl1251_error("can not allocate mbox buffer"); + return -ENOMEM; + } + /* first we read the mbox descriptor */ - wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox, - sizeof(struct event_mailbox)); + wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], mbox, + sizeof(*mbox)); /* process the descriptor */ - ret = wl1251_event_process(wl, &mbox); + ret = wl1251_event_process(wl, mbox); + kfree(mbox); + if (ret < 0) return ret; diff --git a/drivers/net/wireless/ti/wl1251/io.c b/drivers/net/wireless/ti/wl1251/io.c index 5ebe7958ed5c..e8d567af74b4 100644 --- a/drivers/net/wireless/ti/wl1251/io.c +++ b/drivers/net/wireless/ti/wl1251/io.c @@ -121,7 +121,13 @@ void wl1251_set_partition(struct wl1251 *wl, u32 mem_start, u32 mem_size, u32 reg_start, u32 reg_size) { - struct wl1251_partition partition[2]; + struct wl1251_partition_set *partition; + + partition = kmalloc(sizeof(*partition), GFP_KERNEL); + if (!partition) { + wl1251_error("can not allocate partition buffer"); + return; + } wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", mem_start, mem_size); @@ -164,10 +170,10 @@ void wl1251_set_partition(struct wl1251 *wl, reg_start, reg_size); } - partition[0].start = mem_start; - partition[0].size = mem_size; - partition[1].start = reg_start; - partition[1].size = reg_size; + partition->mem.start = mem_start; + partition->mem.size = mem_size; + partition->reg.start = reg_start; + partition->reg.size = reg_size; wl->physical_mem_addr = mem_start; wl->physical_reg_addr = reg_start; @@ -176,5 +182,7 @@ void wl1251_set_partition(struct wl1251 *wl, wl->virtual_reg_addr = mem_size; wl->if_ops->write(wl, HW_ACCESS_PART0_SIZE_ADDR, partition, - sizeof(partition)); + sizeof(*partition)); + + kfree(partition); } diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index 98cd39619d57..e9dc3c72bb11 100644 --- a/drivers/net/wireless/ti/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c @@ -443,19 +443,25 @@ static void wl1251_tx_packet_cb(struct wl1251 
*wl, void wl1251_tx_complete(struct wl1251 *wl) { int i, result_index, num_complete = 0, queue_len; - struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; + struct tx_result *result, *result_ptr; unsigned long flags; if (unlikely(wl->state != WL1251_STATE_ON)) return; + result = kmalloc_array(FW_TX_CMPLT_BLOCK_SIZE, sizeof(*result), GFP_KERNEL); + if (!result) { + wl1251_error("can not allocate result buffer"); + return; + } + /* First we read the result */ - wl1251_mem_read(wl, wl->data_path->tx_complete_addr, - result, sizeof(result)); + wl1251_mem_read(wl, wl->data_path->tx_complete_addr, result, + FW_TX_CMPLT_BLOCK_SIZE * sizeof(*result)); result_index = wl->next_tx_complete; - for (i = 0; i < ARRAY_SIZE(result); i++) { + for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) { result_ptr = &result[result_index]; if (result_ptr->done_1 == 1 && @@ -538,6 +544,7 @@ void wl1251_tx_complete(struct wl1251 *wl) } + kfree(result); wl->next_tx_complete = result_index; } -- cgit v1.2.3 From 80c5075f39996dc75ee8bb7748c4805ffea0e2fb Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 4 May 2022 09:00:43 +0300 Subject: ath11k: mac: fix too long line checkpatch warns: drivers/net/wireless/ath/ath11k/mac.c:7760: line length of 91 exceeds 90 columns This was introduced by commit 046d2e7c50e3 ("mac80211: prepare sta handling for MLO support"). Compile tested only. Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503060415.24499-1-kvalo@kernel.org --- drivers/net/wireless/ath/ath11k/mac.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 1957e1713548..df15fa42d8ad 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -7741,6 +7741,7 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b bool he_fixed_rate = false, vht_fixed_rate = false; struct ath11k_peer *peer, *tmp; const u16 *vht_mcs_mask, *he_mcs_mask; + struct ieee80211_link_sta *deflink; u8 vht_nss, he_nss; bool ret = true; @@ -7763,13 +7764,16 @@ ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_b spin_lock_bh(&ar->ab->base_lock); list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) { if (peer->sta) { - if (vht_fixed_rate && (!peer->sta->deflink.vht_cap.vht_supported || - peer->sta->deflink.rx_nss < vht_nss)) { + deflink = &peer->sta->deflink; + + if (vht_fixed_rate && (!deflink->vht_cap.vht_supported || + deflink->rx_nss < vht_nss)) { ret = false; goto out; } - if (he_fixed_rate && (!peer->sta->deflink.he_cap.has_he || - peer->sta->deflink.rx_nss < he_nss)) { + + if (he_fixed_rate && (!deflink->he_cap.has_he || + deflink->rx_nss < he_nss)) { ret = false; goto out; } -- cgit v1.2.3 From d9e441855c64e3b4c1d83b7e82d723c12d5de3fa Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Wed, 4 May 2022 09:00:44 +0300 Subject: ath10k: mac: fix too long lines checkpatch warns: drivers/net/wireless/ath/ath10k/mac.c:2696: line length of 92 exceeds 90 columns drivers/net/wireless/ath/ath10k/mac.c:6942: line length of 94 exceeds 90 columns drivers/net/wireless/ath/ath10k/mac.c:6948: line length of 91 exceeds 90 columns These were introduced by commit 046d2e7c50e3 ("mac80211: prepare sta handling for MLO support"). Compile tested only. 
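The fix here and in the previous ath11k patch uses the same trick: hoist the repeated sta->deflink.* accesses into short local pointers so the conditionals fit inside the 90-column limit without changing behaviour. A minimal, compilable user-space sketch of the pattern (the struct layout and names below are illustrative stand-ins, not the real mac80211 definitions):

    #include <stdbool.h>
    #include <stdio.h>

    struct sta_ht_cap  { bool ht_supported; };
    struct sta_vht_cap { bool vht_supported; };

    struct link_sta {
            struct sta_ht_cap ht_cap;
            struct sta_vht_cap vht_cap;
            unsigned int rx_nss;
    };

    struct sta { struct link_sta deflink; };

    /* Taking the pointer once keeps every later check short, which is all
     * the kernel patch does; no functional change is intended.
     */
    static bool sta_ok_for_fixed_vht(const struct sta *sta, unsigned int nss)
    {
            const struct sta_vht_cap *vht_cap = &sta->deflink.vht_cap;

            return vht_cap->vht_supported && sta->deflink.rx_nss >= nss;
    }

    int main(void)
    {
            struct sta sta = { .deflink = { .vht_cap = { true }, .rx_nss = 2 } };

            printf("fixed VHT rate allowed: %d\n", sta_ok_for_fixed_vht(&sta, 2));
            return 0;
    }
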
Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220503060415.24499-2-kvalo@kernel.org --- drivers/net/wireless/ath/ath10k/mac.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 06a51a48c1d9..3570a5895ea8 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2692,8 +2692,10 @@ static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, struct ieee80211_sta *sta) { + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { - switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { + switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: return MODE_11AC_VHT160; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: @@ -6926,6 +6928,9 @@ static int ath10k_mac_validate_rate_mask(struct ath10k *ar, struct ieee80211_sta *sta, u32 rate_ctrl_flag, u8 nss) { + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + if (nss > sta->deflink.rx_nss) { ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n", nss, sta->deflink.rx_nss); @@ -6933,19 +6938,19 @@ static int ath10k_mac_validate_rate_mask(struct ath10k *ar, } if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) { - if (!sta->deflink.vht_cap.vht_supported) { + if (!vht_cap->vht_supported) { ath10k_warn(ar, "Invalid VHT rate for sta %pM\n", sta->addr); return -EINVAL; } } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) { - if (!sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported) { + if (!ht_cap->ht_supported || vht_cap->vht_supported) { ath10k_warn(ar, "Invalid HT rate for sta %pM\n", sta->addr); return -EINVAL; } } else { - if (sta->deflink.ht_cap.ht_supported || sta->deflink.vht_cap.vht_supported) + if (ht_cap->ht_supported || vht_cap->vht_supported) return -EINVAL; } -- cgit v1.2.3 From f2a7064a78b22f2b68b9fcbc8a6f4c5e61c5ba64 Mon Sep 17 00:00:00 2001 From: Robert Marko Date: Sun, 10 Oct 2021 00:17:11 +0200 Subject: ath10k: support bus and device specific API 1 BDF selection Some ath10k IPQ40xx devices like the MikroTik hAP ac2 and ac3 require the BDF-s to be extracted from the device storage instead of shipping packaged API 2 BDF-s. This is required as MikroTik has started shipping boards that require BDF-s to be updated, as otherwise their WLAN performance really suffers. This is however impossible as the devices that require this are release under the same revision and its not possible to differentiate them from devices using the older BDF-s. In OpenWrt we are extracting the calibration data during runtime and we are able to extract the BDF-s in the same manner, however we cannot package the BDF-s to API 2 format on the fly and can only use API 1 to provide BDF-s on the fly. This is an issue as the ath10k driver explicitly looks only for the board.bin file and not for something like board-bus-device.bin like it does for pre-cal data. Due to this we have no way of providing correct BDF-s on the fly, so lets extend the ath10k driver to first look for BDF-s in the board-bus-device.bin format, for example: board-ahb-a800000.wifi.bin If that fails, look for the default board file name as defined previously. 
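The lookup order introduced above is simple to see in isolation: try a board file named after the bus and device first, then fall back to the generic name from hw_params. A small user-space sketch of that ordering, assuming request_file() as a stand-in for ath10k_fetch_fw_file() that only checks whether the file exists on disk:

    #include <stdio.h>

    /* Stand-in for ath10k_fetch_fw_file(): 0 if the file can be opened. */
    static int request_file(const char *dir, const char *name,
                            char *out, size_t len)
    {
            FILE *f;

            snprintf(out, len, "%s/%s", dir, name);
            f = fopen(out, "rb");
            if (!f)
                    return -1;
            fclose(f);
            return 0;
    }

    int main(void)
    {
            const char *dir = "/lib/firmware/ath10k/QCA4019/hw1.0";
            const char *bus = "ahb";
            const char *dev = "a800000.wifi";
            char boardname[100], path[256];

            /* Preferred: bus- and device-specific API 1 board file. */
            snprintf(boardname, sizeof(boardname), "board-%s-%s.bin", bus, dev);
            if (!request_file(dir, boardname, path, sizeof(path))) {
                    printf("using %s\n", path);
                    return 0;
            }

            /* Fallback: the default board.bin, as before this patch. */
            if (!request_file(dir, "board.bin", path, sizeof(path)))
                    printf("falling back to %s\n", path);
            else
                    printf("no board file found\n");
            return 0;
    }

The firmware directory and device name above are only examples; the real driver builds the name from ath10k_bus_str() and dev_name() at probe time.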
Signed-off-by: Robert Marko Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20211009221711.2315352-1-robimarko@gmail.com --- drivers/net/wireless/ath/ath10k/core.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 2092bfd02cd1..688177453b07 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1233,6 +1233,7 @@ success: static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) { const struct firmware *fw; + char boardname[100]; if (bd_ie_type == ATH10K_BD_IE_BOARD) { if (!ar->hw_params.fw.board) { @@ -1240,9 +1241,19 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) return -EINVAL; } + scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin", + ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); + ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.board); + boardname); + if (IS_ERR(ar->normal_mode_fw.board)) { + fw = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.board); + ar->normal_mode_fw.board = fw; + } + if (IS_ERR(ar->normal_mode_fw.board)) return PTR_ERR(ar->normal_mode_fw.board); -- cgit v1.2.3 From 22cc687326e049fe294da118d9a067538c0066cb Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Wed, 4 May 2022 14:09:00 +0530 Subject: ath11k: Fix RX de-fragmentation issue on WCN6750 The offset of REO register where the RX fragment destination ring is configured is different in WCN6750 as compared to WCN6855. Due to this differnce in offsets, on WCN6750, fragment destination ring will be configured incorrectly, leading to RX fragments not getting delivered to the driver. Fix this by defining HW specific offsets for the REO MISC CTL register. 
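This mirrors how the other REO1 registers are already handled: the fixed offset becomes a field in the per-chip register table and the accessor macro resolves it through hw_params at run time. A condensed user-space sketch of that table-driven lookup, assuming trimmed-down struct names; only the two offsets below are taken from this patch (0x630 for WCN6855, 0x5d8 for WCN6750), everything else is scaffolding:

    #include <stdio.h>

    struct hw_regs {
            unsigned int hal_reo1_misc_ctl;  /* 0 means unused on this chip */
    };

    static const struct hw_regs wcn6855_regs = { .hal_reo1_misc_ctl = 0x00000630 };
    static const struct hw_regs wcn6750_regs = { .hal_reo1_misc_ctl = 0x000005d8 };

    struct hw_params { const struct hw_regs *regs; };
    struct ab_sketch { struct hw_params hw_params; };

    /* Mirrors HAL_REO1_MISC_CTL(ab): the offset is no longer a constant. */
    #define HAL_REO1_MISC_CTL(ab) ((ab)->hw_params.regs->hal_reo1_misc_ctl)

    int main(void)
    {
            struct ab_sketch wcn6750 = { .hw_params = { .regs = &wcn6750_regs } };
            struct ab_sketch wcn6855 = { .hw_params = { .regs = &wcn6855_regs } };

            printf("WCN6750 REO1_MISC_CTL offset: 0x%08x\n",
                   HAL_REO1_MISC_CTL(&wcn6750));
            printf("WCN6855 REO1_MISC_CTL offset: 0x%08x\n",
                   HAL_REO1_MISC_CTL(&wcn6855));
            return 0;
    }

Chips that do not use this register (IPQ8074, QCA6390, QCN9074) leave the field at 0, as noted in their register tables in the patch.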
Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-00887-QCAMSLSWPLZ-1 Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.5.0.1-01100-QCAHKSWPL_SILICONZ-1 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.4.0.1-00192-QCAHKSWPL_SILICONZ-1 Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504083900.31513-1-quic_mpubbise@quicinc.com --- drivers/net/wireless/ath/ath11k/hal.h | 2 +- drivers/net/wireless/ath/ath11k/hw.c | 23 +++++++++++++++++++++-- drivers/net/wireless/ath/ath11k/hw.h | 1 + 3 files changed, 23 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 1aadb1566df8..110c337ddf33 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -121,7 +121,7 @@ struct ath11k_base; #define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008 #define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c #define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010 -#define HAL_REO1_MISC_CTL 0x00000630 +#define HAL_REO1_MISC_CTL(ab) ab->hw_params.regs->hal_reo1_misc_ctl #define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb #define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb #define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index 09ce357f0f0d..96db85c55585 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -771,10 +771,10 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab) FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1); ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val); - val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL); + val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL(ab)); val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING; val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1); - ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL, val); + ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL(ab), val); ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab), HAL_DEFAULT_REO_TIMEOUT_USEC); @@ -1983,6 +1983,9 @@ const struct ath11k_hw_regs ipq8074_regs = { /* Shadow register area */ .hal_shadow_base_addr = 0x0, + + /* REO misc control register, not used in IPQ8074 */ + .hal_reo1_misc_ctl = 0x0, }; const struct ath11k_hw_regs qca6390_regs = { @@ -2065,6 +2068,9 @@ const struct ath11k_hw_regs qca6390_regs = { /* Shadow register area */ .hal_shadow_base_addr = 0x000008fc, + + /* REO misc control register, not used in QCA6390 */ + .hal_reo1_misc_ctl = 0x0, }; const struct ath11k_hw_regs qcn9074_regs = { @@ -2147,6 +2153,9 @@ const struct ath11k_hw_regs qcn9074_regs = { /* Shadow register area */ .hal_shadow_base_addr = 0x0, + + /* REO misc control register, not used in QCN9074 */ + .hal_reo1_misc_ctl = 0x0, }; const struct ath11k_hw_regs wcn6855_regs = { @@ -2229,6 +2238,11 @@ const struct ath11k_hw_regs wcn6855_regs = { /* Shadow register area */ .hal_shadow_base_addr = 0x000008fc, + + /* REO misc control register, used for fragment + * destination ring config in WCN6855. 
+ */ + .hal_reo1_misc_ctl = 0x00000630, }; const struct ath11k_hw_regs wcn6750_regs = { @@ -2311,6 +2325,11 @@ const struct ath11k_hw_regs wcn6750_regs = { /* Shadow register area */ .hal_shadow_base_addr = 0x00000504, + + /* REO misc control register, used for fragment + * destination ring config in WCN6750. + */ + .hal_reo1_misc_ctl = 0x000005d8, }; const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = { diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 6d588cd80093..3a2abde63489 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -379,6 +379,7 @@ struct ath11k_hw_regs { u32 pcie_pcs_osc_dtct_config_base; u32 hal_shadow_base_addr; + u32 hal_reo1_misc_ctl; }; extern const struct ath11k_hw_regs ipq8074_regs; -- cgit v1.2.3 From 0c05ab78e3f2105de0f7be56e244906b1e8d176d Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 5 May 2022 13:06:42 +0300 Subject: ixgbe: propagate XFRM offload state direction instead of flags Convert the ixgbe driver to rely on XFRM offload state direction instead of flags bits that were not checked at all. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Steffen Klassert --- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 9 ++++----- drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h | 2 +- drivers/net/ethernet/intel/ixgbevf/ipsec.c | 6 +++--- drivers/net/ethernet/intel/ixgbevf/ipsec.h | 2 +- 4 files changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 69d11ff7677d..774de63dd93a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -585,7 +585,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; if (xs->calg) { @@ -757,7 +757,7 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) u32 zerobuf[4] = {0, 0, 0, 0}; u16 sa_idx; - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa *rsa; u8 ipi; @@ -903,8 +903,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* Tx IPsec offload doesn't seem to work on this * device, so block these requests for now. 
*/ - sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6; - if (sam->flags != XFRM_OFFLOAD_INBOUND) { + if (sam->dir != XFRM_DEV_OFFLOAD_IN) { err = -EOPNOTSUPP; goto err_out; } @@ -915,7 +914,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) goto err_out; } - xs->xso.flags = sam->flags; + xs->xso.dir = sam->dir; xs->id.spi = sam->spi; xs->id.proto = sam->proto; xs->props.family = sam->family; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index d2b64ff8eb4e..809ab51a7842 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -74,7 +74,7 @@ struct ixgbe_ipsec { struct sa_mbx_msg { __be32 spi; - u8 flags; + u8 dir; u8 proto; u16 family; __be32 addr[4]; diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index e763cee0695e..9984ebc62d78 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -25,7 +25,7 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, /* send the important bits to the PF */ sam = (struct sa_mbx_msg *)(&msgbuf[1]); - sam->flags = xs->xso.flags; + sam->dir = xs->xso.dir; sam->spi = xs->id.spi; sam->proto = xs->id.proto; sam->family = xs->props.family; @@ -280,7 +280,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { struct rx_sa rsa; if (xs->calg) { @@ -394,7 +394,7 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) adapter = netdev_priv(dev); ipsec = adapter->ipsec; - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; if (!ipsec->rx_tbl[sa_idx].used) { diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h index 3740725041c3..d22990165353 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.h +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h @@ -57,7 +57,7 @@ struct ixgbevf_ipsec { struct sa_mbx_msg { __be32 spi; - u8 flags; + u8 dir; u8 proto; u16 family; __be32 addr[4]; -- cgit v1.2.3 From 55e2f83afb1c142885da63c5a9ce2998b6f6ab21 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 5 May 2022 13:06:43 +0300 Subject: netdevsim: rely on XFRM state direction instead of flags Make sure that netdevsim relies on direction and not on flags. Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Acked-by: Jakub Kicinski Signed-off-by: Steffen Klassert --- drivers/net/netdevsim/ipsec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index b80ed2ffd45e..386336a38f34 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -171,7 +171,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) return ret; } - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { sa.rx = true; if (xs->props.family == AF_INET6) -- cgit v1.2.3 From 3ef535eccea33b2a4fce20f4f298e09647350d71 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Thu, 5 May 2022 13:06:44 +0300 Subject: net/mlx5e: Use XFRM state direction instead of flags Convert mlx5 driver to use XFRM state direction. 
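Across ixgbe, netdevsim and mlx5 the conversion is the same mechanical substitution: stop testing the XFRM_OFFLOAD_INBOUND bit in xso.flags and compare xso.dir against the offload direction instead. A stripped-down sketch of the new check, assuming simplified stand-in types (only the XFRM_DEV_OFFLOAD_IN/OUT names come from the patches; the numeric values and struct layout here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the offload direction values used by the drivers. */
    enum { XFRM_DEV_OFFLOAD_IN = 1, XFRM_DEV_OFFLOAD_OUT = 2 };

    struct xso_sketch { int dir; };
    struct xfrm_state_sketch { struct xso_sketch xso; };

    /* After the series the RX/TX split keys off the explicit direction. */
    static bool offload_is_rx(const struct xfrm_state_sketch *xs)
    {
            return xs->xso.dir == XFRM_DEV_OFFLOAD_IN;
    }

    int main(void)
    {
            struct xfrm_state_sketch rx = { .xso = { .dir = XFRM_DEV_OFFLOAD_IN } };
            struct xfrm_state_sketch tx = { .xso = { .dir = XFRM_DEV_OFFLOAD_OUT } };

            printf("rx state goes to rx table: %d\n", offload_is_rx(&rx));
            printf("tx state goes to rx table: %d\n", offload_is_rx(&tx));
            return 0;
    }

Keying off a single explicit direction avoids relying on a flag bit that, as the ixgbe commit notes, was not actually being checked.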
Reviewed-by: Raed Salem Signed-off-by: Leon Romanovsky Signed-off-by: Steffen Klassert --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 35e2bb301c26..2a8fd7020622 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -172,9 +172,9 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, } /* action */ - attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ? - MLX5_ACCEL_ESP_ACTION_ENCRYPT : - MLX5_ACCEL_ESP_ACTION_DECRYPT; + attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ? + MLX5_ACCEL_ESP_ACTION_ENCRYPT : + MLX5_ACCEL_ESP_ACTION_DECRYPT; /* flags */ attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ? MLX5_ACCEL_ESP_FLAGS_TRANSPORT : @@ -306,7 +306,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) if (err) goto err_hw_ctx; - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) { err = mlx5e_ipsec_sadb_rx_add(sa_entry); if (err) goto err_add_rule; @@ -333,7 +333,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); - if (x->xso.flags & XFRM_OFFLOAD_INBOUND) + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) mlx5e_ipsec_sadb_rx_del(sa_entry); } -- cgit v1.2.3 From 29c691347e38248607eeab74691bed05115fca79 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:40 +0900 Subject: nfp: flower: add infrastructure for pre_tun rework The previous implementation of using a pre_tun_table for decap has some limitations, causing flows to end up unoffloaded when in fact we are able to offload them. This is because the pre_tun_table does not have enough matching resolution. The next step is to instead make use of the neighbour table which already exists for the encap direction. This patch prepares for this by: - Moving nfp_tun_neigh/_v6 to main.h. - Creating two new "wrapping" structures, one to keep track of neighbour entries (previously they were send-and-forget), and another to keep track of pre_tun flows. - Create a new list in nfp_flower_priv to keep track of pre_tunnel flows - Create a new table in nfp_flower_priv to keep track of next neighbour entries - Initialising and destroying these new list/tables - Extending nfp_fl_payload->pre_tun_rule to save more information for future use. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 87 ++++++++++++++++++++++ .../net/ethernet/netronome/nfp/flower/metadata.c | 19 ++++- .../ethernet/netronome/nfp/flower/tunnel_conf.c | 32 -------- 3 files changed, 105 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index fa902ce2dd82..454fdb6ea4a5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -51,6 +51,7 @@ struct nfp_app; #define NFP_FL_FEATS_VLAN_QINQ BIT(8) #define NFP_FL_FEATS_QOS_PPS BIT(9) #define NFP_FL_FEATS_QOS_METER BIT(10) +#define NFP_FL_FEATS_DECAP_V2 BIT(11) #define NFP_FL_FEATS_HOST_ACK BIT(31) #define NFP_FL_ENABLE_FLOW_MERGE BIT(0) @@ -109,6 +110,80 @@ struct nfp_fl_tunnel_offloads { struct notifier_block neigh_nb; }; +/** + * struct nfp_tun_neigh - neighbour/route entry on the NFP + * @dst_ipv4: Destination IPv4 address + * @src_ipv4: Source IPv4 address + * @dst_addr: Destination MAC address + * @src_addr: Source MAC address + * @port_id: NFP port to output packet on - associated with source IPv4 + * @vlan_tpid: VLAN_TPID match field + * @vlan_tci: VLAN_TCI match field + * @host_ctx: Host context ID to be saved here + */ +struct nfp_tun_neigh { + __be32 dst_ipv4; + __be32 src_ipv4; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be32 port_id; + __be16 vlan_tpid; + __be16 vlan_tci; + __be32 host_ctx; +}; + +/** + * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP + * @dst_ipv6: Destination IPv6 address + * @src_ipv6: Source IPv6 address + * @dst_addr: Destination MAC address + * @src_addr: Source MAC address + * @port_id: NFP port to output packet on - associated with source IPv6 + * @vlan_tpid: VLAN_TPID match field + * @vlan_tci: VLAN_TCI match field + * @host_ctx: Host context ID to be saved here + */ +struct nfp_tun_neigh_v6 { + struct in6_addr dst_ipv6; + struct in6_addr src_ipv6; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be32 port_id; + __be16 vlan_tpid; + __be16 vlan_tci; + __be32 host_ctx; +}; + +/** + * struct nfp_neigh_entry + * @neigh_cookie: Cookie for hashtable lookup + * @ht_node: rhash_head entry for hashtable + * @list_head: Needed as member of linked_nn_entries list + * @payload: The neighbour info payload + * @flow: Linked flow rule + * @is_ipv6: Flag to indicate if payload is ipv6 or ipv4 + */ +struct nfp_neigh_entry { + unsigned long neigh_cookie; + struct rhash_head ht_node; + struct list_head list_head; + char *payload; + struct nfp_predt_entry *flow; + bool is_ipv6; +}; + +/** + * struct nfp_predt_entry + * @list_head: List head to attach to predt_list + * @flow_pay: Direct link to flow_payload + * @nn_list: List of linked nfp_neigh_entries + */ +struct nfp_predt_entry { + struct list_head list_head; + struct nfp_fl_payload *flow_pay; + struct list_head nn_list; +}; + /** * struct nfp_mtu_conf - manage MTU setting * @portnum: NFP port number of repr with requested MTU change @@ -202,6 +277,9 @@ struct nfp_fl_internal_ports { * @ct_zone_table: Hash table used to store the different zones * @ct_zone_wc: Special zone entry for wildcarded zone matches * @ct_map_table: Hash table used to referennce ct flows + * @predt_list: List to keep track of decap pretun flows + * @neigh_table: Table to keep track of neighbor entries + * @predt_lock: Lock to serialise predt/neigh table updates */ struct nfp_flower_priv { struct nfp_app *app; @@ -241,6 +319,9 @@ struct nfp_flower_priv { 
struct rhashtable ct_zone_table; struct nfp_fl_ct_zone_entry *ct_zone_wc; struct rhashtable ct_map_table; + struct list_head predt_list; + struct rhashtable neigh_table; + spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */ }; /** @@ -344,9 +425,14 @@ struct nfp_fl_payload { struct list_head linked_flows; bool in_hw; struct { + struct nfp_predt_entry *predt; struct net_device *dev; + __be16 vlan_tpid; __be16 vlan_tci; __be16 port_idx; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + bool is_ipv6; } pre_tun_rule; }; @@ -369,6 +455,7 @@ struct nfp_fl_payload_link { extern const struct rhashtable_params nfp_flower_table_params; extern const struct rhashtable_params merge_table_params; +extern const struct rhashtable_params neigh_table_params; struct nfp_merge_info { u64 parent_ctx; diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index f448c5682594..74e1b279c13b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -502,6 +502,12 @@ const struct rhashtable_params nfp_ct_map_params = { .automatic_shrinking = true, }; +const struct rhashtable_params neigh_table_params = { + .key_offset = offsetof(struct nfp_neigh_entry, neigh_cookie), + .head_offset = offsetof(struct nfp_neigh_entry, ht_node), + .key_len = sizeof(unsigned long), +}; + int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, unsigned int host_num_mems) { @@ -530,6 +536,12 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, if (err) goto err_free_ct_zone_table; + err = rhashtable_init(&priv->neigh_table, &neigh_table_params); + if (err) + goto err_free_ct_map_table; + + INIT_LIST_HEAD(&priv->predt_list); + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids. 
*/ @@ -537,7 +549,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - goto err_free_ct_map_table; + goto err_free_neigh_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -565,6 +577,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, goto err_free_ring_buf; spin_lock_init(&priv->stats_lock); + spin_lock_init(&priv->predt_lock); return 0; @@ -574,6 +587,8 @@ err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_neigh_table: + rhashtable_destroy(&priv->neigh_table); err_free_ct_map_table: rhashtable_destroy(&priv->ct_map_table); err_free_ct_zone_table: @@ -700,6 +715,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) rhashtable_free_and_destroy(&priv->ct_map_table, nfp_free_map_table_entry, NULL); + rhashtable_free_and_destroy(&priv->neigh_table, + nfp_check_rhashtable_empty, NULL); kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index c71bd555f482..f5e8ed14e517 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -76,38 +76,6 @@ struct nfp_tun_active_tuns_v6 { } tun_info[]; }; -/** - * struct nfp_tun_neigh - neighbour/route entry on the NFP - * @dst_ipv4: destination IPv4 address - * @src_ipv4: source IPv4 address - * @dst_addr: destination MAC address - * @src_addr: source MAC address - * @port_id: NFP port to output packet on - associated with source IPv4 - */ -struct nfp_tun_neigh { - __be32 dst_ipv4; - __be32 src_ipv4; - u8 dst_addr[ETH_ALEN]; - u8 src_addr[ETH_ALEN]; - __be32 port_id; -}; - -/** - * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP - * @dst_ipv6: destination IPv6 address - * @src_ipv6: source IPv6 address - * @dst_addr: destination MAC address - * @src_addr: source MAC address - * @port_id: NFP port to output packet on - associated with source IPv6 - */ -struct nfp_tun_neigh_v6 { - struct in6_addr dst_ipv6; - struct in6_addr src_ipv6; - u8 dst_addr[ETH_ALEN]; - u8 src_addr[ETH_ALEN]; - __be32 port_id; -}; - /** * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup * @ingress_port: ingress port of packet that signalled request -- cgit v1.2.3 From e30b2b68c14f51156ae982047903bb37d4799a16 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:41 +0900 Subject: nfp: flower: add/remove predt_list entries Add calls to add and remove flows to the predt_table. This very simply just allocates and add a new pretun entry if detected as such, and removes it when encountered on a delete flow. Compatibility for older firmware is kept in place through the DECAP_V2 feature bit. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 3 +- .../net/ethernet/netronome/nfp/flower/offload.c | 43 +++++++++++++++++++--- 2 files changed, 39 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 1b9421e844a9..0147de405365 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -220,7 +220,8 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, } output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid); } else if (nfp_flower_internal_port_can_offload(app, out_dev)) { - if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) { + if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES) && + !(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 92e8ade4854e..0fe018bef410 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1362,11 +1362,29 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_release_metadata; } - if (flow_pay->pre_tun_rule.dev) - err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); - else + if (flow_pay->pre_tun_rule.dev) { + if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { + struct nfp_predt_entry *predt; + + predt = kzalloc(sizeof(*predt), GFP_KERNEL); + if (!predt) { + err = -ENOMEM; + goto err_remove_rhash; + } + predt->flow_pay = flow_pay; + INIT_LIST_HEAD(&predt->nn_list); + spin_lock_bh(&priv->predt_lock); + list_add(&predt->list_head, &priv->predt_list); + spin_unlock_bh(&priv->predt_lock); + flow_pay->pre_tun_rule.predt = predt; + } else { + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); + } + } else { err = nfp_flower_xmit_flow(app, flow_pay, NFP_FLOWER_CMSG_TYPE_FLOW_ADD); + } + if (err) goto err_remove_rhash; @@ -1538,11 +1556,24 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_merge_flow; } - if (nfp_flow->pre_tun_rule.dev) - err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); - else + if (nfp_flow->pre_tun_rule.dev) { + if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { + struct nfp_predt_entry *predt; + + predt = nfp_flow->pre_tun_rule.predt; + if (predt) { + spin_lock_bh(&priv->predt_lock); + list_del(&predt->list_head); + spin_unlock_bh(&priv->predt_lock); + kfree(predt); + } + } else { + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); + } + } else { err = nfp_flower_xmit_flow(app, nfp_flow, NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + } /* Fall through on error. */ err_free_merge_flow: -- cgit v1.2.3 From 38fc158e172b2df03f24d4a6ea6a89eab37a4b29 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:42 +0900 Subject: nfp: flower: enforce more strict pre_tun checks Make sure that the rule also matches on source MAC address. On top of that also now save the src and dst MAC addresses similar to how vlan_tci is saved - this will be used in later comparisons with neighbour entries. Indicate if the flow matched on ipv4 or ipv6. Populate the vlan_tpid field that got added to the pre_run_rule struct as well. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/flower/offload.c | 41 ++++++++++++++++++---- 1 file changed, 34 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 0fe018bef410..5fea3e3415fe 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1170,6 +1170,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + if (key_layer & NFP_FLOWER_LAYER_IPV6) + flow->pre_tun_rule.is_ipv6 = true; + else + flow->pre_tun_rule.is_ipv6 = false; + /* Skip fields known to exist. */ mask += sizeof(struct nfp_flower_meta_tci); ext += sizeof(struct nfp_flower_meta_tci); @@ -1180,13 +1185,6 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, mask += sizeof(struct nfp_flower_in_port); ext += sizeof(struct nfp_flower_in_port); - /* Ensure destination MAC address matches pre_tun_dev. */ - mac = (struct nfp_flower_mac_mpls *)ext; - if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { - NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); - return -EOPNOTSUPP; - } - /* Ensure destination MAC address is fully matched. */ mac = (struct nfp_flower_mac_mpls *)mask; if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { @@ -1194,11 +1192,36 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, return -EOPNOTSUPP; } + /* Ensure source MAC address is fully matched. This is only needed + * for firmware with the DECAP_V2 feature enabled. Don't do this + * for firmware without this feature to keep old behaviour. + */ + if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { + mac = (struct nfp_flower_mac_mpls *)mask; + if (!is_broadcast_ether_addr(&mac->mac_src[0])) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported pre-tunnel rule: source MAC field must not be masked"); + return -EOPNOTSUPP; + } + } + if (mac->mpls_lse) { NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); return -EOPNOTSUPP; } + /* Ensure destination MAC address matches pre_tun_dev. 
*/ + mac = (struct nfp_flower_mac_mpls *)ext; + if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); + return -EOPNOTSUPP; + } + + /* Save mac addresses in pre_tun_rule entry for later use */ + memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN); + memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN); + mask += sizeof(struct nfp_flower_mac_mpls); ext += sizeof(struct nfp_flower_mac_mpls); if (key_layer & NFP_FLOWER_LAYER_IPV4 || @@ -1227,17 +1250,21 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app, if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) { struct nfp_flower_vlan *vlan_tags; + u16 vlan_tpid; u16 vlan_tci; vlan_tags = (struct nfp_flower_vlan *)ext; vlan_tci = be16_to_cpu(vlan_tags->outer_tci); + vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid); vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); + flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid); vlan = true; } else { flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); + flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff); } } -- cgit v1.2.3 From 9d5447ed44b5ddca07d82aba4d8a9436c4e910c4 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:43 +0900 Subject: nfp: flower: fixup ipv6/ipv4 route lookup for neigh events When a callback is received to invalidate a neighbour entry there is no need to try and populate any other flow information. Only the flowX->daddr information is needed as lookup key to delete an entry from the NFP neighbour table. Fix this by only doing the lookup if the callback is for a new entry. As part of this cleanup remove the setting of flow6.flowi6_proto, as this is not needed either, it looks to be a possible leftover from a previous implementation. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/tunnel_conf.c | 52 ++++++++++++++-------- 1 file changed, 33 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index f5e8ed14e517..0cb016afbab3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -494,7 +494,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, struct flowi6 flow6 = {}; struct neighbour *n; struct nfp_app *app; - struct rtable *rt; + bool neigh_invalid; bool ipv6 = false; int err; @@ -513,6 +513,8 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, if (n->tbl->family == AF_INET6) ipv6 = true; + neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead; + if (ipv6) flow6.daddr = *(struct in6_addr *)n->primary_key; else @@ -533,29 +535,41 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, #if IS_ENABLED(CONFIG_INET) if (ipv6) { #if IS_ENABLED(CONFIG_IPV6) - struct dst_entry *dst; - - dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL, - &flow6, NULL); - if (IS_ERR(dst)) - return NOTIFY_DONE; - - dst_release(dst); - flow6.flowi6_proto = IPPROTO_UDP; + if (!neigh_invalid) { + struct dst_entry *dst; + /* Use ipv6_dst_lookup_flow to populate flow6->saddr + * and other fields. 
This information is only needed + * for new entries, lookup can be skipped when an entry + * gets invalidated - as only the daddr is needed for + * deleting. + */ + dst = ip6_dst_lookup_flow(dev_net(n->dev), NULL, + &flow6, NULL); + if (IS_ERR(dst)) + return NOTIFY_DONE; + + dst_release(dst); + } nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC); #else return NOTIFY_DONE; #endif /* CONFIG_IPV6 */ } else { - /* Do a route lookup to populate flow data. */ - rt = ip_route_output_key(dev_net(n->dev), &flow4); - err = PTR_ERR_OR_ZERO(rt); - if (err) - return NOTIFY_DONE; - - ip_rt_put(rt); - - flow4.flowi4_proto = IPPROTO_UDP; + if (!neigh_invalid) { + struct rtable *rt; + /* Use ip_route_output_key to populate flow4->saddr and + * other fields. This information is only needed for + * new entries, lookup can be skipped when an entry + * gets invalidated - as only the daddr is needed for + * deleting. + */ + rt = ip_route_output_key(dev_net(n->dev), &flow4); + err = PTR_ERR_OR_ZERO(rt); + if (err) + return NOTIFY_DONE; + + ip_rt_put(rt); + } nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC); } #else -- cgit v1.2.3 From 9ee7c42183d1eaa7d559a7c20cbbf7803a25523e Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:44 +0900 Subject: nfp: flower: update nfp_tun_neigh structs Prepare for more rework in following patches by updating the existing nfp_neigh_structs. The update allows for the same headers to be used for both old and new firmware, with a slight length adjustment when sending the control message to the firmware. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 50 +++++++++++++--------- .../ethernet/netronome/nfp/flower/tunnel_conf.c | 30 +++++++------ 2 files changed, 47 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 454fdb6ea4a5..2c011d60c212 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -111,47 +111,55 @@ struct nfp_fl_tunnel_offloads { }; /** - * struct nfp_tun_neigh - neighbour/route entry on the NFP - * @dst_ipv4: Destination IPv4 address - * @src_ipv4: Source IPv4 address + * struct nfp_tun_neigh - basic neighbour data * @dst_addr: Destination MAC address * @src_addr: Source MAC address * @port_id: NFP port to output packet on - associated with source IPv4 - * @vlan_tpid: VLAN_TPID match field - * @vlan_tci: VLAN_TCI match field - * @host_ctx: Host context ID to be saved here */ struct nfp_tun_neigh { - __be32 dst_ipv4; - __be32 src_ipv4; u8 dst_addr[ETH_ALEN]; u8 src_addr[ETH_ALEN]; __be32 port_id; +}; + +/** + * struct nfp_tun_neigh_ext - extended neighbour data + * @vlan_tpid: VLAN_TPID match field + * @vlan_tci: VLAN_TCI match field + * @host_ctx: Host context ID to be saved here + */ +struct nfp_tun_neigh_ext { __be16 vlan_tpid; __be16 vlan_tci; __be32 host_ctx; }; /** - * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP + * struct nfp_tun_neigh_v4 - neighbour/route entry on the NFP for IPv4 + * @dst_ipv4: Destination IPv4 address + * @src_ipv4: Source IPv4 address + * @common: Neighbour/route common info + * @ext: Neighbour/route extended info + */ +struct nfp_tun_neigh_v4 { + __be32 dst_ipv4; + __be32 src_ipv4; + struct nfp_tun_neigh common; + struct nfp_tun_neigh_ext ext; +}; + +/** + * struct nfp_tun_neigh_v6 - 
neighbour/route entry on the NFP for IPv6 * @dst_ipv6: Destination IPv6 address * @src_ipv6: Source IPv6 address - * @dst_addr: Destination MAC address - * @src_addr: Source MAC address - * @port_id: NFP port to output packet on - associated with source IPv6 - * @vlan_tpid: VLAN_TPID match field - * @vlan_tci: VLAN_TCI match field - * @host_ctx: Host context ID to be saved here + * @common: Neighbour/route common info + * @ext: Neighbour/route extended info */ struct nfp_tun_neigh_v6 { struct in6_addr dst_ipv6; struct in6_addr src_ipv6; - u8 dst_addr[ETH_ALEN]; - u8 src_addr[ETH_ALEN]; - __be32 port_id; - __be16 vlan_tpid; - __be16 vlan_tci; - __be32 host_ctx; + struct nfp_tun_neigh common; + struct nfp_tun_neigh_ext ext; }; /** diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 0cb016afbab3..174888272a30 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -281,9 +281,15 @@ static int nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, gfp_t flag) { + struct nfp_flower_priv *priv = app->priv; struct sk_buff *skb; unsigned char *msg; + if (!(priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) && + (mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH || + mtype == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6)) + plen -= sizeof(struct nfp_tun_neigh_ext); + skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag); if (!skb) return -ENOMEM; @@ -416,14 +422,14 @@ static void nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app, struct flowi4 *flow, struct neighbour *neigh, gfp_t flag) { - struct nfp_tun_neigh payload; + struct nfp_tun_neigh_v4 payload; u32 port_id; port_id = nfp_flower_get_port_id_from_netdev(app, netdev); if (!port_id) return; - memset(&payload, 0, sizeof(struct nfp_tun_neigh)); + memset(&payload, 0, sizeof(struct nfp_tun_neigh_v4)); payload.dst_ipv4 = flow->daddr; /* If entry has expired send dst IP with all other fields 0. */ @@ -436,15 +442,15 @@ nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app, /* Have a valid neighbour so populate rest of entry. */ payload.src_ipv4 = flow->saddr; - ether_addr_copy(payload.src_addr, netdev->dev_addr); - neigh_ha_snapshot(payload.dst_addr, neigh, netdev); - payload.port_id = cpu_to_be32(port_id); + ether_addr_copy(payload.common.src_addr, netdev->dev_addr); + neigh_ha_snapshot(payload.common.dst_addr, neigh, netdev); + payload.common.port_id = cpu_to_be32(port_id); /* Add destination of new route to NFP cache. */ nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4); send_msg: nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, - sizeof(struct nfp_tun_neigh), + sizeof(struct nfp_tun_neigh_v4), (unsigned char *)&payload, flag); } @@ -472,9 +478,9 @@ nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app, /* Have a valid neighbour so populate rest of entry. */ payload.src_ipv6 = flow->saddr; - ether_addr_copy(payload.src_addr, netdev->dev_addr); - neigh_ha_snapshot(payload.dst_addr, neigh, netdev); - payload.port_id = cpu_to_be32(port_id); + ether_addr_copy(payload.common.src_addr, netdev->dev_addr); + neigh_ha_snapshot(payload.common.dst_addr, neigh, netdev); + payload.common.port_id = cpu_to_be32(port_id); /* Add destination of new route to NFP cache. 
*/ nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6); @@ -1372,7 +1378,7 @@ void nfp_tunnel_config_stop(struct nfp_app *app) struct nfp_flower_priv *priv = app->priv; struct nfp_ipv4_addr_entry *ip_entry; struct nfp_tun_neigh_v6 ipv6_route; - struct nfp_tun_neigh ipv4_route; + struct nfp_tun_neigh_v4 ipv4_route; struct list_head *ptr, *storage; unregister_netevent_notifier(&priv->tun.neigh_nb); @@ -1398,7 +1404,7 @@ void nfp_tunnel_config_stop(struct nfp_app *app) kfree(route_entry); nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, - sizeof(struct nfp_tun_neigh), + sizeof(struct nfp_tun_neigh_v4), (unsigned char *)&ipv4_route, GFP_KERNEL); } @@ -1412,7 +1418,7 @@ void nfp_tunnel_config_stop(struct nfp_app *app) kfree(route_entry); nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, - sizeof(struct nfp_tun_neigh), + sizeof(struct nfp_tun_neigh_v6), (unsigned char *)&ipv6_route, GFP_KERNEL); } -- cgit v1.2.3 From f1df7956c11f93b2a90ceae78cdc4bfc00f5e38d Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:45 +0900 Subject: nfp: flower: rework tunnel neighbour configuration This patch updates the way in which the tunnel neighbour entries are handled. Previously they were mostly send-and-forget, with just the destination IP's cached in a list. This update changes to a scheme where the neighbour entry information is stored in a hash table. The reason for this is that the neighbour table will now also be used on the decapsulation path, whereas previously it was only used for encapsulation. We need to save more of the neighbour information in order to link them with flower flows in follow up patches. Updating of the neighbour table is now also handled by the same function, instead of separate *_write_neigh_vX functions. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/flower/tunnel_conf.c | 174 ++++++++++++++------- 1 file changed, 115 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 174888272a30..4ca149501409 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -419,75 +419,131 @@ nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) } static void -nfp_tun_write_neigh_v4(struct net_device *netdev, struct nfp_app *app, - struct flowi4 *flow, struct neighbour *neigh, gfp_t flag) +nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, + void *flow, struct neighbour *neigh, bool is_ipv6) { - struct nfp_tun_neigh_v4 payload; + bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead; + size_t neigh_size = is_ipv6 ? 
sizeof(struct nfp_tun_neigh_v6) : + sizeof(struct nfp_tun_neigh_v4); + unsigned long cookie = (unsigned long)neigh; + struct nfp_flower_priv *priv = app->priv; + struct nfp_neigh_entry *nn_entry; u32 port_id; + u8 mtype; port_id = nfp_flower_get_port_id_from_netdev(app, netdev); if (!port_id) return; - memset(&payload, 0, sizeof(struct nfp_tun_neigh_v4)); - payload.dst_ipv4 = flow->daddr; + spin_lock_bh(&priv->predt_lock); + nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie, + neigh_table_params); + if (!nn_entry && !neigh_invalid) { + struct nfp_tun_neigh_ext *ext; + struct nfp_tun_neigh *common; + + nn_entry = kzalloc(sizeof(*nn_entry) + neigh_size, + GFP_ATOMIC); + if (!nn_entry) + goto err; + + nn_entry->payload = (char *)&nn_entry[1]; + nn_entry->neigh_cookie = cookie; + nn_entry->is_ipv6 = is_ipv6; + nn_entry->flow = NULL; + if (is_ipv6) { + struct flowi6 *flowi6 = (struct flowi6 *)flow; + struct nfp_tun_neigh_v6 *payload; + + payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload; + payload->src_ipv6 = flowi6->saddr; + payload->dst_ipv6 = flowi6->daddr; + common = &payload->common; + ext = &payload->ext; + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6; + } else { + struct flowi4 *flowi4 = (struct flowi4 *)flow; + struct nfp_tun_neigh_v4 *payload; + + payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload; + payload->src_ipv4 = flowi4->saddr; + payload->dst_ipv4 = flowi4->daddr; + common = &payload->common; + ext = &payload->ext; + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; + } + ext->host_ctx = cpu_to_be32(U32_MAX); + ext->vlan_tpid = cpu_to_be16(U16_MAX); + ext->vlan_tci = cpu_to_be16(U16_MAX); + ether_addr_copy(common->src_addr, netdev->dev_addr); + neigh_ha_snapshot(common->dst_addr, neigh, netdev); + common->port_id = cpu_to_be32(port_id); + + if (rhashtable_insert_fast(&priv->neigh_table, + &nn_entry->ht_node, + neigh_table_params)) + goto err; + + /* Add entries to the relevant route cache */ + if (is_ipv6) { + struct nfp_tun_neigh_v6 *payload; + + payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload; + nfp_tun_add_route_to_cache_v6(app, &payload->dst_ipv6); + } else { + struct nfp_tun_neigh_v4 *payload; + + payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload; + nfp_tun_add_route_to_cache_v4(app, &payload->dst_ipv4); + } - /* If entry has expired send dst IP with all other fields 0. */ - if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { - nfp_tun_del_route_from_cache_v4(app, &payload.dst_ipv4); + nfp_flower_xmit_tun_conf(app, mtype, neigh_size, + nn_entry->payload, + GFP_ATOMIC); + } else if (nn_entry && neigh_invalid) { + if (is_ipv6) { + struct flowi6 *flowi6 = (struct flowi6 *)flow; + struct nfp_tun_neigh_v6 *payload; + + payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload; + memset(payload, 0, sizeof(struct nfp_tun_neigh_v6)); + payload->dst_ipv6 = flowi6->daddr; + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6; + nfp_tun_del_route_from_cache_v6(app, + &payload->dst_ipv6); + } else { + struct flowi4 *flowi4 = (struct flowi4 *)flow; + struct nfp_tun_neigh_v4 *payload; + + payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload; + memset(payload, 0, sizeof(struct nfp_tun_neigh_v4)); + payload->dst_ipv4 = flowi4->daddr; + mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; + nfp_tun_del_route_from_cache_v4(app, + &payload->dst_ipv4); + } /* Trigger ARP to verify invalid neighbour state. */ neigh_event_send(neigh, NULL); - goto send_msg; - } - - /* Have a valid neighbour so populate rest of entry. 
*/ - payload.src_ipv4 = flow->saddr; - ether_addr_copy(payload.common.src_addr, netdev->dev_addr); - neigh_ha_snapshot(payload.common.dst_addr, neigh, netdev); - payload.common.port_id = cpu_to_be32(port_id); - /* Add destination of new route to NFP cache. */ - nfp_tun_add_route_to_cache_v4(app, &payload.dst_ipv4); - -send_msg: - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, - sizeof(struct nfp_tun_neigh_v4), - (unsigned char *)&payload, flag); -} + rhashtable_remove_fast(&priv->neigh_table, + &nn_entry->ht_node, + neigh_table_params); -static void -nfp_tun_write_neigh_v6(struct net_device *netdev, struct nfp_app *app, - struct flowi6 *flow, struct neighbour *neigh, gfp_t flag) -{ - struct nfp_tun_neigh_v6 payload; - u32 port_id; + nfp_flower_xmit_tun_conf(app, mtype, neigh_size, + nn_entry->payload, + GFP_ATOMIC); - port_id = nfp_flower_get_port_id_from_netdev(app, netdev); - if (!port_id) - return; - - memset(&payload, 0, sizeof(struct nfp_tun_neigh_v6)); - payload.dst_ipv6 = flow->daddr; - - /* If entry has expired send dst IP with all other fields 0. */ - if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { - nfp_tun_del_route_from_cache_v6(app, &payload.dst_ipv6); - /* Trigger probe to verify invalid neighbour state. */ - neigh_event_send(neigh, NULL); - goto send_msg; + if (nn_entry->flow) + list_del(&nn_entry->list_head); + kfree(nn_entry); } - /* Have a valid neighbour so populate rest of entry. */ - payload.src_ipv6 = flow->saddr; - ether_addr_copy(payload.common.src_addr, netdev->dev_addr); - neigh_ha_snapshot(payload.common.dst_addr, neigh, netdev); - payload.common.port_id = cpu_to_be32(port_id); - /* Add destination of new route to NFP cache. */ - nfp_tun_add_route_to_cache_v6(app, &payload.dst_ipv6); - -send_msg: - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, - sizeof(struct nfp_tun_neigh_v6), - (unsigned char *)&payload, flag); + spin_unlock_bh(&priv->predt_lock); + return; + +err: + kfree(nn_entry); + spin_unlock_bh(&priv->predt_lock); + nfp_flower_cmsg_warn(app, "Neighbour configuration failed.\n"); } static int @@ -556,7 +612,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, dst_release(dst); } - nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC); + nfp_tun_write_neigh(n->dev, app, &flow6, n, true); #else return NOTIFY_DONE; #endif /* CONFIG_IPV6 */ @@ -576,7 +632,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, ip_rt_put(rt); } - nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC); + nfp_tun_write_neigh(n->dev, app, &flow4, n, false); } #else return NOTIFY_DONE; @@ -619,7 +675,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb) ip_rt_put(rt); if (!n) goto fail_rcu_unlock; - nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC); + nfp_tun_write_neigh(n->dev, app, &flow, n, false); neigh_release(n); rcu_read_unlock(); return; @@ -661,7 +717,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb) if (!n) goto fail_rcu_unlock; - nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC); + nfp_tun_write_neigh(n->dev, app, &flow, n, true); neigh_release(n); rcu_read_unlock(); return; -- cgit v1.2.3 From 591c90a1d0b06f93695e443a1adaebdc8865647e Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:46 +0900 Subject: nfp: flower: link pre_tun flow rules with neigh entries Add helper functions that can create links between flow rules and cached neighbour entries. 
Also add the relevant calls to these functions. * When a new neighbour entry gets added cycle through the saved pre_tun flow list and link any relevant matches. Update the neighbour table on the nfp with this new information. * When a new pre_tun flow rule gets added iterate through the save neighbour entries and link any relevant matches. Once again update the nfp neighbour table with any new links. * Do the inverse when deleting - remove any created links and also inform the nfp of this. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 4 + .../net/ethernet/netronome/nfp/flower/offload.c | 4 +- .../ethernet/netronome/nfp/flower/tunnel_conf.c | 147 +++++++++++++++++++++ 3 files changed, 154 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 2c011d60c212..6bc7a9cbf131 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -675,6 +675,10 @@ void nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app, struct net_device *netdev); +void nfp_tun_link_and_update_nn_entries(struct nfp_app *app, + struct nfp_predt_entry *predt); +void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app, + struct nfp_predt_entry *predt); int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, struct nfp_fl_payload *flow); int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 5fea3e3415fe..9d65459bdba5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1402,8 +1402,9 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, INIT_LIST_HEAD(&predt->nn_list); spin_lock_bh(&priv->predt_lock); list_add(&predt->list_head, &priv->predt_list); - spin_unlock_bh(&priv->predt_lock); flow_pay->pre_tun_rule.predt = predt; + nfp_tun_link_and_update_nn_entries(app, predt); + spin_unlock_bh(&priv->predt_lock); } else { err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); } @@ -1590,6 +1591,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, predt = nfp_flow->pre_tun_rule.predt; if (predt) { spin_lock_bh(&priv->predt_lock); + nfp_tun_unlink_and_update_nn_entries(app, predt); list_del(&predt->list_head); spin_unlock_bh(&priv->predt_lock); kfree(predt); diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 4ca149501409..fa9df24bec27 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -301,6 +301,150 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, return 0; } +static void +nfp_tun_mutual_link(struct nfp_predt_entry *predt, + struct nfp_neigh_entry *neigh) +{ + struct nfp_fl_payload *flow_pay = predt->flow_pay; + struct nfp_tun_neigh_ext *ext; + struct nfp_tun_neigh *common; + + if (flow_pay->pre_tun_rule.is_ipv6 != neigh->is_ipv6) + return; + + /* In the case of bonding it is possible that there might already + * be a flow linked (as the MAC address gets shared). If a flow + * is already linked just return. 
+ */ + if (neigh->flow) + return; + + common = neigh->is_ipv6 ? + &((struct nfp_tun_neigh_v6 *)neigh->payload)->common : + &((struct nfp_tun_neigh_v4 *)neigh->payload)->common; + ext = neigh->is_ipv6 ? + &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext : + &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext; + + if (memcmp(flow_pay->pre_tun_rule.loc_mac, + common->src_addr, ETH_ALEN) || + memcmp(flow_pay->pre_tun_rule.rem_mac, + common->dst_addr, ETH_ALEN)) + return; + + list_add(&neigh->list_head, &predt->nn_list); + neigh->flow = predt; + ext->host_ctx = flow_pay->meta.host_ctx_id; + ext->vlan_tci = flow_pay->pre_tun_rule.vlan_tci; + ext->vlan_tpid = flow_pay->pre_tun_rule.vlan_tpid; +} + +static void +nfp_tun_link_predt_entries(struct nfp_app *app, + struct nfp_neigh_entry *nn_entry) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_predt_entry *predt, *tmp; + + list_for_each_entry_safe(predt, tmp, &priv->predt_list, list_head) { + nfp_tun_mutual_link(predt, nn_entry); + } +} + +void nfp_tun_link_and_update_nn_entries(struct nfp_app *app, + struct nfp_predt_entry *predt) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_neigh_entry *nn_entry; + struct rhashtable_iter iter; + size_t neigh_size; + u8 type; + + rhashtable_walk_enter(&priv->neigh_table, &iter); + rhashtable_walk_start(&iter); + while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(nn_entry)) + continue; + nfp_tun_mutual_link(predt, nn_entry); + neigh_size = nn_entry->is_ipv6 ? + sizeof(struct nfp_tun_neigh_v6) : + sizeof(struct nfp_tun_neigh_v4); + type = nn_entry->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; + nfp_flower_xmit_tun_conf(app, type, neigh_size, + nn_entry->payload, + GFP_ATOMIC); + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); +} + +static void nfp_tun_cleanup_nn_entries(struct nfp_app *app) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_neigh_entry *neigh; + struct nfp_tun_neigh_ext *ext; + struct rhashtable_iter iter; + size_t neigh_size; + u8 type; + + rhashtable_walk_enter(&priv->neigh_table, &iter); + rhashtable_walk_start(&iter); + while ((neigh = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(neigh)) + continue; + ext = neigh->is_ipv6 ? + &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext : + &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext; + ext->host_ctx = cpu_to_be32(U32_MAX); + ext->vlan_tpid = cpu_to_be16(U16_MAX); + ext->vlan_tci = cpu_to_be16(U16_MAX); + + neigh_size = neigh->is_ipv6 ? + sizeof(struct nfp_tun_neigh_v6) : + sizeof(struct nfp_tun_neigh_v4); + type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; + nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload, + GFP_ATOMIC); + + rhashtable_remove_fast(&priv->neigh_table, &neigh->ht_node, + neigh_table_params); + if (neigh->flow) + list_del(&neigh->list_head); + kfree(neigh); + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); +} + +void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app, + struct nfp_predt_entry *predt) +{ + struct nfp_neigh_entry *neigh, *tmp; + struct nfp_tun_neigh_ext *ext; + size_t neigh_size; + u8 type; + + list_for_each_entry_safe(neigh, tmp, &predt->nn_list, list_head) { + ext = neigh->is_ipv6 ? 
+ &((struct nfp_tun_neigh_v6 *)neigh->payload)->ext : + &((struct nfp_tun_neigh_v4 *)neigh->payload)->ext; + neigh->flow = NULL; + ext->host_ctx = cpu_to_be32(U32_MAX); + ext->vlan_tpid = cpu_to_be16(U16_MAX); + ext->vlan_tci = cpu_to_be16(U16_MAX); + list_del(&neigh->list_head); + neigh_size = neigh->is_ipv6 ? + sizeof(struct nfp_tun_neigh_v6) : + sizeof(struct nfp_tun_neigh_v4); + type = neigh->is_ipv6 ? NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; + nfp_flower_xmit_tun_conf(app, type, neigh_size, neigh->payload, + GFP_ATOMIC); + } +} + static bool __nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock, void *add, int add_len) @@ -497,6 +641,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, nfp_tun_add_route_to_cache_v4(app, &payload->dst_ipv4); } + nfp_tun_link_predt_entries(app, nn_entry); nfp_flower_xmit_tun_conf(app, mtype, neigh_size, nn_entry->payload, GFP_ATOMIC); @@ -1482,4 +1627,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app) /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */ rhashtable_free_and_destroy(&priv->tun.offloaded_macs, nfp_check_rhashtable_empty, NULL); + + nfp_tun_cleanup_nn_entries(app); } -- cgit v1.2.3 From c83a0fbe9766b3ae970b96435b7c9786299a9b12 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:47 +0900 Subject: nfp: flower: remove unused neighbour cache With the neighbour entries now stored in a dedicated table there is no use to make use of the tunnel route cache anymore, so remove this. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 8 - .../ethernet/netronome/nfp/flower/tunnel_conf.c | 175 --------------------- 2 files changed, 183 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 6bc7a9cbf131..66f847414693 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -87,12 +87,8 @@ struct nfp_fl_stats_id { * @offloaded_macs: Hashtable of the offloaded MAC addresses * @ipv4_off_list: List of IPv4 addresses to offload * @ipv6_off_list: List of IPv6 addresses to offload - * @neigh_off_list_v4: List of IPv4 neighbour offloads - * @neigh_off_list_v6: List of IPv6 neighbour offloads * @ipv4_off_lock: Lock for the IPv4 address list * @ipv6_off_lock: Lock for the IPv6 address list - * @neigh_off_lock_v4: Lock for the IPv4 neighbour address list - * @neigh_off_lock_v6: Lock for the IPv6 neighbour address list * @mac_off_ids: IDA to manage id assignment for offloaded MACs * @neigh_nb: Notifier to monitor neighbour state */ @@ -100,12 +96,8 @@ struct nfp_fl_tunnel_offloads { struct rhashtable offloaded_macs; struct list_head ipv4_off_list; struct list_head ipv6_off_list; - struct list_head neigh_off_list_v4; - struct list_head neigh_off_list_v6; struct mutex ipv4_off_lock; struct mutex ipv6_off_lock; - spinlock_t neigh_off_lock_v4; - spinlock_t neigh_off_lock_v6; struct ida mac_off_ids; struct notifier_block neigh_nb; }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index fa9df24bec27..9c37ed6943d0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -445,123 +445,6 @@ void nfp_tun_unlink_and_update_nn_entries(struct 
nfp_app *app, } } -static bool -__nfp_tun_has_route(struct list_head *route_list, spinlock_t *list_lock, - void *add, int add_len) -{ - struct nfp_offloaded_route *entry; - - spin_lock_bh(list_lock); - list_for_each_entry(entry, route_list, list) - if (!memcmp(entry->ip_add, add, add_len)) { - spin_unlock_bh(list_lock); - return true; - } - spin_unlock_bh(list_lock); - return false; -} - -static int -__nfp_tun_add_route_to_cache(struct list_head *route_list, - spinlock_t *list_lock, void *add, int add_len) -{ - struct nfp_offloaded_route *entry; - - spin_lock_bh(list_lock); - list_for_each_entry(entry, route_list, list) - if (!memcmp(entry->ip_add, add, add_len)) { - spin_unlock_bh(list_lock); - return 0; - } - - entry = kmalloc(struct_size(entry, ip_add, add_len), GFP_ATOMIC); - if (!entry) { - spin_unlock_bh(list_lock); - return -ENOMEM; - } - - memcpy(entry->ip_add, add, add_len); - list_add_tail(&entry->list, route_list); - spin_unlock_bh(list_lock); - - return 0; -} - -static void -__nfp_tun_del_route_from_cache(struct list_head *route_list, - spinlock_t *list_lock, void *add, int add_len) -{ - struct nfp_offloaded_route *entry; - - spin_lock_bh(list_lock); - list_for_each_entry(entry, route_list, list) - if (!memcmp(entry->ip_add, add, add_len)) { - list_del(&entry->list); - kfree(entry); - break; - } - spin_unlock_bh(list_lock); -} - -static bool nfp_tun_has_route_v4(struct nfp_app *app, __be32 *ipv4_addr) -{ - struct nfp_flower_priv *priv = app->priv; - - return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4, - &priv->tun.neigh_off_lock_v4, ipv4_addr, - sizeof(*ipv4_addr)); -} - -static bool -nfp_tun_has_route_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) -{ - struct nfp_flower_priv *priv = app->priv; - - return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6, - &priv->tun.neigh_off_lock_v6, ipv6_addr, - sizeof(*ipv6_addr)); -} - -static void -nfp_tun_add_route_to_cache_v4(struct nfp_app *app, __be32 *ipv4_addr) -{ - struct nfp_flower_priv *priv = app->priv; - - __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4, - &priv->tun.neigh_off_lock_v4, ipv4_addr, - sizeof(*ipv4_addr)); -} - -static void -nfp_tun_add_route_to_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) -{ - struct nfp_flower_priv *priv = app->priv; - - __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6, - &priv->tun.neigh_off_lock_v6, ipv6_addr, - sizeof(*ipv6_addr)); -} - -static void -nfp_tun_del_route_from_cache_v4(struct nfp_app *app, __be32 *ipv4_addr) -{ - struct nfp_flower_priv *priv = app->priv; - - __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4, - &priv->tun.neigh_off_lock_v4, ipv4_addr, - sizeof(*ipv4_addr)); -} - -static void -nfp_tun_del_route_from_cache_v6(struct nfp_app *app, struct in6_addr *ipv6_addr) -{ - struct nfp_flower_priv *priv = app->priv; - - __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6, - &priv->tun.neigh_off_lock_v6, ipv6_addr, - sizeof(*ipv6_addr)); -} - static void nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, void *flow, struct neighbour *neigh, bool is_ipv6) @@ -628,19 +511,6 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, neigh_table_params)) goto err; - /* Add entries to the relevant route cache */ - if (is_ipv6) { - struct nfp_tun_neigh_v6 *payload; - - payload = (struct nfp_tun_neigh_v6 *)nn_entry->payload; - nfp_tun_add_route_to_cache_v6(app, &payload->dst_ipv6); - } else { - struct nfp_tun_neigh_v4 *payload; - - payload = (struct nfp_tun_neigh_v4 *)nn_entry->payload; - 
nfp_tun_add_route_to_cache_v4(app, &payload->dst_ipv4); - } - nfp_tun_link_predt_entries(app, nn_entry); nfp_flower_xmit_tun_conf(app, mtype, neigh_size, nn_entry->payload, @@ -654,8 +524,6 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, memset(payload, 0, sizeof(struct nfp_tun_neigh_v6)); payload->dst_ipv6 = flowi6->daddr; mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6; - nfp_tun_del_route_from_cache_v6(app, - &payload->dst_ipv6); } else { struct flowi4 *flowi4 = (struct flowi4 *)flow; struct nfp_tun_neigh_v4 *payload; @@ -664,8 +532,6 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, memset(payload, 0, sizeof(struct nfp_tun_neigh_v4)); payload->dst_ipv4 = flowi4->daddr; mtype = NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; - nfp_tun_del_route_from_cache_v4(app, - &payload->dst_ipv4); } /* Trigger ARP to verify invalid neighbour state. */ neigh_event_send(neigh, NULL); @@ -734,11 +600,6 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, !nfp_flower_internal_port_can_offload(app, n->dev)) return NOTIFY_DONE; - /* Only concerned with changes to routes already added to NFP. */ - if ((ipv6 && !nfp_tun_has_route_v6(app, &flow6.daddr)) || - (!ipv6 && !nfp_tun_has_route_v4(app, &flow4.daddr))) - return NOTIFY_DONE; - #if IS_ENABLED(CONFIG_INET) if (ipv6) { #if IS_ENABLED(CONFIG_IPV6) @@ -1557,10 +1418,6 @@ int nfp_tunnel_config_start(struct nfp_app *app) INIT_LIST_HEAD(&priv->tun.ipv6_off_list); /* Initialise priv data for neighbour offloading. */ - spin_lock_init(&priv->tun.neigh_off_lock_v4); - INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4); - spin_lock_init(&priv->tun.neigh_off_lock_v6); - INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6); priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler; err = register_netevent_notifier(&priv->tun.neigh_nb); @@ -1575,11 +1432,8 @@ int nfp_tunnel_config_start(struct nfp_app *app) void nfp_tunnel_config_stop(struct nfp_app *app) { - struct nfp_offloaded_route *route_entry, *temp; struct nfp_flower_priv *priv = app->priv; struct nfp_ipv4_addr_entry *ip_entry; - struct nfp_tun_neigh_v6 ipv6_route; - struct nfp_tun_neigh_v4 ipv4_route; struct list_head *ptr, *storage; unregister_netevent_notifier(&priv->tun.neigh_nb); @@ -1595,35 +1449,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app) mutex_destroy(&priv->tun.ipv6_off_lock); - /* Free memory in the route list and remove entries from fw cache. */ - list_for_each_entry_safe(route_entry, temp, - &priv->tun.neigh_off_list_v4, list) { - memset(&ipv4_route, 0, sizeof(ipv4_route)); - memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add, - sizeof(ipv4_route.dst_ipv4)); - list_del(&route_entry->list); - kfree(route_entry); - - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, - sizeof(struct nfp_tun_neigh_v4), - (unsigned char *)&ipv4_route, - GFP_KERNEL); - } - - list_for_each_entry_safe(route_entry, temp, - &priv->tun.neigh_off_list_v6, list) { - memset(&ipv6_route, 0, sizeof(ipv6_route)); - memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add, - sizeof(ipv6_route.dst_ipv6)); - list_del(&route_entry->list); - kfree(route_entry); - - nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6, - sizeof(struct nfp_tun_neigh_v6), - (unsigned char *)&ipv6_route, - GFP_KERNEL); - } - /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. 
*/ rhashtable_free_and_destroy(&priv->tun.offloaded_macs, nfp_check_rhashtable_empty, NULL); -- cgit v1.2.3 From a7da2a864a4fa63fe39c808eec8abfb76abf1dbd Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Thu, 5 May 2022 14:43:48 +0900 Subject: nfp: flower: enable decap_v2 bit Finally enable the decap_v2 feature bit now that all the other bits are in place to configure it correctly. Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/main.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 66f847414693..cb799d18682d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -68,7 +68,8 @@ struct nfp_app; NFP_FL_FEATS_IPV6_TUN | \ NFP_FL_FEATS_VLAN_QINQ | \ NFP_FL_FEATS_QOS_PPS | \ - NFP_FL_FEATS_QOS_METER) + NFP_FL_FEATS_QOS_METER | \ + NFP_FL_FEATS_DECAP_V2) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; -- cgit v1.2.3 From 6df6398f7c8b481ce83f28143bc08a5231616deb Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 5 May 2022 19:51:31 -0700 Subject: net: add netif_inherit_tso_max() To make later patches smaller create a helper for inheriting the TSO limitations of a lower device. The TSO in the name is not an accident, subsequent patches will replace GSO with TSO in more names. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 3 +-- drivers/net/ipvlan/ipvlan_main.c | 6 ++---- drivers/net/macvlan.c | 6 ++---- drivers/net/veth.c | 3 +-- drivers/net/vxlan/vxlan_core.c | 3 +-- include/linux/netdevice.h | 3 +++ net/8021q/vlan.c | 3 +-- net/8021q/vlan_dev.c | 3 +-- net/core/dev.c | 12 ++++++++++++ 9 files changed, 24 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index ba3fa7eac98d..790e1d5e4b4a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -286,8 +286,7 @@ nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower) if (repr->dst->u.port_info.lower_dev != lower) return; - netif_set_gso_max_size(netdev, lower->gso_max_size); - netif_set_gso_max_segs(netdev, lower->gso_max_segs); + netif_inherit_tso_max(netdev, lower); netdev_update_features(netdev); } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 696e245f6d00..aa28a29e228c 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -139,8 +139,7 @@ static int ipvlan_init(struct net_device *dev) dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->hw_enc_features |= dev->features; - netif_set_gso_max_size(dev, phy_dev->gso_max_size); - netif_set_gso_max_segs(dev, phy_dev->gso_max_segs); + netif_inherit_tso_max(dev, phy_dev); dev->hard_header_len = phy_dev->hard_header_len; netdev_lockdep_set_classes(dev); @@ -762,8 +761,7 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - netif_set_gso_max_size(ipvlan->dev, dev->gso_max_size); - netif_set_gso_max_segs(ipvlan->dev, dev->gso_max_segs); + netif_inherit_tso_max(ipvlan->dev, 
dev); netdev_update_features(ipvlan->dev); } break; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b00bc8173abe..0017bd0fe3bb 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -904,8 +904,7 @@ static int macvlan_init(struct net_device *dev) dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->vlan_features |= ALWAYS_ON_OFFLOADS; dev->hw_enc_features |= dev->features; - netif_set_gso_max_size(dev, lowerdev->gso_max_size); - netif_set_gso_max_segs(dev, lowerdev->gso_max_segs); + netif_inherit_tso_max(dev, lowerdev); dev->hard_header_len = lowerdev->hard_header_len; macvlan_set_lockdep_class(dev); @@ -1763,8 +1762,7 @@ static int macvlan_device_event(struct notifier_block *unused, break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { - netif_set_gso_max_size(vlan->dev, dev->gso_max_size); - netif_set_gso_max_segs(vlan->dev, dev->gso_max_segs); + netif_inherit_tso_max(vlan->dev, dev); netdev_update_features(vlan->dev); } break; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 3592014e50cc..f474e79a7745 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1758,8 +1758,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; - netif_set_gso_max_size(peer, dev->gso_max_size); - netif_set_gso_max_segs(peer, dev->gso_max_segs); + netif_inherit_tso_max(peer, dev); err = register_netdevice(peer); put_net(net); diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 8a5e3a6d32d7..2bda692f70c5 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -3683,8 +3683,7 @@ static void vxlan_config_apply(struct net_device *dev, if (lowerdev) { dst->remote_ifindex = conf->remote_ifindex; - netif_set_gso_max_size(dev, lowerdev->gso_max_size); - netif_set_gso_max_segs(dev, lowerdev->gso_max_segs); + netif_inherit_tso_max(dev, lowerdev); needed_headroom = lowerdev->hard_header_len; needed_headroom += lowerdev->needed_headroom; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eaf66e57d891..006bb5c0e413 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4895,6 +4895,9 @@ static inline void netif_set_gro_max_size(struct net_device *dev, WRITE_ONCE(dev->gro_max_size, size); } +void netif_inherit_tso_max(struct net_device *to, + const struct net_device *from); + static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, int pulled_hlen, u16 mac_offset, int mac_len) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 788076b002b3..e40aa3e3641c 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -319,8 +319,7 @@ static void vlan_transfer_features(struct net_device *dev, { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); - netif_set_gso_max_size(vlandev, dev->gso_max_size); - netif_set_gso_max_segs(vlandev, dev->gso_max_segs); + netif_inherit_tso_max(vlandev, dev); if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) vlandev->hard_header_len = dev->hard_header_len; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e5d23e75572a..839f2020b015 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -573,8 +573,7 @@ static int vlan_dev_init(struct net_device *dev) NETIF_F_ALL_FCOE; dev->features |= dev->hw_features | NETIF_F_LLTX; - netif_set_gso_max_size(dev, real_dev->gso_max_size); - netif_set_gso_max_segs(dev, real_dev->gso_max_segs); + netif_inherit_tso_max(dev, real_dev); if 
(dev->features & NETIF_F_VLAN_FEATURES) netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); diff --git a/net/core/dev.c b/net/core/dev.c index c2d73595a7c3..eef1d19b130f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2992,6 +2992,18 @@ undo_rx: } EXPORT_SYMBOL(netif_set_real_num_queues); +/** + * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper + * @to: netdev to update + * @from: netdev from which to copy the limits + */ +void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) +{ + netif_set_gso_max_size(to, from->gso_max_size); + netif_set_gso_max_segs(to, from->gso_max_segs); +} +EXPORT_SYMBOL(netif_inherit_tso_max); + /** * netif_get_num_default_rss_queues - default number of RSS queues * -- cgit v1.2.3 From ee8b7a1156f357613646d6c69d07ac5a087a1071 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 5 May 2022 19:51:33 -0700 Subject: net: make drivers set the TSO limit not the GSO limit Drivers should call the TSO setting helper, GSO is controllable by user space. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 12 ++++++------ drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 +- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- drivers/net/ethernet/emulex/benet/be_main.c | 2 +- drivers/net/ethernet/freescale/fec_main.c | 2 +- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +- drivers/net/ethernet/marvell/mvneta.c | 2 +- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 +- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 +- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2 +- drivers/net/ethernet/realtek/r8169_main.c | 8 ++++---- drivers/net/ethernet/sfc/ef100_nic.c | 9 ++++++--- drivers/net/ethernet/sfc/efx.c | 2 +- drivers/net/ethernet/sfc/falcon/efx.c | 2 +- drivers/net/hyperv/rndis_filter.c | 2 +- drivers/net/usb/aqc111.c | 2 +- drivers/net/usb/ax88179_178a.c | 2 +- drivers/net/usb/lan78xx.c | 2 +- drivers/net/usb/r8152.c | 2 +- drivers/s390/net/qeth_l2_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 2 +- net/bridge/br_if.c | 12 ++++++------ net/core/dev.c | 4 ++-- 28 files changed, 49 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c9e75a9de282..f245e439f44c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1419,8 +1419,8 @@ static void bond_compute_features(struct bonding *bond) struct list_head *iter; struct slave *slave; unsigned short max_hard_header_len = ETH_HLEN; - unsigned int gso_max_size = GSO_MAX_SIZE; - u16 gso_max_segs = GSO_MAX_SEGS; + unsigned int tso_max_size = TSO_MAX_SIZE; + u16 tso_max_segs = TSO_MAX_SEGS; if (!bond_has_slaves(bond)) goto done; @@ -1449,8 +1449,8 @@ static void bond_compute_features(struct bonding *bond) if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; - gso_max_size = min(gso_max_size, slave->dev->gso_max_size); - gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); + tso_max_size = min(tso_max_size, slave->dev->tso_max_size); + tso_max_segs = min(tso_max_segs, 
slave->dev->tso_max_segs); } bond_dev->hard_header_len = max_hard_header_len; @@ -1463,8 +1463,8 @@ done: bond_dev->hw_enc_features |= xfrm_features; #endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->mpls_features = mpls_features; - netif_set_gso_max_segs(bond_dev, gso_max_segs); - netif_set_gso_max_size(bond_dev, gso_max_size); + netif_set_tso_max_segs(bond_dev, tso_max_segs); + netif_set_tso_max_size(bond_dev, tso_max_size); bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) && diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 56e5f440e666..20681860a599 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -2395,7 +2395,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); - netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); + netif_set_tso_max_size(netdev, MAX_TSO_SEG_SIZE); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index ba28aa444e5a..bee35ce60171 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3563,7 +3563,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO; } - netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); + netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); /* Copy of transmit encapsulation capabilities: * TSO, TSO6, Checksums for this device diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 568f211d91cc..ac196883f07e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -2094,7 +2094,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_LRO; - netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); + netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); /* Copy of transmit encapsulation capabilities: * TSO, TSO6, Checksums for this device diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5939068a8f62..cd4e243da5fa 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5204,7 +5204,7 @@ static void be_netdev_init(struct net_device *netdev) netdev->flags |= IFF_MULTICAST; - netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN); + netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN); netdev->netdev_ops = &be_netdev_ops; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9f33ec838b52..6e52f3ad182f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3566,7 +3566,7 @@ static int fec_enet_init(struct net_device *ndev) ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; if (fep->quirks & FEC_QUIRK_HAS_CSUM) { - netif_set_gso_max_segs(ndev, FEC_MAX_TSO_SEGS); + netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS); /* enable hw accelerator */ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c 
b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 22a463e15678..2f0bd21a9082 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1782,7 +1782,7 @@ static int hns_nic_set_features(struct net_device *netdev, priv->ops.fill_desc = fill_tso_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; /* The chip only support 7*4096 */ - netif_set_gso_max_size(netdev, 7 * 4096); + netif_set_tso_max_size(netdev, 7 * 4096); } else { priv->ops.fill_desc = fill_v2_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; @@ -2168,7 +2168,7 @@ static void hns_nic_set_priv_ops(struct net_device *netdev) priv->ops.fill_desc = fill_tso_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; /* This chip only support 7*4096 */ - netif_set_gso_max_size(netdev, 7 * 4096); + netif_set_tso_max_size(netdev, 7 * 4096); } else { priv->ops.fill_desc = fill_v2_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4a4954aa317..292a025e5423 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5051,12 +5051,12 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 65536); + netif_set_tso_max_size(adapter->netdev, 65536); return; } if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(adapter->netdev, 32768); + netif_set_tso_max_size(adapter->netdev, 32768); #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index c18801490649..57eff4e9e6de 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -3207,7 +3207,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) dev->hw_features = dev->features; dev->priv_flags |= IFF_UNICAST_FLT; - netif_set_gso_max_segs(dev, MV643XX_MAX_TSO_SEGS); + netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS); /* MTU range: 64 - 9500 */ dev->min_mtu = 64; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f6a54c7f0c69..384f5a16753d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -5617,7 +5617,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - netif_set_gso_max_segs(dev, MVNETA_MAX_TSO_SEGS); + netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS); /* MTU range: 68 - 9676 */ dev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 1a835b48791b..2b7eade373be 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -6861,7 +6861,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, mvpp2_set_hw_csum(port, port->pool_long->id); dev->vlan_features |= features; - netif_set_gso_max_segs(dev, MVPP2_MAX_TSO_SEGS); + netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); dev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 68 - 9704 */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 441aafc26a08..53b2706d65a1 
100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2704,7 +2704,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; - netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS); + netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2_netdev_ops; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 9e87836ed8bf..86653bb8e403 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -652,7 +652,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_RXALL; netdev->hw_features |= NETIF_F_HW_TC; - netif_set_gso_max_segs(netdev, OTX2_MAX_GSO_SEGS); + netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS); netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2vf_netdev_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 5528d12d1f48..c60ead337d06 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2320,7 +2320,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = nn->max_mtu; - netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS); + netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS); netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 790e1d5e4b4a..75b5018f2e1b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -380,7 +380,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, /* Advertise but disable TSO by default. 
*/ netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); - netif_set_gso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS); + netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS); netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL; netdev->features |= NETIF_F_LLTX; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 33f5c5698ccb..3098d6672192 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5442,12 +5442,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (rtl_chip_supports_csum_v2(tp)) { dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; - netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V2); - netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V2); + netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2); + netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V2); } else { dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; - netif_set_gso_max_size(dev, RTL_GSO_MAX_SIZE_V1); - netif_set_gso_max_segs(dev, RTL_GSO_MAX_SEGS_V1); + netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V1); + netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1); } dev->hw_features |= NETIF_F_RXALL; diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index b04911bc8c57..a69d756e09b9 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -1009,11 +1009,13 @@ static int ef100_process_design_param(struct efx_nic *efx, return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN: nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE); - netif_set_gso_max_size(efx->net_dev, nic_data->tso_max_payload_len); + netif_set_tso_max_size(efx->net_dev, + nic_data->tso_max_payload_len); return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS: nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff); - netif_set_gso_max_segs(efx->net_dev, nic_data->tso_max_payload_num_segs); + netif_set_tso_max_segs(efx->net_dev, + nic_data->tso_max_payload_num_segs); return 0; case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES: nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff); @@ -1138,7 +1140,8 @@ static int ef100_probe_main(struct efx_nic *efx) nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT; nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT; nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT; - netif_set_gso_max_segs(net_dev, ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT); + netif_set_tso_max_segs(net_dev, + ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT); /* Read design parameters */ rc = ef100_check_design_params(efx); if (rc) { diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 5e7fe75cb1d4..5a772354da83 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -710,7 +710,7 @@ static int efx_register_netdev(struct efx_nic *efx) if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) net_dev->priv_flags |= IFF_UNICAST_FLT; net_dev->ethtool_ops = &efx_ethtool_ops; - netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS); + netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS); net_dev->min_mtu = EFX_MIN_MTU; net_dev->max_mtu = EFX_MAX_MTU; diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 60c595ef7589..b7282331faec 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2267,7 +2267,7 @@ static int ef4_register_netdev(struct 
ef4_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &ef4_netdev_ops; net_dev->ethtool_ops = &ef4_ethtool_ops; - netif_set_gso_max_segs(net_dev, EF4_TSO_MAX_SEGS); + netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS); net_dev->min_mtu = EF4_MIN_MTU; net_dev->max_mtu = EF4_MAX_MTU; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 448fcc325ed7..866af2cc27a3 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1431,7 +1431,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, */ net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features; - netif_set_gso_max_size(net, gso_max_size); + netif_set_tso_max_size(net, gso_max_size); ret = rndis_filter_set_offload_params(net, nvdev, &offloads); diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index ca409d450a29..3020e81159d0 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -735,7 +735,7 @@ static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->features |= AQ_SUPPORT_FEATURE; dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE; - netif_set_gso_max_size(dev->net, 65535); + netif_set_tso_max_size(dev->net, 65535); aqc111_read_fw_version(dev, aqc111_data); aqc111_data->autoneg = AUTONEG_ENABLE; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index e2fa56b92685..7a8c11a26eb5 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1382,7 +1382,7 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->hw_features |= dev->net->features; - netif_set_gso_max_size(dev->net, 16384); + netif_set_tso_max_size(dev->net, 16384); /* Enable checksum offload */ *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 94e571fb61da..636a405844c5 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -4372,7 +4372,7 @@ static int lan78xx_probe(struct usb_interface *intf, /* MTU range: 68 - 9000 */ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; - netif_set_gso_max_size(netdev, LAN78XX_TSO_SIZE(dev)); + netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev)); netif_napi_add(netdev, &dev->napi, lan78xx_poll, NAPI_POLL_WEIGHT); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ee41088c5251..d3b53db57c26 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9658,7 +9658,7 @@ static int rtl8152_probe(struct usb_interface *intf, } netdev->ethtool_ops = &ops; - netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); + netif_set_tso_max_size(netdev, RTL_LIMITED_TSO_SIZE); /* MTU range: 68 - 1500 or 9194 */ netdev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 92698f79a4e0..2d4436cbcb47 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1129,7 +1129,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) { card->dev->needed_headroom = sizeof(struct qeth_hdr_tso); netif_keep_dst(card->dev); - netif_set_gso_max_size(card->dev, + netif_set_tso_max_size(card->dev, PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ea3b6b18aa6e..8d44bce0477a 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1907,7 +1907,7 @@ static int 
qeth_l3_setup_netdev(struct qeth_card *card) netif_keep_dst(card->dev); if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) - netif_set_gso_max_size(card->dev, + netif_set_tso_max_size(card->dev, PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT); diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 55f47cadb114..47fcbade7389 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -517,16 +517,16 @@ void br_mtu_auto_adjust(struct net_bridge *br) static void br_set_gso_limits(struct net_bridge *br) { - unsigned int gso_max_size = GSO_MAX_SIZE; - u16 gso_max_segs = GSO_MAX_SEGS; + unsigned int tso_max_size = TSO_MAX_SIZE; const struct net_bridge_port *p; + u16 tso_max_segs = TSO_MAX_SEGS; list_for_each_entry(p, &br->port_list, list) { - gso_max_size = min(gso_max_size, p->dev->gso_max_size); - gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs); + tso_max_size = min(tso_max_size, p->dev->tso_max_size); + tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs); } - netif_set_gso_max_size(br->dev, gso_max_size); - netif_set_gso_max_segs(br->dev, gso_max_segs); + netif_set_tso_max_size(br->dev, tso_max_size); + netif_set_tso_max_segs(br->dev, tso_max_segs); } /* diff --git a/net/core/dev.c b/net/core/dev.c index 6ae085b11373..f036ccb61da4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3032,8 +3032,8 @@ EXPORT_SYMBOL(netif_set_tso_max_segs); */ void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) { - netif_set_gso_max_size(to, from->gso_max_size); - netif_set_gso_max_segs(to, from->gso_max_segs); + netif_set_tso_max_size(to, from->tso_max_size); + netif_set_tso_max_segs(to, from->tso_max_segs); } EXPORT_SYMBOL(netif_inherit_tso_max); -- cgit v1.2.3 From 6384b7695953dcb18da04a255b6616ae13c51b01 Mon Sep 17 00:00:00 2001 From: Michal Swiatkowski Date: Mon, 28 Mar 2022 16:58:27 +0200 Subject: ice: link representors to PCI device Link port representors to parent PCI device to benefit from systemd defined naming scheme. Example from ip tool: - without linking: eth0 ... - with linking: eth0 ... altname enp24s0f0npf0vf0 The port representor name is being shown in altname, because the name is longer than IFNAMSIZ (16) limit. Altname can be used in ip tool. Signed-off-by: Michal Swiatkowski Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_repr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index a91b81c3088b..0dac67cd9c77 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -338,6 +338,7 @@ static int ice_repr_add(struct ice_vf *vf) repr->netdev->min_mtu = ETH_MIN_MTU; repr->netdev->max_mtu = ICE_MAX_MTU; + SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf)); err = ice_repr_reg_netdev(repr->netdev); if (err) goto err_netdev; -- cgit v1.2.3 From e0c7402270d9928d4a19669704056147b830bc69 Mon Sep 17 00:00:00 2001 From: Marcin Szycik Date: Mon, 28 Mar 2022 16:58:28 +0200 Subject: Revert "ice: Hide bus-info in ethtool for PRs in switchdev mode" This reverts commit bfaaba99e680bf82bf2cbf69866c3f37434ff766. Commit bfaaba99e680 ("ice: Hide bus-info in ethtool for PRs in switchdev mode") was a workaround for lshw tool displaying incorrect descriptions for port representors and PF in switchdev mode. Now the issue has been fixed in the lshw tool itself [1]. 
Removing the workaround can be considered a regression, as the user might be running older, unpatched lshw version. However, another important change (ice: link representors to PCI device, which improves port representor netdev naming with SET_NETDEV_DEV) also causes the same "regression" as removing the workaround, i.e. unpatched lshw is able to access bus-info information (this time not via ethtool) and the bug can occur. Therefore, the workaround no longer prevents the bug and can be removed. [1] https://ezix.org/src/pkg/lshw/commit/9bf4e4c9c1 Signed-off-by: Marcin Szycik Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 24cda7e1f916..476bd1c83c87 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -190,19 +190,17 @@ __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo, snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor, nvm->eetrack, orom->major, orom->build, orom->patch); + + strscpy(drvinfo->bus_info, pci_name(pf->pdev), + sizeof(drvinfo->bus_info)); } static void ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; __ice_get_drvinfo(netdev, drvinfo, np->vsi); - - strscpy(drvinfo->bus_info, pci_name(pf->pdev), - sizeof(drvinfo->bus_info)); - drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } -- cgit v1.2.3 From 0a02e282bad4dad455553fc2b9268cf1d003f132 Mon Sep 17 00:00:00 2001 From: David Thompson Date: Thu, 5 May 2022 12:23:09 -0400 Subject: mlxbf_gige: increase MDIO polling rate to 5us This patch increases the polling rate used by the mlxbf_gige driver on the MDIO bus. The previous polling rate was every 100us, and the new rate is every 5us. With this change the amount of time spent waiting for the MDIO BUSY signal to de-assert drops from ~100us to ~27us for each operation. 
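For reference, a minimal sketch of the call being tuned. The example_mdio_wait_idle() wrapper is illustrative and not part of the driver; the register offset and mask names are taken from the hunk below:

/*
 * Illustrative sketch: poll the MDIO gateway register until its BUSY
 * bit clears. With the new arguments the register is sampled every
 * 5 us, for at most 1 s; readl_poll_timeout_atomic() returns 0 once
 * the condition becomes true and -ETIMEDOUT otherwise.
 */
static int example_mdio_wait_idle(void __iomem *mdio_io)
{
	u32 val;

	return readl_poll_timeout_atomic(mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
					 val,
					 !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
					 5, 1000000);
}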
Signed-off-by: David Thompson Signed-off-by: Asmaa Mnebhi Link: https://lore.kernel.org/r/20220505162309.20050-1-davthompson@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c index 7905179a9575..2e6c1b7af096 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c @@ -105,7 +105,8 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg) writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET, - val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000); + val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK), + 5, 1000000); if (ret) { writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); @@ -137,7 +138,8 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add, /* If the poll timed out, drop the request */ ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET, - temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), 100, 1000000); + temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), + 5, 1000000); return ret; } -- cgit v1.2.3 From 2c110abc46168c3a8cc9f855c986a55650c777dd Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 5 May 2022 19:22:10 +0300 Subject: net: dsa: felix: use PGID_CPU for FDB entry migration on NPI port ocelot_fdb_add() redirects FDB entries installed on the NPI port towards the special reserved PGID_CPU used for host-filtered addresses. PGID_CPU contains BIT(ocelot->num_phys_ports) in the destination port mask, which is code name for the CPU port module. Whereas felix_migrate_fdbs_to_*_port() uses the ocelot->num_phys_ports PGID directly, and it appears that this works too. Even if this PGID is set to zero, apparently its number is special and packets still reach the CPU port module. Nonetheless, in the end, these addresses end up in the same place regardless of whether they go through an extra indirection layer or not. Use PGID_CPU across to have more uniformity. 
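For illustration, a rough sketch of what the reserved PGID_CPU amounts to in the analyzer block. The helper below is hypothetical and not part of any patch in this series; PGID_CPU, ANA_PGID_PGID and ocelot_write_rix() are taken from the surrounding code:

/*
 * Hypothetical helper, for illustration only: in NPI mode the
 * destination mask of the reserved PGID_CPU entry contains just
 * BIT(ocelot->num_phys_ports), i.e. the CPU port module, so any FDB
 * entry learned towards PGID_CPU is delivered there without the MAC
 * table entry itself naming a physical port.
 */
static void example_point_pgid_cpu_at_cpu_module(struct ocelot *ocelot)
{
	u32 cpu_module_mask = BIT(ocelot->num_phys_ports);

	ocelot_write_rix(ocelot, cpu_module_mask, ANA_PGID_PGID, PGID_CPU);
}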
Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 33cb124ca912..8abe48ae8c3a 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -49,14 +49,13 @@ static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port, { struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; int err; err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); if (err) return err; - return ocelot_fdb_add(ocelot, cpu, addr, vid, bridge_dev); + return ocelot_fdb_add(ocelot, PGID_CPU, addr, vid, bridge_dev); } static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port, @@ -128,10 +127,9 @@ felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, { struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; int err; - err = ocelot_fdb_del(ocelot, cpu, addr, vid, bridge_dev); + err = ocelot_fdb_del(ocelot, PGID_CPU, addr, vid, bridge_dev); if (err) return err; -- cgit v1.2.3 From a51c1c3f3218c74c07fed55ea2ad5b18c374dc25 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 5 May 2022 19:22:11 +0300 Subject: net: dsa: felix: stop migrating FDBs back and forth on tag proto change I just realized we don't need to migrate the host-filtered FDB entries when the tagging protocol changes from "ocelot" to "ocelot-8021q". Host-filtered addresses are learned towards the PGID_CPU "multicast" port group, reserved by software, which contains BIT(ocelot->num_phys_ports). That is the "special" port entry in the analyzer block for the CPU port module. In "ocelot" mode, the CPU port module's packets are redirected to the NPI port. In "ocelot-8021q" mode, felix_8021q_cpu_port_init() does something funny anyway, and changes PGID_CPU to stop pointing at the CPU port module and start pointing at the physical port where the DSA master is attached. The fact that we can alter the destination of packets learned towards PGID_CPU without altering the MAC table entries themselves means that it is pointless to walk through the FDB entries, forget that they were learned towards PGID_CPU, and re-learn them towards the "unicast" PGID associated with the physical port connected to the DSA master. We can let the PGID_CPU value change simply alter the destination of the host-filtered unicast packets in one fell swoop. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 55 ++---------------------------------------- 1 file changed, 2 insertions(+), 53 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 8abe48ae8c3a..6443cd573040 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -42,22 +42,6 @@ static struct net_device *felix_classify_db(struct dsa_db db) } } -/* We are called before felix_npi_port_init(), so ocelot->npi is -1. 
*/ -static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - struct net_device *bridge_dev = felix_classify_db(db); - struct ocelot *ocelot = ds->priv; - int err; - - err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); - if (err) - return err; - - return ocelot_fdb_add(ocelot, PGID_CPU, addr, vid, bridge_dev); -} - static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, struct dsa_db db) @@ -116,26 +100,6 @@ felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port) felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC); } -/* ocelot->npi was already set to -1 by felix_npi_port_deinit, so - * ocelot_fdb_add() will not redirect FDB entries towards the - * CPU port module here, which is what we want. - */ -static int -felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - struct net_device *bridge_dev = felix_classify_db(db); - struct ocelot *ocelot = ds->priv; - int err; - - err = ocelot_fdb_del(ocelot, PGID_CPU, addr, vid, bridge_dev); - if (err) - return err; - - return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); -} - static int felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, @@ -486,13 +450,9 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) if (err) return err; - err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port); - if (err) - goto out_tag_8021q_unregister; - err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port); if (err) - goto out_migrate_fdbs; + goto out_tag_8021q_unregister; felix_migrate_flood_to_tag_8021q_port(ds, cpu); @@ -514,8 +474,6 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) out_migrate_flood: felix_migrate_flood_to_npi_port(ds, cpu); dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); -out_migrate_fdbs: - dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port); out_tag_8021q_unregister: dsa_tag_8021q_unregister(ds); return err; @@ -595,24 +553,15 @@ static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu) struct ocelot *ocelot = ds->priv; int err; - err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port); - if (err) - return err; - err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); if (err) - goto out_migrate_fdbs; + return err; felix_migrate_flood_to_npi_port(ds, cpu); felix_npi_port_init(ocelot, cpu); return 0; - -out_migrate_fdbs: - dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port); - - return err; } static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu) -- cgit v1.2.3 From 28de0f9fec5a84bd5b56d6364432a8730eac410a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 5 May 2022 19:22:12 +0300 Subject: net: dsa: felix: perform MDB migration based on ocelot->multicast list The felix driver is the only user of dsa_port_walk_mdbs(), and there isn't even a good reason for it, considering that the host MDB entries are already saved by the ocelot switch lib in the ocelot->multicast list. Rewrite the multicast entry migration procedure around the ocelot->multicast list so we can delete dsa_port_walk_mdbs(). 
Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 51 ++++--------------------------- drivers/net/ethernet/mscc/ocelot.c | 61 ++++++++++++++++++++++++++++++++++++++ include/soc/mscc/ocelot.h | 3 ++ 3 files changed, 69 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 6443cd573040..a4882e493cf4 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -42,27 +42,6 @@ static struct net_device *felix_classify_db(struct dsa_db db) } } -static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - struct net_device *bridge_dev = felix_classify_db(db); - struct switchdev_obj_port_mdb mdb; - struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; - int err; - - memset(&mdb, 0, sizeof(mdb)); - ether_addr_copy(mdb.addr, addr); - mdb.vid = vid; - - err = ocelot_port_mdb_del(ocelot, port, &mdb, bridge_dev); - if (err) - return err; - - return ocelot_port_mdb_add(ocelot, cpu, &mdb, bridge_dev); -} - static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to, int pgid) { @@ -100,28 +79,6 @@ felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port) felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC); } -static int -felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) -{ - struct net_device *bridge_dev = felix_classify_db(db); - struct switchdev_obj_port_mdb mdb; - struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; - int err; - - memset(&mdb, 0, sizeof(mdb)); - ether_addr_copy(mdb.addr, addr); - mdb.vid = vid; - - err = ocelot_port_mdb_del(ocelot, cpu, &mdb, bridge_dev); - if (err) - return err; - - return ocelot_port_mdb_add(ocelot, port, &mdb, bridge_dev); -} - /* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that * the tagger can perform RX source port identification. 
*/ @@ -450,7 +407,8 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) if (err) return err; - err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port); + err = ocelot_migrate_mdbs(ocelot, BIT(ocelot->num_phys_ports), + BIT(cpu)); if (err) goto out_tag_8021q_unregister; @@ -473,7 +431,7 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) out_migrate_flood: felix_migrate_flood_to_npi_port(ds, cpu); - dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); + ocelot_migrate_mdbs(ocelot, BIT(cpu), BIT(ocelot->num_phys_ports)); out_tag_8021q_unregister: dsa_tag_8021q_unregister(ds); return err; @@ -553,7 +511,8 @@ static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu) struct ocelot *ocelot = ds->priv; int err; - err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); + err = ocelot_migrate_mdbs(ocelot, BIT(cpu), + BIT(ocelot->num_phys_ports)); if (err) return err; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 0825a92599a5..9336f3b00c6e 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2610,6 +2610,67 @@ static void ocelot_setup_logical_port_ids(struct ocelot *ocelot) } } +static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc, + unsigned long from_mask, unsigned long to_mask) +{ + unsigned char addr[ETH_ALEN]; + struct ocelot_pgid *pgid; + u16 vid = mc->vid; + + dev_dbg(ocelot->dev, + "Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n", + mc->addr, mc->vid, from_mask, to_mask); + + /* First clean up the current port mask from hardware, because + * we'll be modifying it. + */ + ocelot_pgid_free(ocelot, mc->pgid); + ocelot_encode_ports_to_mdb(addr, mc); + ocelot_mact_forget(ocelot, addr, vid); + + mc->ports &= ~from_mask; + mc->ports |= to_mask; + + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) { + dev_err(ocelot->dev, + "Cannot allocate PGID for mdb %pM vid %d\n", + mc->addr, mc->vid); + devm_kfree(ocelot->dev, mc); + return PTR_ERR(pgid); + } + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); + + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); +} + +int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask, + unsigned long to_mask) +{ + struct ocelot_multicast *mc; + int err; + + list_for_each_entry(mc, &ocelot->multicast, list) { + if (!(mc->ports & from_mask)) + continue; + + err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs); + /* Documentation for PORTID_VAL says: * Logical port number for front port. If port is not a member of a LLAG, * then PORTID must be set to the physical port number. 
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 8d8d46778f7e..e88bcfe4b2cd 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -998,6 +998,9 @@ int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx, enum macaccess_entry_type type, int sfid, int ssid); +int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask, + unsigned long to_mask); + int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, struct ocelot_policer *pol); int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix); -- cgit v1.2.3 From be8af67fabcb94b2f4e0897e5782bccd3104bef3 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 6 May 2022 10:07:47 -0700 Subject: caif_virtio: switch to netif_napi_add_weight() caif_virtio uses a custom napi weight, switch to the new API for setting custom weights. Signed-off-by: Jakub Kicinski Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/caif/caif_virtio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 444ef6a342f6..5458f57177a0 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -714,7 +714,8 @@ static int cfv_probe(struct virtio_device *vdev) /* Initialize NAPI poll context data */ vringh_kiov_init(&cfv->ctx.riov, NULL, 0); cfv->ctx.head = USHRT_MAX; - netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA); + netif_napi_add_weight(netdev, &cfv->napi, cfv_rx_poll, + CFV_DEFAULT_QUOTA); tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet); -- cgit v1.2.3 From b707b89f7be36147187ebc52d91c085040c26de9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 6 May 2022 10:07:48 -0700 Subject: eth: switch to netif_napi_add_weight() Switch all Ethernet drivers which use custom napi weights to the new API. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/3com/typhoon.c | 2 +- drivers/net/ethernet/adaptec/starfire.c | 2 +- drivers/net/ethernet/amd/amd8111e.c | 2 +- drivers/net/ethernet/amd/pcnet32.c | 3 ++- drivers/net/ethernet/arc/emac_main.c | 3 ++- drivers/net/ethernet/atheros/ag71xx.c | 3 ++- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 4 ++-- drivers/net/ethernet/broadcom/sb1250-mac.c | 2 +- drivers/net/ethernet/dec/tulip/tulip_core.c | 2 +- drivers/net/ethernet/ezchip/nps_enet.c | 4 ++-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 3 ++- drivers/net/ethernet/hisilicon/hisi_femac.c | 3 ++- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 3 ++- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 3 ++- drivers/net/ethernet/ibm/emac/mal.c | 4 ++-- drivers/net/ethernet/ibm/ibmveth.c | 2 +- drivers/net/ethernet/intel/e100.c | 2 +- drivers/net/ethernet/lantiq_etop.c | 8 ++++---- drivers/net/ethernet/marvell/pxa168_eth.c | 3 ++- drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c | 3 ++- drivers/net/ethernet/microsoft/mana/mana_en.c | 2 +- drivers/net/ethernet/moxa/moxart_ether.c | 2 +- drivers/net/ethernet/mscc/ocelot_fdma.c | 4 ++-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 4 ++-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 9 +++++---- drivers/net/ethernet/nxp/lpc_eth.c | 2 +- drivers/net/ethernet/realtek/8139cp.c | 2 +- drivers/net/ethernet/sfc/efx_channels.c | 4 ++-- drivers/net/ethernet/sfc/falcon/efx.c | 4 ++-- drivers/net/ethernet/smsc/smsc911x.c | 3 ++- drivers/net/ethernet/toshiba/tc35815.c | 2 +- drivers/net/ethernet/wiznet/w5100.c | 2 +- drivers/net/ethernet/wiznet/w5300.c | 2 +- drivers/net/ethernet/xscale/ixp4xx_eth.c | 2 +- 34 files changed, 58 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index ad57209007e1..cad4f354cc76 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -2464,7 +2464,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* The chip-specific entries in the device structure. 
*/ dev->netdev_ops = &typhoon_netdev_ops; - netif_napi_add(dev, &tp->napi, typhoon_poll, 16); + netif_napi_add_weight(dev, &tp->napi, typhoon_poll, 16); dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &typhoon_ethtool_ops; diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index c6982f7caf9b..8f0a6b9c518e 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -772,7 +772,7 @@ static int starfire_init_one(struct pci_dev *pdev, dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = ðtool_ops; - netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work); + netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work); if (mtu) dev->mtu = mtu; diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 9421afb950f7..05ac8d9ccb2f 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1828,7 +1828,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; dev->min_mtu = AMD8111E_MIN_MTU; dev->max_mtu = AMD8111E_MAX_MTU; - netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); + netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32); #if AMD8111E_VLAN_TAG_USED dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index c20c369c7eb8..b5ff47283cfe 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1881,7 +1881,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* napi.weight is used in both the napi and non-napi cases */ lp->napi.weight = lp->rx_ring_size / 2; - netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); + netif_napi_add_weight(dev, &lp->napi, pcnet32_poll, + lp->rx_ring_size / 2); if (fdx && !(lp->options & PCNET32_PORT_ASEL) && ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index c642c3d3e600..288e2961823e 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -981,7 +981,8 @@ int arc_emac_probe(struct net_device *ndev, int interface) dev_info(dev, "connected to %s phy with id 0x%x\n", phydev->drv->name, phydev->phy_id); - netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); + netif_napi_add_weight(ndev, &priv->napi, arc_emac_poll, + ARC_EMAC_NAPI_WEIGHT); err = register_netdev(ndev); if (err) { diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index ec167af0e3b2..cac509708e9d 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1922,7 +1922,8 @@ static int ag71xx_probe(struct platform_device *pdev) return err; } - netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); + netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll, + AG71XX_NAPI_WEIGHT); err = clk_prepare_enable(ag->clk_eth); if (err) { diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index c1b97e8c55ef..698438a2ee0f 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev) /* register netdevice */ dev->netdev_ops = &bcm_enet_ops; - netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + 
netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16); dev->ethtool_ops = &bcm_enet_ethtool_ops; /* MTU range: 46 - 2028 */ @@ -2714,7 +2714,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) /* register netdevice */ dev->netdev_ops = &bcm_enetsw_ops; - netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16); dev->ethtool_ops = &bcm_enetsw_ethtool_ops; SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5d5f10180158..f02facb60fd1 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2203,7 +2203,7 @@ static int sbmac_init(struct platform_device *pldev, long long base) dev->min_mtu = 0; dev->max_mtu = ENET_PACKET_SIZE; - netif_napi_add(dev, &sc->napi, sbmac_poll, 16); + netif_napi_add_weight(dev, &sc->napi, sbmac_poll, 16); dev->irq = UNIT_INT(idx); diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 79df5a72877b..434d8bf0e8f9 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1689,7 +1689,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &tulip_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_TULIP_NAPI - netif_napi_add(dev, &tp->napi, tulip_poll, 16); + netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16); #endif dev->ethtool_ops = &ops; diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 69dbf950d451..f1eb660aaee2 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -612,8 +612,8 @@ static s32 nps_enet_probe(struct platform_device *pdev) goto out_netdev; } - netif_napi_add(ndev, &priv->napi, nps_enet_poll, - NPS_ENET_NAPI_POLL_WEIGHT); + netif_napi_add_weight(ndev, &priv->napi, nps_enet_poll, + NPS_ENET_NAPI_POLL_WEIGHT); /* Register the driver. 
Should be the last thing in probe */ err = register_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index bacf25318f87..b3dae17e067e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1020,7 +1020,8 @@ static int fs_enet_probe(struct platform_device *ofdev) ndev->netdev_ops = &fs_enet_netdev_ops; ndev->watchdog_timeo = 2 * HZ; INIT_WORK(&fep->timeout_work, fs_timeout_work); - netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight); + netif_napi_add_weight(ndev, &fep->napi, fs_enet_napi, + fpi->napi_weight); ndev->ethtool_ops = &fs_ethtool_ops; diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index a6c18b6527f9..93846bace028 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -852,7 +852,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev) ndev->priv_flags |= IFF_UNICAST_FLT; ndev->netdev_ops = &hisi_femac_netdev_ops; ndev->ethtool_ops = &hisi_femac_ethtools_ops; - netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT); + netif_napi_add_weight(ndev, &priv->napi, hisi_femac_poll, + FEMAC_POLL_WEIGHT); hisi_femac_port_init(priv); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index b33ed4d92b71..24b7b819dbfb 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -481,7 +481,8 @@ static void rx_add_napi(struct hinic_rxq *rxq) { struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); - netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight); + netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll, + nic_dev->rx_weight); napi_enable(&rxq->napi); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 8d59babbf476..87408e7bb809 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -809,7 +809,8 @@ static int tx_request_irq(struct hinic_txq *txq) qp = container_of(sq, struct hinic_qp, sq); - netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight); + netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll, + nic_dev->tx_weight); hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC, diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 075c07303f16..ff5487bbebe3 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -605,8 +605,8 @@ static int mal_probe(struct platform_device *ofdev) init_dummy_netdev(&mal->dummy_dev); - netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll, - CONFIG_IBM_EMAC_POLL_WEIGHT); + netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll, + CONFIG_IBM_EMAC_POLL_WEIGHT); /* Load power-on reset defaults */ mal_reset(mal); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 22fb0d109a68..5c6a04d29f5b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1674,7 +1674,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->pool_config = 0; ibmveth_init_link_settings(netdev); - netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); + netif_napi_add_weight(netdev, 
&adapter->napi, ibmveth_poll, 16); netdev->irq = dev->irq; netdev->netdev_ops = &ibmveth_netdev_ops; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 4a8013f20152..36418b510dde 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2848,7 +2848,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); nic = netdev_priv(netdev); - netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); nic->netdev = netdev; nic->pdev = pdev; nic->msg_enable = (1 << debug) - 1; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 9b6fa27b7daf..7cedbe1fdfd7 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -701,11 +701,11 @@ ltq_etop_probe(struct platform_device *pdev) for (i = 0; i < MAX_DMA_CHAN; i++) { if (IS_TX(i)) - netif_napi_add(dev, &priv->ch[i].napi, - ltq_etop_poll_tx, 8); + netif_napi_add_weight(dev, &priv->ch[i].napi, + ltq_etop_poll_tx, 8); else if (IS_RX(i)) - netif_napi_add(dev, &priv->ch[i].napi, - ltq_etop_poll_rx, 32); + netif_napi_add_weight(dev, &priv->ch[i].napi, + ltq_etop_poll_rx, 32); priv->ch[i].netdev = dev; } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 52bef50f5a0d..349b8a94e939 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1486,7 +1486,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) /* Hardware supports only 3 ports */ BUG_ON(pep->port_num > 2); - netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); + netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll, + pep->rx_ring_size); memset(&pep->timeout, 0, sizeof(struct timer_list)); timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 1e9ff365459e..66360c8c5a38 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -381,7 +381,8 @@ static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) } sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx); } - netif_napi_add(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, FDMA_WEIGHT); + netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, + FDMA_WEIGHT); napi_enable(&rx->napi); sparx5_fdma_rx_activate(sparx5, rx); return 0; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 06f853c5c141..b1d773823232 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1602,7 +1602,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - netif_napi_add(ndev, &cq->napi, mana_poll, 1); + netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx, cq->napi.napi_id)); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index afb7dcadb8d2..a3214a762e4b 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -533,7 +533,7 @@ static int moxart_mac_probe(struct platform_device *pdev) } ndev->netdev_ops = 
&moxart_netdev_ops; - netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM); + netif_napi_add_weight(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM); ndev->priv_flags |= IFF_UNICAST_FLT; ndev->irq = irq; diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c index dffa597bffe6..083fddd263ec 100644 --- a/drivers/net/ethernet/mscc/ocelot_fdma.c +++ b/drivers/net/ethernet/mscc/ocelot_fdma.c @@ -799,8 +799,8 @@ void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev) return; fdma->ndev = dev; - netif_napi_add(dev, &fdma->napi, ocelot_fdma_napi_poll, - OCELOT_FDMA_WEIGHT); + netif_napi_add_weight(dev, &fdma->napi, ocelot_fdma_napi_poll, + OCELOT_FDMA_WEIGHT); } void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev) diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index fe5e77330f5f..61497c3e4cfb 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3586,8 +3586,8 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) goto abort; ss->mgp = mgp; ss->dev = mgp->dev; - netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, - myri10ge_napi_weight); + netif_napi_add_weight(ss->dev, &ss->napi, myri10ge_poll, + myri10ge_napi_weight); } return 0; abort: diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index db4dfae8c01d..d2de8ac44f72 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2720,8 +2720,8 @@ static int vxge_open(struct net_device *dev) } if (vdev->config.intr_type != MSI_X) { - netif_napi_add(dev, &vdev->napi, vxge_poll_inta, - vdev->config.napi_weight); + netif_napi_add_weight(dev, &vdev->napi, vxge_poll_inta, + vdev->config.napi_weight); napi_enable(&vdev->napi); for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &vdev->vpaths[i]; @@ -2730,8 +2730,9 @@ static int vxge_open(struct net_device *dev) } else { for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &vdev->vpaths[i]; - netif_napi_add(dev, &vpath->ring.napi, - vxge_poll_msix, vdev->config.napi_weight); + netif_napi_add_weight(dev, &vpath->ring.napi, + vxge_poll_msix, + vdev->config.napi_weight); napi_enable(&vpath->ring.napi); vpath->ring.napi_p = &vpath->ring.napi; } diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 756f97dce85b..f606d75b33b4 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1373,7 +1373,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) pldat->duplex = DUPLEX_FULL; __lpc_params_setup(pldat); - netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT); + netif_napi_add_weight(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT); ret = register_netdev(ndev); if (ret) { diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index ad7b9e9d7f95..e0feeec13da6 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1986,7 +1986,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) eth_hw_addr_set(dev, (u8 *)addr); dev->netdev_ops = &cp_netdev_ops; - netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); + netif_napi_add_weight(dev, &cp->napi, cp_rx_poll, 16); dev->ethtool_ops = &cp_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; diff --git a/drivers/net/ethernet/sfc/efx_channels.c 
b/drivers/net/ethernet/sfc/efx_channels.c index eec80b024195..3f28f9861dfa 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -1316,8 +1316,8 @@ void efx_init_napi_channel(struct efx_channel *channel) struct efx_nic *efx = channel->efx; channel->napi_dev = efx->net_dev; - netif_napi_add(channel->napi_dev, &channel->napi_str, - efx_poll, napi_weight); + netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll, + napi_weight); } void efx_init_napi(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index b7282331faec..f619ffb26787 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2017,8 +2017,8 @@ static void ef4_init_napi_channel(struct ef4_channel *channel) struct ef4_nic *efx = channel->efx; channel->napi_dev = efx->net_dev; - netif_napi_add(channel->napi_dev, &channel->napi_str, - ef4_poll, napi_weight); + netif_napi_add_weight(channel->napi_dev, &channel->napi_str, ef4_poll, + napi_weight); } static void ef4_init_napi(struct ef4_nic *efx) diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index c854efdf1f25..3bf20211cceb 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2304,7 +2304,8 @@ static int smsc911x_init(struct net_device *dev) return -ENODEV; dev->flags |= IFF_MULTICAST; - netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); + netif_napi_add_weight(dev, &pdata->napi, smsc911x_poll, + SMSC_NAPI_WEIGHT); dev->netdev_ops = &smsc911x_netdev_ops; dev->ethtool_ops = &smsc911x_ethtool_ops; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index ce38f7515225..47aab9c132c8 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -804,7 +804,7 @@ static int tc35815_init_one(struct pci_dev *pdev, dev->netdev_ops = &tc35815_netdev_ops; dev->ethtool_ops = &tc35815_ethtool_ops; dev->watchdog_timeo = TC35815_TX_TIMEOUT; - netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); + netif_napi_add_weight(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); dev->irq = pdev->irq; dev->base_addr = (unsigned long)ioaddr; diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 4fd7c39e1123..acd78120e53c 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1133,7 +1133,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, ndev->netdev_ops = &w5100_netdev_ops; ndev->ethtool_ops = &w5100_ethtool_ops; - netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16); + netif_napi_add_weight(ndev, &priv->napi, w5100_napi_poll, 16); /* This chip doesn't support VLAN packets with normal MTU, * so disable VLAN for this device. diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 402d5036f266..773f8c77909a 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -603,7 +603,7 @@ static int w5300_probe(struct platform_device *pdev) ndev->netdev_ops = &w5300_netdev_ops; ndev->ethtool_ops = &w5300_ethtool_ops; ndev->watchdog_timeo = HZ; - netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16); + netif_napi_add_weight(ndev, &priv->napi, w5300_napi_poll, 16); /* This chip doesn't support VLAN packets with normal MTU, * so disable VLAN for this device. 
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index d947955621ee..89770c2e0ffb 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1480,7 +1480,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) ndev->dev.dma_mask = dev->dma_mask; ndev->dev.coherent_dma_mask = dev->coherent_dma_mask; - netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT); + netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT); if (!(port->npe = npe_request(NPE_ID(port->id)))) return -EIO; -- cgit v1.2.3 From 8ded532cd1cbe48461821c605dbad9447555755a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 6 May 2022 10:07:49 -0700 Subject: r8152: switch to netif_napi_add_weight() r8152 uses a custom napi weight, switch to the new API for setting custom weight. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d3b53db57c26..c2da3438387c 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9732,10 +9732,8 @@ static int rtl8152_probe(struct usb_interface *intf, usb_set_intfdata(intf, tp); - if (tp->support_2500full) - netif_napi_add(netdev, &tp->napi, r8152_poll, 256); - else - netif_napi_add(netdev, &tp->napi, r8152_poll, 64); + netif_napi_add_weight(netdev, &tp->napi, r8152_poll, + tp->support_2500full ? 256 : 64); ret = register_netdev(netdev); if (ret != 0) { -- cgit v1.2.3 From d484735dcf923e7872d5e353aacfaa4f42dea1d4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 6 May 2022 10:07:50 -0700 Subject: net: virtio: switch to netif_napi_add_weight() virtio netdev driver uses a custom napi weight, switch to the new API for setting custom weight. Signed-off-by: Jakub Kicinski Reviewed-by: Xuan Zhuo Acked-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ebb98b796352..db05b5e930be 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3313,8 +3313,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; - netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, - napi_weight); + netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, + napi_weight); netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, napi_tx ? napi_weight : 0); -- cgit v1.2.3 From 6f83cb8cbfbfc2f26c644dba98992356a74aec4c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 6 May 2022 10:07:51 -0700 Subject: net: wan: switch to netif_napi_add_weight() A handful of WAN drivers use custom napi weights, switch them to the new API. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/wan/fsl_ucc_hdlc.c | 2 +- drivers/net/wan/hd64572.c | 3 ++- drivers/net/wan/ixp4xx_hss.c | 2 +- drivers/net/wan/lapbether.c | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 5ae2d27b5da9..22edea6ca4b8 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1231,7 +1231,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) dev->watchdog_timeo = 2 * HZ; hdlc->attach = ucc_hdlc_attach; hdlc->xmit = ucc_hdlc_tx; - netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); + netif_napi_add_weight(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); if (register_hdlc_device(dev)) { ret = -ENOBUFS; pr_err("ucc_hdlc: unable to register hdlc device\n"); diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index b89b03a6aba7..534369ffe5de 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -173,7 +173,8 @@ static void sca_init_port(port_t *port) sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */ sca_set_carrier(port); - netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT); + netif_napi_add_weight(port->netdev, &port->napi, sca_poll, + NAPI_WEIGHT); } /* MSCI interrupt service */ diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 863c3e34e136..e46b7f5ee49e 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -1504,7 +1504,7 @@ static int ixp4xx_hss_probe(struct platform_device *pdev) port->clock_reg = CLK42X_SPEED_2048KHZ; port->id = pdev->id; port->dev = &pdev->dev; - netif_napi_add(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); + netif_napi_add_weight(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT); err = register_hdlc_device(ndev); if (err) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 282192b82404..960f1393595c 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -408,7 +408,7 @@ static int lapbeth_new_device(struct net_device *dev) spin_lock_init(&lapbeth->up_lock); skb_queue_head_init(&lapbeth->rx_queue); - netif_napi_add(ndev, &lapbeth->napi, lapbeth_napi_poll, 16); + netif_napi_add_weight(ndev, &lapbeth->napi, lapbeth_napi_poll, 16); rc = -EIO; if (register_netdevice(ndev)) -- cgit v1.2.3 From 7cf0f96df1d86a4d08ac0160931df85bc88d42e8 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:14 +0300 Subject: mlxsw: spectrum: Tolerate enslaving of various devices to VRF Enslaving netdevices to VRF is currently handled through an mlxsw_sp_is_vrf_event() conditional in mlxsw_sp_netdevice_event(). In the following patch sets, VRF enslavement will be handled purely in the router code. Therefore make handlers of NETDEV_PRECHANGEUPPER tolerant of enslaving to VRF, so that they do not bounce the change. For NETDEV_CHANGEUPPER, drop the WARN_ON(1) and bounce from mlxsw_sp_netdevice_port_vlan_event(). This is the only handler that warned and bounces even in the CHANGEUPPER code, other handler quietly do nothing when they encounter an unfamiliar upper. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ac6348e2ff1f..12fd846a778f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4525,7 +4525,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, !netif_is_lag_master(upper_dev) && !netif_is_bridge_master(upper_dev) && !netif_is_ovs_master(upper_dev) && - !netif_is_macvlan(upper_dev)) { + !netif_is_macvlan(upper_dev) && + !netif_is_l3_master(upper_dev)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } @@ -4724,7 +4725,8 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; if (!netif_is_bridge_master(upper_dev) && - !netif_is_macvlan(upper_dev)) { + !netif_is_macvlan(upper_dev) && + !netif_is_l3_master(upper_dev)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EINVAL; } @@ -4763,9 +4765,6 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, } else if (netif_is_macvlan(upper_dev)) { if (!info->linking) mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); - } else { - err = -EINVAL; - WARN_ON(1); } break; } @@ -4813,7 +4812,8 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!netif_is_macvlan(upper_dev)) { + if (!netif_is_macvlan(upper_dev) && + !netif_is_l3_master(upper_dev)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EOPNOTSUPP; } @@ -4874,7 +4874,9 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) { + if (!is_vlan_dev(upper_dev) && + !netif_is_macvlan(upper_dev) && + !netif_is_l3_master(upper_dev)) { NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); return -EOPNOTSUPP; } @@ -4918,16 +4920,20 @@ static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); struct netdev_notifier_changeupper_info *info = ptr; struct netlink_ext_ack *extack; + struct net_device *upper_dev; if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER) return 0; extack = netdev_notifier_info_to_extack(&info->info); + upper_dev = info->upper_dev; - /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */ - NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + if (!netif_is_l3_master(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + return -EOPNOTSUPP; + } - return -EOPNOTSUPP; + return 0; } static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) -- cgit v1.2.3 From 0a27cb1692dedd44516745d86e0cb8e9524004c0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:15 +0300 Subject: mlxsw: spectrum_router: Add a dedicated notifier block Currently all netdevice events are handled in the centralized notifier handler maintained by spectrum.c. Since a number of events are involving router code, spectrum.c needs to dispatch them to spectrum_router.c. 
The spectrum module therefore needs to know more about the router code than it should have, and there is are several API points through which the two modules communicate. To simplify the notifier handlers, introduce a new notifier into the router module. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 20 ++++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 1 + 2 files changed, 21 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9ac4f3c00349..3fcb848836f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -9508,6 +9508,14 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, return err; } +static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + int err = 0; + + return notifier_from_errno(err); +} + static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, struct netdev_nested_priv *priv) { @@ -10692,8 +10700,18 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_register_fib_notifier; + mlxsw_sp->router->netdevice_nb.notifier_call = + mlxsw_sp_router_netdevice_event; + err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), + &mlxsw_sp->router->netdevice_nb); + if (err) + goto err_register_netdev_notifier; + return 0; +err_register_netdev_notifier: + unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), + &mlxsw_sp->router->fib_nb); err_register_fib_notifier: unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->nexthop_nb); @@ -10741,6 +10759,8 @@ err_router_ops_init: void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { + unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), + &mlxsw_sp->router->netdevice_nb); unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb); unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 6e704d807a78..37411b74c3e6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -67,6 +67,7 @@ struct mlxsw_sp_router { struct notifier_block netevent_nb; struct notifier_block inetaddr_nb; struct notifier_block inet6addr_nb; + struct notifier_block netdevice_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_ipip_ops **ipip_ops_arr; struct mlxsw_sp_router_nve_decap nve_decap_config; -- cgit v1.2.3 From 4f8afb680f1343a2ce42d5ff3c31e8486650a3a6 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:16 +0300 Subject: mlxsw: spectrum: Move handling of VRF events to router code Events involving VRF, as L3 concern, are handled in the router code, by the helper mlxsw_sp_netdevice_vrf_event(). The handler is currently invoked from the centralized dispatcher in spectrum.c. Instead, move the call to the newly-introduced router-specific notifier handler. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 11 ----------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 -- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 18 ++++++++++++++++-- 3 files changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 12fd846a778f..867c1f3810e6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4936,15 +4936,6 @@ static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev, return 0; } -static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) -{ - struct netdev_notifier_changeupper_info *info = ptr; - - if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) - return false; - return netif_is_l3_master(info->upper_dev); -} - static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, unsigned long event, void *ptr) @@ -5055,8 +5046,6 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, event, ptr); else if (mlxsw_sp_netdevice_event_is_router(event)) err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); - else if (mlxsw_sp_is_vrf_event(event, ptr)) - err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); else if (mlxsw_sp_port_dev_check(dev)) err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); else if (netif_is_lag_master(dev)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 2ad29ae1c640..a20e2a1b8569 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -726,8 +726,6 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, unsigned long event, void *ptr); -int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, - struct netdev_notifier_changeupper_info *info); bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3fcb848836f0..fa4d9bf7da75 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -9476,8 +9476,18 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL); } -int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, - struct netdev_notifier_changeupper_info *info) +static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + + if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) + return false; + return netif_is_l3_master(info->upper_dev); +} + +static int +mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, + struct netdev_notifier_changeupper_info *info) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); int err = 0; @@ -9511,8 +9521,12 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { + struct net_device *dev = netdev_notifier_info_to_dev(ptr); int err 
= 0; + if (mlxsw_sp_is_vrf_event(event, ptr)) + err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); + return notifier_from_errno(err); } -- cgit v1.2.3 From f40e600b369e00b990d788bf3c68631234ff2843 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:17 +0300 Subject: mlxsw: spectrum: Move handling of HW stats events to router code L3 HW stats are implemented in mlxsw as RIF counters, and therefore the code resides in spectrum_router. Exclude the offload xstats events from the mlxsw_sp_netdevice_event_is_router() predicate, and instead recreate the glue code in the router module. Previously, the order of dispatch was that for events on tunnels, a dedicated handler was called, which however did not handle HW stats events. But there is nothing special about tunnel devices as far as HW stats: there is a RIF associated with the tunnel netdevice, and that RIF is where the counter should be installed. Therefore now, HW stats events are tested first, independent of netdevice type. The upshot is that as of this commit, mlxsw supports L3 HW stats work on GRE tunnels. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 -- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 47 ++++++++++++++++++---- 2 files changed, 40 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 867c1f3810e6..367b9b6e040a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -5010,10 +5010,6 @@ static bool mlxsw_sp_netdevice_event_is_router(unsigned long event) case NETDEV_PRE_CHANGEADDR: case NETDEV_CHANGEADDR: case NETDEV_CHANGEMTU: - case NETDEV_OFFLOAD_XSTATS_ENABLE: - case NETDEV_OFFLOAD_XSTATS_DISABLE: - case NETDEV_OFFLOAD_XSTATS_REPORT_USED: - case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: return true; default: return false; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index fa4d9bf7da75..01f10200aeaa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -9378,6 +9378,19 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif, return -ENOBUFS; } +static bool mlxsw_sp_is_offload_xstats_event(unsigned long event) +{ + switch (event) { + case NETDEV_OFFLOAD_XSTATS_ENABLE: + case NETDEV_OFFLOAD_XSTATS_DISABLE: + case NETDEV_OFFLOAD_XSTATS_REPORT_USED: + case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: + return true; + } + + return false; +} + static int mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif, unsigned long event, @@ -9407,6 +9420,24 @@ mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif, return 0; } +static int +mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + unsigned long event, + struct netdev_notifier_offload_xstats_info *info) +{ + struct mlxsw_sp_rif *rif; + int err = 0; + + mutex_lock(&mlxsw_sp->router->lock); + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (rif) + err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info); + mutex_unlock(&mlxsw_sp->router->lock); + + return err; +} + int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, unsigned long event, void *ptr) { @@ -9432,12 +9463,6 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, case 
NETDEV_PRE_CHANGEADDR: err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); break; - case NETDEV_OFFLOAD_XSTATS_ENABLE: - case NETDEV_OFFLOAD_XSTATS_DISABLE: - case NETDEV_OFFLOAD_XSTATS_REPORT_USED: - case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA: - err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, ptr); - break; default: WARN_ON_ONCE(1); break; @@ -9522,9 +9547,17 @@ static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct mlxsw_sp_router *router; + struct mlxsw_sp *mlxsw_sp; int err = 0; - if (mlxsw_sp_is_vrf_event(event, ptr)) + router = container_of(nb, struct mlxsw_sp_router, netdevice_nb); + mlxsw_sp = router->mlxsw_sp; + + if (mlxsw_sp_is_offload_xstats_event(event)) + err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev, + event, ptr); + else if (mlxsw_sp_is_vrf_event(event, ptr)) err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); return notifier_from_errno(err); -- cgit v1.2.3 From ba81954cd5266c2cbcd3e1513f0dd10986d23948 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:18 +0300 Subject: mlxsw: spectrum: Move handling of router events to router code The events NETDEV_PRE_CHANGEADDR, NETDEV_CHANGEADDR and NETDEV_CHANGEMTU have implications for in-ASIC router interface objects, and as such are handled in the router module. Move the handling from the central dispatcher in spectrum.c to the new notifier handler in the router module. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 14 -------------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 -- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 18 ++++++++++++++++-- 3 files changed, 16 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 367b9b6e040a..afc03ba93826 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -5004,18 +5004,6 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, return 0; } -static bool mlxsw_sp_netdevice_event_is_router(unsigned long event) -{ - switch (event) { - case NETDEV_PRE_CHANGEADDR: - case NETDEV_CHANGEADDR: - case NETDEV_CHANGEMTU: - return true; - default: - return false; - } -} - static int mlxsw_sp_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -5040,8 +5028,6 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, event, ptr); - else if (mlxsw_sp_netdevice_event_is_router(event)) - err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); else if (mlxsw_sp_port_dev_check(dev)) err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); else if (netif_is_lag_master(dev)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a20e2a1b8569..983195ee881e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -718,8 +718,6 @@ union mlxsw_sp_l3addr { int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, - unsigned long event, void *ptr); void 
mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, const struct net_device *macvlan_dev); int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 01f10200aeaa..d5a8307aecb3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -9438,8 +9438,20 @@ mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp, return err; } -int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, - unsigned long event, void *ptr) +static bool mlxsw_sp_is_router_event(unsigned long event) +{ + switch (event) { + case NETDEV_PRE_CHANGEADDR: + case NETDEV_CHANGEADDR: + case NETDEV_CHANGEMTU: + return true; + default: + return false; + } +} + +static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, + unsigned long event, void *ptr) { struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct mlxsw_sp *mlxsw_sp; @@ -9557,6 +9569,8 @@ static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, if (mlxsw_sp_is_offload_xstats_event(event)) err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev, event, ptr); + else if (mlxsw_sp_is_router_event(event)) + err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); else if (mlxsw_sp_is_vrf_event(event, ptr)) err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); -- cgit v1.2.3 From 75ef4342282a2e2d008b97d70155475a5d342927 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:19 +0300 Subject: mlxsw: spectrum: Move handling of tunnel events to router code The events related to IPIP tunnels are handled by the router code. Move the handling from the central dispatcher in spectrum.c to the new notifier handler in the router module. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6 ------ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 13 ------------ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 24 ++++++++++++++-------- 3 files changed, 15 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index afc03ba93826..5ced3df92aab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -5022,12 +5022,6 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, if (netif_is_vxlan(dev)) err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr); - if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) - err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, - event, ptr); - else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) - err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, - event, ptr); else if (mlxsw_sp_port_dev_check(dev)) err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr); else if (netif_is_lag_master(dev)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 983195ee881e..a60d2bbd3aa6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -724,19 +724,6 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, unsigned long event, void *ptr); -bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); -bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); -int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - unsigned long event, - struct netdev_notifier_info *info); -int -mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - unsigned long event, - struct netdev_notifier_info *info); int mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, struct net_device *l3_dev, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d5a8307aecb3..54e19e988c01 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1530,8 +1530,8 @@ static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp, return false; } -bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev) +static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) { return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL); } @@ -1575,8 +1575,8 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp, return NULL; } -bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev) +static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) { bool is_ipip_ul; @@ -1960,10 +1960,10 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp, } } -int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev, - unsigned long event, - struct netdev_notifier_info *info) +static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev, + unsigned long event, + struct 
netdev_notifier_info *info) { struct netdev_notifier_changeupper_info *chup; struct netlink_ext_ack *extack; @@ -2038,7 +2038,7 @@ __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, return 0; } -int +static int mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ul_dev, unsigned long event, @@ -9569,6 +9569,12 @@ static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, if (mlxsw_sp_is_offload_xstats_event(event)) err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev, event, ptr); + else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) + err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, + event, ptr); + else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) + err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, + event, ptr); else if (mlxsw_sp_is_router_event(event)) err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); else if (mlxsw_sp_is_vrf_event(event, ptr)) -- cgit v1.2.3 From 05a8d7d4fadfc0ab65c6e8d81f8a02484d8db116 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:20 +0300 Subject: mlxsw: spectrum: Update a comment The position of netdevice notifier registration no longer depends on the router initialization, because the event handler no longer dispatches to the router code. Update the comment at the registration to that effect. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5ced3df92aab..cafd206e8d7e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3122,9 +3122,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, } } - /* Initialize netdevice notifier after router and SPAN is initialized, - * so that the event handler can use router structures and call SPAN - * respin. + /* Initialize netdevice notifier after SPAN is initialized, so that the + * event handler can call SPAN respin. */ mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp), -- cgit v1.2.3 From c353fb0d4c932e4b8b561d31afc54b5a60df3d95 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 8 May 2022 11:08:21 +0300 Subject: mlxsw: spectrum_router: Take router lock in router notifier handler For notifications that the router needs to handle, router lock is taken. Further, at least to determine whether an event is related to a tunnel underlay, router lock also needs to be taken. Due to this, the router lock is always taken for each unhandled event, and also for some handled events, even if they are not related to underlay. Thus each event implies at least one router lock, sometimes two. Instead of deferring the locking to the leaf handlers, take the lock in the router notifier handler always. This simplifies thinking about the locking state, and in some cases saves one lock cycle. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 47 +++++++--------------- 1 file changed, 15 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 54e19e988c01..9dbb573d53ea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1578,13 +1578,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) { - bool is_ipip_ul; - - mutex_lock(&mlxsw_sp->router->lock); - is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL); - mutex_unlock(&mlxsw_sp->router->lock); - - return is_ipip_ul; + return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL); } static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp, @@ -1969,7 +1963,6 @@ static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack; int err = 0; - mutex_lock(&mlxsw_sp->router->lock); switch (event) { case NETDEV_REGISTER: err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev); @@ -2000,7 +1993,6 @@ static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev); break; } - mutex_unlock(&mlxsw_sp->router->lock); return err; } @@ -2045,9 +2037,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, struct netdev_notifier_info *info) { struct mlxsw_sp_ipip_entry *ipip_entry = NULL; - int err = 0; + int err; - mutex_lock(&mlxsw_sp->router->lock); while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, ul_dev, ipip_entry))) { @@ -2060,7 +2051,7 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, if (err) { mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp, ul_dev); - break; + return err; } if (demote_this) { @@ -2077,9 +2068,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, ipip_entry = prev; } } - mutex_unlock(&mlxsw_sp->router->lock); - return err; + return 0; } int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, @@ -9427,15 +9417,12 @@ mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp, struct netdev_notifier_offload_xstats_info *info) { struct mlxsw_sp_rif *rif; - int err = 0; - mutex_lock(&mlxsw_sp->router->lock); rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (rif) - err = mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info); - mutex_unlock(&mlxsw_sp->router->lock); + if (!rif) + return 0; - return err; + return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info); } static bool mlxsw_sp_is_router_event(unsigned long event) @@ -9456,33 +9443,27 @@ static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp_rif *rif; - int err = 0; mlxsw_sp = mlxsw_sp_lower_get(dev); if (!mlxsw_sp) return 0; - mutex_lock(&mlxsw_sp->router->lock); rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); if (!rif) - goto out; + return 0; switch (event) { case NETDEV_CHANGEMTU: case NETDEV_CHANGEADDR: - err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack); - break; + return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack); case NETDEV_PRE_CHANGEADDR: - err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); - break; + return 
mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); default: WARN_ON_ONCE(1); break; } -out: - mutex_unlock(&mlxsw_sp->router->lock); - return err; + return 0; } static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, @@ -9535,7 +9516,6 @@ mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, if (!mlxsw_sp || netif_is_macvlan(l3_dev)) return 0; - mutex_lock(&mlxsw_sp->router->lock); switch (event) { case NETDEV_PRECHANGEUPPER: break; @@ -9550,7 +9530,6 @@ mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, } break; } - mutex_unlock(&mlxsw_sp->router->lock); return err; } @@ -9566,6 +9545,8 @@ static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, router = container_of(nb, struct mlxsw_sp_router, netdevice_nb); mlxsw_sp = router->mlxsw_sp; + mutex_lock(&mlxsw_sp->router->lock); + if (mlxsw_sp_is_offload_xstats_event(event)) err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev, event, ptr); @@ -9580,6 +9561,8 @@ static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb, else if (mlxsw_sp_is_vrf_event(event, ptr)) err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); + mutex_unlock(&mlxsw_sp->router->lock); + return notifier_from_errno(err); } -- cgit v1.2.3 From 39d439047f1dc88f98b755d6f3a53a4ef8f0de21 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:12:59 -0700 Subject: net: wwan: t7xx: Add control DMA interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cross Layer DMA (CLDMA) Hardware interface (HIF) enables the control path of Host-Modem data transfers. CLDMA HIF layer provides a common interface to the Port Layer. CLDMA manages 8 independent RX/TX physical channels with data flow control in HW queues. CLDMA uses ring buffers of General Packet Descriptors (GPD) for TX/RX. GPDs can represent multiple or single data buffers (DB). CLDMA HIF initializes GPD rings, registers ISR handlers for CLDMA interrupts, and initializes CLDMA HW registers. CLDMA TX flow: 1. Port Layer write 2. Get DB address 3. Configure GPD 4. Triggering processing via HW register write CLDMA RX flow: 1. CLDMA HW sends a RX "done" to host 2. Driver starts thread to safely read GPD 3. DB is sent to Port layer 4. Create a new buffer for GPD ring Note: This patch does not enable compilation since it has dependencies such as t7xx_pcie_mac_clear_int()/t7xx_pcie_mac_set_int() and struct t7xx_pci_dev which are added by the core patch. Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Ilpo Järvinen Reviewed-by: Sergey Ryazanov Signed-off-by: David S. 
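To illustrate how a port-layer user is expected to drive the interface this patch adds, a rough sketch based on the t7xx_cldma_set_recv_skb() and t7xx_cldma_send_skb() calls introduced below (the caller names and the choice of queue 0 are hypothetical, not part of the patch):

/* Hypothetical port-layer glue; only the two t7xx_cldma_*() calls
 * come from this patch.
 */
static int my_port_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	/* Consume the control message; a negative return makes CLDMA stop
	 * RX processing and retry this GPD later.
	 */
	dev_kfree_skb_any(skb);
	return 0;
}

static int my_port_open_and_send(struct cldma_ctrl *md_ctrl, struct sk_buff *skb)
{
	/* RX path: register the callback invoked from the CLDMA RX work item. */
	t7xx_cldma_set_recv_skb(md_ctrl, my_port_recv_skb);

	/* TX path: hand the skb to TX queue 0; CLDMA maps the buffer,
	 * fills a GPD and kicks the HW (steps 2-4 of the TX flow above).
	 */
	return t7xx_cldma_send_skb(md_ctrl, 0, skb);
}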
Miller --- drivers/net/wwan/t7xx/t7xx_cldma.c | 281 ++++++++ drivers/net/wwan/t7xx/t7xx_cldma.h | 180 +++++ drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 1192 ++++++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 126 ++++ drivers/net/wwan/t7xx/t7xx_reg.h | 33 + 5 files changed, 1812 insertions(+) create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.c create mode 100644 drivers/net/wwan/t7xx/t7xx_cldma.h create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_cldma.h create mode 100644 drivers/net/wwan/t7xx/t7xx_reg.h (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.c b/drivers/net/wwan/t7xx/t7xx_cldma.c new file mode 100644 index 000000000000..9f43f256db1d --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_cldma.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" + +#define ADDR_SIZE 8 + +void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info) +{ + u32 val; + + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); + val |= IP_BUSY_WAKEUP; + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY); +} + +/** + * t7xx_cldma_hw_restore() - Restore CLDMA HW registers. + * @hw_info: Pointer to struct t7xx_cldma_hw. + * + * Restore HW after resume. Writes uplink configuration for CLDMA HW. + */ +void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info) +{ + u32 ul_cfg; + + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; + + if (hw_info->hw_mode == MODE_BIT_64) + ul_cfg |= UL_CFG_BIT_MODE_64; + else if (hw_info->hw_mode == MODE_BIT_40) + ul_cfg |= UL_CFG_BIT_MODE_40; + else if (hw_info->hw_mode == MODE_BIT_36) + ul_cfg |= UL_CFG_BIT_MODE_36; + + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + /* Disable TX and RX invalid address check */ + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); +} + +void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD : + hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD; + val = qno == CLDMA_ALL_Q ? 
CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info) +{ + /* Enable the TX & RX interrupts */ + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); + iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); + /* Enable the empty queue interrupt */ + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0); + iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0); +} + +void t7xx_cldma_hw_reset(void __iomem *ao_base) +{ + u32 val; + + val = ioread32(ao_base + REG_INFRA_RST2_SET); + val |= RST2_PMIC_SW_RST_SET; + iowrite32(val, ao_base + REG_INFRA_RST2_SET); + val = ioread32(ao_base + REG_INFRA_RST4_SET); + val |= RST4_CLDMA1_SW_RST_SET; + iowrite32(val, ao_base + REG_INFRA_RST4_SET); + udelay(1); + + val = ioread32(ao_base + REG_INFRA_RST4_CLR); + val |= RST4_CLDMA1_SW_RST_CLR; + iowrite32(val, ao_base + REG_INFRA_RST4_CLR); + val = ioread32(ao_base + REG_INFRA_RST2_CLR); + val |= RST2_PMIC_SW_RST_CLR; + iowrite32(val, ao_base + REG_INFRA_RST2_CLR); +} + +bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno) +{ + u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE; + + return ioread64(hw_info->ap_pdn_base + offset); +} + +void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address, + enum mtk_txrx tx_rx) +{ + u32 offset = qno * ADDR_SIZE; + void __iomem *reg; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 : + hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0; + iowrite64(address, reg + offset); +} + +void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *base = hw_info->ap_pdn_base; + + if (tx_rx == MTK_RX) + iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD); + else + iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD); +} + +unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 mask, val; + + mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS : + hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS; + val = ioread32(reg); + + return val & mask; +} + +void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) +{ + unsigned int ch_id; + + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + ch_id &= bitmask; + /* Clear the ch IDs in the TX interrupt status register */ + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); +} + +void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask) +{ + unsigned int ch_id; + + ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + ch_id &= bitmask; + /* Clear the ch IDs in the RX interrupt status register */ + iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); +} + +unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? 
hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0; + val = ioread32(reg); + return val & bitmask; +} + +void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val << EQ_STA_BIT_OFFSET, reg); +} + +void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val, reg); +} + +void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + u32 val; + + reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0; + val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno); + iowrite32(val << EQ_STA_BIT_OFFSET, reg); +} + +/** + * t7xx_cldma_hw_init() - Initialize CLDMA HW. + * @hw_info: Pointer to struct t7xx_cldma_hw. + * + * Write uplink and downlink configuration to CLDMA HW. + */ +void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info) +{ + u32 ul_cfg, dl_cfg; + + ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG); + /* Configure the DRAM address mode */ + ul_cfg &= ~UL_CFG_BIT_MODE_MASK; + dl_cfg &= ~DL_CFG_BIT_MODE_MASK; + + if (hw_info->hw_mode == MODE_BIT_64) { + ul_cfg |= UL_CFG_BIT_MODE_64; + dl_cfg |= DL_CFG_BIT_MODE_64; + } else if (hw_info->hw_mode == MODE_BIT_40) { + ul_cfg |= UL_CFG_BIT_MODE_40; + dl_cfg |= DL_CFG_BIT_MODE_40; + } else if (hw_info->hw_mode == MODE_BIT_36) { + ul_cfg |= UL_CFG_BIT_MODE_36; + dl_cfg |= DL_CFG_BIT_MODE_36; + } + + iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG); + dl_cfg |= DL_CFG_UP_HW_LAST; + iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG); + iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK); + iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK); + iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM); + iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM); +} + +void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + + reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD : + hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD; + iowrite32(CLDMA_ALL_Q, reg); +} + +void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx) +{ + void __iomem *reg; + + reg = tx_rx == MTK_RX ? 
hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 : + hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0; + iowrite32(TXRX_STATUS_BITMASK, reg); + iowrite32(EMPTY_STATUS_BITMASK, reg); +} diff --git a/drivers/net/wwan/t7xx/t7xx_cldma.h b/drivers/net/wwan/t7xx/t7xx_cldma.h new file mode 100644 index 000000000000..8949e8377fb0 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_cldma.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Sreehari Kancharla + */ + +#ifndef __T7XX_CLDMA_H__ +#define __T7XX_CLDMA_H__ + +#include +#include + +#define CLDMA_TXQ_NUM 8 +#define CLDMA_RXQ_NUM 8 +#define CLDMA_ALL_Q GENMASK(7, 0) + +/* Interrupt status bits */ +#define EMPTY_STATUS_BITMASK GENMASK(15, 8) +#define TXRX_STATUS_BITMASK GENMASK(7, 0) +#define EQ_STA_BIT_OFFSET 8 +#define L2_INT_BIT_COUNT 16 +#define EQ_STA_BIT(index) (BIT((index) + EQ_STA_BIT_OFFSET) & EMPTY_STATUS_BITMASK) + +#define TQ_ERR_INT_BITMASK GENMASK(23, 16) +#define TQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) + +#define RQ_ERR_INT_BITMASK GENMASK(23, 16) +#define RQ_ACTIVE_START_ERR_INT_BITMASK GENMASK(31, 24) + +#define CLDMA0_AO_BASE 0x10049000 +#define CLDMA0_PD_BASE 0x1021d000 +#define CLDMA1_AO_BASE 0x1004b000 +#define CLDMA1_PD_BASE 0x1021f000 + +#define CLDMA_R_AO_BASE 0x10023000 +#define CLDMA_R_PD_BASE 0x1023d000 + +/* CLDMA TX */ +#define REG_CLDMA_UL_START_ADDRL_0 0x0004 +#define REG_CLDMA_UL_START_ADDRH_0 0x0008 +#define REG_CLDMA_UL_CURRENT_ADDRL_0 0x0044 +#define REG_CLDMA_UL_CURRENT_ADDRH_0 0x0048 +#define REG_CLDMA_UL_STATUS 0x0084 +#define REG_CLDMA_UL_START_CMD 0x0088 +#define REG_CLDMA_UL_RESUME_CMD 0x008c +#define REG_CLDMA_UL_STOP_CMD 0x0090 +#define REG_CLDMA_UL_ERROR 0x0094 +#define REG_CLDMA_UL_CFG 0x0098 +#define UL_CFG_BIT_MODE_36 BIT(5) +#define UL_CFG_BIT_MODE_40 BIT(6) +#define UL_CFG_BIT_MODE_64 BIT(7) +#define UL_CFG_BIT_MODE_MASK GENMASK(7, 5) + +#define REG_CLDMA_UL_MEM 0x009c +#define UL_MEM_CHECK_DIS BIT(0) + +/* CLDMA RX */ +#define REG_CLDMA_DL_START_CMD 0x05bc +#define REG_CLDMA_DL_RESUME_CMD 0x05c0 +#define REG_CLDMA_DL_STOP_CMD 0x05c4 +#define REG_CLDMA_DL_MEM 0x0508 +#define DL_MEM_CHECK_DIS BIT(0) + +#define REG_CLDMA_DL_CFG 0x0404 +#define DL_CFG_UP_HW_LAST BIT(2) +#define DL_CFG_BIT_MODE_36 BIT(10) +#define DL_CFG_BIT_MODE_40 BIT(11) +#define DL_CFG_BIT_MODE_64 BIT(12) +#define DL_CFG_BIT_MODE_MASK GENMASK(12, 10) + +#define REG_CLDMA_DL_START_ADDRL_0 0x0478 +#define REG_CLDMA_DL_START_ADDRH_0 0x047c +#define REG_CLDMA_DL_CURRENT_ADDRL_0 0x04b8 +#define REG_CLDMA_DL_CURRENT_ADDRH_0 0x04bc +#define REG_CLDMA_DL_STATUS 0x04f8 + +/* CLDMA MISC */ +#define REG_CLDMA_L2TISAR0 0x0810 +#define REG_CLDMA_L2TISAR1 0x0814 +#define REG_CLDMA_L2TIMR0 0x0818 +#define REG_CLDMA_L2TIMR1 0x081c +#define REG_CLDMA_L2TIMCR0 0x0820 +#define REG_CLDMA_L2TIMCR1 0x0824 +#define REG_CLDMA_L2TIMSR0 0x0828 +#define REG_CLDMA_L2TIMSR1 0x082c +#define REG_CLDMA_L3TISAR0 0x0830 +#define REG_CLDMA_L3TISAR1 0x0834 +#define REG_CLDMA_L2RISAR0 0x0850 +#define REG_CLDMA_L2RISAR1 0x0854 +#define REG_CLDMA_L3RISAR0 0x0870 +#define REG_CLDMA_L3RISAR1 0x0874 +#define REG_CLDMA_IP_BUSY 0x08b4 +#define IP_BUSY_WAKEUP BIT(0) +#define CLDMA_L2TISAR0_ALL_INT_MASK GENMASK(15, 0) +#define CLDMA_L2RISAR0_ALL_INT_MASK GENMASK(15, 0) + +/* CLDMA MISC */ +#define REG_CLDMA_L2RIMR0 0x0858 +#define REG_CLDMA_L2RIMR1 0x085c 
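+/* Layout of the CLDMA L2 interrupt status/mask registers: bits 0-7
+ * (TXRX_STATUS_BITMASK) carry the per-queue transfer-done interrupts and
+ * bits 8-15 (EMPTY_STATUS_BITMASK) the per-queue "queue empty" interrupts,
+ * which is why t7xx_cldma_hw_irq_en_eq() and t7xx_cldma_hw_irq_dis_eq()
+ * shift the queue bit left by EQ_STA_BIT_OFFSET before writing it.
+ */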
+#define REG_CLDMA_L2RIMCR0 0x0860 +#define REG_CLDMA_L2RIMCR1 0x0864 +#define REG_CLDMA_L2RIMSR0 0x0868 +#define REG_CLDMA_L2RIMSR1 0x086c +#define REG_CLDMA_BUSY_MASK 0x0954 +#define BUSY_MASK_PCIE BIT(0) +#define BUSY_MASK_AP BIT(1) +#define BUSY_MASK_MD BIT(2) + +#define REG_CLDMA_INT_MASK 0x0960 + +/* CLDMA RESET */ +#define REG_INFRA_RST4_SET 0x0730 +#define RST4_CLDMA1_SW_RST_SET BIT(20) + +#define REG_INFRA_RST4_CLR 0x0734 +#define RST4_CLDMA1_SW_RST_CLR BIT(20) + +#define REG_INFRA_RST2_SET 0x0140 +#define RST2_PMIC_SW_RST_SET BIT(18) + +#define REG_INFRA_RST2_CLR 0x0144 +#define RST2_PMIC_SW_RST_CLR BIT(18) + +enum mtk_txrx { + MTK_TX, + MTK_RX, +}; + +enum t7xx_hw_mode { + MODE_BIT_32, + MODE_BIT_36, + MODE_BIT_40, + MODE_BIT_64, +}; + +struct t7xx_cldma_hw { + enum t7xx_hw_mode hw_mode; + void __iomem *ap_ao_base; + void __iomem *ap_pdn_base; + u32 phy_interrupt_id; +}; + +void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx); +unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); +void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask); +void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); +void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, + unsigned int qno, u64 address, enum mtk_txrx tx_rx); +void t7xx_cldma_hw_reset(void __iomem *ao_base); +void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx); +unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask, + enum mtk_txrx tx_rx); +void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info); +void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info); +bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno); +#endif diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c new file mode 100644 index 000000000000..c756b1d0b519 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -0,0 +1,1192 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define MAX_TX_BUDGET 16 +#define MAX_RX_BUDGET 16 + +#define CHECK_Q_STOP_TIMEOUT_US 1000000 +#define CHECK_Q_STOP_STEP_US 10000 + +#define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */ + +static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, + enum mtk_txrx tx_rx, unsigned int index) +{ + queue->dir = tx_rx; + queue->index = index; + queue->md_ctrl = md_ctrl; + queue->tr_ring = NULL; + queue->tr_done = NULL; + queue->tx_next = NULL; +} + +static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, + enum mtk_txrx tx_rx, unsigned int index) +{ + md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index); + init_waitqueue_head(&queue->req_wq); + spin_lock_init(&queue->ring_lock); +} + +static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr) +{ + gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr)); + gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr)); +} + +static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr) +{ + gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr)); + gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr)); +} + +static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, + size_t size) +{ + req->skb = __dev_alloc_skb(size, GFP_KERNEL); + if (!req->skb) + return -ENOMEM; + + req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, + skb_data_area_size(req->skb), DMA_FROM_DEVICE); + if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { + dev_kfree_skb_any(req->skb); + req->skb = NULL; + req->mapped_buff = 0; + dev_err(md_ctrl->dev, "DMA mapping failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + unsigned int hwo_polling_count = 0; + struct t7xx_cldma_hw *hw_info; + bool rx_not_done = true; + unsigned long flags; + int count = 0; + + hw_info = &md_ctrl->hw_info; + + do { + struct cldma_request *req; + struct cldma_gpd *gpd; + struct sk_buff *skb; + int ret; + + req = queue->tr_done; + if (!req) + return -ENODATA; + + gpd = req->gpd; + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { + dma_addr_t gpd_addr; + + if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) { + dev_err(md_ctrl->dev, "PCIe Link disconnected\n"); + return -ENODEV; + } + + gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); + if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100) + return 0; + + udelay(1); + continue; + } + + hwo_polling_count = 0; + skb = req->skb; + + if (req->mapped_buff) { + dma_unmap_single(md_ctrl->dev, req->mapped_buff, + skb_data_area_size(skb), DMA_FROM_DEVICE); + req->mapped_buff = 0; + } + + skb->len = 0; + skb_reset_tail_pointer(skb); + skb_put(skb, 
le16_to_cpu(gpd->data_buff_len)); + + ret = md_ctrl->recv_skb(queue, skb); + /* Break processing, will try again later */ + if (ret < 0) + return ret; + + req->skb = NULL; + t7xx_cldma_gpd_set_data_ptr(gpd, 0); + + spin_lock_irqsave(&queue->ring_lock, flags); + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + req = queue->rx_refill; + + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size); + if (ret) + return ret; + + gpd = req->gpd; + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); + gpd->data_buff_len = 0; + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + + spin_lock_irqsave(&queue->ring_lock, flags); + queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + rx_not_done = ++count < budget || !need_resched(); + } while (rx_not_done); + + *over_budget = true; + return 0; +} + +static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct t7xx_cldma_hw *hw_info; + unsigned int pending_rx_int; + bool over_budget = false; + unsigned long flags; + int ret; + + hw_info = &md_ctrl->hw_info; + + do { + ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget); + if (ret == -ENODATA) + return 0; + else if (ret) + return ret; + + pending_rx_int = 0; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->rxq_active & BIT(queue->index)) { + if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX)) + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX); + + pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index), + MTK_RX); + if (pending_rx_int) { + t7xx_cldma_hw_rx_done(hw_info, pending_rx_int); + + if (over_budget) { + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + return -EAGAIN; + } + } + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } while (pending_rx_int); + + return 0; +} + +static void t7xx_cldma_rx_done(struct work_struct *work) +{ + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + int value; + + value = t7xx_cldma_gpd_rx_collect(queue, queue->budget); + if (value && md_ctrl->rxq_active & BIT(queue->index)) { + queue_work(queue->worker, &queue->cldma_work); + return; + } + + t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); +} + +static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + unsigned int dma_len, count = 0; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + dma_addr_t dma_free; + struct sk_buff *skb; + + while (!kthread_should_stop()) { + spin_lock_irqsave(&queue->ring_lock, flags); + req = queue->tr_done; + if (!req) { + spin_unlock_irqrestore(&queue->ring_lock, flags); + break; + } + gpd = req->gpd; + if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) { + spin_unlock_irqrestore(&queue->ring_lock, flags); + break; + } + queue->budget++; + dma_free = req->mapped_buff; + dma_len = le16_to_cpu(gpd->data_buff_len); + skb = req->skb; + req->skb = NULL; + queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + count++; + dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE); + 
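+		/* req->skb and its DMA address were claimed under ring_lock
+		 * above, so the unmap above and the free below run without
+		 * holding the lock.
+		 */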
dev_kfree_skb_any(skb); + } + + if (count) + wake_up_nr(&queue->req_wq, count); + + return count; +} + +static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct cldma_request *req; + dma_addr_t ul_curr_addr; + unsigned long flags; + bool pending_gpd; + + if (!(md_ctrl->txq_active & BIT(queue->index))) + return; + + spin_lock_irqsave(&queue->ring_lock, flags); + req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (pending_gpd) { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + + /* Check current processing TGPD, 64-bit address is in a table by Q index */ + ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 + + queue->index * sizeof(u64)); + if (req->gpd_addr != ul_curr_addr) { + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n", + md_ctrl->hif_id, queue->index); + return; + } + + t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX); + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static void t7xx_cldma_tx_done(struct work_struct *work) +{ + struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct t7xx_cldma_hw *hw_info; + unsigned int l2_tx_int; + unsigned long flags; + + hw_info = &md_ctrl->hw_info; + t7xx_cldma_gpd_tx_collect(queue); + l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index), + MTK_TX); + if (l2_tx_int & EQ_STA_BIT(queue->index)) { + t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index)); + t7xx_cldma_txq_empty_hndl(queue); + } + + if (l2_tx_int & BIT(queue->index)) { + t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index)); + queue_work(queue->worker, &queue->cldma_work); + return; + } + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->txq_active & BIT(queue->index)) { + t7xx_cldma_clear_ip_busy(hw_info); + t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX); + t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, + struct cldma_ring *ring, enum dma_data_direction tx_rx) +{ + struct cldma_request *req_cur, *req_next; + + list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) { + if (req_cur->mapped_buff && req_cur->skb) { + dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, + skb_data_area_size(req_cur->skb), tx_rx); + req_cur->mapped_buff = 0; + } + + dev_kfree_skb_any(req_cur->skb); + + if (req_cur->gpd) + dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr); + + list_del(&req_cur->entry); + kfree(req_cur); + } +} + +static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size) +{ + struct cldma_request *req; + int val; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); + if (!req->gpd) + goto err_free_req; + + val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size); + if (val) + goto err_free_pool; + + return req; + +err_free_pool: + dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr); + +err_free_req: + kfree(req); + + return NULL; +} + +static int 
t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) +{ + struct cldma_request *req; + struct cldma_gpd *gpd; + int i; + + INIT_LIST_HEAD(&ring->gpd_ring); + ring->length = MAX_RX_BUDGET; + + for (i = 0; i < ring->length; i++) { + req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size); + if (!req) { + t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE); + return -ENOMEM; + } + + gpd = req->gpd; + t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff); + gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size); + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + INIT_LIST_HEAD(&req->entry); + list_add_tail(&req->entry, &ring->gpd_ring); + } + + /* Link previous GPD to next GPD, circular */ + list_for_each_entry(req, &ring->gpd_ring, entry) { + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); + gpd = req->gpd; + } + + return 0; +} + +static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl) +{ + struct cldma_request *req; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr); + if (!req->gpd) { + kfree(req); + return NULL; + } + + return req; +} + +static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring) +{ + struct cldma_request *req; + struct cldma_gpd *gpd; + int i; + + INIT_LIST_HEAD(&ring->gpd_ring); + ring->length = MAX_TX_BUDGET; + + for (i = 0; i < ring->length; i++) { + req = t7xx_alloc_tx_request(md_ctrl); + if (!req) { + t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE); + return -ENOMEM; + } + + gpd = req->gpd; + gpd->flags = GPD_FLAGS_IOC; + INIT_LIST_HEAD(&req->entry); + list_add_tail(&req->entry, &ring->gpd_ring); + } + + /* Link previous GPD to next GPD, circular */ + list_for_each_entry(req, &ring->gpd_ring, entry) { + t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr); + gpd = req->gpd; + } + + return 0; +} + +/** + * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values. + * @queue: Pointer to the queue structure. 
+ * + * Called with ring_lock (unless called during initialization phase) + */ +static void t7xx_cldma_q_reset(struct cldma_queue *queue) +{ + struct cldma_request *req; + + req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry); + queue->tr_done = req; + queue->budget = queue->tr_ring->length; + + if (queue->dir == MTK_TX) + queue->tx_next = req; + else + queue->rx_refill = req; +} + +static void t7xx_cldma_rxq_init(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + + queue->dir = MTK_RX; + queue->tr_ring = &md_ctrl->rx_ring[queue->index]; + t7xx_cldma_q_reset(queue); +} + +static void t7xx_cldma_txq_init(struct cldma_queue *queue) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + + queue->dir = MTK_TX; + queue->tr_ring = &md_ctrl->tx_ring[queue->index]; + t7xx_cldma_q_reset(queue); +} + +static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl) +{ + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); +} + +static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl) +{ + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id); +} + +static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) +{ + unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val; + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int i; + + /* L2 raw interrupt status */ + l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0); + l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0); + l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0); + l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0); + l2_tx_int &= ~l2_tx_int_msk; + l2_rx_int &= ~l2_rx_int_msk; + + if (l2_tx_int) { + if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) { + /* Read and clear L3 TX interrupt status */ + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0); + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1); + } + + t7xx_cldma_hw_tx_done(hw_info, l2_tx_int); + if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { + if (i < CLDMA_TXQ_NUM) { + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); + queue_work(md_ctrl->txq[i].worker, + &md_ctrl->txq[i].cldma_work); + } else { + t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]); + } + } + } + } + + if (l2_rx_int) { + if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) { + /* Read and clear L3 RX interrupt status */ + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0); + val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); + iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1); + } + + t7xx_cldma_hw_rx_done(hw_info, l2_rx_int); + if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { + l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; + for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { + t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); + queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); + } + } + } +} + +static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned int tx_active; + unsigned int rx_active; + + if 
(!pci_device_is_present(to_pci_dev(md_ctrl->dev))) + return false; + + tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX); + rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX); + + return tx_active || rx_active; +} + +/** + * t7xx_cldma_stop() - Stop CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Stop TX and RX queues. Disable L1 and L2 interrupts. + * Clear status registers. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from polling cldma_queues_active. + */ +int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + bool active; + int i, ret; + + md_ctrl->rxq_active = 0; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); + md_ctrl->txq_active = 0; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); + md_ctrl->txq_started = 0; + t7xx_cldma_disable_irq(md_ctrl); + t7xx_cldma_hw_stop(hw_info, MTK_RX); + t7xx_cldma_hw_stop(hw_info, MTK_TX); + t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK); + t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK); + + if (md_ctrl->is_late_init) { + for (i = 0; i < CLDMA_TXQ_NUM; i++) + flush_work(&md_ctrl->txq[i].cldma_work); + + for (i = 0; i < CLDMA_RXQ_NUM; i++) + flush_work(&md_ctrl->rxq[i].cldma_work); + } + + ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US, + CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl); + if (ret) + dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id); + + return ret; +} + +static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl) +{ + int i; + + if (!md_ctrl->is_late_init) + return; + + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); + + for (i = 0; i < CLDMA_RXQ_NUM; i++) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE); + + dma_pool_destroy(md_ctrl->gpd_dmapool); + md_ctrl->gpd_dmapool = NULL; + md_ctrl->is_late_init = false; +} + +void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_ctrl->txq_active = 0; + md_ctrl->rxq_active = 0; + t7xx_cldma_disable_irq(md_ctrl); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + cancel_work_sync(&md_ctrl->txq[i].cldma_work); + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + cancel_work_sync(&md_ctrl->rxq[i].cldma_work); + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + t7xx_cldma_late_release(md_ctrl); +} + +/** + * t7xx_cldma_start() - Start CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Set TX/RX start address. + * Start all RX queues and enable L2 interrupt. 
+ */ +void t7xx_cldma_start(struct cldma_ctrl *md_ctrl) +{ + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->is_late_init) { + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int i; + + t7xx_cldma_enable_irq(md_ctrl); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + if (md_ctrl->txq[i].tr_done) + t7xx_cldma_hw_set_start_addr(hw_info, i, + md_ctrl->txq[i].tr_done->gpd_addr, + MTK_TX); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + if (md_ctrl->rxq[i].tr_done) + t7xx_cldma_hw_set_start_addr(hw_info, i, + md_ctrl->rxq[i].tr_done->gpd_addr, + MTK_RX); + } + + /* Enable L2 interrupt */ + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_start(hw_info); + md_ctrl->txq_started = 0; + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum) +{ + struct cldma_queue *txq = &md_ctrl->txq[qnum]; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + + spin_lock_irqsave(&txq->ring_lock, flags); + t7xx_cldma_q_reset(txq); + list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) { + gpd = req->gpd; + gpd->flags &= ~GPD_FLAGS_HWO; + t7xx_cldma_gpd_set_data_ptr(gpd, 0); + gpd->data_buff_len = 0; + dev_kfree_skb_any(req->skb); + req->skb = NULL; + } + spin_unlock_irqrestore(&txq->ring_lock, flags); +} + +static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum) +{ + struct cldma_queue *rxq = &md_ctrl->rxq[qnum]; + struct cldma_request *req; + struct cldma_gpd *gpd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&rxq->ring_lock, flags); + t7xx_cldma_q_reset(rxq); + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { + gpd = req->gpd; + gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO; + gpd->data_buff_len = 0; + + if (req->skb) { + req->skb->len = 0; + skb_reset_tail_pointer(req->skb); + } + } + + list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) { + if (req->skb) + continue; + + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size); + if (ret) + break; + + t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff); + } + spin_unlock_irqrestore(&rxq->ring_lock, flags); + + return ret; +} + +void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) +{ + int i; + + if (tx_rx == MTK_TX) { + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_clear_txq(md_ctrl, i); + } else { + for (i = 0; i < CLDMA_RXQ_NUM; i++) + t7xx_cldma_clear_rxq(md_ctrl, i); + } +} + +void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx); + if (tx_rx == MTK_RX) + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; + else + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req, + struct sk_buff *skb) +{ + struct cldma_ctrl *md_ctrl = queue->md_ctrl; + struct cldma_gpd *gpd = tx_req->gpd; + unsigned long flags; + + /* Update GPD */ + tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE); + + if (dma_mapping_error(md_ctrl->dev, 
tx_req->mapped_buff)) { + dev_err(md_ctrl->dev, "DMA mapping failed\n"); + return -ENOMEM; + } + + t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff); + gpd->data_buff_len = cpu_to_le16(skb->len); + + /* This lock must cover TGPD setting, as even without a resume operation, + * CLDMA can send next HWO=1 if last TGPD just finished. + */ + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (md_ctrl->txq_active & BIT(queue->index)) + gpd->flags |= GPD_FLAGS_HWO; + + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + tx_req->skb = skb; + return 0; +} + +/* Called with cldma_lock */ +static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno, + struct cldma_request *prev_req) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + + /* Check whether the device was powered off (CLDMA start address is not set) */ + if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) { + t7xx_cldma_hw_init(hw_info); + t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX); + md_ctrl->txq_started &= ~BIT(qno); + } + + if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) { + if (md_ctrl->txq_started & BIT(qno)) + t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX); + else + t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX); + + md_ctrl->txq_started |= BIT(qno); + } +} + +/** + * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets. + * @md_ctrl: CLDMA context structure. + * @recv_skb: Receiving skb callback. + */ +void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)) +{ + md_ctrl->recv_skb = recv_skb; +} + +/** + * t7xx_cldma_send_skb() - Send control data to modem. + * @md_ctrl: CLDMA context structure. + * @qno: Queue number. + * @skb: Socket buffer. + * + * Return: + * * 0 - Success. + * * -ENOMEM - Allocation failure. + * * -EINVAL - Invalid queue request. + * * -EIO - Queue is not active. + * * -ETIMEDOUT - Timeout waiting for the device to wake up. + */ +int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb) +{ + struct cldma_request *tx_req; + struct cldma_queue *queue; + unsigned long flags; + int ret; + + if (qno >= CLDMA_TXQ_NUM) + return -EINVAL; + + queue = &md_ctrl->txq[qno]; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + if (!(md_ctrl->txq_active & BIT(qno))) { + ret = -EIO; + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + goto allow_sleep; + } + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + do { + spin_lock_irqsave(&queue->ring_lock, flags); + tx_req = queue->tx_next; + if (queue->budget > 0 && !tx_req->skb) { + struct list_head *gpd_ring = &queue->tr_ring->gpd_ring; + + queue->budget--; + t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb); + queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); + spin_unlock_irqrestore(&queue->ring_lock, flags); + + /* Protect the access to the modem for queues operations (resume/start) + * which access shared locations by all the queues. + * cldma_lock is independent of ring_lock which is per queue. 
+ */ + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + break; + } + spin_unlock_irqrestore(&queue->ring_lock, flags); + + if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + } + + ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0); + } while (!ret); + +allow_sleep: + return ret; +} + +static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl) +{ + char dma_pool_name[32]; + int i, j, ret; + + if (md_ctrl->is_late_init) { + dev_err(md_ctrl->dev, "CLDMA late init was already done\n"); + return -EALREADY; + } + + snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id); + + md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev, + sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0); + if (!md_ctrl->gpd_dmapool) { + dev_err(md_ctrl->dev, "DMA pool alloc fail\n"); + return -ENOMEM; + } + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]); + if (ret) { + dev_err(md_ctrl->dev, "control TX ring init fail\n"); + goto err_free_tx_ring; + } + } + + for (j = 0; j < CLDMA_RXQ_NUM; j++) { + md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU; + + if (j == CLDMA_RXQ_NUM - 1) + md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ; + + ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]); + if (ret) { + dev_err(md_ctrl->dev, "Control RX ring init fail\n"); + goto err_free_rx_ring; + } + } + + for (i = 0; i < CLDMA_TXQ_NUM; i++) + t7xx_cldma_txq_init(&md_ctrl->txq[i]); + + for (j = 0; j < CLDMA_RXQ_NUM; j++) + t7xx_cldma_rxq_init(&md_ctrl->rxq[j]); + + md_ctrl->is_late_init = true; + return 0; + +err_free_rx_ring: + while (j--) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE); + +err_free_tx_ring: + while (i--) + t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE); + + return ret; +} + +static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr) +{ + return addr + phy_addr - addr_trs1; +} + +static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr; + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + u32 phy_ao_base, phy_pd_base; + + if (md_ctrl->hif_id != CLDMA_ID_MD) + return; + + phy_ao_base = CLDMA1_AO_BASE; + phy_pd_base = CLDMA1_PD_BASE; + hw_info->phy_interrupt_id = CLDMA1_INT; + hw_info->hw_mode = MODE_BIT_64; + hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_ao_base); + hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base, + pbase->pcie_dev_reg_trsl_addr, phy_pd_base); +} + +static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); + return 0; +} + +int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct cldma_ctrl *md_ctrl; + + md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL); + if (!md_ctrl) + return -ENOMEM; + + md_ctrl->t7xx_dev = t7xx_dev; + md_ctrl->dev = dev; + md_ctrl->hif_id = hif_id; + md_ctrl->recv_skb = t7xx_cldma_default_recv_skb; + t7xx_hw_info_init(md_ctrl); + t7xx_dev->md->md_ctrl[hif_id] = md_ctrl; + return 0; +} + +void 
t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_stop(hw_info, MTK_TX); + t7xx_cldma_hw_stop(hw_info, MTK_RX); + t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); + t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK); + t7xx_cldma_hw_init(hw_info); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data) +{ + struct cldma_ctrl *md_ctrl = data; + u32 interrupt; + + interrupt = md_ctrl->hw_info.phy_interrupt_id; + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt); + t7xx_cldma_irq_work_cb(md_ctrl); + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt); + t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt); + return IRQ_HANDLED; +} + +static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) +{ + int i; + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + if (md_ctrl->txq[i].worker) { + destroy_workqueue(md_ctrl->txq[i].worker); + md_ctrl->txq[i].worker = NULL; + } + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + if (md_ctrl->rxq[i].worker) { + destroy_workqueue(md_ctrl->rxq[i].worker); + md_ctrl->rxq[i].worker = NULL; + } + } +} + +/** + * t7xx_cldma_init() - Initialize CLDMA. + * @md_ctrl: CLDMA context structure. + * + * Initialize HIF TX/RX queue structure. + * Register CLDMA callback ISR with PCIe driver. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. + */ +int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) +{ + struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; + int i; + + md_ctrl->txq_active = 0; + md_ctrl->rxq_active = 0; + md_ctrl->is_late_init = false; + + spin_lock_init(&md_ctrl->cldma_lock); + + for (i = 0; i < CLDMA_TXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i); + md_ctrl->txq[i].worker = + alloc_workqueue("md_hif%d_tx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM | (i ? 
0 : WQ_HIGHPRI), + 1, md_ctrl->hif_id, i); + if (!md_ctrl->txq[i].worker) + goto err_workqueue; + + INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done); + } + + for (i = 0; i < CLDMA_RXQ_NUM; i++) { + md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i); + INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done); + + md_ctrl->rxq[i].worker = alloc_workqueue("md_hif%d_rx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 1, md_ctrl->hif_id, i); + if (!md_ctrl->rxq[i].worker) + goto err_workqueue; + } + + t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); + md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler; + md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL; + md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl; + t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id); + return 0; + +err_workqueue: + t7xx_cldma_destroy_wqs(md_ctrl); + return -ENOMEM; +} + +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl) +{ + t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_late_init(md_ctrl); +} + +void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl) +{ + t7xx_cldma_stop(md_ctrl); + t7xx_cldma_late_release(md_ctrl); + t7xx_cldma_destroy_wqs(md_ctrl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h new file mode 100644 index 000000000000..deb239e4f803 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Eliot Lee + */ + +#ifndef __T7XX_HIF_CLDMA_H__ +#define __T7XX_HIF_CLDMA_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_pci.h" + +/** + * enum cldma_id - Identifiers for CLDMA HW units. + * @CLDMA_ID_MD: Modem control channel. + * @CLDMA_ID_AP: Application Processor control channel (not used at the moment). + * @CLDMA_NUM: Number of CLDMA HW units available. 
+ */ +enum cldma_id { + CLDMA_ID_MD, + CLDMA_ID_AP, + CLDMA_NUM +}; + +struct cldma_gpd { + u8 flags; + u8 not_used1; + __le16 rx_data_allow_len; + __le32 next_gpd_ptr_h; + __le32 next_gpd_ptr_l; + __le32 data_buff_bd_ptr_h; + __le32 data_buff_bd_ptr_l; + __le16 data_buff_len; + __le16 not_used2; +}; + +struct cldma_request { + struct cldma_gpd *gpd; /* Virtual address for CPU */ + dma_addr_t gpd_addr; /* Physical address for DMA */ + struct sk_buff *skb; + dma_addr_t mapped_buff; + struct list_head entry; +}; + +struct cldma_ring { + struct list_head gpd_ring; /* Ring of struct cldma_request */ + unsigned int length; /* Number of struct cldma_request */ + int pkt_size; +}; + +struct cldma_queue { + struct cldma_ctrl *md_ctrl; + enum mtk_txrx dir; + unsigned int index; + struct cldma_ring *tr_ring; + struct cldma_request *tr_done; + struct cldma_request *rx_refill; + struct cldma_request *tx_next; + int budget; /* Same as ring buffer size by default */ + spinlock_t ring_lock; + wait_queue_head_t req_wq; /* Only for TX */ + struct workqueue_struct *worker; + struct work_struct cldma_work; +}; + +struct cldma_ctrl { + enum cldma_id hif_id; + struct device *dev; + struct t7xx_pci_dev *t7xx_dev; + struct cldma_queue txq[CLDMA_TXQ_NUM]; + struct cldma_queue rxq[CLDMA_RXQ_NUM]; + unsigned short txq_active; + unsigned short rxq_active; + unsigned short txq_started; + spinlock_t cldma_lock; /* Protects CLDMA structure */ + /* Assumes T/R GPD/BD/SPD have the same size */ + struct dma_pool *gpd_dmapool; + struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; + struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; + struct t7xx_cldma_hw hw_info; + bool is_late_init; + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); +}; + +#define GPD_FLAGS_HWO BIT(0) +#define GPD_FLAGS_IOC BIT(7) +#define GPD_DMAPOOL_ALIGN 16 + +#define CLDMA_MTU 3584 /* 3.5kB */ + +int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev); +void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl); +int t7xx_cldma_init(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_start(struct cldma_ctrl *md_ctrl); +int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl); +void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl, + int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)); +int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb); +void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); +void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx); + +#endif /* __T7XX_HIF_CLDMA_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h new file mode 100644 index 000000000000..7dc6c77a59e3 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_reg.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Chiranjeevi Rapolu + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * Sreehari Kancharla + */ + +#ifndef __T7XX_REG_H__ +#define __T7XX_REG_H__ + +enum t7xx_int { + DPMAIF_INT, + CLDMA0_INT, + CLDMA1_INT, + CLDMA2_INT, + MHCCIF_INT, + DPMAIF2_INT, + SAP_RGU_INT, + CLDMA3_INT, +}; + +#endif /* __T7XX_REG_H__ */ -- cgit v1.2.3 From 13e920d93e37fcaef4a9309515798a3cae9dcf19 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:00 -0700 Subject: net: wwan: t7xx: Add core components MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Registers the t7xx device driver with the kernel. Setup all the core components: PCIe layer, Modem Host Cross Core Interface (MHCCIF), modem control operations, modem state machine, and build infrastructure. * PCIe layer code implements driver probe and removal. * MHCCIF provides interrupt channels to communicate events such as handshake, PM and port enumeration. * Modem control implements the entry point for modem init, reset and exit. * The modem status monitor is a state machine used by modem control to complete initialization and stop. It is used also to propagate exception events reported by other components. Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Ilpo Järvinen Reviewed-by: Sergey Ryazanov Signed-off-by: David S. Miller --- drivers/net/wwan/Kconfig | 14 + drivers/net/wwan/Makefile | 1 + drivers/net/wwan/t7xx/Makefile | 12 + drivers/net/wwan/t7xx/t7xx_mhccif.c | 102 ++++++ drivers/net/wwan/t7xx/t7xx_mhccif.h | 37 ++ drivers/net/wwan/t7xx/t7xx_modem_ops.c | 498 ++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_modem_ops.h | 85 +++++ drivers/net/wwan/t7xx/t7xx_pci.c | 225 ++++++++++++ drivers/net/wwan/t7xx/t7xx_pci.h | 64 ++++ drivers/net/wwan/t7xx/t7xx_pcie_mac.c | 262 ++++++++++++++ drivers/net/wwan/t7xx/t7xx_pcie_mac.h | 31 ++ drivers/net/wwan/t7xx/t7xx_reg.h | 104 ++++++ drivers/net/wwan/t7xx/t7xx_state_monitor.c | 540 +++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_state_monitor.h | 133 +++++++ 14 files changed, 2108 insertions(+) create mode 100644 drivers/net/wwan/t7xx/Makefile create mode 100644 drivers/net/wwan/t7xx/t7xx_mhccif.c create mode 100644 drivers/net/wwan/t7xx/t7xx_mhccif.h create mode 100644 drivers/net/wwan/t7xx/t7xx_modem_ops.c create mode 100644 drivers/net/wwan/t7xx/t7xx_modem_ops.h create mode 100644 drivers/net/wwan/t7xx/t7xx_pci.c create mode 100644 drivers/net/wwan/t7xx/t7xx_pci.h create mode 100644 drivers/net/wwan/t7xx/t7xx_pcie_mac.c create mode 100644 drivers/net/wwan/t7xx/t7xx_pcie_mac.h create mode 100644 drivers/net/wwan/t7xx/t7xx_state_monitor.c create mode 100644 drivers/net/wwan/t7xx/t7xx_state_monitor.h (limited to 'drivers') diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index 609fd4a2c865..3486ffe94ac4 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -105,6 +105,20 @@ config IOSM If unsure, say N. +config MTK_T7XX + tristate "MediaTek PCIe 5G WWAN modem T7xx device" + depends on PCI + help + Enables MediaTek PCIe based 5G WWAN modem (T7xx series) device. + Adapts WWAN framework and provides network interface like wwan0 + and tty interfaces like wwan0at0 (AT protocol), wwan0mbim0 + (MBIM protocol), etc. 
+ + To compile this driver as a module, choose M here: the module will be + called mtk_t7xx. + + If unsure, say N. + endif # WWAN endmenu diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile index e722650bebea..3960c0ae2445 100644 --- a/drivers/net/wwan/Makefile +++ b/drivers/net/wwan/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_MHI_WWAN_MBIM) += mhi_wwan_mbim.o obj-$(CONFIG_QCOM_BAM_DMUX) += qcom_bam_dmux.o obj-$(CONFIG_RPMSG_WWAN_CTRL) += rpmsg_wwan_ctrl.o obj-$(CONFIG_IOSM) += iosm/ +obj-$(CONFIG_MTK_T7XX) += t7xx/ diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile new file mode 100644 index 000000000000..6a49013bc343 --- /dev/null +++ b/drivers/net/wwan/t7xx/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +ccflags-y += -Werror + +obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o +mtk_t7xx-y:= t7xx_pci.o \ + t7xx_pcie_mac.o \ + t7xx_mhccif.o \ + t7xx_state_monitor.o \ + t7xx_modem_ops.o \ + t7xx_cldma.o \ + t7xx_hif_cldma.o \ diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c new file mode 100644 index 000000000000..5826e2d0b5a8 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Ricardo Martinez + */ + +#include +#include +#include +#include +#include + +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" + +static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask) +{ + void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; + + /* Clear level 2 interrupt */ + iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK); + /* Ensure write is complete */ + t7xx_mhccif_read_sw_int_sts(t7xx_dev); + /* Clear level 1 interrupt */ + t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT); +} + +static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + u32 int_status, val; + + val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); + iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + + int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); + if (int_status & D2H_SW_INT_MASK) { + int ret = t7xx_pci_mhccif_isr(t7xx_dev); + + if (ret) + dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d", ret); + } + + t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); + return IRQ_HANDLED; +} + +u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev) +{ + return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS); +} + +void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val) +{ + iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET); +} + +void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val) +{ + iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR); +} + +u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev) +{ + return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK); +} + +static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base + + MHCCIF_RC_DEV_BASE - + 
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + + t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler; + t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread; + t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev; +} + +void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel) +{ + void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; + + iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY); + iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM); +} diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.h b/drivers/net/wwan/t7xx/t7xx_mhccif.h new file mode 100644 index 000000000000..209b386bc088 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Ricardo Martinez + */ + +#ifndef __T7XX_MHCCIF_H__ +#define __T7XX_MHCCIF_H__ + +#include + +#include "t7xx_pci.h" +#include "t7xx_reg.h" + +#define D2H_SW_INT_MASK (D2H_INT_EXCEPTION_INIT | \ + D2H_INT_EXCEPTION_INIT_DONE | \ + D2H_INT_EXCEPTION_CLEARQ_DONE | \ + D2H_INT_EXCEPTION_ALLQ_RESET | \ + D2H_INT_PORT_ENUM | \ + D2H_INT_ASYNC_MD_HK) + +void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val); +void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val); +u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev); +void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev); +u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev); +void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel); + +#endif /*__T7XX_MHCCIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c new file mode 100644 index 000000000000..30292e3f1c37 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_cldma.h" +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define RGU_RESET_DELAY_MS 10 +#define PORT_RESET_DELAY_MS 2000 +#define EX_HS_TIMEOUT_MS 5000 +#define EX_HS_POLL_DELAY_MS 10 + +static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev) +{ + return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK; +} + +/** + * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts. + * @t7xx_dev: MTK device. + * + * Check the interrupt status and queue commands accordingly. + * + * Returns: + ** 0 - Success. + ** -EINVAL - Failure to get FSM control. 
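+ *
+ * Context: Called from the MHCCIF threaded interrupt handler; updates to md->exp_id are protected by &md->exp_lock.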
+ */ +int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + struct t7xx_fsm_ctl *ctl; + unsigned int int_sta; + int ret = 0; + u32 mask; + + ctl = md->fsm_ctl; + if (!ctl) { + dev_err_ratelimited(&t7xx_dev->pdev->dev, + "MHCCIF interrupt received before initializing MD monitor\n"); + return -EINVAL; + } + + spin_lock_bh(&md->exp_lock); + int_sta = t7xx_get_interrupt_status(t7xx_dev); + md->exp_id |= int_sta; + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { + if (ctl->md_state == MD_STATE_INVALID || + ctl->md_state == MD_STATE_WAITING_FOR_HS1 || + ctl->md_state == MD_STATE_WAITING_FOR_HS2 || + ctl->md_state == MD_STATE_READY) { + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX); + } + } else if (md->exp_id & D2H_INT_PORT_ENUM) { + md->exp_id &= ~D2H_INT_PORT_ENUM; + + if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START || + ctl->curr_state == FSM_STATE_STOPPED) + ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM); + } else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) { + mask = t7xx_mhccif_mask_get(t7xx_dev); + if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + queue_work(md->handshake_wq, &md->handshake_work); + } + } + spin_unlock_bh(&md->exp_lock); + + return ret; +} + +static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr; + void __iomem *reset_pcie_reg; + u32 val; + + reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA - + pbase_addr->pcie_dev_reg_trsl_addr; + val = ioread32(reset_pcie_reg); + iowrite32(val, reset_pcie_reg); +} + +void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev) +{ + /* Clear L2 */ + t7xx_clr_device_irq_via_pcie(t7xx_dev); + /* Clear L1 */ + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); +} + +static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name) +{ +#ifdef CONFIG_ACPI + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct device *dev = &t7xx_dev->pdev->dev; + acpi_status acpi_ret; + acpi_handle handle; + + handle = ACPI_HANDLE(dev); + if (!handle) { + dev_err(dev, "ACPI handle not found\n"); + return -EFAULT; + } + + if (!acpi_has_method(handle, fn_name)) { + dev_err(dev, "%s method not found\n", fn_name); + return -EFAULT; + } + + acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer); + if (ACPI_FAILURE(acpi_ret)) { + dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret)); + return -EFAULT; + } + +#endif + return 0; +} + +int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev) +{ + return t7xx_acpi_reset(t7xx_dev, "_RST"); +} + +static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev) +{ + u32 val; + + val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + if (val & MISC_RESET_TYPE_PLDR) + t7xx_acpi_reset(t7xx_dev, "MRST._RST"); + else if (val & MISC_RESET_TYPE_FLDR) + t7xx_acpi_fldr_func(t7xx_dev); +} + +static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + + msleep(RGU_RESET_DELAY_MS); + t7xx_reset_device_via_pmic(t7xx_dev); + return IRQ_HANDLED; +} + +static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data) +{ + struct t7xx_pci_dev *t7xx_dev = data; + struct t7xx_modem *modem; + + t7xx_clear_rgu_irq(t7xx_dev); + if (!t7xx_dev->rgu_pci_irq_en) + return IRQ_HANDLED; + + modem = t7xx_dev->md; + modem->rgu_irq_asserted = true; + 
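/* Mask further RGU interrupts; the threaded handler performs the device reset */ +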
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + return IRQ_WAKE_THREAD; +} + +static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev) +{ + /* Registers RGU callback ISR with PCIe driver */ + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); + + t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler; + t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread; + t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); +} + +/** + * t7xx_cldma_exception() - CLDMA exception handler. + * @md_ctrl: modem control struct. + * @stage: exception stage. + * + * Part of the modem exception recovery. + * Stages are one after the other as describe below: + * HIF_EX_INIT: Disable and clear TXQ. + * HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. + * HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. + */ + +/* Modem Exception Handshake Flow + * + * Modem HW Exception interrupt received + * (MD_IRQ_CCIF_EX) + * | + * +---------v--------+ + * | HIF_EX_INIT | : Disable and clear TXQ + * +------------------+ + * | + * +---------v--------+ + * | HIF_EX_INIT_DONE | : Wait for the init to be done + * +------------------+ + * | + * +---------v--------+ + * |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ + * +------------------+ : Flush TX/RX workqueues + * | + * +---------v--------+ + * |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA + * +------------------+ + */ +static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage) +{ + switch (stage) { + case HIF_EX_INIT: + t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX); + t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX); + break; + + case HIF_EX_CLEARQ_DONE: + /* We do not want to get CLDMA IRQ when MD is + * resetting CLDMA after it got clearq_ack. + */ + t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX); + t7xx_cldma_stop(md_ctrl); + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base); + + t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX); + break; + + case HIF_EX_ALLQ_RESET: + t7xx_cldma_hw_init(&md_ctrl->hw_info); + t7xx_cldma_start(md_ctrl); + break; + + default: + break; + } +} + +static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) +{ + struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev; + + if (stage == HIF_EX_CLEARQ_DONE) { + /* Give DHL time to flush data */ + msleep(PORT_RESET_DELAY_MS); + } + + t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); + + if (stage == HIF_EX_INIT) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK); + else if (stage == HIF_EX_CLEARQ_DONE) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK); +} + +static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id) +{ + unsigned int waited_time_ms = 0; + + do { + if (md->exp_id & event_id) + return 0; + + waited_time_ms += EX_HS_POLL_DELAY_MS; + msleep(EX_HS_POLL_DELAY_MS); + } while (waited_time_ms < EX_HS_TIMEOUT_MS); + + return -EFAULT; +} + +static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev) +{ + /* Register the MHCCIF ISR for MD exception, port enum and + * async handshake notifications. 
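+ * All D2H software interrupts start out masked; only port enumeration is
+ * unmasked here, the remaining sources are enabled as the FSM progresses.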
+ */ + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); + t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM); + + /* Register RGU IRQ handler for sAP exception notification */ + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_register_rgu_isr(t7xx_dev); +} + +static void t7xx_md_hk_wq(struct work_struct *work) +{ + struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); + t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); + md->core_md.ready = true; + wake_up(&ctl->async_hk_wq); +} + +void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + void __iomem *mhccif_base; + unsigned int int_sta; + unsigned long flags; + + switch (evt_id) { + case FSM_PRE_START: + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM); + break; + + case FSM_START: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM); + + spin_lock_irqsave(&md->exp_lock, flags); + int_sta = t7xx_get_interrupt_status(md->t7xx_dev); + md->exp_id |= int_sta; + if (md->exp_id & D2H_INT_EXCEPTION_INIT) { + ctl->exp_flg = true; + md->exp_id &= ~D2H_INT_EXCEPTION_INIT; + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + } else if (ctl->exp_flg) { + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + } else if (md->exp_id & D2H_INT_ASYNC_MD_HK) { + queue_work(md->handshake_wq, &md->handshake_work); + md->exp_id &= ~D2H_INT_ASYNC_MD_HK; + mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base; + iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK); + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + } else { + t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + } + spin_unlock_irqrestore(&md->exp_lock, flags); + + t7xx_mhccif_mask_clr(md->t7xx_dev, + D2H_INT_EXCEPTION_INIT | + D2H_INT_EXCEPTION_INIT_DONE | + D2H_INT_EXCEPTION_CLEARQ_DONE | + D2H_INT_EXCEPTION_ALLQ_RESET); + break; + + case FSM_READY: + t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK); + break; + + default: + break; + } +} + +void t7xx_md_exception_handshake(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + int ret; + + t7xx_md_exception(md, HIF_EX_INIT); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE); + + t7xx_md_exception(md, HIF_EX_INIT_DONE); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE); + + t7xx_md_exception(md, HIF_EX_CLEARQ_DONE); + ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET); + if (ret) + dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET); + + t7xx_md_exception(md, HIF_EX_ALLQ_RESET); +} + +static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct t7xx_modem *md; + + md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL); + if (!md) + return NULL; + + md->t7xx_dev = t7xx_dev; + t7xx_dev->md = md; + spin_lock_init(&md->exp_lock); + md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, + 0, "md_hk_wq"); + if (!md->handshake_wq) + return NULL; + + INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); + return md; +} + +int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + + md->md_init_finish = false; + 
md->exp_id = 0; + t7xx_fsm_reset(md); + t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); + md->md_init_finish = true; + return 0; +} + +/** + * t7xx_md_init() - Initialize modem. + * @t7xx_dev: MTK device. + * + * Allocate and initialize MD control block, and initialize data path. + * Register MHCCIF ISR and RGU ISR, and start the state machine. + * + * Return: + ** 0 - Success. + ** -ENOMEM - Allocation failure. + */ +int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md; + int ret; + + md = t7xx_md_alloc(t7xx_dev); + if (!md) + return -ENOMEM; + + ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev); + if (ret) + goto err_destroy_hswq; + + ret = t7xx_fsm_init(md); + if (ret) + goto err_destroy_hswq; + + ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); + if (ret) + goto err_uninit_fsm; + + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); + if (ret) /* fsm_uninit flushes cmd queue */ + goto err_uninit_md_cldma; + + t7xx_md_sys_sw_init(t7xx_dev); + md->md_init_finish = true; + return 0; + +err_uninit_md_cldma: + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + +err_uninit_fsm: + t7xx_fsm_uninit(md); + +err_destroy_hswq: + destroy_workqueue(md->handshake_wq); + dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n"); + return ret; +} + +void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_modem *md = t7xx_dev->md; + + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + + if (!md->md_init_finish) + return; + + t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + t7xx_fsm_uninit(md); + destroy_workqueue(md->handshake_wq); +} diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h new file mode 100644 index 000000000000..1fc286fa1a14 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#ifndef __T7XX_MODEM_OPS_H__ +#define __T7XX_MODEM_OPS_H__ + +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_pci.h" + +#define FEATURE_COUNT 64 + +/** + * enum hif_ex_stage - HIF exception handshake stages with the HW. + * @HIF_EX_INIT: Disable and clear TXQ. + * @HIF_EX_INIT_DONE: Polling for initialization to be done. + * @HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX. + * @HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart. 
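+ *
+ * The stages are traversed in this order by t7xx_md_exception_handshake()
+ * as part of modem exception recovery.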
+ */ +enum hif_ex_stage { + HIF_EX_INIT, + HIF_EX_INIT_DONE, + HIF_EX_CLEARQ_DONE, + HIF_EX_ALLQ_RESET, +}; + +struct mtk_runtime_feature { + u8 feature_id; + u8 support_info; + u8 reserved[2]; + __le32 data_len; + __le32 data[]; +}; + +enum md_event_id { + FSM_PRE_START, + FSM_START, + FSM_READY, +}; + +struct t7xx_sys_info { + bool ready; +}; + +struct t7xx_modem { + struct cldma_ctrl *md_ctrl[CLDMA_NUM]; + struct t7xx_pci_dev *t7xx_dev; + struct t7xx_sys_info core_md; + bool md_init_finish; + bool rgu_irq_asserted; + struct workqueue_struct *handshake_wq; + struct work_struct handshake_work; + struct t7xx_fsm_ctl *fsm_ctl; + struct port_proxy *port_prox; + unsigned int exp_id; + spinlock_t exp_lock; /* Protects exception events */ +}; + +void t7xx_md_exception_handshake(struct t7xx_modem *md); +void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id); +int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev); +int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev); +void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev); +int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_MODEM_OPS_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c new file mode 100644 index 000000000000..8beb32a4185d --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Moises Veleta + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" + +#define T7XX_PCI_IREG_BASE 0 +#define T7XX_PCI_EREG_BASE 2 + +static int t7xx_request_irq(struct pci_dev *pdev) +{ + struct t7xx_pci_dev *t7xx_dev; + int ret, i; + + t7xx_dev = pci_get_drvdata(pdev); + + for (i = 0; i < EXT_INT_NUM; i++) { + const char *irq_descr; + int irq_vec; + + if (!t7xx_dev->intr_handler[i]) + continue; + + irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", + dev_driver_string(&pdev->dev), i); + if (!irq_descr) { + ret = -ENOMEM; + break; + } + + irq_vec = pci_irq_vector(pdev, i); + ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i], + t7xx_dev->intr_thread[i], 0, irq_descr, + t7xx_dev->callback_param[i]); + if (ret) { + dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret); + break; + } + } + + if (ret) { + while (i--) { + if (!t7xx_dev->intr_handler[i]) + continue; + + free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); + } + } + + return ret; +} + +static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev) +{ + struct pci_dev *pdev = t7xx_dev->pdev; + int ret; + + /* Only using 6 interrupts, but HW-design requires power-of-2 IRQs allocation */ + ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret); + return ret; + } + + ret = t7xx_request_irq(pdev); + if (ret) { + pci_free_irq_vectors(pdev); + return ret; + } + + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); + return 0; +} + +static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev) +{ + int ret, i; + + 
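/* The driver supports MSI-X only */ +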
if (!t7xx_dev->pdev->msix_cap) + return -EINVAL; + + ret = t7xx_setup_msix(t7xx_dev); + if (ret) + return ret; + + /* IPs enable interrupts when ready */ + for (i = 0; i < EXT_INT_NUM; i++) + t7xx_pcie_mac_set_int(t7xx_dev, i); + + return 0; +} + +static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base + + INFRACFG_AO_DEV_CHIP - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; +} + +static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct t7xx_pci_dev *t7xx_dev; + int ret; + + t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); + if (!t7xx_dev) + return -ENOMEM; + + pci_set_drvdata(pdev, t7xx_dev); + t7xx_dev->pdev = pdev; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE), + pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "Could not request BARs: %d\n", ret); + return -ENOMEM; + } + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret); + return ret; + } + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret); + return ret; + } + + IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; + t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; + + t7xx_pcie_mac_atr_init(t7xx_dev); + t7xx_pci_infracfg_ao_calc(t7xx_dev); + t7xx_mhccif_init(t7xx_dev); + + ret = t7xx_md_init(t7xx_dev); + if (ret) + return ret; + + t7xx_pcie_mac_interrupts_dis(t7xx_dev); + + ret = t7xx_interrupt_init(t7xx_dev); + if (ret) { + t7xx_md_exit(t7xx_dev); + return ret; + } + + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); + t7xx_pcie_mac_interrupts_en(t7xx_dev); + + return 0; +} + +static void t7xx_pci_remove(struct pci_dev *pdev) +{ + struct t7xx_pci_dev *t7xx_dev; + int i; + + t7xx_dev = pci_get_drvdata(pdev); + t7xx_md_exit(t7xx_dev); + + for (i = 0; i < EXT_INT_NUM; i++) { + if (!t7xx_dev->intr_handler[i]) + continue; + + free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); + } + + pci_free_irq_vectors(t7xx_dev->pdev); +} + +static const struct pci_device_id t7xx_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) }, + { } +}; +MODULE_DEVICE_TABLE(pci, t7xx_pci_table); + +static struct pci_driver t7xx_pci_driver = { + .name = "mtk_t7xx", + .id_table = t7xx_pci_table, + .probe = t7xx_pci_probe, + .remove = t7xx_pci_remove, +}; + +module_pci_driver(t7xx_pci_driver); + +MODULE_AUTHOR("MediaTek Inc"); +MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h new file mode 100644 index 000000000000..dfd4a2f2095b --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Moises Veleta + */ + +#ifndef __T7XX_PCI_H__ +#define __T7XX_PCI_H__ + +#include +#include +#include + +#include "t7xx_reg.h" + +/* struct t7xx_addr_base - holds base addresses + * @pcie_mac_ireg_base: PCIe MAC register base + * @pcie_ext_reg_base: used to calculate base addresses for CLDMA, DPMA and MHCCIF registers + * @pcie_dev_reg_trsl_addr: used to calculate the register base address + * @infracfg_ao_base: base address used in CLDMA reset operations + * @mhccif_rc_base: host view of MHCCIF rc base addr + */ +struct t7xx_addr_base { + void __iomem *pcie_mac_ireg_base; + void __iomem *pcie_ext_reg_base; + u32 pcie_dev_reg_trsl_addr; + void __iomem *infracfg_ao_base; + void __iomem *mhccif_rc_base; +}; + +typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); + +/* struct t7xx_pci_dev - MTK device context structure + * @intr_handler: array of handler function for request_threaded_irq + * @intr_thread: array of thread_fn for request_threaded_irq + * @callback_param: array of cookie passed back to interrupt functions + * @pdev: PCI device + * @base_addr: memory base addresses of HW components + * @md: modem interface + * @ccmni_ctlb: context structure used to control the network data path + * @rgu_pci_irq_en: RGU callback ISR registered and active + */ +struct t7xx_pci_dev { + t7xx_intr_callback intr_handler[EXT_INT_NUM]; + t7xx_intr_callback intr_thread[EXT_INT_NUM]; + void *callback_param[EXT_INT_NUM]; + struct pci_dev *pdev; + struct t7xx_addr_base base_addr; + struct t7xx_modem *md; + struct t7xx_ccmni_ctrl *ccmni_ctlb; + bool rgu_pci_irq_en; +}; + +#endif /* __T7XX_PCI_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.c b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c new file mode 100644 index 000000000000..76da4c15e3de --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Moises Veleta + * Sreehari Kancharla + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Ricardo Martinez + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" + +#define T7XX_PCIE_REG_BAR 2 +#define T7XX_PCIE_REG_PORT ATR_SRC_PCI_WIN0 +#define T7XX_PCIE_REG_TABLE_NUM 0 +#define T7XX_PCIE_REG_TRSL_PORT ATR_DST_AXIM_0 + +#define T7XX_PCIE_DEV_DMA_PORT_START ATR_SRC_AXIS_0 +#define T7XX_PCIE_DEV_DMA_PORT_END ATR_SRC_AXIS_2 +#define T7XX_PCIE_DEV_DMA_TABLE_NUM 0 +#define T7XX_PCIE_DEV_DMA_TRSL_ADDR 0 +#define T7XX_PCIE_DEV_DMA_SRC_ADDR 0 +#define T7XX_PCIE_DEV_DMA_TRANSPARENT 1 +#define T7XX_PCIE_DEV_DMA_SIZE 0 + +enum t7xx_atr_src_port { + ATR_SRC_PCI_WIN0, + ATR_SRC_PCI_WIN1, + ATR_SRC_AXIS_0, + ATR_SRC_AXIS_1, + ATR_SRC_AXIS_2, + ATR_SRC_AXIS_3, +}; + +enum t7xx_atr_dst_port { + ATR_DST_PCI_TRX, + ATR_DST_PCI_CONFIG, + ATR_DST_AXIM_0 = 4, + ATR_DST_AXIM_1, + ATR_DST_AXIM_2, + ATR_DST_AXIM_3, +}; + +struct t7xx_atr_config { + u64 src_addr; + u64 trsl_addr; + u64 size; + u32 port; + u32 table; + enum t7xx_atr_dst_port trsl_id; + u32 transparent; +}; + +static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port) +{ + void __iomem *reg; + int i, offset; + + for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) { + offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i; + reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; + iowrite64(0, reg); + } +} + +static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg) +{ + struct device *dev = &t7xx_dev->pdev->dev; + void __iomem *pbase = IREG_BASE(t7xx_dev); + int atr_size, pos, offset; + void __iomem *reg; + u64 value; + + if (cfg->transparent) { + /* No address conversion is performed */ + atr_size = ATR_TRANSPARENT_SIZE; + } else { + if (cfg->src_addr & (cfg->size - 1)) { + dev_err(dev, "Source address is not aligned to size\n"); + return -EINVAL; + } + + if (cfg->trsl_addr & (cfg->size - 1)) { + dev_err(dev, "Translation address %llx is not aligned to size %llx\n", + cfg->trsl_addr, cfg->size - 1); + return -EINVAL; + } + + pos = __ffs64(cfg->size); + + /* HW calculates the address translation space as 2^(atr_size + 1) */ + atr_size = pos - 1; + } + + offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table; + + reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset; + value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT; + iowrite64(value, reg); + + reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset; + iowrite32(cfg->trsl_id, reg); + + reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset; + value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0); + iowrite64(value, reg); + + /* Ensure ATR is set */ + ioread64(reg); + return 0; +} + +/** + * t7xx_pcie_mac_atr_init() - Initialize address translation. + * @t7xx_dev: MTK device. + * + * Setup ATR for ports & device. 
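+ *
+ * One window maps host accesses through the register BAR to the device
+ * register space; the device DMA ports are configured as transparent
+ * windows into host memory.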
+ */ +void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_atr_config cfg; + u32 i; + + /* Disable for all ports */ + for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++) + t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i); + + memset(&cfg, 0, sizeof(cfg)); + /* Config ATR for RC to access device's register */ + cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR); + cfg.size = T7XX_PCIE_REG_SIZE_CHIP; + cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP; + cfg.port = T7XX_PCIE_REG_PORT; + cfg.table = T7XX_PCIE_REG_TABLE_NUM; + cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT; + t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port); + t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg); + + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP; + + /* Config ATR for EP to access RC's memory */ + for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) { + cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR; + cfg.size = T7XX_PCIE_DEV_DMA_SIZE; + cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR; + cfg.port = i; + cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM; + cfg.trsl_id = ATR_DST_PCI_TRX; + cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT; + t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port); + t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg); + } +} + +/** + * t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts. + * @t7xx_dev: MTK device. + * @enable: Enable/disable. + * + * Enable or disable device interrupts. + */ +static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable) +{ + u32 value; + + value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL); + + if (enable) + value &= ~ISTAT_HST_CTRL_DIS; + else + value |= ISTAT_HST_CTRL_DIS; + + iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL); +} + +void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_pcie_mac_enable_disable_int(t7xx_dev, true); +} + +void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_pcie_mac_enable_disable_int(t7xx_dev, false); +} + +/** + * t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type. + * @t7xx_dev: MTK device. + * @int_type: Interrupt type. + * @clear: Clear/set. + * + * Clear or set device interrupt by type. + */ +static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev, + enum t7xx_int int_type, bool clear) +{ + void __iomem *reg; + u32 val; + + if (clear) + reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0; + else + reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0; + + val = BIT(EXT_INT_START + int_type); + iowrite32(val, reg); +} + +void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) +{ + t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true); +} + +void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) +{ + t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false); +} + +/** + * t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type. + * @t7xx_dev: MTK device. + * @int_type: Interrupt type. + * + * Enable or disable device interrupts' status by type. + */ +void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type) +{ + void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0; + u32 val = BIT(EXT_INT_START + int_type); + + iowrite32(val, reg); +} + +/** + * t7xx_pcie_set_mac_msix_cfg() - Write MSIX control configuration. + * @t7xx_dev: MTK device. + * @irq_count: Number of MSIX IRQ vectors. + * + * Write IRQ count to device. 
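+ *
+ * The value written is ffs(irq_count) * 2 - 1, i.e. 2 * log2(count) + 1
+ * for the power-of-two vector counts used by this driver.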
+ */ +void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count) +{ + u32 val = ffs(irq_count) * 2 - 1; + + iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX); +} diff --git a/drivers/net/wwan/t7xx/t7xx_pcie_mac.h b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h new file mode 100644 index 000000000000..50336fa7e8c2 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_pcie_mac.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Sreehari Kancharla + * + * Contributors: + * Moises Veleta + * Ricardo Martinez + */ + +#ifndef __T7XX_PCIE_MAC_H__ +#define __T7XX_PCIE_MAC_H__ + +#include "t7xx_pci.h" +#include "t7xx_reg.h" + +#define IREG_BASE(t7xx_dev) ((t7xx_dev)->base_addr.pcie_mac_ireg_base) + +void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type); +void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count); + +#endif /* __T7XX_PCIE_MAC_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h index 7dc6c77a59e3..ad2d30d03cf4 100644 --- a/drivers/net/wwan/t7xx/t7xx_reg.h +++ b/drivers/net/wwan/t7xx/t7xx_reg.h @@ -19,6 +19,110 @@ #ifndef __T7XX_REG_H__ #define __T7XX_REG_H__ +#include + +/* Device base address offset */ +#define MHCCIF_RC_DEV_BASE 0x10024000 + +#define REG_RC2EP_SW_BSY 0x04 +#define REG_RC2EP_SW_INT_START 0x08 + +#define REG_RC2EP_SW_TCHNUM 0x0c +#define H2D_CH_EXCEPTION_ACK 1 +#define H2D_CH_EXCEPTION_CLEARQ_ACK 2 +#define H2D_CH_DS_LOCK 3 +/* Channels 4-8 are reserved */ +#define H2D_CH_SUSPEND_REQ 9 +#define H2D_CH_RESUME_REQ 10 +#define H2D_CH_SUSPEND_REQ_AP 11 +#define H2D_CH_RESUME_REQ_AP 12 +#define H2D_CH_DEVICE_RESET 13 +#define H2D_CH_DRM_DISABLE_AP 14 + +#define REG_EP2RC_SW_INT_STS 0x10 +#define REG_EP2RC_SW_INT_ACK 0x14 +#define REG_EP2RC_SW_INT_EAP_MASK 0x20 +#define REG_EP2RC_SW_INT_EAP_MASK_SET 0x30 +#define REG_EP2RC_SW_INT_EAP_MASK_CLR 0x40 + +#define D2H_INT_DS_LOCK_ACK BIT(0) +#define D2H_INT_EXCEPTION_INIT BIT(1) +#define D2H_INT_EXCEPTION_INIT_DONE BIT(2) +#define D2H_INT_EXCEPTION_CLEARQ_DONE BIT(3) +#define D2H_INT_EXCEPTION_ALLQ_RESET BIT(4) +#define D2H_INT_PORT_ENUM BIT(5) +/* Bits 6-10 are reserved */ +#define D2H_INT_SUSPEND_ACK BIT(11) +#define D2H_INT_RESUME_ACK BIT(12) +#define D2H_INT_SUSPEND_ACK_AP BIT(13) +#define D2H_INT_RESUME_ACK_AP BIT(14) +#define D2H_INT_ASYNC_SAP_HK BIT(15) +#define D2H_INT_ASYNC_MD_HK BIT(16) + +/* Register base */ +#define INFRACFG_AO_DEV_CHIP 0x10001000 + +/* ATR setting */ +#define T7XX_PCIE_REG_TRSL_ADDR_CHIP 0x10000000 +#define T7XX_PCIE_REG_SIZE_CHIP 0x00400000 + +/* Reset Generic Unit (RGU) */ +#define TOPRGU_CH_PCIE_IRQ_STA 0x1000790c + +#define ATR_PORT_OFFSET 0x100 +#define ATR_TABLE_OFFSET 0x20 +#define ATR_TABLE_NUM_PER_ATR 8 +#define ATR_TRANSPARENT_SIZE 0x3f + +/* PCIE_MAC_IREG Register Definition */ + +#define ISTAT_HST_CTRL 0x01ac +#define ISTAT_HST_CTRL_DIS BIT(0) + +#define T7XX_PCIE_MISC_CTRL 0x0348 +#define T7XX_PCIE_MISC_MAC_SLEEP_DIS BIT(7) + +#define T7XX_PCIE_CFG_MSIX 0x03ec +#define 
ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR 0x0600 +#define ATR_PCIE_WIN0_T0_TRSL_ADDR 0x0608 +#define ATR_PCIE_WIN0_T0_TRSL_PARAM 0x0610 +#define ATR_PCIE_WIN0_ADDR_ALGMT GENMASK_ULL(63, 12) + +#define ATR_SRC_ADDR_INVALID 0x007f + +#define T7XX_PCIE_PM_RESUME_STATE 0x0d0c + +enum t7xx_pm_resume_state { + PM_RESUME_REG_STATE_L3, + PM_RESUME_REG_STATE_L1, + PM_RESUME_REG_STATE_INIT, + PM_RESUME_REG_STATE_EXP, + PM_RESUME_REG_STATE_L2, + PM_RESUME_REG_STATE_L2_EXP, +}; + +#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c +#define MISC_STAGE_MASK GENMASK(2, 0) +#define MISC_RESET_TYPE_PLDR BIT(26) +#define MISC_RESET_TYPE_FLDR BIT(27) +#define LINUX_STAGE 4 + +#define T7XX_PCIE_RESOURCE_STATUS 0x0d28 +#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0) + +#define DISABLE_ASPM_LOWPWR 0x0e50 +#define ENABLE_ASPM_LOWPWR 0x0e54 +#define T7XX_L1_BIT(i) BIT((i) * 4 + 1) +#define T7XX_L1_1_BIT(i) BIT((i) * 4 + 2) +#define T7XX_L1_2_BIT(i) BIT((i) * 4 + 3) + +#define MSIX_ISTAT_HST_GRP0_0 0x0f00 +#define IMASK_HOST_MSIX_SET_GRP0_0 0x3000 +#define IMASK_HOST_MSIX_CLR_GRP0_0 0x3080 +#define EXT_INT_START 24 +#define EXT_INT_NUM 8 +#define MSIX_MSK_SET_ALL GENMASK(31, 24) + enum t7xx_int { DPMAIF_INT, CLDMA0_INT, diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c new file mode 100644 index 000000000000..4b2e6913c45e --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -0,0 +1,540 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_mhccif.h" +#include "t7xx_modem_ops.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_reg.h" +#include "t7xx_state_monitor.h" + +#define FSM_DRM_DISABLE_DELAY_MS 200 +#define FSM_EVENT_POLL_INTERVAL_MS 20 +#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000 +#define FSM_MD_EX_PASS_TIMEOUT_MS 45000 +#define FSM_CMD_TIMEOUT_MS 2000 + +void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_add_tail(¬ifier->entry, &ctl->notifier_list); + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier) +{ + struct t7xx_fsm_notifier *notifier_cur, *notifier_next; + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) { + if (notifier_cur == notifier) + list_del(¬ifier->entry); + } + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +static void fsm_state_notify(struct t7xx_modem *md, enum md_state state) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + struct t7xx_fsm_notifier *notifier; + unsigned long flags; + + spin_lock_irqsave(&ctl->notifier_lock, flags); + list_for_each_entry(notifier, &ctl->notifier_list, entry) { + spin_unlock_irqrestore(&ctl->notifier_lock, flags); + if (notifier->notifier_fn) + notifier->notifier_fn(state, notifier->data); + + spin_lock_irqsave(&ctl->notifier_lock, 
flags); + } + spin_unlock_irqrestore(&ctl->notifier_lock, flags); +} + +void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) +{ + ctl->md_state = state; + fsm_state_notify(ctl->md, state); +} + +static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) +{ + if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + *cmd->ret = result; + complete_all(cmd->done); + } + + kfree(cmd); +} + +static void fsm_del_kf_event(struct t7xx_fsm_event *event) +{ + list_del(&event->entry); + kfree(event); +} + +static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + struct t7xx_fsm_event *event, *evt_next; + struct t7xx_fsm_command *cmd, *cmd_next; + unsigned long flags; + + spin_lock_irqsave(&ctl->command_lock, flags); + list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) { + dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id); + list_del(&cmd->entry); + fsm_finish_command(ctl, cmd, -EINVAL); + } + spin_unlock_irqrestore(&ctl->command_lock, flags); + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { + dev_warn(dev, "Unhandled event %d\n", event->event_id); + fsm_del_kf_event(event); + } + spin_unlock_irqrestore(&ctl->event_lock, flags); +} + +static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected, + enum t7xx_fsm_event_state event_ignore, int retries) +{ + struct t7xx_fsm_event *event; + bool event_received = false; + unsigned long flags; + int cnt = 0; + + while (cnt++ < retries && !event_received) { + bool sleep_required = true; + + if (kthread_should_stop()) + return; + + spin_lock_irqsave(&ctl->event_lock, flags); + event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry); + if (event) { + event_received = event->event_id == event_expected; + if (event_received || event->event_id == event_ignore) { + fsm_del_kf_event(event); + sleep_required = false; + } + } + spin_unlock_irqrestore(&ctl->event_lock, flags); + + if (sleep_required) + msleep(FSM_EVENT_POLL_INTERVAL_MS); + } +} + +static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, + enum t7xx_ex_reason reason) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + + if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) { + if (cmd) + fsm_finish_command(ctl, cmd, -EINVAL); + + return; + } + + ctl->curr_state = FSM_STATE_EXCEPTION; + + switch (reason) { + case EXCEPTION_HS_TIMEOUT: + dev_err(dev, "Boot Handshake failure\n"); + break; + + case EXCEPTION_EVENT: + dev_err(dev, "Exception event\n"); + t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); + t7xx_md_exception_handshake(ctl->md); + + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, + FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); + fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID, + FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS); + break; + + default: + dev_err(dev, "Exception %d\n", reason); + break; + } + + if (cmd) + fsm_finish_command(ctl, cmd, 0); +} + +static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl) +{ + ctl->curr_state = FSM_STATE_STOPPED; + + t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED); + return t7xx_md_reset(ctl->md->t7xx_dev); +} + +static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + if (ctl->curr_state == FSM_STATE_STOPPED) { + 
fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); +} + +static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + struct t7xx_pci_dev *t7xx_dev; + struct cldma_ctrl *md_ctrl; + int err; + + if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD]; + t7xx_dev = ctl->md->t7xx_dev; + + ctl->curr_state = FSM_STATE_STOPPING; + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP); + t7xx_cldma_stop(md_ctrl); + + if (!ctl->md->rgu_irq_asserted) { + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP); + /* Wait for the DRM disable to take effect */ + msleep(FSM_DRM_DISABLE_DELAY_MS); + + err = t7xx_acpi_fldr_func(t7xx_dev); + if (err) + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET); + } + + fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl)); +} + +static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl->md_state != MD_STATE_WAITING_FOR_HS2) + return; + + ctl->md_state = MD_STATE_READY; + + fsm_state_notify(ctl->md, MD_STATE_READY); +} + +static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) +{ + struct t7xx_modem *md = ctl->md; + + ctl->curr_state = FSM_STATE_READY; + t7xx_fsm_broadcast_ready_state(ctl); + t7xx_md_event_notify(md, FSM_READY); +} + +static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) +{ + struct t7xx_modem *md = ctl->md; + struct device *dev; + + ctl->curr_state = FSM_STATE_STARTING; + + t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1); + t7xx_md_event_notify(md, FSM_START); + + wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg, + HZ * 60); + dev = &md->t7xx_dev->pdev->dev; + + if (ctl->exp_flg) + dev_err(dev, "MD exception is captured during handshake\n"); + + if (!md->core_md.ready) { + dev_err(dev, "MD handshake timeout\n"); + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); + return -ETIMEDOUT; + } + + fsm_routine_ready(ctl); + return 0; +} + +static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd) +{ + struct t7xx_modem *md = ctl->md; + u32 dev_status; + int ret; + + if (!md) + return; + + if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START && + ctl->curr_state != FSM_STATE_STOPPED) { + fsm_finish_command(ctl, cmd, -EINVAL); + return; + } + + ctl->curr_state = FSM_STATE_PRE_START; + t7xx_md_event_notify(md, FSM_PRE_START); + + ret = read_poll_timeout(ioread32, dev_status, + (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000, + false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS); + if (ret) { + struct device *dev = &md->t7xx_dev->pdev->dev; + + fsm_finish_command(ctl, cmd, -ETIMEDOUT); + dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK); + return; + } + + t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]); + fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl)); +} + +static int fsm_main_thread(void *data) +{ + struct t7xx_fsm_ctl *ctl = data; + struct t7xx_fsm_command *cmd; + unsigned long flags; + + while (!kthread_should_stop()) { + if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&ctl->command_lock, flags); + cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry); + 
list_del(&cmd->entry); + spin_unlock_irqrestore(&ctl->command_lock, flags); + + switch (cmd->cmd_id) { + case FSM_CMD_START: + fsm_routine_start(ctl, cmd); + break; + + case FSM_CMD_EXCEPTION: + fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag)); + break; + + case FSM_CMD_PRE_STOP: + fsm_routine_stopping(ctl, cmd); + break; + + case FSM_CMD_STOP: + fsm_routine_stopped(ctl, cmd); + break; + + default: + fsm_finish_command(ctl, cmd, -EINVAL); + fsm_flush_event_cmd_qs(ctl); + break; + } + } + + return 0; +} + +int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) +{ + DECLARE_COMPLETION_ONSTACK(done); + struct t7xx_fsm_command *cmd; + unsigned long flags; + int ret; + + cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + INIT_LIST_HEAD(&cmd->entry); + cmd->cmd_id = cmd_id; + cmd->flag = flag; + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + cmd->done = &done; + cmd->ret = &ret; + } + + spin_lock_irqsave(&ctl->command_lock, flags); + list_add_tail(&cmd->entry, &ctl->command_queue); + spin_unlock_irqrestore(&ctl->command_lock, flags); + + wake_up(&ctl->command_wq); + + if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { + unsigned long wait_ret; + + wait_ret = wait_for_completion_timeout(&done, + msecs_to_jiffies(FSM_CMD_TIMEOUT_MS)); + if (!wait_ret) + return -ETIMEDOUT; + + return ret; + } + + return 0; +} + +int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, + unsigned char *data, unsigned int length) +{ + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + struct t7xx_fsm_event *event; + unsigned long flags; + + if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) { + dev_err(dev, "Invalid event %d\n", event_id); + return -EINVAL; + } + + event = kmalloc(sizeof(*event) + length, in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL); + if (!event) + return -ENOMEM; + + INIT_LIST_HEAD(&event->entry); + event->event_id = event_id; + event->length = length; + + if (data && length) + memcpy(event->data, data, length); + + spin_lock_irqsave(&ctl->event_lock, flags); + list_add_tail(&event->entry, &ctl->event_queue); + spin_unlock_irqrestore(&ctl->event_lock, flags); + + wake_up_all(&ctl->event_wq); + return 0; +} + +void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id) +{ + struct t7xx_fsm_event *event, *evt_next; + unsigned long flags; + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) { + if (event->event_id == event_id) + fsm_del_kf_event(event); + } + spin_unlock_irqrestore(&ctl->event_lock, flags); +} + +enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl) + return ctl->md_state; + + return MD_STATE_INVALID; +} + +unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl) +{ + if (ctl) + return ctl->curr_state; + + return FSM_STATE_STOPPED; +} + +int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type) +{ + unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT; + + if (type == MD_IRQ_PORT_ENUM) { + return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags); + } else if (type == MD_IRQ_CCIF_EX) { + ctl->exp_flg = true; + wake_up(&ctl->async_hk_wq); + cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT); + return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags); + } + + return -EINVAL; +} + +void t7xx_fsm_reset(struct t7xx_modem *md) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + fsm_flush_event_cmd_qs(ctl); + ctl->curr_state = FSM_STATE_STOPPED; + ctl->exp_flg = false; +} + +int t7xx_fsm_init(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + struct t7xx_fsm_ctl *ctl; + + ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return -ENOMEM; + + md->fsm_ctl = ctl; + ctl->md = md; + ctl->curr_state = FSM_STATE_INIT; + INIT_LIST_HEAD(&ctl->command_queue); + INIT_LIST_HEAD(&ctl->event_queue); + init_waitqueue_head(&ctl->async_hk_wq); + init_waitqueue_head(&ctl->event_wq); + INIT_LIST_HEAD(&ctl->notifier_list); + init_waitqueue_head(&ctl->command_wq); + spin_lock_init(&ctl->event_lock); + spin_lock_init(&ctl->command_lock); + ctl->exp_flg = false; + spin_lock_init(&ctl->notifier_lock); + + ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm"); + return PTR_ERR_OR_ZERO(ctl->fsm_thread); +} + +void t7xx_fsm_uninit(struct t7xx_modem *md) +{ + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + if (!ctl) + return; + + if (ctl->fsm_thread) + kthread_stop(ctl->fsm_thread); + + fsm_flush_event_cmd_qs(ctl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h new file mode 100644 index 000000000000..a6041a51809e --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * + * Contributors: + * Eliot Lee + * Ricardo Martinez + * Sreehari Kancharla + */ + +#ifndef __T7XX_MONITOR_H__ +#define __T7XX_MONITOR_H__ + +#include +#include +#include +#include +#include + +#include "t7xx_modem_ops.h" + +enum t7xx_fsm_state { + FSM_STATE_INIT, + FSM_STATE_PRE_START, + FSM_STATE_STARTING, + FSM_STATE_READY, + FSM_STATE_EXCEPTION, + FSM_STATE_STOPPING, + FSM_STATE_STOPPED, +}; + +enum t7xx_fsm_event_state { + FSM_EVENT_INVALID, + FSM_EVENT_MD_EX, + FSM_EVENT_MD_EX_REC_OK, + FSM_EVENT_MD_EX_PASS, + FSM_EVENT_MAX +}; + +enum t7xx_fsm_cmd_state { + FSM_CMD_INVALID, + FSM_CMD_START, + FSM_CMD_EXCEPTION, + FSM_CMD_PRE_STOP, + FSM_CMD_STOP, +}; + +enum t7xx_ex_reason { + EXCEPTION_HS_TIMEOUT, + EXCEPTION_EVENT, +}; + +enum t7xx_md_irq_type { + MD_IRQ_WDT, + MD_IRQ_CCIF_EX, + MD_IRQ_PORT_ENUM, +}; + +enum md_state { + MD_STATE_INVALID, + MD_STATE_WAITING_FOR_HS1, + MD_STATE_WAITING_FOR_HS2, + MD_STATE_READY, + MD_STATE_EXCEPTION, + MD_STATE_WAITING_TO_STOP, + MD_STATE_STOPPED, +}; + +#define FSM_CMD_FLAG_WAIT_FOR_COMPLETION BIT(0) +#define FSM_CMD_FLAG_FLIGHT_MODE BIT(1) +#define FSM_CMD_FLAG_IN_INTERRUPT BIT(2) +#define FSM_CMD_EX_REASON GENMASK(23, 16) + +struct t7xx_fsm_ctl { + struct t7xx_modem *md; + enum md_state md_state; + unsigned int curr_state; + struct list_head command_queue; + struct list_head event_queue; + wait_queue_head_t command_wq; + wait_queue_head_t event_wq; + wait_queue_head_t async_hk_wq; + spinlock_t event_lock; /* Protects event queue */ + spinlock_t command_lock; /* Protects command queue */ + struct task_struct *fsm_thread; + bool exp_flg; + spinlock_t notifier_lock; /* Protects notifier list */ + struct list_head notifier_list; +}; + +struct t7xx_fsm_event { + struct list_head entry; + enum t7xx_fsm_event_state event_id; + unsigned int length; + unsigned char data[]; +}; + +struct t7xx_fsm_command { + struct list_head entry; + enum t7xx_fsm_cmd_state cmd_id; + unsigned int flag; + struct completion *done; + int *ret; +}; + +struct t7xx_fsm_notifier { + struct list_head entry; + int (*notifier_fn)(enum md_state state, void *data); + void *data; +}; + +int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, + unsigned int flag); +int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id, + unsigned char *data, unsigned int length); +void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id); +void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state); +void t7xx_fsm_reset(struct t7xx_modem *md); +int t7xx_fsm_init(struct t7xx_modem *md); +void t7xx_fsm_uninit(struct t7xx_modem *md); +int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type); +enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl); +unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl); +void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); +void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier); + +#endif /* __T7XX_MONITOR_H__ */ -- cgit v1.2.3 From 48cc2f5ef846e76dc3bb1501a4014be18c644c1b Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:01 -0700 Subject: net: wwan: t7xx: Add port proxy infrastructure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Port-proxy provides a common interface to interact with different types of ports. 
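As a rough illustration (a hedged sketch, not part of this patch), each such port boils down to a static configuration entry plus a table of callbacks; the sketch below uses the struct t7xx_port_conf and struct port_ops types added here, with hypothetical callback names and placeholder queue numbers, and only recv_skb pointing at a helper that this patch actually provides.

/* Hypothetical port description; concrete entries (control, AT, MBIM)
 * are added by later patches in this series.
 */
static struct port_ops example_port_ops = {
	.init            = example_port_init,			/* hypothetical */
	.recv_skb        = t7xx_port_enqueue_skb,		/* helper from this patch */
	.md_state_notify = example_port_md_state_notify,	/* hypothetical */
	.uninit          = example_port_uninit,			/* hypothetical */
};

static const struct t7xx_port_conf example_port_conf = {
	.tx_ch     = PORT_CH_UART2_TX,
	.rx_ch     = PORT_CH_UART2_RX,
	.txq_index = 5,		/* placeholder queue numbers */
	.rxq_index = 5,
	.path_id   = CLDMA_ID_MD,
	.ops       = &example_port_ops,
	.name      = "example",
};

The proxy walks an array of such entries, instantiates a struct t7xx_port for each one, and dispatches received CLDMA skbs to the matching port by channel ID.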
Ports export their configuration via `struct t7xx_port` and operate as defined by `struct port_ops`. Signed-off-by: Haijun Liu Co-developed-by: Chandrashekar Devegowda Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Ilpo Järvinen Reviewed-by: Sergey Ryazanov Signed-off-by: David S. Miller --- drivers/net/wwan/t7xx/Makefile | 1 + drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 3 +- drivers/net/wwan/t7xx/t7xx_modem_ops.c | 14 +- drivers/net/wwan/t7xx/t7xx_port.h | 132 +++++++++ drivers/net/wwan/t7xx/t7xx_port_proxy.c | 452 +++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_port_proxy.h | 72 +++++ drivers/net/wwan/t7xx/t7xx_state_monitor.c | 5 + 7 files changed, 677 insertions(+), 2 deletions(-) create mode 100644 drivers/net/wwan/t7xx/t7xx_port.h create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.c create mode 100644 drivers/net/wwan/t7xx/t7xx_port_proxy.h (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile index 6a49013bc343..99f9ca3b4b51 100644 --- a/drivers/net/wwan/t7xx/Makefile +++ b/drivers/net/wwan/t7xx/Makefile @@ -10,3 +10,4 @@ mtk_t7xx-y:= t7xx_pci.o \ t7xx_modem_ops.o \ t7xx_cldma.o \ t7xx_hif_cldma.o \ + t7xx_port_proxy.o \ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index c756b1d0b519..7a8c09f7435a 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -46,6 +46,7 @@ #include "t7xx_mhccif.h" #include "t7xx_pci.h" #include "t7xx_pcie_mac.h" +#include "t7xx_port_proxy.h" #include "t7xx_reg.h" #include "t7xx_state_monitor.h" @@ -55,7 +56,7 @@ #define CHECK_Q_STOP_TIMEOUT_US 1000000 #define CHECK_Q_STOP_STEP_US 10000 -#define CLDMA_JUMBO_BUFF_SZ 64528 /* 63kB + CCCI header */ +#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header)) static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx, unsigned int index) diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index 30292e3f1c37..d1d3384608be 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -34,6 +34,8 @@ #include "t7xx_modem_ops.h" #include "t7xx_pci.h" #include "t7xx_pcie_mac.h" +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" #include "t7xx_reg.h" #include "t7xx_state_monitor.h" @@ -273,6 +275,7 @@ static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage) if (stage == HIF_EX_CLEARQ_DONE) { /* Give DHL time to flush data */ msleep(PORT_RESET_DELAY_MS); + t7xx_port_proxy_reset(md->port_prox); } t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage); @@ -426,6 +429,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) md->exp_id = 0; t7xx_fsm_reset(md); t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); + t7xx_port_proxy_reset(md->port_prox); md->md_init_finish = true; return 0; } @@ -462,14 +466,21 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) if (ret) goto err_uninit_fsm; + ret = t7xx_port_proxy_init(md); + if (ret) + goto err_uninit_md_cldma; + ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0); if (ret) /* fsm_uninit flushes cmd queue */ - goto err_uninit_md_cldma; + goto err_uninit_proxy; t7xx_md_sys_sw_init(t7xx_dev); md->md_init_finish = true; return 0; +err_uninit_proxy: + t7xx_port_proxy_uninit(md->port_prox); + err_uninit_md_cldma: t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); @@ -492,6 +503,7 @@ void 
t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) return; t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + t7xx_port_proxy_uninit(md->port_prox); t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); t7xx_fsm_uninit(md); destroy_workqueue(md->handshake_wq); diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h new file mode 100644 index 000000000000..0f3ffba5f9c2 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chandrashekar Devegowda + * Eliot Lee + */ + +#ifndef __T7XX_PORT_H__ +#define __T7XX_PORT_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_pci.h" + +#define PORT_CH_ID_MASK GENMASK(7, 0) + +/* Channel ID and Message ID definitions. + * The channel number consists of peer_id(15:12) , channel_id(11:0) + * peer_id: + * 0:reserved, 1: to sAP, 2: to MD + */ +enum port_ch { + /* to MD */ + PORT_CH_CONTROL_RX = 0x2000, + PORT_CH_CONTROL_TX = 0x2001, + PORT_CH_UART1_RX = 0x2006, /* META */ + PORT_CH_UART1_TX = 0x2008, + PORT_CH_UART2_RX = 0x200a, /* AT */ + PORT_CH_UART2_TX = 0x200c, + PORT_CH_MD_LOG_RX = 0x202a, /* MD logging */ + PORT_CH_MD_LOG_TX = 0x202b, + PORT_CH_LB_IT_RX = 0x203e, /* Loop back test */ + PORT_CH_LB_IT_TX = 0x203f, + PORT_CH_STATUS_RX = 0x2043, /* Status events */ + PORT_CH_MIPC_RX = 0x20ce, /* MIPC */ + PORT_CH_MIPC_TX = 0x20cf, + PORT_CH_MBIM_RX = 0x20d0, + PORT_CH_MBIM_TX = 0x20d1, + PORT_CH_DSS0_RX = 0x20d2, + PORT_CH_DSS0_TX = 0x20d3, + PORT_CH_DSS1_RX = 0x20d4, + PORT_CH_DSS1_TX = 0x20d5, + PORT_CH_DSS2_RX = 0x20d6, + PORT_CH_DSS2_TX = 0x20d7, + PORT_CH_DSS3_RX = 0x20d8, + PORT_CH_DSS3_TX = 0x20d9, + PORT_CH_DSS4_RX = 0x20da, + PORT_CH_DSS4_TX = 0x20db, + PORT_CH_DSS5_RX = 0x20dc, + PORT_CH_DSS5_TX = 0x20dd, + PORT_CH_DSS6_RX = 0x20de, + PORT_CH_DSS6_TX = 0x20df, + PORT_CH_DSS7_RX = 0x20e0, + PORT_CH_DSS7_TX = 0x20e1, +}; + +struct t7xx_port; +struct port_ops { + int (*init)(struct t7xx_port *port); + int (*recv_skb)(struct t7xx_port *port, struct sk_buff *skb); + void (*md_state_notify)(struct t7xx_port *port, unsigned int md_state); + void (*uninit)(struct t7xx_port *port); + int (*enable_chl)(struct t7xx_port *port); + int (*disable_chl)(struct t7xx_port *port); +}; + +struct t7xx_port_conf { + enum port_ch tx_ch; + enum port_ch rx_ch; + unsigned char txq_index; + unsigned char rxq_index; + unsigned char txq_exp_index; + unsigned char rxq_exp_index; + enum cldma_id path_id; + struct port_ops *ops; + char *name; + enum wwan_port_type port_type; +}; + +struct t7xx_port { + /* Members not initialized in definition */ + const struct t7xx_port_conf *port_conf; + struct wwan_port *wwan_port; + struct t7xx_pci_dev *t7xx_dev; + struct device *dev; + u16 seq_nums[2]; /* TX/RX sequence numbers */ + atomic_t usage_cnt; + struct list_head entry; + struct list_head queue_entry; + /* TX and RX flows are asymmetric since ports are multiplexed on + * queues. + * + * TX: data blocks are sent directly to a queue. Each port + * does not maintain a TX list; instead, they only provide + * a wait_queue_head for blocking writes. + * + * RX: Each port uses a RX list to hold packets, + * allowing the modem to dispatch RX packet as quickly as possible. 
+ */ + struct sk_buff_head rx_skb_list; + spinlock_t port_update_lock; /* Protects port configuration */ + wait_queue_head_t rx_wq; + int rx_length_th; + bool chan_enable; + struct task_struct *thread; +}; + +struct sk_buff *t7xx_port_alloc_skb(int payload); +int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb); +int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg); + +#endif /* __T7XX_PORT_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c new file mode 100644 index 000000000000..be4171a1d384 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chandrashekar Devegowda + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_modem_ops.h" +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define Q_IDX_CTRL 0 +#define Q_IDX_MBIM 2 +#define Q_IDX_AT_CMD 5 + +#define INVALID_SEQ_NUM GENMASK(15, 0) + +#define for_each_proxy_port(i, p, proxy) \ + for (i = 0, (p) = &(proxy)->ports[i]; \ + i < (proxy)->port_count; \ + i++, (p) = &(proxy)->ports[i]) + +static const struct t7xx_port_conf t7xx_md_port_conf[] = { +}; + +static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch) +{ + const struct t7xx_port_conf *port_conf; + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + port_conf = port->port_conf; + if (port_conf->rx_ch == ch || port_conf->tx_ch == ch) + return port; + } + + return NULL; +} + +static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h) +{ + u32 status = le32_to_cpu(ccci_h->status); + u16 seq_num, next_seq_num; + bool assert_bit; + + seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status); + next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD); + assert_bit = status & CCCI_H_AST_BIT; + if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM) + return next_seq_num; + + if (seq_num != port->seq_nums[MTK_RX]) + dev_warn_ratelimited(port->dev, + "seq num out-of-order %u != %u (header %X, len %X)\n", + seq_num, port->seq_nums[MTK_RX], + le32_to_cpu(ccci_h->packet_header), + le32_to_cpu(ccci_h->packet_len)); + + return next_seq_num; +} + +void t7xx_port_proxy_reset(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; + port->seq_nums[MTK_TX] = 0; + } +} + +static int t7xx_port_get_queue_no(struct t7xx_port *port) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + + return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ? 
+ port_conf->txq_exp_index : port_conf->txq_index; +} + +static void t7xx_port_struct_init(struct t7xx_port *port) +{ + INIT_LIST_HEAD(&port->entry); + INIT_LIST_HEAD(&port->queue_entry); + skb_queue_head_init(&port->rx_skb_list); + init_waitqueue_head(&port->rx_wq); + port->seq_nums[MTK_RX] = INVALID_SEQ_NUM; + port->seq_nums[MTK_TX] = 0; + atomic_set(&port->usage_cnt, 0); +} + +struct sk_buff *t7xx_port_alloc_skb(int payload) +{ + struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL); + + if (skb) + skb_reserve(skb, sizeof(struct ccci_header)); + + return skb; +} + +/** + * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list. + * @port: port context. + * @skb: received skb. + * + * Return: + * * 0 - Success. + * * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed. + */ +int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&port->rx_wq.lock, flags); + if (port->rx_skb_list.qlen >= port->rx_length_th) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + return -ENOBUFS; + } + __skb_queue_tail(&port->rx_skb_list, skb); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + wake_up_all(&port->rx_wq); + return 0; +} + +static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + enum cldma_id path_id = port->port_conf->path_id; + struct cldma_ctrl *md_ctrl; + int ret, tx_qno; + + md_ctrl = port->t7xx_dev->md->md_ctrl[path_id]; + tx_qno = t7xx_port_get_queue_no(port); + ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb); + if (ret) + dev_err(port->dev, "Failed to send skb: %d\n", ret); + + return ret; +} + +static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb, + unsigned int pkt_header, unsigned int ex_msg) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct ccci_header *ccci_h; + u32 status; + int ret; + + ccci_h = skb_push(skb, sizeof(*ccci_h)); + status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) | + FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT; + ccci_h->status = cpu_to_le32(status); + ccci_h->packet_header = cpu_to_le32(pkt_header); + ccci_h->packet_len = cpu_to_le32(skb->len); + ccci_h->ex_msg = cpu_to_le32(ex_msg); + + ret = t7xx_port_send_raw_skb(port, skb); + if (ret) + return ret; + + port->seq_nums[MTK_TX]++; + return 0; +} + +int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, + unsigned int ex_msg) +{ + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + unsigned int fsm_state; + + fsm_state = t7xx_fsm_get_ctl_state(ctl); + if (fsm_state != FSM_STATE_PRE_START) { + const struct t7xx_port_conf *port_conf = port->port_conf; + enum md_state md_state = t7xx_fsm_get_md_state(ctl); + + switch (md_state) { + case MD_STATE_EXCEPTION: + if (port_conf->tx_ch != PORT_CH_MD_LOG_TX) + return -EBUSY; + break; + + case MD_STATE_WAITING_FOR_HS1: + case MD_STATE_WAITING_FOR_HS2: + case MD_STATE_STOPPED: + case MD_STATE_WAITING_TO_STOP: + case MD_STATE_INVALID: + return -ENODEV; + + default: + break; + } + } + + return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); +} + +static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + + int i, j; + + for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++) + INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]); + + for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) { + for (i = 0; i < 
ARRAY_SIZE(port_prox->queue_ports[j]); i++) + INIT_LIST_HEAD(&port_prox->queue_ports[j][i]); + } + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + enum cldma_id path_id = port_conf->path_id; + u8 ch_id; + + ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch); + list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]); + list_add_tail(&port->queue_entry, + &port_prox->queue_ports[path_id][port_conf->rxq_index]); + } +} + +static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev, + struct cldma_queue *queue, u16 channel) +{ + struct port_proxy *port_prox = t7xx_dev->md->port_prox; + struct list_head *port_list; + struct t7xx_port *port; + u8 ch_id; + + ch_id = FIELD_GET(PORT_CH_ID_MASK, channel); + port_list = &port_prox->rx_ch_ports[ch_id]; + list_for_each_entry(port, port_list, entry) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (queue->md_ctrl->hif_id == port_conf->path_id && + channel == port_conf->rx_ch) + return port; + } + + return NULL; +} + +/** + * t7xx_port_proxy_recv_skb() - Dispatch received skb. + * @queue: CLDMA queue. + * @skb: Socket buffer. + * + * Return: + ** 0 - Packet consumed. + ** -ERROR - Failed to process skb. + */ +static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) +{ + struct ccci_header *ccci_h = (struct ccci_header *)skb->data; + struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev; + struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl; + struct device *dev = queue->md_ctrl->dev; + const struct t7xx_port_conf *port_conf; + struct t7xx_port *port; + u16 seq_num, channel; + int ret; + + if (!skb) + return -EINVAL; + + channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status)); + if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) { + dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel); + goto drop_skb; + } + + port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel); + if (!port) { + dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel); + goto drop_skb; + } + + seq_num = t7xx_port_next_rx_seq_num(port, ccci_h); + port_conf = port->port_conf; + skb_pull(skb, sizeof(*ccci_h)); + + ret = port_conf->ops->recv_skb(port, skb); + /* Error indicates to try again later */ + if (ret) { + skb_push(skb, sizeof(*ccci_h)); + return ret; + } + + port->seq_nums[MTK_RX] = seq_num; + return 0; + +drop_skb: + dev_kfree_skb_any(skb); + return 0; +} + +/** + * t7xx_port_proxy_md_status_notify() - Notify all ports of state. + *@port_prox: The port_proxy pointer. + *@state: State. + * + * Called by t7xx_fsm. Used to dispatch modem status for all ports, + * which want to know MD state transition. 
+ */ +void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state) +{ + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (port_conf->ops->md_state_notify) + port_conf->ops->md_state_notify(port, state); + } +} + +static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) +{ + struct port_proxy *port_prox = md->port_prox; + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + t7xx_port_struct_init(port); + + port->t7xx_dev = md->t7xx_dev; + port->dev = &md->t7xx_dev->pdev->dev; + spin_lock_init(&port->port_update_lock); + port->chan_enable = false; + + if (port_conf->ops->init) + port_conf->ops->init(port); + } + + t7xx_proxy_setup_ch_mapping(port_prox); +} + +static int t7xx_proxy_alloc(struct t7xx_modem *md) +{ + unsigned int port_count = ARRAY_SIZE(t7xx_md_port_conf); + struct device *dev = &md->t7xx_dev->pdev->dev; + struct port_proxy *port_prox; + int i; + + port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count, + GFP_KERNEL); + if (!port_prox) + return -ENOMEM; + + md->port_prox = port_prox; + port_prox->dev = dev; + + for (i = 0; i < port_count; i++) + port_prox->ports[i].port_conf = &t7xx_md_port_conf[i]; + + port_prox->port_count = port_count; + t7xx_proxy_init_all_ports(md); + return 0; +} + +/** + * t7xx_port_proxy_init() - Initialize ports. + * @md: Modem. + * + * Create all port instances. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. + */ +int t7xx_port_proxy_init(struct t7xx_modem *md) +{ + int ret; + + ret = t7xx_proxy_alloc(md); + if (ret) + return ret; + + t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb); + return 0; +} + +void t7xx_port_proxy_uninit(struct port_proxy *port_prox) +{ + struct t7xx_port *port; + int i; + + for_each_proxy_port(i, port, port_prox) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (port_conf->ops->uninit) + port_conf->ops->uninit(port); + } +} + +int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, + bool en_flag) +{ + struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id); + const struct t7xx_port_conf *port_conf; + + if (!port) + return -EINVAL; + + port_conf = port->port_conf; + + if (en_flag) { + if (port_conf->ops->enable_chl) + port_conf->ops->enable_chl(port); + } else { + if (port_conf->ops->disable_chl) + port_conf->ops->disable_chl(port); + } + + return 0; +} diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h new file mode 100644 index 000000000000..e3ef269f190e --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_PORT_PROXY_H__ +#define __T7XX_PORT_PROXY_H__ + +#include +#include +#include +#include + +#include "t7xx_hif_cldma.h" +#include "t7xx_modem_ops.h" +#include "t7xx_port.h" + +#define MTK_QUEUES 16 +#define RX_QUEUE_MAXLEN 32 +#define CTRL_QUEUE_MAXLEN 16 + +struct port_proxy { + int port_count; + struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1]; + struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES]; + struct device *dev; + struct t7xx_port ports[]; +}; + +struct ccci_header { + __le32 packet_header; + __le32 packet_len; + __le32 status; + __le32 ex_msg; +}; + +/* Coupled with HW - indicates if there is data following the CCCI header or not */ +#define CCCI_HEADER_NO_DATA 0xffffffff + +#define CCCI_H_AST_BIT BIT(31) +#define CCCI_H_SEQ_FLD GENMASK(30, 16) +#define CCCI_H_CHN_FLD GENMASK(15, 0) + +#define PORT_INFO_RSRVD GENMASK(31, 16) +#define PORT_INFO_ENFLG BIT(15) +#define PORT_INFO_CH_ID GENMASK(14, 0) + +#define PORT_ENUM_VER 0 +#define PORT_ENUM_HEAD_PATTERN 0x5a5a5a5a +#define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5 +#define PORT_ENUM_VER_MISMATCH 0x00657272 + +void t7xx_port_proxy_reset(struct port_proxy *port_prox); +void t7xx_port_proxy_uninit(struct port_proxy *port_prox); +int t7xx_port_proxy_init(struct t7xx_modem *md); +void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state); +int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, + bool en_flag); + +#endif /* __T7XX_PORT_PROXY_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index 4b2e6913c45e..30f3739f1d83 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -37,6 +37,7 @@ #include "t7xx_modem_ops.h" #include "t7xx_pci.h" #include "t7xx_pcie_mac.h" +#include "t7xx_port_proxy.h" #include "t7xx_reg.h" #include "t7xx_state_monitor.h" @@ -90,6 +91,9 @@ static void fsm_state_notify(struct t7xx_modem *md, enum md_state state) void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) { ctl->md_state = state; + + /* Update to port first, otherwise sending message on HS2 may fail */ + t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state); fsm_state_notify(ctl->md, state); } @@ -258,6 +262,7 @@ static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl) ctl->md_state = MD_STATE_READY; fsm_state_notify(ctl->md, MD_STATE_READY); + t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY); } static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl) -- cgit v1.2.3 From da45d2566a1d4e260b894ff5d96be64b21c7fa79 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:02 -0700 Subject: net: wwan: t7xx: Add control port MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Control Port implements driver control messages such as modem-host handshaking, controls port enumeration, and handles exception messages. The handshaking process between the driver and the modem happens during the init sequence. The process involves the exchange of a list of supported runtime features to make sure that modem and host are ready to provide proper feature lists including port enumeration. Further features can be enabled and controlled in this handshaking process. 
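To make the handshake concrete, here is a minimal, hedged sketch (not part of the patch) of how the host side emits the HS1 feature query using the helpers this patch adds. Field and function names are taken from the code below; error handling is trimmed, and example_send_hs1() itself is a hypothetical wrapper.

static int example_send_hs1(struct t7xx_sys_info *core)
{
	struct feature_query *ft_query;
	struct sk_buff *skb;

	/* Allocate room for a ctrl_msg_header plus the HS1 payload */
	skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
	if (!skb)
		return -ENOMEM;

	/* Payload: "ICCC" pattern, per-feature support flags, "ICCC" pattern */
	ft_query = skb_put(skb, sizeof(*ft_query));
	ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
	memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
	ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

	/* Prepends a ctrl_msg_header (CTL_ID_HS1_MSG) and a ccci_header, then
	 * queues the skb on the control channel; the modem answers with
	 * CTL_ID_HS2_MSG and the host closes the exchange with CTL_ID_HS3_MSG.
	 */
	return t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

In the patch itself this sequence is driven by t7xx_prepare_host_rt_data_query(), while the HS2 reply is consumed and answered in t7xx_core_hk_handler().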
Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Ilpo Järvinen Reviewed-by: Sergey Ryazanov Signed-off-by: David S. Miller --- drivers/net/wwan/t7xx/Makefile | 1 + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 214 +++++++++++++++++++++- drivers/net/wwan/t7xx/t7xx_modem_ops.h | 3 + drivers/net/wwan/t7xx/t7xx_port.h | 3 + drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c | 273 +++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_port_proxy.c | 40 +++++ drivers/net/wwan/t7xx/t7xx_port_proxy.h | 25 +++ drivers/net/wwan/t7xx/t7xx_state_monitor.c | 3 + drivers/net/wwan/t7xx/t7xx_state_monitor.h | 2 + 9 files changed, 561 insertions(+), 3 deletions(-) create mode 100644 drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile index 99f9ca3b4b51..63e1c67b82b5 100644 --- a/drivers/net/wwan/t7xx/Makefile +++ b/drivers/net/wwan/t7xx/Makefile @@ -11,3 +11,4 @@ mtk_t7xx-y:= t7xx_pci.o \ t7xx_cldma.o \ t7xx_hif_cldma.o \ t7xx_port_proxy.o \ + t7xx_port_ctrl_msg.o \ diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index d1d3384608be..a1b60173d89d 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -16,6 +16,8 @@ */ #include +#include +#include #include #include #include @@ -26,6 +28,7 @@ #include #include #include +#include #include #include "t7xx_cldma.h" @@ -39,11 +42,24 @@ #include "t7xx_reg.h" #include "t7xx_state_monitor.h" +#define RT_ID_MD_PORT_ENUM 0 +/* Modem feature query identification code - "ICCC" */ +#define MD_FEATURE_QUERY_ID 0x49434343 + +#define FEATURE_VER GENMASK(7, 4) +#define FEATURE_MSK GENMASK(3, 0) + #define RGU_RESET_DELAY_MS 10 #define PORT_RESET_DELAY_MS 2000 #define EX_HS_TIMEOUT_MS 5000 #define EX_HS_POLL_DELAY_MS 10 +enum mtk_feature_support_type { + MTK_FEATURE_DOES_NOT_EXIST, + MTK_FEATURE_NOT_SUPPORTED, + MTK_FEATURE_MUST_BE_SUPPORTED, +}; + static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev) { return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK; @@ -314,16 +330,205 @@ static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev) t7xx_pcie_register_rgu_isr(t7xx_dev); } +struct feature_query { + __le32 head_pattern; + u8 feature_set[FEATURE_COUNT]; + __le32 tail_pattern; +}; + +static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core) +{ + struct feature_query *ft_query; + struct sk_buff *skb; + + skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query)); + if (!skb) + return; + + ft_query = skb_put(skb, sizeof(*ft_query)); + ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); + memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT); + ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID); + + /* Send HS1 message to device */ + t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0); +} + +static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev, + void *data) +{ + struct feature_query *md_feature = data; + struct mtk_runtime_feature *rt_feature; + unsigned int i, rt_data_len = 0; + struct sk_buff *skb; + + /* Parse MD runtime data query */ + if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID || + le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) { + dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n", + le32_to_cpu(md_feature->head_pattern), + 
le32_to_cpu(md_feature->tail_pattern)); + return -EINVAL; + } + + for (i = 0; i < FEATURE_COUNT; i++) { + if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) != + MTK_FEATURE_MUST_BE_SUPPORTED) + rt_data_len += sizeof(*rt_feature); + } + + skb = t7xx_ctrl_alloc_skb(rt_data_len); + if (!skb) + return -ENOMEM; + + rt_feature = skb_put(skb, rt_data_len); + memset(rt_feature, 0, rt_data_len); + + /* Fill runtime feature */ + for (i = 0; i < FEATURE_COUNT; i++) { + u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]); + + if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED) + continue; + + rt_feature->feature_id = i; + if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST) + rt_feature->support_info = md_feature->feature_set[i]; + + rt_feature++; + } + + /* Send HS3 message to device */ + t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0); + return 0; +} + +static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core, + struct device *dev, void *data, int data_length) +{ + enum mtk_feature_support_type ft_spt_st, ft_spt_cfg; + struct mtk_runtime_feature *rt_feature; + int i, offset; + + offset = sizeof(struct feature_query); + for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) { + rt_feature = data + offset; + offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len); + + ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]); + if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED) + continue; + + ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info); + if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED) + return -EINVAL; + + if (i == RT_ID_MD_PORT_ENUM) + t7xx_port_enum_msg_handler(ctl->md, rt_feature->data); + } + + return 0; +} + +static int t7xx_core_reset(struct t7xx_modem *md) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + + md->core_md.ready = false; + + if (!ctl) { + dev_err(dev, "FSM is not initialized\n"); + return -EINVAL; + } + + if (md->core_md.handshake_ongoing) { + int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); + + if (ret) + return ret; + } + + md->core_md.handshake_ongoing = false; + return 0; +} + +static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl, + enum t7xx_fsm_event_state event_id, + enum t7xx_fsm_event_state err_detect) +{ + struct t7xx_sys_info *core_info = &md->core_md; + struct device *dev = &md->t7xx_dev->pdev->dev; + struct t7xx_fsm_event *event, *event_next; + unsigned long flags; + int ret; + + t7xx_prepare_host_rt_data_query(core_info); + + while (!kthread_should_stop()) { + bool event_received = false; + + spin_lock_irqsave(&ctl->event_lock, flags); + list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) { + if (event->event_id == err_detect) { + list_del(&event->entry); + spin_unlock_irqrestore(&ctl->event_lock, flags); + dev_err(dev, "Core handshake error event received\n"); + goto err_free_event; + } else if (event->event_id == event_id) { + list_del(&event->entry); + event_received = true; + break; + } + } + spin_unlock_irqrestore(&ctl->event_lock, flags); + + if (event_received) + break; + + wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) || + kthread_should_stop()); + if (kthread_should_stop()) + goto err_free_event; + } + + if (ctl->exp_flg) + goto err_free_event; + + ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length); + if (ret) { + dev_err(dev, "Host failure parsing runtime data: %d\n", ret); + goto 
err_free_event; + } + + if (ctl->exp_flg) + goto err_free_event; + + ret = t7xx_prepare_device_rt_data(core_info, dev, event->data); + if (ret) { + dev_err(dev, "Device failure parsing runtime data: %d", ret); + goto err_free_event; + } + + core_info->ready = true; + core_info->handshake_ongoing = false; + wake_up(&ctl->async_hk_wq); +err_free_event: + kfree(event); +} + static void t7xx_md_hk_wq(struct work_struct *work) { struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work); struct t7xx_fsm_ctl *ctl = md->fsm_ctl; + /* Clear the HS2 EXIT event appended in core_reset() */ + t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT); t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]); t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]); t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2); - md->core_md.ready = true; - wake_up(&ctl->async_hk_wq); + md->core_md.handshake_ongoing = true; + t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT); } void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id) @@ -418,6 +623,9 @@ static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev) return NULL; INIT_WORK(&md->handshake_work, t7xx_md_hk_wq); + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK; + md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |= + FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED); return md; } @@ -431,7 +639,7 @@ int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev) t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]); t7xx_port_proxy_reset(md->port_prox); md->md_init_finish = true; - return 0; + return t7xx_core_reset(md); } /** diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h index 1fc286fa1a14..7469ed636ae8 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h @@ -57,6 +57,9 @@ enum md_event_id { struct t7xx_sys_info { bool ready; + bool handshake_ongoing; + u8 feature_set[FEATURE_COUNT]; + struct t7xx_port *ctl_port; }; struct t7xx_modem { diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h index 0f3ffba5f9c2..dc4133eb433a 100644 --- a/drivers/net/wwan/t7xx/t7xx_port.h +++ b/drivers/net/wwan/t7xx/t7xx_port.h @@ -125,8 +125,11 @@ struct t7xx_port { }; struct sk_buff *t7xx_port_alloc_skb(int payload); +struct sk_buff *t7xx_ctrl_alloc_skb(int payload); int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb); int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, unsigned int ex_msg); +int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, + unsigned int ex_msg); #endif /* __T7XX_PORT_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c new file mode 100644 index 000000000000..68430b130a67 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Ricardo Martinez + * Moises Veleta + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define PORT_MSG_VERSION GENMASK(31, 16) +#define PORT_MSG_PRT_CNT GENMASK(15, 0) + +struct port_msg { + __le32 head_pattern; + __le32 info; + __le32 tail_pattern; + __le32 data[]; +}; + +static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg) +{ + struct sk_buff *skb; + int ret; + + skb = t7xx_ctrl_alloc_skb(0); + if (!skb) + return -ENOMEM; + + ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg); + if (ret) + dev_kfree_skb_any(skb); + + return ret; +} + +static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl, + struct sk_buff *skb) +{ + struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data; + struct device *dev = &ctl->md->t7xx_dev->pdev->dev; + enum md_state md_state; + int ret = -EINVAL; + + md_state = t7xx_fsm_get_md_state(ctl); + if (md_state != MD_STATE_EXCEPTION) { + dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n", + ctrl_msg_h->ex_msg, md_state); + return -EINVAL; + } + + switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { + case CTL_ID_MD_EX: + if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) { + dev_err(dev, "Receive invalid MD_EX %x\n", ctrl_msg_h->ex_msg); + break; + } + + ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID); + if (ret) { + dev_err(dev, "Failed to send exception message to modem\n"); + break; + } + + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception event"); + + break; + + case CTL_ID_MD_EX_ACK: + if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) { + dev_err(dev, "Receive invalid MD_EX_ACK %x\n", ctrl_msg_h->ex_msg); + break; + } + + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception Received event"); + + break; + + case CTL_ID_MD_EX_PASS: + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0); + if (ret) + dev_err(dev, "Failed to append Modem Exception Passed event"); + + break; + + case CTL_ID_DRV_VER_ERROR: + dev_err(dev, "AP/MD driver version mismatch\n"); + } + + return ret; +} + +/** + * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes. + * @md: Modem context. + * @msg: Message. + * + * Used to control create/remove device node. + * + * Return: + * * 0 - Success. + * * -EFAULT - Message check failure. 
+ */ +int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg) +{ + struct device *dev = &md->t7xx_dev->pdev->dev; + unsigned int version, port_count, i; + struct port_msg *port_msg = msg; + + version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info)); + if (version != PORT_ENUM_VER || + le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN || + le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) { + dev_err(dev, "Invalid port control message %x:%x:%x\n", + version, le32_to_cpu(port_msg->head_pattern), + le32_to_cpu(port_msg->tail_pattern)); + return -EFAULT; + } + + port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info)); + for (i = 0; i < port_count; i++) { + u32 port_info = le32_to_cpu(port_msg->data[i]); + unsigned int ch_id; + bool en_flag; + + ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info); + en_flag = port_info & PORT_INFO_ENFLG; + if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag)) + dev_dbg(dev, "Port:%x not found\n", ch_id); + } + + return 0; +} + +static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl; + struct ctrl_msg_header *ctrl_msg_h; + int ret = 0; + + ctrl_msg_h = (struct ctrl_msg_header *)skb->data; + switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) { + case CTL_ID_HS2_MSG: + skb_pull(skb, sizeof(*ctrl_msg_h)); + + if (port_conf->rx_ch == PORT_CH_CONTROL_RX) { + ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data, + le32_to_cpu(ctrl_msg_h->data_length)); + if (ret) + dev_err(port->dev, "Failed to append Handshake 2 event"); + } + + dev_kfree_skb_any(skb); + break; + + case CTL_ID_MD_EX: + case CTL_ID_MD_EX_ACK: + case CTL_ID_MD_EX_PASS: + case CTL_ID_DRV_VER_ERROR: + ret = fsm_ee_message_handler(port, ctl, skb); + dev_kfree_skb_any(skb); + break; + + case CTL_ID_PORT_ENUM: + skb_pull(skb, sizeof(*ctrl_msg_h)); + ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data); + if (!ret) + ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0); + else + ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, + PORT_ENUM_VER_MISMATCH); + + break; + + default: + ret = -EINVAL; + dev_err(port->dev, "Unknown control message ID to FSM %x\n", + le32_to_cpu(ctrl_msg_h->ctrl_msg_id)); + break; + } + + if (ret) + dev_err(port->dev, "%s control message handle error: %d\n", port_conf->name, ret); + + return ret; +} + +static int port_ctl_rx_thread(void *arg) +{ + while (!kthread_should_stop()) { + struct t7xx_port *port = arg; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&port->rx_wq.lock, flags); + if (skb_queue_empty(&port->rx_skb_list) && + wait_event_interruptible_locked_irq(port->rx_wq, + !skb_queue_empty(&port->rx_skb_list) || + kthread_should_stop())) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + continue; + } + if (kthread_should_stop()) { + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + break; + } + skb = __skb_dequeue(&port->rx_skb_list); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); + + control_msg_handler(port, skb); + } + + return 0; +} + +static int port_ctl_init(struct t7xx_port *port) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + + port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name); + if (IS_ERR(port->thread)) { + dev_err(port->dev, "Failed to start port control thread\n"); + return PTR_ERR(port->thread); + } + + port->rx_length_th = CTRL_QUEUE_MAXLEN; + return 
0; +} + +static void port_ctl_uninit(struct t7xx_port *port) +{ + unsigned long flags; + struct sk_buff *skb; + + if (port->thread) + kthread_stop(port->thread); + + spin_lock_irqsave(&port->rx_wq.lock, flags); + port->rx_length_th = 0; + while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&port->rx_wq.lock, flags); +} + +struct port_ops ctl_port_ops = { + .init = port_ctl_init, + .recv_skb = t7xx_port_enqueue_skb, + .uninit = port_ctl_uninit, +}; diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c index be4171a1d384..55f932d2c126 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -49,6 +49,15 @@ i++, (p) = &(proxy)->ports[i]) static const struct t7xx_port_conf t7xx_md_port_conf[] = { + { + .tx_ch = PORT_CH_CONTROL_TX, + .rx_ch = PORT_CH_CONTROL_RX, + .txq_index = Q_IDX_CTRL, + .rxq_index = Q_IDX_CTRL, + .path_id = CLDMA_ID_MD, + .ops = &ctl_port_ops, + .name = "t7xx_ctrl", + }, }; static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch) @@ -129,6 +138,16 @@ struct sk_buff *t7xx_port_alloc_skb(int payload) return skb; } +struct sk_buff *t7xx_ctrl_alloc_skb(int payload) +{ + struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header)); + + if (skb) + skb_reserve(skb, sizeof(struct ctrl_msg_header)); + + return skb; +} + /** * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list. * @port: port context. @@ -194,6 +213,24 @@ static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb, return 0; } +int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg, + unsigned int ex_msg) +{ + struct ctrl_msg_header *ctrl_msg_h; + unsigned int msg_len = skb->len; + u32 pkt_header = 0; + + ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h)); + ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg); + ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg); + ctrl_msg_h->data_length = cpu_to_le32(msg_len); + + if (!msg_len) + pkt_header = CCCI_HEADER_NO_DATA; + + return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg); +} + int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header, unsigned int ex_msg) { @@ -359,6 +396,9 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md) t7xx_port_struct_init(port); + if (port_conf->tx_ch == PORT_CH_CONTROL_TX) + md->core_md.ctl_port = port; + port->t7xx_dev = md->t7xx_dev; port->dev = &md->t7xx_dev->pdev->dev; spin_lock_init(&port->port_update_lock); diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h index e3ef269f190e..23f6af0ba3eb 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h @@ -53,6 +53,27 @@ struct ccci_header { #define CCCI_H_SEQ_FLD GENMASK(30, 16) #define CCCI_H_CHN_FLD GENMASK(15, 0) +struct ctrl_msg_header { + __le32 ctrl_msg_id; + __le32 ex_msg; + __le32 data_length; +}; + +/* Control identification numbers for AP<->MD messages */ +#define CTL_ID_HS1_MSG 0x0 +#define CTL_ID_HS2_MSG 0x1 +#define CTL_ID_HS3_MSG 0x2 +#define CTL_ID_MD_EX 0x4 +#define CTL_ID_DRV_VER_ERROR 0x5 +#define CTL_ID_MD_EX_ACK 0x6 +#define CTL_ID_MD_EX_PASS 0x8 +#define CTL_ID_PORT_ENUM 0x9 + +/* Modem exception check identification code - "EXCP" */ +#define MD_EX_CHK_ID 0x45584350 +/* Modem exception check acknowledge identification code - "EREC" */ +#define MD_EX_CHK_ACK_ID 0x45524543 + 
#define PORT_INFO_RSRVD GENMASK(31, 16) #define PORT_INFO_ENFLG BIT(15) #define PORT_INFO_CH_ID GENMASK(14, 0) @@ -62,10 +83,14 @@ struct ccci_header { #define PORT_ENUM_TAIL_PATTERN 0xa5a5a5a5 #define PORT_ENUM_VER_MISMATCH 0x00657272 +/* Port operations mapping */ +extern struct port_ops ctl_port_ops; + void t7xx_port_proxy_reset(struct port_proxy *port_prox); void t7xx_port_proxy_uninit(struct port_proxy *port_prox); int t7xx_port_proxy_init(struct t7xx_modem *md); void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state); +int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg); int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id, bool en_flag); diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index 30f3739f1d83..d841f5051af8 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -293,6 +293,9 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) if (!md->core_md.ready) { dev_err(dev, "MD handshake timeout\n"); + if (md->core_md.handshake_ongoing) + t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0); + fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT); return -ETIMEDOUT; } diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h index a6041a51809e..b1af0259d4c5 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -37,9 +37,11 @@ enum t7xx_fsm_state { enum t7xx_fsm_event_state { FSM_EVENT_INVALID, + FSM_EVENT_MD_HS2, FSM_EVENT_MD_EX, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX_PASS, + FSM_EVENT_MD_HS2_EXIT, FSM_EVENT_MAX }; -- cgit v1.2.3 From 61b7a2916a0ef91be2e9a4b0d0a5bdf9a371cbee Mon Sep 17 00:00:00 2001 From: Chandrashekar Devegowda Date: Fri, 6 May 2022 11:13:03 -0700 Subject: net: wwan: t7xx: Add AT and MBIM WWAN ports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds AT and MBIM ports to the port proxy infrastructure. The initialization method is responsible for creating the corresponding ports using the WWAN framework infrastructure. The implemented WWAN port operations are start, stop, and TX. Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Sergey Ryazanov Reviewed-by: Ilpo Järvinen Signed-off-by: David S. 
Miller --- drivers/net/wwan/t7xx/Makefile | 1 + drivers/net/wwan/t7xx/t7xx_port_proxy.c | 20 ++++ drivers/net/wwan/t7xx/t7xx_port_proxy.h | 1 + drivers/net/wwan/t7xx/t7xx_port_wwan.c | 176 ++++++++++++++++++++++++++++++++ 4 files changed, 198 insertions(+) create mode 100644 drivers/net/wwan/t7xx/t7xx_port_wwan.c (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile index 63e1c67b82b5..9eec2e2472fb 100644 --- a/drivers/net/wwan/t7xx/Makefile +++ b/drivers/net/wwan/t7xx/Makefile @@ -12,3 +12,4 @@ mtk_t7xx-y:= t7xx_pci.o \ t7xx_hif_cldma.o \ t7xx_port_proxy.o \ t7xx_port_ctrl_msg.o \ + t7xx_port_wwan.o \ diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c index 55f932d2c126..7d2c0e81e33d 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -50,6 +50,26 @@ static const struct t7xx_port_conf t7xx_md_port_conf[] = { { + .tx_ch = PORT_CH_UART2_TX, + .rx_ch = PORT_CH_UART2_RX, + .txq_index = Q_IDX_AT_CMD, + .rxq_index = Q_IDX_AT_CMD, + .txq_exp_index = 0xff, + .rxq_exp_index = 0xff, + .path_id = CLDMA_ID_MD, + .ops = &wwan_sub_port_ops, + .name = "AT", + .port_type = WWAN_PORT_AT, + }, { + .tx_ch = PORT_CH_MBIM_TX, + .rx_ch = PORT_CH_MBIM_RX, + .txq_index = Q_IDX_MBIM, + .rxq_index = Q_IDX_MBIM, + .path_id = CLDMA_ID_MD, + .ops = &wwan_sub_port_ops, + .name = "MBIM", + .port_type = WWAN_PORT_MBIM, + }, { .tx_ch = PORT_CH_CONTROL_TX, .rx_ch = PORT_CH_CONTROL_RX, .txq_index = Q_IDX_CTRL, diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h index 23f6af0ba3eb..bc1ff5c6c700 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h @@ -84,6 +84,7 @@ struct ctrl_msg_header { #define PORT_ENUM_VER_MISMATCH 0x00657272 /* Port operations mapping */ +extern struct port_ops wwan_sub_port_ops; extern struct port_ops ctl_port_ops; void t7xx_port_proxy_reset(struct port_proxy *port_prox); diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c new file mode 100644 index 000000000000..33931bfd78fd --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Chandrashekar Devegowda + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_port.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +static int t7xx_port_ctrl_start(struct wwan_port *port) +{ + struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); + + if (atomic_read(&port_mtk->usage_cnt)) + return -EBUSY; + + atomic_inc(&port_mtk->usage_cnt); + return 0; +} + +static void t7xx_port_ctrl_stop(struct wwan_port *port) +{ + struct t7xx_port *port_mtk = wwan_port_get_drvdata(port); + + atomic_dec(&port_mtk->usage_cnt); +} + +static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb) +{ + struct t7xx_port *port_private = wwan_port_get_drvdata(port); + size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU; + const struct t7xx_port_conf *port_conf; + struct t7xx_fsm_ctl *ctl; + enum md_state md_state; + + len = skb->len; + if (!len || !port_private->chan_enable) + return -EINVAL; + + port_conf = port_private->port_conf; + ctl = port_private->t7xx_dev->md->fsm_ctl; + md_state = t7xx_fsm_get_md_state(ctl); + if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) { + dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n", + port_conf->name, md_state); + return -ENODEV; + } + + for (offset = 0; offset < len; offset += chunk_len) { + struct sk_buff *skb_ccci; + int ret; + + chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header)); + skb_ccci = t7xx_port_alloc_skb(chunk_len); + if (!skb_ccci) + return -ENOMEM; + + skb_put_data(skb_ccci, skb->data + offset, chunk_len); + ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0); + if (ret) { + dev_kfree_skb_any(skb_ccci); + dev_err(port_private->dev, "Write error on %s port, %d\n", + port_conf->name, ret); + return ret; + } + } + + dev_kfree_skb(skb); + return 0; +} + +static const struct wwan_port_ops wwan_ops = { + .start = t7xx_port_ctrl_start, + .stop = t7xx_port_ctrl_stop, + .tx = t7xx_port_ctrl_tx, +}; + +static int t7xx_port_wwan_init(struct t7xx_port *port) +{ + port->rx_length_th = RX_QUEUE_MAXLEN; + return 0; +} + +static void t7xx_port_wwan_uninit(struct t7xx_port *port) +{ + if (!port->wwan_port) + return; + + port->rx_length_th = 0; + wwan_remove_port(port->wwan_port); + port->wwan_port = NULL; +} + +static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb) +{ + if (!atomic_read(&port->usage_cnt) || !port->chan_enable) { + const struct t7xx_port_conf *port_conf = port->port_conf; + + dev_kfree_skb_any(skb); + dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n", + port_conf->name); + /* Dropping skb, caller should not access skb.*/ + return 0; + } + + wwan_port_rx(port->wwan_port, skb); + return 0; +} + +static int t7xx_port_wwan_enable_chl(struct t7xx_port *port) +{ + spin_lock(&port->port_update_lock); + port->chan_enable = true; + spin_unlock(&port->port_update_lock); + + return 0; +} + +static int t7xx_port_wwan_disable_chl(struct t7xx_port *port) +{ + spin_lock(&port->port_update_lock); + port->chan_enable = false; + spin_unlock(&port->port_update_lock); + + return 0; +} + +static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state) +{ + const struct t7xx_port_conf *port_conf = port->port_conf; + + if (state 
!= MD_STATE_READY) + return; + + if (!port->wwan_port) { + port->wwan_port = wwan_create_port(port->dev, port_conf->port_type, + &wwan_ops, port); + if (IS_ERR(port->wwan_port)) + dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name); + } +} + +struct port_ops wwan_sub_port_ops = { + .init = t7xx_port_wwan_init, + .recv_skb = t7xx_port_wwan_recv_skb, + .uninit = t7xx_port_wwan_uninit, + .enable_chl = t7xx_port_wwan_enable_chl, + .disable_chl = t7xx_port_wwan_disable_chl, + .md_state_notify = t7xx_port_wwan_md_state_notify, +}; -- cgit v1.2.3 From 33f78ab5a38a79cc5227eb59da9c98b25cfb0ff1 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:04 -0700 Subject: net: wwan: t7xx: Data path HW layer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Data Path Modem AP Interface (DPMAIF) HW layer provides HW abstraction for the upper layer (DPMAIF HIF). It implements functions to do the HW configuration, TX/RX control and interrupt handling. Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Ilpo Järvinen Reviewed-by: Sergey Ryazanov Signed-off-by: David S. Miller --- drivers/net/wwan/t7xx/t7xx_dpmaif.c | 1283 +++++++++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_dpmaif.h | 179 +++++ drivers/net/wwan/t7xx/t7xx_reg.h | 213 ++++++ 3 files changed, 1675 insertions(+) create mode 100644 drivers/net/wwan/t7xx/t7xx_dpmaif.c create mode 100644 drivers/net/wwan/t7xx/t7xx_dpmaif.h (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_dpmaif.c new file mode 100644 index 000000000000..c8bf6929af51 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c @@ -0,0 +1,1283 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_reg.h" + +#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ + readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us) + +static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info) +{ + struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask; + u32 value, ul_intr_enable, dl_intr_enable; + int ret; + + ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; + iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + + /* Set interrupt enable mask */ + iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); + iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); + + /* Check mask status */ + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_intr_enable) != ul_intr_enable, 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + return ret; + + dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR; + isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable; + ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN | + DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN; + isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable; + iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); + + /* Set DL ISR PD enable mask */ + iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0, + value, (value & ul_intr_enable) != ul_intr_enable, 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + return ret; + + isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY; + iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); + iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk, + hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK); + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); + value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0); + iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK); + + return 0; +} + +static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + struct dpmaif_isr_en_mask *isr_en_msk; + u32 value, ul_int_que_done; + int ret; + + isr_en_msk = &hw_info->isr_en_mask; + ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done; + iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_int_que_done) == ul_int_que_done, 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, + "Could not mask the UL interrupt. 
DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); +} + +void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + struct dpmaif_isr_en_mask *isr_en_msk; + u32 value, ul_int_que_done; + int ret; + + isr_en_msk = &hw_info->isr_en_mask; + ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK; + isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done; + iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0, + value, (value & ul_int_que_done) != ul_int_que_done, 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, + "Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); +} + +void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info) +{ + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR; + iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info) +{ + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR; + iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); + iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + return value; +} + +static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 value, q_done; + int ret; + + q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; + iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + + ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done, + 0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done); + if (ret) { + dev_err(hw_info->dev, + "Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n", + value); + return -ETIMEDOUT; + } + + hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done; + return 0; +} + +void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 mask; + + mask = qno == DPF_RX_QNO0 ? 
DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE; + iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); + hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask; +} + +void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info) +{ + u32 ip_busy_sts; + + ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY); + iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY); +} + +static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno) +{ + if (qno == DPF_RX_QNO0) + iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + else + iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); +} + +void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno) +{ + if (qno == DPF_RX_QNO0) + iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); + else + iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0); +} + +void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); +} + +void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); +} + +static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para, + enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue) +{ + para->intr_types[para->intr_cnt] = intr_type; + para->intr_queues[para->intr_cnt] = intr_queue; + para->intr_cnt++; +} + +/* The para->intr_cnt counter is set to zero before this function is called. + * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. + */ +static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info, + unsigned int intr_status, + struct dpmaif_hw_intr_st_para *para) +{ + unsigned long value; + + value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status); + if (value) { + unsigned int index; + + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value); + + for_each_set_bit(index, &value, DPMAIF_TXQ_NUM) + t7xx_dpmaif_mask_ulq_intr(hw_info, index); + } + + value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value); + + value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value); + + value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value); + + value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status); + if (value) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value); + + /* Clear interrupt status */ + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); +} + +/* The para->intr_cnt counter is set to zero before this function is called. + * It does not check for overflow as there is no risk of overflowing intr_types or intr_queues. 
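+ * Each interrupt type is recorded at most once per ISR pass, so para->intr_cnt
+ * cannot exceed DPF_INTR_INVALID_MAX - 1, the size of both the intr_types and
+ * intr_queues arrays.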
+ */ +static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info, + unsigned int intr_status, + struct dpmaif_hw_intr_st_para *para, int qno) +{ + if (qno == DPF_RX_QNO_DFT) { + if (intr_status & DP_DL_INT_SKB_LEN_ERR) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT); + hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR; + iowrite32(DP_DL_INT_BATCNT_LEN_ERR, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + } + + if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT); + hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR; + iowrite32(DP_DL_INT_PITCNT_LEN_ERR, + hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0); + } + + if (intr_status & DP_DL_INT_PKT_EMPTY_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_FRG_EMPTY_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_MTU_ERR_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno)); + t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno); + } + + if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR, + DPF_RX_QNO_DFT); + + if (intr_status & DP_DL_INT_Q0_DONE) { + /* Mask RX done interrupt immediately after it occurs, do not clear + * the interrupt if the mask operation fails. + */ + if (!t7xx_mask_dlq_intr(hw_info, qno)) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno)); + else + intr_status &= ~DP_DL_INT_Q0_DONE; + } + } else { + if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) { + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno)); + t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno); + } + + if (intr_status & DP_DL_INT_Q1_DONE) { + if (!t7xx_mask_dlq_intr(hw_info, qno)) + t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno)); + else + intr_status &= ~DP_DL_INT_Q1_DONE; + } + } + + intr_status |= DP_DL_INT_BATCNT_LEN_ERR; + /* Clear interrupt status */ + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); +} + +/** + * t7xx_dpmaif_hw_get_intr_cnt() - Reads interrupt status and count from HW. + * @hw_info: Pointer to struct hw_info. + * @para: Pointer to struct dpmaif_hw_intr_st_para. + * @qno: Queue number. + * + * Reads RX/TX interrupt status from HW and clears UL/DL status as needed. + * + * Return: Interrupt count. + */ +int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_intr_st_para *para, int qno) +{ + u32 rx_intr_status, tx_intr_status = 0; + u32 rx_intr_qdone, tx_intr_qdone = 0; + + rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0); + rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0); + + /* TX interrupt status */ + if (qno == DPF_RX_QNO_DFT) { + /* All ULQ and DLQ0 interrupts use the same source no need to check ULQ interrupts + * when a DLQ1 interrupt has occurred. 
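+ * For DLQ1 (qno != DPF_RX_QNO_DFT) only the DL status registers are read and
+ * handled further below.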
+ */ + tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0); + } + + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + + if (qno == DPF_RX_QNO_DFT) { + /* Do not schedule bottom half again or clear UL interrupt status when we + * have already masked it. + */ + tx_intr_status &= ~tx_intr_qdone; + if (tx_intr_status) + t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para); + } + + if (rx_intr_status) { + if (qno == DPF_RX_QNO0) { + rx_intr_status &= DP_DL_Q0_STATUS_MASK; + if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE) + /* Do not schedule bottom half again or clear DL + * queue done interrupt status when we have already masked it. + */ + rx_intr_status &= ~DP_DL_INT_Q0_DONE; + } else { + rx_intr_status &= DP_DL_Q1_STATUS_MASK; + if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE) + rx_intr_status &= ~DP_DL_INT_Q1_DONE; + } + + if (rx_intr_status) + t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno); + } + + return para->intr_cnt; +} + +static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR); + value |= DPMAIF_MEM_CLR; + iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR); + + return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR, + value, !(value & DPMAIF_MEM_CLR), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); +} + +static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT); + udelay(2); + iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT); + udelay(2); + iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT); + udelay(2); + iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT); + udelay(2); +} + +static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info) +{ + u32 ap_port_mode; + int ret; + + t7xx_dpmaif_hw_reset(hw_info); + + ret = t7xx_dpmaif_sram_init(hw_info); + if (ret) + return ret; + + ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + ap_port_mode |= DPMAIF_PORT_MODE_PCIE; + iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN); + return 0; +} + +static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW); +} + +static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info) +{ + u32 enable_bat_cache, enable_pit_burst; + + enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI; + iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN; + iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + + /* DPMAIF DL DLQ part HW setting */ + +static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2; + value |= DPMAIF_HASH_PRIME_DF << 4; + value |= DPMAIF_HPC_TOTAL_NUM << 8; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL); +} + +static void t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = DPMAIF_AGG_MAX_LEN_DF | 
DPMAIF_AGG_TBL_ENT_NUM_DF << 16; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG); +} + +static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF, + hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5); +} + +static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0); +} + +static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info) +{ + unsigned int value, i; + + /* Each register holds two DLQ threshold timeout values */ + for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) { + value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF); + value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK, + DPMAIF_DLQ_TIMEOUT_THRES_DF); + iowrite32(value, + hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i); + } +} + +static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES); +} + +static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info) +{ + t7xx_dpmaif_hw_hpc_cntl_set(hw_info); + t7xx_dpmaif_hw_agg_cfg_set(hw_info); + t7xx_dpmaif_hw_hash_bit_choose_set(hw_info); + t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info); + t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info); + t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info); +} + +static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en) +{ + u32 value, dl_bat_init = 0; + int ret; + + if (frg_en) + dl_bat_init = DPMAIF_DL_BAT_FRG_INIT; + + dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET; + dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); + return ret; + } + + iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (ret) + dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n"); + + return ret; +} + +static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info, + dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3); +} + +static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + value &= ~DPMAIF_BAT_SIZE_MSK; + value |= size & DPMAIF_BAT_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); +} + +static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + if (enable) + value |= DPMAIF_BAT_EN_MSK; + else + value &= ~DPMAIF_BAT_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); +} + +static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); + value &= ~DPMAIF_BAT_BID_MAXCNT_MSK; + value |= 
FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); +} + +static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info) +{ + iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1); +} + +static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_PIT_CHK_NUM_MSK; + value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); + value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK; + value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK, + DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0); +} + +static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_BAT_BUF_SZ_MSK; + value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK, + DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); + value &= ~DPMAIF_BAT_RSV_LEN_MSK; + value |= DPMAIF_HW_BAT_RSVLEN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2); +} + +static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + value &= ~DPMAIF_PKT_ALIGN_MSK; + value |= DPMAIF_PKT_ALIGN_EN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); + value |= DPMAIF_DL_PKT_CHECKSUM_EN; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + value &= ~DPMAIF_FRG_CHECK_THRES_MSK; + value |= DPMAIF_HW_CHK_FRG_NUM; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + value &= ~DPMAIF_FRG_BUF_SZ_MSK; + value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK, + DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); + + if (enable) + value |= DPMAIF_FRG_EN_MSK; + else + value &= ~DPMAIF_FRG_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES); +} + +static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + 
DPMAIF_AO_DL_RDY_CHK_THRES); + value &= ~DPMAIF_BAT_CHECK_THRES_MSK; + value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM); + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES); +} + +static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); + value &= ~DPMAIF_DL_PIT_SEQ_MSK; + value |= DPMAIF_DL_PIT_SEQ_VALUE; + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END); +} + +static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info, + dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4); +} + +static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); + value &= ~DPMAIF_PIT_SIZE_MSK; + value |= size & DPMAIF_PIT_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5); + iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6); +} + +static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); + value |= DPMAIF_DLQPIT_EN_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3); +} + +static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info, + unsigned int pit_idx) +{ + unsigned int dl_pit_init; + int timeout; + u32 value; + + dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET; + dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS); + dl_pit_init |= DPMAIF_DL_PIT_INIT_EN; + + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, + value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), + DPMAIF_CHECK_DELAY_US, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) { + dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n"); + return; + } + + iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT); + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT, + value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY), + DPMAIF_CHECK_DELAY_US, + DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n"); +} + +static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num, + struct dpmaif_dl *dl_que) +{ + t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base); + t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt); + t7xx_dpmaif_dl_dlq_pit_en(hw_info); + t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num); +} + +static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info) +{ + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) + t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]); +} + +static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + u32 dl_bat_init, value; + int timeout; + + value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + + if (enable) + value |= DPMAIF_BAT_EN_MSK; + else + value &= ~DPMAIF_BAT_EN_MSK; + + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1); + dl_bat_init = 
DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT; + dl_bat_init |= DPMAIF_DL_BAT_INIT_EN; + + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n"); + + iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT); + timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT, + value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (timeout) + dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n"); +} + +static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info) +{ + struct dpmaif_dl *dl_que; + int ret; + + t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info); + + dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */ + if (!dl_que->que_started) + return -EBUSY; + + t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info); + t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info); + t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info); + t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info); + t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info); + t7xx_dpmaif_dl_set_pkt_alignment(hw_info); + t7xx_dpmaif_dl_set_pit_seqnum(hw_info); + t7xx_dpmaif_dl_set_ao_mtu(hw_info); + t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info); + t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info); + t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info); + t7xx_dpmaif_dl_frg_ao_en(hw_info, true); + + t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base); + t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt); + t7xx_dpmaif_dl_bat_en(hw_info, true); + + ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true); + if (ret) + return ret; + + t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base); + t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt); + t7xx_dpmaif_dl_bat_en(hw_info, false); + + ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false); + if (ret) + return ret; + + /* Init PIT (two PIT table) */ + t7xx_dpmaif_config_all_dlq_hw(hw_info); + t7xx_dpmaif_dl_all_q_en(hw_info, true); + t7xx_dpmaif_dl_set_pkt_checksum(hw_info); + return 0; +} + +static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info, + unsigned int q_num, unsigned int size) +{ + unsigned int value; + + value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); + value &= ~DPMAIF_DRB_SIZE_MSK; + value |= size & DPMAIF_DRB_SIZE_MSK; + iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num)); +} + +static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info, + unsigned int q_num, dma_addr_t addr) +{ + iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num)); + iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num)); +} + +static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info, + unsigned int q_num, bool ready) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (ready) + value |= BIT(q_num); + else + value &= ~BIT(q_num); + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info, + unsigned int q_num, bool enable) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (enable) + value |= BIT(q_num + 8); + else + value &= ~BIT(q_num + 8); + + iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info) +{ + struct 
dpmaif_ul *ul_que; + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + ul_que = &hw_info->ul_que[i]; + if (ul_que->que_started) { + t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt * + DPMAIF_UL_DRB_SIZE_WORD); + t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base); + t7xx_dpmaif_ul_rdy_en(hw_info, i, true); + t7xx_dpmaif_ul_arb_en(hw_info, i, true); + } else { + t7xx_dpmaif_ul_arb_en(hw_info, i, false); + } + } +} + +static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info) +{ + u32 ap_cfg; + int ret; + + ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); + ap_cfg |= DPMAIF_SRAM_SYNC; + iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG, + ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) + return ret; + + iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET); + iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET); + return 0; +} + +static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info) +{ + u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY); + + return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS); +} + +static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable) +{ + u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); + + if (enable) + ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN; + else + ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN; + + iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0); +} + +static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info) +{ + u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY); + + return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS); +} + +void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, + unsigned int drb_entry_cnt) +{ + u32 ul_update, value; + int err; + + ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK; + ul_update |= DPMAIF_UL_ADD_UPDATE; + + err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), + value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (err) { + dev_err(hw_info->dev, "UL add is not ready\n"); + return; + } + + iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num)); + + err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num), + value, !(value & DPMAIF_UL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (err) + dev_err(hw_info->dev, "Timeout updating UL add\n"); +} + +unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num)); + + return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / DPMAIF_UL_DRB_SIZE_WORD; +} + +int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, + unsigned int pit_remain_cnt) +{ + u32 dl_update, value; + int ret; + + dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK; + dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS); + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n"); + return ret; + } + + iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD); + + ret = 
ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + if (ret) { + dev_err(hw_info->dev, "Data plane modem add dlq failed\n"); + return ret; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, + unsigned int dlq_pit_idx) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX + + dlq_pit_idx * DLQ_PIT_IDX_SIZE); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +static bool t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) +{ + u32 value; + int ret; + + ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, + value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, + DPMAIF_CHECK_TIMEOUT_US); + return ret; +} + +int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt) +{ + unsigned int value; + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "DL add BAT not ready\n"); + return -EBUSY; + } + + value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; + value |= DPMAIF_DL_ADD_UPDATE; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "DL add BAT timeout\n"); + return -EBUSY; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt) +{ + unsigned int value; + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n"); + return -EBUSY; + } + + value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK; + value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE; + iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD); + + if (t7xx_dl_add_timedout(hw_info)) { + dev_err(hw_info->dev, "Data plane modem add frag DLQ failed"); + return -EBUSY; + } + + return 0; +} + +unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num) +{ + u32 value; + + value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX); + return value & DPMAIF_DL_RD_WR_IDX_MSK; +} + +static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_params *init_para) +{ + struct dpmaif_dl *dl_que; + struct dpmaif_ul *ul_que; + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + dl_que = &hw_info->dl_que[i]; + dl_que->bat_base = init_para->pkt_bat_base_addr[i]; + dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i]; + dl_que->pit_base = init_para->pit_base_addr[i]; + dl_que->pit_size_cnt = init_para->pit_size_cnt[i]; + dl_que->frg_base = init_para->frg_bat_base_addr[i]; + dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i]; + dl_que->que_started = true; + } + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + ul_que = &hw_info->ul_que[i]; + ul_que->drb_base = init_para->drb_base_addr[i]; + ul_que->drb_size_cnt = init_para->drb_size_cnt[i]; + ul_que->que_started = true; + } +} + +/** + * t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues. + * @hw_info: Pointer to struct hw_info. + * + * Disable HW UL queues. 
Checks busy UL queues to go to idle + * with an attempt count of 1000000. + * + * Return: + * * 0 - Success + * * -ETIMEDOUT - Timed out checking busy queues + */ +int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info) +{ + int count = 0; + + t7xx_dpmaif_ul_all_q_en(hw_info, false); + while (t7xx_dpmaif_ul_idle_check(hw_info)) { + if (++count >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n", + ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY)); + return -ETIMEDOUT; + } + } + + return 0; +} + +/** + * t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues. + * @hw_info: Pointer to struct hw_info. + * + * Disable HW DL queue. Checks busy UL queues to go to idle + * with an attempt count of 1000000. + * Check that HW PIT write index equals read index with the same + * attempt count. + * + * Return: + * * 0 - Success. + * * -ETIMEDOUT - Timed out checking busy queues. + */ +int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info) +{ + unsigned int wr_idx, rd_idx; + int count = 0; + + t7xx_dpmaif_dl_all_q_en(hw_info, false); + while (t7xx_dpmaif_dl_idle_check(hw_info)) { + if (++count >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n", + ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY)); + return -ETIMEDOUT; + } + } + + /* Check middle PIT sync done */ + count = 0; + do { + wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX); + wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK; + rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX); + rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK; + + if (wr_idx == rd_idx) + return 0; + } while (++count < DPMAIF_MAX_CHECK_COUNT); + + dev_err(hw_info->dev, "Check middle PIT sync fail\n"); + return -ETIMEDOUT; +} + +void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info) +{ + t7xx_dpmaif_ul_all_q_en(hw_info, true); + t7xx_dpmaif_dl_all_q_en(hw_info, true); +} + +/** + * t7xx_dpmaif_hw_init() - Initialize HW data path API. + * @hw_info: Pointer to struct hw_info. + * @init_param: Pointer to struct dpmaif_hw_params. + * + * Configures port mode, clock config, HW interrupt initialization, and HW queue. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. 
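+ *
+ * The sequence below is: HW reset and SRAM clear, port-mode and clock setup,
+ * interrupt initialization, queue property setup, DL and then UL queue
+ * configuration, and finally the init-done handshake with the hardware.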
+ */ +int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param) +{ + int ret; + + ret = t7xx_dpmaif_hw_config(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW config failed\n"); + return ret; + } + + ret = t7xx_dpmaif_init_intr(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n"); + return ret; + } + + t7xx_dpmaif_set_queue_property(hw_info, init_param); + t7xx_dpmaif_pcie_dpmaif_sign(hw_info); + t7xx_dpmaif_dl_performance(hw_info); + + ret = t7xx_dpmaif_config_dlq_hw(hw_info); + if (ret) { + dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n"); + return ret; + } + + t7xx_dpmaif_config_ulq_hw(hw_info); + + ret = t7xx_dpmaif_hw_init_done(hw_info); + if (ret) + dev_err(hw_info->dev, "DPMAIF HW queue init failed\n"); + + return ret; +} + +bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno) +{ + u32 intr_status; + + intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno); + if (intr_status) { + iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0); + return true; + } + + return false; +} diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_dpmaif.h new file mode 100644 index 000000000000..ae292355a33d --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_DPMAIF_H__ +#define __T7XX_DPMAIF_H__ + +#include +#include + +#define DPMAIF_DL_PIT_SEQ_VALUE 251 +#define DPMAIF_UL_DRB_SIZE_WORD 4 + +#define DPMAIF_MAX_CHECK_COUNT 1000000 +#define DPMAIF_CHECK_TIMEOUT_US 10000 +#define DPMAIF_CHECK_INIT_TIMEOUT_US 100000 +#define DPMAIF_CHECK_DELAY_US 10 + +#define DPMAIF_RXQ_NUM 2 +#define DPMAIF_TXQ_NUM 5 + +struct dpmaif_isr_en_mask { + unsigned int ap_ul_l2intr_en_msk; + unsigned int ap_dl_l2intr_en_msk; + unsigned int ap_udl_ip_busy_en_msk; + unsigned int ap_dl_l2intr_err_en_msk; +}; + +struct dpmaif_ul { + bool que_started; + unsigned char reserved[3]; + dma_addr_t drb_base; + unsigned int drb_size_cnt; +}; + +struct dpmaif_dl { + bool que_started; + unsigned char reserved[3]; + dma_addr_t pit_base; + unsigned int pit_size_cnt; + dma_addr_t bat_base; + unsigned int bat_size_cnt; + dma_addr_t frg_base; + unsigned int frg_size_cnt; + unsigned int pit_seq; +}; + +struct dpmaif_hw_info { + struct device *dev; + void __iomem *pcie_base; + struct dpmaif_dl dl_que[DPMAIF_RXQ_NUM]; + struct dpmaif_ul ul_que[DPMAIF_TXQ_NUM]; + struct dpmaif_isr_en_mask isr_en_mask; +}; + +/* DPMAIF HW Initialization parameter structure */ +struct dpmaif_hw_params { + /* UL part */ + dma_addr_t drb_base_addr[DPMAIF_TXQ_NUM]; + unsigned int drb_size_cnt[DPMAIF_TXQ_NUM]; + /* DL part */ + dma_addr_t pkt_bat_base_addr[DPMAIF_RXQ_NUM]; + unsigned int pkt_bat_size_cnt[DPMAIF_RXQ_NUM]; + dma_addr_t frg_bat_base_addr[DPMAIF_RXQ_NUM]; + unsigned int frg_bat_size_cnt[DPMAIF_RXQ_NUM]; + dma_addr_t pit_base_addr[DPMAIF_RXQ_NUM]; + unsigned int pit_size_cnt[DPMAIF_RXQ_NUM]; +}; + +enum dpmaif_hw_intr_type { + DPF_INTR_INVALID_MIN, + DPF_INTR_UL_DONE, + DPF_INTR_UL_DRB_EMPTY, + DPF_INTR_UL_MD_NOTREADY, + DPF_INTR_UL_MD_PWR_NOTREADY, + DPF_INTR_UL_LEN_ERR, + DPF_INTR_DL_DONE, + DPF_INTR_DL_SKB_LEN_ERR, + 
DPF_INTR_DL_BATCNT_LEN_ERR, + DPF_INTR_DL_PITCNT_LEN_ERR, + DPF_INTR_DL_PKT_EMPTY_SET, + DPF_INTR_DL_FRG_EMPTY_SET, + DPF_INTR_DL_MTU_ERR, + DPF_INTR_DL_FRGCNT_LEN_ERR, + DPF_INTR_DL_Q0_PITCNT_LEN_ERR, + DPF_INTR_DL_Q1_PITCNT_LEN_ERR, + DPF_INTR_DL_HPC_ENT_TYPE_ERR, + DPF_INTR_DL_Q0_DONE, + DPF_INTR_DL_Q1_DONE, + DPF_INTR_INVALID_MAX +}; + +#define DPF_RX_QNO0 0 +#define DPF_RX_QNO1 1 +#define DPF_RX_QNO_DFT DPF_RX_QNO0 + +struct dpmaif_hw_intr_st_para { + unsigned int intr_cnt; + enum dpmaif_hw_intr_type intr_types[DPF_INTR_INVALID_MAX - 1]; + unsigned int intr_queues[DPF_INTR_INVALID_MAX - 1]; +}; + +#define DPMAIF_HW_BAT_REMAIN 64 +#define DPMAIF_HW_BAT_PKTBUF (128 * 28) +#define DPMAIF_HW_FRG_PKTBUF 128 +#define DPMAIF_HW_BAT_RSVLEN 64 +#define DPMAIF_HW_PKT_BIDCNT 1 +#define DPMAIF_HW_MTU_SIZE (3 * 1024 + 8) +#define DPMAIF_HW_CHK_BAT_NUM 62 +#define DPMAIF_HW_CHK_FRG_NUM 3 +#define DPMAIF_HW_CHK_PIT_NUM (2 * DPMAIF_HW_CHK_BAT_NUM) + +#define DP_UL_INT_DONE_OFFSET 0 +#define DP_UL_INT_QDONE_MSK GENMASK(4, 0) +#define DP_UL_INT_EMPTY_MSK GENMASK(9, 5) +#define DP_UL_INT_MD_NOTREADY_MSK GENMASK(14, 10) +#define DP_UL_INT_MD_PWR_NOTREADY_MSK GENMASK(19, 15) +#define DP_UL_INT_ERR_MSK GENMASK(24, 20) + +#define DP_DL_INT_QDONE_MSK BIT(0) +#define DP_DL_INT_SKB_LEN_ERR BIT(1) +#define DP_DL_INT_BATCNT_LEN_ERR BIT(2) +#define DP_DL_INT_PITCNT_LEN_ERR BIT(3) +#define DP_DL_INT_PKT_EMPTY_MSK BIT(4) +#define DP_DL_INT_FRG_EMPTY_MSK BIT(5) +#define DP_DL_INT_MTU_ERR_MSK BIT(6) +#define DP_DL_INT_FRG_LEN_ERR_MSK BIT(7) +#define DP_DL_INT_Q0_PITCNT_LEN_ERR BIT(8) +#define DP_DL_INT_Q1_PITCNT_LEN_ERR BIT(9) +#define DP_DL_INT_HPC_ENT_TYPE_ERR BIT(10) +#define DP_DL_INT_Q0_DONE BIT(13) +#define DP_DL_INT_Q1_DONE BIT(14) + +#define DP_DL_Q0_STATUS_MASK (DP_DL_INT_Q0_PITCNT_LEN_ERR | DP_DL_INT_Q0_DONE) +#define DP_DL_Q1_STATUS_MASK (DP_DL_INT_Q1_PITCNT_LEN_ERR | DP_DL_INT_Q1_DONE) + +int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param); +int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info); +int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info); +int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info, + struct dpmaif_hw_intr_st_para *para, int qno); +void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num); +void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num, + unsigned int drb_entry_cnt); +int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt); +int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt); +int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx, + unsigned int pit_remain_cnt); +void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info, + unsigned int qno); +void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno); +bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno); +void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info); +void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info); +unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned 
int q_num); +unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num); +unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, + unsigned int dlq_pit_idx); + +#endif /* __T7XX_DPMAIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h index ad2d30d03cf4..7c1b81091a0f 100644 --- a/drivers/net/wwan/t7xx/t7xx_reg.h +++ b/drivers/net/wwan/t7xx/t7xx_reg.h @@ -134,4 +134,217 @@ enum t7xx_int { CLDMA3_INT, }; +/* DPMA definitions */ + +#define DPMAIF_PD_BASE 0x1022d000 +#define BASE_DPMAIF_UL DPMAIF_PD_BASE +#define BASE_DPMAIF_DL (DPMAIF_PD_BASE + 0x100) +#define BASE_DPMAIF_AP_MISC (DPMAIF_PD_BASE + 0x400) +#define BASE_DPMAIF_MMW_HPC (DPMAIF_PD_BASE + 0x600) +#define BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX (DPMAIF_PD_BASE + 0x900) +#define BASE_DPMAIF_PD_SRAM_DL (DPMAIF_PD_BASE + 0xc00) +#define BASE_DPMAIF_PD_SRAM_UL (DPMAIF_PD_BASE + 0xd00) + +#define DPMAIF_AO_BASE 0x10014000 +#define BASE_DPMAIF_AO_UL DPMAIF_AO_BASE +#define BASE_DPMAIF_AO_DL (DPMAIF_AO_BASE + 0x400) + +#define DPMAIF_UL_ADD_DESC (BASE_DPMAIF_UL + 0x00) +#define DPMAIF_UL_CHK_BUSY (BASE_DPMAIF_UL + 0x88) +#define DPMAIF_UL_RESERVE_AO_RW (BASE_DPMAIF_UL + 0xac) +#define DPMAIF_UL_ADD_DESC_CH0 (BASE_DPMAIF_UL + 0xb0) + +#define DPMAIF_DL_BAT_INIT (BASE_DPMAIF_DL + 0x00) +#define DPMAIF_DL_BAT_ADD (BASE_DPMAIF_DL + 0x04) +#define DPMAIF_DL_BAT_INIT_CON0 (BASE_DPMAIF_DL + 0x08) +#define DPMAIF_DL_BAT_INIT_CON1 (BASE_DPMAIF_DL + 0x0c) +#define DPMAIF_DL_BAT_INIT_CON2 (BASE_DPMAIF_DL + 0x10) +#define DPMAIF_DL_BAT_INIT_CON3 (BASE_DPMAIF_DL + 0x50) +#define DPMAIF_DL_CHK_BUSY (BASE_DPMAIF_DL + 0xb4) + +#define DPMAIF_AP_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x00) +#define DPMAIF_AP_APDL_L2TISAR0 (BASE_DPMAIF_AP_MISC + 0x50) +#define DPMAIF_AP_IP_BUSY (BASE_DPMAIF_AP_MISC + 0x60) +#define DPMAIF_AP_CG_EN (BASE_DPMAIF_AP_MISC + 0x68) +#define DPMAIF_AP_OVERWRITE_CFG (BASE_DPMAIF_AP_MISC + 0x90) +#define DPMAIF_AP_MEM_CLR (BASE_DPMAIF_AP_MISC + 0x94) +#define DPMAIF_AP_ALL_L2TISAR0_MASK GENMASK(31, 0) +#define DPMAIF_AP_APDL_ALL_L2TISAR0_MASK GENMASK(31, 0) +#define DPMAIF_AP_IP_BUSY_MASK GENMASK(31, 0) + +#define DPMAIF_AO_UL_INIT_SET (BASE_DPMAIF_AO_UL + 0x0) +#define DPMAIF_AO_UL_CHNL_ARB0 (BASE_DPMAIF_AO_UL + 0x1c) +#define DPMAIF_AO_UL_AP_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x80) +#define DPMAIF_AO_UL_AP_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x84) +#define DPMAIF_AO_UL_AP_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x88) +#define DPMAIF_AO_UL_AP_L1TIMR0 (BASE_DPMAIF_AO_UL + 0x8c) +#define DPMAIF_AO_UL_APDL_L2TIMR0 (BASE_DPMAIF_AO_UL + 0x90) +#define DPMAIF_AO_UL_APDL_L2TIMCR0 (BASE_DPMAIF_AO_UL + 0x94) +#define DPMAIF_AO_UL_APDL_L2TIMSR0 (BASE_DPMAIF_AO_UL + 0x98) +#define DPMAIF_AO_AP_DLUL_IP_BUSY_MASK (BASE_DPMAIF_AO_UL + 0x9c) + +#define DPMAIF_AO_UL_CHNL0_CON0 (BASE_DPMAIF_PD_SRAM_UL + 0x10) +#define DPMAIF_AO_UL_CHNL0_CON1 (BASE_DPMAIF_PD_SRAM_UL + 0x14) +#define DPMAIF_AO_UL_CHNL0_CON2 (BASE_DPMAIF_PD_SRAM_UL + 0x18) +#define DPMAIF_AO_UL_CH0_STA (BASE_DPMAIF_PD_SRAM_UL + 0x70) + +#define DPMAIF_AO_DL_INIT_SET (BASE_DPMAIF_AO_DL + 0x00) +#define DPMAIF_AO_DL_IRQ_MASK (BASE_DPMAIF_AO_DL + 0x0c) +#define DPMAIF_AO_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_AO_DL + 0x28) +#define DPMAIF_AO_DL_DLQPIT_TRIG_THRES (BASE_DPMAIF_AO_DL + 0x34) + +#define DPMAIF_AO_DL_PKTINFO_CON0 
(BASE_DPMAIF_PD_SRAM_DL + 0x00) +#define DPMAIF_AO_DL_PKTINFO_CON1 (BASE_DPMAIF_PD_SRAM_DL + 0x04) +#define DPMAIF_AO_DL_PKTINFO_CON2 (BASE_DPMAIF_PD_SRAM_DL + 0x08) +#define DPMAIF_AO_DL_RDY_CHK_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x0c) +#define DPMAIF_AO_DL_RDY_CHK_FRG_THRES (BASE_DPMAIF_PD_SRAM_DL + 0x10) + +#define DPMAIF_AO_DL_DLQ_AGG_CFG (BASE_DPMAIF_PD_SRAM_DL + 0x20) +#define DPMAIF_AO_DL_DLQPIT_TIMEOUT0 (BASE_DPMAIF_PD_SRAM_DL + 0x24) +#define DPMAIF_AO_DL_DLQPIT_TIMEOUT1 (BASE_DPMAIF_PD_SRAM_DL + 0x28) +#define DPMAIF_AO_DL_HPC_CNTL (BASE_DPMAIF_PD_SRAM_DL + 0x38) +#define DPMAIF_AO_DL_PIT_SEQ_END (BASE_DPMAIF_PD_SRAM_DL + 0x40) + +#define DPMAIF_AO_DL_BAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xd8) +#define DPMAIF_AO_DL_BAT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xdc) +#define DPMAIF_AO_DL_PIT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xec) +#define DPMAIF_AO_DL_PIT_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x60) +#define DPMAIF_AO_DL_FRGBAT_RD_IDX (BASE_DPMAIF_PD_SRAM_DL + 0x78) +#define DPMAIF_AO_DL_DLQ_WR_IDX (BASE_DPMAIF_PD_SRAM_DL + 0xa4) + +#define DPMAIF_HPC_INTR_MASK (BASE_DPMAIF_MMW_HPC + 0x0f4) +#define DPMA_HPC_ALL_INT_MASK GENMASK(15, 0) + +#define DPMAIF_HPC_DLQ_PATH_MODE 3 +#define DPMAIF_HPC_ADD_MODE_DF 0 +#define DPMAIF_HPC_TOTAL_NUM 8 +#define DPMAIF_HPC_MAX_TOTAL_NUM 8 + +#define DPMAIF_DL_DLQPIT_INIT (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x00) +#define DPMAIF_DL_DLQPIT_ADD (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x10) +#define DPMAIF_DL_DLQPIT_INIT_CON0 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x14) +#define DPMAIF_DL_DLQPIT_INIT_CON1 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x18) +#define DPMAIF_DL_DLQPIT_INIT_CON2 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x1c) +#define DPMAIF_DL_DLQPIT_INIT_CON3 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x20) +#define DPMAIF_DL_DLQPIT_INIT_CON4 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x24) +#define DPMAIF_DL_DLQPIT_INIT_CON5 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x28) +#define DPMAIF_DL_DLQPIT_INIT_CON6 (BASE_DPMAIF_DL_DLQ_REMOVEAO_IDX + 0x2c) + +#define DPMAIF_ULQSAR_n(q) (DPMAIF_AO_UL_CHNL0_CON0 + 0x10 * (q)) +#define DPMAIF_UL_DRBSIZE_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON1 + 0x10 * (q)) +#define DPMAIF_UL_DRB_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON2 + 0x10 * (q)) +#define DPMAIF_ULQ_STA0_n(q) (DPMAIF_AO_UL_CH0_STA + 0x04 * (q)) +#define DPMAIF_ULQ_ADD_DESC_CH_n(q) (DPMAIF_UL_ADD_DESC_CH0 + 0x04 * (q)) + +#define DPMAIF_UL_DRB_RIDX_MSK GENMASK(31, 16) + +#define DPMAIF_AP_RGU_ASSERT 0x10001150 +#define DPMAIF_AP_RGU_DEASSERT 0x10001154 +#define DPMAIF_AP_RST_BIT BIT(2) + +#define DPMAIF_AP_AO_RGU_ASSERT 0x10001140 +#define DPMAIF_AP_AO_RGU_DEASSERT 0x10001144 +#define DPMAIF_AP_AO_RST_BIT BIT(6) + +/* DPMAIF init/restore */ +#define DPMAIF_UL_ADD_NOT_READY BIT(31) +#define DPMAIF_UL_ADD_UPDATE BIT(31) +#define DPMAIF_UL_ADD_COUNT_MASK GENMASK(15, 0) +#define DPMAIF_UL_ALL_QUE_ARB_EN GENMASK(11, 8) + +#define DPMAIF_DL_ADD_UPDATE BIT(31) +#define DPMAIF_DL_ADD_NOT_READY BIT(31) +#define DPMAIF_DL_FRG_ADD_UPDATE BIT(16) +#define DPMAIF_DL_ADD_COUNT_MASK GENMASK(15, 0) + +#define DPMAIF_DL_BAT_INIT_ALLSET BIT(0) +#define DPMAIF_DL_BAT_FRG_INIT BIT(16) +#define DPMAIF_DL_BAT_INIT_EN BIT(31) +#define DPMAIF_DL_BAT_INIT_NOT_READY BIT(31) +#define DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT 0 + +#define DPMAIF_DL_PIT_INIT_ALLSET BIT(0) +#define DPMAIF_DL_PIT_INIT_EN BIT(31) +#define DPMAIF_DL_PIT_INIT_NOT_READY BIT(31) + +#define DPMAIF_BAT_REMAIN_SZ_BASE 16 +#define DPMAIF_BAT_BUFFER_SZ_BASE 128 +#define DPMAIF_FRG_BUFFER_SZ_BASE 128 + +#define DLQ_PIT_IDX_SIZE 0x20 + +#define DPMAIF_PIT_SIZE_MSK GENMASK(17, 0) + 
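+/* Editorial note, not part of the original patch: buffer sizes are programmed
+ * in units of their *_SZ_BASE granularity, e.g. the BAT buffer size field is
+ * written as
+ *   FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK,
+ *              DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE)
+ * which for 128 * 28 bytes programs 28 units of 128 bytes.
+ */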
+#define DPMAIF_PIT_REM_CNT_MSK GENMASK(17, 0) + +#define DPMAIF_BAT_EN_MSK BIT(16) +#define DPMAIF_FRG_EN_MSK BIT(28) +#define DPMAIF_BAT_SIZE_MSK GENMASK(15, 0) + +#define DPMAIF_BAT_BID_MAXCNT_MSK GENMASK(31, 16) +#define DPMAIF_BAT_REMAIN_MINSZ_MSK GENMASK(15, 8) +#define DPMAIF_PIT_CHK_NUM_MSK GENMASK(31, 24) +#define DPMAIF_BAT_BUF_SZ_MSK GENMASK(16, 8) +#define DPMAIF_FRG_BUF_SZ_MSK GENMASK(16, 8) +#define DPMAIF_BAT_RSV_LEN_MSK GENMASK(7, 0) +#define DPMAIF_PKT_ALIGN_MSK GENMASK(23, 22) + +#define DPMAIF_BAT_CHECK_THRES_MSK GENMASK(21, 16) +#define DPMAIF_FRG_CHECK_THRES_MSK GENMASK(7, 0) + +#define DPMAIF_PKT_ALIGN_EN BIT(23) + +#define DPMAIF_DRB_SIZE_MSK GENMASK(15, 0) + +#define DPMAIF_DL_RD_WR_IDX_MSK GENMASK(17, 0) + +/* DPMAIF_UL_CHK_BUSY */ +#define DPMAIF_UL_IDLE_STS BIT(11) +/* DPMAIF_DL_CHK_BUSY */ +#define DPMAIF_DL_IDLE_STS BIT(23) +/* DPMAIF_AO_DL_RDY_CHK_THRES */ +#define DPMAIF_DL_PKT_CHECKSUM_EN BIT(31) +#define DPMAIF_PORT_MODE_PCIE BIT(30) +#define DPMAIF_DL_BURST_PIT_EN BIT(13) +/* DPMAIF_DL_BAT_INIT_CON1 */ +#define DPMAIF_DL_BAT_CACHE_PRI BIT(22) +/* DPMAIF_AP_MEM_CLR */ +#define DPMAIF_MEM_CLR BIT(0) +/* DPMAIF_AP_OVERWRITE_CFG */ +#define DPMAIF_SRAM_SYNC BIT(0) +/* DPMAIF_AO_UL_INIT_SET */ +#define DPMAIF_UL_INIT_DONE BIT(0) +/* DPMAIF_AO_DL_INIT_SET */ +#define DPMAIF_DL_INIT_DONE BIT(0) +/* DPMAIF_AO_DL_PIT_SEQ_END */ +#define DPMAIF_DL_PIT_SEQ_MSK GENMASK(7, 0) +/* DPMAIF_UL_RESERVE_AO_RW */ +#define DPMAIF_PCIE_MODE_SET_VALUE 0x55 +/* DPMAIF_AP_CG_EN */ +#define DPMAIF_CG_EN 0x7f + +#define DPMAIF_UDL_IP_BUSY BIT(0) +#define DPMAIF_DL_INT_DLQ0_QDONE BIT(8) +#define DPMAIF_DL_INT_DLQ1_QDONE BIT(9) +#define DPMAIF_DL_INT_DLQ0_PITCNT_LEN BIT(10) +#define DPMAIF_DL_INT_DLQ1_PITCNT_LEN BIT(11) +#define DPMAIF_DL_INT_Q2TOQ1 BIT(24) +#define DPMAIF_DL_INT_Q2APTOP BIT(25) + +#define DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS GENMASK(15, 0) +#define DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK GENMASK(31, 16) + +/* DPMAIF DLQ HW configure */ +#define DPMAIF_AGG_MAX_LEN_DF 65535 +#define DPMAIF_AGG_TBL_ENT_NUM_DF 50 +#define DPMAIF_HASH_PRIME_DF 13 +#define DPMAIF_MID_TIMEOUT_THRES_DF 100 +#define DPMAIF_DLQ_TIMEOUT_THRES_DF 100 +#define DPMAIF_DLQ_PRS_THRES_DF 10 +#define DPMAIF_DLQ_HASH_BIT_CHOOSE_DF 0 + +#define DPMAIF_DLQPIT_EN_MSK BIT(20) +#define DPMAIF_DLQPIT_CHAN_OFS 16 +#define DPMAIF_ADD_DLQ_PIT_CHAN_OFS 20 + #endif /* __T7XX_REG_H__ */ -- cgit v1.2.3 From d642b012df70a76dd5723f2d426b40bffe83ac49 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:05 -0700 Subject: net: wwan: t7xx: Add data path interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Data Path Modem AP Interface (DPMAIF) HIF layer provides methods for initialization, ISR, control and event handling of TX/RX flows. DPMAIF TX Exposes the 'dmpaif_tx_send_skb' function which can be used by the network device to transmit packets. The uplink data management uses a Descriptor Ring Buffer (DRB). First DRB entry is a message type that will be followed by 1 or more normal DRB entries. Message type DRB will hold the skb information and each normal DRB entry holds a pointer to the skb payload. DPMAIF RX The downlink buffer management uses Buffer Address Table (BAT) and Packet Information Table (PIT) rings. The BAT ring holds the address of skb data buffer for the HW to use, while the PIT contains metadata about a whole network packet including a reference to the BAT entry holding the data buffer address. 
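As a minimal illustration (a sketch added here, not code from the patch), the ring accounting the HIF layer relies on reduces to modular index arithmetic; it mirrors t7xx_ring_buf_rd_wr_count() introduced below, with hypothetical helper names:

/* Entries the consumer may still read from a circular ring of total_cnt slots. */
static unsigned int ring_used(unsigned int total_cnt, unsigned int rd_idx,
			      unsigned int wr_idx)
{
	return (wr_idx - rd_idx + total_cnt) % total_cnt;
}

/* Slots the producer may still write; one slot stays unused so a full ring
 * can be told apart from an empty one.
 */
static unsigned int ring_free(unsigned int total_cnt, unsigned int rd_idx,
			      unsigned int wr_idx)
{
	return (rd_idx - wr_idx - 1 + total_cnt) % total_cnt;
}

The same arithmetic applies to the DRB, BAT and PIT rings; only the producer changes (the driver for DRB and BAT, the modem for PIT).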
The driver reads the PIT and BAT entries written by the modem, when reaching a threshold, the driver will reload the PIT and BAT rings. Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Sergey Ryazanov Reviewed-by: Ilpo Järvinen Signed-off-by: David S. Miller --- drivers/net/wwan/t7xx/Makefile | 4 + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 484 +++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 205 +++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 1220 ++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h | 116 +++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 651 +++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h | 78 ++ 7 files changed, 2758 insertions(+) create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c create mode 100644 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile index 9eec2e2472fb..04a9ba50dc14 100644 --- a/drivers/net/wwan/t7xx/Makefile +++ b/drivers/net/wwan/t7xx/Makefile @@ -13,3 +13,7 @@ mtk_t7xx-y:= t7xx_pci.o \ t7xx_port_proxy.o \ t7xx_port_ctrl_msg.o \ t7xx_port_wwan.o \ + t7xx_hif_dpmaif.o \ + t7xx_hif_dpmaif_tx.o \ + t7xx_hif_dpmaif_rx.o \ + t7xx_dpmaif.o \ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c new file mode 100644 index 000000000000..2e9a8d7ba693 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_pci.h" +#include "t7xx_pcie_mac.h" +#include "t7xx_state_monitor.h" + +unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx) +{ + buf_idx++; + + return buf_idx < buf_len ? 
buf_idx : 0; +} + +unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx, + unsigned int wr_idx, enum dpmaif_rdwr rd_wr) +{ + int pkt_cnt; + + if (rd_wr == DPMAIF_READ) + pkt_cnt = wr_idx - rd_idx; + else + pkt_cnt = rd_idx - wr_idx - 1; + + if (pkt_cnt < 0) + pkt_cnt += total_cnt; + + return (unsigned int)pkt_cnt; +} + +static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_isr_para *isr_para; + int i; + + for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + } +} + +static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_isr_para *isr_para; + int i; + + for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + } +} + +static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para) +{ + struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl; + struct dpmaif_hw_intr_st_para intr_status; + struct device *dev = dpmaif_ctrl->dev; + struct dpmaif_hw_info *hw_info; + int i; + + memset(&intr_status, 0, sizeof(intr_status)); + hw_info = &dpmaif_ctrl->hw_info; + + if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) { + dev_err(dev, "Failed to get HW interrupt count\n"); + return; + } + + t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + + for (i = 0; i < intr_status.intr_cnt; i++) { + switch (intr_status.intr_types[i]) { + case DPF_INTR_UL_DONE: + t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]); + break; + + case DPF_INTR_UL_DRB_EMPTY: + case DPF_INTR_UL_MD_NOTREADY: + case DPF_INTR_UL_MD_PWR_NOTREADY: + /* No need to log an error for these */ + break; + + case DPF_INTR_DL_BATCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n"); + t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info); + break; + + case DPF_INTR_DL_PITCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n"); + t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info); + break; + + case DPF_INTR_DL_Q0_PITCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n"); + t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT); + break; + + case DPF_INTR_DL_Q1_PITCNT_LEN_ERR: + dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n"); + t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1); + break; + + case DPF_INTR_DL_DONE: + case DPF_INTR_DL_Q0_DONE: + case DPF_INTR_DL_Q1_DONE: + t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]); + break; + + default: + dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n", + intr_status.intr_types[i]); + } + } +} + +static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data) +{ + struct dpmaif_isr_para *isr_para = data; + struct dpmaif_ctrl *dpmaif_ctrl; + + dpmaif_ctrl = isr_para->dpmaif_ctrl; + if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { + dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n"); + return IRQ_HANDLED; + } + + t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + t7xx_dpmaif_irq_cb(isr_para); + t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int); + return IRQ_HANDLED; +} + +static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_isr_para *isr_para; + unsigned char 
i; + + dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT; + dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + isr_para->dpmaif_ctrl = dpmaif_ctrl; + isr_para->dlq_id = i; + isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i]; + } +} + +static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev; + struct dpmaif_isr_para *isr_para; + enum t7xx_int int_type; + int i; + + t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl); + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + isr_para = &dpmaif_ctrl->isr_para[i]; + int_type = isr_para->pcie_int; + t7xx_pcie_mac_clear_int(t7xx_dev, int_type); + + t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler; + t7xx_dev->intr_thread[int_type] = NULL; + t7xx_dev->callback_param[int_type] = isr_para; + + t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type); + t7xx_pcie_mac_set_int(t7xx_dev, int_type); + } +} + +static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rx_q; + struct dpmaif_tx_queue *tx_q; + int ret, rx_idx, tx_idx, i; + + ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret); + return ret; + } + + ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret); + goto err_free_normal_bat; + } + + for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) { + rx_q = &dpmaif_ctrl->rxq[rx_idx]; + rx_q->index = rx_idx; + rx_q->dpmaif_ctrl = dpmaif_ctrl; + ret = t7xx_dpmaif_rxq_init(rx_q); + if (ret) + goto err_free_rxq; + } + + for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) { + tx_q = &dpmaif_ctrl->txq[tx_idx]; + tx_q->index = tx_idx; + tx_q->dpmaif_ctrl = dpmaif_ctrl; + ret = t7xx_dpmaif_txq_init(tx_q); + if (ret) + goto err_free_txq; + } + + ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n"); + goto err_free_txq; + } + + ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl); + if (ret) + goto err_thread_rel; + + return 0; + +err_thread_rel: + t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); + +err_free_txq: + for (i = 0; i < tx_idx; i++) { + tx_q = &dpmaif_ctrl->txq[i]; + t7xx_dpmaif_txq_free(tx_q); + } + +err_free_rxq: + for (i = 0; i < rx_idx; i++) { + rx_q = &dpmaif_ctrl->rxq[i]; + t7xx_dpmaif_rxq_free(rx_q); + } + + t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag); + +err_free_normal_bat: + t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req); + + return ret; +} + +static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rx_q; + struct dpmaif_tx_queue *tx_q; + int i; + + t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl); + t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl); + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + tx_q = &dpmaif_ctrl->txq[i]; + t7xx_dpmaif_txq_free(tx_q); + } + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + rx_q = &dpmaif_ctrl->rxq[i]; + t7xx_dpmaif_rxq_free(rx_q); + } +} + +static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; + struct dpmaif_hw_params hw_init_para; + struct dpmaif_rx_queue *rxq; + struct dpmaif_tx_queue *txq; + unsigned int buf_cnt; + int i, ret = 0; + + if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON) + return -EFAULT; + + 
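+ /* Zero hw_init_para, then collect the DMA base address and element count
+  * of every RX BAT, fragment BAT and PIT ring, plus every TX DRB ring, so
+  * t7xx_dpmaif_hw_init() can program them into the HW.
+  */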
memset(&hw_init_para, 0, sizeof(hw_init_para)); + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + rxq = &dpmaif_ctrl->rxq[i]; + rxq->que_started = true; + rxq->index = i; + rxq->budget = rxq->bat_req->bat_size_cnt - 1; + + hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr; + hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt; + hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr; + hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt; + hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr; + hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt; + } + + bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt); + buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1; + ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret); + return ret; + } + + buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1; + ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret); + goto err_free_normal_bat; + } + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + txq = &dpmaif_ctrl->txq[i]; + txq->que_started = true; + + hw_init_para.drb_base_addr[i] = txq->drb_bus_addr; + hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt; + } + + ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para); + if (ret) { + dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret); + goto err_free_frag_bat; + } + + ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1); + if (ret) + goto err_free_frag_bat; + + ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1); + if (ret) + goto err_free_frag_bat; + + t7xx_dpmaif_ul_clr_all_intr(hw_info); + t7xx_dpmaif_dl_clr_all_intr(hw_info); + dpmaif_ctrl->state = DPMAIF_STATE_PWRON; + t7xx_dpmaif_enable_irq(dpmaif_ctrl); + wake_up(&dpmaif_ctrl->tx_wq); + return 0; + +err_free_frag_bat: + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); + +err_free_normal_bat: + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); + + return ret; +} + +static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl) +{ + t7xx_dpmaif_tx_stop(dpmaif_ctrl); + t7xx_dpmaif_rx_stop(dpmaif_ctrl); +} + +static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl) +{ + t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); + t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); +} + +static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (!dpmaif_ctrl->dpmaif_sw_init_done) { + dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n"); + return -EFAULT; + } + + if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF) + return -EFAULT; + + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + dpmaif_ctrl->state = DPMAIF_STATE_PWROFF; + t7xx_dpmaif_stop_sw(dpmaif_ctrl); + t7xx_dpmaif_tx_clear(dpmaif_ctrl); + t7xx_dpmaif_rx_clear(dpmaif_ctrl); + return 0; +} + +int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state) +{ + int ret = 0; + + switch (state) { + case MD_STATE_WAITING_FOR_HS1: + ret = t7xx_dpmaif_start(dpmaif_ctrl); + break; + + case MD_STATE_EXCEPTION: + ret = t7xx_dpmaif_stop(dpmaif_ctrl); + break; + + case MD_STATE_STOPPED: + ret = t7xx_dpmaif_stop(dpmaif_ctrl); + break; + + case MD_STATE_WAITING_TO_STOP: + t7xx_dpmaif_stop_hw(dpmaif_ctrl); + break; + + default: + break; + } + + return ret; +} + +/** + * t7xx_dpmaif_hif_init() - Initialize data path. 
+ * @t7xx_dev: MTK context structure. + * @callbacks: Callbacks implemented by the network layer to handle RX skb and + * event notifications. + * + * Allocate and initialize datapath control block. + * Register datapath ISR, TX and RX resources. + * + * Return: + * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure. + * * NULL - In case of error. + */ +struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, + struct dpmaif_callbacks *callbacks) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct dpmaif_ctrl *dpmaif_ctrl; + int ret; + + if (!callbacks) + return NULL; + + dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL); + if (!dpmaif_ctrl) + return NULL; + + dpmaif_ctrl->t7xx_dev = t7xx_dev; + dpmaif_ctrl->callbacks = callbacks; + dpmaif_ctrl->dev = dev; + dpmaif_ctrl->dpmaif_sw_init_done = false; + dpmaif_ctrl->hw_info.dev = dev; + dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base - + t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + + t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl); + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + + ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl); + if (ret) { + dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret); + return NULL; + } + + dpmaif_ctrl->dpmaif_sw_init_done = true; + return dpmaif_ctrl; +} + +void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (dpmaif_ctrl->dpmaif_sw_init_done) { + t7xx_dpmaif_stop(dpmaif_ctrl); + t7xx_dpmaif_sw_release(dpmaif_ctrl); + dpmaif_ctrl->dpmaif_sw_init_done = false; + } +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h new file mode 100644 index 000000000000..1d5de7ceeeca --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Amir Hanania + * Haijun Liu + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Eliot Lee + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMAIF_H__ +#define __T7XX_HIF_DPMAIF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_pci.h" +#include "t7xx_state_monitor.h" + +/* SKB control buffer */ +struct t7xx_skb_cb { + u8 netif_idx; + u8 txq_number; + u8 rx_pkt_type; +}; + +#define T7XX_SKB_CB(__skb) ((struct t7xx_skb_cb *)(__skb)->cb) + +enum dpmaif_rdwr { + DPMAIF_READ, + DPMAIF_WRITE, +}; + +/* Structure of DL BAT */ +struct dpmaif_cur_rx_skb_info { + bool msg_pit_received; + struct sk_buff *cur_skb; + unsigned int cur_chn_idx; + unsigned int check_sum; + unsigned int pit_dp; + unsigned int pkt_type; + int err_payload; +}; + +struct dpmaif_bat { + unsigned int p_buffer_addr; + unsigned int buffer_addr_ext; +}; + +struct dpmaif_bat_skb { + struct sk_buff *skb; + dma_addr_t data_bus_addr; + unsigned int data_len; +}; + +struct dpmaif_bat_page { + struct page *page; + dma_addr_t data_bus_addr; + unsigned int offset; + unsigned int data_len; +}; + +enum bat_type { + BAT_TYPE_NORMAL, + BAT_TYPE_FRAG, +}; + +struct dpmaif_bat_request { + void *bat_base; + dma_addr_t bat_bus_addr; + unsigned int bat_size_cnt; + unsigned int bat_wr_idx; + unsigned int bat_release_rd_idx; + void *bat_skb; + unsigned int pkt_buf_sz; + unsigned long *bat_bitmap; + atomic_t refcnt; + spinlock_t mask_lock; /* Protects BAT mask */ + enum bat_type type; +}; + +struct dpmaif_rx_queue { + unsigned int index; + bool que_started; + unsigned int budget; + + void *pit_base; + dma_addr_t pit_bus_addr; + unsigned int pit_size_cnt; + + unsigned int pit_rd_idx; + unsigned int pit_wr_idx; + unsigned int pit_release_rd_idx; + + struct dpmaif_bat_request *bat_req; + struct dpmaif_bat_request *bat_frag; + + wait_queue_head_t rx_wq; + struct task_struct *rx_thread; + struct sk_buff_head skb_list; + unsigned int skb_list_max_len; + + struct workqueue_struct *worker; + struct work_struct dpmaif_rxq_work; + + atomic_t rx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned int expect_pit_seq; + unsigned int pit_remain_release_cnt; + struct dpmaif_cur_rx_skb_info rx_data_info; +}; + +struct dpmaif_tx_queue { + unsigned int index; + bool que_started; + atomic_t tx_budget; + void *drb_base; + dma_addr_t drb_bus_addr; + unsigned int drb_size_cnt; + unsigned int drb_wr_idx; + unsigned int drb_rd_idx; + unsigned int drb_release_rd_idx; + void *drb_skb_base; + wait_queue_head_t req_wq; + struct workqueue_struct *worker; + struct work_struct dpmaif_tx_work; + spinlock_t tx_lock; /* Protects txq DRB */ + atomic_t tx_processing; + + struct dpmaif_ctrl *dpmaif_ctrl; + struct sk_buff_head tx_skb_head; +}; + +struct dpmaif_isr_para { + struct dpmaif_ctrl *dpmaif_ctrl; + unsigned char pcie_int; + unsigned char dlq_id; +}; + +enum dpmaif_state { + DPMAIF_STATE_MIN, + DPMAIF_STATE_PWROFF, + DPMAIF_STATE_PWRON, + DPMAIF_STATE_EXCEPTION, + DPMAIF_STATE_MAX +}; + +enum dpmaif_txq_state { + DMPAIF_TXQ_STATE_IRQ, + DMPAIF_TXQ_STATE_FULL, +}; + +struct dpmaif_callbacks { + void (*state_notify)(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int txq_number); + void (*recv_skb)(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb); +}; + +struct dpmaif_ctrl { + struct device *dev; + struct t7xx_pci_dev *t7xx_dev; + enum dpmaif_state state; + bool dpmaif_sw_init_done; + struct dpmaif_hw_info hw_info; + 
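+ /* One TX queue per UL DRB ring; all RX queues share bat_req and bat_frag */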
struct dpmaif_tx_queue txq[DPMAIF_TXQ_NUM]; + struct dpmaif_rx_queue rxq[DPMAIF_RXQ_NUM]; + + unsigned char rxq_int_mapping[DPMAIF_RXQ_NUM]; + struct dpmaif_isr_para isr_para[DPMAIF_RXQ_NUM]; + + struct dpmaif_bat_request bat_req; + struct dpmaif_bat_request bat_frag; + struct workqueue_struct *bat_release_wq; + struct work_struct bat_release_work; + + wait_queue_head_t tx_wq; + struct task_struct *tx_thread; + + struct dpmaif_callbacks *callbacks; +}; + +struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, + struct dpmaif_callbacks *callbacks); +void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state); +unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx); +unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx, + unsigned int wr_idx, enum dpmaif_rdwr); + +#endif /* __T7XX_HIF_DPMAIF_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c new file mode 100644 index 000000000000..d110f7edf56b --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -0,0 +1,1220 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Andy Shevchenko + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_pci.h" + +#define DPMAIF_BAT_COUNT 8192 +#define DPMAIF_FRG_COUNT 4814 +#define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2) + +#define DPMAIF_BAT_CNT_THRESHOLD 30 +#define DPMAIF_PIT_CNT_THRESHOLD 60 +#define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0) +#define DPMAIF_NOTIFY_RELEASE_COUNT 128 +#define DPMAIF_POLL_PIT_TIME_US 20 +#define DPMAIF_POLL_PIT_MAX_TIME_US 2000 +#define DPMAIF_WQ_TIME_LIMIT_MS 2 +#define DPMAIF_CS_RESULT_PASS 0 + +/* Packet type */ +#define DES_PT_PD 0 +#define DES_PT_MSG 1 +/* Buffer type */ +#define PKT_BUF_FRAG 1 + +static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info) +{ + u32 value; + + value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer)); + value <<= 13; + value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header)); + return value; +} + +static int t7xx_dpmaif_net_rx_push_thread(void *arg) +{ + struct dpmaif_rx_queue *q = arg; + struct dpmaif_ctrl *hif_ctrl; + struct dpmaif_callbacks *cb; + + hif_ctrl = q->dpmaif_ctrl; + cb = hif_ctrl->callbacks; + + while (!kthread_should_stop()) { + struct sk_buff *skb; + unsigned long flags; + + if (skb_queue_empty(&q->skb_list)) { + if (wait_event_interruptible(q->rx_wq, + !skb_queue_empty(&q->skb_list) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + } + + spin_lock_irqsave(&q->skb_list.lock, flags); + skb = __skb_dequeue(&q->skb_list); + spin_unlock_irqrestore(&q->skb_list.lock, flags); + + if (!skb) + continue; + + cb->recv_skb(hif_ctrl->t7xx_dev, skb); + cond_resched(); + } + + return 0; +} + +static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, 
const unsigned int bat_cnt) +{ + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; + struct dpmaif_bat_request *bat_req = rxq->bat_req; + unsigned int old_rl_idx, new_wr_idx, old_wr_idx; + + if (!rxq->que_started) { + dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index); + return -EINVAL; + } + + old_rl_idx = bat_req->bat_release_rd_idx; + old_wr_idx = bat_req->bat_wr_idx; + new_wr_idx = old_wr_idx + bat_cnt; + + if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx) + goto err_flow; + + if (new_wr_idx >= bat_req->bat_size_cnt) { + new_wr_idx -= bat_req->bat_size_cnt; + if (new_wr_idx >= old_rl_idx) + goto err_flow; + } + + bat_req->bat_wr_idx = new_wr_idx; + return 0; + +err_flow: + dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n"); + return -EINVAL; +} + +static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int size, struct dpmaif_bat_skb *cur_skb) +{ + dma_addr_t data_bus_addr; + struct sk_buff *skb; + size_t data_len; + + skb = __dev_alloc_skb(size, GFP_KERNEL); + if (!skb) + return false; + + data_len = skb_data_area_size(skb); + data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) { + dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); + return false; + } + + cur_skb->skb = skb; + cur_skb->data_bus_addr = data_bus_addr; + cur_skb->data_len = data_len; + + return true; +} + +static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base, + unsigned int index) +{ + struct dpmaif_bat_skb *bat_skb = bat_skb_base + index; + + if (bat_skb->skb) { + dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); + dev_kfree_skb(bat_skb->skb); + bat_skb->skb = NULL; + } +} + +/** + * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @q_num: Queue number. + * @buf_cnt: Number of buffers to allocate. + * @initial: Indicates if the ring is being populated for the first time. + * + * Allocate skb and store the start address of the data buffer into the BAT ring. + * If this is not the initial call, notify the HW about the new entries. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. 
+ */ +int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req, + const unsigned int q_num, const unsigned int buf_cnt, + const bool initial) +{ + unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx; + int ret; + + if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) + return -EINVAL; + + /* Check BAT buffer space */ + bat_max_cnt = bat_req->bat_size_cnt; + + bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx, + bat_req->bat_wr_idx, DPMAIF_WRITE); + if (buf_cnt > bat_cnt) + return -ENOMEM; + + bat_start_idx = bat_req->bat_wr_idx; + + for (i = 0; i < buf_cnt; i++) { + unsigned int cur_bat_idx = bat_start_idx + i; + struct dpmaif_bat_skb *cur_skb; + struct dpmaif_bat *cur_bat; + + if (cur_bat_idx >= bat_max_cnt) + cur_bat_idx -= bat_max_cnt; + + cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx; + if (!cur_skb->skb && + !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb)) + break; + + cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; + cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr); + cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr); + } + + if (!i) + return -ENOMEM; + + ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i); + if (ret) + goto err_unmap_skbs; + + if (!initial) { + unsigned int hw_wr_idx; + + ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i); + if (ret) + goto err_unmap_skbs; + + hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info, + DPF_RX_QNO_DFT); + if (hw_wr_idx != bat_req->bat_wr_idx) { + ret = -EFAULT; + dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n"); + goto err_unmap_skbs; + } + } + + return 0; + +err_unmap_skbs: + while (--i > 0) + t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); + + return ret; +} + +static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq, + const unsigned int rel_entry_num) +{ + struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; + unsigned int old_rel_idx, new_rel_idx, hw_wr_idx; + int ret; + + if (!rxq->que_started) + return 0; + + if (rel_entry_num >= rxq->pit_size_cnt) { + dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n"); + return -EINVAL; + } + + old_rel_idx = rxq->pit_release_rd_idx; + new_rel_idx = old_rel_idx + rel_entry_num; + hw_wr_idx = rxq->pit_wr_idx; + if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt) + new_rel_idx -= rxq->pit_size_cnt; + + ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num); + if (ret) { + dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret); + return ret; + } + + rxq->pit_release_rd_idx = new_rel_idx; + return 0; +} + +static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&bat_req->mask_lock, flags); + set_bit(idx, bat_req->bat_bitmap); + spin_unlock_irqrestore(&bat_req->mask_lock, flags); +} + +static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, + const unsigned int cur_bid) +{ + struct dpmaif_bat_request *bat_frag = rxq->bat_frag; + struct dpmaif_bat_page *bat_page; + + if (cur_bid >= DPMAIF_FRG_COUNT) + return -EINVAL; + + bat_page = bat_frag->bat_skb + cur_bid; + if (!bat_page->page) + return -EINVAL; + + return 0; +} + +static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base, + unsigned int index) +{ + struct dpmaif_bat_page *bat_page = bat_page_base + index; + + if (bat_page->page) { + 
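+ /* Undo the dma_map_page() and drop the page reference taken when this
+  * fragment BAT entry was filled in t7xx_dpmaif_rx_frag_alloc().
+  */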
dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE); + put_page(bat_page->page); + bat_page->page = NULL; + } +} + +/** + * t7xx_dpmaif_rx_frag_alloc() - Allocates buffers for the Fragment BAT ring. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @buf_cnt: Number of buffers to allocate. + * @initial: Indicates if the ring is being populated for the first time. + * + * Fragment BAT is used when the received packet does not fit in a normal BAT entry. + * This function allocates a page fragment and stores the start address of the page + * into the Fragment BAT ring. + * If this is not the initial call, notify the HW about the new entries. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. + */ +int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const unsigned int buf_cnt, const bool initial) +{ + unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx; + struct dpmaif_bat_page *bat_skb = bat_req->bat_skb; + int ret = 0, i; + + if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt) + return -EINVAL; + + buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt, + bat_req->bat_release_rd_idx, bat_req->bat_wr_idx, + DPMAIF_WRITE); + if (buf_cnt > buf_space) { + dev_err(dpmaif_ctrl->dev, + "Requested more buffers than the space available in RX frag ring\n"); + return -EINVAL; + } + + for (i = 0; i < buf_cnt; i++) { + struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx; + struct dpmaif_bat *cur_bat; + dma_addr_t data_base_addr; + + if (!cur_page->page) { + unsigned long offset; + struct page *page; + void *data; + + data = netdev_alloc_frag(bat_req->pkt_buf_sz); + if (!data) + break; + + page = virt_to_head_page(data); + offset = data - page_address(page); + + data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset, + bat_req->pkt_buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) { + put_page(virt_to_head_page(data)); + dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n"); + break; + } + + cur_page->page = page; + cur_page->data_bus_addr = data_base_addr; + cur_page->offset = offset; + cur_page->data_len = bat_req->pkt_buf_sz; + } + + data_base_addr = cur_page->data_bus_addr; + cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx; + cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr); + cur_bat->p_buffer_addr = lower_32_bits(data_base_addr); + cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx); + } + + bat_req->bat_wr_idx = cur_bat_idx; + + if (!initial) + t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i); + + if (i < buf_cnt) { + ret = -ENOMEM; + if (initial) { + while (--i > 0) + t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); + } + } + + return ret; +} + +static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct sk_buff *skb) +{ + unsigned long long data_bus_addr, data_base_addr; + struct device *dev = rxq->dpmaif_ctrl->dev; + struct dpmaif_bat_page *page_info; + unsigned int data_len; + int data_offset; + + page_info = rxq->bat_frag->bat_skb; + page_info += t7xx_normal_pit_bid(pkt_info); + dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); + + if (!page_info->page) + return -EINVAL; + + data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); + data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); + data_base_addr = 
page_info->data_bus_addr; + data_offset = data_bus_addr - data_base_addr; + data_offset += page_info->offset; + data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, + data_offset, data_len, page_info->data_len); + + page_info->page = NULL; + page_info->offset = 0; + page_info->data_len = 0; + return 0; +} + +static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + const struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info); + int ret; + + ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid); + if (ret < 0) + return ret; + + ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb); + if (ret < 0) { + dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret); + return ret; + } + + t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid); + return 0; +} + +static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid) +{ + struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb; + + bat_skb += cur_bid; + if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb) + return -EINVAL; + + return 0; +} + +static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit) +{ + return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer)); +} + +static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pit) +{ + unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq; + + if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq, + cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US, + DPMAIF_POLL_PIT_MAX_TIME_US, false, pit)) + return -EFAULT; + + rxq->expect_pit_seq++; + if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE) + rxq->expect_pit_seq = 0; + + return 0; +} + +static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req) +{ + unsigned int zero_index; + unsigned long flags; + + spin_lock_irqsave(&bat_req->mask_lock, flags); + + zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt, + bat_req->bat_release_rd_idx); + + if (zero_index < bat_req->bat_size_cnt) { + spin_unlock_irqrestore(&bat_req->mask_lock, flags); + return zero_index - bat_req->bat_release_rd_idx; + } + + /* limiting the search till bat_release_rd_idx */ + zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx); + spin_unlock_irqrestore(&bat_req->mask_lock, flags); + return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index; +} + +static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq, + const unsigned int rel_entry_num, + const enum bat_type buf_type) +{ + struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info; + unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i; + struct dpmaif_bat_request *bat; + unsigned long flags; + + if (!rxq->que_started || !rel_entry_num) + return -EINVAL; + + if (buf_type == BAT_TYPE_FRAG) { + bat = rxq->bat_frag; + hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index); + } else { + bat = rxq->bat_req; + hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index); + } + + if (rel_entry_num >= bat->bat_size_cnt) + return -EINVAL; + + old_rel_idx = bat->bat_release_rd_idx; + new_rel_idx = old_rel_idx + rel_entry_num; + + /* Do not need to release if the queue is empty */ + if (bat->bat_wr_idx == old_rel_idx) + return 0; + + if (hw_rd_idx >= old_rel_idx) { + if (new_rel_idx > hw_rd_idx) + return -EINVAL; + } + + if (new_rel_idx 
>= bat->bat_size_cnt) { + new_rel_idx -= bat->bat_size_cnt; + if (new_rel_idx > hw_rd_idx) + return -EINVAL; + } + + spin_lock_irqsave(&bat->mask_lock, flags); + for (i = 0; i < rel_entry_num; i++) { + unsigned int index = bat->bat_release_rd_idx + i; + + if (index >= bat->bat_size_cnt) + index -= bat->bat_size_cnt; + + clear_bit(index, bat->bat_bitmap); + } + spin_unlock_irqrestore(&bat->mask_lock, flags); + + bat->bat_release_rd_idx = new_rel_idx; + return rel_entry_num; +} + +static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq) +{ + int ret; + + if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt); + if (ret) + return ret; + + rxq->pit_remain_release_cnt = 0; + return 0; +} + +static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq) +{ + unsigned int bid_cnt; + int ret; + + bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req); + if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL); + if (ret <= 0) { + dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret); + return ret; + } + + ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false); + if (ret < 0) + dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret); + + return ret; +} + +static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq) +{ + unsigned int bid_cnt; + int ret; + + bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag); + if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD) + return 0; + + ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG); + if (ret <= 0) { + dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret); + return ret; + } + + return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false); +} + +static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *msg_pit, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + int header = le32_to_cpu(msg_pit->header); + + skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header); + skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header); + skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header); + skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3)); +} + +static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned long long data_bus_addr, data_base_addr; + struct device *dev = rxq->dpmaif_ctrl->dev; + struct dpmaif_bat_skb *bat_skb; + unsigned int data_len; + struct sk_buff *skb; + int data_offset; + + bat_skb = rxq->bat_req->bat_skb; + bat_skb += t7xx_normal_pit_bid(pkt_info); + dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE); + + data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); + data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); + data_base_addr = bat_skb->data_bus_addr; + data_offset = data_bus_addr - data_base_addr; + data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); + skb = bat_skb->skb; + skb->len = 0; + skb_reset_tail_pointer(skb); + skb_reserve(skb, data_offset); + + if (skb->tail + data_len > skb->end) { + dev_err(dev, "No buffer space available\n"); + return -ENOBUFS; + } + + skb_put(skb, data_len); + skb_info->cur_skb = skb; + bat_skb->skb = NULL; + return 0; +} + +static int 
t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq, + const struct dpmaif_pit *pkt_info, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info); + int ret; + + ret = t7xx_bat_cur_bid_check(rxq, cur_bid); + if (ret < 0) + return ret; + + ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info); + if (ret < 0) { + dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret); + return ret; + } + + t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid); + return 0; +} + +static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq) +{ + struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + int ret; + + queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work); + + ret = t7xx_dpmaif_pit_release_and_add(rxq); + if (ret < 0) + dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret); + + return ret; +} + +static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb) +{ + unsigned long flags; + + spin_lock_irqsave(&rxq->skb_list.lock, flags); + if (rxq->skb_list.qlen < rxq->skb_list_max_len) + __skb_queue_tail(&rxq->skb_list, skb); + else + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&rxq->skb_list.lock, flags); +} + +static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq, + struct dpmaif_cur_rx_skb_info *skb_info) +{ + struct sk_buff *skb = skb_info->cur_skb; + struct t7xx_skb_cb *skb_cb; + u8 netif_id; + + skb_info->cur_skb = NULL; + + if (skb_info->pit_dp) { + dev_kfree_skb_any(skb); + return; + } + + skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY : + CHECKSUM_NONE; + netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx); + skb_cb = T7XX_SKB_CB(skb); + skb_cb->netif_idx = netif_id; + skb_cb->rx_pkt_type = skb_info->pkt_type; + t7xx_dpmaif_rx_skb_enqueue(rxq, skb); +} + +static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt, + const unsigned long timeout) +{ + unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0; + struct device *dev = rxq->dpmaif_ctrl->dev; + struct dpmaif_cur_rx_skb_info *skb_info; + int ret = 0; + + pit_len = rxq->pit_size_cnt; + skb_info = &rxq->rx_data_info; + cur_pit = rxq->pit_rd_idx; + + for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) { + struct dpmaif_pit *pkt_info; + u32 val; + + if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout)) + break; + + pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit; + if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) { + dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index); + return -EAGAIN; + } + + val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header)); + if (val == DES_PT_MSG) { + if (skb_info->msg_pit_received) + dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index); + + skb_info->msg_pit_received = true; + t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info); + } else { /* DES_PT_PD */ + val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header)); + if (val != PKT_BUF_FRAG) + ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info); + else if (!skb_info->cur_skb) + ret = -EINVAL; + else + ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info); + + if (ret < 0) { + skb_info->err_payload = 1; + dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index); + } + + val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header)); + if (!val) { + if (!skb_info->err_payload) { + t7xx_dpmaif_rx_skb(rxq, skb_info); + } else if (skb_info->cur_skb) { + dev_kfree_skb_any(skb_info->cur_skb); + skb_info->cur_skb 
= NULL; + } + + memset(skb_info, 0, sizeof(*skb_info)); + + recv_skb_cnt++; + if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) { + wake_up_all(&rxq->rx_wq); + recv_skb_cnt = 0; + } + } + } + + cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit); + rxq->pit_rd_idx = cur_pit; + rxq->pit_remain_release_cnt++; + + if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) { + ret = t7xx_dpmaifq_rx_notify_hw(rxq); + if (ret < 0) + break; + } + } + + if (recv_skb_cnt) + wake_up_all(&rxq->rx_wq); + + if (!ret) + ret = t7xx_dpmaifq_rx_notify_hw(rxq); + + if (ret) + return ret; + + return rx_cnt; +} + +static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq) +{ + unsigned int hw_wr_idx, pit_cnt; + + if (!rxq->que_started) + return 0; + + hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index); + pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx, + DPMAIF_READ); + rxq->pit_wr_idx = hw_wr_idx; + return pit_cnt; +} + +static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl, + const unsigned int q_num, const unsigned int budget) +{ + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num]; + unsigned long time_limit; + unsigned int cnt; + + time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS); + + while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) { + unsigned int rd_cnt; + int real_cnt; + + rd_cnt = min(cnt, budget); + + real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit); + if (real_cnt < 0) + return real_cnt; + + if (real_cnt < cnt) + return -EAGAIN; + } + + return 0; +} + +static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq) +{ + struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info; + int ret; + + ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget); + if (ret < 0) { + /* Try one more time */ + queue_work(rxq->worker, &rxq->dpmaif_rxq_work); + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + } else { + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index); + } +} + +static void t7xx_dpmaif_rxq_work(struct work_struct *work) +{ + struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); + struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + + atomic_set(&rxq->rx_processing, 1); + /* Ensure rx_processing is changed to 1 before actually begin RX flow */ + smp_mb(); + + if (!rxq->que_started) { + atomic_set(&rxq->rx_processing, 0); + dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index); + return; + } + + t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); + atomic_set(&rxq->rx_processing, 0); +} + +void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask) +{ + struct dpmaif_rx_queue *rxq; + int qno; + + qno = ffs(que_mask) - 1; + if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) { + dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno); + return; + } + + rxq = &dpmaif_ctrl->rxq[qno]; + queue_work(rxq->worker, &rxq->dpmaif_rxq_work); +} + +static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req) +{ + if (bat_req->bat_base) + dma_free_coherent(dpmaif_ctrl->dev, + bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), + bat_req->bat_base, bat_req->bat_bus_addr); +} + +/** + * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer. + * @dpmaif_ctrl: Pointer to DPMAIF context structure. + * @bat_req: Pointer to BAT request structure. + * @buf_type: BAT ring type. 
+ * + * This function allocates the BAT ring buffer shared with the HW device, also allocates + * a buffer used to store information about the BAT skbs for further release. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code. + */ +int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const enum bat_type buf_type) +{ + int sw_buf_size; + + if (buf_type == BAT_TYPE_FRAG) { + sw_buf_size = sizeof(struct dpmaif_bat_page); + bat_req->bat_size_cnt = DPMAIF_FRG_COUNT; + bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF; + } else { + sw_buf_size = sizeof(struct dpmaif_bat_skb); + bat_req->bat_size_cnt = DPMAIF_BAT_COUNT; + bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF; + } + + bat_req->type = buf_type; + bat_req->bat_wr_idx = 0; + bat_req->bat_release_rd_idx = 0; + + bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev, + bat_req->bat_size_cnt * sizeof(struct dpmaif_bat), + &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!bat_req->bat_base) + return -ENOMEM; + + /* For AP SW to record skb information */ + bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size, + GFP_KERNEL); + if (!bat_req->bat_skb) + goto err_free_dma_mem; + + bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL); + if (!bat_req->bat_bitmap) + goto err_free_dma_mem; + + spin_lock_init(&bat_req->mask_lock); + atomic_set(&bat_req->refcnt, 0); + return 0; + +err_free_dma_mem: + t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); + + return -ENOMEM; +} + +void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req) +{ + if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt)) + return; + + bitmap_free(bat_req->bat_bitmap); + bat_req->bat_bitmap = NULL; + + if (bat_req->bat_skb) { + unsigned int i; + + for (i = 0; i < bat_req->bat_size_cnt; i++) { + if (bat_req->type == BAT_TYPE_FRAG) + t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i); + else + t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i); + } + } + + t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req); +} + +static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq) +{ + rxq->pit_size_cnt = DPMAIF_PIT_COUNT; + rxq->pit_rd_idx = 0; + rxq->pit_wr_idx = 0; + rxq->pit_release_rd_idx = 0; + rxq->expect_pit_seq = 0; + rxq->pit_remain_release_cnt = 0; + memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); + + rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev, + rxq->pit_size_cnt * sizeof(struct dpmaif_pit), + &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!rxq->pit_base) + return -ENOMEM; + + rxq->bat_req = &rxq->dpmaif_ctrl->bat_req; + atomic_inc(&rxq->bat_req->refcnt); + + rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag; + atomic_inc(&rxq->bat_frag->refcnt); + return 0; +} + +static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq) +{ + if (!rxq->dpmaif_ctrl) + return; + + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req); + t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag); + + if (rxq->pit_base) + dma_free_coherent(rxq->dpmaif_ctrl->dev, + rxq->pit_size_cnt * sizeof(struct dpmaif_pit), + rxq->pit_base, rxq->pit_bus_addr); +} + +int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue) +{ + int ret; + + ret = t7xx_dpmaif_rx_alloc(queue); + if (ret < 0) { + dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret); + return ret; + } + + INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work); + + queue->worker = alloc_workqueue("dpmaif_rx%d_worker", + WQ_UNBOUND | WQ_MEM_RECLAIM | 
WQ_HIGHPRI, 1, queue->index); + if (!queue->worker) { + ret = -ENOMEM; + goto err_free_rx_buffer; + } + + init_waitqueue_head(&queue->rx_wq); + skb_queue_head_init(&queue->skb_list); + queue->skb_list_max_len = queue->bat_req->pkt_buf_sz; + queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread, + queue, "dpmaif_rx%d_push", queue->index); + + ret = PTR_ERR_OR_ZERO(queue->rx_thread); + if (ret) + goto err_free_workqueue; + + return 0; + +err_free_workqueue: + destroy_workqueue(queue->worker); + +err_free_rx_buffer: + t7xx_dpmaif_rx_buf_free(queue); + + return ret; +} + +void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue) +{ + if (queue->worker) + destroy_workqueue(queue->worker); + + if (queue->rx_thread) + kthread_stop(queue->rx_thread); + + skb_queue_purge(&queue->skb_list); + t7xx_dpmaif_rx_buf_free(queue); +} + +static void t7xx_dpmaif_bat_release_work(struct work_struct *work) +{ + struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work); + struct dpmaif_rx_queue *rxq; + + /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ + rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; + t7xx_dpmaif_bat_release_and_add(rxq); + t7xx_dpmaif_frag_bat_release_and_add(rxq); +} + +int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl) +{ + dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue", + WQ_MEM_RECLAIM, 1); + if (!dpmaif_ctrl->bat_release_wq) + return -ENOMEM; + + INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work); + return 0; +} + +void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl) +{ + flush_work(&dpmaif_ctrl->bat_release_work); + + if (dpmaif_ctrl->bat_release_wq) { + destroy_workqueue(dpmaif_ctrl->bat_release_wq); + dpmaif_ctrl->bat_release_wq = NULL; + } +} + +/** + * t7xx_dpmaif_rx_stop() - Suspend RX flow. + * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl. + * + * Wait for all the RX work to finish executing and mark the RX queue as paused. 
+ */ +void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + unsigned int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) { + struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i]; + int timeout, value; + + flush_work(&rxq->dpmaif_rxq_work); + + timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value, + !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US); + if (timeout) + dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n"); + + /* Ensure RX processing has stopped before we set rxq->que_started to false */ + smp_mb(); + rxq->que_started = false; + } +} + +static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq) +{ + int cnt, j = 0; + + flush_work(&rxq->dpmaif_rxq_work); + rxq->que_started = false; + + do { + cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, + rxq->pit_wr_idx, DPMAIF_READ); + + if (++j >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt); + break; + } + } while (cnt); + + memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit)); + memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat)); + bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt); + memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info)); + + rxq->pit_rd_idx = 0; + rxq->pit_wr_idx = 0; + rxq->pit_release_rd_idx = 0; + rxq->expect_pit_seq = 0; + rxq->pit_remain_release_cnt = 0; + rxq->bat_req->bat_release_rd_idx = 0; + rxq->bat_req->bat_wr_idx = 0; + rxq->bat_frag->bat_release_rd_idx = 0; + rxq->bat_frag->bat_wr_idx = 0; +} + +void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_RXQ_NUM; i++) + t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h new file mode 100644 index 000000000000..182f62dfe398 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Moises Veleta + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMA_RX_H__ +#define __T7XX_HIF_DPMA_RX_H__ + +#include +#include + +#include "t7xx_hif_dpmaif.h" + +#define NETIF_MASK GENMASK(4, 0) + +#define PKT_TYPE_IP4 0 +#define PKT_TYPE_IP6 1 + +/* Structure of DL PIT */ +struct dpmaif_pit { + __le32 header; + union { + struct { + __le32 data_addr_l; + __le32 data_addr_h; + __le32 footer; + } pd; + struct { + __le32 params_1; + __le32 params_2; + __le32 params_3; + } msg; + }; +}; + +/* PIT header fields */ +#define PD_PIT_DATA_LEN GENMASK(31, 16) +#define PD_PIT_BUFFER_ID GENMASK(15, 3) +#define PD_PIT_BUFFER_TYPE BIT(2) +#define PD_PIT_CONT BIT(1) +#define PD_PIT_PACKET_TYPE BIT(0) +/* PIT footer fields */ +#define PD_PIT_DLQ_DONE GENMASK(31, 30) +#define PD_PIT_ULQ_DONE GENMASK(29, 24) +#define PD_PIT_HEADER_OFFSET GENMASK(23, 19) +#define PD_PIT_BI_F GENMASK(18, 17) +#define PD_PIT_IG BIT(16) +#define PD_PIT_RES GENMASK(15, 11) +#define PD_PIT_H_BID GENMASK(10, 8) +#define PD_PIT_PIT_SEQ GENMASK(7, 0) + +#define MSG_PIT_DP BIT(31) +#define MSG_PIT_RES GENMASK(30, 27) +#define MSG_PIT_NETWORK_TYPE GENMASK(26, 24) +#define MSG_PIT_CHANNEL_ID GENMASK(23, 16) +#define MSG_PIT_RES2 GENMASK(15, 12) +#define MSG_PIT_HPC_IDX GENMASK(11, 8) +#define MSG_PIT_SRC_QID GENMASK(7, 5) +#define MSG_PIT_ERROR_BIT BIT(4) +#define MSG_PIT_CHECKSUM GENMASK(3, 2) +#define MSG_PIT_CONT BIT(1) +#define MSG_PIT_PACKET_TYPE BIT(0) + +#define MSG_PIT_HP_IDX GENMASK(31, 27) +#define MSG_PIT_CMD GENMASK(26, 24) +#define MSG_PIT_RES3 GENMASK(23, 21) +#define MSG_PIT_FLOW GENMASK(20, 16) +#define MSG_PIT_COUNT GENMASK(15, 0) + +#define MSG_PIT_HASH GENMASK(31, 24) +#define MSG_PIT_RES4 GENMASK(23, 18) +#define MSG_PIT_PRO GENMASK(17, 16) +#define MSG_PIT_VBID GENMASK(15, 3) +#define MSG_PIT_RES5 GENMASK(2, 0) + +#define MSG_PIT_DLQ_DONE GENMASK(31, 30) +#define MSG_PIT_ULQ_DONE GENMASK(29, 24) +#define MSG_PIT_IP BIT(23) +#define MSG_PIT_RES6 BIT(22) +#define MSG_PIT_MR GENMASK(21, 20) +#define MSG_PIT_RES7 GENMASK(19, 17) +#define MSG_PIT_IG BIT(16) +#define MSG_PIT_RES8 GENMASK(15, 11) +#define MSG_PIT_H_BID GENMASK(10, 8) +#define MSG_PIT_PIT_SEQ GENMASK(7, 0) + +int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue); +void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl, + const struct dpmaif_bat_request *bat_req, + const unsigned int q_num, const unsigned int buf_cnt, + const bool initial); +int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const unsigned int buf_cnt, const bool first_time); +void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask); +void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue); +void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req, + const enum bat_type buf_type); +void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, + struct dpmaif_bat_request *bat_req); + +#endif /* __T7XX_HIF_DPMA_RX_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c new file mode 100644 index 000000000000..e8435a871842 --- /dev/null +++ 
b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. + * + * Authors: + * Amir Hanania + * Haijun Liu + * Eliot Lee + * Moises Veleta + * Ricardo Martinez + * + * Contributors: + * Chiranjeevi Rapolu + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_dpmaif.h" +#include "t7xx_hif_dpmaif.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_pci.h" + +#define DPMAIF_SKB_TX_BURST_CNT 5 +#define DPMAIF_DRB_LIST_LEN 6144 + +/* DRB dtype */ +#define DES_DTYP_PD 0 +#define DES_DTYP_MSG 1 + +static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt; + unsigned long flags; + + if (!txq->que_started) + return 0; + + old_sw_rd_idx = txq->drb_rd_idx; + new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num); + if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) { + dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx); + return 0; + } + + if (old_sw_rd_idx <= new_hw_rd_idx) + drb_cnt = new_hw_rd_idx - old_sw_rd_idx; + else + drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx; + + spin_lock_irqsave(&txq->tx_lock, flags); + txq->drb_rd_idx = new_hw_rd_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + return drb_cnt; +} + +static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num, unsigned int release_cnt) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base; + struct dpmaif_drb *cur_drb, *drb_base; + unsigned int drb_cnt, i, cur_idx; + unsigned long flags; + + drb_skb_base = txq->drb_skb_base; + drb_base = txq->drb_base; + + spin_lock_irqsave(&txq->tx_lock, flags); + drb_cnt = txq->drb_size_cnt; + cur_idx = txq->drb_release_rd_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + for (i = 0; i < release_cnt; i++) { + cur_drb = drb_base + cur_idx; + if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) { + cur_drb_skb = drb_skb_base + cur_idx; + if (!cur_drb_skb->is_msg) + dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr, + cur_drb_skb->data_len, DMA_TO_DEVICE); + + if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) { + if (!cur_drb_skb->skb) { + dev_err(dpmaif_ctrl->dev, + "txq%u: DRB check fail, invalid skb\n", q_num); + continue; + } + + dev_kfree_skb_any(cur_drb_skb->skb); + } + + cur_drb_skb->skb = NULL; + } + + spin_lock_irqsave(&txq->tx_lock, flags); + cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx); + txq->drb_release_rd_idx = cur_idx; + spin_unlock_irqrestore(&txq->tx_lock, flags); + + if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8) + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index); + } + + if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) + dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num); + + return i; +} + +static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl, + unsigned int q_num, unsigned int budget) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num]; + unsigned 
int rel_cnt, real_rel_cnt; + + /* Update read index from HW */ + t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num); + + rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, + txq->drb_rd_idx, DPMAIF_READ); + + real_rel_cnt = min_not_zero(budget, rel_cnt); + if (real_rel_cnt) + real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt); + + return real_rel_cnt < rel_cnt ? -EAGAIN : 0; +} + +static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq) +{ + return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index); +} + +static void t7xx_dpmaif_tx_done(struct work_struct *work) +{ + struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work); + struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl; + struct dpmaif_hw_info *hw_info; + int ret; + + hw_info = &dpmaif_ctrl->hw_info; + ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); + if (ret == -EAGAIN || + (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && + t7xx_dpmaif_drb_ring_not_empty(txq))) { + queue_work(dpmaif_ctrl->txq[txq->index].worker, + &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); + /* Give the device time to enter the low power state */ + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + } else { + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); + } +} + +static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l, + unsigned int channel_id) +{ + struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; + struct dpmaif_drb *drb = drb_base + cur_idx; + + drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) | + FIELD_PREP(DRB_HDR_CONT, 1) | + FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len)); + + drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) | + FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) | + FIELD_PREP(DRB_MSG_L4_CHK, 1)); +} + +static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, dma_addr_t data_addr, + unsigned int pkt_size, bool last_one) +{ + struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base; + struct dpmaif_drb *drb = drb_base + cur_idx; + u32 header; + + header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size); + if (!last_one) + header |= FIELD_PREP(DRB_HDR_CONT, 1); + + drb->header = cpu_to_le32(header); + drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr)); + drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr)); +} + +static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, + unsigned int cur_idx, struct sk_buff *skb, bool is_msg, + bool is_frag, bool is_last_one, dma_addr_t bus_addr, + unsigned int data_len) +{ + struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base; + struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx; + + drb_skb->skb = skb; + drb_skb->bus_addr = bus_addr; + drb_skb->data_len = data_len; + drb_skb->index = cur_idx; + drb_skb->is_msg = is_msg; + drb_skb->is_frag = is_frag; + drb_skb->is_last = is_last_one; +} + +static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb) +{ + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + unsigned int wr_cnt, send_cnt, payload_cnt; + unsigned int cur_idx, drb_wr_idx_backup; + struct skb_shared_info *shinfo; + struct dpmaif_tx_queue *txq; + struct t7xx_skb_cb *skb_cb; + unsigned long flags; + + 
skb_cb = T7XX_SKB_CB(skb); + txq = &dpmaif_ctrl->txq[skb_cb->txq_number]; + if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON) + return -ENODEV; + + atomic_set(&txq->tx_processing, 1); + /* Ensure tx_processing is changed to 1 before actually begin TX flow */ + smp_mb(); + + shinfo = skb_shinfo(skb); + if (shinfo->frag_list) + dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n"); + + payload_cnt = shinfo->nr_frags + 1; + /* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */ + send_cnt = payload_cnt + 1; + + spin_lock_irqsave(&txq->tx_lock, flags); + cur_idx = txq->drb_wr_idx; + drb_wr_idx_backup = cur_idx; + txq->drb_wr_idx += send_cnt; + if (txq->drb_wr_idx >= txq->drb_size_cnt) + txq->drb_wr_idx -= txq->drb_size_cnt; + t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx); + t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0); + spin_unlock_irqrestore(&txq->tx_lock, flags); + + for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) { + bool is_frag, is_last_one = wr_cnt == payload_cnt - 1; + unsigned int data_len; + dma_addr_t bus_addr; + void *data_addr; + + if (!wr_cnt) { + data_len = skb_headlen(skb); + data_addr = skb->data; + is_frag = false; + } else { + skb_frag_t *frag = shinfo->frags + wr_cnt - 1; + + data_len = skb_frag_size(frag); + data_addr = skb_frag_address(frag); + is_frag = true; + } + + bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE); + if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr)) + goto unmap_buffers; + + cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx); + + spin_lock_irqsave(&txq->tx_lock, flags); + t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len, + is_last_one); + t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag, + is_last_one, bus_addr, data_len); + spin_unlock_irqrestore(&txq->tx_lock, flags); + } + + if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2)) + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index); + + atomic_set(&txq->tx_processing, 0); + + return 0; + +unmap_buffers: + while (wr_cnt--) { + struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base; + + cur_idx = cur_idx ? 
cur_idx - 1 : txq->drb_size_cnt - 1; + drb_skb += cur_idx; + dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr, + drb_skb->data_len, DMA_TO_DEVICE); + } + + txq->drb_wr_idx = drb_wr_idx_backup; + atomic_set(&txq->tx_processing, 0); + + return -ENOMEM; +} + +static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head)) + return false; + } + + return true; +} + +/* Currently, only the default TX queue is used */ +static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_tx_queue *txq; + + txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE]; + if (!txq->que_started) + return NULL; + + return txq; +} + +static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq) +{ + return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx, + txq->drb_wr_idx, DPMAIF_WRITE); +} + +static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb) +{ + /* Normal DRB (frags data + skb linear data) + msg DRB */ + return skb_shinfo(skb)->nr_frags + 2; +} + +static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq) +{ + unsigned int drb_remain_cnt, i; + unsigned int send_drb_cnt; + int drb_cnt = 0; + int ret = 0; + + drb_remain_cnt = t7xx_txq_drb_wr_available(txq); + + for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) { + struct sk_buff *skb; + + skb = skb_peek(&txq->tx_skb_head); + if (!skb) + break; + + send_drb_cnt = t7xx_skb_drb_cnt(skb); + if (drb_remain_cnt < send_drb_cnt) { + drb_remain_cnt = t7xx_txq_drb_wr_available(txq); + continue; + } + + drb_remain_cnt -= send_drb_cnt; + + ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb); + if (ret < 0) { + dev_err(txq->dpmaif_ctrl->dev, + "Failed to add skb to device's ring: %d\n", ret); + break; + } + + drb_cnt += send_drb_cnt; + skb_unlink(skb, &txq->tx_skb_head); + } + + if (drb_cnt > 0) + return drb_cnt; + + return ret; +} + +static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) +{ + do { + struct dpmaif_tx_queue *txq; + int drb_send_cnt; + + txq = t7xx_select_tx_queue(dpmaif_ctrl); + if (!txq) + return; + + drb_send_cnt = t7xx_txq_burst_send_skb(txq); + if (drb_send_cnt <= 0) { + usleep_range(10, 20); + cond_resched(); + continue; + } + + t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index, + drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD); + + cond_resched(); + } while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() && + (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)); +} + +static int t7xx_dpmaif_tx_hw_push_thread(void *arg) +{ + struct dpmaif_ctrl *dpmaif_ctrl = arg; + + while (!kthread_should_stop()) { + if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) || + dpmaif_ctrl->state != DPMAIF_STATE_PWRON) { + if (wait_event_interruptible(dpmaif_ctrl->tx_wq, + (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && + dpmaif_ctrl->state == DPMAIF_STATE_PWRON) || + kthread_should_stop())) + continue; + + if (kthread_should_stop()) + break; + } + + t7xx_do_tx_hw_push(dpmaif_ctrl); + } + + return 0; +} + +int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl) +{ + init_waitqueue_head(&dpmaif_ctrl->tx_wq); + dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread, + dpmaif_ctrl, "dpmaif_tx_hw_push"); + return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread); +} + +void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl) +{ + if (dpmaif_ctrl->tx_thread) + kthread_stop(dpmaif_ctrl->tx_thread); +} + +/** + * 
t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue. + * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl. + * @txq_number: Queue number to xmit on. + * @skb: Pointer to the skb to transmit. + * + * Add the skb to the queue of the skbs to be transmit. + * Wake up the thread that push the skbs from the queue to the HW. + * + * Return: + * * 0 - Success. + * * -EBUSY - Tx budget exhausted. + * In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full + * state to prevent this error condition. + */ +int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number, + struct sk_buff *skb) +{ + struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number]; + struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks; + struct t7xx_skb_cb *skb_cb; + + if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) { + cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number); + return -EBUSY; + } + + skb_cb = T7XX_SKB_CB(skb); + skb_cb->txq_number = txq_number; + skb_queue_tail(&txq->tx_skb_head, skb); + wake_up(&dpmaif_ctrl->tx_wq); + + return 0; +} + +void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + if (que_mask & BIT(i)) + queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work); + } +} + +static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq) +{ + size_t brb_skb_size, brb_pd_size; + + brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb); + brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb); + + txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN; + + /* For HW && AP SW */ + txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size, + &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO); + if (!txq->drb_base) + return -ENOMEM; + + /* For AP SW to record the skb information */ + txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL); + if (!txq->drb_skb_base) { + dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size, + txq->drb_base, txq->drb_bus_addr); + return -ENOMEM; + } + + return 0; +} + +static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq) +{ + struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base; + unsigned int i; + + if (!drb_skb_base) + return; + + for (i = 0; i < txq->drb_size_cnt; i++) { + drb_skb = drb_skb_base + i; + if (!drb_skb->skb) + continue; + + if (!drb_skb->is_msg) + dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr, + drb_skb->data_len, DMA_TO_DEVICE); + + if (drb_skb->is_last) { + dev_kfree_skb(drb_skb->skb); + drb_skb->skb = NULL; + } + } +} + +static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq) +{ + if (txq->drb_base) + dma_free_coherent(txq->dpmaif_ctrl->dev, + txq->drb_size_cnt * sizeof(struct dpmaif_drb), + txq->drb_base, txq->drb_bus_addr); + + t7xx_dpmaif_tx_free_drb_skb(txq); +} + +/** + * t7xx_dpmaif_txq_init() - Initialize TX queue. + * @txq: Pointer to struct dpmaif_tx_queue. + * + * Initialize the TX queue data structure and allocate memory for it to use. + * + * Return: + * * 0 - Success. + * * -ERROR - Error code from failure sub-initializations. 
+ */ +int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq) +{ + int ret; + + skb_queue_head_init(&txq->tx_skb_head); + init_waitqueue_head(&txq->req_wq); + atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN); + + ret = t7xx_dpmaif_tx_drb_buf_init(txq); + if (ret) { + dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret); + return ret; + } + + txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | + (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index); + if (!txq->worker) + return -ENOMEM; + + INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done); + spin_lock_init(&txq->tx_lock); + + return 0; +} + +void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq) +{ + if (txq->worker) + destroy_workqueue(txq->worker); + + skb_queue_purge(&txq->tx_skb_head); + t7xx_dpmaif_tx_drb_buf_rel(txq); +} + +void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) { + struct dpmaif_tx_queue *txq; + int count = 0; + + txq = &dpmaif_ctrl->txq[i]; + txq->que_started = false; + /* Make sure TXQ is disabled */ + smp_mb(); + + /* Wait for active Tx to be done */ + while (atomic_read(&txq->tx_processing)) { + if (++count >= DPMAIF_MAX_CHECK_COUNT) { + dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n"); + break; + } + } + } +} + +static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq) +{ + txq->que_started = false; + + cancel_work_sync(&txq->dpmaif_tx_work); + flush_work(&txq->dpmaif_tx_work); + t7xx_dpmaif_tx_free_drb_skb(txq); + + txq->drb_rd_idx = 0; + txq->drb_wr_idx = 0; + txq->drb_release_rd_idx = 0; +} + +void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int i; + + for (i = 0; i < DPMAIF_TXQ_NUM; i++) + t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]); +} diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h new file mode 100644 index 000000000000..ca9b9ea2da8b --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Eliot Lee + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Moises Veleta + * Sreehari Kancharla + */ + +#ifndef __T7XX_HIF_DPMA_TX_H__ +#define __T7XX_HIF_DPMA_TX_H__ + +#include +#include +#include + +#include "t7xx_hif_dpmaif.h" + +#define DPMAIF_TX_DEFAULT_QUEUE 0 + +struct dpmaif_drb { + __le32 header; + union { + struct { + __le32 data_addr_l; + __le32 data_addr_h; + } pd; + struct { + __le32 msg_hdr; + __le32 reserved1; + } msg; + }; + __le32 reserved2; +}; + +/* Header fields */ +#define DRB_HDR_DATA_LEN GENMASK(31, 16) +#define DRB_HDR_RESERVED GENMASK(15, 3) +#define DRB_HDR_CONT BIT(2) +#define DRB_HDR_DTYP GENMASK(1, 0) + +#define DRB_MSG_DW2_RES GENMASK(31, 30) +#define DRB_MSG_L4_CHK BIT(29) +#define DRB_MSG_IP_CHK BIT(28) +#define DRB_MSG_RESERVED BIT(27) +#define DRB_MSG_NETWORK_TYPE GENMASK(26, 24) +#define DRB_MSG_CHANNEL_ID GENMASK(23, 16) +#define DRB_MSG_COUNT_L GENMASK(15, 0) + +struct dpmaif_drb_skb { + struct sk_buff *skb; + dma_addr_t bus_addr; + unsigned int data_len; + u16 index:13; + u16 is_msg:1; + u16 is_frag:1; + u16 is_last:1; +}; + +int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number, + struct sk_buff *skb); +void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl); +int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq); +void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask); +int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq); +void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl); +void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl); + +#endif /* __T7XX_HIF_DPMA_TX_H__ */ -- cgit v1.2.3 From 05d19bf500f8281f574713479b04679fa226d0a3 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:06 -0700 Subject: net: wwan: t7xx: Add WWAN network interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Creates the Cross Core Modem Network Interface (CCMNI) which implements the wwan_ops for registration with the WWAN framework, CCMNI also implements the net_device_ops functions used by the network device. Network device operations include open, close, start transmission, TX timeout and change MTU. Signed-off-by: Haijun Liu Co-developed-by: Chandrashekar Devegowda Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Loic Poulain Reviewed-by: Ilpo Järvinen Reviewed-by: Sergey Ryazanov Signed-off-by: David S. 
Miller --- drivers/net/wwan/t7xx/Makefile | 1 + drivers/net/wwan/t7xx/t7xx_modem_ops.c | 11 +- drivers/net/wwan/t7xx/t7xx_netdev.c | 423 +++++++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_netdev.h | 55 +++++ 4 files changed, 489 insertions(+), 1 deletion(-) create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.c create mode 100644 drivers/net/wwan/t7xx/t7xx_netdev.h (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile index 04a9ba50dc14..dc6a7d682c15 100644 --- a/drivers/net/wwan/t7xx/Makefile +++ b/drivers/net/wwan/t7xx/Makefile @@ -17,3 +17,4 @@ mtk_t7xx-y:= t7xx_pci.o \ t7xx_hif_dpmaif_tx.o \ t7xx_hif_dpmaif_rx.o \ t7xx_dpmaif.o \ + t7xx_netdev.o diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index a1b60173d89d..1056ad9bf34f 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -35,6 +35,7 @@ #include "t7xx_hif_cldma.h" #include "t7xx_mhccif.h" #include "t7xx_modem_ops.h" +#include "t7xx_netdev.h" #include "t7xx_pci.h" #include "t7xx_pcie_mac.h" #include "t7xx_port.h" @@ -670,10 +671,14 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev) if (ret) goto err_destroy_hswq; - ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); + ret = t7xx_ccmni_init(t7xx_dev); if (ret) goto err_uninit_fsm; + ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]); + if (ret) + goto err_uninit_ccmni; + ret = t7xx_port_proxy_init(md); if (ret) goto err_uninit_md_cldma; @@ -692,6 +697,9 @@ err_uninit_proxy: err_uninit_md_cldma: t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); +err_uninit_ccmni: + t7xx_ccmni_exit(t7xx_dev); + err_uninit_fsm: t7xx_fsm_uninit(md); @@ -713,6 +721,7 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev) t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); t7xx_port_proxy_uninit(md->port_prox); t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]); + t7xx_ccmni_exit(t7xx_dev); t7xx_fsm_uninit(md); destroy_workqueue(md->handshake_wq); } diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c new file mode 100644 index 000000000000..c6b6547f2c6f --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_netdev.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Chandrashekar Devegowda + * Haijun Liu + * Ricardo Martinez + * + * Contributors: + * Amir Hanania + * Andy Shevchenko + * Chiranjeevi Rapolu + * Eliot Lee + * Moises Veleta + * Sreehari Kancharla + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t7xx_hif_dpmaif_rx.h" +#include "t7xx_hif_dpmaif_tx.h" +#include "t7xx_netdev.h" +#include "t7xx_pci.h" +#include "t7xx_port_proxy.h" +#include "t7xx_state_monitor.h" + +#define IP_MUX_SESSION_DEFAULT 0 + +static int t7xx_ccmni_open(struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + atomic_inc(&ccmni->usage); + return 0; +} + +static int t7xx_ccmni_close(struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + + atomic_dec(&ccmni->usage); + netif_carrier_off(dev); + netif_tx_disable(dev); + return 0; +} + +static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb, + unsigned int txq_number) +{ + struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb; + struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb); + + skb_cb->netif_idx = ccmni->index; + + if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb)) + return NETDEV_TX_BUSY; + + return 0; +} + +static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + int skb_len = skb->len; + + /* If MTU is changed or there is no headroom, drop the packet */ + if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) { + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE)) + return NETDEV_TX_BUSY; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb_len; + + return NETDEV_TX_OK; +} + +static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue) +{ + struct t7xx_ccmni *ccmni = netdev_priv(dev); + + dev->stats.tx_errors++; + + if (atomic_read(&ccmni->usage) > 0) + netif_tx_wake_all_queues(dev); +} + +static const struct net_device_ops ccmni_netdev_ops = { + .ndo_open = t7xx_ccmni_open, + .ndo_stop = t7xx_ccmni_close, + .ndo_start_xmit = t7xx_ccmni_start_xmit, + .ndo_tx_timeout = t7xx_ccmni_tx_timeout, +}; + +static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) { + netif_tx_start_all_queues(ccmni->dev); + netif_carrier_on(ccmni->dev); + } + } +} + +static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) + netif_tx_disable(ccmni->dev); + } +} + +static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb) +{ + struct t7xx_ccmni *ccmni; + int i; + + for (i = 0; i < ctlb->nic_dev_num; i++) { + ccmni = ctlb->ccmni_inst[i]; + if (!ccmni) + continue; + + if (atomic_read(&ccmni->usage) > 0) + netif_carrier_off(ccmni->dev); + } +} + +static void t7xx_ccmni_wwan_setup(struct net_device *dev) +{ + dev->hard_header_len += sizeof(struct ccci_header); + + dev->mtu = ETH_DATA_LEN; + dev->max_mtu = CCMNI_MTU_MAX; + BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE); + + dev->tx_queue_len = 
DEFAULT_TX_QUEUE_LEN; + dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO; + + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + + dev->features = NETIF_F_VLAN_CHALLENGED; + + dev->features |= NETIF_F_SG; + dev->hw_features |= NETIF_F_SG; + + dev->features |= NETIF_F_HW_CSUM; + dev->hw_features |= NETIF_F_HW_CSUM; + + dev->features |= NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_RXCSUM; + + dev->needs_free_netdev = true; + + dev->type = ARPHRD_NONE; + + dev->netdev_ops = &ccmni_netdev_ops; +} + +static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id, + struct netlink_ext_ack *extack) +{ + struct t7xx_ccmni_ctrl *ctlb = ctxt; + struct t7xx_ccmni *ccmni; + int ret; + + if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) + return -EINVAL; + + ccmni = wwan_netdev_drvpriv(dev); + ccmni->index = if_id; + ccmni->ctlb = ctlb; + ccmni->dev = dev; + atomic_set(&ccmni->usage, 0); + ctlb->ccmni_inst[if_id] = ccmni; + + ret = register_netdevice(dev); + if (ret) + return ret; + + netif_device_attach(dev); + return 0; +} + +static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head) +{ + struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev); + struct t7xx_ccmni_ctrl *ctlb = ctxt; + u8 if_id = ccmni->index; + + if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst)) + return; + + if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni)) + return; + + unregister_netdevice(dev); +} + +static const struct wwan_ops ccmni_wwan_ops = { + .priv_size = sizeof(struct t7xx_ccmni), + .setup = t7xx_ccmni_wwan_setup, + .newlink = t7xx_ccmni_wwan_newlink, + .dellink = t7xx_ccmni_wwan_dellink, +}; + +static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb) +{ + struct device *dev = ctlb->hif_ctrl->dev; + int ret; + + if (ctlb->wwan_is_registered) + return 0; + + /* WWAN core will create a netdev for the default IP MUX channel */ + ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT); + if (ret < 0) { + dev_err(dev, "Unable to register WWAN ops, %d\n", ret); + return ret; + } + + ctlb->wwan_is_registered = true; + return 0; +} + +static int t7xx_ccmni_md_state_callback(enum md_state state, void *para) +{ + struct t7xx_ccmni_ctrl *ctlb = para; + struct device *dev; + int ret = 0; + + dev = ctlb->hif_ctrl->dev; + ctlb->md_sta = state; + + switch (state) { + case MD_STATE_READY: + ret = t7xx_ccmni_register_wwan(ctlb); + if (!ret) + t7xx_ccmni_start(ctlb); + break; + + case MD_STATE_EXCEPTION: + case MD_STATE_STOPPED: + t7xx_ccmni_pre_stop(ctlb); + + ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); + if (ret < 0) + dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); + + t7xx_ccmni_post_stop(ctlb); + break; + + case MD_STATE_WAITING_FOR_HS1: + case MD_STATE_WAITING_TO_STOP: + ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state); + if (ret < 0) + dev_err(dev, "DPMAIF md state callback err, state=%d\n", state); + + break; + + default: + break; + } + + return ret; +} + +static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + struct t7xx_fsm_notifier *md_status_notifier; + + md_status_notifier = &ctlb->md_status_notify; + INIT_LIST_HEAD(&md_status_notifier->entry); + md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback; + md_status_notifier->data = ctlb; + + t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier); +} + +static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb) +{ + struct t7xx_skb_cb *skb_cb; + struct net_device *net_dev; + 
struct t7xx_ccmni *ccmni; + int pkt_type, skb_len; + u8 netif_id; + + skb_cb = T7XX_SKB_CB(skb); + netif_id = skb_cb->netif_idx; + ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id]; + if (!ccmni) { + dev_kfree_skb(skb); + return; + } + + net_dev = ccmni->dev; + skb->dev = net_dev; + + pkt_type = skb_cb->rx_pkt_type; + if (pkt_type == PKT_TYPE_IP6) + skb->protocol = htons(ETH_P_IPV6); + else + skb->protocol = htons(ETH_P_IP); + + skb_len = skb->len; + netif_rx(skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += skb_len; +} + +static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) +{ + struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct netdev_queue *net_queue; + + if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) { + net_queue = netdev_get_tx_queue(ccmni->dev, qno); + if (netif_tx_queue_stopped(net_queue)) + netif_tx_wake_queue(net_queue); + } +} + +static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno) +{ + struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0]; + struct netdev_queue *net_queue; + + if (atomic_read(&ccmni->usage) > 0) { + netdev_err(ccmni->dev, "TX queue %d is full\n", qno); + net_queue = netdev_get_tx_queue(ccmni->dev, qno); + netif_tx_stop_queue(net_queue); + } +} + +static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev, + enum dpmaif_txq_state state, int qno) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + + if (ctlb->md_sta != MD_STATE_READY) + return; + + if (!ctlb->ccmni_inst[0]) { + dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n"); + return; + } + + if (state == DMPAIF_TXQ_STATE_IRQ) + t7xx_ccmni_queue_tx_irq_notify(ctlb, qno); + else if (state == DMPAIF_TXQ_STATE_FULL) + t7xx_ccmni_queue_tx_full_notify(ctlb, qno); +} + +int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + struct t7xx_ccmni_ctrl *ctlb; + + ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL); + if (!ctlb) + return -ENOMEM; + + t7xx_dev->ccmni_ctlb = ctlb; + ctlb->t7xx_dev = t7xx_dev; + ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify; + ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb; + ctlb->nic_dev_num = NIC_DEV_DEFAULT; + + ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks); + if (!ctlb->hif_ctrl) + return -ENOMEM; + + init_md_status_notifier(t7xx_dev); + return 0; +} + +void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev) +{ + struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb; + + t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify); + + if (ctlb->wwan_is_registered) { + wwan_unregister_ops(&t7xx_dev->pdev->dev); + ctlb->wwan_is_registered = false; + } + + t7xx_dpmaif_hif_exit(ctlb->hif_ctrl); +} diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h new file mode 100644 index 000000000000..f5ad49ca12a7 --- /dev/null +++ b/drivers/net/wwan/t7xx/t7xx_netdev.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021, MediaTek Inc. + * Copyright (c) 2021-2022, Intel Corporation. 
+ * + * Authors: + * Haijun Liu + * Moises Veleta + * + * Contributors: + * Amir Hanania + * Chiranjeevi Rapolu + * Ricardo Martinez + */ + +#ifndef __T7XX_NETDEV_H__ +#define __T7XX_NETDEV_H__ + +#include +#include +#include + +#include "t7xx_hif_dpmaif.h" +#include "t7xx_pci.h" +#include "t7xx_state_monitor.h" + +#define RXQ_NUM DPMAIF_RXQ_NUM +#define NIC_DEV_MAX 21 +#define NIC_DEV_DEFAULT 2 + +#define CCMNI_NETDEV_WDT_TO (1 * HZ) +#define CCMNI_MTU_MAX 3000 + +struct t7xx_ccmni { + u8 index; + atomic_t usage; + struct net_device *dev; + struct t7xx_ccmni_ctrl *ctlb; +}; + +struct t7xx_ccmni_ctrl { + struct t7xx_pci_dev *t7xx_dev; + struct dpmaif_ctrl *hif_ctrl; + struct t7xx_ccmni *ccmni_inst[NIC_DEV_MAX]; + struct dpmaif_callbacks callbacks; + unsigned int nic_dev_num; + unsigned int md_sta; + struct t7xx_fsm_notifier md_status_notify; + bool wwan_is_registered; +}; + +int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev); +void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev); + +#endif /* __T7XX_NETDEV_H__ */ -- cgit v1.2.3 From 46e8f49ed7b3063f51e28f3ea2084b3da29c1503 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:07 -0700 Subject: net: wwan: t7xx: Introduce power management MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements suspend, resumes, freeze, thaw, poweroff, and restore `dev_pm_ops` callbacks. From the host point of view, the t7xx driver is one entity. But, the device has several modules that need to be addressed in different ways during power management (PM) flows. The driver uses the term 'PM entities' to refer to the 2 DPMA and 2 CLDMA HW blocks that need to be managed during PM flows. When a dev_pm_ops function is called, the PM entities list is iterated and the matching function is called for each entry in the list. Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Ilpo Järvinen Signed-off-by: David S. 
Miller --- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 123 ++++++++- drivers/net/wwan/t7xx/t7xx_hif_cldma.h | 1 + drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c | 90 ++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h | 1 + drivers/net/wwan/t7xx/t7xx_mhccif.c | 17 ++ drivers/net/wwan/t7xx/t7xx_pci.c | 421 +++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_pci.h | 46 ++++ drivers/net/wwan/t7xx/t7xx_state_monitor.c | 2 + 8 files changed, 700 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 7a8c09f7435a..cbe5ea4495e0 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -1076,6 +1076,120 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev) return 0; } +static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + int qno_t; + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_restore(hw_info); + for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) { + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr, + MTK_TX); + t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr, + MTK_RX); + } + t7xx_cldma_enable_irq(md_ctrl); + t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX); + md_ctrl->rxq_active |= TXRX_STATUS_BITMASK; + t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + unsigned long flags; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + md_ctrl->txq_active |= TXRX_STATUS_BITMASK; + t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); + t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK); + + return 0; +} + +static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX); + md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX); + t7xx_cldma_clear_ip_busy(hw_info); + t7xx_cldma_disable_irq(md_ctrl); + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); +} + +static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param) +{ + struct cldma_ctrl *md_ctrl = entity_param; + struct t7xx_cldma_hw *hw_info; + unsigned long flags; + + if (md_ctrl->hif_id == CLDMA_ID_MD) + t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK); + + hw_info = &md_ctrl->hw_info; + + spin_lock_irqsave(&md_ctrl->cldma_lock, flags); + t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX); + t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX); + md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK; + t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX); + md_ctrl->txq_started = 0; + spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + return 0; +} + 
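+/* Register this CLDMA controller as a PM entity so that the suspend/resume
+ * callbacks above are invoked from the PCIe PM flow in t7xx_pci.c. The modem
+ * CLDMA (CLDMA_ID_MD) registers as PM_ENTITY_ID_CTRL1, any other controller
+ * as PM_ENTITY_ID_CTRL2.
+ */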
+static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl) +{ + md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL); + if (!md_ctrl->pm_entity) + return -ENOMEM; + + md_ctrl->pm_entity->entity_param = md_ctrl; + + if (md_ctrl->hif_id == CLDMA_ID_MD) + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1; + else + md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2; + + md_ctrl->pm_entity->suspend = t7xx_cldma_suspend; + md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late; + md_ctrl->pm_entity->resume = t7xx_cldma_resume; + md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early; + + return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity); +} + +static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl) +{ + if (!md_ctrl->pm_entity) + return -EINVAL; + + t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity); + kfree(md_ctrl->pm_entity); + md_ctrl->pm_entity = NULL; + return 0; +} + void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl) { struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; @@ -1126,6 +1240,7 @@ static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) * t7xx_cldma_init() - Initialize CLDMA. * @md_ctrl: CLDMA context structure. * + * Allocate and initialize device power management entity. * Initialize HIF TX/RX queue structure. * Register CLDMA callback ISR with PCIe driver. * @@ -1136,12 +1251,16 @@ static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl) int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) { struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info; - int i; + int ret, i; md_ctrl->txq_active = 0; md_ctrl->rxq_active = 0; md_ctrl->is_late_init = false; + ret = t7xx_cldma_pm_init(md_ctrl); + if (ret) + return ret; + spin_lock_init(&md_ctrl->cldma_lock); for (i = 0; i < CLDMA_TXQ_NUM; i++) { @@ -1176,6 +1295,7 @@ int t7xx_cldma_init(struct cldma_ctrl *md_ctrl) err_workqueue: t7xx_cldma_destroy_wqs(md_ctrl); + t7xx_cldma_pm_uninit(md_ctrl); return -ENOMEM; } @@ -1190,4 +1310,5 @@ void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl) t7xx_cldma_stop(md_ctrl); t7xx_cldma_late_release(md_ctrl); t7xx_cldma_destroy_wqs(md_ctrl); + t7xx_cldma_pm_uninit(md_ctrl); } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h index deb239e4f803..47a35e552da7 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h @@ -98,6 +98,7 @@ struct cldma_ctrl { struct dma_pool *gpd_dmapool; struct cldma_ring tx_ring[CLDMA_TXQ_NUM]; struct cldma_ring rx_ring[CLDMA_RXQ_NUM]; + struct md_pm_entity *pm_entity; struct t7xx_cldma_hw hw_info; bool is_late_init; int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb); diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c index 2e9a8d7ba693..7eff3531b9a5 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c @@ -398,6 +398,90 @@ static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl) return 0; } +static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param) +{ + struct dpmaif_ctrl *dpmaif_ctrl = param; + + t7xx_dpmaif_tx_stop(dpmaif_ctrl); + t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info); + t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info); + t7xx_dpmaif_disable_irq(dpmaif_ctrl); + t7xx_dpmaif_rx_stop(dpmaif_ctrl); + return 0; +} + +static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl) +{ + int qno; + + for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++) + 
t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno); +} + +static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct dpmaif_rx_queue *rxq; + struct dpmaif_tx_queue *txq; + unsigned int que_cnt; + + for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) { + txq = &dpmaif_ctrl->txq[que_cnt]; + txq->que_started = true; + } + + for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) { + rxq = &dpmaif_ctrl->rxq[que_cnt]; + rxq->que_started = true; + } +} + +static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param) +{ + struct dpmaif_ctrl *dpmaif_ctrl = param; + + if (!dpmaif_ctrl) + return 0; + + t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl); + t7xx_dpmaif_enable_irq(dpmaif_ctrl); + t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl); + t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info); + wake_up(&dpmaif_ctrl->tx_wq); + return 0; +} + +static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; + int ret; + + INIT_LIST_HEAD(&dpmaif_pm_entity->entity); + dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend; + dpmaif_pm_entity->suspend_late = NULL; + dpmaif_pm_entity->resume_early = NULL; + dpmaif_pm_entity->resume = &t7xx_dpmaif_resume; + dpmaif_pm_entity->id = PM_ENTITY_ID_DATA; + dpmaif_pm_entity->entity_param = dpmaif_ctrl; + + ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); + if (ret) + dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); + + return ret; +} + +static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl) +{ + struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity; + int ret; + + ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity); + if (ret < 0) + dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n"); + + return ret; +} + int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state) { int ret = 0; @@ -461,11 +545,16 @@ struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev, dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base - t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; + ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl); + if (ret) + return NULL; + t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl); t7xx_dpmaif_disable_irq(dpmaif_ctrl); ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl); if (ret) { + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret); return NULL; } @@ -478,6 +567,7 @@ void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl) { if (dpmaif_ctrl->dpmaif_sw_init_done) { t7xx_dpmaif_stop(dpmaif_ctrl); + t7xx_dpmaif_pm_entity_release(dpmaif_ctrl); t7xx_dpmaif_sw_release(dpmaif_ctrl); dpmaif_ctrl->dpmaif_sw_init_done = false; } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h index 1d5de7ceeeca..1225ca0ed51e 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h @@ -174,6 +174,7 @@ struct dpmaif_callbacks { struct dpmaif_ctrl { struct device *dev; struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity dpmaif_pm_entity; enum dpmaif_state state; bool dpmaif_sw_init_done; struct dpmaif_hw_info hw_info; diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c index 5826e2d0b5a8..4bb452f5ccff 100644 --- a/drivers/net/wwan/t7xx/t7xx_mhccif.c +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c @@ -24,6 +24,11 @@ #include "t7xx_pcie_mac.h" #include "t7xx_reg.h" 
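+/* Suspend/resume ACK interrupts raised by the modem (MD) and by the device's
+ * AP sub-block; any of these bits completes pm_sr_ack in the MHCCIF ISR
+ * thread below.
+ */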
+#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \ + D2H_INT_RESUME_ACK | \ + D2H_INT_SUSPEND_ACK_AP | \ + D2H_INT_RESUME_ACK_AP) + static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask) { void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base; @@ -53,6 +58,18 @@ static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data) } t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); + + if (int_status & D2H_INT_SR_ACK) + complete(&t7xx_dev->pm_sr_ack); + + iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + + int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev); + if (!int_status) { + val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1); + iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + } + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); return IRQ_HANDLED; } diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 8beb32a4185d..564147664af0 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -18,23 +18,438 @@ #include #include +#include #include #include #include #include #include +#include +#include +#include #include +#include #include +#include +#include #include "t7xx_mhccif.h" #include "t7xx_modem_ops.h" #include "t7xx_pci.h" #include "t7xx_pcie_mac.h" #include "t7xx_reg.h" +#include "t7xx_state_monitor.h" #define T7XX_PCI_IREG_BASE 0 #define T7XX_PCI_EREG_BASE 2 +#define PM_ACK_TIMEOUT_MS 1500 +#define PM_RESOURCE_POLL_TIMEOUT_US 10000 +#define PM_RESOURCE_POLL_STEP_US 100 + +enum t7xx_pm_state { + MTK_PM_EXCEPTION, + MTK_PM_INIT, /* Device initialized, but handshake not completed */ + MTK_PM_SUSPENDED, + MTK_PM_RESUMED, +}; + +static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) +{ + int ret, val; + + ret = read_poll_timeout(ioread32, val, + (val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK, + PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true, + IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); + if (ret == -ETIMEDOUT) + dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n"); + + return ret; +} + +static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) +{ + struct pci_dev *pdev = t7xx_dev->pdev; + + INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); + mutex_init(&t7xx_dev->md_pm_entity_mtx); + init_completion(&t7xx_dev->pm_sr_ack); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + + device_init_wakeup(&pdev->dev, true); + dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | + DPM_FLAG_NO_DIRECT_COMPLETE); + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + + return t7xx_wait_pm_config(t7xx_dev); +} + +void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) +{ + /* Enable the PCIe resource lock only after MD deep sleep is done */ + t7xx_mhccif_mask_clr(t7xx_dev, + D2H_INT_SUSPEND_ACK | + D2H_INT_RESUME_ACK | + D2H_INT_SUSPEND_ACK_AP | + D2H_INT_RESUME_ACK_AP); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); +} + +static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) +{ + /* The device is kept in FSM re-init flow + * so just roll back PM setting to the init setting. 
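+	 * ASPM low power is disabled again here; it is re-enabled by
+	 * t7xx_pci_pm_init_late() once the start-up handshake completes.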
+ */ + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + return t7xx_wait_pm_config(t7xx_dev); +} + +void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev) +{ + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + t7xx_wait_pm_config(t7xx_dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION); +} + +int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) +{ + struct md_pm_entity *entity; + + mutex_lock(&t7xx_dev->md_pm_entity_mtx); + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->id == pm_entity->id) { + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return -EEXIST; + } + } + + list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities); + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return 0; +} + +int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity) +{ + struct md_pm_entity *entity, *tmp_entity; + + mutex_lock(&t7xx_dev->md_pm_entity_mtx); + list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->id == pm_entity->id) { + list_del(&pm_entity->entity); + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + return 0; + } + } + + mutex_unlock(&t7xx_dev->md_pm_entity_mtx); + + return -ENXIO; +} + +static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request) +{ + unsigned long wait_ret; + + reinit_completion(&t7xx_dev->pm_sr_ack); + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request); + wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, + msecs_to_jiffies(PM_ACK_TIMEOUT_MS)); + if (!wait_ret) + return -ETIMEDOUT; + + return 0; +} + +static int __t7xx_pci_pm_suspend(struct pci_dev *pdev) +{ + enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID; + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity *entity; + int ret; + + t7xx_dev = pci_get_drvdata(pdev); + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { + dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); + return -EFAULT; + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + ret = t7xx_wait_pm_config(t7xx_dev); + if (ret) { + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return ret; + } + + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_dev->rgu_pci_irq_en = false; + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (!entity->suspend) + continue; + + ret = entity->suspend(t7xx_dev, entity->entity_param); + if (ret) { + entity_id = entity->id; + dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id); + goto abort_suspend; + } + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ); + if (ret) { + dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret); + goto abort_suspend; + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP); + if (ret) { + t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); + dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret); + goto abort_suspend; + } + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->suspend_late) + entity->suspend_late(t7xx_dev, entity->entity_param); + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return 0; + +abort_suspend: + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity_id == entity->id) + break; + + if (entity->resume) + 
entity->resume(t7xx_dev, entity->entity_param); + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + return ret; +} + +static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev) +{ + t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM); + + /* Disable interrupt first and let the IPs enable them */ + iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0); + + /* Device disables PCIe interrupts during resume and + * following function will re-enable PCIe interrupts. + */ + t7xx_pcie_mac_interrupts_en(t7xx_dev); + t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT); +} + +static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3) +{ + int ret; + + ret = pcim_enable_device(t7xx_dev->pdev); + if (ret) + return ret; + + t7xx_pcie_mac_atr_init(t7xx_dev); + t7xx_pcie_interrupt_reinit(t7xx_dev); + + if (is_d3) { + t7xx_mhccif_init(t7xx_dev); + return t7xx_pci_pm_reinit(t7xx_dev); + } + + return 0; +} + +static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event) +{ + struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl; + struct device *dev = &t7xx_dev->pdev->dev; + int ret = -EINVAL; + + switch (event) { + case FSM_CMD_STOP: + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION); + break; + + case FSM_CMD_START: + t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT); + t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT); + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0); + break; + + default: + break; + } + + if (ret) + dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret); + + return ret; +} + +static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) +{ + struct t7xx_pci_dev *t7xx_dev; + struct md_pm_entity *entity; + u32 prev_state; + int ret = 0; + + t7xx_dev = pci_get_drvdata(pdev); + if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + return 0; + } + + t7xx_pcie_mac_interrupts_en(t7xx_dev); + prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE); + + if (state_check) { + /* For D3/L3 resume, the device could boot so quickly that the + * initial value of the dummy register might be overwritten. + * Identify new boots if the ATR source address register is not initialized. 
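+		 * A new boot is handled like a cold start: stop the FSM,
+		 * re-initialize the PCIe MAC, MHCCIF and PM state, then
+		 * start the FSM again.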
+ */ + u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) + + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR); + if (prev_state == PM_RESUME_REG_STATE_L3 || + (prev_state == PM_RESUME_REG_STATE_INIT && + atr_reg_val == ATR_SRC_ADDR_INVALID)) { + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); + if (ret) + return ret; + + ret = t7xx_pcie_reinit(t7xx_dev, true); + if (ret) + return ret; + + t7xx_clear_rgu_irq(t7xx_dev); + return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START); + } + + if (prev_state == PM_RESUME_REG_STATE_EXP || + prev_state == PM_RESUME_REG_STATE_L2_EXP) { + if (prev_state == PM_RESUME_REG_STATE_L2_EXP) { + ret = t7xx_pcie_reinit(t7xx_dev, false); + if (ret) + return ret; + } + + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + + t7xx_mhccif_mask_clr(t7xx_dev, + D2H_INT_EXCEPTION_INIT | + D2H_INT_EXCEPTION_INIT_DONE | + D2H_INT_EXCEPTION_CLEARQ_DONE | + D2H_INT_EXCEPTION_ALLQ_RESET | + D2H_INT_PORT_ENUM); + + return ret; + } + + if (prev_state == PM_RESUME_REG_STATE_L2) { + ret = t7xx_pcie_reinit(t7xx_dev, false); + if (ret) + return ret; + + } else if (prev_state != PM_RESUME_REG_STATE_L1 && + prev_state != PM_RESUME_REG_STATE_INIT) { + ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP); + if (ret) + return ret; + + t7xx_clear_rgu_irq(t7xx_dev); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); + return 0; + } + } + + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + t7xx_wait_pm_config(t7xx_dev); + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->resume_early) + entity->resume_early(t7xx_dev, entity->entity_param); + } + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ); + if (ret) + dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret); + + ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP); + if (ret) + dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret); + + list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { + if (entity->resume) { + ret = entity->resume(t7xx_dev, entity->entity_param); + if (ret) + dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n", + entity->id, ret); + } + } + + t7xx_dev->rgu_pci_irq_en = true; + t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + + return ret; +} + +static int t7xx_pci_pm_resume_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct t7xx_pci_dev *t7xx_dev; + + t7xx_dev = pci_get_drvdata(pdev); + t7xx_pcie_mac_interrupts_dis(t7xx_dev); + + return 0; +} + +static void t7xx_pci_shutdown(struct pci_dev *pdev) +{ + __t7xx_pci_pm_suspend(pdev); +} + +static int t7xx_pci_pm_suspend(struct device *dev) +{ + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); +} + +static int t7xx_pci_pm_resume(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); +} + +static int t7xx_pci_pm_thaw(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), false); +} + +static const struct dev_pm_ops t7xx_pci_pm_ops = { + .suspend = t7xx_pci_pm_suspend, + .resume = t7xx_pci_pm_resume, + .resume_noirq = t7xx_pci_pm_resume_noirq, + .freeze = t7xx_pci_pm_suspend, + .thaw = t7xx_pci_pm_thaw, + .poweroff = t7xx_pci_pm_suspend, + .restore = t7xx_pci_pm_resume, + .restore_noirq = t7xx_pci_pm_resume_noirq, +}; + static int t7xx_request_irq(struct pci_dev *pdev) { struct t7xx_pci_dev *t7xx_dev; @@ -165,6 +580,10 @@ 
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE]; t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE]; + ret = t7xx_pci_pm_init(t7xx_dev); + if (ret) + return ret; + t7xx_pcie_mac_atr_init(t7xx_dev); t7xx_pci_infracfg_ao_calc(t7xx_dev); t7xx_mhccif_init(t7xx_dev); @@ -216,6 +635,8 @@ static struct pci_driver t7xx_pci_driver = { .id_table = t7xx_pci_table, .probe = t7xx_pci_probe, .remove = t7xx_pci_remove, + .driver.pm = &t7xx_pci_pm_ops, + .shutdown = t7xx_pci_shutdown, }; module_pci_driver(t7xx_pci_driver); diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h index dfd4a2f2095b..f51fc5a1301f 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.h +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -17,7 +17,9 @@ #ifndef __T7XX_PCI_H__ #define __T7XX_PCI_H__ +#include #include +#include #include #include @@ -49,6 +51,10 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); * @md: modem interface * @ccmni_ctlb: context structure used to control the network data path * @rgu_pci_irq_en: RGU callback ISR registered and active + * @md_pm_entities: list of pm entities + * @md_pm_entity_mtx: protects md_pm_entities list + * @pm_sr_ack: ack from the device when went to sleep or woke up + * @md_pm_state: state for resume/suspend */ struct t7xx_pci_dev { t7xx_intr_callback intr_handler[EXT_INT_NUM]; @@ -59,6 +65,46 @@ struct t7xx_pci_dev { struct t7xx_modem *md; struct t7xx_ccmni_ctrl *ccmni_ctlb; bool rgu_pci_irq_en; + + /* Low Power Items */ + struct list_head md_pm_entities; + struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ + struct completion pm_sr_ack; + atomic_t md_pm_state; +}; + +enum t7xx_pm_id { + PM_ENTITY_ID_CTRL1, + PM_ENTITY_ID_CTRL2, + PM_ENTITY_ID_DATA, + PM_ENTITY_ID_INVALID +}; + +/* struct md_pm_entity - device power management entity + * @entity: list of PM Entities + * @suspend: callback invoked before sending D3 request to device + * @suspend_late: callback invoked after getting D3 ACK from device + * @resume_early: callback invoked before sending the resume request to device + * @resume: callback invoked after getting resume ACK from device + * @id: unique PM entity identifier + * @entity_param: parameter passed to the registered callbacks + * + * This structure is used to indicate PM operations required by internal + * HW modules such as CLDMA and DPMA. 
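+ *
+ * Entities are registered with t7xx_pci_pm_entity_register() and iterated
+ * by the suspend and resume handlers in t7xx_pci.c.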
+ */ +struct md_pm_entity { + struct list_head entity; + int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param); + enum t7xx_pm_id id; + void *entity_param; }; +int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); +int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); +void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev); + #endif /* __T7XX_PCI_H__ */ diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index d841f5051af8..0bcca08ff2bd 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -188,6 +188,7 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm case EXCEPTION_EVENT: dev_err(dev, "Exception event\n"); t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION); + t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev); t7xx_md_exception_handshake(ctl->md); fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX, @@ -300,6 +301,7 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl) return -ETIMEDOUT; } + t7xx_pci_pm_init_late(md->t7xx_dev); fsm_routine_ready(ctl); return 0; } -- cgit v1.2.3 From d10b3a695ba0227faf249537402bb72b283a36b8 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:08 -0700 Subject: net: wwan: t7xx: Runtime PM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enables runtime power management callbacks including runtime_suspend and runtime_resume. Autosuspend is used to prevent overhead by frequent wake-ups. Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Eliot Lee Signed-off-by: Eliot Lee Signed-off-by: Ricardo Martinez Reviewed-by: Ilpo Järvinen Signed-off-by: David S. 
Miller --- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 14 ++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 17 +++++++++++++++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 15 +++++++++++++++ drivers/net/wwan/t7xx/t7xx_pci.c | 22 ++++++++++++++++++++++ 4 files changed, 68 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index cbe5ea4495e0..90306eb9858b 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -251,6 +252,8 @@ static void t7xx_cldma_rx_done(struct work_struct *work) t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info); t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX); t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX); + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); } static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) @@ -360,6 +363,9 @@ static void t7xx_cldma_tx_done(struct work_struct *work) t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX); } spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags); + + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); } static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, @@ -568,6 +574,7 @@ static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) { if (i < CLDMA_TXQ_NUM) { + pm_runtime_get(md_ctrl->dev); t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX); t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX); queue_work(md_ctrl->txq[i].worker, @@ -592,6 +599,7 @@ static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl) if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) { l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM; for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) { + pm_runtime_get(md_ctrl->dev); t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX); t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX); queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work); @@ -922,6 +930,10 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb if (qno >= CLDMA_TXQ_NUM) return -EINVAL; + ret = pm_runtime_resume_and_get(md_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return ret; + queue = &md_ctrl->txq[qno]; spin_lock_irqsave(&md_ctrl->cldma_lock, flags); @@ -965,6 +977,8 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb } while (!ret); allow_sleep: + pm_runtime_mark_last_busy(md_ctrl->dev); + pm_runtime_put_autosuspend(md_ctrl->dev); return ret; } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index d110f7edf56b..5f25555eb4a4 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -910,6 +911,7 @@ static void t7xx_dpmaif_rxq_work(struct work_struct *work) { struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work); struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl; + int ret; atomic_set(&rxq->rx_processing, 1); /* Ensure rx_processing is changed to 1 before actually begin RX flow */ @@ -921,7 +923,14 @@ static void t7xx_dpmaif_rxq_work(struct work_struct *work) return; } + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret 
< 0 && ret != -EACCES) + return; + t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); + + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); atomic_set(&rxq->rx_processing, 0); } @@ -1123,11 +1132,19 @@ static void t7xx_dpmaif_bat_release_work(struct work_struct *work) { struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work); struct dpmaif_rx_queue *rxq; + int ret; + + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; t7xx_dpmaif_bat_release_and_add(rxq); t7xx_dpmaif_frag_bat_release_and_add(rxq); + + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl) diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c index e8435a871842..a5ac9dfaecfd 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -161,6 +162,10 @@ static void t7xx_dpmaif_tx_done(struct work_struct *work) struct dpmaif_hw_info *hw_info; int ret; + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return; + hw_info = &dpmaif_ctrl->hw_info; ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); if (ret == -EAGAIN || @@ -174,6 +179,9 @@ static void t7xx_dpmaif_tx_done(struct work_struct *work) t7xx_dpmaif_clr_ip_busy_sts(hw_info); t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); } + + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num, @@ -423,6 +431,7 @@ static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) static int t7xx_dpmaif_tx_hw_push_thread(void *arg) { struct dpmaif_ctrl *dpmaif_ctrl = arg; + int ret; while (!kthread_should_stop()) { if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) || @@ -437,7 +446,13 @@ static int t7xx_dpmaif_tx_hw_push_thread(void *arg) break; } + ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev); + if (ret < 0 && ret != -EACCES) + return ret; + t7xx_do_tx_hw_push(dpmaif_ctrl); + pm_runtime_mark_last_busy(dpmaif_ctrl->dev); + pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } return 0; diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 564147664af0..400c11f7b31e 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "t7xx_mhccif.h" @@ -44,6 +45,7 @@ #define T7XX_PCI_EREG_BASE 2 #define PM_ACK_TIMEOUT_MS 1500 +#define PM_AUTOSUSPEND_MS 20000 #define PM_RESOURCE_POLL_TIMEOUT_US 10000 #define PM_RESOURCE_POLL_STEP_US 100 @@ -82,6 +84,8 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) DPM_FLAG_NO_DIRECT_COMPLETE); iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); + pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); + pm_runtime_use_autosuspend(&pdev->dev); return t7xx_wait_pm_config(t7xx_dev); } @@ -96,6 +100,8 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) D2H_INT_RESUME_ACK_AP); iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); + + 
pm_runtime_put_noidle(&t7xx_dev->pdev->dev); } static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) @@ -104,6 +110,9 @@ static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev) * so just roll back PM setting to the init setting. */ atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); + + pm_runtime_get_noresume(&t7xx_dev->pdev->dev); + iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR); return t7xx_wait_pm_config(t7xx_dev); } @@ -403,6 +412,7 @@ static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check) t7xx_dev->rgu_pci_irq_en = true; t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT); iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR); + pm_runtime_mark_last_busy(&pdev->dev); atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); return ret; @@ -439,6 +449,16 @@ static int t7xx_pci_pm_thaw(struct device *dev) return __t7xx_pci_pm_resume(to_pci_dev(dev), false); } +static int t7xx_pci_pm_runtime_suspend(struct device *dev) +{ + return __t7xx_pci_pm_suspend(to_pci_dev(dev)); +} + +static int t7xx_pci_pm_runtime_resume(struct device *dev) +{ + return __t7xx_pci_pm_resume(to_pci_dev(dev), true); +} + static const struct dev_pm_ops t7xx_pci_pm_ops = { .suspend = t7xx_pci_pm_suspend, .resume = t7xx_pci_pm_resume, @@ -448,6 +468,8 @@ static const struct dev_pm_ops t7xx_pci_pm_ops = { .poweroff = t7xx_pci_pm_suspend, .restore = t7xx_pci_pm_resume, .restore_noirq = t7xx_pci_pm_resume_noirq, + .runtime_suspend = t7xx_pci_pm_runtime_suspend, + .runtime_resume = t7xx_pci_pm_runtime_resume }; static int t7xx_request_irq(struct pci_dev *pdev) -- cgit v1.2.3 From de49ea38ba11c1f0fd9e126e93b2f7eb67ed5020 Mon Sep 17 00:00:00 2001 From: Haijun Liu Date: Fri, 6 May 2022 11:13:09 -0700 Subject: net: wwan: t7xx: Device deep sleep lock/unlock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce the mechanism to lock/unlock the device's 'deep sleep' mode. When the PCIe link state is L1.2 or L2, the host can still keep the device in D0 state from the host's point of view. At the same time, if the device's 'deep sleep' mode is unlocked, the device will go to 'deep sleep' while it is still in D0 state on the host side. Signed-off-by: Haijun Liu Signed-off-by: Chandrashekar Devegowda Co-developed-by: Ricardo Martinez Signed-off-by: Ricardo Martinez Reviewed-by: Ilpo Järvinen Signed-off-by: David S.
Miller --- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 12 ++++ drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 14 ++++- drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 41 +++++++++---- drivers/net/wwan/t7xx/t7xx_mhccif.c | 3 + drivers/net/wwan/t7xx/t7xx_pci.c | 93 ++++++++++++++++++++++++++++++ drivers/net/wwan/t7xx/t7xx_pci.h | 10 ++++ 6 files changed, 158 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 90306eb9858b..46066dcd2607 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -934,6 +934,7 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb if (ret < 0 && ret != -EACCES) return ret; + t7xx_pci_disable_sleep(md_ctrl->t7xx_dev); queue = &md_ctrl->txq[qno]; spin_lock_irqsave(&md_ctrl->cldma_lock, flags); @@ -955,6 +956,11 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry); spin_unlock_irqrestore(&queue->ring_lock, flags); + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { + ret = -ETIMEDOUT; + break; + } + /* Protect the access to the modem for queues operations (resume/start) * which access shared locations by all the queues. * cldma_lock is independent of ring_lock which is per queue. @@ -967,6 +973,11 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb } spin_unlock_irqrestore(&queue->ring_lock, flags); + if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) { + ret = -ETIMEDOUT; + break; + } + if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) { spin_lock_irqsave(&md_ctrl->cldma_lock, flags); t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX); @@ -977,6 +988,7 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb } while (!ret); allow_sleep: + t7xx_pci_enable_sleep(md_ctrl->t7xx_dev); pm_runtime_mark_last_busy(md_ctrl->dev); pm_runtime_put_autosuspend(md_ctrl->dev); return ret; diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index 5f25555eb4a4..35a8a0d7c1ee 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -927,8 +927,11 @@ static void t7xx_dpmaif_rxq_work(struct work_struct *work) if (ret < 0 && ret != -EACCES) return; - t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) + t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq); + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); pm_runtime_mark_last_busy(dpmaif_ctrl->dev); pm_runtime_put_autosuspend(dpmaif_ctrl->dev); atomic_set(&rxq->rx_processing, 0); @@ -1138,11 +1141,16 @@ static void t7xx_dpmaif_bat_release_work(struct work_struct *work) if (ret < 0 && ret != -EACCES) return; + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */ rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT]; - t7xx_dpmaif_bat_release_and_add(rxq); - t7xx_dpmaif_frag_bat_release_and_add(rxq); + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { + t7xx_dpmaif_bat_release_and_add(rxq); + t7xx_dpmaif_frag_bat_release_and_add(rxq); + } + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); pm_runtime_mark_last_busy(dpmaif_ctrl->dev); pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c 
b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c index a5ac9dfaecfd..46514208d4f9 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c @@ -166,20 +166,25 @@ static void t7xx_dpmaif_tx_done(struct work_struct *work) if (ret < 0 && ret != -EACCES) return; - hw_info = &dpmaif_ctrl->hw_info; - ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); - if (ret == -EAGAIN || - (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && - t7xx_dpmaif_drb_ring_not_empty(txq))) { - queue_work(dpmaif_ctrl->txq[txq->index].worker, - &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); - /* Give the device time to enter the low power state */ - t7xx_dpmaif_clr_ip_busy_sts(hw_info); - } else { - t7xx_dpmaif_clr_ip_busy_sts(hw_info); - t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); + /* The device may be in low power state. Disable sleep if needed */ + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); + if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) { + hw_info = &dpmaif_ctrl->hw_info; + ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt); + if (ret == -EAGAIN || + (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) && + t7xx_dpmaif_drb_ring_not_empty(txq))) { + queue_work(dpmaif_ctrl->txq[txq->index].worker, + &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work); + /* Give the device time to enter the low power state */ + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + } else { + t7xx_dpmaif_clr_ip_busy_sts(hw_info); + t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index); + } } + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); pm_runtime_mark_last_busy(dpmaif_ctrl->dev); pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } @@ -405,6 +410,8 @@ static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq) static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) { + bool wait_disable_sleep = true; + do { struct dpmaif_tx_queue *txq; int drb_send_cnt; @@ -420,6 +427,14 @@ static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl) continue; } + /* Wait for the PCIe resource to unlock */ + if (wait_disable_sleep) { + if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) + return; + + wait_disable_sleep = false; + } + t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index, drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD); @@ -450,7 +465,9 @@ static int t7xx_dpmaif_tx_hw_push_thread(void *arg) if (ret < 0 && ret != -EACCES) return ret; + t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev); t7xx_do_tx_hw_push(dpmaif_ctrl); + t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev); pm_runtime_mark_last_busy(dpmaif_ctrl->dev); pm_runtime_put_autosuspend(dpmaif_ctrl->dev); } diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c index 4bb452f5ccff..3ee18d46f8d2 100644 --- a/drivers/net/wwan/t7xx/t7xx_mhccif.c +++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c @@ -59,6 +59,9 @@ static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data) t7xx_mhccif_clear_interrupts(t7xx_dev, int_status); + if (int_status & D2H_INT_DS_LOCK_ACK) + complete_all(&t7xx_dev->sleep_lock_acquire); + if (int_status & D2H_INT_SR_ACK) complete(&t7xx_dev->pm_sr_ack); diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 400c11f7b31e..5f1bb8d6afb6 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "t7xx_mhccif.h" #include "t7xx_modem_ops.h" @@ -44,6 +45,7 @@ #define T7XX_PCI_IREG_BASE 0 #define T7XX_PCI_EREG_BASE 2 +#define 
PM_SLEEP_DIS_TIMEOUT_MS 20 #define PM_ACK_TIMEOUT_MS 1500 #define PM_AUTOSUSPEND_MS 20000 #define PM_RESOURCE_POLL_TIMEOUT_US 10000 @@ -56,6 +58,21 @@ enum t7xx_pm_state { MTK_PM_RESUMED, }; +static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable) +{ + void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL; + u32 value; + + value = ioread32(ctrl_reg); + + if (enable) + value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS; + else + value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS; + + iowrite32(value, ctrl_reg); +} + static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev) { int ret, val; @@ -76,6 +93,8 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev) INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); mutex_init(&t7xx_dev->md_pm_entity_mtx); + spin_lock_init(&t7xx_dev->md_pm_lock); + init_completion(&t7xx_dev->sleep_lock_acquire); init_completion(&t7xx_dev->pm_sr_ack); atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); @@ -94,6 +113,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev) { /* Enable the PCIe resource lock only after MD deep sleep is done */ t7xx_mhccif_mask_clr(t7xx_dev, + D2H_INT_DS_LOCK_ACK | D2H_INT_SUSPEND_ACK | D2H_INT_RESUME_ACK | D2H_INT_SUSPEND_ACK_AP | @@ -159,6 +179,79 @@ int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_en return -ENXIO; } +int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev) +{ + struct device *dev = &t7xx_dev->pdev->dev; + int ret; + + ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, + msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS)); + if (!ret) + dev_err_ratelimited(dev, "Resource wait complete timed out\n"); + + return ret; +} + +/** + * t7xx_pci_disable_sleep() - Disable deep sleep capability. + * @t7xx_dev: MTK device. + * + * Lock the deep sleep capability, note that the device can still go into deep sleep + * state while device is in D0 state, from the host's point-of-view. + * + * If device is in deep sleep state, wake up the device and disable deep sleep capability. + */ +void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); + t7xx_dev->sleep_disable_count++; + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) + goto unlock_and_complete; + + if (t7xx_dev->sleep_disable_count == 1) { + u32 status; + + reinit_completion(&t7xx_dev->sleep_lock_acquire); + t7xx_dev_set_sleep_capability(t7xx_dev, false); + + status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS); + if (status & T7XX_PCIE_RESOURCE_STS_MSK) + goto unlock_and_complete; + + t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK); + } + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); + return; + +unlock_and_complete: + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); + complete_all(&t7xx_dev->sleep_lock_acquire); +} + +/** + * t7xx_pci_enable_sleep() - Enable deep sleep capability. + * @t7xx_dev: MTK device. + * + * After enabling deep sleep, device can enter into deep sleep state. 
+ */ +void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev) +{ + unsigned long flags; + + spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); + t7xx_dev->sleep_disable_count--; + if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) + goto unlock; + + if (t7xx_dev->sleep_disable_count == 0) + t7xx_dev_set_sleep_capability(t7xx_dev, true); + +unlock: + spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); +} + static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request) { unsigned long wait_ret; diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h index f51fc5a1301f..50b37056ce5a 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.h +++ b/drivers/net/wwan/t7xx/t7xx_pci.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "t7xx_reg.h" @@ -55,6 +56,9 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param); * @md_pm_entity_mtx: protects md_pm_entities list * @pm_sr_ack: ack from the device when went to sleep or woke up * @md_pm_state: state for resume/suspend + * @md_pm_lock: protects PCIe sleep lock + * @sleep_disable_count: PCIe L1.2 lock counter + * @sleep_lock_acquire: indicates that sleep has been disabled */ struct t7xx_pci_dev { t7xx_intr_callback intr_handler[EXT_INT_NUM]; @@ -71,6 +75,9 @@ struct t7xx_pci_dev { struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */ struct completion pm_sr_ack; atomic_t md_pm_state; + spinlock_t md_pm_lock; /* Protects PCI resource lock */ + unsigned int sleep_disable_count; + struct completion sleep_lock_acquire; }; enum t7xx_pm_id { @@ -102,6 +109,9 @@ struct md_pm_entity { void *entity_param; }; +void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev); +void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev); +int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev); int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity); void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev); -- cgit v1.2.3 From ca4567f1e6f660f86fcd04f3563c0045b0d4772f Mon Sep 17 00:00:00 2001 From: Alaa Mohamed Date: Thu, 5 May 2022 17:09:57 +0200 Subject: rtnetlink: add extack support in fdb del handlers Add extack support to .ndo_fdb_del in netdevice.h and all related methods. Signed-off-by: Alaa Mohamed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ice/ice_main.c | 3 ++- drivers/net/ethernet/mscc/ocelot_net.c | 3 ++- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 3 ++- drivers/net/macvlan.c | 3 ++- drivers/net/vxlan/vxlan_core.c | 3 ++- include/linux/netdevice.h | 2 +- net/bridge/br_fdb.c | 3 ++- net/bridge/br_private.h | 3 ++- net/core/rtnetlink.c | 4 ++-- 9 files changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 049a3f48caff..867908f94661 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5688,11 +5688,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], * @dev: the net device pointer * @addr: the MAC address entry being added * @vid: VLAN ID + * @extack: netlink extended ack */ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - __always_unused u16 vid) + __always_unused u16 vid, struct netlink_ext_ack *extack) { int err; diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 247bc105bdd2..616d8127ef51 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -774,7 +774,8 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct netlink_ext_ack *extack) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index d320567b2cca..28476b982bab 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -368,7 +368,8 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = -EOPNOTSUPP; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0017bd0fe3bb..eff75beb1395 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1020,7 +1020,8 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 2bda692f70c5..91b7bb01fb10 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1280,7 +1280,8 @@ out: /* Delete entry (via netlink) */ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); union vxlan_addr ip; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8cf0ac616cb9..74c97a34921d 100644 --- a/include/linux/netdevice.h +++ 
b/include/linux/netdevice.h @@ -1513,7 +1513,7 @@ struct net_device_ops { struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 vid); + u16 vid, struct netlink_ext_ack *extack); int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 1a3d583fbc8e..e7f4fccb6adb 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -1253,7 +1253,8 @@ static int __br_fdb_delete(struct net_bridge *br, /* Remove neighbor entry with RTM_DELNEIGH */ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 vid) + const unsigned char *addr, u16 vid, + struct netlink_ext_ack *extack) { struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6ae882cfae1c..06e5f6faa431 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -793,7 +793,8 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid, unsigned long flags); int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, u16 vid); + struct net_device *dev, const unsigned char *addr, u16 vid, + struct netlink_ext_ack *extack); int br_fdb_delete_bulk(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, u16 vid, struct netlink_ext_ack *extack); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e6d4b9272995..6aff02df9ba5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4258,7 +4258,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, ops = br_dev->netdev_ops; if (!del_bulk) { if (ops->ndo_fdb_del) - err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); + err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); } else { if (ops->ndo_fdb_del_bulk) err = ops->ndo_fdb_del_bulk(ndm, tb, dev, vid, @@ -4276,7 +4276,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, ops = dev->netdev_ops; if (!del_bulk) { if (ops->ndo_fdb_del) - err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); + err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack); else err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); } else { -- cgit v1.2.3 From e92695e506d663bc4868ffc5bc187488a4f4d5c8 Mon Sep 17 00:00:00 2001 From: Alaa Mohamed Date: Thu, 5 May 2022 17:09:58 +0200 Subject: net: vxlan: Add extack support to vxlan_fdb_delete This patch adds extack msg support to vxlan_fdb_delete and vxlan_fdb_parse. extack is used to propagate meaningful error msgs to the user of vxlan fdb netlink api Signed-off-by: Alaa Mohamed Signed-off-by: David S. 
Miller --- drivers/net/vxlan/vxlan_core.c | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 91b7bb01fb10..293082c32a78 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1129,19 +1129,25 @@ static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, union vxlan_addr *ip, __be16 *port, __be32 *src_vni, - __be32 *vni, u32 *ifindex, u32 *nhid) + __be32 *vni, u32 *ifindex, u32 *nhid, + struct netlink_ext_ack *extack) { struct net *net = dev_net(vxlan->dev); int err; if (tb[NDA_NH_ID] && (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || - tb[NDA_PORT])) - return -EINVAL; + tb[NDA_PORT])) { + NL_SET_ERR_MSG(extack, + "DST, VNI, ifindex and port are mutually exclusive with NH_ID"); + return -EINVAL; + } if (tb[NDA_DST]) { err = vxlan_nla_get_addr(ip, tb[NDA_DST]); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "Unsupported address family"); return err; + } } else { union vxlan_addr *remote = &vxlan->default_dst.remote_ip; @@ -1157,24 +1163,30 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, } if (tb[NDA_PORT]) { - if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) + if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) { + NL_SET_ERR_MSG(extack, "Invalid vxlan port"); return -EINVAL; + } *port = nla_get_be16(tb[NDA_PORT]); } else { *port = vxlan->cfg.dst_port; } if (tb[NDA_VNI]) { - if (nla_len(tb[NDA_VNI]) != sizeof(u32)) + if (nla_len(tb[NDA_VNI]) != sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid vni"); return -EINVAL; + } *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); } else { *vni = vxlan->default_dst.remote_vni; } if (tb[NDA_SRC_VNI]) { - if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) + if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid src vni"); return -EINVAL; + } *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); } else { *src_vni = vxlan->default_dst.remote_vni; @@ -1183,12 +1195,16 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, if (tb[NDA_IFINDEX]) { struct net_device *tdev; - if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) + if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) { + NL_SET_ERR_MSG(extack, "Invalid ifindex"); return -EINVAL; + } *ifindex = nla_get_u32(tb[NDA_IFINDEX]); tdev = __dev_get_by_index(net, *ifindex); - if (!tdev) + if (!tdev) { + NL_SET_ERR_MSG(extack, "Device not found"); return -EADDRNOTAVAIL; + } } else { *ifindex = 0; } @@ -1226,7 +1242,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -EINVAL; err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, - &nhid); + &nhid, extack); if (err) return err; @@ -1292,7 +1308,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], int err; err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, - &nhid); + &nhid, extack); if (err) return err; -- cgit v1.2.3 From a7f0e4bea8eda1d286f4e99176bcb88f53aa703b Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 6 May 2022 06:23:51 +0200 Subject: net: phy: genphy_c45_baset1_an_config_aneg: do no set unknown configuration Do not change default master/slave autoneg configuration if no changes was requested. Fixes: 3da8ffd8545f ("net: phy: Add 10BASE-T1L support in phy-c45") Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-c45.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index eefdd67d5556..0014aa6e73c0 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -191,8 +191,12 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev) case MASTER_SLAVE_CFG_MASTER_PREFERRED: case MASTER_SLAVE_CFG_SLAVE_PREFERRED: break; + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + return 0; default: - break; + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; } switch (phydev->master_slave_set) { -- cgit v1.2.3 From 90532850eb210dff70423eaf20e323a91ef542d8 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 6 May 2022 06:23:52 +0200 Subject: net: phy: introduce genphy_c45_pma_baset1_setup_master_slave() Move baset1 specific part of genphy_c45_pma_setup_forced() code to separate function to make it reusable by PHY drivers. Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy-c45.c | 49 +++++++++++++++++++++++++++++------------------ include/linux/phy.h | 1 + 2 files changed, 31 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 0014aa6e73c0..ffa2d5cd09fc 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -70,6 +70,35 @@ int genphy_c45_pma_suspend(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_pma_suspend); +/** + * genphy_c45_pma_baset1_setup_master_slave - configures forced master/slave + * role of BaseT1 devices. + * @phydev: target phy_device struct + */ +int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev) +{ + int ctl = 0; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_MASTER_FORCE: + ctl = MDIO_PMA_PMD_BT1_CTRL_CFG_MST; + break; + case MASTER_SLAVE_CFG_SLAVE_FORCE: + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + break; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + return phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_CFG_MST, ctl); +} +EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_setup_master_slave); + /** * genphy_c45_pma_setup_forced - configures a forced speed * @phydev: target phy_device struct @@ -141,25 +170,7 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) return ret; if (genphy_c45_baset1_able(phydev)) { - int ctl = 0; - - switch (phydev->master_slave_set) { - case MASTER_SLAVE_CFG_MASTER_PREFERRED: - case MASTER_SLAVE_CFG_MASTER_FORCE: - ctl = MDIO_PMA_PMD_BT1_CTRL_CFG_MST; - break; - case MASTER_SLAVE_CFG_SLAVE_FORCE: - case MASTER_SLAVE_CFG_SLAVE_PREFERRED: - case MASTER_SLAVE_CFG_UNKNOWN: - case MASTER_SLAVE_CFG_UNSUPPORTED: - break; - default: - phydev_warn(phydev, "Unsupported Master/Slave mode\n"); - return -EOPNOTSUPP; - } - - ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, - MDIO_PMA_PMD_BT1_CTRL_CFG_MST, ctl); + ret = genphy_c45_pma_baset1_setup_master_slave(phydev); if (ret < 0) return ret; } diff --git a/include/linux/phy.h b/include/linux/phy.h index 2d12054932ba..d3f924d3b235 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1614,6 +1614,7 @@ int genphy_c45_read_link(struct phy_device *phydev); int genphy_c45_read_lpa(struct phy_device 
*phydev); int genphy_c45_read_pma(struct phy_device *phydev); int genphy_c45_pma_setup_forced(struct phy_device *phydev); +int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev); int genphy_c45_an_config_aneg(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); -- cgit v1.2.3 From a04dd88f77a4036ceedd42ed1fbefa68008e1850 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 6 May 2022 06:23:53 +0200 Subject: net: phy: genphy_c45_pma_baset1_setup_master_slave: do not set unknown configuration Do not change the default master/slave forced configuration if no changes were requested. Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy-c45.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index ffa2d5cd09fc..b1f7c63f66cd 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -86,9 +86,10 @@ int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev) break; case MASTER_SLAVE_CFG_SLAVE_FORCE: case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + break; case MASTER_SLAVE_CFG_UNKNOWN: case MASTER_SLAVE_CFG_UNSUPPORTED: - break; + return 0; default: phydev_warn(phydev, "Unsupported Master/Slave mode\n"); return -EOPNOTSUPP; -- cgit v1.2.3 From b9a366f3d874ce1c9c0148ec399f2ce4ab05a467 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 6 May 2022 06:23:54 +0200 Subject: net: phy: introduce genphy_c45_pma_baset1_read_master_slave() Move the baset1-specific part of the genphy_c45_read_pma() code to a separate function to make it reusable by PHY drivers. Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S.
Miller --- drivers/net/phy/phy-c45.c | 31 +++++++++++++++++++++++++------ include/linux/phy.h | 1 + 2 files changed, 26 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index b1f7c63f66cd..d440b76a18b4 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -550,6 +550,30 @@ int genphy_c45_read_lpa(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_lpa); +/** + * genphy_c45_pma_baset1_read_master_slave - read forced master/slave + * configuration + * @phydev: target phy_device struct + */ +int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev) +{ + int val; + + phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL); + if (val < 0) + return val; + + if (val & MDIO_PMA_PMD_BT1_CTRL_CFG_MST) + phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; + else + phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_read_master_slave); + /** * genphy_c45_read_pma - read link speed etc from PMA * @phydev: target phy_device struct @@ -591,14 +615,9 @@ int genphy_c45_read_pma(struct phy_device *phydev) phydev->duplex = DUPLEX_FULL; if (genphy_c45_baset1_able(phydev)) { - val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL); + val = genphy_c45_pma_baset1_read_master_slave(phydev); if (val < 0) return val; - - if (MDIO_PMA_PMD_BT1_CTRL_CFG_MST) - phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; - else - phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; } return 0; diff --git a/include/linux/phy.h b/include/linux/phy.h index d3f924d3b235..4713c95d65fb 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1619,6 +1619,7 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); +int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); int genphy_c45_loopback(struct phy_device *phydev, bool enable); -- cgit v1.2.3 From acb8c5aec2b13dab96f0be33feb10a5b1213f113 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 6 May 2022 06:23:55 +0200 Subject: net: phy: genphy_c45_pma_baset1_read_master_slave: read actual configuration Since MDIO_PMA_PMD_BT1_CTRL register shows actual configuration (and forced state configuration is equal to the state), we should show this configuration for ethtool. Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-c45.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index d440b76a18b4..a0684c716a2e 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -560,15 +560,19 @@ int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev) int val; phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL); if (val < 0) return val; - if (val & MDIO_PMA_PMD_BT1_CTRL_CFG_MST) + if (val & MDIO_PMA_PMD_BT1_CTRL_CFG_MST) { + phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE; phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER; - else + } else { + phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE; phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE; + } return 0; } -- cgit v1.2.3 From 2013ad8836aceb828c0fb5b16fa8bb98fd3d9b28 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 6 May 2022 06:23:56 +0200 Subject: net: phy: export genphy_c45_baset1_read_status() Export genphy_c45_baset1_read_status() to make it reusable by PHY drivers. Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy-c45.c | 3 ++- include/linux/phy.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index a0684c716a2e..29b1df03f3e8 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -785,7 +785,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities); * is forced or not, it is read from BASE-T1 AN advertisement * register 7.514. */ -static int genphy_c45_baset1_read_status(struct phy_device *phydev) +int genphy_c45_baset1_read_status(struct phy_device *phydev) { int ret; int cfg; @@ -815,6 +815,7 @@ static int genphy_c45_baset1_read_status(struct phy_device *phydev) return 0; } +EXPORT_SYMBOL_GPL(genphy_c45_baset1_read_status); /** * genphy_c45_read_status - read PHY status diff --git a/include/linux/phy.h b/include/linux/phy.h index 4713c95d65fb..508f1149665b 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1621,6 +1621,7 @@ int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); +int genphy_c45_baset1_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); int genphy_c45_loopback(struct phy_device *phydev, bool enable); int genphy_c45_pma_resume(struct phy_device *phydev); -- cgit v1.2.3 From 165cd04fe25392f875ebb94188c4faa20905290d Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Fri, 6 May 2022 06:23:57 +0200 Subject: net: phy: dp83td510: Add support for the DP83TD510 Ethernet PHY The DP83TD510E is an ultra-low power Ethernet physical layer transceiver that supports 10M single pair cable. This driver was tested with NXP SJA1105, STMMAC and ASIX AX88772B USB Ethernet controller. Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/Kconfig | 6 ++ drivers/net/phy/Makefile | 1 + drivers/net/phy/dp83td510.c | 209 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 216 insertions(+) create mode 100644 drivers/net/phy/dp83td510.c (limited to 'drivers') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index bbbf6c07ea53..9fee639ee5c8 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -342,6 +342,12 @@ config DP83869_PHY Currently supports the DP83869 PHY. This PHY supports copper and fiber connections. +config DP83TD510_PHY + tristate "Texas Instruments DP83TD510 Ethernet 10Base-T1L PHY" + help + Support for the DP83TD510 Ethernet 10Base-T1L PHY. This PHY supports + a 10M single pair Ethernet connection for up to 1000 meter cable. + config VITESSE_PHY tristate "Vitesse PHYs" help diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index b82651b57043..b12b1d86fc99 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_DP83869_PHY) += dp83869.o obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o +obj-$(CONFIG_DP83TD510_PHY) += dp83td510.o obj-$(CONFIG_FIXED_PHY) += fixed_phy.o obj-$(CONFIG_ICPLUS_PHY) += icplus.o obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c new file mode 100644 index 000000000000..1ae792b0daaa --- /dev/null +++ b/drivers/net/phy/dp83td510.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for the Texas Instruments DP83TD510 PHY + * Copyright (c) 2022 Pengutronix, Oleksij Rempel + */ + +#include +#include +#include +#include + +#define DP83TD510E_PHY_ID 0x20000181 + +/* MDIO_MMD_VEND2 registers */ +#define DP83TD510E_PHY_STS 0x10 +#define DP83TD510E_STS_MII_INT BIT(7) +#define DP83TD510E_LINK_STATUS BIT(0) + +#define DP83TD510E_GEN_CFG 0x11 +#define DP83TD510E_GENCFG_INT_POLARITY BIT(3) +#define DP83TD510E_GENCFG_INT_EN BIT(1) +#define DP83TD510E_GENCFG_INT_OE BIT(0) + +#define DP83TD510E_INTERRUPT_REG_1 0x12 +#define DP83TD510E_INT1_LINK BIT(13) +#define DP83TD510E_INT1_LINK_EN BIT(5) + +#define DP83TD510E_AN_STAT_1 0x60c +#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15) + +static int dp83td510_config_intr(struct phy_device *phydev) +{ + int ret; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* Clear any pending interrupts */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS, + 0x0); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_INTERRUPT_REG_1, + DP83TD510E_INT1_LINK_EN); + if (ret) + return ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_GEN_CFG, + DP83TD510E_GENCFG_INT_POLARITY | + DP83TD510E_GENCFG_INT_EN | + DP83TD510E_GENCFG_INT_OE); + } else { + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_INTERRUPT_REG_1, 0x0); + if (ret) + return ret; + + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_GEN_CFG, + DP83TD510E_GENCFG_INT_EN); + if (ret) + return ret; + + /* Clear any pending interrupts */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS, + 0x0); + } + + return ret; +} + +static irqreturn_t dp83td510_handle_interrupt(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS); + if (ret < 0) { + phy_error(phydev); + return IRQ_NONE; + } else if (!(ret & DP83TD510E_STS_MII_INT)) { + return IRQ_NONE; + } + + /* Read the current enabled interrupts */ 
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1); + if (ret < 0) { + phy_error(phydev); + return IRQ_NONE; + } else if (!(ret & DP83TD510E_INT1_LINK_EN) || + !(ret & DP83TD510E_INT1_LINK)) { + return IRQ_NONE; + } + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static int dp83td510_read_status(struct phy_device *phydev) +{ + u16 phy_sts; + int ret; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + linkmode_zero(phydev->lp_advertising); + + phy_sts = phy_read(phydev, DP83TD510E_PHY_STS); + + phydev->link = !!(phy_sts & DP83TD510E_LINK_STATUS); + if (phydev->link) { + /* This PHY supports only one link mode: 10BaseT1L_Full */ + phydev->duplex = DUPLEX_FULL; + phydev->speed = SPEED_10; + + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = genphy_c45_read_lpa(phydev); + if (ret) + return ret; + + phy_resolve_aneg_linkmode(phydev); + } + } + + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = genphy_c45_baset1_read_status(phydev); + if (ret < 0) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_AN_STAT_1); + if (ret < 0) + return ret; + + if (ret & DP83TD510E_MASTER_SLAVE_RESOL_FAIL) + phydev->master_slave_state = MASTER_SLAVE_STATE_ERR; + } else { + return genphy_c45_pma_baset1_read_master_slave(phydev); + } + + return 0; +} + +static int dp83td510_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + int ret; + + ret = genphy_c45_pma_baset1_setup_master_slave(phydev); + if (ret < 0) + return ret; + + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_an_disable_aneg(phydev); + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} + +static int dp83td510_get_features(struct phy_device *phydev) +{ + /* This PHY can't respond on MDIO bus if no RMII clock is enabled. + * In case RMII mode is used (most meaningful mode for this PHY) and + * the PHY do not have own XTAL, and CLK providing MAC is not probed, + * we won't be able to read all needed ability registers. + * So provide it manually. + */ + + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, + phydev->supported); + + return 0; +} + +static struct phy_driver dp83td510_driver[] = { +{ + PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID), + .name = "TI DP83TD510E", + + .config_aneg = dp83td510_config_aneg, + .read_status = dp83td510_read_status, + .get_features = dp83td510_get_features, + .config_intr = dp83td510_config_intr, + .handle_interrupt = dp83td510_handle_interrupt, + + .suspend = genphy_suspend, + .resume = genphy_resume, +} }; +module_phy_driver(dp83td510_driver); + +static struct mdio_device_id __maybe_unused dp83td510_tbl[] = { + { PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID) }, + { } +}; +MODULE_DEVICE_TABLE(mdio, dp83td510_tbl); + +MODULE_DESCRIPTION("Texas Instruments DP83TD510E PHY driver"); +MODULE_AUTHOR("Oleksij Rempel "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From e078286a1375c3073e11c71add0c19e9f5816f71 Mon Sep 17 00:00:00 2001 From: Yuiko Oshino Date: Thu, 5 May 2022 11:12:51 -0700 Subject: net: phy: microchip: update LAN88xx phy ID and phy ID mask. update LAN88xx phy ID and phy ID mask because the existing code conflicts with the LAN8742 phy. 
The current phy IDs on the available hardware. LAN8742 0x0007C130, 0x0007C131 LAN88xx 0x0007C132 Signed-off-by: Yuiko Oshino Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/microchip.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 9f1f2b6c97d4..131caf659ed2 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -344,8 +344,8 @@ static int lan88xx_config_aneg(struct phy_device *phydev) static struct phy_driver microchip_phy_driver[] = { { - .phy_id = 0x0007c130, - .phy_id_mask = 0xfffffff0, + .phy_id = 0x0007c132, + .phy_id_mask = 0xfffffff2, .name = "Microchip LAN88xx", /* PHY_GBIT_FEATURES */ @@ -369,7 +369,7 @@ static struct phy_driver microchip_phy_driver[] = { module_phy_driver(microchip_phy_driver); static struct mdio_device_id __maybe_unused microchip_tbl[] = { - { 0x0007c130, 0xfffffff0 }, + { 0x0007c132, 0xfffffff2 }, { } }; -- cgit v1.2.3 From 53ad228682899689d8a3a0f91e399febe88a1db3 Mon Sep 17 00:00:00 2001 From: Yuiko Oshino Date: Thu, 5 May 2022 11:12:52 -0700 Subject: net: phy: smsc: add LAN8742 phy support. The current phy IDs on the available hardware. LAN8742 0x0007C130, 0x0007C131 Signed-off-by: Yuiko Oshino Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/smsc.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index d8cac02a79b9..44fa9e00cc50 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -481,6 +481,32 @@ static struct phy_driver smsc_phy_driver[] = { .get_strings = smsc_get_strings, .get_stats = smsc_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, +}, { + .phy_id = 0x0007c130, /* 0x0007c130 and 0x0007c131 */ + .phy_id_mask = 0xfffffff2, + .name = "Microchip LAN8742", + + /* PHY_BASIC_FEATURES */ + .flags = PHY_RST_AFTER_CLK_EN, + + .probe = smsc_phy_probe, + + /* basic functions */ + .read_status = lan87xx_read_status, + .config_init = smsc_phy_config_init, + .soft_reset = smsc_phy_reset, + + /* IRQ related */ + .config_intr = smsc_phy_config_intr, + .handle_interrupt = smsc_phy_handle_interrupt, + + /* Statistics */ + .get_sset_count = smsc_get_sset_count, + .get_strings = smsc_get_strings, + .get_stats = smsc_get_stats, + .suspend = genphy_suspend, .resume = genphy_resume, } }; @@ -498,6 +524,7 @@ static struct mdio_device_id __maybe_unused smsc_tbl[] = { { 0x0007c0d0, 0xfffffff0 }, { 0x0007c0f0, 0xfffffff0 }, { 0x0007c110, 0xfffffff0 }, + { 0x0007c130, 0xfffffff2 }, { } }; -- cgit v1.2.3 From 4fb3f1f1818cdfded6d40ff6881a9a5e78cc8609 Mon Sep 17 00:00:00 2001 From: Chin-Yen Lee Date: Sat, 7 May 2022 07:50:45 +0800 Subject: rtw88: adjust adaptivity option to 1 Fine tune algorithm of adaptivity sensitivity to avoid disconnecting from AP suddenly in field. 
Signed-off-by: Chin-Yen Lee Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506235045.4669-1-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index e344e058f943..090610e48d08 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -1786,7 +1786,7 @@ void rtw_fw_adaptivity(struct rtw_dev *rtwdev) SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY); SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode); - SET_ADAPTIVITY_OPTION(h2c_pkt, 2); + SET_ADAPTIVITY_OPTION(h2c_pkt, 1); SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]); SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini); SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density); -- cgit v1.2.3 From f63bc788727c591a3a6186327882048a75bb2bef Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 4 May 2022 12:25:26 +0300 Subject: bcma: gpio: Switch to use fwnode instead of of_node The GPIO library now accepts fwnode as a firmware node, so switch the driver to use it. Signed-off-by: Andy Shevchenko Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504092525.71668-1-andriy.shevchenko@linux.intel.com --- drivers/bcma/driver_gpio.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 1e74ec1c7f23..fac8ff983aec 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -11,6 +11,8 @@ #include #include #include +#include + #include #include "bcma_private.h" @@ -182,9 +184,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->direction_input = bcma_gpio_direction_input; chip->direction_output = bcma_gpio_direction_output; chip->parent = bus->dev; -#if IS_BUILTIN(CONFIG_OF) - chip->of_node = cc->core->dev.of_node; -#endif + chip->fwnode = dev_fwnode(&cc->core->dev); + switch (bus->chipinfo.id) { case BCMA_CHIP_ID_BCM4707: case BCMA_CHIP_ID_BCM5357: -- cgit v1.2.3 From 9497b7880ffd71fcbd469970f6ef45bb55877bfd Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Wed, 4 May 2022 23:46:36 -0400 Subject: ath11k: reset 11d state in process of recovery When the simulate_fw_crash operation is performed periodically with a short interval such as 10 seconds, a WMI command timeout for WMI_SCAN_CHAN_LIST_CMDID easily occurs in ath11k_reg_update_chan_list(). log: [42287.610053] ath11k_pci 0000:01:00.0: wmi command 12291 timeout [42287.610064] ath11k_pci 0000:01:00.0: failed to send WMI_SCAN_CHAN_LIST cmd [42287.610073] ath11k_pci 0000:01:00.0: failed to perform regd update : -11 Note that this issue does not occur with a longer interval such as 20 seconds. The reason the issue occurs with a shorter interval comes down to the following steps: 1) Upon initial boot, or after device recovery, the initial hw scan plus the 11d scan will run, and when 6 GHz support is present, these scans can take up to 12 seconds to complete, so ath11k_reg_update_chan_list() is still waiting for the completion of ar->completed_11d_scan. 2) If a simulate_fw_crash operation is received during this time, those scans do not complete, and ath11k_core_pre_reconfigure_recovery() completes ar->completed_11d_scan; ath11k_reg_update_chan_list() then wakes up and starts to send WMI_SCAN_CHAN_LIST_CMDID, but the firmware has already crashed at this moment, so the WMI command times out.
To address this issue, reset the 11d state during device recovery so that WMI_SCAN_CHAN_LIST_CMDID does not timed out for short interval time such as 10 seconds. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Fixes: 1f682dc9fb37 ("ath11k: reduce the wait time of 11d scan and hw scan while add interface") Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220505034636.29582-1-quic_wgong@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 1 + drivers/net/wireless/ath/ath11k/mac.c | 5 +++++ drivers/net/wireless/ath/ath11k/reg.c | 3 +++ 3 files changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 01e1d494b527..236215aa3867 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1620,6 +1620,7 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab) ieee80211_stop_queues(ar->hw); ath11k_mac_drain_tx(ar); + ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); complete(&ar->scan.started); complete(&ar->scan.completed); diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index df15fa42d8ad..9b28fdac3c94 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -6155,6 +6155,11 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar) ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac stop 11d vdev id %d\n", ar->vdev_id_11d_scan); + if (ar->state_11d == ATH11K_11D_PREPARING) { + ar->state_11d = ATH11K_11D_IDLE; + complete(&ar->completed_11d_scan); + } + if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) { vdev_id = ar->vdev_id_11d_scan; diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 79ac2142317a..7ee3ff69dfc8 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -139,6 +139,9 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait) "reg hw scan wait left time %d\n", left); } + if (ar->state == ATH11K_STATE_RESTARTING) + return 0; + bands = hw->wiphy->bands; for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!bands[band]) -- cgit v1.2.3 From a4fe9b6db6f93694fa14b41e764a2c0f147f13da Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Mon, 9 May 2022 15:55:27 +0800 Subject: net: hns3: fix access null pointer issue when set tx-buf-size as 0 When set tx-buf-size as 0 by ethtool, hns3_init_tx_spare_buffer() will return directly and priv->ring->tx_spare->len is uninitialized, then print function access priv->ring->tx_spare->len will cause this issue. When set tx-buf-size as 0 by ethtool, the print function will print 0 directly and not access priv->ring->tx_spare->len. Fixes: 2373b35c24ff ("net: hns3: add log for setting tx spare buf size") Signed-off-by: Hao Chen Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 1db8a86f046d..6d20974519fe 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1915,8 +1915,11 @@ static int hns3_set_tunable(struct net_device *netdev, return ret; } - netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n", - priv->ring->tx_spare->len); + if (!priv->ring->tx_spare) + netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n"); + else + netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n", + priv->ring->tx_spare->len); break; default: -- cgit v1.2.3 From bbed702412041c370d336f076b783bc9e5e04c21 Mon Sep 17 00:00:00 2001 From: Yufeng Mo Date: Mon, 9 May 2022 15:55:28 +0800 Subject: net: hns3: remove the affinity settings of vector0 Vector0 is used for common interrupt control events and is irrelevant to performance. Currently, the driver sets the default affinity of vector0 to NUMA nodes, which is unnecessary. Therefore, the default setting is removed, and the driver does not set the affinity for vector0. Signed-off-by: Yufeng Mo Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 27 +--------------------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 -- 2 files changed, 1 insertion(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a5dd2c8c244a..1ebad0e50e6a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1546,9 +1546,8 @@ static void hclge_init_tc_config(struct hclge_dev *hdev) static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; - int node, ret; + int ret; ret = hclge_get_cfg(hdev, &cfg); if (ret) @@ -1594,13 +1593,6 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_tc_config(hdev); hclge_init_kdump_kernel_config(hdev); - /* Set the affinity based on numa node */ - node = dev_to_node(&hdev->pdev->dev); - if (node != NUMA_NO_NODE) - cpumask = cpumask_of_node(node); - - cpumask_copy(&hdev->affinity_mask, cpumask); - return ret; } @@ -3564,17 +3556,6 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) hdev->num_msi_used += 1; } -static void hclge_misc_affinity_setup(struct hclge_dev *hdev) -{ - irq_set_affinity_hint(hdev->misc_vector.vector_irq, - &hdev->affinity_mask); -} - -static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) -{ - irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); -} - static int hclge_misc_irq_init(struct hclge_dev *hdev) { int ret; @@ -11457,11 +11438,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); - /* Setup affinity after service timer setup because add_timer_on - * is called in affinity notify. 
- */ - hclge_misc_affinity_setup(hdev); - hclge_clear_all_event_cause(hdev); hclge_clear_resetting_state(hdev); @@ -11879,7 +11855,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_reset_vf_rate(hdev); hclge_clear_vf_vlan(hdev); - hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); hclge_ptp_uninit(hdev); hclge_uninit_rxd_adv_layout(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index c70239758bb2..ab5c37848a7b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -938,8 +938,6 @@ struct hclge_dev { DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); - /* affinity mask and notify for misc interrupt */ - cpumask_t affinity_mask; struct hclge_ptp *ptp; struct devlink *devlink; struct hclge_comm_rss_cfg rss_cfg; -- cgit v1.2.3 From 767975e582c50b39d633f6e1c4bb99cc1f156efb Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Mon, 9 May 2022 15:55:29 +0800 Subject: net: hns3: add byte order conversion for PF to VF mailbox message Currently, hns3 mailbox processing between PF and VF missed to convert message byte order and use data type u16 instead of __le16 for mailbox data process. These processes may cause problems between different architectures. So this patch uses __le16/__le32 data type to define mailbox data structures. To be compatible with old hns3 driver, these structures use one-byte alignment. Then byte order conversions are added to mailbox messages from PF to VF. Signed-off-by: Jie Wang Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 36 ++++++++-- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 60 ++++++++-------- .../ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 2 +- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4 +- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 80 +++++++++++++--------- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 2 +- 7 files changed, 109 insertions(+), 77 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 8c7fadf2b734..e1ba0ae055b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -134,13 +134,13 @@ struct hclge_vf_to_pf_msg { }; struct hclge_pf_to_vf_msg { - u16 code; + __le16 code; union { /* used for mbx response */ struct { - u16 vf_mbx_msg_code; - u16 vf_mbx_msg_subcode; - u16 resp_status; + __le16 vf_mbx_msg_code; + __le16 vf_mbx_msg_subcode; + __le16 resp_status; u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; }; /* used for general mbx */ @@ -157,7 +157,7 @@ struct hclge_mbx_vf_to_pf_cmd { u8 rsv1[1]; u8 msg_len; u8 rsv2; - u16 match_id; + __le16 match_id; struct hclge_vf_to_pf_msg msg; }; @@ -168,7 +168,7 @@ struct hclge_mbx_pf_to_vf_cmd { u8 rsv[3]; u8 msg_len; u8 rsv1; - u16 match_id; + __le16 match_id; struct hclge_pf_to_vf_msg msg; }; @@ -178,6 +178,28 @@ struct hclge_vf_rst_cmd { u8 rsv[22]; }; +#pragma pack(1) +struct hclge_mbx_link_status { + __le16 link_status; + __le32 speed; + __le16 duplex; + u8 flag; +}; + +struct hclge_mbx_link_mode { + __le16 idx; + __le64 link_mode; +}; + +struct hclge_mbx_port_base_vlan { + __le16 state; + __le16 vlan_proto; + __le16 qos; + __le16 vlan_tag; +}; + +#pragma pack() + /* used by VF 
to store the received Async responses from PF */ struct hclgevf_mbx_arq_ring { #define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 @@ -186,7 +208,7 @@ struct hclgevf_mbx_arq_ring { u32 head; u32 tail; atomic_t count; - u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; + __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; #define hclge_mbx_ring_ptr_move_crq(crq) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 49c40744cda5..d3c4d2c4a5ba 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -57,17 +57,19 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; resp_pf_to_vf->match_id = vf_to_pf_req->match_id; - resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; - resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; - resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode; + resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP); + resp_pf_to_vf->msg.vf_mbx_msg_code = + cpu_to_le16(vf_to_pf_req->msg.code); + resp_pf_to_vf->msg.vf_mbx_msg_subcode = + cpu_to_le16(vf_to_pf_req->msg.subcode); resp = hclge_errno_to_resp(resp_msg->status); if (resp < SHRT_MAX) { - resp_pf_to_vf->msg.resp_status = resp; + resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp); } else { dev_warn(&hdev->pdev->dev, "failed to send response to VF, response status %u is out-of-bound\n", resp); - resp_pf_to_vf->msg.resp_status = EIO; + resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO); } if (resp_msg->len > 0) @@ -107,7 +109,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, resp_pf_to_vf->dest_vfid = dest_vfid; resp_pf_to_vf->msg_len = msg_len; - resp_pf_to_vf->msg.code = mbx_opcode; + resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode); memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len); @@ -125,8 +127,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; + __le16 msg_data; u16 reset_type; - u8 msg_data[2]; u8 dest_vfid; BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); @@ -140,10 +142,10 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) else reset_type = HNAE3_VF_FUNC_RESET; - memcpy(&msg_data[0], &reset_type, sizeof(u16)); + msg_data = cpu_to_le16(reset_type); /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), HCLGE_MBX_ASSERTING_RESET, dest_vfid); } @@ -339,16 +341,14 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, u16 state, struct hclge_vlan_info *vlan_info) { -#define MSG_DATA_SIZE 8 + struct hclge_mbx_port_base_vlan base_vlan; - u8 msg_data[MSG_DATA_SIZE]; + base_vlan.state = cpu_to_le16(state); + base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto); + base_vlan.qos = cpu_to_le16(vlan_info->qos); + base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag); - memcpy(&msg_data[0], &state, sizeof(u16)); - memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16)); - memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16)); - memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16)); - - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan), HCLGE_MBX_PUSH_VLAN_INFO, vfid); } @@ -488,10 
+488,9 @@ int hclge_push_vf_link_status(struct hclge_vport *vport) #define HCLGE_VF_LINK_STATE_UP 1U #define HCLGE_VF_LINK_STATE_DOWN 0U + struct hclge_mbx_link_status link_info; struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[9]; - u16 duplex; /* mac.link can only be 0 or 1 */ switch (vport->vf_info.link_state) { @@ -507,14 +506,13 @@ int hclge_push_vf_link_status(struct hclge_vport *vport) break; } - duplex = hdev->hw.mac.duplex; - memcpy(&msg_data[0], &link_status, sizeof(u16)); - memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); - memcpy(&msg_data[6], &duplex, sizeof(u16)); - msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN; + link_info.link_status = cpu_to_le16(link_status); + link_info.speed = cpu_to_le32(hdev->hw.mac.speed); + link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex); + link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN; /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info), HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); } @@ -522,22 +520,22 @@ static void hclge_get_link_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { #define HCLGE_SUPPORTED 1 + struct hclge_mbx_link_mode link_mode; struct hclge_dev *hdev = vport->back; unsigned long advertising; unsigned long supported; unsigned long send_data; - u8 msg_data[10] = {}; u8 dest_vfid; advertising = hdev->hw.mac.advertising[0]; supported = hdev->hw.mac.supported[0]; dest_vfid = mbx_req->mbx_src_vfid; - msg_data[0] = mbx_req->msg.data[0]; - - send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising; + send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported : + advertising; + link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]); + link_mode.link_mode = cpu_to_le64(send_data); - memcpy(&msg_data[2], &send_data, sizeof(unsigned long)); - hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode), HCLGE_MBX_LINK_STAT_MODE, dest_vfid); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h index 5b0b71bd6120..8510b88d4982 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -62,7 +62,7 @@ TRACE_EVENT(hclge_pf_mbx_send, TP_fast_assign( __entry->vfid = req->dest_vfid; - __entry->code = req->msg.code; + __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index e13d71abd9f7..38a26797c9d4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -3333,7 +3333,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, } void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, - u8 *port_base_vlan_info, u8 data_size) + struct hclge_mbx_port_base_vlan *port_base_vlan) { struct hnae3_handle *nic = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; @@ -3358,7 +3358,7 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, /* send msg to PF and wait update port based vlan info */ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 
HCLGE_MBX_PORT_BASE_VLAN_CFG); - memcpy(send_msg.data, port_base_vlan_info, data_size); + memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan)); ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); if (!ret) { if (state == HNAE3_PORT_BASE_VLAN_DISABLE) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 4b00fd44f118..59ca6c794d6d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -293,5 +293,5 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, - u8 *port_base_vlan_info, u8 data_size); + struct hclge_mbx_port_base_vlan *port_base_vlan); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index c8055d69255c..bbf7b14079de 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -124,7 +124,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); hclgevf_reset_mbx_resp_status(hdev); - req->match_id = hdev->mbx_resp.match_id; + req->match_id = cpu_to_le16(hdev->mbx_resp.match_id); status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) { dev_err(&hdev->pdev->dev, @@ -162,27 +162,29 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, struct hclge_mbx_pf_to_vf_cmd *req) { + u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode); + u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code); struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp; + u16 resp_status = le16_to_cpu(req->msg.resp_status); + u16 match_id = le16_to_cpu(req->match_id); if (resp->received_resp) dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%u)\n", - req->msg.vf_mbx_msg_code); - - resp->origin_mbx_msg = - (req->msg.vf_mbx_msg_code << 16); - resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; - resp->resp_status = - hclgevf_resp_to_errno(req->msg.resp_status); + "VF mbx resp flag not clear(%u)\n", + vf_mbx_msg_code); + + resp->origin_mbx_msg = (vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= vf_mbx_msg_subcode; + resp->resp_status = hclgevf_resp_to_errno(resp_status); memcpy(resp->additional_info, req->msg.resp_data, HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); - if (req->match_id) { + if (match_id) { /* If match_id is not zero, it means PF support match_id. * if the match_id is right, VF get the right response, or * ignore the response. and driver will clear hdev->mbx_resp * when send next message which need response. 
*/ - if (req->match_id == resp->match_id) + if (match_id == resp->match_id) resp->received_resp = true; } else { resp->received_resp = true; @@ -199,7 +201,7 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, "Async Q full, dropping msg(%u)\n", - req->msg.code); + le16_to_cpu(req->msg.code)); return; } @@ -218,6 +220,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) struct hclge_comm_cmq_ring *crq; struct hclge_desc *desc; u16 flag; + u16 code; crq = &hdev->hw.hw.cmq.crq; @@ -232,10 +235,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + code = le16_to_cpu(req->msg.code); if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %u\n", - req->msg.code); + code); /* dropping/not processing this invalid message */ crq->desc[crq->next_to_use].flag = 0; @@ -251,7 +255,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) * timeout and simultaneously queue the async messages for later * prcessing in context of mailbox task i.e. the slow path. */ - switch (req->msg.code) { + switch (code) { case HCLGE_MBX_PF_VF_RESP: hclgevf_handle_mbx_response(hdev, req); break; @@ -265,7 +269,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) default: dev_err(&hdev->pdev->dev, "VF received unsupported(%u) mbx msg from PF\n", - req->msg.code); + code); break; } crq->desc[crq->next_to_use].flag = 0; @@ -287,14 +291,18 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev, void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { + struct hclge_mbx_port_base_vlan *vlan_info; + struct hclge_mbx_link_status *link_info; + struct hclge_mbx_link_mode *link_mode; enum hnae3_reset_type reset_type; u16 link_status, state; - u16 *msg_q, *vlan_info; + __le16 *msg_q; + u16 opcode; u8 duplex; u32 speed; u32 tail; u8 flag; - u8 idx; + u16 idx; tail = hdev->arq.tail; @@ -308,13 +316,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) } msg_q = hdev->arq.msg_q[hdev->arq.head]; - - switch (msg_q[0]) { + opcode = le16_to_cpu(msg_q[0]); + switch (opcode) { case HCLGE_MBX_LINK_STAT_CHANGE: - link_status = msg_q[1]; - memcpy(&speed, &msg_q[2], sizeof(speed)); - duplex = (u8)msg_q[4]; - flag = (u8)msg_q[5]; + link_info = (struct hclge_mbx_link_status *)(msg_q + 1); + link_status = le16_to_cpu(link_info->link_status); + speed = le32_to_cpu(link_info->speed); + duplex = (u8)le16_to_cpu(link_info->duplex); + flag = link_info->flag; /* update upper layer with new link link status */ hclgevf_update_speed_duplex(hdev, speed, duplex); @@ -326,13 +335,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) break; case HCLGE_MBX_LINK_STAT_MODE: - idx = (u8)msg_q[1]; + link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1); + idx = le16_to_cpu(link_mode->idx); if (idx) - memcpy(&hdev->hw.mac.supported, &msg_q[2], - sizeof(unsigned long)); + hdev->hw.mac.supported = + le64_to_cpu(link_mode->link_mode); else - memcpy(&hdev->hw.mac.advertising, &msg_q[2], - sizeof(unsigned long)); + hdev->hw.mac.advertising = + le64_to_cpu(link_mode->link_mode); break; case HCLGE_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pending @@ -340,25 +350,27 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) * has been completely reset. After this stack should * eventually be re-initialized. 
*/ - reset_type = (enum hnae3_reset_type)msg_q[1]; + reset_type = + (enum hnae3_reset_type)le16_to_cpu(msg_q[1]); set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); break; case HCLGE_MBX_PUSH_VLAN_INFO: - state = msg_q[1]; - vlan_info = &msg_q[1]; + vlan_info = + (struct hclge_mbx_port_base_vlan *)(msg_q + 1); + state = le16_to_cpu(vlan_info->state); hclgevf_update_port_base_vlan_info(hdev, state, - (u8 *)vlan_info, 8); + vlan_info); break; case HCLGE_MBX_PUSH_PROMISC_INFO: - hclgevf_parse_promisc_info(hdev, msg_q[1]); + hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1])); break; default: dev_err(&hdev->pdev->dev, "fetched unsupported(%u) message from arq\n", - msg_q[0]); + opcode); break; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h index e4bfb6191fef..5d4895bb57a1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -29,7 +29,7 @@ TRACE_EVENT(hclge_vf_mbx_get, TP_fast_assign( __entry->vfid = req->dest_vfid; - __entry->code = req->msg.code; + __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); __assign_str(devname, &hdev->nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, -- cgit v1.2.3 From 416eedb60361151b3eeb0f52cb8a37b6d47be328 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Mon, 9 May 2022 15:55:30 +0800 Subject: net: hns3: add byte order conversion for VF to PF mailbox message This patch uses __le16/__32 to define mailbox data structures. Then byte order conversion are added for mailbox messages from VF to PF. Signed-off-by: Jie Wang Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 25 +++++++++- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 4 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 50 ++++++++++---------- .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 54 ++++++++++------------ 4 files changed, 75 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index e1ba0ae055b0..c52876555d4b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -92,8 +92,8 @@ struct hclge_ring_chain_param { struct hclge_basic_info { u8 hw_tc_map; u8 rsv; - u16 mbx_api_version; - u32 pf_caps; + __le16 mbx_api_version; + __le32 pf_caps; }; struct hclgevf_mbx_resp_status { @@ -198,6 +198,27 @@ struct hclge_mbx_port_base_vlan { __le16 vlan_tag; }; +struct hclge_mbx_vf_queue_info { + __le16 num_tqps; + __le16 rss_size; + __le16 rx_buf_len; +}; + +struct hclge_mbx_vf_queue_depth { + __le16 num_tx_desc; + __le16 num_rx_desc; +}; + +struct hclge_mbx_vlan_filter { + u8 is_kill; + __le16 vlan_id; + __le16 proto; +}; + +struct hclge_mbx_mtu_info { + __le32 mtu; +}; + #pragma pack() /* used by VF to store the received Async responses from PF */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index ab5c37848a7b..18caddd541f8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -780,8 +780,8 @@ struct hclge_vf_vlan_cfg { union { struct { u8 is_kill; - u16 vlan; - u16 proto; + __le16 vlan; + __le16 proto; }; u8 enable; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index d3c4d2c4a5ba..6957b5e158c9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -362,13 +362,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; struct hclge_vf_vlan_cfg *msg_cmd; + __be16 proto; + u16 vlan_id; msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; switch (msg_cmd->subcode) { case HCLGE_MBX_VLAN_FILTER: - return hclge_set_vlan_filter(handle, - cpu_to_be16(msg_cmd->proto), - msg_cmd->vlan, msg_cmd->is_kill); + proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto)); + vlan_id = le16_to_cpu(msg_cmd->vlan); + return hclge_set_vlan_filter(handle, proto, vlan_id, + msg_cmd->is_kill); case HCLGE_MBX_VLAN_RX_OFF_CFG: return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable); case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE: @@ -411,15 +414,17 @@ static void hclge_get_basic_info(struct hclge_vport *vport, struct hnae3_ae_dev *ae_dev = vport->back->ae_dev; struct hclge_basic_info *basic_info; unsigned int i; + u32 pf_caps; basic_info = (struct hclge_basic_info *)resp_msg->data; for (i = 0; i < kinfo->tc_info.num_tc; i++) basic_info->hw_tc_map |= BIT(i); + pf_caps = le32_to_cpu(basic_info->pf_caps); if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) - hnae3_set_bit(basic_info->pf_caps, - HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + basic_info->pf_caps = cpu_to_le32(pf_caps); resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; } @@ -427,19 +432,15 @@ static void hclge_get_vf_queue_info(struct hclge_vport *vport, struct 
hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_TQPS_RSS_INFO_LEN 6 -#define HCLGE_TQPS_ALLOC_OFFSET 0 -#define HCLGE_TQPS_RSS_SIZE_OFFSET 2 -#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4 + struct hclge_mbx_vf_queue_info *queue_info; struct hclge_dev *hdev = vport->back; /* get the queue related info */ - memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET], - &vport->alloc_tqps, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET], - &vport->nic.kinfo.rss_size, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET], - &hdev->rx_buf_len, sizeof(u16)); + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data; + queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps); + queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size); + queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len); resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN; } @@ -454,16 +455,15 @@ static void hclge_get_vf_queue_depth(struct hclge_vport *vport, struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_TQPS_DEPTH_INFO_LEN 4 -#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0 -#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2 + struct hclge_mbx_vf_queue_depth *queue_depth; struct hclge_dev *hdev = vport->back; /* get the queue depth info */ - memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET], - &hdev->num_tx_desc, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET], - &hdev->num_rx_desc, sizeof(u16)); + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data; + queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc); + queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc); + resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN; } @@ -549,7 +549,7 @@ static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport, u16 queue_id; int ret; - memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE; resp_msg->len = sizeof(u8); @@ -585,9 +585,11 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport) static int hclge_set_vf_mtu(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { + struct hclge_mbx_mtu_info *mtu_info; u32 mtu; - memcpy(&mtu, mbx_req->msg.data, sizeof(mtu)); + mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data; + mtu = le32_to_cpu(mtu_info->mtu); return hclge_set_vport_mtu(vport, mtu); } @@ -600,7 +602,7 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; u16 queue_id, qid_in_pf; - memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); if (queue_id >= handle->kinfo.num_tqps) { dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", queue_id, mbx_req->mbx_src_vfid); @@ -608,7 +610,7 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, } qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); - memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); + *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf); resp_msg->len = sizeof(qid_in_pf); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 38a26797c9d4..5eaf09ea4009 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -189,8 +189,8 @@ static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) basic_info = (struct hclge_basic_info *)resp_msg; hdev->hw_tc_map = basic_info->hw_tc_map; - 
hdev->mbx_api_version = basic_info->mbx_api_version; - caps = basic_info->pf_caps; + hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version); + caps = le32_to_cpu(basic_info->pf_caps); if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); @@ -223,10 +223,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 6 -#define HCLGEVF_TQPS_ALLOC_OFFSET 0 -#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 -#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 + struct hclge_mbx_vf_queue_info *queue_info; u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; struct hclge_vf_to_pf_msg send_msg; int status; @@ -241,12 +239,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) return status; } - memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], - sizeof(u16)); - memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], - sizeof(u16)); - memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], - sizeof(u16)); + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg; + hdev->num_tqps = le16_to_cpu(queue_info->num_tqps); + hdev->rss_size_max = le16_to_cpu(queue_info->rss_size); + hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len); return 0; } @@ -254,9 +250,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 -#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 -#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 + struct hclge_mbx_vf_queue_depth *queue_depth; u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -271,10 +266,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) return ret; } - memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], - sizeof(u16)); - memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], - sizeof(u16)); + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg; + hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc); + hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc); return 0; } @@ -288,11 +282,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) int ret; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); - memcpy(send_msg.data, &queue_id, sizeof(queue_id)); + *(__le16 *)send_msg.data = cpu_to_le16(queue_id); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, sizeof(resp_data)); if (!ret) - qid_in_pf = *(u16 *)resp_data; + qid_in_pf = le16_to_cpu(*(__le16 *)resp_data); return qid_in_pf; } @@ -1245,11 +1239,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) { -#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 -#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 -#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_vlan_filter *vlan_filter; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -1271,11 +1262,11 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_VLAN_FILTER); - send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; - memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, - sizeof(vlan_id)); - memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, - sizeof(proto)); + vlan_filter = (struct hclge_mbx_vlan_filter 
*)send_msg.data; + vlan_filter->is_kill = is_kill; + vlan_filter->vlan_id = cpu_to_le16(vlan_id); + vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto)); + /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence * with stack. @@ -1347,7 +1338,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle) for (i = 1; i < handle->kinfo.num_tqps; i++) { hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); - memcpy(send_msg.data, &i, sizeof(i)); + *(__le16 *)send_msg.data = cpu_to_le16(i); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); if (ret) return ret; @@ -1359,10 +1350,13 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle) static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_mtu_info *mtu_info; struct hclge_vf_to_pf_msg send_msg; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); - memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); + mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data; + mtu_info->mtu = cpu_to_le32(new_mtu); + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); } -- cgit v1.2.3 From a1aed456e3261c0096e36618db9aa61d5974ad16 Mon Sep 17 00:00:00 2001 From: Guangbin Huang Date: Mon, 9 May 2022 15:55:31 +0800 Subject: net: hns3: add query vf ring and vector map relation This patch adds a new mailbox opcode to query map relation between vf ring and vector. Signed-off-by: Guangbin Huang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 1 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 83 ++++++++++++++++++++++ 2 files changed, 84 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index c52876555d4b..7d4ae467f3ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ + HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 6957b5e158c9..e1012f7f9b73 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -251,6 +251,81 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, return ret; } +static int hclge_query_ring_vector_map(struct hclge_vport *vport, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_desc *desc) +{ + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc->data; + struct hclge_dev *hdev = vport->back; + u16 tqp_type_and_id; + int status; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true); + + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + ring_chain->tqp_index); + req->tqp_type_and_id[0] = 
cpu_to_le16(tqp_type_and_id); + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Get VF ring vector map info fail, status is %d.\n", + status); + + return status; +} + +static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req, + struct hclge_respond_to_vf_msg *resp) +{ +#define HCLGE_LIMIT_RING_NUM 1 +#define HCLGE_RING_TYPE_OFFSET 0 +#define HCLGE_TQP_INDEX_OFFSET 1 +#define HCLGE_INT_GL_INDEX_OFFSET 2 +#define HCLGE_VECTOR_ID_OFFSET 3 +#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4 + struct hnae3_ring_chain_node ring_chain; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *data = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + u16 tqp_type_and_id; + u8 int_gl_index; + int ret; + + req->msg.ring_num = HCLGE_LIMIT_RING_NUM; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc); + if (ret) { + hclge_free_vector_ring_chain(&ring_chain); + return ret; + } + + tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]); + int_gl_index = hnae3_get_field(tqp_type_and_id, + HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S); + + resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type; + resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index; + resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index; + resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l; + resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN; + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { @@ -755,6 +830,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret = hclge_map_unmap_ring_to_vf_vector(vport, false, req); break; + case HCLGE_MBX_GET_RING_VECTOR_MAP: + ret = hclge_get_vf_ring_vector_map(vport, req, + &resp_msg); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to get VF ring vector map\n", + ret); + break; case HCLGE_MBX_SET_PROMISC_MODE: hclge_set_vf_promisc_mode(vport, req); break; -- cgit v1.2.3 From 443edfd6d43d59652a6062e89de680fc0c824f84 Mon Sep 17 00:00:00 2001 From: Guangbin Huang Date: Mon, 9 May 2022 15:55:32 +0800 Subject: net: hns3: fix incorrect type of argument in declaration of function hclge_comm_get_rss_indir_tbl The argument rss_ind_tbl_size is type u16 in function definition of hclge_comm_get_rss_indir_tbl(), but it is set to type __le16 in function declaration by mistake, so fix it. Signed-off-by: Guangbin Huang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h index aa1d7a6ff4ca..946d166a452d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h @@ -106,7 +106,7 @@ int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, u8 *hfunc); void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, - u32 *indir, __le16 rss_ind_tbl_size); + u32 *indir, u16 rss_ind_tbl_size); int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, const u8 *key); int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg, -- cgit v1.2.3 From 34e244ea150769fed3dbfd1d1cb33a088216d3ac Mon Sep 17 00:00:00 2001 From: Yu Xiao Date: Sun, 8 May 2022 19:38:15 +0200 Subject: nfp: vendor neutral strings for chip and Corigine in strings for driver Historically the nfp driver has supported NFP chips with Netronome's PCIE vendor ID. In preparation for extending the driver to also support NFP chips that have Corigine's PCIE vendor ID (0x1da8) make printk statements relating to the chip vendor neutral. An alternate approach is to set the string based on the PCI vendor ID. In our judgement this proved too cumbersome so we have taken this simpler approach. Update strings relating to the driver to use Corigine, who have taken over maintenance of the driver. Signed-off-by: Yu Xiao Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 8 +++++--- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index eeda39e34f84..08757cd6c7c5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -865,7 +865,9 @@ static int __init nfp_main_init(void) { int err; - pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n", + pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2020 Netronome Systems\n", + nfp_driver_name); + pr_info("%s: NFP PCIe Driver, Copyright (C) 2021-2022 Corigine Inc.\n", nfp_driver_name); nfp_net_debugfs_create(); @@ -909,6 +911,6 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw"); -MODULE_AUTHOR("Netronome Systems "); +MODULE_AUTHOR("Corigine, Inc.
"); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver."); +MODULE_DESCRIPTION("The Network Flow Processor (NFP) driver."); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index c60ead337d06..e3594a5c2a85 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1984,7 +1984,7 @@ static const struct udp_tunnel_nic_info nfp_udp_tunnels = { */ void nfp_net_info(struct nfp_net *nn) { - nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", + nn_info(nn, "NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n", nn->dp.is_vf ? "VF " : "", nn->dp.num_tx_rings, nn->max_tx_rings, nn->dp.num_rx_rings, nn->max_rx_rings); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 0d1d39edbbae..bd47a5717d37 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1314,7 +1314,7 @@ nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_i int err; /* Finished with card initialization. */ - dev_info(&pdev->dev, "Netronome Flow Processor %s PCIe Card Probe\n", + dev_info(&pdev->dev, "Network Flow Processor %s PCIe Card Probe\n", dev_info->chip_names); pcie_print_link_status(pdev); -- cgit v1.2.3 From 299ba7a32a3ca2ad2955bbad6b30eb88d1ced62d Mon Sep 17 00:00:00 2001 From: Yu Xiao Date: Sun, 8 May 2022 19:38:16 +0200 Subject: nfp: support Corigine PCIE vendor ID Historically the nfp driver has supported NFP chips with Netronome's PCIE vendor ID. This patch extends the driver to also support NFP chips, which at this point are assumed to be otherwise identical from a software perspective, that have Corigine's PCIE vendor ID (0x1da8). Also, Rename the macro definitions PCI_DEVICE_ID_NERTONEOME_NFPXXXX to PCI_DEVICE_ID_NFPXXXX, as they are now used in conjunction with two PCIE vendor IDs. 
Signed-off-by: Yu Xiao Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 30 +++++++++++++++++----- .../net/ethernet/netronome/nfp/nfp_netvf_main.c | 12 +++++++-- .../ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 16 ++++++------ .../net/ethernet/netronome/nfp/nfpcore/nfp_dev.h | 8 ++++++ 4 files changed, 50 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 08757cd6c7c5..4f88d17536c3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -33,22 +33,38 @@ static const char nfp_driver_name[] = "nfp"; static const struct pci_device_id nfp_pci_device_ids[] = { - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP3800, }, - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP6000, }, - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP5000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP6000, }, - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP6000, }, + { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800, + PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP3800, + }, + { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP4000, + PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP6000, + }, + { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP5000, + PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP6000, + }, + { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000, + PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP6000, + }, { 0, } /* Required last entry. 
*/ }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); @@ -681,8 +697,10 @@ static int nfp_pci_probe(struct pci_dev *pdev, struct nfp_pf *pf; int err; - if (pdev->vendor == PCI_VENDOR_ID_NETRONOME && - pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000_VF) + if ((pdev->vendor == PCI_VENDOR_ID_NETRONOME || + pdev->vendor == PCI_VENDOR_ID_CORIGINE) && + (pdev->device == PCI_DEVICE_ID_NFP3800_VF || + pdev->device == PCI_DEVICE_ID_NFP6000_VF)) dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n"); dev_info = &nfp_dev_info[pci_id->driver_data]; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index a51eb26dd977..e19bb0150cb5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -38,14 +38,22 @@ struct nfp_net_vf { static const char nfp_net_driver_name[] = "nfp_netvf"; static const struct pci_device_id nfp_netvf_pci_device_ids[] = { - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP3800_VF, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800_VF, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF, }, - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000_VF, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF, }, + { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800_VF, + PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF, + }, + { PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000_VF, + PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, + PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF, + }, { 0, } /* Required last entry. */ }; MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index bd47a5717d37..33b4c2856316 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -621,13 +621,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) nfp->dev_info->pcie_expl_offset; switch (nfp->pdev->device) { - case PCI_DEVICE_ID_NETRONOME_NFP3800: + case PCI_DEVICE_ID_NFP3800: pf = nfp->pdev->devfn & 7; nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); break; - case PCI_DEVICE_ID_NETRONOME_NFP4000: - case PCI_DEVICE_ID_NETRONOME_NFP5000: - case PCI_DEVICE_ID_NETRONOME_NFP6000: + case PCI_DEVICE_ID_NFP4000: + case PCI_DEVICE_ID_NFP5000: + case PCI_DEVICE_ID_NFP6000: nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); break; default: @@ -640,12 +640,12 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) } switch (nfp->pdev->device) { - case PCI_DEVICE_ID_NETRONOME_NFP3800: + case PCI_DEVICE_ID_NFP3800: expl_groups = 1; break; - case PCI_DEVICE_ID_NETRONOME_NFP4000: - case PCI_DEVICE_ID_NETRONOME_NFP5000: - case PCI_DEVICE_ID_NETRONOME_NFP6000: + case PCI_DEVICE_ID_NFP4000: + case PCI_DEVICE_ID_NFP5000: + case PCI_DEVICE_ID_NFP6000: expl_groups = 4; break; default: diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h index d4189869cf7b..e4d38178de0f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h @@ -6,6 +6,14 @@ #include +#define PCI_VENDOR_ID_CORIGINE 0x1da8 +#define PCI_DEVICE_ID_NFP3800 0x3800 +#define PCI_DEVICE_ID_NFP4000 0x4000 +#define PCI_DEVICE_ID_NFP5000 
0x5000 +#define PCI_DEVICE_ID_NFP6000 0x6000 +#define PCI_DEVICE_ID_NFP3800_VF 0x3803 +#define PCI_DEVICE_ID_NFP6000_VF 0x6003 + enum nfp_dev_id { NFP_DEV_NFP3800, NFP_DEV_NFP3800_VF, -- cgit v1.2.3 From 8324a02c342a36336114a497130826612ed5520d Mon Sep 17 00:00:00 2001 From: Gavin Li Date: Sun, 27 Mar 2022 17:45:32 +0300 Subject: net/mlx5: Add exit route when waiting for FW Currently, removing a device needs to get the driver interface lock before doing any cleanup. If the driver is waiting in a loop for FW init, there is no way to cancel the wait, instead the device cleanup waits for the loop to conclude and release the lock. To allow immediate response to remove device commands, check the TEARDOWN flag while waiting for FW init, and exit the loop if it has been set. Signed-off-by: Gavin Li Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 5 ++++- include/linux/mlx5/driver.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 35e48ef04845..f28a3526aafa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -189,7 +189,8 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, fw_initializing = ioread32be(&dev->iseg->initializing); if (!(fw_initializing >> 31)) break; - if (time_after(jiffies, end)) { + if (time_after(jiffies, end) || + test_and_clear_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) { err = -EBUSY; break; } @@ -1602,6 +1603,7 @@ static void remove_one(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(dev); + set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); devlink_unregister(devlink); mlx5_sriov_disable(pdev); mlx5_crdump_disable(dev); @@ -1785,6 +1787,7 @@ static void shutdown(struct pci_dev *pdev) int err; mlx5_core_info(dev, "Shutdown was called\n"); + set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); err = mlx5_try_fast_unload(dev); if (err) mlx5_unload_one(dev); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ff47d49d8be4..f327d0544038 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -632,6 +632,7 @@ enum mlx5_device_state { enum mlx5_interface_state { MLX5_INTERFACE_STATE_UP = BIT(0), + MLX5_BREAK_FW_WAIT = BIT(1), }; enum mlx5_pci_status { -- cgit v1.2.3 From 37ca95e62ee23fa6d2c2c64e3dc40b4a0c0146dc Mon Sep 17 00:00:00 2001 From: Gavin Li Date: Sun, 27 Mar 2022 17:36:44 +0300 Subject: net/mlx5: Increase FW pre-init timeout for health recovery Currently, health recovery will reload driver to recover it from fatal errors. During the driver's load process, it would wait for FW to set the pre-init bit for up to 120 seconds, beyond this threshold it would abort the load process. In some cases, such as a FW upgrade on the DPU, this timeout period is insufficient, and the user has no way to recover the host device. To solve this issue, introduce a new FW pre-init timeout for health recovery, which is set to 2 hours. The timeout for devlink reload and probe will use the original one because they are user triggered flows, and therefore should not have a significantly long timeout, during which the user command would hang. 
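Combined with the previous patch, the pre-init wait is a polling loop that stops either when the deadline passes (2 hours for health recovery, 120 seconds for user-triggered probe/reload) or when a teardown path sets the break bit; a condensed sketch of this pattern (simplified example_* names, not the exact mlx5 code) is:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define EXAMPLE_BREAK_FW_WAIT	1	/* mirrors the MLX5_BREAK_FW_WAIT flag */

/* Stand-in for reading the firmware "initializing" bit. */
static bool example_fw_is_ready(void)
{
	return false;
}

/* Poll until firmware is ready, the deadline passes, or teardown breaks the wait. */
static int example_wait_fw_init(unsigned long *state, u64 timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	while (!example_fw_is_ready()) {
		if (time_after(jiffies, end) ||
		    test_and_clear_bit(EXAMPLE_BREAK_FW_WAIT, state))
			return -EBUSY;
		msleep(2);	/* the real driver polls every FW_PRE_INIT_WAIT = 2 ms */
	}
	return 0;
}

static int example_load_one(unsigned long *state, bool recovery)
{
	/* Health recovery gets the long budget; devlink reload and probe keep 120 s. */
	u64 timeout_ms = recovery ? 7200000 : 120000;

	return example_wait_fw_init(state, timeout_ms);
}
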
Signed-off-by: Gavin Li Reviewed-by: Moshe Shemesh Reviewed-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/main.c | 23 +++++++++++++--------- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2 +- 6 files changed, 20 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index e8789e6d7e7b..f85166e587f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -178,13 +178,13 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a *actions_performed = BIT(action); switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: - return mlx5_load_one(dev); + return mlx5_load_one(dev, false); case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) break; /* On fw_activate action, also driver is reloaded and reinit performed */ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); - return mlx5_load_one(dev); + return mlx5_load_one(dev, false); default: /* Unsupported action should not get to this function */ WARN_ON(1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index ca1aba845dd6..84df0d56a2b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -148,7 +148,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) { complete(&fw_reset->done); } else { - mlx5_load_one(dev); + mlx5_load_one(dev, false); devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0, BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c index c1df0d3595d8..d758848d34d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c @@ -10,6 +10,7 @@ struct mlx5_timeouts { static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = { [MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000, + [MLX5_TO_FW_PRE_INIT_ON_RECOVERY_TIMEOUT_MS] = 7200000, [MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000, [MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2, [MLX5_TO_FW_INIT_MS] = 2000, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h index 1c42ead782fa..257c03eeab36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h @@ -7,6 +7,7 @@ enum mlx5_timeouts_types { /* pre init timeouts (not read from FW) */ MLX5_TO_FW_PRE_INIT_TIMEOUT_MS, + MLX5_TO_FW_PRE_INIT_ON_RECOVERY_TIMEOUT_MS, MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS, MLX5_TO_FW_PRE_INIT_WAIT_MS, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f28a3526aafa..84f75aa25214 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1003,7 +1003,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) 
mlx5_devcom_unregister_device(dev->priv.devcom); } -static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) +static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout) { int err; @@ -1018,11 +1018,11 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot) /* wait for firmware to accept initialization segments configurations */ - err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT), + err = wait_fw_init(dev, timeout, mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL)); if (err) { mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n", - mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); + timeout); return err; } @@ -1272,7 +1272,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev) mutex_lock(&dev->intf_state_mutex); dev->state = MLX5_DEVICE_STATE_UP; - err = mlx5_function_setup(dev, true); + err = mlx5_function_setup(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); if (err) goto err_function; @@ -1336,9 +1336,10 @@ out: mutex_unlock(&dev->intf_state_mutex); } -int mlx5_load_one(struct mlx5_core_dev *dev) +int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery) { int err = 0; + u64 timeout; mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { @@ -1348,7 +1349,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev) /* remove any previous indication of internal error */ dev->state = MLX5_DEVICE_STATE_UP; - err = mlx5_function_setup(dev, false); + if (recovery) + timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT); + else + timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT); + err = mlx5_function_setup(dev, timeout); if (err) goto err_function; @@ -1719,7 +1724,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev) mlx5_pci_trace(dev, "Enter, loading driver..\n"); - err = mlx5_load_one(dev); + err = mlx5_load_one(dev, false); mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err, !err ? "recovered" : "Failed"); @@ -1807,7 +1812,7 @@ static int mlx5_resume(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); - return mlx5_load_one(dev); + return mlx5_load_one(dev, false); } static const struct pci_device_id mlx5_core_pci_table[] = { @@ -1852,7 +1857,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev) return -EIO; } - return mlx5_load_one(dev); + return mlx5_load_one(dev, true); } static struct pci_driver mlx5_core_driver = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index a9b2d6ead542..9026be1d6223 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -290,7 +290,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev); int mlx5_init_one(struct mlx5_core_dev *dev); void mlx5_uninit_one(struct mlx5_core_dev *dev); void mlx5_unload_one(struct mlx5_core_dev *dev); -int mlx5_load_one(struct mlx5_core_dev *dev); +int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery); int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out); -- cgit v1.2.3 From 34a30d7635a8e37275a7b63bec09035ed762969b Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 1 Mar 2022 15:42:01 +0000 Subject: net/mlx5: Lag, expose number of lag ports Downstream patches will add support for hardware lag with more than 2 ports. Add a way for users to query the number of lag ports. 
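As a usage illustration only (a sketch with placeholder names, not code from this series): a consumer can size per-port resources from the queried lag width instead of the compile-time maximum, which is what the gsi.c hunk below does with dev->lag_ports.

#include <stdio.h>
#include <stdlib.h>

#define MAX_PORTS 4     /* compile-time upper bound */

/* Placeholder standing in for a query like mlx5_lag_get_num_ports(). */
static unsigned int query_num_lag_ports(void)
{
        return 2;       /* e.g. a 2-port HCA bonded into one lag */
}

int main(void)
{
        unsigned int nports = query_num_lag_ports();
        int *qps = calloc(nports, sizeof(*qps));  /* one entry per lag port, not per MAX_PORTS */

        if (!qps)
                return 1;
        printf("allocated %u entries (max would be %d)\n", nports, MAX_PORTS);
        free(qps);
        return 0;
}
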
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/infiniband/hw/mlx5/gsi.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 1 + drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + drivers/infiniband/hw/mlx5/qp.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 6 ++++++ include/linux/mlx5/driver.h | 1 + 6 files changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c index 3ad8f637c589..b804f2dd5628 100644 --- a/drivers/infiniband/hw/mlx5/gsi.c +++ b/drivers/infiniband/hw/mlx5/gsi.c @@ -100,7 +100,7 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp, port_type) == MLX5_CAP_PORT_TYPE_IB) num_qps = pd->device->attrs.max_pkeys; else if (dev->lag_active) - num_qps = MLX5_MAX_PORTS; + num_qps = dev->lag_ports; } gsi = &mqp->gsi; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 61aa196d6484..61a3b767262f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2991,6 +2991,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) } dev->flow_db->lag_demux_ft = ft; + dev->lag_ports = mlx5_lag_get_num_ports(mdev); dev->lag_active = true; return 0; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 4f04bb55c4c6..8b3c83c0b70a 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1131,6 +1131,7 @@ struct mlx5_ib_dev { struct xarray sig_mrs; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; u16 pkey_table_len; + u8 lag_ports; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 3f467557d34e..fb8669c02546 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3907,7 +3907,7 @@ static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev, tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity; return (unsigned int)atomic_add_return(1, tx_port_affinity) % - MLX5_MAX_PORTS + 1; + (dev->lag_active ? 
dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1; } static bool qp_supports_affinity(struct mlx5_ib_qp *qp) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 6cad3b72c133..fe34cce77d07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -1185,6 +1185,12 @@ unlock: } EXPORT_SYMBOL(mlx5_lag_get_slave_port); +u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev) +{ + return MLX5_MAX_PORTS; +} +EXPORT_SYMBOL(mlx5_lag_get_num_ports); + struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev) { struct mlx5_core_dev *peer_dev = NULL; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index f327d0544038..62ea1120de9c 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1142,6 +1142,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, int num_counters, size_t *offsets); struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev); +u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, -- cgit v1.2.3 From 8a6e75e5f57e9ac82268d9bfca3403598d9d0292 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Sun, 27 Feb 2022 12:23:34 +0000 Subject: net/mlx5: devcom only supports 2 ports Devcom API is intended to be used between 2 devices only add this implied assumption into the code and check when it's no true. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c | 16 +++++++++------- drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h | 2 ++ 2 files changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index bced2efe9bef..adefde3ea941 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -14,7 +14,7 @@ static LIST_HEAD(devcom_list); struct mlx5_devcom_component { struct { void *data; - } device[MLX5_MAX_PORTS]; + } device[MLX5_DEVCOM_PORTS_SUPPORTED]; mlx5_devcom_event_handler_t handler; struct rw_semaphore sem; @@ -25,7 +25,7 @@ struct mlx5_devcom_list { struct list_head list; struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS]; - struct mlx5_core_dev *devs[MLX5_MAX_PORTS]; + struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED]; }; struct mlx5_devcom { @@ -74,13 +74,15 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) if (!mlx5_core_is_pf(dev)) return NULL; + if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED) + return NULL; sguid0 = mlx5_query_nic_system_image_guid(dev); list_for_each_entry(iter, &devcom_list, list) { struct mlx5_core_dev *tmp_dev = NULL; idx = -1; - for (i = 0; i < MLX5_MAX_PORTS; i++) { + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) { if (iter->devs[i]) tmp_dev = iter->devs[i]; else @@ -134,11 +136,11 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) kfree(devcom); - for (i = 0; i < MLX5_MAX_PORTS; i++) + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) if (priv->devs[i]) break; - if (i != MLX5_MAX_PORTS) + if (i != MLX5_DEVCOM_PORTS_SUPPORTED) return; 
list_del(&priv->list); @@ -191,7 +193,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); - for (i = 0; i < MLX5_MAX_PORTS; i++) + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) if (i != devcom->idx && comp->device[i].data) { err = comp->handler(event, comp->device[i].data, event_data); @@ -239,7 +241,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, return NULL; } - for (i = 0; i < MLX5_MAX_PORTS; i++) + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) if (i != devcom->idx) break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 939d5bf1581b..94313c18bb64 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -6,6 +6,8 @@ #include +#define MLX5_DEVCOM_PORTS_SUPPORTED 2 + enum mlx5_devcom_components { MLX5_DEVCOM_ESW_OFFLOADS, -- cgit v1.2.3 From 4202ea95a6b64f6d773af9f901c74197a6431fa8 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 1 Mar 2022 16:45:41 +0000 Subject: net/mlx5: Lag, move E-Switch prerequisite check into lag code There is no need to expose E-Switch function for something that can be checked with already present API inside lag code. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 11 ----------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3 --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 11 +++++++++-- 3 files changed, 9 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 25f2d2717aaa..8ef22893e5e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1893,17 +1893,6 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode); -bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) -{ - if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE && - dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) || - (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS && - dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS)) - return true; - - return false; -} - bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index bac5160837c5..a5ae5df4d6f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -518,8 +518,6 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2); } -bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, - struct mlx5_core_dev *dev1); bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1); @@ -724,7 +722,6 @@ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} -static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } static inline bool 
mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } static inline int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index fe34cce77d07..1de843d2f248 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -457,12 +457,19 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) { +#ifdef CONFIG_MLX5_ESWITCH + u8 mode; +#endif + if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev) return false; #ifdef CONFIG_MLX5_ESWITCH - return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev, - ldev->pf[MLX5_LAG_P2].dev); + mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev); + + return (mode == MLX5_ESWITCH_NONE || mode == MLX5_ESWITCH_OFFLOADS) && + (mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev) == + mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P2].dev)); #else return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) && !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev)); -- cgit v1.2.3 From ec2fa47d7b984ce7b736f3e74072757c146278cd Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Wed, 15 Dec 2021 06:51:18 +0000 Subject: net/mlx5: Lag, use lag lock Use a lag specific lock instead of depending on external locks to synchronise the lag creation/destruction. With this, taking E-Switch mode lock is no longer needed for syncing lag logic. Cleanup any dead code that is left over and don't export functions that aren't used outside the E-Switch core code. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 14 ---- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 5 -- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 79 ++++++++++------------- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 2 + 4 files changed, 35 insertions(+), 65 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8ef22893e5e6..719ef26d23c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1569,9 +1569,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ida_init(&esw->offloads.vport_metadata_ida); xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); mutex_init(&esw->state_lock); - lockdep_register_key(&esw->mode_lock_key); init_rwsem(&esw->mode_lock); - lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key); refcount_set(&esw->qos.refcnt, 0); esw->enabled_vports = 0; @@ -1615,7 +1613,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); WARN_ON(refcount_read(&esw->qos.refcnt)); - lockdep_unregister_key(&esw->mode_lock_key); mutex_destroy(&esw->state_lock); WARN_ON(!xa_empty(&esw->offloads.vhca_map)); xa_destroy(&esw->offloads.vhca_map); @@ -2003,17 +2000,6 @@ void mlx5_esw_unlock(struct mlx5_eswitch *esw) up_write(&esw->mode_lock); } -/** - * mlx5_esw_lock() - Take write lock on esw mode lock - * @esw: eswitch device. 
- */ -void mlx5_esw_lock(struct mlx5_eswitch *esw) -{ - if (!mlx5_esw_allowed(esw)) - return; - down_write(&esw->mode_lock); -} - /** * mlx5_eswitch_get_total_vports - Get total vports of the eswitch * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a5ae5df4d6f1..2754a732914d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -331,7 +331,6 @@ struct mlx5_eswitch { u32 large_group_num; } params; struct blocking_notifier_head n_head; - struct lock_class_key mode_lock_key; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -704,7 +703,6 @@ void mlx5_esw_get(struct mlx5_core_dev *dev); void mlx5_esw_put(struct mlx5_core_dev *dev); int mlx5_esw_try_lock(struct mlx5_eswitch *esw); void mlx5_esw_unlock(struct mlx5_eswitch *esw); -void mlx5_esw_lock(struct mlx5_eswitch *esw); void esw_vport_change_handle_locked(struct mlx5_vport *vport); @@ -730,9 +728,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) return ERR_PTR(-EOPNOTSUPP); } -static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; } -static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; } - static inline struct mlx5_flow_handle * esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 1de843d2f248..fc32f3e05191 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -121,6 +121,7 @@ static void mlx5_ldev_free(struct kref *ref) mlx5_lag_mp_cleanup(ldev); cancel_delayed_work_sync(&ldev->bond_work); destroy_workqueue(ldev->wq); + mutex_destroy(&ldev->lock); kfree(ldev); } @@ -150,6 +151,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev) } kref_init(&ldev->ref); + mutex_init(&ldev->lock); INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work); ldev->nb.notifier_call = mlx5_lag_netdev_event; @@ -643,31 +645,11 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) queue_delayed_work(ldev->wq, &ldev->bond_work, delay); } -static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0, - struct mlx5_core_dev *dev1) -{ - if (dev0) - mlx5_esw_lock(dev0->priv.eswitch); - if (dev1) - mlx5_esw_lock(dev1->priv.eswitch); -} - -static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0, - struct mlx5_core_dev *dev1) -{ - if (dev1) - mlx5_esw_unlock(dev1->priv.eswitch); - if (dev0) - mlx5_esw_unlock(dev0->priv.eswitch); -} - static void mlx5_do_bond_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag, bond_work); - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; int status; status = mlx5_dev_list_trylock(); @@ -676,15 +658,16 @@ static void mlx5_do_bond_work(struct work_struct *work) return; } + mutex_lock(&ldev->lock); if (ldev->mode_changes_in_progress) { + mutex_unlock(&ldev->lock); mlx5_dev_list_unlock(); mlx5_queue_bond_work(ldev, HZ); return; } - mlx5_lag_lock_eswitches(dev0, dev1); mlx5_do_bond(ldev); - mlx5_lag_unlock_eswitches(dev0, dev1); + mutex_unlock(&ldev->lock); mlx5_dev_list_unlock(); } @@ -908,7 +891,6 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, dev->priv.lag = ldev; } -/* Must be called with intf_mutex held 
*/ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, struct mlx5_core_dev *dev) { @@ -946,13 +928,18 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) mlx5_core_err(dev, "Failed to alloc lag dev\n"); return 0; } - } else { - if (ldev->mode_changes_in_progress) - return -EAGAIN; - mlx5_ldev_get(ldev); + mlx5_ldev_add_mdev(ldev, dev); + return 0; } + mutex_lock(&ldev->lock); + if (ldev->mode_changes_in_progress) { + mutex_unlock(&ldev->lock); + return -EAGAIN; + } + mlx5_ldev_get(ldev); mlx5_ldev_add_mdev(ldev, dev); + mutex_unlock(&ldev->lock); return 0; } @@ -966,14 +953,14 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev) return; recheck: - mlx5_dev_list_lock(); + mutex_lock(&ldev->lock); if (ldev->mode_changes_in_progress) { - mlx5_dev_list_unlock(); + mutex_unlock(&ldev->lock); msleep(100); goto recheck; } mlx5_ldev_remove_mdev(ldev, dev); - mlx5_dev_list_unlock(); + mutex_unlock(&ldev->lock); mlx5_ldev_put(ldev); } @@ -984,32 +971,35 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) recheck: mlx5_dev_list_lock(); err = __mlx5_lag_dev_add_mdev(dev); + mlx5_dev_list_unlock(); + if (err) { - mlx5_dev_list_unlock(); msleep(100); goto recheck; } - mlx5_dev_list_unlock(); } -/* Must be called with intf_mutex held */ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev) { struct mlx5_lag *ldev; + bool lag_is_active; ldev = mlx5_lag_dev(dev); if (!ldev) return; + mutex_lock(&ldev->lock); mlx5_ldev_remove_netdev(ldev, netdev); ldev->flags &= ~MLX5_LAG_FLAG_READY; - if (__mlx5_lag_is_active(ldev)) + lag_is_active = __mlx5_lag_is_active(ldev); + mutex_unlock(&ldev->lock); + + if (lag_is_active) mlx5_queue_bond_work(ldev, 0); } -/* Must be called with intf_mutex held */ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev) { @@ -1020,6 +1010,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, if (!ldev) return; + mutex_lock(&ldev->lock); mlx5_ldev_add_netdev(ldev, dev, netdev); for (i = 0; i < MLX5_MAX_PORTS; i++) @@ -1028,6 +1019,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, if (i >= MLX5_MAX_PORTS) ldev->flags |= MLX5_LAG_FLAG_READY; + mutex_unlock(&ldev->lock); mlx5_queue_bond_work(ldev, 0); } @@ -1104,8 +1096,6 @@ EXPORT_SYMBOL(mlx5_lag_is_shared_fdb); void mlx5_lag_disable_change(struct mlx5_core_dev *dev) { - struct mlx5_core_dev *dev0; - struct mlx5_core_dev *dev1; struct mlx5_lag *ldev; ldev = mlx5_lag_dev(dev); @@ -1113,16 +1103,13 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) return; mlx5_dev_list_lock(); - - dev0 = ldev->pf[MLX5_LAG_P1].dev; - dev1 = ldev->pf[MLX5_LAG_P2].dev; + mutex_lock(&ldev->lock); ldev->mode_changes_in_progress++; - if (__mlx5_lag_is_active(ldev)) { - mlx5_lag_lock_eswitches(dev0, dev1); + if (__mlx5_lag_is_active(ldev)) mlx5_disable_lag(ldev); - mlx5_lag_unlock_eswitches(dev0, dev1); - } + + mutex_unlock(&ldev->lock); mlx5_dev_list_unlock(); } @@ -1134,9 +1121,9 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev) if (!ldev) return; - mlx5_dev_list_lock(); + mutex_lock(&ldev->lock); ldev->mode_changes_in_progress--; - mlx5_dev_list_unlock(); + mutex_unlock(&ldev->lock); mlx5_queue_bond_work(ldev, 0); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index cbf9a9003e55..03a7ea07ce96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -56,6 +56,8 @@ struct mlx5_lag { struct notifier_block nb; struct lag_mp lag_mp; 
struct mlx5_lag_port_sel port_sel; + /* Protect lag fields/state changes */ + struct mutex lock; }; static inline struct mlx5_lag * -- cgit v1.2.3 From bc4c2f2e017949646b43fdcad005a03462d437c6 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Sun, 27 Feb 2022 12:40:39 +0000 Subject: net/mlx5: Lag, filter non compatible devices When search for a peer lag device we can filter based on that device's capabilities. Downstream patch will be less strict when filtering compatible devices and remove the limitation where we require exact MLX5_MAX_PORTS and change it to a range. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 48 ++++++++++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 12 +++--- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1 + 3 files changed, 47 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index ba6dad97e308..3e750b827a19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -555,12 +555,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev) PCI_SLOT(dev->pdev->devfn)); } -static int next_phys_dev(struct device *dev, const void *data) +static int _next_phys_dev(struct mlx5_core_dev *mdev, + const struct mlx5_core_dev *curr) { - struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev); - struct mlx5_core_dev *mdev = madev->mdev; - const struct mlx5_core_dev *curr = data; - if (!mlx5_core_is_pf(mdev)) return 0; @@ -574,8 +571,29 @@ static int next_phys_dev(struct device *dev, const void *data) return 1; } -/* Must be called with intf_mutex held */ -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) +static int next_phys_dev(struct device *dev, const void *data) +{ + struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev); + struct mlx5_core_dev *mdev = madev->mdev; + + return _next_phys_dev(mdev, data); +} + +static int next_phys_dev_lag(struct device *dev, const void *data) +{ + struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev); + struct mlx5_core_dev *mdev = madev->mdev; + + if (!MLX5_CAP_GEN(mdev, vport_group_manager) || + !MLX5_CAP_GEN(mdev, lag_master) || + MLX5_CAP_GEN(mdev, num_lag_ports) != MLX5_MAX_PORTS) + return 0; + + return _next_phys_dev(mdev, data); +} + +static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev, + int (*match)(struct device *dev, const void *data)) { struct auxiliary_device *adev; struct mlx5_adev *madev; @@ -583,7 +601,7 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) if (!mlx5_core_is_pf(dev)) return NULL; - adev = auxiliary_find_device(NULL, dev, &next_phys_dev); + adev = auxiliary_find_device(NULL, dev, match); if (!adev) return NULL; @@ -592,6 +610,20 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) return madev->mdev; } +/* Must be called with intf_mutex held */ +struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) +{ + lockdep_assert_held(&mlx5_intf_mutex); + return mlx5_get_next_dev(dev, &next_phys_dev); +} + +/* Must be called with intf_mutex held */ +struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev) +{ + lockdep_assert_held(&mlx5_intf_mutex); + return mlx5_get_next_dev(dev, &next_phys_dev_lag); +} + void mlx5_dev_list_lock(void) { mutex_lock(&mlx5_intf_mutex); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index fc32f3e05191..360cb1c4221e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -913,12 +913,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) struct mlx5_lag *ldev = NULL; struct mlx5_core_dev *tmp_dev; - if (!MLX5_CAP_GEN(dev, vport_group_manager) || - !MLX5_CAP_GEN(dev, lag_master) || - MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS) - return 0; - - tmp_dev = mlx5_get_next_phys_dev(dev); + tmp_dev = mlx5_get_next_phys_dev_lag(dev); if (tmp_dev) ldev = tmp_dev->priv.lag; @@ -968,6 +963,11 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) { int err; + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS) + return; + recheck: mlx5_dev_list_lock(); err = __mlx5_lag_dev_add_mdev(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 9026be1d6223..484cb1e4fc7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -210,6 +210,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev); int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); +struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); int mlx5_dev_list_trylock(void); -- cgit v1.2.3 From e9d5bb51c592d0275b00a52ce3d8fe8457501ce6 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Sun, 27 Feb 2022 13:45:59 +0000 Subject: net/mlx5: Lag, store number of ports inside lag object Store the number of lag ports inside the lag object. Lag object is a single shared object managing the lag state of multiple mlx5 devices on the same physical HCA. Downstream patches will allow hardware lag to be created over devices with more than 2 ports. 
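A minimal sketch of the idea, assuming simplified types: the shared lag object records the port count once, at allocation time, and later loops are bounded by that stored value rather than by MLX5_MAX_PORTS.

#include <stdio.h>

#define MAX_PORTS 4

struct lag_dev {
        unsigned char ports;            /* set once from the device capability */
        int pf_present[MAX_PORTS];
};

/* Capture num_lag_ports when the shared lag object is created. */
static void lag_alloc_init(struct lag_dev *ldev, unsigned char num_lag_ports)
{
        ldev->ports = num_lag_ports;
}

/* Later checks iterate over the actual port count, not the maximum. */
static int lag_all_ports_ready(const struct lag_dev *ldev)
{
        for (unsigned char i = 0; i < ldev->ports; i++)
                if (!ldev->pf_present[i])
                        return 0;
        return 1;
}

int main(void)
{
        struct lag_dev ldev = { .pf_present = { 1, 1 } };

        lag_alloc_init(&ldev, 2);
        printf("ready: %d\n", lag_all_ports_ready(&ldev));
        return 0;
}
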
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 360cb1c4221e..deac240e6d78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -164,6 +164,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev) if (err) mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", err); + ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports); return ldev; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 03a7ea07ce96..1c8fb3fada0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -45,6 +45,7 @@ struct lag_tracker { */ struct mlx5_lag { u8 flags; + u8 ports; int mode_changes_in_progress; bool shared_fdb; u8 v2p_map[MLX5_MAX_PORTS]; -- cgit v1.2.3 From e2c45931ff124381e6389c5e226a9527ff8c9969 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 1 Mar 2022 17:24:40 +0000 Subject: net/mlx5: Lag, support single FDB only on 2 ports E-Switch currently doesn't support more than 2 E-Switch managers being aggregated under a single hardware lag. Have specific checks to disallow creating lag when the code doesn't support it. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index deac240e6d78..4678b50b7e18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -458,6 +458,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) return 0; } +#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 2 static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) { #ifdef CONFIG_MLX5_ESWITCH @@ -470,6 +471,9 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) #ifdef CONFIG_MLX5_ESWITCH mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev); + if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS) + return false; + return (mode == MLX5_ESWITCH_NONE || mode == MLX5_ESWITCH_OFFLOADS) && (mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev) == mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P2].dev)); -- cgit v1.2.3 From cdf611d17094aea113d7acc32040a1b362dfe2c4 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 1 Mar 2022 17:34:31 +0000 Subject: net/mlx5: Lag, use hash when in roce lag on 4 ports Downstream patches will add support for lag over 4 ports. In that mode we will only use hash as the uplink selection method. Using hash instead of queue affinity (before this patch) offers key advantages like: - Align ports selection method with the method used by the bond device - Better packets distribution where a single queue can transmit from multiple ports (with queue affinity a queue is bound to a single port regardless of the packet being sent). - In case of failover we traffic is split between multiple ports and not a single one like in queue affinity. 
Going forward it was decided that queue affinity will be deprecated as using hash provides a better user experience which means on 4 ports HCAs hash will always be used. Future work will add hash support for 2 ports HCAs as well. Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 45 ++++++++++++++++++----- 1 file changed, 36 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 4678b50b7e18..4f6867eba5fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -310,17 +310,41 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, mlx5_lag_drop_rule_setup(ldev, tracker); } -static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev, - struct lag_tracker *tracker, u8 *flags) +#define MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED 4 +static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, + struct lag_tracker *tracker, u8 *flags) { - bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE); struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; - if (roce_lag || - !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) || - tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH) - return; - *flags |= MLX5_LAG_FLAG_HASH_BASED; + if (ldev->ports == MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED) { + /* Four ports are support only in hash mode */ + if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table)) + return -EINVAL; + *flags |= MLX5_LAG_FLAG_HASH_BASED; + } + + return 0; +} + +static int mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, + struct lag_tracker *tracker, u8 *flags) +{ + struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; + + if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && + tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) + *flags |= MLX5_LAG_FLAG_HASH_BASED; + return 0; +} + +static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev, + struct lag_tracker *tracker, u8 *flags) +{ + bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE); + + if (roce_lag) + return mlx5_lag_set_port_sel_mode_roce(ldev, tracker, flags); + return mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, flags); } static char *get_str_port_sel_mode(u8 flags) @@ -382,7 +406,10 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], &ldev->v2p_map[MLX5_LAG_P2]); - mlx5_lag_set_port_sel_mode(ldev, tracker, &flags); + err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags); + if (err) + return err; + if (flags & MLX5_LAG_FLAG_HASH_BASED) { err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, ldev->v2p_map[MLX5_LAG_P1], -- cgit v1.2.3 From 7e978e7714d6b0ba40ee60cb23852ed888658768 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 1 Mar 2022 17:20:35 +0000 Subject: net/mlx5: Lag, use actual number of lag ports Refactor the entire lag code to use ldev->ports instead of hard-coded defines (like MLX5_MAX_PORTS) for its operations. 
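One central piece of the refactor below is the tx-affinity mapping generalized from a fixed two-port swap to N ports: start from the identity map, and if only some ports can transmit, point each disabled port at a randomly chosen enabled one. Here is a self-contained sketch of that logic, using rand() in place of the kernel's get_random_bytes().

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MAX_PORTS 4

struct port_state { int tx_enabled, link_up; };

/* ports[] is filled with 1-based egress ports, mirroring the v2p map. */
static void infer_tx_affinity(const struct port_state *st, int num_ports,
                              unsigned char *ports)
{
        int enabled[MAX_PORTS], num_enabled = 0;
        int disabled[MAX_PORTS], num_disabled = 0;

        for (int i = 0; i < num_ports; i++) {
                if (st[i].tx_enabled && st[i].link_up)
                        enabled[num_enabled++] = i;
                else
                        disabled[num_disabled++] = i;
        }

        for (int i = 0; i < num_ports; i++)     /* native (identity) mapping */
                ports[i] = i + 1;

        if (!num_enabled || !num_disabled)      /* all up or all down: keep it */
                return;

        for (int i = 0; i < num_disabled; i++)  /* remap each unusable port */
                ports[disabled[i]] = enabled[rand() % num_enabled] + 1;
}

int main(void)
{
        struct port_state st[MAX_PORTS] = { {1, 1}, {0, 0}, {1, 1}, {1, 0} };
        unsigned char map[MAX_PORTS];

        srand((unsigned int)time(NULL));
        infer_tx_affinity(st, MAX_PORTS, map);
        for (int i = 0; i < MAX_PORTS; i++)
                printf("port %d -> %d\n", i + 1, map[i]);
        return 0;
}
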
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 295 +++++++++++++-------- .../net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 60 ++--- .../net/ethernet/mellanox/mlx5/core/lag/port_sel.h | 10 +- 3 files changed, 216 insertions(+), 149 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 4f6867eba5fb..f2659b0f8cc5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -53,8 +53,7 @@ enum { */ static DEFINE_SPINLOCK(lag_lock); -static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, - u8 remap_port2, bool shared_fdb, u8 flags) +static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, bool shared_fdb, u8 flags) { u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); @@ -63,8 +62,8 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) { - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); } else { MLX5_SET(lagc, lag_ctx, port_select_mode, MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT); @@ -73,8 +72,8 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, return mlx5_cmd_exec_in(dev, create_lag, in); } -static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, - u8 remap_port2) +static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports, + u8 *ports) { u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); @@ -82,8 +81,8 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); MLX5_SET(modify_lag_in, in, field_select, 0x1); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); - MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); return mlx5_cmd_exec_in(dev, modify_lag, in); } @@ -174,7 +173,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) + for (i = 0; i < ldev->ports; i++) if (ldev->pf[i].netdev == ndev) return i; @@ -191,39 +190,69 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV); } +static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports, + u8 *ports, int *num_disabled) +{ + int i; + + *num_disabled = 0; + for (i = 0; i < num_ports; i++) { + if (!tracker->netdev_state[i].tx_enabled || + !tracker->netdev_state[i].link_up) + ports[(*num_disabled)++] = i; + } +} + static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, - u8 *port1, u8 *port2) + u8 num_ports, u8 *ports) { - bool p1en; - bool p2en; + int disabled[MLX5_MAX_PORTS] = {}; + int enabled[MLX5_MAX_PORTS] = {}; + int disabled_ports_num = 0; + int enabled_ports_num = 0; + u32 rand; + int i; - p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled && - tracker->netdev_state[MLX5_LAG_P1].link_up; + for (i = 0; i < num_ports; i++) { + if (tracker->netdev_state[i].tx_enabled 
&& + tracker->netdev_state[i].link_up) + enabled[enabled_ports_num++] = i; + else + disabled[disabled_ports_num++] = i; + } - p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled && - tracker->netdev_state[MLX5_LAG_P2].link_up; + /* Use native mapping by default */ + for (i = 0; i < num_ports; i++) + ports[i] = MLX5_LAG_EGRESS_PORT_1 + i; - *port1 = MLX5_LAG_EGRESS_PORT_1; - *port2 = MLX5_LAG_EGRESS_PORT_2; - if ((!p1en && !p2en) || (p1en && p2en)) + /* If all ports are disabled/enabled keep native mapping */ + if (enabled_ports_num == num_ports || + disabled_ports_num == num_ports) return; - if (p1en) - *port2 = MLX5_LAG_EGRESS_PORT_1; - else - *port1 = MLX5_LAG_EGRESS_PORT_2; + /* Go over the disabled ports and for each assign a random active port */ + for (i = 0; i < disabled_ports_num; i++) { + get_random_bytes(&rand, 4); + + ports[disabled[i]] = enabled[rand % enabled_ports_num] + 1; + } } static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev) { - return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop; + int i; + + for (i = 0; i < ldev->ports; i++) + if (ldev->pf[i].has_drop) + return true; + return false; } static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev) { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) { + for (i = 0; i < ldev->ports; i++) { if (!ldev->pf[i].has_drop) continue; @@ -236,12 +265,12 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev) static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev, struct lag_tracker *tracker) { - struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; - struct mlx5_core_dev *inactive; - u8 v2p_port1, v2p_port2; - int inactive_idx; + u8 disabled_ports[MLX5_MAX_PORTS] = {}; + struct mlx5_core_dev *dev; + int disabled_index; + int num_disabled; int err; + int i; /* First delete the current drop rule so there won't be any dropped * packets @@ -251,58 +280,60 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev, if (!ldev->tracker.has_inactive) return; - mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2); + mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled); - if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) { - inactive = dev1; - inactive_idx = MLX5_LAG_P2; - } else { - inactive = dev0; - inactive_idx = MLX5_LAG_P1; + for (i = 0; i < num_disabled; i++) { + disabled_index = disabled_ports[i]; + dev = ldev->pf[disabled_index].dev; + err = mlx5_esw_acl_ingress_vport_drop_rule_create(dev->priv.eswitch, + MLX5_VPORT_UPLINK); + if (!err) + ldev->pf[disabled_index].has_drop = true; + else + mlx5_core_err(dev, + "Failed to create lag drop rule, error: %d", err); } - - err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch, - MLX5_VPORT_UPLINK); - if (!err) - ldev->pf[inactive_idx].has_drop = true; - else - mlx5_core_err(inactive, - "Failed to create lag drop rule, error: %d", err); } -static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2) +static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) - return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2); - return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); + return mlx5_lag_port_sel_modify(ldev, ports); + return mlx5_cmd_modify_lag(dev0, ldev->ports, ports); } void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - u8 v2p_port1, 
v2p_port2; + u8 ports[MLX5_MAX_PORTS] = {}; int err; + int i; + int j; - mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, - &v2p_port2); + mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ports); - if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] || - v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) { - err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2); + for (i = 0; i < ldev->ports; i++) { + if (ports[i] == ldev->v2p_map[i]) + continue; + err = _mlx5_modify_lag(ldev, ports); if (err) { mlx5_core_err(dev0, "Failed to modify LAG (%d)\n", err); return; } - ldev->v2p_map[MLX5_LAG_P1] = v2p_port1; - ldev->v2p_map[MLX5_LAG_P2] = v2p_port2; - mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d", - ldev->v2p_map[MLX5_LAG_P1], - ldev->v2p_map[MLX5_LAG_P2]); + memcpy(ldev->v2p_map, ports, sizeof(ports[0]) * + ldev->ports); + + mlx5_core_info(dev0, "modify lag map\n"); + for (j = 0; j < ldev->ports; j++) + mlx5_core_info(dev0, "\tmap port %d:%d\n", + j + 1, + ldev->v2p_map[j]); + break; } if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && @@ -362,13 +393,15 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; int err; + int i; - mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s", - ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2], + mlx5_core_info(dev0, "lag map:\n"); + for (i = 0; i < ldev->ports; i++) + mlx5_core_info(dev0, "\tport %d:%d\n", i + 1, ldev->v2p_map[i]); + mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", shared_fdb, get_str_port_sel_mode(flags)); - err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1], - ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags); + err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, shared_fdb, flags); if (err) { mlx5_core_err(dev0, "Failed to create LAG (%d)\n", @@ -404,16 +437,14 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; int err; - mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], - &ldev->v2p_map[MLX5_LAG_P2]); + mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->v2p_map); err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags); if (err) return err; if (flags & MLX5_LAG_FLAG_HASH_BASED) { err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, - ldev->v2p_map[MLX5_LAG_P1], - ldev->v2p_map[MLX5_LAG_P2]); + ldev->v2p_map); if (err) { mlx5_core_err(dev0, "Failed to create LAG port selection(%d)\n", @@ -491,30 +522,37 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) #ifdef CONFIG_MLX5_ESWITCH u8 mode; #endif + int i; - if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev) - return false; + for (i = 0; i < ldev->ports; i++) + if (!ldev->pf[i].dev) + return false; #ifdef CONFIG_MLX5_ESWITCH mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev); - if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS) + if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS) return false; - return (mode == MLX5_ESWITCH_NONE || mode == MLX5_ESWITCH_OFFLOADS) && - (mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev) == - mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P2].dev)); + for (i = 0; i < ldev->ports; i++) + if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode) + return false; + + if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS) + return false; #else - return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) && - !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev)); + for (i = 0; i < 
ldev->ports; i++) + if (mlx5_sriov_is_enabled(ldev->pf[i].dev)) + return false; #endif + return true; } static void mlx5_lag_add_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) { + for (i = 0; i < ldev->ports; i++) { if (!ldev->pf[i].dev) continue; @@ -531,7 +569,7 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) { + for (i = 0; i < ldev->ports; i++) { if (!ldev->pf[i].dev) continue; @@ -551,6 +589,7 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) bool shared_fdb = ldev->shared_fdb; bool roce_lag; int err; + int i; roce_lag = __mlx5_lag_is_roce(ldev); @@ -561,7 +600,8 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); } - mlx5_nic_vport_disable_roce(dev1); + for (i = 1; i < ldev->ports; i++) + mlx5_nic_vport_disable_roce(ldev->pf[i].dev); } err = mlx5_deactivate_lag(ldev); @@ -598,6 +638,23 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) return false; } +static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev) +{ + bool roce_lag = true; + int i; + + for (i = 0; i < ldev->ports; i++) + roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev); + +#ifdef CONFIG_MLX5_ESWITCH + for (i = 0; i < ldev->ports; i++) + roce_lag = roce_lag && + ldev->pf[i].dev->priv.eswitch->mode == MLX5_ESWITCH_NONE; +#endif + + return roce_lag; +} + static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; @@ -605,6 +662,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) struct lag_tracker tracker; bool do_bond, roce_lag; int err; + int i; if (!mlx5_lag_is_ready(ldev)) { do_bond = false; @@ -621,14 +679,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) if (do_bond && !__mlx5_lag_is_active(ldev)) { bool shared_fdb = mlx5_shared_fdb_supported(ldev); - roce_lag = !mlx5_sriov_is_enabled(dev0) && - !mlx5_sriov_is_enabled(dev1); - -#ifdef CONFIG_MLX5_ESWITCH - roce_lag = roce_lag && - dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE && - dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE; -#endif + roce_lag = mlx5_lag_is_roce_lag(ldev); if (shared_fdb || roce_lag) mlx5_lag_remove_devices(ldev); @@ -645,7 +696,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } else if (roce_lag) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); - mlx5_nic_vport_enable_roce(dev1); + for (i = 1; i < ldev->ports; i++) + mlx5_nic_vport_enable_roce(ldev->pf[i].dev); } else if (shared_fdb) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); @@ -713,7 +765,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, bool is_bonded, is_in_lag, mode_supported; bool has_inactive = 0; struct slave *slave; - int bond_status = 0; + u8 bond_status = 0; int num_slaves = 0; int changed = 0; int idx; @@ -744,7 +796,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, rcu_read_unlock(); /* None of this lagdev's netdevs are slaves of this master. */ - if (!(bond_status & 0x3)) + if (!(bond_status & GENMASK(ldev->ports - 1, 0))) return 0; if (lag_upper_info) { @@ -757,7 +809,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, * A device is considered bonded if both its physical ports are slaves * of the same lag master, and only them. 
*/ - is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3; + is_in_lag = num_slaves == ldev->ports && + bond_status == GENMASK(ldev->ports - 1, 0); /* Lag mode must be activebackup or hash. */ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || @@ -886,7 +939,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev, { unsigned int fn = mlx5_get_dev_index(dev); - if (fn >= MLX5_MAX_PORTS) + if (fn >= ldev->ports) return; spin_lock(&lag_lock); @@ -902,7 +955,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev, int i; spin_lock(&lag_lock); - for (i = 0; i < MLX5_MAX_PORTS; i++) { + for (i = 0; i < ldev->ports; i++) { if (ldev->pf[i].netdev == netdev) { ldev->pf[i].netdev = NULL; break; @@ -916,7 +969,7 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, { unsigned int fn = mlx5_get_dev_index(dev); - if (fn >= MLX5_MAX_PORTS) + if (fn >= ldev->ports) return; ldev->pf[fn].dev = dev; @@ -928,11 +981,11 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) + for (i = 0; i < ldev->ports; i++) if (ldev->pf[i].dev == dev) break; - if (i == MLX5_MAX_PORTS) + if (i == ldev->ports) return; ldev->pf[i].dev = NULL; @@ -1045,11 +1098,11 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, mutex_lock(&ldev->lock); mlx5_ldev_add_netdev(ldev, dev, netdev); - for (i = 0; i < MLX5_MAX_PORTS; i++) + for (i = 0; i < ldev->ports; i++) if (!ldev->pf[i].dev) break; - if (i >= MLX5_MAX_PORTS) + if (i >= ldev->ports) ldev->flags |= MLX5_LAG_FLAG_READY; mutex_unlock(&ldev->lock); mlx5_queue_bond_work(ldev, 0); @@ -1163,6 +1216,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) { struct net_device *ndev = NULL; struct mlx5_lag *ldev; + int i; spin_lock(&lag_lock); ldev = mlx5_lag_dev(dev); @@ -1171,9 +1225,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) goto unlock; if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { - ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ? 
- ldev->pf[MLX5_LAG_P1].netdev : - ldev->pf[MLX5_LAG_P2].netdev; + for (i = 0; i < ldev->ports; i++) + if (ldev->tracker.netdev_state[i].tx_enabled) + ndev = ldev->pf[i].netdev; + if (!ndev) + ndev = ldev->pf[ldev->ports - 1].netdev; } else { ndev = ldev->pf[MLX5_LAG_P1].netdev; } @@ -1192,16 +1248,19 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, { struct mlx5_lag *ldev; u8 port = 0; + int i; spin_lock(&lag_lock); ldev = mlx5_lag_dev(dev); if (!(ldev && __mlx5_lag_is_roce(ldev))) goto unlock; - if (ldev->pf[MLX5_LAG_P1].netdev == slave) - port = MLX5_LAG_P1; - else - port = MLX5_LAG_P2; + for (i = 0; i < ldev->ports; i++) { + if (ldev->pf[MLX5_LAG_P1].netdev == slave) { + port = i; + break; + } + } port = ldev->v2p_map[port]; @@ -1213,7 +1272,13 @@ EXPORT_SYMBOL(mlx5_lag_get_slave_port); u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev) { - return MLX5_MAX_PORTS; + struct mlx5_lag *ldev; + + ldev = mlx5_lag_dev(dev); + if (!ldev) + return 0; + + return ldev->ports; } EXPORT_SYMBOL(mlx5_lag_get_num_ports); @@ -1243,7 +1308,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, size_t *offsets) { int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out); - struct mlx5_core_dev *mdev[MLX5_MAX_PORTS]; + struct mlx5_core_dev **mdev; struct mlx5_lag *ldev; int num_ports; int ret, i, j; @@ -1253,14 +1318,20 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, if (!out) return -ENOMEM; + mdev = kvzalloc(sizeof(mdev[0]) * MLX5_MAX_PORTS, GFP_KERNEL); + if (!mdev) { + ret = -ENOMEM; + goto free_out; + } + memset(values, 0, sizeof(*values) * num_counters); spin_lock(&lag_lock); ldev = mlx5_lag_dev(dev); if (ldev && __mlx5_lag_is_active(ldev)) { - num_ports = MLX5_MAX_PORTS; - mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev; - mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev; + num_ports = ldev->ports; + for (i = 0; i < ldev->ports; i++) + mdev[i] = ldev->pf[i].dev; } else { num_ports = 1; mdev[MLX5_LAG_P1] = dev; @@ -1275,13 +1346,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in, out); if (ret) - goto free; + goto free_mdev; for (j = 0; j < num_counters; ++j) values[j] += be64_to_cpup((__be64 *)(out + offsets[j])); } -free: +free_mdev: + kvfree(mdev); +free_out: kvfree(out); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 5be322528279..478b4ef723f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -12,7 +12,8 @@ enum { static struct mlx5_flow_group * mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, - struct mlx5_flow_definer *definer) + struct mlx5_flow_definer *definer, + u8 ports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *fg; @@ -25,7 +26,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, MLX5_SET(create_flow_group_in, in, match_definer_id, mlx5_get_match_definer_id(definer)); MLX5_SET(create_flow_group_in, in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1); + MLX5_SET(create_flow_group_in, in, end_flow_index, ports - 1); MLX5_SET(create_flow_group_in, in, group_type, MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT); @@ -36,7 +37,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, struct mlx5_lag_definer *lag_definer, - u8 port1, u8 port2) + u8 
*ports) { struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_flow_table_attr ft_attr = {}; @@ -45,7 +46,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, struct mlx5_flow_namespace *ns; int err, i; - ft_attr.max_fte = MLX5_MAX_PORTS; + ft_attr.max_fte = ldev->ports; ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER; ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL); @@ -61,7 +62,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, } lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft, - lag_definer->definer); + lag_definer->definer, + ldev->ports); if (IS_ERR(lag_definer->fg)) { err = PTR_ERR(lag_definer->fg); goto destroy_ft; @@ -70,8 +72,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.flags |= FLOW_ACT_NO_APPEND; - for (i = 0; i < MLX5_MAX_PORTS; i++) { - u8 affinity = i == 0 ? port1 : port2; + for (i = 0; i < ldev->ports; i++) { + u8 affinity = ports[i]; dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev, vhca_id); @@ -279,8 +281,7 @@ static int mlx5_lag_set_definer(u32 *match_definer_mask, static struct mlx5_lag_definer * mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash, - enum mlx5_traffic_types tt, bool tunnel, u8 port1, - u8 port2) + enum mlx5_traffic_types tt, bool tunnel, u8 *ports) { struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_lag_definer *lag_definer; @@ -308,7 +309,7 @@ mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash, goto free_mask; } - err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2); + err = mlx5_lag_create_port_sel_table(ldev, lag_definer, ports); if (err) goto destroy_match_definer; @@ -331,7 +332,7 @@ static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev, struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) + for (i = 0; i < ldev->ports; i++) mlx5_del_flow_rules(lag_definer->rules[i]); mlx5_destroy_flow_group(lag_definer->fg); mlx5_destroy_flow_table(lag_definer->ft); @@ -356,7 +357,7 @@ static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev) static int mlx5_lag_create_definers(struct mlx5_lag *ldev, enum netdev_lag_hash hash_type, - u8 port1, u8 port2) + u8 *ports) { struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_lag_definer *lag_definer; @@ -364,7 +365,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev, for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt, - false, port1, port2); + false, ports); if (IS_ERR(lag_definer)) { err = PTR_ERR(lag_definer); goto destroy_definers; @@ -376,7 +377,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev, lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt, - true, port1, port2); + true, ports); if (IS_ERR(lag_definer)) { err = PTR_ERR(lag_definer); goto destroy_definers; @@ -513,13 +514,13 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) } int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, - enum netdev_lag_hash hash_type, u8 port1, u8 port2) + enum netdev_lag_hash hash_type, u8 *ports) { struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; int err; set_tt_map(port_sel, hash_type); - err = mlx5_lag_create_definers(ldev, hash_type, port1, port2); + err = mlx5_lag_create_definers(ldev, hash_type, ports); if (err) return err; @@ -546,12 +547,13 @@ 
destroy_definers: static int mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, struct mlx5_lag_definer **definers, - u8 port1, u8 port2) + u8 *ports) { struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_flow_destination dest = {}; int err; int tt; + int i; dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; @@ -559,19 +561,13 @@ mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { struct mlx5_flow_handle **rules = definers[tt]->rules; - if (ldev->v2p_map[MLX5_LAG_P1] != port1) { - dest.vport.vhca_id = - MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id); - err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1], - &dest, NULL); - if (err) - return err; - } - - if (ldev->v2p_map[MLX5_LAG_P2] != port2) { + for (i = 0; i < ldev->ports; i++) { + if (ldev->v2p_map[i] == ports[i]) + continue; dest.vport.vhca_id = - MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id); - err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2], + MLX5_CAP_GEN(ldev->pf[ports[i] - 1].dev, + vhca_id); + err = mlx5_modify_rule_destination(rules[i], &dest, NULL); if (err) return err; @@ -581,14 +577,14 @@ mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, return 0; } -int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2) +int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports) { struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; int err; err = mlx5_lag_modify_definers_destinations(ldev, port_sel->outer.definers, - port1, port2); + ports); if (err) return err; @@ -597,7 +593,7 @@ int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2) return mlx5_lag_modify_definers_destinations(ldev, port_sel->inner.definers, - port1, port2); + ports); } void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h index 6d15b28a42fc..79852ac41dbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h @@ -27,22 +27,20 @@ struct mlx5_lag_port_sel { #ifdef CONFIG_MLX5_ESWITCH -int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2); +int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports); void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev); int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, - enum netdev_lag_hash hash_type, u8 port1, - u8 port2); + enum netdev_lag_hash hash_type, u8 *ports); #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, enum netdev_lag_hash hash_type, - u8 port1, u8 port2) + u8 *ports) { return 0; } -static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, - u8 port2) +static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports) { return 0; } -- cgit v1.2.3 From 4cd14d44b11dabf195d1e66dadbb954336224658 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 1 Mar 2022 17:34:58 +0000 Subject: net/mlx5: Support devices with more than 2 ports Increase the define MLX5_MAX_PORTS to 4 as the driver is ready to support NICs with 4 ports. 
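As a rough illustration (not part of the patch itself), the LAG capability gate is relaxed from requiring exactly MLX5_MAX_PORTS lag ports to accepting any count between 2 and MLX5_MAX_PORTS; a sketch of the accepted-range predicate, with the helper name invented here only for clarity:

  /* Sketch only: accept 2..MLX5_MAX_PORTS lag ports instead of exactly 2. */
  static bool mlx5_lag_port_count_ok(struct mlx5_core_dev *mdev)
  {
  	u8 n = MLX5_CAP_GEN(mdev, num_lag_ports);

  	return n > 1 && n <= MLX5_MAX_PORTS;
  }

The checks in dev.c and lag.c below encode the same condition in negated form.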
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 3 ++- include/linux/mlx5/driver.h | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 3e750b827a19..11f7c03ae81b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -586,7 +586,8 @@ static int next_phys_dev_lag(struct device *dev, const void *data) if (!MLX5_CAP_GEN(mdev, vport_group_manager) || !MLX5_CAP_GEN(mdev, lag_master) || - MLX5_CAP_GEN(mdev, num_lag_ports) != MLX5_MAX_PORTS) + (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS || + MLX5_CAP_GEN(mdev, num_lag_ports) <= 1)) return 0; return _next_phys_dev(mdev, data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index f2659b0f8cc5..716e073c80d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -1050,7 +1050,8 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) if (!MLX5_CAP_GEN(dev, vport_group_manager) || !MLX5_CAP_GEN(dev, lag_master) || - MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS) + (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS || + MLX5_CAP_GEN(dev, num_lag_ports) <= 1)) return; recheck: diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 62ea1120de9c..fdb9d07a05a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -84,7 +84,7 @@ enum mlx5_sqp_t { }; enum { - MLX5_MAX_PORTS = 2, + MLX5_MAX_PORTS = 4, }; enum { -- cgit v1.2.3 From 24b3599effe2b1eda7bc7e8b2b5e8fe459256222 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Wed, 2 Mar 2022 09:12:01 +0000 Subject: net/mlx5: Lag, refactor dmesg print Combine dmesg lag prints into a single function. 
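For reference, after this refactor both the create and modify paths report the mapping through the same helper, so a two-port map shows up roughly as (illustrative output only; the values depend on the current v2p mapping):

  lag map:
  	port 1:1
  	port 2:2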
Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 716e073c80d4..90056a3ca89d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -107,6 +107,16 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); +static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, + struct mlx5_lag *ldev) +{ + int i; + + mlx5_core_info(dev, "lag map:\n"); + for (i = 0; i < ldev->ports; i++) + mlx5_core_info(dev, "\tport %d:%d\n", i + 1, ldev->v2p_map[i]); +} + static int mlx5_lag_netdev_event(struct notifier_block *this, unsigned long event, void *ptr); static void mlx5_do_bond_work(struct work_struct *work); @@ -311,7 +321,6 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, u8 ports[MLX5_MAX_PORTS] = {}; int err; int i; - int j; mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ports); @@ -328,11 +337,7 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, memcpy(ldev->v2p_map, ports, sizeof(ports[0]) * ldev->ports); - mlx5_core_info(dev0, "modify lag map\n"); - for (j = 0; j < ldev->ports; j++) - mlx5_core_info(dev0, "\tmap port %d:%d\n", - j + 1, - ldev->v2p_map[j]); + mlx5_lag_print_mapping(dev0, ldev); break; } @@ -393,11 +398,8 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; int err; - int i; - mlx5_core_info(dev0, "lag map:\n"); - for (i = 0; i < ldev->ports; i++) - mlx5_core_info(dev0, "\tport %d:%d\n", i + 1, ldev->v2p_map[i]); + mlx5_lag_print_mapping(dev0, ldev); mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", shared_fdb, get_str_port_sel_mode(flags)); -- cgit v1.2.3 From 352899f384d4aefa77ede6310d08c1b515612a8f Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Wed, 2 Mar 2022 15:38:50 +0000 Subject: net/mlx5: Lag, use buckets in hash mode When in hardware lag and the NIC has more than 2 ports, if one port goes down the traffic needs to be distributed between the remaining active ports. For a better spread in such cases, instead of using a 1-to-1 mapping and only 4 slots in the hash, use many. Each port will have many slots that point to it. When a port goes down, go over all the slots that pointed to that port and spread them between the remaining active ports. Once the port comes back, restore the default mapping. We will have number_of_ports * MLX5_LAG_MAX_HASH_BUCKETS slots. Each group of MLX5_LAG_MAX_HASH_BUCKETS slots belongs to a different port. The native mapping is such that: port 1: The first MLX5_LAG_MAX_HASH_BUCKETS slots are: [1, 1, .., 1] which means that if a packet is hashed into one of these slots it will hit the wire via port 1. port 2: The second MLX5_LAG_MAX_HASH_BUCKETS slots are: [2, 2, .., 2] which means that if a packet is hashed into one of these slots it will hit the wire via port 2, and this mapping is the same for the rest of the ports. On a failover, let's say port 2 goes down (ports 1, 3, 4 are still up). The new mapping for port 2 will be: port 2: The second MLX5_LAG_MAX_HASH_BUCKETS slots are: [1, 3, 1, 4, .., 4] which means the mapping was changed from the native mapping to a mapping that consists of only the active ports.
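As a rough sketch of the slot layout described above (illustration only; the real logic is mlx5_infer_tx_affinity_mapping() in the patch below), the slot for 0-based port i and bucket j is i * buckets + j; the native fill and the failover respread look roughly like:

  /* Native mapping: every bucket of port i points to port i itself. */
  for (i = 0; i < num_ports; i++)
  	for (j = 0; j < buckets; j++)
  		ports[i * buckets + j] = i + 1;

  /* Failover: only the buckets of a downed port p are respread. */
  for (j = 0; j < buckets; j++) {
  	get_random_bytes(&rand, 4);
  	ports[p * buckets + j] = enabled[rand % num_enabled] + 1;
  }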
With this if a port goes down the traffic will be split between the active ports randomly Signed-off-by: Mark Bloch Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 154 +++++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 4 +- .../net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 95 ++++++++----- .../net/ethernet/mellanox/mlx5/core/lag/port_sel.h | 5 +- 4 files changed, 182 insertions(+), 76 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 90056a3ca89d..8a74c409b501 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -107,14 +107,73 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); +static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports, + u8 *ports, int *num_disabled) +{ + int i; + + *num_disabled = 0; + for (i = 0; i < num_ports; i++) { + if (!tracker->netdev_state[i].tx_enabled || + !tracker->netdev_state[i].link_up) + ports[(*num_disabled)++] = i; + } +} + +static void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, + u8 *ports, int *num_enabled) +{ + int i; + + *num_enabled = 0; + for (i = 0; i < num_ports; i++) { + if (tracker->netdev_state[i].tx_enabled && + tracker->netdev_state[i].link_up) + ports[(*num_enabled)++] = i; + } + + if (*num_enabled == 0) + mlx5_infer_tx_disabled(tracker, num_ports, ports, num_enabled); +} + static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, - struct mlx5_lag *ldev) + struct mlx5_lag *ldev, + struct lag_tracker *tracker, + u8 flags) { + char buf[MLX5_MAX_PORTS * 10 + 1] = {}; + u8 enabled_ports[MLX5_MAX_PORTS] = {}; + int written = 0; + int num_enabled; + int idx; + int err; int i; + int j; - mlx5_core_info(dev, "lag map:\n"); - for (i = 0; i < ldev->ports; i++) - mlx5_core_info(dev, "\tport %d:%d\n", i + 1, ldev->v2p_map[i]); + if (flags & MLX5_LAG_FLAG_HASH_BASED) { + mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports, + &num_enabled); + for (i = 0; i < num_enabled; i++) { + err = scnprintf(buf + written, 4, "%d, ", enabled_ports[i] + 1); + if (err != 3) + return; + written += err; + } + buf[written - 2] = 0; + mlx5_core_info(dev, "lag map active ports: %s\n", buf); + } else { + for (i = 0; i < ldev->ports; i++) { + for (j = 0; j < ldev->buckets; j++) { + idx = i * ldev->buckets + j; + err = scnprintf(buf + written, 10, + " port %d:%d", i + 1, ldev->v2p_map[idx]); + if (err != 9) + return; + written += err; + } + } + mlx5_core_info(dev, "lag map:%s\n", buf); + } } static int mlx5_lag_netdev_event(struct notifier_block *this, @@ -174,6 +233,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev) mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", err); ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports); + ldev->buckets = 1; return ldev; } @@ -200,28 +260,25 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV); } -static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports, - u8 *ports, int *num_disabled) -{ - int i; - - *num_disabled = 0; - for (i = 0; i < num_ports; i++) { - if (!tracker->netdev_state[i].tx_enabled || - !tracker->netdev_state[i].link_up) - ports[(*num_disabled)++] = i; - } -} - +/* Create a mapping between steering slots and active ports. 
+ * As we have ldev->buckets slots per port first assume the native + * mapping should be used. + * If there are ports that are disabled fill the relevant slots + * with mapping that points to active ports. + */ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, - u8 num_ports, u8 *ports) + u8 num_ports, + u8 buckets, + u8 *ports) { int disabled[MLX5_MAX_PORTS] = {}; int enabled[MLX5_MAX_PORTS] = {}; int disabled_ports_num = 0; int enabled_ports_num = 0; + int idx; u32 rand; int i; + int j; for (i = 0; i < num_ports; i++) { if (tracker->netdev_state[i].tx_enabled && @@ -231,9 +288,14 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, disabled[disabled_ports_num++] = i; } - /* Use native mapping by default */ + /* Use native mapping by default where each port's buckets + * point the native port: 1 1 1 .. 1 2 2 2 ... 2 3 3 3 ... 3 etc + */ for (i = 0; i < num_ports; i++) - ports[i] = MLX5_LAG_EGRESS_PORT_1 + i; + for (j = 0; j < buckets; j++) { + idx = i * buckets + j; + ports[idx] = MLX5_LAG_EGRESS_PORT_1 + i; + } /* If all ports are disabled/enabled keep native mapping */ if (enabled_ports_num == num_ports || @@ -242,9 +304,10 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, /* Go over the disabled ports and for each assign a random active port */ for (i = 0; i < disabled_ports_num; i++) { - get_random_bytes(&rand, 4); - - ports[disabled[i]] = enabled[rand % enabled_ports_num] + 1; + for (j = 0; j < buckets; j++) { + get_random_bytes(&rand, 4); + ports[disabled[i] * buckets + j] = enabled[rand % enabled_ports_num] + 1; + } } } @@ -317,28 +380,33 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker) { + u8 ports[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS] = {}; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - u8 ports[MLX5_MAX_PORTS] = {}; + int idx; int err; int i; + int j; - mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ports); + mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ports); for (i = 0; i < ldev->ports; i++) { - if (ports[i] == ldev->v2p_map[i]) - continue; - err = _mlx5_modify_lag(ldev, ports); - if (err) { - mlx5_core_err(dev0, - "Failed to modify LAG (%d)\n", - err); - return; - } - memcpy(ldev->v2p_map, ports, sizeof(ports[0]) * - ldev->ports); + for (j = 0; j < ldev->buckets; j++) { + idx = i * ldev->buckets + j; + if (ports[idx] == ldev->v2p_map[idx]) + continue; + err = _mlx5_modify_lag(ldev, ports); + if (err) { + mlx5_core_err(dev0, + "Failed to modify LAG (%d)\n", + err); + return; + } + memcpy(ldev->v2p_map, ports, sizeof(ports)); - mlx5_lag_print_mapping(dev0, ldev); - break; + mlx5_lag_print_mapping(dev0, ldev, tracker, + ldev->flags); + break; + } } if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && @@ -357,6 +425,8 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table)) return -EINVAL; *flags |= MLX5_LAG_FLAG_HASH_BASED; + if (ldev->ports > 2) + ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS; } return 0; @@ -370,6 +440,7 @@ static int mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) *flags |= MLX5_LAG_FLAG_HASH_BASED; + return 0; } @@ -399,7 +470,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; int err; - 
mlx5_lag_print_mapping(dev0, ldev); + mlx5_lag_print_mapping(dev0, ldev, tracker, flags); mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", shared_fdb, get_str_port_sel_mode(flags)); @@ -439,11 +510,12 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; int err; - mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->v2p_map); err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags); if (err) return err; + mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map); + if (flags & MLX5_LAG_FLAG_HASH_BASED) { err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, ldev->v2p_map); @@ -1265,7 +1337,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, } } - port = ldev->v2p_map[port]; + port = ldev->v2p_map[port * ldev->buckets]; unlock: spin_unlock(&lag_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 1c8fb3fada0c..0c90d0ed03be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -4,6 +4,7 @@ #ifndef __MLX5_LAG_H__ #define __MLX5_LAG_H__ +#define MLX5_LAG_MAX_HASH_BUCKETS 16 #include "mlx5_core.h" #include "mp.h" #include "port_sel.h" @@ -46,9 +47,10 @@ struct lag_tracker { struct mlx5_lag { u8 flags; u8 ports; + u8 buckets; int mode_changes_in_progress; bool shared_fdb; - u8 v2p_map[MLX5_MAX_PORTS]; + u8 v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS]; struct kref ref; struct lag_func pf[MLX5_MAX_PORTS]; struct lag_tracker tracker; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 478b4ef723f8..d3a3fe4ce670 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -13,7 +13,7 @@ enum { static struct mlx5_flow_group * mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, struct mlx5_flow_definer *definer, - u8 ports) + u8 rules) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *fg; @@ -26,7 +26,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, MLX5_SET(create_flow_group_in, in, match_definer_id, mlx5_get_match_definer_id(definer)); MLX5_SET(create_flow_group_in, in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, in, end_flow_index, ports - 1); + MLX5_SET(create_flow_group_in, in, end_flow_index, rules - 1); MLX5_SET(create_flow_group_in, in, group_type, MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT); @@ -45,8 +45,10 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_namespace *ns; int err, i; + int idx; + int j; - ft_attr.max_fte = ldev->ports; + ft_attr.max_fte = ldev->ports * ldev->buckets; ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER; ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL); @@ -63,7 +65,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft, lag_definer->definer, - ldev->ports); + ft_attr.max_fte); if (IS_ERR(lag_definer->fg)) { err = PTR_ERR(lag_definer->fg); goto destroy_ft; @@ -73,18 +75,24 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.flags |= FLOW_ACT_NO_APPEND; for (i = 0; i < ldev->ports; i++) { - u8 affinity = ports[i]; - - dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev, - vhca_id); - 
lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft, - NULL, &flow_act, - &dest, 1); - if (IS_ERR(lag_definer->rules[i])) { - err = PTR_ERR(lag_definer->rules[i]); - while (i--) - mlx5_del_flow_rules(lag_definer->rules[i]); - goto destroy_fg; + for (j = 0; j < ldev->buckets; j++) { + u8 affinity; + + idx = i * ldev->buckets + j; + affinity = ports[idx]; + + dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev, + vhca_id); + lag_definer->rules[idx] = mlx5_add_flow_rules(lag_definer->ft, + NULL, &flow_act, + &dest, 1); + if (IS_ERR(lag_definer->rules[idx])) { + err = PTR_ERR(lag_definer->rules[idx]); + while (i--) + while (j--) + mlx5_del_flow_rules(lag_definer->rules[idx]); + goto destroy_fg; + } } } @@ -330,10 +338,16 @@ static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev, struct mlx5_lag_definer *lag_definer) { struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; + int idx; int i; + int j; - for (i = 0; i < ldev->ports; i++) - mlx5_del_flow_rules(lag_definer->rules[i]); + for (i = 0; i < ldev->ports; i++) { + for (j = 0; j < ldev->buckets; j++) { + idx = i * ldev->buckets + j; + mlx5_del_flow_rules(lag_definer->rules[idx]); + } + } mlx5_destroy_flow_group(lag_definer->fg); mlx5_destroy_flow_table(lag_definer->ft); mlx5_destroy_match_definer(dev, lag_definer->definer); @@ -544,31 +558,28 @@ destroy_definers: return err; } -static int -mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, - struct mlx5_lag_definer **definers, - u8 *ports) +static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, + struct mlx5_lag_definer *def, + u8 *ports) { - struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_flow_destination dest = {}; + int idx; int err; - int tt; int i; + int j; dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; - for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { - struct mlx5_flow_handle **rules = definers[tt]->rules; - - for (i = 0; i < ldev->ports; i++) { + for (i = 0; i < ldev->ports; i++) { + for (j = 0; j < ldev->buckets; j++) { + idx = i * ldev->buckets + j; if (ldev->v2p_map[i] == ports[i]) continue; - dest.vport.vhca_id = - MLX5_CAP_GEN(ldev->pf[ports[i] - 1].dev, - vhca_id); - err = mlx5_modify_rule_destination(rules[i], - &dest, NULL); + + dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev, + vhca_id); + err = mlx5_modify_rule_destination(def->rules[idx], &dest, NULL); if (err) return err; } @@ -577,6 +588,24 @@ mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, return 0; } +static int +mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, + struct mlx5_lag_definer **definers, + u8 *ports) +{ + struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; + int err; + int tt; + + for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { + err = __mlx5_lag_modify_definers_destinations(ldev, definers[tt], ports); + if (err) + return err; + } + + return 0; +} + int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports) { struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h index 79852ac41dbc..5ec3af2a3ecd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h @@ -10,7 +10,10 @@ struct mlx5_lag_definer { struct mlx5_flow_definer *definer; struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; - struct mlx5_flow_handle *rules[MLX5_MAX_PORTS]; + /* 
Each port has ldev->buckets number of rules and they are arrange in + * [port * buckets .. port * buckets + buckets) locations + */ + struct mlx5_flow_handle *rules[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS]; }; struct mlx5_lag_ttc { -- cgit v1.2.3 From 7f46a0b7327ae261f9981888708dbca22c283900 Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Tue, 15 Mar 2022 16:56:50 +0000 Subject: net/mlx5: Lag, add debugfs to query hardware lag state Lag state has become very complicated with many modes, flags, types and port selections methods and future work will add additional features. Add a debugfs to query the current lag state. A new directory named "lag" will be created under the mlx5 debugfs directory. As the driver has debugfs per pci function the location will be: /mlx5//lag For example: /sys/kernel/debug/mlx5/0000:08:00.0/lag The following files are exposed: - state: Returns "active" or "disabled". If "active" it means hardware lag is active. - members: Returns the BDFs of all the members of lag object. - type: Returns the type of the lag currently configured. Valid only if hardware lag is active. * "roce" - Members are bare metal PFs. * "switchdev" - Members are in switchdev mode. * "multipath" - ECMP offloads. - port_sel_mode: Returns the egress port selection method, valid only if hardware lag is active. * "queue_affinity" - Egress port is selected by the QP/SQ affinity. * "hash" - Egress port is selected by hash done on each packet. Controlled by: xmit_hash_policy of the bond device. - flags: Returns flags that are specific per lag @type. Valid only if hardware lag is active. * "shared_fdb" - "on" or "off", if "on" single FDB is used. - mapping: Returns the mapping which is used to select egress port. Valid only if hardware lag is active. If @port_sel_mode is "hash" returns the active egress ports. The hash result will select only active ports. if @port_sel_mode is "queue_affinity" returns the mapping between the configured port affinity of the QP/SQ and actual egress port. For example: * 1:1 - Mapping means if the configured affinity is port 1 traffic will egress via port 1. * 1:2 - Mapping means if the configured affinity is port 1 traffic will egress via port 2. This can happen if port 1 is down or in active/backup mode and port 1 is backup. 
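As a usage illustration (hypothetical output, assuming hardware lag is active in hash mode on the PCI function from the example path above):

  $ cat /sys/kernel/debug/mlx5/0000:08:00.0/lag/state
  active
  $ cat /sys/kernel/debug/mlx5/0000:08:00.0/lag/port_sel_mode
  hash
  $ cat /sys/kernel/debug/mlx5/0000:08:00.0/lag/mapping
  1
  2

In queue_affinity mode the mapping file instead prints affinity:port pairs such as "1:1", as described above.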
Signed-off-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/lag/debugfs.c | 173 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 11 +- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 9 ++ include/linux/mlx5/driver.h | 1 + 5 files changed, 192 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 81620c25c77e..7895ed7cc285 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,7 +14,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o alloc.o port.o mr.o pd.o \ transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \ - fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \ + fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \ fw_reset.o qos.o lib/tout.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c new file mode 100644 index 000000000000..443daf6e3d4b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include "lag.h" + +static char *get_str_mode_type(struct mlx5_lag *ldev) +{ + if (ldev->flags & MLX5_LAG_FLAG_ROCE) + return "roce"; + if (ldev->flags & MLX5_LAG_FLAG_SRIOV) + return "switchdev"; + if (ldev->flags & MLX5_LAG_FLAG_MULTIPATH) + return "multipath"; + + return NULL; +} + +static int type_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + struct mlx5_lag *ldev; + char *mode = NULL; + + ldev = dev->priv.lag; + mutex_lock(&ldev->lock); + if (__mlx5_lag_is_active(ldev)) + mode = get_str_mode_type(ldev); + mutex_unlock(&ldev->lock); + if (!mode) + return -EINVAL; + seq_printf(file, "%s\n", mode); + + return 0; +} + +static int port_sel_mode_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + struct mlx5_lag *ldev; + int ret = 0; + char *mode; + + ldev = dev->priv.lag; + mutex_lock(&ldev->lock); + if (__mlx5_lag_is_active(ldev)) + mode = get_str_port_sel_mode(ldev->flags); + else + ret = -EINVAL; + mutex_unlock(&ldev->lock); + if (ret || !mode) + return ret; + + seq_printf(file, "%s\n", mode); + return 0; +} + +static int state_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + struct mlx5_lag *ldev; + bool active; + + ldev = dev->priv.lag; + mutex_lock(&ldev->lock); + active = __mlx5_lag_is_active(ldev); + mutex_unlock(&ldev->lock); + seq_printf(file, "%s\n", active ? 
"active" : "disabled"); + return 0; +} + +static int flags_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + struct mlx5_lag *ldev; + bool shared_fdb; + bool lag_active; + + ldev = dev->priv.lag; + mutex_lock(&ldev->lock); + lag_active = __mlx5_lag_is_active(ldev); + if (lag_active) + shared_fdb = ldev->shared_fdb; + + mutex_unlock(&ldev->lock); + if (!lag_active) + return -EINVAL; + + seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off"); + return 0; +} + +static int mapping_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + u8 ports[MLX5_MAX_PORTS] = {}; + struct mlx5_lag *ldev; + bool hash = false; + bool lag_active; + int num_ports; + int i; + + ldev = dev->priv.lag; + mutex_lock(&ldev->lock); + lag_active = __mlx5_lag_is_active(ldev); + if (lag_active) { + if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) { + mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports, + &num_ports); + hash = true; + } else { + for (i = 0; i < ldev->ports; i++) + ports[i] = ldev->v2p_map[i]; + num_ports = ldev->ports; + } + } + mutex_unlock(&ldev->lock); + if (!lag_active) + return -EINVAL; + + for (i = 0; i < num_ports; i++) { + if (hash) + seq_printf(file, "%d\n", ports[i] + 1); + else + seq_printf(file, "%d:%d\n", i + 1, ports[i]); + } + + return 0; +} + +static int members_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + struct mlx5_lag *ldev; + int i; + + ldev = dev->priv.lag; + mutex_lock(&ldev->lock); + for (i = 0; i < ldev->ports; i++) { + if (!ldev->pf[i].dev) + continue; + seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device)); + } + mutex_unlock(&ldev->lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(type); +DEFINE_SHOW_ATTRIBUTE(port_sel_mode); +DEFINE_SHOW_ATTRIBUTE(state); +DEFINE_SHOW_ATTRIBUTE(flags); +DEFINE_SHOW_ATTRIBUTE(mapping); +DEFINE_SHOW_ATTRIBUTE(members); + +void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev) +{ + struct dentry *dbg; + + dbg = debugfs_create_dir("lag", mlx5_debugfs_get_dev_root(dev)); + dev->priv.dbg.lag_debugfs = dbg; + + debugfs_create_file("type", 0444, dbg, dev, &type_fops); + debugfs_create_file("port_sel_mode", 0444, dbg, dev, &port_sel_mode_fops); + debugfs_create_file("state", 0444, dbg, dev, &state_fops); + debugfs_create_file("flags", 0444, dbg, dev, &flags_fops); + debugfs_create_file("mapping", 0444, dbg, dev, &mapping_fops); + debugfs_create_file("members", 0444, dbg, dev, &members_fops); +} + +void mlx5_ldev_remove_debugfs(struct dentry *dbg) +{ + debugfs_remove_recursive(dbg); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 8a74c409b501..b6dd9043061f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -120,8 +120,8 @@ static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports, } } -static void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, - u8 *ports, int *num_enabled) +void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, + u8 *ports, int *num_enabled) { int i; @@ -454,7 +454,7 @@ static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev, return mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, flags); } -static char *get_str_port_sel_mode(u8 flags) +char *get_str_port_sel_mode(u8 flags) { if (flags & MLX5_LAG_FLAG_HASH_BASED) return "hash"; @@ -1106,6 +1106,10 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev 
*dev) if (!ldev) return; + /* mdev is being removed, might as well remove debugfs + * as early as possible. + */ + mlx5_ldev_remove_debugfs(dev->priv.dbg.lag_debugfs); recheck: mutex_lock(&ldev->lock); if (ldev->mode_changes_in_progress) { @@ -1137,6 +1141,7 @@ recheck: msleep(100); goto recheck; } + mlx5_ldev_add_debugfs(dev); } void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 0c90d0ed03be..46683b84ff84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -4,6 +4,8 @@ #ifndef __MLX5_LAG_H__ #define __MLX5_LAG_H__ +#include + #define MLX5_LAG_MAX_HASH_BUCKETS 16 #include "mlx5_core.h" #include "mp.h" @@ -90,4 +92,11 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, struct net_device *ndev); +char *get_str_port_sel_mode(u8 flags); +void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, + u8 *ports, int *num_enabled); + +void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev); +void mlx5_ldev_remove_debugfs(struct dentry *dbg); + #endif /* __MLX5_LAG_H__ */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index fdb9d07a05a4..d6bac3976913 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -558,6 +558,7 @@ struct mlx5_debugfs_entries { struct dentry *cq_debugfs; struct dentry *cmdif_debugfs; struct dentry *pages_debugfs; + struct dentry *lag_debugfs; }; struct mlx5_ft_pool; -- cgit v1.2.3 From b3552d6a3b8bd4260a971f1f70140ed3513cc4f7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 6 May 2022 13:00:29 -0700 Subject: eth: dpaa2-mac: remove a dead-code NULL check on fwnode parent Since commit 4e30e98c4b4c ("dpaa2-mac: return -EPROBE_DEFER from dpaa2_mac_open in case the fwnode is not set") @parent can't be NULL after the if. It's either the address of the ->fwnode of @dpmacs or @fwnode in case of ACPI. Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20220506200029.852310-1-kuba@kernel.org Signed-off-by: Paolo Abeni --- drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index c48811d3bcd5..c9bee9a0c9b2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -108,9 +108,6 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev, return ERR_PTR(-EPROBE_DEFER); } - if (!parent) - return NULL; - fwnode_for_each_child_node(parent, child) { err = -EINVAL; if (is_acpi_device_node(child)) -- cgit v1.2.3 From 42704b26b0f1d891f6cf4ebc877dbac0d17c690d Mon Sep 17 00:00:00 2001 From: Gerhard Engleder Date: Fri, 6 May 2022 22:01:37 +0200 Subject: ptp: Add cycles support for virtual clocks ptp vclocks require a free running time for their timecounter. Currently only a physical clock forced to free running is supported. If vclocks are used, then the physical clock cannot be synchronized anymore. The synchronized time is not available in hardware in this case. As a result, timed transmission with TAPRIO hardware support is not possible anymore. If hardware would support a free running time additionally to the physical clock, then the physical clock does not need to be forced to free running. 
Thus, the physical clocks can still be synchronized while vclocks are in use. The physical clock could be used to synchronize the time domain of the TSN network and trigger TAPRIO. In parallel vclocks can be used to synchronize other time domains. Introduce support for a free running cycle counter called cycles to physical clocks. Rework ptp vclocks to use this free running cycle counter. Default implementation is based on time of physical clock. Thus, behavior of ptp vclocks based on physical clocks without free running cycle counter is identical to previous behavior. Signed-off-by: Gerhard Engleder Acked-by: Richard Cochran Signed-off-by: Paolo Abeni --- drivers/ptp/ptp_clock.c | 31 +++++++++++++++++++++++++++---- drivers/ptp/ptp_private.h | 10 ++++++++++ drivers/ptp/ptp_sysfs.c | 11 +++++++---- drivers/ptp/ptp_vclock.c | 13 +++++-------- include/linux/ptp_clock_kernel.h | 31 +++++++++++++++++++++++++++++++ 5 files changed, 80 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index b6f2cfd15dd2..688cde320bb0 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -77,8 +77,8 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); - if (ptp_vclock_in_use(ptp)) { - pr_err("ptp: virtual clock in use\n"); + if (ptp_clock_freerun(ptp)) { + pr_err("ptp: physical clock is free running\n"); return -EBUSY; } @@ -103,8 +103,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) struct ptp_clock_info *ops; int err = -EOPNOTSUPP; - if (ptp_vclock_in_use(ptp)) { - pr_err("ptp: virtual clock in use\n"); + if (ptp_clock_freerun(ptp)) { + pr_err("ptp: physical clock is free running\n"); return -EBUSY; } @@ -178,6 +178,14 @@ static void ptp_clock_release(struct device *dev) kfree(ptp); } +static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts) +{ + if (info->getcyclesx64) + return info->getcyclesx64(info, ts, NULL); + else + return info->gettime64(info, ts); +} + static void ptp_aux_kworker(struct kthread_work *work) { struct ptp_clock *ptp = container_of(work, struct ptp_clock, @@ -225,6 +233,21 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, mutex_init(&ptp->n_vclocks_mux); init_waitqueue_head(&ptp->tsev_wq); + if (ptp->info->getcycles64 || ptp->info->getcyclesx64) { + ptp->has_cycles = true; + if (!ptp->info->getcycles64 && ptp->info->getcyclesx64) + ptp->info->getcycles64 = ptp_getcycles64; + } else { + /* Free running cycle counter not supported, use time. 
*/ + ptp->info->getcycles64 = ptp_getcycles64; + + if (ptp->info->gettimex64) + ptp->info->getcyclesx64 = ptp->info->gettimex64; + + if (ptp->info->getcrosststamp) + ptp->info->getcrosscycles = ptp->info->getcrosststamp; + } + if (ptp->info->do_aux_work) { kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index dba6be477067..ab47c10b3874 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -52,6 +52,7 @@ struct ptp_clock { int *vclock_index; struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ bool is_virtual_clock; + bool has_cycles; }; #define info_to_vclock(d) container_of((d), struct ptp_vclock, info) @@ -96,6 +97,15 @@ static inline bool ptp_vclock_in_use(struct ptp_clock *ptp) return in_use; } +/* Check if ptp clock shall be free running */ +static inline bool ptp_clock_freerun(struct ptp_clock *ptp) +{ + if (ptp->has_cycles) + return false; + + return ptp_vclock_in_use(ptp); +} + extern struct class *ptp_class; /* diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 9233bfedeb17..f30b0a439470 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -231,10 +231,13 @@ static ssize_t n_vclocks_store(struct device *dev, *(ptp->vclock_index + ptp->n_vclocks - i) = -1; } - if (num == 0) - dev_info(dev, "only physical clock in use now\n"); - else - dev_info(dev, "guarantee physical clock free running\n"); + /* Need to inform about changed physical clock behavior */ + if (!ptp->has_cycles) { + if (num == 0) + dev_info(dev, "only physical clock in use now\n"); + else + dev_info(dev, "guarantee physical clock free running\n"); + } ptp->n_vclocks = num; mutex_unlock(&ptp->n_vclocks_mux); diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index cb179a3ea508..3a095eab9cc5 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -68,7 +68,7 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp, int err; u64 ns; - err = pptp->info->gettimex64(pptp->info, &pts, sts); + err = pptp->info->getcyclesx64(pptp->info, &pts, sts); if (err) return err; @@ -104,7 +104,7 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp, int err; u64 ns; - err = pptp->info->getcrosststamp(pptp->info, xtstamp); + err = pptp->info->getcrosscycles(pptp->info, xtstamp); if (err) return err; @@ -143,10 +143,7 @@ static u64 ptp_vclock_read(const struct cyclecounter *cc) struct ptp_clock *ptp = vclock->pclock; struct timespec64 ts = {}; - if (ptp->info->gettimex64) - ptp->info->gettimex64(ptp->info, &ts, NULL); - else - ptp->info->gettime64(ptp->info, &ts); + ptp->info->getcycles64(ptp->info, &ts); return timespec64_to_ns(&ts); } @@ -168,11 +165,11 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) vclock->pclock = pclock; vclock->info = ptp_vclock_info; - if (pclock->info->gettimex64) + if (pclock->info->getcyclesx64) vclock->info.gettimex64 = ptp_vclock_gettimex; else vclock->info.gettime64 = ptp_vclock_gettime; - if (pclock->info->getcrosststamp) + if (pclock->info->getcrosscycles) vclock->info.getcrosststamp = ptp_vclock_getcrosststamp; vclock->cc = ptp_vclock_cc; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index e8cc8b6bbf50..ad309202cf9f 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -108,6 +108,32 @@ struct ptp_system_timestamp { * @settime64: Set the current time on the 
hardware clock. * parameter ts: Time value to set. * + * @getcycles64: Reads the current free running cycle counter from the hardware + * clock. + * If @getcycles64 and @getcyclesx64 are not supported, then + * @gettime64 or @gettimex64 will be used as default + * implementation. + * parameter ts: Holds the result. + * + * @getcyclesx64: Reads the current free running cycle counter from the + * hardware clock and optionally also the system clock. + * If @getcycles64 and @getcyclesx64 are not supported, then + * @gettimex64 will be used as default implementation if + * available. + * parameter ts: Holds the PHC timestamp. + * parameter sts: If not NULL, it holds a pair of timestamps + * from the system clock. The first reading is made right before + * reading the lowest bits of the PHC timestamp and the second + * reading immediately follows that. + * + * @getcrosscycles: Reads the current free running cycle counter from the + * hardware clock and system clock simultaneously. + * If @getcycles64 and @getcyclesx64 are not supported, then + * @getcrosststamp will be used as default implementation if + * available. + * parameter cts: Contains timestamp (device,system) pair, + * where system time is realtime and monotonic. + * * @enable: Request driver to enable or disable an ancillary feature. * parameter request: Desired resource to enable or disable. * parameter on: Caller passes one to enable or zero to disable. @@ -155,6 +181,11 @@ struct ptp_clock_info { int (*getcrosststamp)(struct ptp_clock_info *ptp, struct system_device_crosststamp *cts); int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); + int (*getcycles64)(struct ptp_clock_info *ptp, struct timespec64 *ts); + int (*getcyclesx64)(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts); + int (*getcrosscycles)(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts); int (*enable)(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, -- cgit v1.2.3 From d58809d854c9ee19e4cd41023e137e65e9dc3f94 Mon Sep 17 00:00:00 2001 From: Gerhard Engleder Date: Fri, 6 May 2022 22:01:39 +0200 Subject: ptp: Pass hwtstamp to ptp_convert_timestamp() ptp_convert_timestamp() converts only the timestamp hwtstamp, which is a field of the argument with the type struct skb_shared_hwtstamps *. So a pointer to the hwtstamp field of this structure is sufficient. Rework ptp_convert_timestamp() to use an argument of type ktime_t *. This allows to add additional timestamp manipulation stages before the call of ptp_convert_timestamp(). 
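A minimal sketch of the new calling convention (the socket-layer caller in the diff below does essentially this):

  /* Sketch: the raw hardware timestamp can now be tweaked before conversion. */
  ktime_t hwtstamp = shhwtstamps->hwtstamp;
  /* ... an optional manipulation stage can be inserted here ... */
  hwtstamp = ptp_convert_timestamp(&hwtstamp, sk->sk_bind_phc);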
Signed-off-by: Gerhard Engleder Acked-by: Richard Cochran Signed-off-by: Paolo Abeni --- drivers/ptp/ptp_vclock.c | 5 ++--- include/linux/ptp_clock_kernel.h | 7 +++---- net/socket.c | 2 +- 3 files changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index 3a095eab9cc5..c30bcce2bb43 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -232,8 +232,7 @@ out: } EXPORT_SYMBOL(ptp_get_vclocks_index); -ktime_t ptp_convert_timestamp(const struct skb_shared_hwtstamps *hwtstamps, - int vclock_index) +ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index) { char name[PTP_CLOCK_NAME_LEN] = ""; struct ptp_vclock *vclock; @@ -255,7 +254,7 @@ ktime_t ptp_convert_timestamp(const struct skb_shared_hwtstamps *hwtstamps, vclock = info_to_vclock(ptp->info); - ns = ktime_to_ns(hwtstamps->hwtstamp); + ns = ktime_to_ns(*hwtstamp); spin_lock_irqsave(&vclock->lock, flags); ns = timecounter_cyc2time(&vclock->tc, ns); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index ad309202cf9f..92b44161408e 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -384,17 +384,16 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index); /** * ptp_convert_timestamp() - convert timestamp to a ptp vclock time * - * @hwtstamps: skb_shared_hwtstamps structure pointer + * @hwtstamp: timestamp * @vclock_index: phc index of ptp vclock. * * Returns converted timestamp, or 0 on error. */ -ktime_t ptp_convert_timestamp(const struct skb_shared_hwtstamps *hwtstamps, - int vclock_index); +ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index); #else static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) { return 0; } -static inline ktime_t ptp_convert_timestamp(const struct skb_shared_hwtstamps *hwtstamps, +static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index) { return 0; } diff --git a/net/socket.c b/net/socket.c index 5c1c5e6100e1..0f680c7d968a 100644 --- a/net/socket.c +++ b/net/socket.c @@ -888,7 +888,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) && !skb_is_swtx_tstamp(skb, false_tstamp)) { if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC) - hwtstamp = ptp_convert_timestamp(shhwtstamps, + hwtstamp = ptp_convert_timestamp(&shhwtstamps->hwtstamp, sk->sk_bind_phc); else hwtstamp = shhwtstamps->hwtstamp; -- cgit v1.2.3 From fcf308e50928a9c9eca90c56f9fc6885005dafd1 Mon Sep 17 00:00:00 2001 From: Gerhard Engleder Date: Fri, 6 May 2022 22:01:41 +0200 Subject: ptp: Speed up vclock lookup ptp_convert_timestamp() is called in the RX path of network messages. The current implementation takes ~5000ns on 1.2GHz A53. This is too much for the hot path of packet processing. Introduce hash table for fast vclock lookup in ptp_convert_timestamp(). The execution time of ptp_convert_timestamp() is reduced to ~700ns on 1.2GHz A53. 
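A rough sketch of the new lookup path (abridged from the patch below; readers stay lockless under RCU and the per-vclock spinlock only guards the timecounter):

  unsigned int hash = vclock_index % HASH_SIZE(vclock_hash);

  rcu_read_lock();
  hlist_for_each_entry_rcu(vclock, &vclock_hash[hash], vclock_hash_node) {
  	if (vclock->clock->index != vclock_index)
  		continue;
  	/* convert the timestamp under vclock->lock via timecounter_cyc2time() */
  	break;
  }
  rcu_read_unlock();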
Signed-off-by: Gerhard Engleder Acked-by: Richard Cochran Signed-off-by: Paolo Abeni --- drivers/ptp/ptp_private.h | 1 + drivers/ptp/ptp_vclock.c | 66 +++++++++++++++++++++++++++++++++-------------- 2 files changed, 48 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index ab47c10b3874..77918a2c6701 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -63,6 +63,7 @@ struct ptp_vclock { struct ptp_clock *pclock; struct ptp_clock_info info; struct ptp_clock *clock; + struct hlist_node vclock_hash_node; struct cyclecounter cc; struct timecounter tc; spinlock_t lock; /* protects tc/cc */ diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index c30bcce2bb43..1c0ed4805c0a 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -5,6 +5,7 @@ * Copyright 2021 NXP */ #include +#include #include "ptp_private.h" #define PTP_VCLOCK_CC_SHIFT 31 @@ -13,6 +14,32 @@ #define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL #define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2) +/* protects vclock_hash addition/deletion */ +static DEFINE_SPINLOCK(vclock_hash_lock); + +static DEFINE_READ_MOSTLY_HASHTABLE(vclock_hash, 8); + +static void ptp_vclock_hash_add(struct ptp_vclock *vclock) +{ + spin_lock(&vclock_hash_lock); + + hlist_add_head_rcu(&vclock->vclock_hash_node, + &vclock_hash[vclock->clock->index % HASH_SIZE(vclock_hash)]); + + spin_unlock(&vclock_hash_lock); +} + +static void ptp_vclock_hash_del(struct ptp_vclock *vclock) +{ + spin_lock(&vclock_hash_lock); + + hlist_del_init_rcu(&vclock->vclock_hash_node); + + spin_unlock(&vclock_hash_lock); + + synchronize_rcu(); +} + static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct ptp_vclock *vclock = info_to_vclock(ptp); @@ -176,6 +203,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt", pclock->index); + INIT_HLIST_NODE(&vclock->vclock_hash_node); + spin_lock_init(&vclock->lock); vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev); @@ -187,11 +216,15 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) timecounter_init(&vclock->tc, &vclock->cc, 0); ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); + ptp_vclock_hash_add(vclock); + return vclock; } void ptp_vclock_unregister(struct ptp_vclock *vclock) { + ptp_vclock_hash_del(vclock); + ptp_clock_unregister(vclock->clock); kfree(vclock); } @@ -234,34 +267,29 @@ EXPORT_SYMBOL(ptp_get_vclocks_index); ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index) { - char name[PTP_CLOCK_NAME_LEN] = ""; + unsigned int hash = vclock_index % HASH_SIZE(vclock_hash); struct ptp_vclock *vclock; - struct ptp_clock *ptp; unsigned long flags; - struct device *dev; u64 ns; + u64 vclock_ns = 0; - snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index); - dev = class_find_device_by_name(ptp_class, name); - if (!dev) - return 0; + ns = ktime_to_ns(*hwtstamp); - ptp = dev_get_drvdata(dev); - if (!ptp->is_virtual_clock) { - put_device(dev); - return 0; - } + rcu_read_lock(); - vclock = info_to_vclock(ptp->info); + hlist_for_each_entry_rcu(vclock, &vclock_hash[hash], vclock_hash_node) { + if (vclock->clock->index != vclock_index) + continue; - ns = ktime_to_ns(*hwtstamp); + spin_lock_irqsave(&vclock->lock, flags); + vclock_ns = timecounter_cyc2time(&vclock->tc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + break; + } - spin_lock_irqsave(&vclock->lock, flags); - ns 
= timecounter_cyc2time(&vclock->tc, ns); - spin_unlock_irqrestore(&vclock->lock, flags); + rcu_read_unlock(); - put_device(dev); - return ns_to_ktime(ns); + return ns_to_ktime(vclock_ns); } EXPORT_SYMBOL(ptp_convert_timestamp); #endif -- cgit v1.2.3 From 0abb62b68252b1beefd553dd80f5b4c0b456bcaf Mon Sep 17 00:00:00 2001 From: Gerhard Engleder Date: Fri, 6 May 2022 22:01:42 +0200 Subject: tsnep: Add free running cycle counter support The TSN endpoint Ethernet MAC supports a free running counter additionally to its clock. This free running counter can be read and hardware timestamps are supported. As the name implies, this counter cannot be set and its frequency cannot be adjusted. Add free running cycle counter support based on this free running counter to physical clock. This also requires hardware time stamps based on that free running counter. Signed-off-by: Gerhard Engleder Acked-by: Jonathan Lemon Signed-off-by: Paolo Abeni --- drivers/net/ethernet/engleder/tsnep_hw.h | 9 ++++++-- drivers/net/ethernet/engleder/tsnep_main.c | 33 +++++++++++++++++++++++++----- drivers/net/ethernet/engleder/tsnep_ptp.c | 28 +++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h index 71cc8577d640..916ceac3ada2 100644 --- a/drivers/net/ethernet/engleder/tsnep_hw.h +++ b/drivers/net/ethernet/engleder/tsnep_hw.h @@ -43,6 +43,10 @@ #define ECM_RESET_CHANNEL 0x00000100 #define ECM_RESET_TXRX 0x00010000 +/* counter */ +#define ECM_COUNTER_LOW 0x0028 +#define ECM_COUNTER_HIGH 0x002C + /* control and status */ #define ECM_STATUS 0x0080 #define ECM_LINK_MODE_OFF 0x01000000 @@ -190,7 +194,8 @@ struct tsnep_tx_desc { /* tsnep TX descriptor writeback */ struct tsnep_tx_desc_wb { __le32 properties; - __le32 reserved1[3]; + __le32 reserved1; + __le64 counter; __le64 timestamp; __le32 dma_delay; __le32 reserved2; @@ -221,7 +226,7 @@ struct tsnep_rx_desc_wb { /* tsnep RX inline meta */ struct tsnep_rx_inline { - __le64 reserved; + __le64 counter; __le64 timestamp; }; diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 49c93aa38862..cb069a0af7b9 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -470,8 +470,15 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) (__le32_to_cpu(entry->desc_wb->properties) & TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { struct skb_shared_hwtstamps hwtstamps; - u64 timestamp = - __le64_to_cpu(entry->desc_wb->timestamp); + u64 timestamp; + + if (skb_shinfo(entry->skb)->tx_flags & + SKBTX_HW_TSTAMP_USE_CYCLES) + timestamp = + __le64_to_cpu(entry->desc_wb->counter); + else + timestamp = + __le64_to_cpu(entry->desc_wb->timestamp); memset(&hwtstamps, 0, sizeof(hwtstamps)); hwtstamps.hwtstamp = ns_to_ktime(timestamp); @@ -704,11 +711,11 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, skb_hwtstamps(skb); struct tsnep_rx_inline *rx_inline = (struct tsnep_rx_inline *)skb->data; - u64 timestamp = - __le64_to_cpu(rx_inline->timestamp); + skb_shinfo(skb)->tx_flags |= + SKBTX_HW_TSTAMP_NETDEV; memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(timestamp); + hwtstamps->netdev_data = rx_inline; } skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE); skb->protocol = eth_type_trans(skb, @@ -1010,6 +1017,21 @@ static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr) return 0; } +static 
ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev, + const struct skb_shared_hwtstamps *hwtstamps, + bool cycles) +{ + struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data; + u64 timestamp; + + if (cycles) + timestamp = __le64_to_cpu(rx_inline->counter); + else + timestamp = __le64_to_cpu(rx_inline->timestamp); + + return ns_to_ktime(timestamp); +} + static const struct net_device_ops tsnep_netdev_ops = { .ndo_open = tsnep_netdev_open, .ndo_stop = tsnep_netdev_close, @@ -1019,6 +1041,7 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_get_stats64 = tsnep_netdev_get_stats64, .ndo_set_mac_address = tsnep_netdev_set_mac_address, + .ndo_get_tstamp = tsnep_netdev_get_tstamp, .ndo_setup_tc = tsnep_tc_setup, }; diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c index eaad453d487e..54fbf0126815 100644 --- a/drivers/net/ethernet/engleder/tsnep_ptp.c +++ b/drivers/net/ethernet/engleder/tsnep_ptp.c @@ -175,6 +175,33 @@ static int tsnep_ptp_settime64(struct ptp_clock_info *ptp, return 0; } +static int tsnep_ptp_getcyclesx64(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, + ptp_clock_info); + u32 high_before; + u32 low; + u32 high; + u64 counter; + + /* read high dword twice to detect overrun */ + high = ioread32(adapter->addr + ECM_COUNTER_HIGH); + do { + ptp_read_system_prets(sts); + low = ioread32(adapter->addr + ECM_COUNTER_LOW); + ptp_read_system_postts(sts); + high_before = high; + high = ioread32(adapter->addr + ECM_COUNTER_HIGH); + } while (high != high_before); + counter = (((u64)high) << 32) | ((u64)low); + + *ts = ns_to_timespec64(counter); + + return 0; +} + int tsnep_ptp_init(struct tsnep_adapter *adapter) { int retval = 0; @@ -192,6 +219,7 @@ int tsnep_ptp_init(struct tsnep_adapter *adapter) adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime; adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64; adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64; + adapter->ptp_clock_info.getcyclesx64 = tsnep_ptp_getcyclesx64; spin_lock_init(&adapter->ptp_lock); -- cgit v1.2.3 From fd3040b9394c58bcedb83554bcf1a073021d6b36 Mon Sep 17 00:00:00 2001 From: Wells Lu Date: Sun, 8 May 2022 18:13:20 +0800 Subject: net: ethernet: Add driver for Sunplus SP7021 Add driver for Sunplus SP7021 SoC. 
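A minimal config fragment to build it as a module, per the Kconfig added below (the module is named sp7021_emac):

  CONFIG_NET_VENDOR_SUNPLUS=y
  CONFIG_SP7021_EMAC=m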
Reviewed-by: Andrew Lunn Signed-off-by: Wells Lu Signed-off-by: Paolo Abeni --- MAINTAINERS | 1 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/sunplus/Kconfig | 35 ++ drivers/net/ethernet/sunplus/Makefile | 6 + drivers/net/ethernet/sunplus/spl2sw_define.h | 270 ++++++++++++ drivers/net/ethernet/sunplus/spl2sw_desc.c | 228 ++++++++++ drivers/net/ethernet/sunplus/spl2sw_desc.h | 19 + drivers/net/ethernet/sunplus/spl2sw_driver.c | 578 +++++++++++++++++++++++++ drivers/net/ethernet/sunplus/spl2sw_int.c | 271 ++++++++++++ drivers/net/ethernet/sunplus/spl2sw_int.h | 13 + drivers/net/ethernet/sunplus/spl2sw_mac.c | 274 ++++++++++++ drivers/net/ethernet/sunplus/spl2sw_mac.h | 18 + drivers/net/ethernet/sunplus/spl2sw_mdio.c | 126 ++++++ drivers/net/ethernet/sunplus/spl2sw_mdio.h | 12 + drivers/net/ethernet/sunplus/spl2sw_phy.c | 92 ++++ drivers/net/ethernet/sunplus/spl2sw_phy.h | 12 + drivers/net/ethernet/sunplus/spl2sw_register.h | 86 ++++ 18 files changed, 2043 insertions(+) create mode 100644 drivers/net/ethernet/sunplus/Kconfig create mode 100644 drivers/net/ethernet/sunplus/Makefile create mode 100644 drivers/net/ethernet/sunplus/spl2sw_define.h create mode 100644 drivers/net/ethernet/sunplus/spl2sw_desc.c create mode 100644 drivers/net/ethernet/sunplus/spl2sw_desc.h create mode 100644 drivers/net/ethernet/sunplus/spl2sw_driver.c create mode 100644 drivers/net/ethernet/sunplus/spl2sw_int.c create mode 100644 drivers/net/ethernet/sunplus/spl2sw_int.h create mode 100644 drivers/net/ethernet/sunplus/spl2sw_mac.c create mode 100644 drivers/net/ethernet/sunplus/spl2sw_mac.h create mode 100644 drivers/net/ethernet/sunplus/spl2sw_mdio.c create mode 100644 drivers/net/ethernet/sunplus/spl2sw_mdio.h create mode 100644 drivers/net/ethernet/sunplus/spl2sw_phy.c create mode 100644 drivers/net/ethernet/sunplus/spl2sw_phy.h create mode 100644 drivers/net/ethernet/sunplus/spl2sw_register.h (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index 711456384f48..de6a26a01166 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18914,6 +18914,7 @@ L: netdev@vger.kernel.org S: Maintained W: https://sunplus.atlassian.net/wiki/spaces/doc/overview F: Documentation/devicetree/bindings/net/sunplus,sp7021-emac.yaml +F: drivers/net/ethernet/sunplus/ SUNPLUS OCOTP DRIVER M: Vincent Shih diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 827993022386..955abbc5490e 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -179,6 +179,7 @@ source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/socionext/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" +source "drivers/net/ethernet/sunplus/Kconfig" source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 8ef43e0c33c0..9eb01169957f 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ +obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ diff --git a/drivers/net/ethernet/sunplus/Kconfig b/drivers/net/ethernet/sunplus/Kconfig new file 
mode 100644 index 000000000000..d0144a2ab918 --- /dev/null +++ b/drivers/net/ethernet/sunplus/Kconfig @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Sunplus network device configuration +# + +config NET_VENDOR_SUNPLUS + bool "Sunplus devices" + default y + depends on ARCH_SUNPLUS || COMPILE_TEST + help + If you have a network (Ethernet) card belonging to this + class, say Y here. + + Note that the answer to this question doesn't directly + affect the kernel: saying N will just cause the configurator + to skip all the questions about Sunplus cards. If you say Y, + you will be asked for your specific card in the following + questions. + +if NET_VENDOR_SUNPLUS + +config SP7021_EMAC + tristate "Sunplus Dual 10M/100M Ethernet devices" + depends on SOC_SP7021 || COMPILE_TEST + select PHYLIB + select COMMON_CLK_SP7021 + select RESET_SUNPLUS + select NVMEM_SUNPLUS_OCOTP + help + If you have Sunplus dual 10M/100M Ethernet devices, say Y. + The network device creates two net-device interfaces. + To compile this driver as a module, choose M here. The + module will be called sp7021_emac. + +endif # NET_VENDOR_SUNPLUS diff --git a/drivers/net/ethernet/sunplus/Makefile b/drivers/net/ethernet/sunplus/Makefile new file mode 100644 index 000000000000..ef7d7d0a7e71 --- /dev/null +++ b/drivers/net/ethernet/sunplus/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Sunplus network device drivers. +# +obj-$(CONFIG_SP7021_EMAC) += sp7021_emac.o +sp7021_emac-objs := spl2sw_driver.o spl2sw_int.o spl2sw_desc.o spl2sw_mac.o spl2sw_mdio.o spl2sw_phy.o diff --git a/drivers/net/ethernet/sunplus/spl2sw_define.h b/drivers/net/ethernet/sunplus/spl2sw_define.h new file mode 100644 index 000000000000..acc6c1228ebc --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_define.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __SPL2SW_DEFINE_H__ +#define __SPL2SW_DEFINE_H__ + +#define MAX_NETDEV_NUM 2 /* Maximum # of net-device */ + +/* Interrupt status */ +#define MAC_INT_DAISY_MODE_CHG BIT(31) /* Daisy Mode Change */ +#define MAC_INT_IP_CHKSUM_ERR BIT(23) /* IP Checksum Append Error */ +#define MAC_INT_WDOG_TIMER1_EXP BIT(22) /* Watchdog Timer1 Expired */ +#define MAC_INT_WDOG_TIMER0_EXP BIT(21) /* Watchdog Timer0 Expired */ +#define MAC_INT_INTRUDER_ALERT BIT(20) /* Atruder Alert */ +#define MAC_INT_PORT_ST_CHG BIT(19) /* Port Status Change */ +#define MAC_INT_BC_STORM BIT(18) /* Broad Cast Storm */ +#define MAC_INT_MUST_DROP_LAN BIT(17) /* Global Queue Exhausted */ +#define MAC_INT_GLOBAL_QUE_FULL BIT(16) /* Global Queue Full */ +#define MAC_INT_TX_SOC_PAUSE_ON BIT(15) /* Soc Port TX Pause On */ +#define MAC_INT_RX_SOC_QUE_FULL BIT(14) /* Soc Port Out Queue Full */ +#define MAC_INT_TX_LAN1_QUE_FULL BIT(9) /* Port 1 Out Queue Full */ +#define MAC_INT_TX_LAN0_QUE_FULL BIT(8) /* Port 0 Out Queue Full */ +#define MAC_INT_RX_L_DESCF BIT(7) /* Low Priority Descriptor Full */ +#define MAC_INT_RX_H_DESCF BIT(6) /* High Priority Descriptor Full */ +#define MAC_INT_RX_DONE_L BIT(5) /* RX Low Priority Done */ +#define MAC_INT_RX_DONE_H BIT(4) /* RX High Priority Done */ +#define MAC_INT_TX_DONE_L BIT(3) /* TX Low Priority Done */ +#define MAC_INT_TX_DONE_H BIT(2) /* TX High Priority Done */ +#define MAC_INT_TX_DES_ERR BIT(1) /* TX Descriptor Error */ +#define MAC_INT_RX_DES_ERR BIT(0) /* Rx Descriptor Error */ + +#define MAC_INT_RX (MAC_INT_RX_DONE_H | MAC_INT_RX_DONE_L | \ + MAC_INT_RX_DES_ERR) +#define MAC_INT_TX (MAC_INT_TX_DONE_L | MAC_INT_TX_DONE_H | \ + MAC_INT_TX_DES_ERR) +#define MAC_INT_MASK_DEF (MAC_INT_DAISY_MODE_CHG | MAC_INT_IP_CHKSUM_ERR | \ + MAC_INT_WDOG_TIMER1_EXP | MAC_INT_WDOG_TIMER0_EXP | \ + MAC_INT_INTRUDER_ALERT | MAC_INT_PORT_ST_CHG | \ + MAC_INT_BC_STORM | MAC_INT_MUST_DROP_LAN | \ + MAC_INT_GLOBAL_QUE_FULL | MAC_INT_TX_SOC_PAUSE_ON | \ + MAC_INT_RX_SOC_QUE_FULL | MAC_INT_TX_LAN1_QUE_FULL | \ + MAC_INT_TX_LAN0_QUE_FULL | MAC_INT_RX_L_DESCF | \ + MAC_INT_RX_H_DESCF) + +/* Address table search */ +#define MAC_ADDR_LOOKUP_IDLE BIT(2) +#define MAC_SEARCH_NEXT_ADDR BIT(1) +#define MAC_BEGIN_SEARCH_ADDR BIT(0) + +/* Address table status */ +#define MAC_HASH_LOOKUP_ADDR GENMASK(31, 22) +#define MAC_R_PORT_MAP GENMASK(13, 12) +#define MAC_R_CPU_PORT GENMASK(11, 10) +#define MAC_R_VID GENMASK(9, 7) +#define MAC_R_AGE GENMASK(6, 4) +#define MAC_R_PROXY BIT(3) +#define MAC_R_MC_INGRESS BIT(2) +#define MAC_AT_TABLE_END BIT(1) +#define MAC_AT_DATA_READY BIT(0) + +/* Wt mac ad0 */ +#define MAC_W_PORT_MAP GENMASK(13, 12) +#define MAC_W_LAN_PORT_1 BIT(13) +#define MAC_W_LAN_PORT_0 BIT(12) +#define MAC_W_CPU_PORT GENMASK(11, 10) +#define MAC_W_CPU_PORT_1 BIT(11) +#define MAC_W_CPU_PORT_0 BIT(10) +#define MAC_W_VID GENMASK(9, 7) +#define MAC_W_AGE GENMASK(6, 4) +#define MAC_W_PROXY BIT(3) +#define MAC_W_MC_INGRESS BIT(2) +#define MAC_W_MAC_DONE BIT(1) +#define MAC_W_MAC_CMD BIT(0) + +/* W mac 15_0 bus */ +#define MAC_W_MAC_15_0 GENMASK(15, 0) + +/* W mac 47_16 bus */ +#define MAC_W_MAC_47_16 GENMASK(31, 0) + +/* PVID config 0 */ +#define MAC_P1_PVID GENMASK(6, 4) +#define MAC_P0_PVID GENMASK(2, 0) + +/* VLAN member config 0 */ +#define MAC_VLAN_MEMSET_3 GENMASK(27, 24) +#define MAC_VLAN_MEMSET_2 GENMASK(19, 16) +#define MAC_VLAN_MEMSET_1 GENMASK(11, 8) +#define MAC_VLAN_MEMSET_0 GENMASK(3, 0) + +/* VLAN member config 1 */ +#define MAC_VLAN_MEMSET_5 GENMASK(11, 8) +#define MAC_VLAN_MEMSET_4 
GENMASK(3, 0) + +/* Port ability */ +#define MAC_PORT_ABILITY_LINK_ST GENMASK(25, 24) + +/* CPU control */ +#define MAC_EN_SOC1_AGING BIT(15) +#define MAC_EN_SOC0_AGING BIT(14) +#define MAC_DIS_LRN_SOC1 BIT(13) +#define MAC_DIS_LRN_SOC0 BIT(12) +#define MAC_EN_CRC_SOC1 BIT(9) +#define MAC_EN_CRC_SOC0 BIT(8) +#define MAC_DIS_SOC1_CPU BIT(7) +#define MAC_DIS_SOC0_CPU BIT(6) +#define MAC_DIS_BC2CPU_P1 BIT(5) +#define MAC_DIS_BC2CPU_P0 BIT(4) +#define MAC_DIS_MC2CPU GENMASK(3, 2) +#define MAC_DIS_MC2CPU_P1 BIT(3) +#define MAC_DIS_MC2CPU_P0 BIT(2) +#define MAC_DIS_UN2CPU GENMASK(1, 0) + +/* Port control 0 */ +#define MAC_DIS_PORT GENMASK(25, 24) +#define MAC_DIS_PORT1 BIT(25) +#define MAC_DIS_PORT0 BIT(24) +#define MAC_DIS_RMC2CPU_P1 BIT(17) +#define MAC_DIS_RMC2CPU_P0 BIT(16) +#define MAC_EN_FLOW_CTL_P1 BIT(9) +#define MAC_EN_FLOW_CTL_P0 BIT(8) +#define MAC_EN_BACK_PRESS_P1 BIT(1) +#define MAC_EN_BACK_PRESS_P0 BIT(0) + +/* Port control 1 */ +#define MAC_DIS_SA_LRN_P1 BIT(9) +#define MAC_DIS_SA_LRN_P0 BIT(8) + +/* Port control 2 */ +#define MAC_EN_AGING_P1 BIT(9) +#define MAC_EN_AGING_P0 BIT(8) + +/* Switch Global control */ +#define MAC_RMC_TB_FAULT_RULE GENMASK(26, 25) +#define MAC_LED_FLASH_TIME GENMASK(24, 23) +#define MAC_BC_STORM_PREV GENMASK(5, 4) + +/* LED port 0 */ +#define MAC_LED_ACT_HI BIT(28) + +/* PHY control register 0 */ +#define MAC_CPU_PHY_WT_DATA GENMASK(31, 16) +#define MAC_CPU_PHY_CMD GENMASK(14, 13) +#define MAC_CPU_PHY_REG_ADDR GENMASK(12, 8) +#define MAC_CPU_PHY_ADDR GENMASK(4, 0) + +/* PHY control register 1 */ +#define MAC_CPU_PHY_RD_DATA GENMASK(31, 16) +#define MAC_PHY_RD_RDY BIT(1) +#define MAC_PHY_WT_DONE BIT(0) + +/* MAC force mode */ +#define MAC_EXT_PHY1_ADDR GENMASK(28, 24) +#define MAC_EXT_PHY0_ADDR GENMASK(20, 16) +#define MAC_FORCE_RMII_LINK GENMASK(9, 8) +#define MAC_FORCE_RMII_EN_1 BIT(7) +#define MAC_FORCE_RMII_EN_0 BIT(6) +#define MAC_FORCE_RMII_FC GENMASK(5, 4) +#define MAC_FORCE_RMII_DPX GENMASK(3, 2) +#define MAC_FORCE_RMII_SPD GENMASK(1, 0) + +/* CPU transmit trigger */ +#define MAC_TRIG_L_SOC0 BIT(1) +#define MAC_TRIG_H_SOC0 BIT(0) + +/* Config descriptor queue */ +#define TX_DESC_NUM 16 /* # of descriptors in TX queue */ +#define MAC_GUARD_DESC_NUM 2 /* # of descriptors of gap 0 */ +#define RX_QUEUE0_DESC_NUM 16 /* # of descriptors in RX queue 0 */ +#define RX_QUEUE1_DESC_NUM 16 /* # of descriptors in RX queue 1 */ +#define TX_DESC_QUEUE_NUM 1 /* # of TX queue */ +#define RX_DESC_QUEUE_NUM 2 /* # of RX queue */ + +#define MAC_RX_LEN_MAX 2047 /* Size of RX buffer */ + +/* Tx descriptor */ +/* cmd1 */ +#define TXD_OWN BIT(31) +#define TXD_ERR_CODE GENMASK(29, 26) +#define TXD_SOP BIT(25) /* start of a packet */ +#define TXD_EOP BIT(24) /* end of a packet */ +#define TXD_VLAN GENMASK(17, 12) +#define TXD_PKT_LEN GENMASK(10, 0) /* packet length */ +/* cmd2 */ +#define TXD_EOR BIT(31) /* end of ring */ +#define TXD_BUF_LEN2 GENMASK(22, 12) +#define TXD_BUF_LEN1 GENMASK(10, 0) + +/* Rx descriptor */ +/* cmd1 */ +#define RXD_OWN BIT(31) +#define RXD_ERR_CODE GENMASK(29, 26) +#define RXD_TCP_UDP_CHKSUM BIT(23) +#define RXD_PROXY BIT(22) +#define RXD_PROTOCOL GENMASK(21, 20) +#define RXD_VLAN_TAG BIT(19) +#define RXD_IP_CHKSUM BIT(18) +#define RXD_ROUTE_TYPE GENMASK(17, 16) +#define RXD_PKT_SP GENMASK(14, 12) /* packet source port */ +#define RXD_PKT_LEN GENMASK(10, 0) /* packet length */ +/* cmd2 */ +#define RXD_EOR BIT(31) /* end of ring */ +#define RXD_BUF_LEN2 GENMASK(22, 12) +#define RXD_BUF_LEN1 GENMASK(10, 0) + +/* structure of descriptor */ +struct 
spl2sw_mac_desc { + u32 cmd1; + u32 cmd2; + u32 addr1; + u32 addr2; +}; + +struct spl2sw_skb_info { + struct sk_buff *skb; + u32 mapping; + u32 len; +}; + +struct spl2sw_common { + void __iomem *l2sw_reg_base; + + struct platform_device *pdev; + struct reset_control *rstc; + struct clk *clk; + + void *desc_base; + dma_addr_t desc_dma; + s32 desc_size; + struct spl2sw_mac_desc *rx_desc[RX_DESC_QUEUE_NUM]; + struct spl2sw_skb_info *rx_skb_info[RX_DESC_QUEUE_NUM]; + u32 rx_pos[RX_DESC_QUEUE_NUM]; + u32 rx_desc_num[RX_DESC_QUEUE_NUM]; + u32 rx_desc_buff_size; + + struct spl2sw_mac_desc *tx_desc; + struct spl2sw_skb_info tx_temp_skb_info[TX_DESC_NUM]; + u32 tx_done_pos; + u32 tx_pos; + u32 tx_desc_full; + + struct net_device *ndev[MAX_NETDEV_NUM]; + struct mii_bus *mii_bus; + + struct napi_struct rx_napi; + struct napi_struct tx_napi; + + spinlock_t tx_lock; /* spinlock for accessing tx buffer */ + spinlock_t mdio_lock; /* spinlock for mdio commands */ + spinlock_t int_mask_lock; /* spinlock for accessing int mask reg. */ + + u8 enable; +}; + +struct spl2sw_mac { + struct net_device *ndev; + struct spl2sw_common *comm; + + u8 mac_addr[ETH_ALEN]; + phy_interface_t phy_mode; + struct device_node *phy_node; + + u8 lan_port; + u8 to_vlan; + u8 vlan_id; +}; + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_desc.c b/drivers/net/ethernet/sunplus/spl2sw_desc.c new file mode 100644 index 000000000000..3f0d9f78b37d --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_desc.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include + +#include "spl2sw_define.h" +#include "spl2sw_desc.h" + +void spl2sw_rx_descs_flush(struct spl2sw_common *comm) +{ + struct spl2sw_skb_info *rx_skbinfo; + struct spl2sw_mac_desc *rx_desc; + u32 i, j; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) { + rx_desc = comm->rx_desc[i]; + rx_skbinfo = comm->rx_skb_info[i]; + for (j = 0; j < comm->rx_desc_num[i]; j++) { + rx_desc[j].addr1 = rx_skbinfo[j].mapping; + rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ? + RXD_EOR | comm->rx_desc_buff_size : + comm->rx_desc_buff_size; + wmb(); /* Set RXD_OWN after other fields are ready. */ + rx_desc[j].cmd1 = RXD_OWN; + } + } +} + +void spl2sw_tx_descs_clean(struct spl2sw_common *comm) +{ + u32 i; + + if (!comm->tx_desc) + return; + + for (i = 0; i < TX_DESC_NUM; i++) { + comm->tx_desc[i].cmd1 = 0; + wmb(); /* Clear TXD_OWN and then set other fields. */ + comm->tx_desc[i].cmd2 = 0; + comm->tx_desc[i].addr1 = 0; + comm->tx_desc[i].addr2 = 0; + + if (comm->tx_temp_skb_info[i].mapping) { + dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping, + comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE); + comm->tx_temp_skb_info[i].mapping = 0; + } + + if (comm->tx_temp_skb_info[i].skb) { + dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb); + comm->tx_temp_skb_info[i].skb = NULL; + } + } +} + +void spl2sw_rx_descs_clean(struct spl2sw_common *comm) +{ + struct spl2sw_skb_info *rx_skbinfo; + struct spl2sw_mac_desc *rx_desc; + u32 i, j; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) { + if (!comm->rx_skb_info[i]) + continue; + + rx_desc = comm->rx_desc[i]; + rx_skbinfo = comm->rx_skb_info[i]; + for (j = 0; j < comm->rx_desc_num[i]; j++) { + rx_desc[j].cmd1 = 0; + wmb(); /* Clear RXD_OWN and then set other fields. 
*/ + rx_desc[j].cmd2 = 0; + rx_desc[j].addr1 = 0; + + if (rx_skbinfo[j].skb) { + dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping, + comm->rx_desc_buff_size, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_skbinfo[j].skb); + rx_skbinfo[j].skb = NULL; + rx_skbinfo[j].mapping = 0; + } + } + + kfree(rx_skbinfo); + comm->rx_skb_info[i] = NULL; + } +} + +void spl2sw_descs_clean(struct spl2sw_common *comm) +{ + spl2sw_rx_descs_clean(comm); + spl2sw_tx_descs_clean(comm); +} + +void spl2sw_descs_free(struct spl2sw_common *comm) +{ + u32 i; + + spl2sw_descs_clean(comm); + comm->tx_desc = NULL; + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_desc[i] = NULL; + + /* Free descriptor area */ + if (comm->desc_base) { + dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base, + comm->desc_dma); + comm->desc_base = NULL; + comm->desc_dma = 0; + comm->desc_size = 0; + } +} + +void spl2sw_tx_descs_init(struct spl2sw_common *comm) +{ + memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) * + (TX_DESC_NUM + MAC_GUARD_DESC_NUM)); +} + +int spl2sw_rx_descs_init(struct spl2sw_common *comm) +{ + struct spl2sw_skb_info *rx_skbinfo; + struct spl2sw_mac_desc *rx_desc; + struct sk_buff *skb; + u32 mapping; + u32 i, j; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) { + comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo), + GFP_KERNEL | GFP_DMA); + if (!comm->rx_skb_info[i]) + goto mem_alloc_fail; + + rx_skbinfo = comm->rx_skb_info[i]; + rx_desc = comm->rx_desc[i]; + for (j = 0; j < comm->rx_desc_num[i]; j++) { + skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size); + if (!skb) + goto mem_alloc_fail; + + rx_skbinfo[j].skb = skb; + mapping = dma_map_single(&comm->pdev->dev, skb->data, + comm->rx_desc_buff_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&comm->pdev->dev, mapping)) + goto mem_alloc_fail; + + rx_skbinfo[j].mapping = mapping; + rx_desc[j].addr1 = mapping; + rx_desc[j].addr2 = 0; + rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ? + RXD_EOR | comm->rx_desc_buff_size : + comm->rx_desc_buff_size; + wmb(); /* Set RXD_OWN after other fields are effective. 
*/ + rx_desc[j].cmd1 = RXD_OWN; + } + } + + return 0; + +mem_alloc_fail: + spl2sw_rx_descs_clean(comm); + return -ENOMEM; +} + +int spl2sw_descs_alloc(struct spl2sw_common *comm) +{ + s32 desc_size; + u32 i; + + /* Alloc descriptor area */ + desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc); + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc); + + comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma, + GFP_KERNEL); + if (!comm->desc_base) + return -ENOMEM; + + comm->desc_size = desc_size; + + /* Setup Tx descriptor */ + comm->tx_desc = comm->desc_base; + + /* Setup Rx descriptor */ + comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM]; + for (i = 1; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1]; + + return 0; +} + +int spl2sw_descs_init(struct spl2sw_common *comm) +{ + u32 i, ret; + + /* Initialize rx descriptor's data */ + comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM; + comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) { + comm->rx_desc[i] = NULL; + comm->rx_skb_info[i] = NULL; + comm->rx_pos[i] = 0; + } + comm->rx_desc_buff_size = MAC_RX_LEN_MAX; + + /* Initialize tx descriptor's data */ + comm->tx_done_pos = 0; + comm->tx_desc = NULL; + comm->tx_pos = 0; + comm->tx_desc_full = 0; + for (i = 0; i < TX_DESC_NUM; i++) + comm->tx_temp_skb_info[i].skb = NULL; + + /* Allocate tx & rx descriptors. */ + ret = spl2sw_descs_alloc(comm); + if (ret) + return ret; + + spl2sw_tx_descs_init(comm); + + return spl2sw_rx_descs_init(comm); +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_desc.h b/drivers/net/ethernet/sunplus/spl2sw_desc.h new file mode 100644 index 000000000000..f04e2d85c3b3 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_desc.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_DESC_H__ +#define __SPL2SW_DESC_H__ + +void spl2sw_rx_descs_flush(struct spl2sw_common *comm); +void spl2sw_tx_descs_clean(struct spl2sw_common *comm); +void spl2sw_rx_descs_clean(struct spl2sw_common *comm); +void spl2sw_descs_clean(struct spl2sw_common *comm); +void spl2sw_descs_free(struct spl2sw_common *comm); +void spl2sw_tx_descs_init(struct spl2sw_common *comm); +int spl2sw_rx_descs_init(struct spl2sw_common *comm); +int spl2sw_descs_alloc(struct spl2sw_common *comm); +int spl2sw_descs_init(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c new file mode 100644 index 000000000000..8320fa833d3e --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_desc.h" +#include "spl2sw_mdio.h" +#include "spl2sw_phy.h" +#include "spl2sw_int.h" +#include "spl2sw_mac.h" + +/* net device operations */ +static int spl2sw_ethernet_open(struct net_device *ndev) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + struct spl2sw_common *comm = mac->comm; + u32 mask; + + netdev_dbg(ndev, "Open port = %x\n", mac->lan_port); + + comm->enable |= mac->lan_port; + + spl2sw_mac_hw_start(comm); + + /* Enable TX and RX interrupts */ + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask &= ~(MAC_INT_TX | MAC_INT_RX); + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + + phy_start(ndev->phydev); + + netif_start_queue(ndev); + + return 0; +} + +static int spl2sw_ethernet_stop(struct net_device *ndev) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + struct spl2sw_common *comm = mac->comm; + + netif_stop_queue(ndev); + + comm->enable &= ~mac->lan_port; + + phy_stop(ndev->phydev); + + spl2sw_mac_hw_stop(comm); + + return 0; +} + +static int spl2sw_ethernet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + struct spl2sw_common *comm = mac->comm; + struct spl2sw_skb_info *skbinfo; + struct spl2sw_mac_desc *txdesc; + unsigned long flags; + u32 mapping; + u32 tx_pos; + u32 cmd1; + u32 cmd2; + + if (unlikely(comm->tx_desc_full == 1)) { + /* No TX descriptors left. Wait for tx interrupt. */ + netdev_dbg(ndev, "TX descriptor queue full when xmit!\n"); + return NETDEV_TX_BUSY; + } + + /* If skb size is shorter than ETH_ZLEN (60), pad it with 0. */ + if (unlikely(skb->len < ETH_ZLEN)) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + skb_put(skb, ETH_ZLEN - skb->len); + } + + mapping = dma_map_single(&comm->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&comm->pdev->dev, mapping)) { + ndev->stats.tx_errors++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + spin_lock_irqsave(&comm->tx_lock, flags); + + tx_pos = comm->tx_pos; + txdesc = &comm->tx_desc[tx_pos]; + skbinfo = &comm->tx_temp_skb_info[tx_pos]; + skbinfo->mapping = mapping; + skbinfo->len = skb->len; + skbinfo->skb = skb; + + /* Set up a TX descriptor */ + cmd1 = TXD_OWN | TXD_SOP | TXD_EOP | (mac->to_vlan << 12) | + (skb->len & TXD_PKT_LEN); + cmd2 = skb->len & TXD_BUF_LEN1; + + if (tx_pos == (TX_DESC_NUM - 1)) + cmd2 |= TXD_EOR; + + txdesc->addr1 = skbinfo->mapping; + txdesc->cmd2 = cmd2; + wmb(); /* Set TXD_OWN after other fields are effective. */ + txdesc->cmd1 = cmd1; + + /* Move tx_pos to next position */ + tx_pos = ((tx_pos + 1) == TX_DESC_NUM) ? 0 : tx_pos + 1; + + if (unlikely(tx_pos == comm->tx_done_pos)) { + netif_stop_queue(ndev); + comm->tx_desc_full = 1; + } + comm->tx_pos = tx_pos; + wmb(); /* make sure settings are effective. 
*/ + + /* Trigger mac to transmit */ + writel(MAC_TRIG_L_SOC0, comm->l2sw_reg_base + L2SW_CPU_TX_TRIG); + + spin_unlock_irqrestore(&comm->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void spl2sw_ethernet_set_rx_mode(struct net_device *ndev) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + + spl2sw_mac_rx_mode_set(mac); +} + +static int spl2sw_ethernet_set_mac_address(struct net_device *ndev, void *addr) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + int err; + + err = eth_mac_addr(ndev, addr); + if (err) + return err; + + /* Delete the old MAC address */ + netdev_dbg(ndev, "Old Ethernet (MAC) address = %pM\n", mac->mac_addr); + if (is_valid_ether_addr(mac->mac_addr)) { + err = spl2sw_mac_addr_del(mac); + if (err) + return err; + } + + /* Set the MAC address */ + ether_addr_copy(mac->mac_addr, ndev->dev_addr); + return spl2sw_mac_addr_add(mac); +} + +static void spl2sw_ethernet_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + struct spl2sw_common *comm = mac->comm; + unsigned long flags; + int i; + + netdev_err(ndev, "TX timed out!\n"); + ndev->stats.tx_errors++; + + spin_lock_irqsave(&comm->tx_lock, flags); + + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + netif_stop_queue(comm->ndev[i]); + + spl2sw_mac_soft_reset(comm); + + /* Accept TX packets again. */ + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + netif_trans_update(comm->ndev[i]); + netif_wake_queue(comm->ndev[i]); + } + + spin_unlock_irqrestore(&comm->tx_lock, flags); +} + +static const struct net_device_ops netdev_ops = { + .ndo_open = spl2sw_ethernet_open, + .ndo_stop = spl2sw_ethernet_stop, + .ndo_start_xmit = spl2sw_ethernet_start_xmit, + .ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode, + .ndo_set_mac_address = spl2sw_ethernet_set_mac_address, + .ndo_do_ioctl = phy_do_ioctl, + .ndo_tx_timeout = spl2sw_ethernet_tx_timeout, +}; + +static void spl2sw_check_mac_vendor_id_and_convert(u8 *mac_addr) +{ + u8 tmp; + + /* Byte order of MAC address of some samples are reversed. + * Check vendor id and convert byte order if it is wrong. + * OUI of Sunplus: fc:4b:bc + */ + if (mac_addr[5] == 0xfc && mac_addr[4] == 0x4b && mac_addr[3] == 0xbc && + (mac_addr[0] != 0xfc || mac_addr[1] != 0x4b || mac_addr[2] != 0xbc)) { + /* Swap mac_addr[0] and mac_addr[5] */ + tmp = mac_addr[0]; + mac_addr[0] = mac_addr[5]; + mac_addr[5] = tmp; + + /* Swap mac_addr[1] and mac_addr[4] */ + tmp = mac_addr[1]; + mac_addr[1] = mac_addr[4]; + mac_addr[4] = tmp; + + /* Swap mac_addr[2] and mac_addr[3] */ + tmp = mac_addr[2]; + mac_addr[2] = mac_addr[3]; + mac_addr[3] = tmp; + } +} + +static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *np, + void *addrbuf) +{ + struct nvmem_cell *cell; + ssize_t len; + u8 *mac; + + /* Get nvmem cell of mac-address from dts. */ + cell = of_nvmem_cell_get(np, "mac-address"); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + /* Read mac address from nvmem cell. */ + mac = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + if (IS_ERR(mac)) + return PTR_ERR(mac); + + if (len != ETH_ALEN) { + kfree(mac); + dev_info(dev, "Invalid length of mac address in nvmem!\n"); + return -EINVAL; + } + + /* Byte order of some samples are reversed. + * Convert byte order here. 
+ */ + spl2sw_check_mac_vendor_id_and_convert(mac); + + /* Check if mac address is valid */ + if (!is_valid_ether_addr(mac)) { + kfree(mac); + dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac); + return -EINVAL; + } + + ether_addr_copy(addrbuf, mac); + kfree(mac); + return 0; +} + +static u32 spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr, + struct net_device **r_ndev) +{ + struct net_device *ndev; + struct spl2sw_mac *mac; + int ret; + + /* Allocate the devices, and also allocate spl2sw_mac, + * we can get it by netdev_priv(). + */ + ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*mac)); + if (!ndev) { + *r_ndev = NULL; + return -ENOMEM; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + ndev->netdev_ops = &netdev_ops; + mac = netdev_priv(ndev); + mac->ndev = ndev; + ether_addr_copy(mac->mac_addr, mac_addr); + + eth_hw_addr_set(ndev, mac_addr); + dev_info(&pdev->dev, "Ethernet (MAC) address = %pM\n", mac_addr); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n", + ndev->name); + free_netdev(ndev); + *r_ndev = NULL; + return ret; + } + netdev_dbg(ndev, "Registered net device \"%s\" successfully.\n", ndev->name); + + *r_ndev = ndev; + return 0; +} + +static struct device_node *spl2sw_get_eth_child_node(struct device_node *ether_np, int id) +{ + struct device_node *port_np; + int port_id; + + for_each_child_of_node(ether_np, port_np) { + /* It is not a 'port' node, continue. */ + if (strcmp(port_np->name, "port")) + continue; + + if (of_property_read_u32(port_np, "reg", &port_id) < 0) + continue; + + if (port_id == id) + return port_np; + } + + /* Not found! */ + return NULL; +} + +static int spl2sw_probe(struct platform_device *pdev) +{ + struct device_node *eth_ports_np; + struct device_node *port_np; + struct spl2sw_common *comm; + struct device_node *phy_np; + phy_interface_t phy_mode; + struct net_device *ndev; + struct spl2sw_mac *mac; + u8 mac_addr[ETH_ALEN]; + int irq, i, ret; + + if (platform_get_drvdata(pdev)) + return -ENODEV; + + /* Allocate memory for 'spl2sw_common' area. */ + comm = devm_kzalloc(&pdev->dev, sizeof(*comm), GFP_KERNEL); + if (!comm) + return -ENOMEM; + + comm->pdev = pdev; + platform_set_drvdata(pdev, comm); + + spin_lock_init(&comm->tx_lock); + spin_lock_init(&comm->mdio_lock); + spin_lock_init(&comm->int_mask_lock); + + /* Get memory resource 0 from dts. */ + comm->l2sw_reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(comm->l2sw_reg_base)) + return PTR_ERR(comm->l2sw_reg_base); + + /* Get irq resource from dts. */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + irq = ret; + + /* Get clock controller. */ + comm->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(comm->clk)) { + dev_err_probe(&pdev->dev, PTR_ERR(comm->clk), + "Failed to retrieve clock controller!\n"); + return PTR_ERR(comm->clk); + } + + /* Get reset controller. */ + comm->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(comm->rstc)) { + dev_err_probe(&pdev->dev, PTR_ERR(comm->rstc), + "Failed to retrieve reset controller!\n"); + return PTR_ERR(comm->rstc); + } + + /* Enable clock. */ + ret = clk_prepare_enable(comm->clk); + if (ret) + return ret; + udelay(1); + + /* Reset MAC */ + reset_control_assert(comm->rstc); + udelay(1); + reset_control_deassert(comm->rstc); + usleep_range(1000, 2000); + + /* Request irq. 
*/ + ret = devm_request_irq(&pdev->dev, irq, spl2sw_ethernet_interrupt, 0, + dev_name(&pdev->dev), comm); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq #%d!\n", irq); + goto out_clk_disable; + } + + /* Initialize TX and RX descriptors. */ + ret = spl2sw_descs_init(comm); + if (ret) { + dev_err(&pdev->dev, "Fail to initialize mac descriptors!\n"); + spl2sw_descs_free(comm); + goto out_clk_disable; + } + + /* Initialize MAC. */ + spl2sw_mac_init(comm); + + /* Initialize mdio bus */ + ret = spl2sw_mdio_init(comm); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize mdio bus!\n"); + goto out_clk_disable; + } + + /* Get child node ethernet-ports. */ + eth_ports_np = of_get_child_by_name(pdev->dev.of_node, "ethernet-ports"); + if (!eth_ports_np) { + dev_err(&pdev->dev, "No ethernet-ports child node found!\n"); + ret = -ENODEV; + goto out_free_mdio; + } + + for (i = 0; i < MAX_NETDEV_NUM; i++) { + /* Get port@i of node ethernet-ports. */ + port_np = spl2sw_get_eth_child_node(eth_ports_np, i); + if (!port_np) + continue; + + /* Get phy-mode. */ + if (of_get_phy_mode(port_np, &phy_mode)) { + dev_err(&pdev->dev, "Failed to get phy-mode property of port@%d!\n", + i); + continue; + } + + /* Get phy-handle. */ + phy_np = of_parse_phandle(port_np, "phy-handle", 0); + if (!phy_np) { + dev_err(&pdev->dev, "Failed to get phy-handle property of port@%d!\n", + i); + continue; + } + + /* Get mac-address from nvmem. */ + ret = spl2sw_nvmem_get_mac_address(&pdev->dev, port_np, mac_addr); + if (ret == -EPROBE_DEFER) { + goto out_unregister_dev; + } else if (ret) { + dev_info(&pdev->dev, "Generate a random mac address!\n"); + eth_random_addr(mac_addr); + } + + /* Initialize the net device. */ + ret = spl2sw_init_netdev(pdev, mac_addr, &ndev); + if (ret) + goto out_unregister_dev; + + ndev->irq = irq; + comm->ndev[i] = ndev; + mac = netdev_priv(ndev); + mac->phy_node = phy_np; + mac->phy_mode = phy_mode; + mac->comm = comm; + + mac->lan_port = 0x1 << i; /* forward to port i */ + mac->to_vlan = 0x1 << i; /* vlan group: i */ + mac->vlan_id = i; /* vlan group: i */ + + /* Set MAC address */ + ret = spl2sw_mac_addr_add(mac); + if (ret) + goto out_unregister_dev; + + spl2sw_mac_rx_mode_set(mac); + } + + /* Find first valid net device. */ + for (i = 0; i < MAX_NETDEV_NUM; i++) { + if (comm->ndev[i]) + break; + } + if (i >= MAX_NETDEV_NUM) { + dev_err(&pdev->dev, "No valid ethernet port!\n"); + ret = -ENODEV; + goto out_free_mdio; + } + + /* Save first valid net device */ + ndev = comm->ndev[i]; + + ret = spl2sw_phy_connect(comm); + if (ret) { + netdev_err(ndev, "Failed to connect phy!\n"); + goto out_unregister_dev; + } + + /* Add and enable napi. */ + netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT); + napi_enable(&comm->rx_napi); + netif_napi_add(ndev, &comm->tx_napi, spl2sw_tx_poll, NAPI_POLL_WEIGHT); + napi_enable(&comm->tx_napi); + return 0; + +out_unregister_dev: + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + unregister_netdev(comm->ndev[i]); + +out_free_mdio: + spl2sw_mdio_remove(comm); + +out_clk_disable: + clk_disable_unprepare(comm->clk); + return ret; +} + +static int spl2sw_remove(struct platform_device *pdev) +{ + struct spl2sw_common *comm; + int i; + + comm = platform_get_drvdata(pdev); + + spl2sw_phy_remove(comm); + + /* Unregister and free net device. 
*/ + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + unregister_netdev(comm->ndev[i]); + + comm->enable = 0; + spl2sw_mac_hw_stop(comm); + spl2sw_descs_free(comm); + + /* Disable and delete napi. */ + napi_disable(&comm->rx_napi); + netif_napi_del(&comm->rx_napi); + napi_disable(&comm->tx_napi); + netif_napi_del(&comm->tx_napi); + + spl2sw_mdio_remove(comm); + + clk_disable_unprepare(comm->clk); + + return 0; +} + +static const struct of_device_id spl2sw_of_match[] = { + {.compatible = "sunplus,sp7021-emac"}, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, spl2sw_of_match); + +static struct platform_driver spl2sw_driver = { + .probe = spl2sw_probe, + .remove = spl2sw_remove, + .driver = { + .name = "sp7021_emac", + .owner = THIS_MODULE, + .of_match_table = spl2sw_of_match, + }, +}; + +module_platform_driver(spl2sw_driver); + +MODULE_AUTHOR("Wells Lu "); +MODULE_DESCRIPTION("Sunplus Dual 10M/100M Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.c b/drivers/net/ethernet/sunplus/spl2sw_int.c new file mode 100644 index 000000000000..95ce096d1543 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_int.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_int.h" + +int spl2sw_rx_poll(struct napi_struct *napi, int budget) +{ + struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi); + struct spl2sw_mac_desc *desc, *h_desc; + struct net_device_stats *stats; + struct sk_buff *skb, *new_skb; + struct spl2sw_skb_info *sinfo; + int budget_left = budget; + unsigned long flags; + u32 rx_pos, pkg_len; + u32 num, rx_count; + s32 queue; + u32 mask; + int port; + u32 cmd; + + /* Process high-priority queue and then low-priority queue. */ + for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) { + rx_pos = comm->rx_pos[queue]; + rx_count = comm->rx_desc_num[queue]; + + for (num = 0; num < rx_count && budget_left; num++) { + sinfo = comm->rx_skb_info[queue] + rx_pos; + desc = comm->rx_desc[queue] + rx_pos; + cmd = desc->cmd1; + + if (cmd & RXD_OWN) + break; + + port = FIELD_GET(RXD_PKT_SP, cmd); + if (port < MAX_NETDEV_NUM && comm->ndev[port]) + stats = &comm->ndev[port]->stats; + else + goto spl2sw_rx_poll_rec_err; + + pkg_len = FIELD_GET(RXD_PKT_LEN, cmd); + if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) { + stats->rx_length_errors++; + stats->rx_dropped++; + goto spl2sw_rx_poll_rec_err; + } + + dma_unmap_single(&comm->pdev->dev, sinfo->mapping, + comm->rx_desc_buff_size, DMA_FROM_DEVICE); + + skb = sinfo->skb; + skb_put(skb, pkg_len - 4); /* Minus FCS */ + skb->ip_summed = CHECKSUM_NONE; + skb->protocol = eth_type_trans(skb, comm->ndev[port]); + netif_receive_skb(skb); + + stats->rx_packets++; + stats->rx_bytes += skb->len; + + /* Allocate a new skb for receiving. */ + new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size); + if (unlikely(!new_skb)) { + desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? + RXD_EOR : 0; + sinfo->skb = NULL; + sinfo->mapping = 0; + desc->addr1 = 0; + goto spl2sw_rx_poll_alloc_err; + } + + sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data, + comm->rx_desc_buff_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) { + dev_kfree_skb_irq(new_skb); + desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? 
+ RXD_EOR : 0; + sinfo->skb = NULL; + sinfo->mapping = 0; + desc->addr1 = 0; + goto spl2sw_rx_poll_alloc_err; + } + + sinfo->skb = new_skb; + desc->addr1 = sinfo->mapping; + +spl2sw_rx_poll_rec_err: + desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? + RXD_EOR | comm->rx_desc_buff_size : + comm->rx_desc_buff_size; + + wmb(); /* Set RXD_OWN after other fields are effective. */ + desc->cmd1 = RXD_OWN; + +spl2sw_rx_poll_alloc_err: + /* Move rx_pos to next position */ + rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1; + + budget_left--; + + /* If there are packets in high-priority queue, + * stop processing low-priority queue. + */ + if (queue == 1 && !(h_desc->cmd1 & RXD_OWN)) + break; + } + + comm->rx_pos[queue] = rx_pos; + + /* Save pointer to last rx descriptor of high-priority queue. */ + if (queue == 0) + h_desc = comm->rx_desc[queue] + rx_pos; + } + + spin_lock_irqsave(&comm->int_mask_lock, flags); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask &= ~MAC_INT_RX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock_irqrestore(&comm->int_mask_lock, flags); + + napi_complete(napi); + return budget - budget_left; +} + +int spl2sw_tx_poll(struct napi_struct *napi, int budget) +{ + struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi); + struct spl2sw_skb_info *skbinfo; + struct net_device_stats *stats; + int budget_left = budget; + unsigned long flags; + u32 tx_done_pos; + u32 mask; + u32 cmd; + int i; + + spin_lock(&comm->tx_lock); + + tx_done_pos = comm->tx_done_pos; + while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) { + cmd = comm->tx_desc[tx_done_pos].cmd1; + if (cmd & TXD_OWN) + break; + + skbinfo = &comm->tx_temp_skb_info[tx_done_pos]; + if (unlikely(!skbinfo->skb)) + goto spl2sw_tx_poll_next; + + i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1; + if (i < MAX_NETDEV_NUM && comm->ndev[i]) + stats = &comm->ndev[i]->stats; + else + goto spl2sw_tx_poll_unmap; + + if (unlikely(cmd & (TXD_ERR_CODE))) { + stats->tx_errors++; + } else { + stats->tx_packets++; + stats->tx_bytes += skbinfo->len; + } + +spl2sw_tx_poll_unmap: + dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len, + DMA_TO_DEVICE); + skbinfo->mapping = 0; + dev_kfree_skb_irq(skbinfo->skb); + skbinfo->skb = NULL; + +spl2sw_tx_poll_next: + /* Move tx_done_pos to next position */ + tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 
0 : tx_done_pos + 1; + + if (comm->tx_desc_full == 1) + comm->tx_desc_full = 0; + + budget_left--; + } + + comm->tx_done_pos = tx_done_pos; + if (!comm->tx_desc_full) + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) + if (netif_queue_stopped(comm->ndev[i])) + netif_wake_queue(comm->ndev[i]); + + spin_unlock(&comm->tx_lock); + + spin_lock_irqsave(&comm->int_mask_lock, flags); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask &= ~MAC_INT_TX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock_irqrestore(&comm->int_mask_lock, flags); + + napi_complete(napi); + return budget - budget_left; +} + +irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id) +{ + struct spl2sw_common *comm = (struct spl2sw_common *)dev_id; + u32 status; + u32 mask; + int i; + + status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); + if (unlikely(!status)) { + dev_dbg(&comm->pdev->dev, "Interrput status is null!\n"); + goto spl2sw_ethernet_int_out; + } + writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); + + if (status & MAC_INT_RX) { + /* Disable RX interrupts. */ + spin_lock(&comm->int_mask_lock); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask |= MAC_INT_RX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock(&comm->int_mask_lock); + + if (unlikely(status & MAC_INT_RX_DES_ERR)) { + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + comm->ndev[i]->stats.rx_fifo_errors++; + break; + } + dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n"); + } + + napi_schedule(&comm->rx_napi); + } + + if (status & MAC_INT_TX) { + /* Disable TX interrupts. */ + spin_lock(&comm->int_mask_lock); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask |= MAC_INT_TX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock(&comm->int_mask_lock); + + if (unlikely(status & MAC_INT_TX_DES_ERR)) { + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + comm->ndev[i]->stats.tx_fifo_errors++; + break; + } + dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n"); + + spin_lock(&comm->int_mask_lock); + mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + mask &= ~MAC_INT_TX; + writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + spin_unlock(&comm->int_mask_lock); + } else { + napi_schedule(&comm->tx_napi); + } + } + +spl2sw_ethernet_int_out: + return IRQ_HANDLED; +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.h b/drivers/net/ethernet/sunplus/spl2sw_int.h new file mode 100644 index 000000000000..64f6f2572fe3 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_int.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_INT_H__ +#define __SPL2SW_INT_H__ + +int spl2sw_rx_poll(struct napi_struct *napi, int budget); +int spl2sw_tx_poll(struct napi_struct *napi, int budget); +irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_mac.c b/drivers/net/ethernet/sunplus/spl2sw_mac.c new file mode 100644 index 000000000000..57e431dfc467 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mac.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_desc.h" +#include "spl2sw_mac.h" + +void spl2sw_mac_hw_stop(struct spl2sw_common *comm) +{ + u32 reg; + + if (comm->enable == 0) { + /* Mask and clear all interrupts. */ + writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); + writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); + + /* Disable cpu 0 and cpu 1. */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + } + + /* Disable LAN ports. */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0); + reg |= FIELD_PREP(MAC_DIS_PORT, ~comm->enable); + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0); +} + +void spl2sw_mac_hw_start(struct spl2sw_common *comm) +{ + u32 reg; + + /* Enable cpu port 0 (6) & CRC padding (8) */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg &= ~MAC_DIS_SOC0_CPU; + reg |= MAC_EN_CRC_SOC0; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + + /* Enable port 0 & port 1 */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0); + reg &= FIELD_PREP(MAC_DIS_PORT, ~comm->enable) | ~MAC_DIS_PORT; + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0); +} + +int spl2sw_mac_addr_add(struct spl2sw_mac *mac) +{ + struct spl2sw_common *comm = mac->comm; + u32 reg; + int ret; + + /* Write 6-octet MAC address. */ + writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8), + comm->l2sw_reg_base + L2SW_W_MAC_15_0); + writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) + + (mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24), + comm->l2sw_reg_base + L2SW_W_MAC_47_16); + + /* Set learn port = cpu_port, aging = 1 */ + reg = MAC_W_CPU_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) | + FIELD_PREP(MAC_W_AGE, 1) | MAC_W_MAC_CMD; + writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + + /* Wait for completing. */ + ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true, + comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + if (ret) { + netdev_err(mac->ndev, "Failed to add address to table!\n"); + return ret; + } + + netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n", + readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0), + (u32)FIELD_GET(MAC_W_MAC_47_16, + readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)), + (u32)FIELD_GET(MAC_W_MAC_15_0, + readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0))); + return 0; +} + +int spl2sw_mac_addr_del(struct spl2sw_mac *mac) +{ + struct spl2sw_common *comm = mac->comm; + u32 reg; + int ret; + + /* Write 6-octet MAC address. */ + writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8), + comm->l2sw_reg_base + L2SW_W_MAC_15_0); + writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) + + (mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24), + comm->l2sw_reg_base + L2SW_W_MAC_47_16); + + /* Set learn port = lan_port0 and aging = 0 + * to wipe (age) out the entry. + */ + reg = MAC_W_LAN_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) | MAC_W_MAC_CMD; + writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + + /* Wait for completing. 
*/ + ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true, + comm->l2sw_reg_base + L2SW_WT_MAC_AD0); + if (ret) { + netdev_err(mac->ndev, "Failed to delete address from table!\n"); + return ret; + } + + netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n", + readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0), + (u32)FIELD_GET(MAC_W_MAC_47_16, + readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)), + (u32)FIELD_GET(MAC_W_MAC_15_0, + readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0))); + return 0; +} + +void spl2sw_mac_hw_init(struct spl2sw_common *comm) +{ + u32 reg; + + /* Disable cpu0 and cpu 1 port. */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + + /* Set base addresses of TX and RX queues. */ + writel(comm->desc_dma, comm->l2sw_reg_base + L2SW_TX_LBASE_ADDR_0); + writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * TX_DESC_NUM, + comm->l2sw_reg_base + L2SW_TX_HBASE_ADDR_0); + writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM + + MAC_GUARD_DESC_NUM), comm->l2sw_reg_base + L2SW_RX_HBASE_ADDR_0); + writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM + + MAC_GUARD_DESC_NUM + RX_QUEUE0_DESC_NUM), + comm->l2sw_reg_base + L2SW_RX_LBASE_ADDR_0); + + /* Fc_rls_th=0x4a, Fc_set_th=0x3a, Drop_rls_th=0x2d, Drop_set_th=0x1d */ + writel(0x4a3a2d1d, comm->l2sw_reg_base + L2SW_FL_CNTL_TH); + + /* Cpu_rls_th=0x4a, Cpu_set_th=0x3a, Cpu_th=0x12, Port_th=0x12 */ + writel(0x4a3a1212, comm->l2sw_reg_base + L2SW_CPU_FL_CNTL_TH); + + /* mtcc_lmt=0xf, Pri_th_l=6, Pri_th_h=6, weigh_8x_en=1 */ + writel(0xf6680000, comm->l2sw_reg_base + L2SW_PRI_FL_CNTL); + + /* High-active LED */ + reg = readl(comm->l2sw_reg_base + L2SW_LED_PORT0); + reg |= MAC_LED_ACT_HI; + writel(reg, comm->l2sw_reg_base + L2SW_LED_PORT0); + + /* Disable aging of cpu port 0 & 1. + * Disable SA learning of cpu port 0 & 1. + * Enable UC and MC packets + */ + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + reg &= ~(MAC_EN_SOC1_AGING | MAC_EN_SOC0_AGING | + MAC_DIS_BC2CPU_P1 | MAC_DIS_BC2CPU_P0 | + MAC_DIS_MC2CPU_P1 | MAC_DIS_MC2CPU_P0); + reg |= MAC_DIS_LRN_SOC1 | MAC_DIS_LRN_SOC0; + writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL); + + /* Enable RMC2CPU for port 0 & 1 + * Enable Flow control for port 0 & 1 + * Enable Back pressure for port 0 & 1 + */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0); + reg &= ~(MAC_DIS_RMC2CPU_P1 | MAC_DIS_RMC2CPU_P0); + reg |= MAC_EN_FLOW_CTL_P1 | MAC_EN_FLOW_CTL_P0 | + MAC_EN_BACK_PRESS_P1 | MAC_EN_BACK_PRESS_P0; + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0); + + /* Disable LAN port SA learning. */ + reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL1); + reg |= MAC_DIS_SA_LRN_P1 | MAC_DIS_SA_LRN_P0; + writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL1); + + /* Enable rmii force mode and + * set both external phy-address to 31. 
+ */ + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + reg &= ~(MAC_EXT_PHY1_ADDR | MAC_EXT_PHY0_ADDR); + reg |= FIELD_PREP(MAC_EXT_PHY1_ADDR, 31) | FIELD_PREP(MAC_EXT_PHY0_ADDR, 31); + reg |= MAC_FORCE_RMII_EN_1 | MAC_FORCE_RMII_EN_0; + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + /* Port 0: VLAN group 0 + * Port 1: VLAN group 1 + */ + reg = FIELD_PREP(MAC_P1_PVID, 1) | FIELD_PREP(MAC_P0_PVID, 0); + writel(reg, comm->l2sw_reg_base + L2SW_PVID_CONFIG0); + + /* VLAN group 0: cpu0 (bit3) + port0 (bit0) = 1001 = 0x9 + * VLAN group 1: cpu0 (bit3) + port1 (bit1) = 1010 = 0xa + */ + reg = FIELD_PREP(MAC_VLAN_MEMSET_1, 0xa) | FIELD_PREP(MAC_VLAN_MEMSET_0, 9); + writel(reg, comm->l2sw_reg_base + L2SW_VLAN_MEMSET_CONFIG0); + + /* RMC forward: to_cpu (1) + * LED: 60mS (1) + * BC storm prev: 31 BC (1) + */ + reg = readl(comm->l2sw_reg_base + L2SW_SW_GLB_CNTL); + reg &= ~(MAC_RMC_TB_FAULT_RULE | MAC_LED_FLASH_TIME | MAC_BC_STORM_PREV); + reg |= FIELD_PREP(MAC_RMC_TB_FAULT_RULE, 1) | + FIELD_PREP(MAC_LED_FLASH_TIME, 1) | + FIELD_PREP(MAC_BC_STORM_PREV, 1); + writel(reg, comm->l2sw_reg_base + L2SW_SW_GLB_CNTL); + + writel(MAC_INT_MASK_DEF, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); +} + +void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac) +{ + struct spl2sw_common *comm = mac->comm; + struct net_device *ndev = mac->ndev; + u32 mask, reg, rx_mode; + + netdev_dbg(ndev, "ndev->flags = %08x\n", ndev->flags); + mask = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) | + FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port); + reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL); + + if (ndev->flags & IFF_PROMISC) { + /* Allow MC and unknown UC packets */ + rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) | + FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port); + } else if ((!netdev_mc_empty(ndev) && (ndev->flags & IFF_MULTICAST)) || + (ndev->flags & IFF_ALLMULTI)) { + /* Allow MC packets */ + rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port); + } else { + /* Disable MC and unknown UC packets */ + rx_mode = 0; + } + + writel((reg & (~mask)) | ((~rx_mode) & mask), comm->l2sw_reg_base + L2SW_CPU_CNTL); + netdev_dbg(ndev, "cpu_cntl = %08x\n", readl(comm->l2sw_reg_base + L2SW_CPU_CNTL)); +} + +void spl2sw_mac_init(struct spl2sw_common *comm) +{ + u32 i; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_pos[i] = 0; + mb(); /* make sure settings are effective. */ + + spl2sw_mac_hw_init(comm); +} + +void spl2sw_mac_soft_reset(struct spl2sw_common *comm) +{ + u32 i; + + spl2sw_mac_hw_stop(comm); + + spl2sw_rx_descs_flush(comm); + comm->tx_pos = 0; + comm->tx_done_pos = 0; + comm->tx_desc_full = 0; + + for (i = 0; i < RX_DESC_QUEUE_NUM; i++) + comm->rx_pos[i] = 0; + mb(); /* make sure settings are effective. */ + + spl2sw_mac_hw_init(comm); + spl2sw_mac_hw_start(comm); +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_mac.h b/drivers/net/ethernet/sunplus/spl2sw_mac.h new file mode 100644 index 000000000000..41a929b3a380 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mac.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __SPL2SW_MAC_H__ +#define __SPL2SW_MAC_H__ + +void spl2sw_mac_hw_stop(struct spl2sw_common *comm); +void spl2sw_mac_hw_start(struct spl2sw_common *comm); +int spl2sw_mac_addr_add(struct spl2sw_mac *mac); +int spl2sw_mac_addr_del(struct spl2sw_mac *mac); +void spl2sw_mac_hw_init(struct spl2sw_common *comm); +void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac); +void spl2sw_mac_init(struct spl2sw_common *comm); +void spl2sw_mac_soft_reset(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.c b/drivers/net/ethernet/sunplus/spl2sw_mdio.c new file mode 100644 index 000000000000..139ac8f2685e --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_mdio.h" + +#define SPL2SW_MDIO_READ_CMD 0x02 +#define SPL2SW_MDIO_WRITE_CMD 0x01 + +static int spl2sw_mdio_access(struct spl2sw_common *comm, u8 cmd, u8 addr, u8 regnum, u16 wdata) +{ + u32 reg, reg2; + u32 val; + int ret; + + /* Note that addr (of phy) should match either ext_phy0_addr + * or ext_phy1_addr, or mdio commands won't be sent out. + */ + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + reg &= ~MAC_EXT_PHY0_ADDR; + reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, addr); + + reg2 = FIELD_PREP(MAC_CPU_PHY_WT_DATA, wdata) | FIELD_PREP(MAC_CPU_PHY_CMD, cmd) | + FIELD_PREP(MAC_CPU_PHY_REG_ADDR, regnum) | FIELD_PREP(MAC_CPU_PHY_ADDR, addr); + + /* Set ext_phy0_addr and then issue mdio command. + * No interrupt is allowed in between. + */ + spin_lock_irq(&comm->mdio_lock); + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + writel(reg2, comm->l2sw_reg_base + L2SW_PHY_CNTL_REG0); + spin_unlock_irq(&comm->mdio_lock); + + ret = read_poll_timeout(readl, val, val & cmd, 1, 1000, true, + comm->l2sw_reg_base + L2SW_PHY_CNTL_REG1); + + /* Set ext_phy0_addr back to 31 to prevent + * from sending mdio command to phy by + * hardware auto-mdio function. + */ + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + reg &= ~MAC_EXT_PHY0_ADDR; + reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, 31); + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + if (ret == 0) + return val >> 16; + else + return ret; +} + +static int spl2sw_mii_read(struct mii_bus *bus, int addr, int regnum) +{ + struct spl2sw_common *comm = bus->priv; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + return spl2sw_mdio_access(comm, SPL2SW_MDIO_READ_CMD, addr, regnum, 0); +} + +static int spl2sw_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct spl2sw_common *comm = bus->priv; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + ret = spl2sw_mdio_access(comm, SPL2SW_MDIO_WRITE_CMD, addr, regnum, val); + if (ret < 0) + return ret; + + return 0; +} + +u32 spl2sw_mdio_init(struct spl2sw_common *comm) +{ + struct device_node *mdio_np; + struct mii_bus *mii_bus; + int ret; + + /* Get mdio child node. */ + mdio_np = of_get_child_by_name(comm->pdev->dev.of_node, "mdio"); + if (!mdio_np) { + dev_err(&comm->pdev->dev, "No mdio child node found!\n"); + return -ENODEV; + } + + /* Allocate and register mdio bus. 
*/ + mii_bus = devm_mdiobus_alloc(&comm->pdev->dev); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "sunplus_mii_bus"; + mii_bus->parent = &comm->pdev->dev; + mii_bus->priv = comm; + mii_bus->read = spl2sw_mii_read; + mii_bus->write = spl2sw_mii_write; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&comm->pdev->dev)); + + ret = of_mdiobus_register(mii_bus, mdio_np); + if (ret) { + dev_err(&comm->pdev->dev, "Failed to register mdiobus!\n"); + return ret; + } + + comm->mii_bus = mii_bus; + return ret; +} + +void spl2sw_mdio_remove(struct spl2sw_common *comm) +{ + if (comm->mii_bus) { + mdiobus_unregister(comm->mii_bus); + comm->mii_bus = NULL; + } +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.h b/drivers/net/ethernet/sunplus/spl2sw_mdio.h new file mode 100644 index 000000000000..8a24c9caeb6f --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_MDIO_H__ +#define __SPL2SW_MDIO_H__ + +u32 spl2sw_mdio_init(struct spl2sw_common *comm); +void spl2sw_mdio_remove(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_phy.c b/drivers/net/ethernet/sunplus/spl2sw_phy.c new file mode 100644 index 000000000000..404f508a54d4 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_phy.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include + +#include "spl2sw_register.h" +#include "spl2sw_define.h" +#include "spl2sw_phy.h" + +static void spl2sw_mii_link_change(struct net_device *ndev) +{ + struct spl2sw_mac *mac = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + struct spl2sw_common *comm = mac->comm; + u32 reg; + + reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + if (phydev->link) { + reg |= FIELD_PREP(MAC_FORCE_RMII_LINK, mac->lan_port); + + if (phydev->speed == 100) { + reg |= FIELD_PREP(MAC_FORCE_RMII_SPD, mac->lan_port); + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_SPD, ~mac->lan_port) | + ~MAC_FORCE_RMII_SPD; + } + + if (phydev->duplex) { + reg |= FIELD_PREP(MAC_FORCE_RMII_DPX, mac->lan_port); + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_DPX, ~mac->lan_port) | + ~MAC_FORCE_RMII_DPX; + } + + if (phydev->pause) { + reg |= FIELD_PREP(MAC_FORCE_RMII_FC, mac->lan_port); + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_FC, ~mac->lan_port) | + ~MAC_FORCE_RMII_FC; + } + } else { + reg &= FIELD_PREP(MAC_FORCE_RMII_LINK, ~mac->lan_port) | + ~MAC_FORCE_RMII_LINK; + } + + writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE); + + phy_print_status(phydev); +} + +int spl2sw_phy_connect(struct spl2sw_common *comm) +{ + struct phy_device *phydev; + struct net_device *ndev; + struct spl2sw_mac *mac; + int i; + + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + ndev = comm->ndev[i]; + mac = netdev_priv(ndev); + phydev = of_phy_connect(ndev, mac->phy_node, spl2sw_mii_link_change, + 0, mac->phy_mode); + if (!phydev) + return -ENODEV; + + phy_support_asym_pause(phydev); + phy_attached_info(phydev); + } + + return 0; +} + +void spl2sw_phy_remove(struct spl2sw_common *comm) +{ + struct net_device *ndev; + int i; + + for (i = 0; i < MAX_NETDEV_NUM; i++) + if (comm->ndev[i]) { + ndev = comm->ndev[i]; + if (ndev) { + phy_disconnect(ndev->phydev); + ndev->phydev = NULL; + } + } +} diff --git a/drivers/net/ethernet/sunplus/spl2sw_phy.h 
b/drivers/net/ethernet/sunplus/spl2sw_phy.h new file mode 100644 index 000000000000..3d051a2548c8 --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_phy.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_PHY_H__ +#define __SPL2SW_PHY_H__ + +int spl2sw_phy_connect(struct spl2sw_common *comm); +void spl2sw_phy_remove(struct spl2sw_common *comm); + +#endif diff --git a/drivers/net/ethernet/sunplus/spl2sw_register.h b/drivers/net/ethernet/sunplus/spl2sw_register.h new file mode 100644 index 000000000000..9718e2ee2b6c --- /dev/null +++ b/drivers/net/ethernet/sunplus/spl2sw_register.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright Sunplus Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __SPL2SW_REGISTER_H__ +#define __SPL2SW_REGISTER_H__ + +/* Register L2SW */ +#define L2SW_SW_INT_STATUS_0 0x0 +#define L2SW_SW_INT_MASK_0 0x4 +#define L2SW_FL_CNTL_TH 0x8 +#define L2SW_CPU_FL_CNTL_TH 0xc +#define L2SW_PRI_FL_CNTL 0x10 +#define L2SW_VLAN_PRI_TH 0x14 +#define L2SW_EN_TOS_BUS 0x18 +#define L2SW_TOS_MAP0 0x1c +#define L2SW_TOS_MAP1 0x20 +#define L2SW_TOS_MAP2 0x24 +#define L2SW_TOS_MAP3 0x28 +#define L2SW_TOS_MAP4 0x2c +#define L2SW_TOS_MAP5 0x30 +#define L2SW_TOS_MAP6 0x34 +#define L2SW_TOS_MAP7 0x38 +#define L2SW_GLOBAL_QUE_STATUS 0x3c +#define L2SW_ADDR_TBL_SRCH 0x40 +#define L2SW_ADDR_TBL_ST 0x44 +#define L2SW_MAC_AD_SER0 0x48 +#define L2SW_MAC_AD_SER1 0x4c +#define L2SW_WT_MAC_AD0 0x50 +#define L2SW_W_MAC_15_0 0x54 +#define L2SW_W_MAC_47_16 0x58 +#define L2SW_PVID_CONFIG0 0x5c +#define L2SW_PVID_CONFIG1 0x60 +#define L2SW_VLAN_MEMSET_CONFIG0 0x64 +#define L2SW_VLAN_MEMSET_CONFIG1 0x68 +#define L2SW_PORT_ABILITY 0x6c +#define L2SW_PORT_ST 0x70 +#define L2SW_CPU_CNTL 0x74 +#define L2SW_PORT_CNTL0 0x78 +#define L2SW_PORT_CNTL1 0x7c +#define L2SW_PORT_CNTL2 0x80 +#define L2SW_SW_GLB_CNTL 0x84 +#define L2SW_L2SW_SW_RESET 0x88 +#define L2SW_LED_PORT0 0x8c +#define L2SW_LED_PORT1 0x90 +#define L2SW_LED_PORT2 0x94 +#define L2SW_LED_PORT3 0x98 +#define L2SW_LED_PORT4 0x9c +#define L2SW_WATCH_DOG_TRIG_RST 0xa0 +#define L2SW_WATCH_DOG_STOP_CPU 0xa4 +#define L2SW_PHY_CNTL_REG0 0xa8 +#define L2SW_PHY_CNTL_REG1 0xac +#define L2SW_MAC_FORCE_MODE 0xb0 +#define L2SW_VLAN_GROUP_CONFIG0 0xb4 +#define L2SW_VLAN_GROUP_CONFIG1 0xb8 +#define L2SW_FLOW_CTRL_TH3 0xbc +#define L2SW_QUEUE_STATUS_0 0xc0 +#define L2SW_DEBUG_CNTL 0xc4 +#define L2SW_RESERVED_1 0xc8 +#define L2SW_MEM_TEST_INFO 0xcc +#define L2SW_SW_INT_STATUS_1 0xd0 +#define L2SW_SW_INT_MASK_1 0xd4 +#define L2SW_SW_GLOBAL_SIGNAL 0xd8 + +#define L2SW_CPU_TX_TRIG 0x208 +#define L2SW_TX_HBASE_ADDR_0 0x20c +#define L2SW_TX_LBASE_ADDR_0 0x210 +#define L2SW_RX_HBASE_ADDR_0 0x214 +#define L2SW_RX_LBASE_ADDR_0 0x218 +#define L2SW_TX_HW_ADDR_0 0x21c +#define L2SW_TX_LW_ADDR_0 0x220 +#define L2SW_RX_HW_ADDR_0 0x224 +#define L2SW_RX_LW_ADDR_0 0x228 +#define L2SW_CPU_PORT_CNTL_REG_0 0x22c +#define L2SW_TX_HBASE_ADDR_1 0x230 +#define L2SW_TX_LBASE_ADDR_1 0x234 +#define L2SW_RX_HBASE_ADDR_1 0x238 +#define L2SW_RX_LBASE_ADDR_1 0x23c +#define L2SW_TX_HW_ADDR_1 0x240 +#define L2SW_TX_LW_ADDR_1 0x244 +#define L2SW_RX_HW_ADDR_1 0x248 +#define L2SW_RX_LW_ADDR_1 0x24c +#define L2SW_CPU_PORT_CNTL_REG_1 0x250 + +#endif -- cgit v1.2.3 From 0f84a156aa3b9c9889c64a31d36b533508fabcb7 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Mon, 9 May 2022 14:57:31 +0300 Subject: ath11k: Handle keepalive during WoWLAN suspend and resume With WoWLAN 
enabled and after sleeping for a rather long time, we are seeing that with some APs, it is not able to wake up the STA though the correct wake up pattern has been configured. This is because the host doesn't send keepalive command to firmware, thus firmware will not send any packet to the AP and after a specific time the AP kicks out the STA. Fix this issue by enabling keepalive before going to suspend and disabling it after resume back. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506012540.1579604-1-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/mac.c | 31 +++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/mac.h | 4 +++ drivers/net/wireless/ath/ath11k/wmi.c | 41 +++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 46 +++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wow.c | 34 ++++++++++++++++++++++++++ 5 files changed, 156 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 9b28fdac3c94..7fdc39b7205e 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -9035,3 +9035,34 @@ void ath11k_mac_destroy(struct ath11k_base *ab) pdev->ar = NULL; } } + +int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif, + enum wmi_sta_keepalive_method method, + u32 interval) +{ + struct ath11k *ar = arvif->ar; + struct wmi_sta_keepalive_arg arg = {}; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map)) + return 0; + + arg.vdev_id = arvif->vdev_id; + arg.enabled = 1; + arg.method = method; + arg.interval = interval; + + ret = ath11k_wmi_sta_keepalive(ar, &arg); + if (ret) { + ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h index 7f93e3a9ca23..57ebfc592b00 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -8,6 +8,7 @@ #include #include +#include "wmi.h" struct ath11k; struct ath11k_base; @@ -173,4 +174,7 @@ void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif); int ath11k_mac_wait_tx_complete(struct ath11k *ar); +int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif, + enum wmi_sta_keepalive_method method, + u32 interval); #endif diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 1410114d1d5c..63463b6c63be 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -8959,3 +8959,44 @@ int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar) return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); } + +int ath11k_wmi_sta_keepalive(struct ath11k *ar, + const struct wmi_sta_keepalive_arg *arg) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_sta_keepalive_cmd *cmd; + struct wmi_sta_keepalive_arp_resp *arp; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd) + sizeof(*arp); + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_keepalive_cmd *)skb->data; 
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_STA_KEEPALIVE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = arg->vdev_id; + cmd->enabled = arg->enabled; + cmd->interval = arg->interval; + cmd->method = arg->method; + + if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || + arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { + arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1); + arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_STA_KEEPALVE_ARP_RESPONSE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE); + arp->src_ip4_addr = arg->src_ip4_addr; + arp->dest_ip4_addr = arg->dest_ip4_addr; + ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "wmi sta keepalive vdev %d enabled %d method %d interval %d\n", + arg->vdev_id, arg->enabled, arg->method, arg->interval); + + return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 7600e9a52da8..b1fad4707dc6 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -5907,6 +5907,50 @@ struct wmi_pdev_set_geo_table_cmd { u32 rsvd_len; } __packed; +struct wmi_sta_keepalive_cmd { + u32 tlv_header; + u32 vdev_id; + u32 enabled; + + /* WMI_STA_KEEPALIVE_METHOD_ */ + u32 method; + + /* in seconds */ + u32 interval; + + /* following this structure is the TLV for struct + * wmi_sta_keepalive_arp_resp + */ +} __packed; + +struct wmi_sta_keepalive_arp_resp { + u32 tlv_header; + u32 src_ip4_addr; + u32 dest_ip4_addr; + struct wmi_mac_addr dest_mac_addr; +} __packed; + +struct wmi_sta_keepalive_arg { + u32 vdev_id; + u32 enabled; + u32 method; + u32 interval; + u32 src_ip4_addr; + u32 dest_ip4_addr; + const u8 dest_mac_addr[ETH_ALEN]; +}; + +enum wmi_sta_keepalive_method { + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1, + WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2, + WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3, + WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4, + WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5, +}; + +#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 +#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 + int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id); struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); @@ -6087,5 +6131,7 @@ int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar, struct ath11k_vif *arvif); int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val); int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar); +int ath11k_wmi_sta_keepalive(struct ath11k *ar, + const struct wmi_sta_keepalive_arg *arg); #endif diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 9d088cebef03..b3e65cd13d83 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -640,6 +640,24 @@ static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable) return 0; } +static int ath11k_wow_set_keepalive(struct ath11k *ar, + enum wmi_sta_keepalive_method method, + u32 interval) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath11k_mac_vif_set_keepalive(arvif, method, interval); + if (ret) + return ret; + } + + return 0; +} + int ath11k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { @@ 
-691,6 +709,14 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw, goto cleanup; } + ret = ath11k_wow_set_keepalive(ar, + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME, + WMI_STA_KEEPALIVE_INTERVAL_DEFAULT); + if (ret) { + ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret); + goto cleanup; + } + ret = ath11k_wow_enable(ar->ab); if (ret) { ath11k_warn(ar->ab, "failed to start wow: %d\n", ret); @@ -786,6 +812,14 @@ int ath11k_wow_op_resume(struct ieee80211_hw *hw) goto exit; } + ret = ath11k_wow_set_keepalive(ar, + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME, + WMI_STA_KEEPALIVE_INTERVAL_DISABLE); + if (ret) { + ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret); + goto exit; + } + exit: if (ret) { switch (ar->state) { -- cgit v1.2.3 From 3a5627b94222c3abc7e65486e2d2c0cc0a35c140 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Mon, 9 May 2022 14:57:31 +0300 Subject: ath11k: Implement remain-on-channel support Add remain on channel support, it is needed in several scenarios such as Passpoint etc. Currently this is supported by QCA6390, WCN6855, IPQ8074, IPQ6018 and QCN9074. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506013614.1580274-2-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 1 + drivers/net/wireless/ath/ath11k/mac.c | 115 +++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.c | 4 ++ 3 files changed, 120 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 236215aa3867..6b6535437485 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1624,6 +1624,7 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab) complete(&ar->completed_11d_scan); complete(&ar->scan.started); complete(&ar->scan.completed); + complete(&ar->scan.on_channel); complete(&ar->peer_assoc_done); complete(&ar->peer_delete_done); complete(&ar->install_key_done); diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 7fdc39b7205e..a8ddbdcfac16 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -8354,6 +8354,118 @@ exit: return ret; } +static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath11k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + ar->scan.roc_notify = false; + spin_unlock_bh(&ar->data_lock); + + ath11k_scan_abort(ar); + + mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); + + return 0; +} + +static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel *chan, + int duration, + enum ieee80211_roc_type type) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct scan_req_params arg; + int ret; + u32 scan_time_msec; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ATH11K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + reinit_completion(&ar->scan.on_channel); + ar->scan.state = ATH11K_SCAN_STARTING; + ar->scan.is_roc = true; + ar->scan.vdev_id = arvif->vdev_id; + ar->scan.roc_freq = chan->center_freq; + ar->scan.roc_notify = true; + ret = 0; + break; + case 
ATH11K_SCAN_STARTING: + case ATH11K_SCAN_RUNNING: + case ATH11K_SCAN_ABORTING: + ret = -EBUSY; + break; + } + spin_unlock_bh(&ar->data_lock); + + if (ret) + goto exit; + + scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; + + memset(&arg, 0, sizeof(arg)); + ath11k_wmi_start_scan_init(ar, &arg); + arg.num_chan = 1; + arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), + GFP_KERNEL); + if (!arg.chan_list) { + ret = -ENOMEM; + goto exit; + } + + arg.vdev_id = arvif->vdev_id; + arg.scan_id = ATH11K_SCAN_ID; + arg.chan_list[0] = chan->center_freq; + arg.dwell_time_active = scan_time_msec; + arg.dwell_time_passive = scan_time_msec; + arg.max_scan_time = scan_time_msec; + arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE; + arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ; + arg.burst_duration = duration; + + ret = ath11k_start_scan(ar, &arg); + if (ret) { + ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret); + + spin_lock_bh(&ar->data_lock); + ar->scan.state = ATH11K_SCAN_IDLE; + spin_unlock_bh(&ar->data_lock); + goto free_chan_list; + } + + ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); + if (ret == 0) { + ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n"); + ret = ath11k_scan_stop(ar); + if (ret) + ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret); + ret = -ETIMEDOUT; + goto free_chan_list; + } + + ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, + msecs_to_jiffies(duration)); + + ret = 0; + +free_chan_list: + kfree(arg.chan_list); +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + static const struct ieee80211_ops ath11k_ops = { .tx = ath11k_mac_op_tx, .start = ath11k_mac_op_start, @@ -8406,6 +8518,8 @@ static const struct ieee80211_ops ath11k_ops = { #endif .set_sar_specs = ath11k_mac_op_set_bios_sar_specs, + .remain_on_channel = ath11k_mac_op_remain_on_channel, + .cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel, }; static void ath11k_mac_update_ch_list(struct ath11k *ar, @@ -8995,6 +9109,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab) init_completion(&ar->bss_survey_done); init_completion(&ar->scan.started); init_completion(&ar->scan.completed); + init_completion(&ar->scan.on_channel); init_completion(&ar->thermal.wmi_sync); INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 63463b6c63be..0eb2e5ef45d2 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -5264,6 +5264,8 @@ static void ath11k_wmi_event_scan_started(struct ath11k *ar) break; case ATH11K_SCAN_STARTING: ar->scan.state = ATH11K_SCAN_RUNNING; + if (ar->scan.is_roc) + ieee80211_ready_on_channel(ar->hw); complete(&ar->scan.started); break; } @@ -5346,6 +5348,8 @@ static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq) case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + if (ar->scan.is_roc && ar->scan.roc_freq == freq) + complete(&ar->scan.on_channel); break; } } -- cgit v1.2.3 From 355333a217541916576351446b5832fec7930566 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Mon, 9 May 2022 14:57:31 +0300 Subject: ath11k: Don't check arvif->is_started before sending management frames Commit 66307ca04057 ("ath11k: fix mgmt_tx_wmi cmd sent to FW for deleted vdev") wants both of below two conditions are true before sending management frames: 1: ar->allocated_vdev_map & (1LL << arvif->vdev_id) 2: 
arvif->is_started Actually the second one is not necessary because with the first one we can make sure the vdev is present. Also use ar->conf_mutex to synchronize vdev delete and mgmt. TX. This issue is found in case of Passpoint scenario where ath11k needs to send action frames before vdev is started. Fix it by removing the second condition. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Fixes: 66307ca04057 ("ath11k: fix mgmt_tx_wmi cmd sent to FW for deleted vdev") Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506013614.1580274-3-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/mac.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index a8ddbdcfac16..73de3619101f 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -5551,8 +5551,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) } arvif = ath11k_vif_to_arvif(skb_cb->vif); - if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && - arvif->is_started) { + mutex_lock(&ar->conf_mutex); + if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", @@ -5570,6 +5570,7 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) arvif->is_started); ath11k_mgmt_over_wmi_tx_drop(ar, skb); } + mutex_unlock(&ar->conf_mutex); } } -- cgit v1.2.3 From 1d7f514577f0ccf3e5f5736247138868fb62896a Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Mon, 9 May 2022 14:57:32 +0300 Subject: ath11k: Designating channel frequency when sending management frames In case of Passpoint, the WLAN interface may be requested to remain on a specific channel and then to send some management frames on that channel. Now chanfreq of wmi_mgmt_send_cmd is set as 0, as a result firmware may choose a default but wrong channel. Fix it by assigning chanfreq field with the designated channel. This change only applies to WCN6855 and QCA6390, other chips are not affected. 
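Stripped down, the fix is a small decision about which frequency to write into the management-TX command: keep 0 ("let the firmware choose") unless the hardware supports off-channel TX, a remain-on-channel session is active and the frame is flagged for off-channel transmission, in which case use the ROC frequency. The sketch below shows that decision in isolation; the names (struct roc_state, pick_mgmt_tx_freq and its fields) are invented for the example and are not ath11k symbols.

/* Minimal, self-contained illustration -- not driver code. */
#include <stdbool.h>

struct roc_state {
	bool hw_supports_off_channel_tx;	/* per-chip capability flag */
	bool roc_active;			/* a remain-on-channel scan is currently running */
	unsigned int roc_freq;			/* centre frequency of the ROC channel, in MHz */
};

/* Returns the frequency to place in the mgmt-TX command: 0 lets the
 * firmware choose the channel, a non-zero value pins the frame to it.
 */
static unsigned int pick_mgmt_tx_freq(const struct roc_state *s, bool frame_is_offchan)
{
	if (s->hw_supports_off_channel_tx && s->roc_active && frame_is_offchan)
		return s->roc_freq;
	return 0;
}

The patch below makes the same test in ath11k_wmi_mgmt_get_freq(), using hw_params.support_off_channel_tx, ar->scan.is_roc and the IEEE80211_TX_CTL_TX_OFFCHAN flag.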
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-01720.1-QCAHSPSWPL_V1_V2_SILICONZ_LITE-1 Signed-off-by: Baochen Qiang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506013614.1580274-4-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 7 +++++++ drivers/net/wireless/ath/ath11k/hw.h | 1 + drivers/net/wireless/ath/ath11k/wmi.c | 17 ++++++++++++++++- 3 files changed, 24 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 6b6535437485..26f7bdd1241a 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -110,6 +110,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dp_window_idx = 0, .ce_window_idx = 0, .fixed_fw_mem = false, + .support_off_channel_tx = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -185,6 +186,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dp_window_idx = 0, .ce_window_idx = 0, .fixed_fw_mem = false, + .support_off_channel_tx = false, }, { .name = "qca6390 hw2.0", @@ -259,6 +261,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dp_window_idx = 0, .ce_window_idx = 0, .fixed_fw_mem = false, + .support_off_channel_tx = true, }, { .name = "qcn9074 hw1.0", @@ -333,6 +336,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dp_window_idx = 3, .ce_window_idx = 2, .fixed_fw_mem = false, + .support_off_channel_tx = false, }, { .name = "wcn6855 hw2.0", @@ -407,6 +411,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dp_window_idx = 0, .ce_window_idx = 0, .fixed_fw_mem = false, + .support_off_channel_tx = true, }, { .name = "wcn6855 hw2.1", @@ -480,6 +485,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dp_window_idx = 0, .ce_window_idx = 0, .fixed_fw_mem = false, + .support_off_channel_tx = true, }, { .name = "wcn6750 hw1.0", @@ -553,6 +559,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .dp_window_idx = 1, .ce_window_idx = 2, .fixed_fw_mem = true, + .support_off_channel_tx = false, }, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 3a2abde63489..77dc5c851c9b 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -204,6 +204,7 @@ struct ath11k_hw_params { u8 dp_window_idx; u8 ce_window_idx; bool fixed_fw_mem; + bool support_off_channel_tx; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 0eb2e5ef45d2..84d1c7054013 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -625,10 +625,25 @@ struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len) return skb; } +static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar, + struct ieee80211_tx_info *info) +{ + struct ath11k_base *ab = ar->ab; + u32 freq = 0; + + if (ab->hw_params.support_off_channel_tx && + ar->scan.is_roc && + (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) + freq = ar->scan.roc_freq; + + return freq; +} + int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, struct sk_buff *frame) { struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame); struct wmi_mgmt_send_cmd *cmd; struct wmi_tlv *frame_tlv; struct sk_buff *skb; @@ -649,7 +664,7 @@ int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id 
= vdev_id; cmd->desc_id = buf_id; - cmd->chanfreq = 0; + cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info); cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->frame_len = frame->len; -- cgit v1.2.3 From 4255a07a98cb0054947fbe5cb3d6f0b5eb87522b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 9 May 2022 14:57:32 +0300 Subject: wil6210: remove 'freq' debugfs This is completely racy, remove it. Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506095451.9852305a92c8.I05155824a1b9059eb59beccde81786dc69de354a@changeid --- drivers/net/wireless/ath/wil6210/debugfs.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 4c944e595978..64d6c98174c8 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1391,19 +1391,6 @@ static int temp_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(temp); -/*---------freq------------*/ -static int freq_show(struct seq_file *s, void *data) -{ - struct wil6210_priv *wil = s->private; - struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; - u32 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0; - - seq_printf(s, "Freq = %d\n", freq); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(freq); - /*---------link------------*/ static int link_show(struct seq_file *s, void *data) { @@ -2380,7 +2367,6 @@ static const struct { {"pmcdata", 0444, &fops_pmcdata}, {"pmcring", 0444, &fops_pmcring}, {"temp", 0444, &temp_fops}, - {"freq", 0444, &freq_fops}, {"link", 0444, &link_fops}, {"info", 0444, &info_fops}, {"recovery", 0644, &fops_recovery}, -- cgit v1.2.3 From 5962f370ce416371b432325a8f98680f73a1bfdc Mon Sep 17 00:00:00 2001 From: Anilkumar Kolli Date: Mon, 9 May 2022 14:57:32 +0300 Subject: ath11k: Reuse the available memory after firmware reload Ath11k allocates memory when firmware requests memory in QMI. Coldboot calibration and firmware recovery uses firmware reload. On firmware reload, firmware sends memory request again. If Ath11k allocates memory on first firmware boot, reuse the available memory. Also check if the segment type and size is same on the next firmware boot. Reuse if segment type/size is same as previous firmware boot else free the segment and allocate the segment with size/type. 
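In outline, the per-segment handling described above reduces to: if a buffer from the previous boot is still present and the new request has the same type and size, keep it; otherwise free it and allocate afresh, remembering the type and size for the next boot. A rough self-contained sketch of that bookkeeping follows -- struct segment, segment_prepare() and the plain malloc()/free() calls are stand-ins for the driver's target_mem_chunk handling and its dma_alloc_coherent()/dma_free_coherent() calls, not the actual ath11k API.

#include <stdlib.h>

/* Hypothetical per-segment bookkeeping, mirroring the idea of prev_type/prev_size. */
struct segment {
	void *vaddr;		/* NULL until first allocation */
	size_t size;		/* size requested by firmware this boot */
	unsigned int type;	/* segment type requested this boot */
	size_t prev_size;	/* what was allocated on the previous boot */
	unsigned int prev_type;
};

static int segment_prepare(struct segment *seg)
{
	if (seg->vaddr) {
		/* Same request as the previous boot: reuse the existing buffer. */
		if (seg->prev_type == seg->type && seg->prev_size == seg->size)
			return 0;
		/* Mismatch: the old buffer cannot be reused. */
		free(seg->vaddr);
		seg->vaddr = NULL;
	}

	seg->vaddr = malloc(seg->size);
	if (!seg->vaddr)
		return -1;

	seg->prev_type = seg->type;
	seg->prev_size = seg->size;
	return 0;
}

The sketch reuses a buffer only when both the type and the size match, which is how the commit message reads; anything else is released and a new buffer is allocated for the new request.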
Tested-on: QCN9074 hw1.0 PCI WLAN.HK.2.6.0.1-00752-QCAHKSWPL_SILICONZ-1 Signed-off-by: Anilkumar Kolli Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506141448.10340-1-quic_akolli@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 1 - drivers/net/wireless/ath/ath11k/qmi.c | 24 +++++++++++++++++++++--- drivers/net/wireless/ath/ath11k/qmi.h | 2 ++ 3 files changed, 23 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 26f7bdd1241a..1e98ff9ff288 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1777,7 +1777,6 @@ static void ath11k_core_reset(struct work_struct *work) ATH11K_RECOVER_START_TIMEOUT_HZ); ath11k_hif_power_down(ab); - ath11k_qmi_free_resource(ab); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n"); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index d1e945074bc1..61ead37a944a 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1970,6 +1970,21 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) for (i = 0; i < ab->qmi.mem_seg_count; i++) { chunk = &ab->qmi.target_mem[i]; + + /* Firmware reloads in coldboot/firmware recovery. + * in such case, no need to allocate memory for FW again. + */ + if (chunk->vaddr) { + if (chunk->prev_type == chunk->type || + chunk->prev_size == chunk->size) + continue; + + /* cannot reuse the existing chunk */ + dma_free_coherent(ab->dev, chunk->size, + chunk->vaddr, chunk->paddr); + chunk->vaddr = NULL; + } + chunk->vaddr = dma_alloc_coherent(ab->dev, chunk->size, &chunk->paddr, @@ -1990,6 +2005,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) chunk->type); return -EINVAL; } + chunk->prev_type = chunk->type; + chunk->prev_size = chunk->size; } return 0; @@ -2466,9 +2483,6 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab) char path[100]; int ret; - if (m3_mem->vaddr || m3_mem->size) - return 0; - fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE); if (IS_ERR(fw)) { ret = PTR_ERR(fw); @@ -2478,6 +2492,9 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab) return ret; } + if (m3_mem->vaddr || m3_mem->size) + goto skip_m3_alloc; + m3_mem->vaddr = dma_alloc_coherent(ab->dev, fw->size, &m3_mem->paddr, GFP_KERNEL); @@ -2488,6 +2505,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab) return -ENOMEM; } +skip_m3_alloc: memcpy(m3_mem->vaddr, fw->data, fw->size); m3_mem->size = fw->size; release_firmware(fw); diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index c24e6995cca3..c83cf822be81 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -97,6 +97,8 @@ struct ath11k_qmi_event_msg { struct target_mem_chunk { u32 size; u32 type; + u32 prev_size; + u32 prev_type; dma_addr_t paddr; u32 *vaddr; void __iomem *iaddr; -- cgit v1.2.3 From 25c321e8534e9efe1869b548e7912faffed1f5be Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 9 May 2022 14:57:32 +0300 Subject: ath11k: remove redundant assignment to variables vht_mcs and he_mcs The variables vht_mcs and he_mcs are being initialized in the start of for-loops however they are re-assigned new values in the loop and not used outside the loop. The initializations are redundant and can be removed. 
Cleans up clang scan warnings: warning: Although the value stored to 'vht_mcs' is used in the enclosing expression, the value is never actually read from 'vht_mcs' [deadcode.DeadStores] warning: Although the value stored to 'he_mcs' is used in the enclosing expression, the value is never actually read from 'he_mcs' [deadcode.DeadStores] Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220507184155.26939-1-colin.i.king@gmail.com --- drivers/net/wireless/ath/ath11k/mac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 73de3619101f..ee1590b16eff 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1951,7 +1951,7 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar, /* Calculate peer NSS capability from VHT capabilities if STA * supports VHT. */ - for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { + for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) { vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> (2 * i) & 3; @@ -2272,7 +2272,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, /* Calculate peer NSS capability from HE capabilities if STA * supports HE. */ - for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) { + for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) { he_mcs = he_tx_mcs >> (2 * i) & 3; /* In case of fixed rates, MCS Range in he_tx_mcs might have -- cgit v1.2.3 From d098538ed4e8a6c09f86cf243f406c1451066040 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 21 Mar 2022 08:05:10 +0200 Subject: igc: Remove igc_set_spd_dplx method igc_set_spd_dplx method is not used. This patch comes to tidy up the driver code. 
Reported-by: Muhammad Husaini Zulkifli Signed-off-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc.h | 1 - drivers/net/ethernet/intel/igc/igc_main.c | 50 ------------------------------- 2 files changed, 51 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 3e386c38d016..1e7e7071f64d 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -264,7 +264,6 @@ int igc_reinit_queues(struct igc_adapter *adapter); void igc_write_rss_indir_tbl(struct igc_adapter *adapter); bool igc_has_link(struct igc_adapter *adapter); void igc_reset(struct igc_adapter *adapter); -int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); void igc_update_stats(struct igc_adapter *adapter); void igc_disable_rx_ring(struct igc_ring *ring); void igc_enable_rx_ring(struct igc_ring *ring); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 74b2c590ed5d..ae17af44fe02 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6187,56 +6187,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) return value; } -int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) -{ - struct igc_mac_info *mac = &adapter->hw.mac; - - mac->autoneg = false; - - /* Make sure dplx is at most 1 bit and lsb of speed is not set - * for the switch() below to work - */ - if ((spd & 1) || (dplx & ~1)) - goto err_inval; - - switch (spd + dplx) { - case SPEED_10 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_10_HALF; - break; - case SPEED_10 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_10_FULL; - break; - case SPEED_100 + DUPLEX_HALF: - mac->forced_speed_duplex = ADVERTISE_100_HALF; - break; - case SPEED_100 + DUPLEX_FULL: - mac->forced_speed_duplex = ADVERTISE_100_FULL; - break; - case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = true; - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; - break; - case SPEED_1000 + DUPLEX_HALF: /* not supported */ - goto err_inval; - case SPEED_2500 + DUPLEX_FULL: - mac->autoneg = true; - adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; - break; - case SPEED_2500 + DUPLEX_HALF: /* not supported */ - default: - goto err_inval; - } - - /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ - adapter->hw.phy.mdix = AUTO_ALL_MODES; - - return 0; - -err_inval: - netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n"); - return -EINVAL; -} - /** * igc_probe - Device Initialization Routine * @pdev: PCI device information struct -- cgit v1.2.3 From 7241069f7a0715f85ec868cf807446a6113b84fe Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Sat, 23 Apr 2022 19:53:21 +0300 Subject: igc: Remove unused phy_type enum Complete to commit 8e153faf5827 ("igc: Remove unused phy type") i225 parts have only one PHY. There is no point to use phy_type enum. Clean up the code accordingly, and get rid of the unused enum lines. 
Signed-off-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_base.c | 2 -- drivers/net/ethernet/intel/igc/igc_hw.h | 7 ------- drivers/net/ethernet/intel/igc/igc_phy.c | 12 +++--------- 3 files changed, 3 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index f068b66b8025..a15927e77272 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -182,8 +182,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) igc_check_for_copper_link(hw); - phy->type = igc_phy_i225; - out: return ret_val; } diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index b1e72ec5f131..360644f33d5f 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -53,11 +53,6 @@ enum igc_mac_type { igc_num_macs /* List is 1-based, so subtract 1 for true count. */ }; -enum igc_phy_type { - igc_phy_unknown = 0, - igc_phy_i225, -}; - enum igc_media_type { igc_media_type_unknown = 0, igc_media_type_copper = 1, @@ -138,8 +133,6 @@ struct igc_nvm_info { struct igc_phy_info { struct igc_phy_operations ops; - enum igc_phy_type type; - u32 addr; u32 id; u32 reset_delay_us; /* in usec */ diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 6961f65d36b9..2140ad1e8443 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -148,17 +148,11 @@ void igc_power_down_phy_copper(struct igc_hw *hw) s32 igc_check_downshift(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; - s32 ret_val; - switch (phy->type) { - case igc_phy_i225: - default: - /* speed downshift not supported */ - phy->speed_downgraded = false; - ret_val = 0; - } + /* speed downshift not supported */ + phy->speed_downgraded = false; - return ret_val; + return 0; } /** -- cgit v1.2.3 From 95073d08154a27b1d0a84bcf6210e67c3b4d8c08 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Sat, 23 Apr 2022 19:55:02 +0300 Subject: igc: Change type of the 'igc_check_downshift' method The 'igc_check_downshift' method always returns 0; there is no need for a return value so change the type of this method to void. Signed-off-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_phy.c | 6 +----- drivers/net/ethernet/intel/igc/igc_phy.h | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 2140ad1e8443..53b77c969c85 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -141,18 +141,14 @@ void igc_power_down_phy_copper(struct igc_hw *hw) * igc_check_downshift - Checks whether a downshift in speed occurred * @hw: pointer to the HW structure * - * Success returns 0, Failure returns 1 - * * A downshift is detected by querying the PHY link health. 
*/ -s32 igc_check_downshift(struct igc_hw *hw) +void igc_check_downshift(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; /* speed downshift not supported */ phy->speed_downgraded = false; - - return 0; } /** diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h index 1b031372d206..832a7e359f18 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.h +++ b/drivers/net/ethernet/intel/igc/igc_phy.h @@ -11,7 +11,7 @@ s32 igc_phy_hw_reset(struct igc_hw *hw); s32 igc_get_phy_id(struct igc_hw *hw); s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations, u32 usec_interval, bool *success); -s32 igc_check_downshift(struct igc_hw *hw); +void igc_check_downshift(struct igc_hw *hw); s32 igc_setup_copper_link(struct igc_hw *hw); void igc_power_up_phy_copper(struct igc_hw *hw); void igc_power_down_phy_copper(struct igc_hw *hw); -- cgit v1.2.3 From 61004d1d4badb5ba31d57d49003c17cfcdfed027 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Tue, 10 May 2022 09:48:45 +0200 Subject: nfp: flower: fix 'variable 'flow6' set but not used' Kernel test robot reported an issue after a recent patch about an unused variable when CONFIG_IPV6 is disabled. Move the variable declaration to be inside the #ifdef, and do a bit more cleanup. There is no need to use a temporary ipv6 bool value, it is just checked once, remove the extra variable and just do the check directly. Fixes: 9d5447ed44b5 ("nfp: flower: fixup ipv6/ipv4 route lookup for neigh events") Reported-by: kernel test robot Signed-off-by: Louis Peens Signed-off-by: Simon Horman Link: https://lore.kernel.org/r/20220510074845.41457-1-simon.horman@corigine.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/netronome/nfp/flower/tunnel_conf.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 9c37ed6943d0..6bf3ec448e7e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -563,12 +563,9 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, { struct nfp_flower_priv *app_priv; struct netevent_redirect *redir; - struct flowi4 flow4 = {}; - struct flowi6 flow6 = {}; struct neighbour *n; struct nfp_app *app; bool neigh_invalid; - bool ipv6 = false; int err; switch (event) { @@ -583,16 +580,8 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, return NOTIFY_DONE; } - if (n->tbl->family == AF_INET6) - ipv6 = true; - neigh_invalid = !(n->nud_state & NUD_VALID) || n->dead; - if (ipv6) - flow6.daddr = *(struct in6_addr *)n->primary_key; - else - flow4.daddr = *(__be32 *)n->primary_key; - app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app; @@ -601,8 +590,11 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, return NOTIFY_DONE; #if IS_ENABLED(CONFIG_INET) - if (ipv6) { + if (n->tbl->family == AF_INET6) { #if IS_ENABLED(CONFIG_IPV6) + struct flowi6 flow6 = {}; + + flow6.daddr = *(struct in6_addr *)n->primary_key; if (!neigh_invalid) { struct dst_entry *dst; /* Use ipv6_dst_lookup_flow to populate flow6->saddr @@ -623,6 +615,9 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, return NOTIFY_DONE; #endif /* CONFIG_IPV6 */ } else { + struct flowi4 flow4 = {}; + + flow4.daddr = *(__be32 *)n->primary_key; if (!neigh_invalid) { struct 
rtable *rt; /* Use ip_route_output_key to populate flow4->saddr and -- cgit v1.2.3 From 36ff6393292d3b7b3b02007bd7d9a353524ed225 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:31:18 +0100 Subject: sfc: Move Siena specific files Files are only moved, no changes are made. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/farch.c | 2988 -------------------------- drivers/net/ethernet/sfc/siena.c | 1109 ---------- drivers/net/ethernet/sfc/siena/farch.c | 2988 ++++++++++++++++++++++++++ drivers/net/ethernet/sfc/siena/siena.c | 1109 ++++++++++ drivers/net/ethernet/sfc/siena/siena_sriov.c | 1686 +++++++++++++++ drivers/net/ethernet/sfc/siena/siena_sriov.h | 76 + drivers/net/ethernet/sfc/siena_sriov.c | 1686 --------------- drivers/net/ethernet/sfc/siena_sriov.h | 76 - 8 files changed, 5859 insertions(+), 5859 deletions(-) delete mode 100644 drivers/net/ethernet/sfc/farch.c delete mode 100644 drivers/net/ethernet/sfc/siena.c create mode 100644 drivers/net/ethernet/sfc/siena/farch.c create mode 100644 drivers/net/ethernet/sfc/siena/siena.c create mode 100644 drivers/net/ethernet/sfc/siena/siena_sriov.c create mode 100644 drivers/net/ethernet/sfc/siena/siena_sriov.h delete mode 100644 drivers/net/ethernet/sfc/siena_sriov.c delete mode 100644 drivers/net/ethernet/sfc/siena_sriov.h (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c deleted file mode 100644 index 9599123bc28d..000000000000 --- a/drivers/net/ethernet/sfc/farch.c +++ /dev/null @@ -1,2988 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/**************************************************************************** - * Driver for Solarflare network controllers and boards - * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2013 Solarflare Communications Inc. - */ - -#include -#include -#include -#include -#include -#include -#include -#include "net_driver.h" -#include "bitfield.h" -#include "efx.h" -#include "rx_common.h" -#include "tx_common.h" -#include "nic.h" -#include "farch_regs.h" -#include "sriov.h" -#include "siena_sriov.h" -#include "io.h" -#include "workarounds.h" - -/* Falcon-architecture (SFC9000-family) support */ - -/************************************************************************** - * - * Configurable values - * - ************************************************************************** - */ - -/* This is set to 16 for a good reason. In summary, if larger than - * 16, the descriptor cache holds more than a default socket - * buffer's worth of packets (for UDP we can only have at most one - * socket buffer's worth outstanding). This combined with the fact - * that we only get 1 TX event per descriptor cache means the NIC - * goes idle. - */ -#define TX_DC_ENTRIES 16 -#define TX_DC_ENTRIES_ORDER 1 - -#define RX_DC_ENTRIES 64 -#define RX_DC_ENTRIES_ORDER 3 - -/* If EFX_MAX_INT_ERRORS internal errors occur within - * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and - * disable it. 
- */ -#define EFX_INT_ERROR_EXPIRE 3600 -#define EFX_MAX_INT_ERRORS 5 - -/* Depth of RX flush request fifo */ -#define EFX_RX_FLUSH_COUNT 4 - -/* Driver generated events */ -#define _EFX_CHANNEL_MAGIC_TEST 0x000101 -#define _EFX_CHANNEL_MAGIC_FILL 0x000102 -#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 -#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 - -#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) -#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) - -#define EFX_CHANNEL_MAGIC_TEST(_channel) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) -#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ - efx_rx_queue_index(_rx_queue)) -#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ - efx_rx_queue_index(_rx_queue)) -#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ - _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ - (_tx_queue)->queue) - -static void efx_farch_magic_event(struct efx_channel *channel, u32 magic); - -/************************************************************************** - * - * Hardware access - * - **************************************************************************/ - -static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, - unsigned int index) -{ - efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, - value, index); -} - -static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, - const efx_oword_t *mask) -{ - return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || - ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); -} - -int efx_farch_test_registers(struct efx_nic *efx, - const struct efx_farch_register_test *regs, - size_t n_regs) -{ - unsigned address = 0; - int i, j; - efx_oword_t mask, imask, original, reg, buf; - - for (i = 0; i < n_regs; ++i) { - address = regs[i].address; - mask = imask = regs[i].mask; - EFX_INVERT_OWORD(imask); - - efx_reado(efx, &original, address); - - /* bit sweep on and off */ - for (j = 0; j < 128; j++) { - if (!EFX_EXTRACT_OWORD32(mask, j, j)) - continue; - - /* Test this testable bit can be set in isolation */ - EFX_AND_OWORD(reg, original, mask); - EFX_SET_OWORD32(reg, j, j, 1); - - efx_writeo(efx, ®, address); - efx_reado(efx, &buf, address); - - if (efx_masked_compare_oword(®, &buf, &mask)) - goto fail; - - /* Test this testable bit can be cleared in isolation */ - EFX_OR_OWORD(reg, original, mask); - EFX_SET_OWORD32(reg, j, j, 0); - - efx_writeo(efx, ®, address); - efx_reado(efx, &buf, address); - - if (efx_masked_compare_oword(®, &buf, &mask)) - goto fail; - } - - efx_writeo(efx, &original, address); - } - - return 0; - -fail: - netif_err(efx, hw, efx->net_dev, - "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT - " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), - EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); - return -EIO; -} - -/************************************************************************** - * - * Special buffer handling - * Special buffers are used for event queues and the TX and RX - * descriptor rings. - * - *************************************************************************/ - -/* - * Initialise a special buffer - * - * This will define a buffer (previously allocated via - * efx_alloc_special_buffer()) in the buffer table, allowing - * it to be used for event queues, descriptor rings etc. 
- */ -static void -efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) -{ - efx_qword_t buf_desc; - unsigned int index; - dma_addr_t dma_addr; - int i; - - EFX_WARN_ON_PARANOID(!buffer->buf.addr); - - /* Write buffer descriptors to NIC */ - for (i = 0; i < buffer->entries; i++) { - index = buffer->index + i; - dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); - netif_dbg(efx, probe, efx->net_dev, - "mapping special buffer %d at %llx\n", - index, (unsigned long long)dma_addr); - EFX_POPULATE_QWORD_3(buf_desc, - FRF_AZ_BUF_ADR_REGION, 0, - FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, - FRF_AZ_BUF_OWNER_ID_FBUF, 0); - efx_write_buf_tbl(efx, &buf_desc, index); - } -} - -/* Unmaps a buffer and clears the buffer table entries */ -static void -efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) -{ - efx_oword_t buf_tbl_upd; - unsigned int start = buffer->index; - unsigned int end = (buffer->index + buffer->entries - 1); - - if (!buffer->entries) - return; - - netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", - buffer->index, buffer->index + buffer->entries - 1); - - EFX_POPULATE_OWORD_4(buf_tbl_upd, - FRF_AZ_BUF_UPD_CMD, 0, - FRF_AZ_BUF_CLR_CMD, 1, - FRF_AZ_BUF_CLR_END_ID, end, - FRF_AZ_BUF_CLR_START_ID, start); - efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); -} - -/* - * Allocate a new special buffer - * - * This allocates memory for a new buffer, clears it and allocates a - * new buffer ID range. It does not write into the buffer table. - * - * This call will allocate 4KB buffers, since 8KB buffers can't be - * used for event queues and descriptor rings. - */ -static int efx_alloc_special_buffer(struct efx_nic *efx, - struct efx_special_buffer *buffer, - unsigned int len) -{ -#ifdef CONFIG_SFC_SRIOV - struct siena_nic_data *nic_data = efx->nic_data; -#endif - len = ALIGN(len, EFX_BUF_SIZE); - - if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) - return -ENOMEM; - buffer->entries = len / EFX_BUF_SIZE; - BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); - - /* Select new buffer ID */ - buffer->index = efx->next_buffer_table; - efx->next_buffer_table += buffer->entries; -#ifdef CONFIG_SFC_SRIOV - BUG_ON(efx_siena_sriov_enabled(efx) && - nic_data->vf_buftbl_base < efx->next_buffer_table); -#endif - - netif_dbg(efx, probe, efx->net_dev, - "allocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, - buffer->index + buffer->entries - 1, - (u64)buffer->buf.dma_addr, len, - buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); - - return 0; -} - -static void -efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) -{ - if (!buffer->buf.addr) - return; - - netif_dbg(efx, hw, efx->net_dev, - "deallocating special buffers %d-%d at %llx+%x " - "(virt %p phys %llx)\n", buffer->index, - buffer->index + buffer->entries - 1, - (u64)buffer->buf.dma_addr, buffer->buf.len, - buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); - - efx_nic_free_buffer(efx, &buffer->buf); - buffer->entries = 0; -} - -/************************************************************************** - * - * TX path - * - **************************************************************************/ - -/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ -static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) -{ - unsigned write_ptr; - efx_dword_t reg; - - write_ptr = tx_queue->write_count & tx_queue->ptr_mask; - EFX_POPULATE_DWORD_1(reg, 
FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); - efx_writed_page(tx_queue->efx, ®, - FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); -} - -/* Write pointer and first descriptor for TX descriptor ring */ -static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, - const efx_qword_t *txd) -{ - unsigned write_ptr; - efx_oword_t reg; - - BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); - BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); - - write_ptr = tx_queue->write_count & tx_queue->ptr_mask; - EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, - FRF_AZ_TX_DESC_WPTR, write_ptr); - reg.qword[0] = *txd; - efx_writeo_page(tx_queue->efx, ®, - FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); -} - - -/* For each entry inserted into the software descriptor ring, create a - * descriptor in the hardware TX descriptor ring (in host memory), and - * write a doorbell. - */ -void efx_farch_tx_write(struct efx_tx_queue *tx_queue) -{ - struct efx_tx_buffer *buffer; - efx_qword_t *txd; - unsigned write_ptr; - unsigned old_write_count = tx_queue->write_count; - - tx_queue->xmit_pending = false; - if (unlikely(tx_queue->write_count == tx_queue->insert_count)) - return; - - do { - write_ptr = tx_queue->write_count & tx_queue->ptr_mask; - buffer = &tx_queue->buffer[write_ptr]; - txd = efx_tx_desc(tx_queue, write_ptr); - ++tx_queue->write_count; - - EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION); - - /* Create TX descriptor ring entry */ - BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); - EFX_POPULATE_QWORD_4(*txd, - FSF_AZ_TX_KER_CONT, - buffer->flags & EFX_TX_BUF_CONT, - FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, - FSF_AZ_TX_KER_BUF_REGION, 0, - FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); - } while (tx_queue->write_count != tx_queue->insert_count); - - wmb(); /* Ensure descriptors are written before they are fetched */ - - if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { - txd = efx_tx_desc(tx_queue, - old_write_count & tx_queue->ptr_mask); - efx_farch_push_tx_desc(tx_queue, txd); - ++tx_queue->pushes; - } else { - efx_farch_notify_tx_desc(tx_queue); - } -} - -unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue, - dma_addr_t dma_addr, unsigned int len) -{ - /* Don't cross 4K boundaries with descriptors. */ - unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1; - - len = min(limit, len); - - return len; -} - - -/* Allocate hardware resources for a TX queue */ -int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) -{ - struct efx_nic *efx = tx_queue->efx; - unsigned entries; - - tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) | - ((tx_queue->label & 2) ? 
EFX_TXQ_TYPE_HIGHPRI : 0); - entries = tx_queue->ptr_mask + 1; - return efx_alloc_special_buffer(efx, &tx_queue->txd, - entries * sizeof(efx_qword_t)); -} - -void efx_farch_tx_init(struct efx_tx_queue *tx_queue) -{ - int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM; - struct efx_nic *efx = tx_queue->efx; - efx_oword_t reg; - - /* Pin TX descriptor ring */ - efx_init_special_buffer(efx, &tx_queue->txd); - - /* Push TX descriptor ring to card */ - EFX_POPULATE_OWORD_10(reg, - FRF_AZ_TX_DESCQ_EN, 1, - FRF_AZ_TX_ISCSI_DDIG_EN, 0, - FRF_AZ_TX_ISCSI_HDIG_EN, 0, - FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, - FRF_AZ_TX_DESCQ_EVQ_ID, - tx_queue->channel->channel, - FRF_AZ_TX_DESCQ_OWNER_ID, 0, - FRF_AZ_TX_DESCQ_LABEL, tx_queue->label, - FRF_AZ_TX_DESCQ_SIZE, - __ffs(tx_queue->txd.entries), - FRF_AZ_TX_DESCQ_TYPE, 0, - FRF_BZ_TX_NON_IP_DROP_DIS, 1); - - EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); - EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum); - - efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, - tx_queue->queue); - - EFX_POPULATE_OWORD_1(reg, - FRF_BZ_TX_PACE, - (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? - FFE_BZ_TX_PACE_OFF : - FFE_BZ_TX_PACE_RESERVED); - efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, tx_queue->queue); - - tx_queue->tso_version = 1; -} - -static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) -{ - struct efx_nic *efx = tx_queue->efx; - efx_oword_t tx_flush_descq; - - WARN_ON(atomic_read(&tx_queue->flush_outstanding)); - atomic_set(&tx_queue->flush_outstanding, 1); - - EFX_POPULATE_OWORD_2(tx_flush_descq, - FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, - FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); - efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); -} - -void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) -{ - struct efx_nic *efx = tx_queue->efx; - efx_oword_t tx_desc_ptr; - - /* Remove TX descriptor ring from card */ - EFX_ZERO_OWORD(tx_desc_ptr); - efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, - tx_queue->queue); - - /* Unpin TX descriptor ring */ - efx_fini_special_buffer(efx, &tx_queue->txd); -} - -/* Free buffers backing TX queue */ -void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) -{ - efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); -} - -/************************************************************************** - * - * RX path - * - **************************************************************************/ - -/* This creates an entry in the RX descriptor queue */ -static inline void -efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) -{ - struct efx_rx_buffer *rx_buf; - efx_qword_t *rxd; - - rxd = efx_rx_desc(rx_queue, index); - rx_buf = efx_rx_buffer(rx_queue, index); - EFX_POPULATE_QWORD_3(*rxd, - FSF_AZ_RX_KER_BUF_SIZE, - rx_buf->len - - rx_queue->efx->type->rx_buffer_padding, - FSF_AZ_RX_KER_BUF_REGION, 0, - FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); -} - -/* This writes to the RX_DESC_WPTR register for the specified receive - * descriptor ring. 
- */ -void efx_farch_rx_write(struct efx_rx_queue *rx_queue) -{ - struct efx_nic *efx = rx_queue->efx; - efx_dword_t reg; - unsigned write_ptr; - - while (rx_queue->notified_count != rx_queue->added_count) { - efx_farch_build_rx_desc( - rx_queue, - rx_queue->notified_count & rx_queue->ptr_mask); - ++rx_queue->notified_count; - } - - wmb(); - write_ptr = rx_queue->added_count & rx_queue->ptr_mask; - EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); - efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, - efx_rx_queue_index(rx_queue)); -} - -int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) -{ - struct efx_nic *efx = rx_queue->efx; - unsigned entries; - - entries = rx_queue->ptr_mask + 1; - return efx_alloc_special_buffer(efx, &rx_queue->rxd, - entries * sizeof(efx_qword_t)); -} - -void efx_farch_rx_init(struct efx_rx_queue *rx_queue) -{ - efx_oword_t rx_desc_ptr; - struct efx_nic *efx = rx_queue->efx; - bool jumbo_en; - - /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */ - jumbo_en = efx->rx_scatter; - - netif_dbg(efx, hw, efx->net_dev, - "RX queue %d ring in special buffers %d-%d\n", - efx_rx_queue_index(rx_queue), rx_queue->rxd.index, - rx_queue->rxd.index + rx_queue->rxd.entries - 1); - - rx_queue->scatter_n = 0; - - /* Pin RX descriptor ring */ - efx_init_special_buffer(efx, &rx_queue->rxd); - - /* Push RX descriptor ring to card */ - EFX_POPULATE_OWORD_10(rx_desc_ptr, - FRF_AZ_RX_ISCSI_DDIG_EN, true, - FRF_AZ_RX_ISCSI_HDIG_EN, true, - FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, - FRF_AZ_RX_DESCQ_EVQ_ID, - efx_rx_queue_channel(rx_queue)->channel, - FRF_AZ_RX_DESCQ_OWNER_ID, 0, - FRF_AZ_RX_DESCQ_LABEL, - efx_rx_queue_index(rx_queue), - FRF_AZ_RX_DESCQ_SIZE, - __ffs(rx_queue->rxd.entries), - FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , - FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, - FRF_AZ_RX_DESCQ_EN, 1); - efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, - efx_rx_queue_index(rx_queue)); -} - -static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) -{ - struct efx_nic *efx = rx_queue->efx; - efx_oword_t rx_flush_descq; - - EFX_POPULATE_OWORD_2(rx_flush_descq, - FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, - FRF_AZ_RX_FLUSH_DESCQ, - efx_rx_queue_index(rx_queue)); - efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); -} - -void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) -{ - efx_oword_t rx_desc_ptr; - struct efx_nic *efx = rx_queue->efx; - - /* Remove RX descriptor ring from card */ - EFX_ZERO_OWORD(rx_desc_ptr); - efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, - efx_rx_queue_index(rx_queue)); - - /* Unpin RX descriptor ring */ - efx_fini_special_buffer(efx, &rx_queue->rxd); -} - -/* Free buffers backing RX queue */ -void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) -{ - efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); -} - -/************************************************************************** - * - * Flush handling - * - **************************************************************************/ - -/* efx_farch_flush_queues() must be woken up when all flushes are completed, - * or more RX flushes can be kicked off. 
- */ -static bool efx_farch_flush_wake(struct efx_nic *efx) -{ - /* Ensure that all updates are visible to efx_farch_flush_queues() */ - smp_mb(); - - return (atomic_read(&efx->active_queues) == 0 || - (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT - && atomic_read(&efx->rxq_flush_pending) > 0)); -} - -static bool efx_check_tx_flush_complete(struct efx_nic *efx) -{ - bool i = true; - efx_oword_t txd_ptr_tbl; - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - efx_reado_table(efx, &txd_ptr_tbl, - FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); - if (EFX_OWORD_FIELD(txd_ptr_tbl, - FRF_AZ_TX_DESCQ_FLUSH) || - EFX_OWORD_FIELD(txd_ptr_tbl, - FRF_AZ_TX_DESCQ_EN)) { - netif_dbg(efx, hw, efx->net_dev, - "flush did not complete on TXQ %d\n", - tx_queue->queue); - i = false; - } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, - 1, 0)) { - /* The flush is complete, but we didn't - * receive a flush completion event - */ - netif_dbg(efx, hw, efx->net_dev, - "flush complete on TXQ %d, so drain " - "the queue\n", tx_queue->queue); - /* Don't need to increment active_queues as it - * has already been incremented for the queues - * which did not drain - */ - efx_farch_magic_event(channel, - EFX_CHANNEL_MAGIC_TX_DRAIN( - tx_queue)); - } - } - } - - return i; -} - -/* Flush all the transmit queues, and continue flushing receive queues until - * they're all flushed. Wait for the DRAIN events to be received so that there - * are no more RX and TX events left on any channel. */ -static int efx_farch_do_flush(struct efx_nic *efx) -{ - unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ - struct efx_channel *channel; - struct efx_rx_queue *rx_queue; - struct efx_tx_queue *tx_queue; - int rc = 0; - - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - efx_farch_flush_tx_queue(tx_queue); - } - efx_for_each_channel_rx_queue(rx_queue, channel) { - rx_queue->flush_pending = true; - atomic_inc(&efx->rxq_flush_pending); - } - } - - while (timeout && atomic_read(&efx->active_queues) > 0) { - /* If SRIOV is enabled, then offload receive queue flushing to - * the firmware (though we will still have to poll for - * completion). If that fails, fall back to the old scheme. 
- */ - if (efx_siena_sriov_enabled(efx)) { - rc = efx_mcdi_flush_rxqs(efx); - if (!rc) - goto wait; - } - - /* The hardware supports four concurrent rx flushes, each of - * which may need to be retried if there is an outstanding - * descriptor fetch - */ - efx_for_each_channel(channel, efx) { - efx_for_each_channel_rx_queue(rx_queue, channel) { - if (atomic_read(&efx->rxq_flush_outstanding) >= - EFX_RX_FLUSH_COUNT) - break; - - if (rx_queue->flush_pending) { - rx_queue->flush_pending = false; - atomic_dec(&efx->rxq_flush_pending); - atomic_inc(&efx->rxq_flush_outstanding); - efx_farch_flush_rx_queue(rx_queue); - } - } - } - - wait: - timeout = wait_event_timeout(efx->flush_wq, - efx_farch_flush_wake(efx), - timeout); - } - - if (atomic_read(&efx->active_queues) && - !efx_check_tx_flush_complete(efx)) { - netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " - "(rx %d+%d)\n", atomic_read(&efx->active_queues), - atomic_read(&efx->rxq_flush_outstanding), - atomic_read(&efx->rxq_flush_pending)); - rc = -ETIMEDOUT; - - atomic_set(&efx->active_queues, 0); - atomic_set(&efx->rxq_flush_pending, 0); - atomic_set(&efx->rxq_flush_outstanding, 0); - } - - return rc; -} - -int efx_farch_fini_dmaq(struct efx_nic *efx) -{ - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - struct efx_rx_queue *rx_queue; - int rc = 0; - - /* Do not attempt to write to the NIC during EEH recovery */ - if (efx->state != STATE_RECOVERY) { - /* Only perform flush if DMA is enabled */ - if (efx->pci_dev->is_busmaster) { - efx->type->prepare_flush(efx); - rc = efx_farch_do_flush(efx); - efx->type->finish_flush(efx); - } - - efx_for_each_channel(channel, efx) { - efx_for_each_channel_rx_queue(rx_queue, channel) - efx_farch_rx_fini(rx_queue); - efx_for_each_channel_tx_queue(tx_queue, channel) - efx_farch_tx_fini(tx_queue); - } - } - - return rc; -} - -/* Reset queue and flush accounting after FLR - * - * One possible cause of FLR recovery is that DMA may be failing (eg. if bus - * mastering was disabled), in which case we don't receive (RXQ) flush - * completion events. This means that efx->rxq_flush_outstanding remained at 4 - * after the FLR; also, efx->active_queues was non-zero (as no flush completion - * events were received, and we didn't go through efx_check_tx_flush_complete()) - * If we don't fix this up, on the next call to efx_realloc_channels() we won't - * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 - * for batched flush requests; and the efx->active_queues gets messed up because - * we keep incrementing for the newly initialised queues, but it never went to - * zero previously. Then we get a timeout every time we try to restart the - * queues, as it doesn't go back to zero when we should be flushing the queues. - */ -void efx_farch_finish_flr(struct efx_nic *efx) -{ - atomic_set(&efx->rxq_flush_pending, 0); - atomic_set(&efx->rxq_flush_outstanding, 0); - atomic_set(&efx->active_queues, 0); -} - - -/************************************************************************** - * - * Event queue processing - * Event queues are processed by per-channel tasklets. - * - **************************************************************************/ - -/* Update a channel's event queue's read pointer (RPTR) register - * - * This writes the EVQ_RPTR_REG register for the specified channel's - * event queue. 
- */
-void efx_farch_ev_read_ack(struct efx_channel *channel)
-{
- efx_dword_t reg;
- struct efx_nic *efx = channel->efx;
-
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
- channel->eventq_read_ptr & channel->eventq_mask);
-
- /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
- * of 4 bytes, but it is really 16 bytes just like later revisions.
- */
- efx_writed(efx, &reg,
- efx->type->evq_rptr_tbl_base +
- FR_BZ_EVQ_RPTR_STEP * channel->channel);
-}
-
-/* Use HW to insert a SW defined event */
-void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event)
-{
- efx_oword_t drv_ev_reg;
-
- BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
- FRF_AZ_DRV_EV_DATA_WIDTH != 64);
- drv_ev_reg.u32[0] = event->u32[0];
- drv_ev_reg.u32[1] = event->u32[1];
- drv_ev_reg.u32[2] = 0;
- drv_ev_reg.u32[3] = 0;
- EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
- efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
-}
-
-static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
-{
- efx_qword_t event;
-
- EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
- FSE_AZ_EV_CODE_DRV_GEN_EV,
- FSF_AZ_DRV_GEN_EV_MAGIC, magic);
- efx_farch_generate_event(channel->efx, channel->channel, &event);
-}
-
-/* Handle a transmit completion event
- *
- * The NIC batches TX completion events; the message we receive is of
- * the form "complete all TX events up to this index".
- */
-static void
-efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
-{
- unsigned int tx_ev_desc_ptr;
- unsigned int tx_ev_q_label;
- struct efx_tx_queue *tx_queue;
- struct efx_nic *efx = channel->efx;
-
- if (unlikely(READ_ONCE(efx->reset_pending)))
- return;
-
- if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = channel->tx_queue +
- (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
- /* Rewrite the FIFO write pointer */
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = channel->tx_queue +
- (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
-
- netif_tx_lock(efx->net_dev);
- efx_farch_notify_tx_desc(tx_queue);
- netif_tx_unlock(efx->net_dev);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else {
- netif_err(efx, tx_err, efx->net_dev,
- "channel %d unexpected TX event "
- EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
- }
-}
-
-/* Detect errors included in the rx_evt_pkt_ok bit.
*/ -static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, - const efx_qword_t *event) -{ - struct efx_channel *channel = efx_rx_queue_channel(rx_queue); - struct efx_nic *efx = rx_queue->efx; - bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; - bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; - bool rx_ev_frm_trunc, rx_ev_tobe_disc; - bool rx_ev_other_err, rx_ev_pause_frm; - - rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); - rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, - FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); - rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, - FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); - rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, - FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); - rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); - rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); - rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); - - /* Every error apart from tobe_disc and pause_frm */ - rx_ev_other_err = (rx_ev_tcp_udp_chksum_err | - rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | - rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); - - /* Count errors that are not in MAC stats. Ignore expected - * checksum errors during self-test. */ - if (rx_ev_frm_trunc) - ++channel->n_rx_frm_trunc; - else if (rx_ev_tobe_disc) - ++channel->n_rx_tobe_disc; - else if (!efx->loopback_selftest) { - if (rx_ev_ip_hdr_chksum_err) - ++channel->n_rx_ip_hdr_chksum_err; - else if (rx_ev_tcp_udp_chksum_err) - ++channel->n_rx_tcp_udp_chksum_err; - } - - /* TOBE_DISC is expected on unicast mismatches; don't print out an - * error message. FRM_TRUNC indicates RXDP dropped the packet due - * to a FIFO overflow. - */ -#ifdef DEBUG - if (rx_ev_other_err && net_ratelimit()) { - netif_dbg(efx, rx_err, efx->net_dev, - " RX queue %d unexpected RX event " - EFX_QWORD_FMT "%s%s%s%s%s%s%s\n", - efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), - rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", - rx_ev_ip_hdr_chksum_err ? - " [IP_HDR_CHKSUM_ERR]" : "", - rx_ev_tcp_udp_chksum_err ? - " [TCP_UDP_CHKSUM_ERR]" : "", - rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", - rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", - rx_ev_tobe_disc ? " [TOBE_DISC]" : "", - rx_ev_pause_frm ? " [PAUSE]" : ""); - } -#else - (void) rx_ev_other_err; -#endif - - if (efx->net_dev->features & NETIF_F_RXALL) - /* don't discard frame for CRC error */ - rx_ev_eth_crc_err = false; - - /* The frame must be discarded if any of these are true. */ - return (rx_ev_eth_crc_err | rx_ev_frm_trunc | - rx_ev_tobe_disc | rx_ev_pause_frm) ? - EFX_RX_PKT_DISCARD : 0; -} - -/* Handle receive events that are not in-order. Return true if this - * can be handled as a partial packet discard, false if it's more - * serious. 
- */ -static bool -efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) -{ - struct efx_channel *channel = efx_rx_queue_channel(rx_queue); - struct efx_nic *efx = rx_queue->efx; - unsigned expected, dropped; - - if (rx_queue->scatter_n && - index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & - rx_queue->ptr_mask)) { - ++channel->n_rx_nodesc_trunc; - return true; - } - - expected = rx_queue->removed_count & rx_queue->ptr_mask; - dropped = (index - expected) & rx_queue->ptr_mask; - netif_info(efx, rx_err, efx->net_dev, - "dropped %d events (index=%d expected=%d)\n", - dropped, index, expected); - - efx_schedule_reset(efx, RESET_TYPE_DISABLE); - return false; -} - -/* Handle a packet received event - * - * The NIC gives a "discard" flag if it's a unicast packet with the - * wrong destination address - * Also "is multicast" and "matches multicast filter" flags can be used to - * discard non-matching multicast packets. - */ -static void -efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) -{ - unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; - unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; - unsigned expected_ptr; - bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; - u16 flags; - struct efx_rx_queue *rx_queue; - struct efx_nic *efx = channel->efx; - - if (unlikely(READ_ONCE(efx->reset_pending))) - return; - - rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); - rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); - WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != - channel->channel); - - rx_queue = efx_channel_get_rx_queue(channel); - - rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); - expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & - rx_queue->ptr_mask); - - /* Check for partial drops and other errors */ - if (unlikely(rx_ev_desc_ptr != expected_ptr) || - unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { - if (rx_ev_desc_ptr != expected_ptr && - !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) - return; - - /* Discard all pending fragments */ - if (rx_queue->scatter_n) { - efx_rx_packet( - rx_queue, - rx_queue->removed_count & rx_queue->ptr_mask, - rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); - rx_queue->removed_count += rx_queue->scatter_n; - rx_queue->scatter_n = 0; - } - - /* Return if there is no new fragment */ - if (rx_ev_desc_ptr != expected_ptr) - return; - - /* Discard new fragment if not SOP */ - if (!rx_ev_sop) { - efx_rx_packet( - rx_queue, - rx_queue->removed_count & rx_queue->ptr_mask, - 1, 0, EFX_RX_PKT_DISCARD); - ++rx_queue->removed_count; - return; - } - } - - ++rx_queue->scatter_n; - if (rx_ev_cont) - return; - - rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); - rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); - rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); - - if (likely(rx_ev_pkt_ok)) { - /* If packet is marked as OK then we can rely on the - * hardware checksum and classification. 
- */ - flags = 0; - switch (rx_ev_hdr_type) { - case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: - flags |= EFX_RX_PKT_TCP; - fallthrough; - case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: - flags |= EFX_RX_PKT_CSUMMED; - fallthrough; - case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: - case FSE_AZ_RX_EV_HDR_TYPE_OTHER: - break; - } - } else { - flags = efx_farch_handle_rx_not_ok(rx_queue, event); - } - - /* Detect multicast packets that didn't match the filter */ - rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); - if (rx_ev_mcast_pkt) { - unsigned int rx_ev_mcast_hash_match = - EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); - - if (unlikely(!rx_ev_mcast_hash_match)) { - ++channel->n_rx_mcast_mismatch; - flags |= EFX_RX_PKT_DISCARD; - } - } - - channel->irq_mod_score += 2; - - /* Handle received packet */ - efx_rx_packet(rx_queue, - rx_queue->removed_count & rx_queue->ptr_mask, - rx_queue->scatter_n, rx_ev_byte_cnt, flags); - rx_queue->removed_count += rx_queue->scatter_n; - rx_queue->scatter_n = 0; -} - -/* If this flush done event corresponds to a &struct efx_tx_queue, then - * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue - * of all transmit completions. - */ -static void -efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) -{ - struct efx_tx_queue *tx_queue; - struct efx_channel *channel; - int qid; - - qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); - if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) { - channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL); - tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL); - if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) - efx_farch_magic_event(tx_queue->channel, - EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); - } -} - -/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush - * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add - * the RX queue back to the mask of RX queues in need of flushing. - */ -static void -efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) -{ - struct efx_channel *channel; - struct efx_rx_queue *rx_queue; - int qid; - bool failed; - - qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); - failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); - if (qid >= efx->n_channels) - return; - channel = efx_get_channel(efx, qid); - if (!efx_channel_has_rx_queue(channel)) - return; - rx_queue = efx_channel_get_rx_queue(channel); - - if (failed) { - netif_info(efx, hw, efx->net_dev, - "RXQ %d flush retry\n", qid); - rx_queue->flush_pending = true; - atomic_inc(&efx->rxq_flush_pending); - } else { - efx_farch_magic_event(efx_rx_queue_channel(rx_queue), - EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); - } - atomic_dec(&efx->rxq_flush_outstanding); - if (efx_farch_flush_wake(efx)) - wake_up(&efx->flush_wq); -} - -static void -efx_farch_handle_drain_event(struct efx_channel *channel) -{ - struct efx_nic *efx = channel->efx; - - WARN_ON(atomic_read(&efx->active_queues) == 0); - atomic_dec(&efx->active_queues); - if (efx_farch_flush_wake(efx)) - wake_up(&efx->flush_wq); -} - -static void efx_farch_handle_generated_event(struct efx_channel *channel, - efx_qword_t *event) -{ - struct efx_nic *efx = channel->efx; - struct efx_rx_queue *rx_queue = - efx_channel_has_rx_queue(channel) ? 
- efx_channel_get_rx_queue(channel) : NULL; - unsigned magic, code; - - magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); - code = _EFX_CHANNEL_MAGIC_CODE(magic); - - if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { - channel->event_test_cpu = raw_smp_processor_id(); - } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { - /* The queue must be empty, so we won't receive any rx - * events, so efx_process_channel() won't refill the - * queue. Refill it here */ - efx_fast_push_rx_descriptors(rx_queue, true); - } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { - efx_farch_handle_drain_event(channel); - } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { - efx_farch_handle_drain_event(channel); - } else { - netif_dbg(efx, hw, efx->net_dev, "channel %d received " - "generated event "EFX_QWORD_FMT"\n", - channel->channel, EFX_QWORD_VAL(*event)); - } -} - -static void -efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) -{ - struct efx_nic *efx = channel->efx; - unsigned int ev_sub_code; - unsigned int ev_sub_data; - - ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); - ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); - - switch (ev_sub_code) { - case FSE_AZ_TX_DESCQ_FLS_DONE_EV: - netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", - channel->channel, ev_sub_data); - efx_farch_handle_tx_flush_done(efx, event); -#ifdef CONFIG_SFC_SRIOV - efx_siena_sriov_tx_flush_done(efx, event); -#endif - break; - case FSE_AZ_RX_DESCQ_FLS_DONE_EV: - netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", - channel->channel, ev_sub_data); - efx_farch_handle_rx_flush_done(efx, event); -#ifdef CONFIG_SFC_SRIOV - efx_siena_sriov_rx_flush_done(efx, event); -#endif - break; - case FSE_AZ_EVQ_INIT_DONE_EV: - netif_dbg(efx, hw, efx->net_dev, - "channel %d EVQ %d initialised\n", - channel->channel, ev_sub_data); - break; - case FSE_AZ_SRM_UPD_DONE_EV: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d SRAM update done\n", channel->channel); - break; - case FSE_AZ_WAKE_UP_EV: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d RXQ %d wakeup event\n", - channel->channel, ev_sub_data); - break; - case FSE_AZ_TIMER_EV: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d RX queue %d timer expired\n", - channel->channel, ev_sub_data); - break; - case FSE_AA_RX_RECOVER_EV: - netif_err(efx, rx_err, efx->net_dev, - "channel %d seen DRIVER RX_RESET event. " - "Resetting.\n", channel->channel); - atomic_inc(&efx->rx_reset); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); - break; - case FSE_BZ_RX_DSC_ERROR_EV: - if (ev_sub_data < EFX_VI_BASE) { - netif_err(efx, rx_err, efx->net_dev, - "RX DMA Q %d reports descriptor fetch error." - " RX Q %d is disabled.\n", ev_sub_data, - ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); - } -#ifdef CONFIG_SFC_SRIOV - else - efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); -#endif - break; - case FSE_BZ_TX_DSC_ERROR_EV: - if (ev_sub_data < EFX_VI_BASE) { - netif_err(efx, tx_err, efx->net_dev, - "TX DMA Q %d reports descriptor fetch error." 
- " TX Q %d is disabled.\n", ev_sub_data, - ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); - } -#ifdef CONFIG_SFC_SRIOV - else - efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); -#endif - break; - default: - netif_vdbg(efx, hw, efx->net_dev, - "channel %d unknown driver event code %d " - "data %04x\n", channel->channel, ev_sub_code, - ev_sub_data); - break; - } -} - -int efx_farch_ev_process(struct efx_channel *channel, int budget) -{ - struct efx_nic *efx = channel->efx; - unsigned int read_ptr; - efx_qword_t event, *p_event; - int ev_code; - int spent = 0; - - if (budget <= 0) - return spent; - - read_ptr = channel->eventq_read_ptr; - - for (;;) { - p_event = efx_event(channel, read_ptr); - event = *p_event; - - if (!efx_event_present(&event)) - /* End of events */ - break; - - netif_vdbg(channel->efx, intr, channel->efx->net_dev, - "channel %d event is "EFX_QWORD_FMT"\n", - channel->channel, EFX_QWORD_VAL(event)); - - /* Clear this event by marking it all ones */ - EFX_SET_QWORD(*p_event); - - ++read_ptr; - - ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); - - switch (ev_code) { - case FSE_AZ_EV_CODE_RX_EV: - efx_farch_handle_rx_event(channel, &event); - if (++spent == budget) - goto out; - break; - case FSE_AZ_EV_CODE_TX_EV: - efx_farch_handle_tx_event(channel, &event); - break; - case FSE_AZ_EV_CODE_DRV_GEN_EV: - efx_farch_handle_generated_event(channel, &event); - break; - case FSE_AZ_EV_CODE_DRIVER_EV: - efx_farch_handle_driver_event(channel, &event); - break; -#ifdef CONFIG_SFC_SRIOV - case FSE_CZ_EV_CODE_USER_EV: - efx_siena_sriov_event(channel, &event); - break; -#endif - case FSE_CZ_EV_CODE_MCDI_EV: - efx_mcdi_process_event(channel, &event); - break; - case FSE_AZ_EV_CODE_GLOBAL_EV: - if (efx->type->handle_global_event && - efx->type->handle_global_event(channel, &event)) - break; - fallthrough; - default: - netif_err(channel->efx, hw, channel->efx->net_dev, - "channel %d unknown event type %d (data " - EFX_QWORD_FMT ")\n", channel->channel, - ev_code, EFX_QWORD_VAL(event)); - } - } - -out: - channel->eventq_read_ptr = read_ptr; - return spent; -} - -/* Allocate buffer table entries for event queue */ -int efx_farch_ev_probe(struct efx_channel *channel) -{ - struct efx_nic *efx = channel->efx; - unsigned entries; - - entries = channel->eventq_mask + 1; - return efx_alloc_special_buffer(efx, &channel->eventq, - entries * sizeof(efx_qword_t)); -} - -int efx_farch_ev_init(struct efx_channel *channel) -{ - efx_oword_t reg; - struct efx_nic *efx = channel->efx; - - netif_dbg(efx, hw, efx->net_dev, - "channel %d event queue in special buffers %d-%d\n", - channel->channel, channel->eventq.index, - channel->eventq.index + channel->eventq.entries - 1); - - EFX_POPULATE_OWORD_3(reg, - FRF_CZ_TIMER_Q_EN, 1, - FRF_CZ_HOST_NOTIFY_MODE, 0, - FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); - efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); - - /* Pin event queue buffer */ - efx_init_special_buffer(efx, &channel->eventq); - - /* Fill event queue with all ones (i.e. 
empty events) */ - memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); - - /* Push event queue to card */ - EFX_POPULATE_OWORD_3(reg, - FRF_AZ_EVQ_EN, 1, - FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), - FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); - efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, - channel->channel); - - return 0; -} - -void efx_farch_ev_fini(struct efx_channel *channel) -{ - efx_oword_t reg; - struct efx_nic *efx = channel->efx; - - /* Remove event queue from card */ - EFX_ZERO_OWORD(reg); - efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, - channel->channel); - efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); - - /* Unpin event queue */ - efx_fini_special_buffer(efx, &channel->eventq); -} - -/* Free buffers backing event queue */ -void efx_farch_ev_remove(struct efx_channel *channel) -{ - efx_free_special_buffer(channel->efx, &channel->eventq); -} - - -void efx_farch_ev_test_generate(struct efx_channel *channel) -{ - efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); -} - -void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) -{ - efx_farch_magic_event(efx_rx_queue_channel(rx_queue), - EFX_CHANNEL_MAGIC_FILL(rx_queue)); -} - -/************************************************************************** - * - * Hardware interrupts - * The hardware interrupt handler does very little work; all the event - * queue processing is carried out by per-channel tasklets. - * - **************************************************************************/ - -/* Enable/disable/generate interrupts */ -static inline void efx_farch_interrupts(struct efx_nic *efx, - bool enabled, bool force) -{ - efx_oword_t int_en_reg_ker; - - EFX_POPULATE_OWORD_3(int_en_reg_ker, - FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, - FRF_AZ_KER_INT_KER, force, - FRF_AZ_DRV_INT_EN_KER, enabled); - efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); -} - -void efx_farch_irq_enable_master(struct efx_nic *efx) -{ - EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); - wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ - - efx_farch_interrupts(efx, true, false); -} - -void efx_farch_irq_disable_master(struct efx_nic *efx) -{ - /* Disable interrupts */ - efx_farch_interrupts(efx, false, false); -} - -/* Generate a test interrupt - * Interrupt must already have been enabled, otherwise nasty things - * may happen. - */ -int efx_farch_irq_test_generate(struct efx_nic *efx) -{ - efx_farch_interrupts(efx, true, true); - return 0; -} - -/* Process a fatal interrupt - * Disable bus mastering ASAP and schedule a reset - */ -irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) -{ - efx_oword_t *int_ker = efx->irq_status.addr; - efx_oword_t fatal_intr; - int error, mem_perr; - - efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); - error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); - - netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " - EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), - EFX_OWORD_VAL(fatal_intr), - error ? 
"disabling bus mastering" : "no recognised error"); - - /* If this is a memory parity error dump which blocks are offending */ - mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || - EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); - if (mem_perr) { - efx_oword_t reg; - efx_reado(efx, ®, FR_AZ_MEM_STAT); - netif_err(efx, hw, efx->net_dev, - "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", - EFX_OWORD_VAL(reg)); - } - - /* Disable both devices */ - pci_clear_master(efx->pci_dev); - efx_farch_irq_disable_master(efx); - - /* Count errors and reset or disable the NIC accordingly */ - if (efx->int_error_count == 0 || - time_after(jiffies, efx->int_error_expire)) { - efx->int_error_count = 0; - efx->int_error_expire = - jiffies + EFX_INT_ERROR_EXPIRE * HZ; - } - if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { - netif_err(efx, hw, efx->net_dev, - "SYSTEM ERROR - reset scheduled\n"); - efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); - } else { - netif_err(efx, hw, efx->net_dev, - "SYSTEM ERROR - max number of errors seen." - "NIC will be disabled\n"); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); - } - - return IRQ_HANDLED; -} - -/* Handle a legacy interrupt - * Acknowledges the interrupt and schedule event queue processing. - */ -irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) -{ - struct efx_nic *efx = dev_id; - bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); - efx_oword_t *int_ker = efx->irq_status.addr; - irqreturn_t result = IRQ_NONE; - struct efx_channel *channel; - efx_dword_t reg; - u32 queues; - int syserr; - - /* Read the ISR which also ACKs the interrupts */ - efx_readd(efx, ®, FR_BZ_INT_ISR0); - queues = EFX_EXTRACT_DWORD(reg, 0, 31); - - /* Legacy interrupts are disabled too late by the EEH kernel - * code. Disable them earlier. - * If an EEH error occurred, the read will have returned all ones. - */ - if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && - !efx->eeh_disabled_legacy_irq) { - disable_irq_nosync(efx->legacy_irq); - efx->eeh_disabled_legacy_irq = true; - } - - /* Handle non-event-queue sources */ - if (queues & (1U << efx->irq_level) && soft_enabled) { - syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); - if (unlikely(syserr)) - return efx_farch_fatal_interrupt(efx); - efx->last_irq_cpu = raw_smp_processor_id(); - } - - if (queues != 0) { - efx->irq_zero_count = 0; - - /* Schedule processing of any interrupting queues */ - if (likely(soft_enabled)) { - efx_for_each_channel(channel, efx) { - if (queues & 1) - efx_schedule_channel_irq(channel); - queues >>= 1; - } - } - result = IRQ_HANDLED; - - } else { - efx_qword_t *event; - - /* Legacy ISR read can return zero once (SF bug 15783) */ - - /* We can't return IRQ_HANDLED more than once on seeing ISR=0 - * because this might be a shared interrupt. */ - if (efx->irq_zero_count++ == 0) - result = IRQ_HANDLED; - - /* Ensure we schedule or rearm all event queues */ - if (likely(soft_enabled)) { - efx_for_each_channel(channel, efx) { - event = efx_event(channel, - channel->eventq_read_ptr); - if (efx_event_present(event)) - efx_schedule_channel_irq(channel); - else - efx_farch_ev_read_ack(channel); - } - } - } - - if (result == IRQ_HANDLED) - netif_vdbg(efx, intr, efx->net_dev, - "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", - irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); - - return result; -} - -/* Handle an MSI interrupt - * - * Handle an MSI hardware interrupt. This routine schedules event - * queue processing. 
No interrupt acknowledgement cycle is necessary. - * Also, we never need to check that the interrupt is for us, since - * MSI interrupts cannot be shared. - */ -irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) -{ - struct efx_msi_context *context = dev_id; - struct efx_nic *efx = context->efx; - efx_oword_t *int_ker = efx->irq_status.addr; - int syserr; - - netif_vdbg(efx, intr, efx->net_dev, - "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", - irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); - - if (!likely(READ_ONCE(efx->irq_soft_enabled))) - return IRQ_HANDLED; - - /* Handle non-event-queue sources */ - if (context->index == efx->irq_level) { - syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); - if (unlikely(syserr)) - return efx_farch_fatal_interrupt(efx); - efx->last_irq_cpu = raw_smp_processor_id(); - } - - /* Schedule processing of the channel */ - efx_schedule_channel_irq(efx->channel[context->index]); - - return IRQ_HANDLED; -} - -/* Setup RSS indirection table. - * This maps from the hash value of the packet to RXQ - */ -void efx_farch_rx_push_indir_table(struct efx_nic *efx) -{ - size_t i = 0; - efx_dword_t dword; - - BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != - FR_BZ_RX_INDIRECTION_TBL_ROWS); - - for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { - EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, - efx->rss_context.rx_indir_table[i]); - efx_writed(efx, &dword, - FR_BZ_RX_INDIRECTION_TBL + - FR_BZ_RX_INDIRECTION_TBL_STEP * i); - } -} - -void efx_farch_rx_pull_indir_table(struct efx_nic *efx) -{ - size_t i = 0; - efx_dword_t dword; - - BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != - FR_BZ_RX_INDIRECTION_TBL_ROWS); - - for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { - efx_readd(efx, &dword, - FR_BZ_RX_INDIRECTION_TBL + - FR_BZ_RX_INDIRECTION_TBL_STEP * i); - efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); - } -} - -/* Looks at available SRAM resources and works out how many queues we - * can support, and where things like descriptor caches should live. - * - * SRAM is split up as follows: - * 0 buftbl entries for channels - * efx->vf_buftbl_base buftbl entries for SR-IOV - * efx->rx_dc_base RX descriptor caches - * efx->tx_dc_base TX descriptor caches - */ -void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) -{ - unsigned vi_count, total_tx_channels; -#ifdef CONFIG_SFC_SRIOV - struct siena_nic_data *nic_data; - unsigned buftbl_min; -#endif - - total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; - vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL); - -#ifdef CONFIG_SFC_SRIOV - nic_data = efx->nic_data; - /* Account for the buffer table entries backing the datapath channels - * and the descriptor caches for those channels. 
- */ - buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + - total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE + - efx->n_channels * EFX_MAX_EVQ_SIZE) - * sizeof(efx_qword_t) / EFX_BUF_SIZE); - if (efx->type->sriov_wanted) { - if (efx->type->sriov_wanted(efx)) { - unsigned vi_dc_entries, buftbl_free; - unsigned entries_per_vf, vf_limit; - - nic_data->vf_buftbl_base = buftbl_min; - - vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; - vi_count = max(vi_count, EFX_VI_BASE); - buftbl_free = (sram_lim_qw - buftbl_min - - vi_count * vi_dc_entries); - - entries_per_vf = ((vi_dc_entries + - EFX_VF_BUFTBL_PER_VI) * - efx_vf_size(efx)); - vf_limit = min(buftbl_free / entries_per_vf, - (1024U - EFX_VI_BASE) >> efx->vi_scale); - - if (efx->vf_count > vf_limit) { - netif_err(efx, probe, efx->net_dev, - "Reducing VF count from from %d to %d\n", - efx->vf_count, vf_limit); - efx->vf_count = vf_limit; - } - vi_count += efx->vf_count * efx_vf_size(efx); - } - } -#endif - - efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; - efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; -} - -u32 efx_farch_fpga_ver(struct efx_nic *efx) -{ - efx_oword_t altera_build; - efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); - return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); -} - -void efx_farch_init_common(struct efx_nic *efx) -{ - efx_oword_t temp; - - /* Set positions of descriptor caches in SRAM. */ - EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); - efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); - efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); - - /* Set TX descriptor cache size. */ - BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); - efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); - - /* Set RX descriptor cache size. Set low watermark to size-8, as - * this allows most efficient prefetching. - */ - BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); - efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); - EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); - efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); - - /* Program INT_KER address */ - EFX_POPULATE_OWORD_2(temp, - FRF_AZ_NORM_INT_VEC_DIS_KER, - EFX_INT_MODE_USE_MSI(efx), - FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); - efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); - - if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) - /* Use an interrupt level unused by event queues */ - efx->irq_level = 0x1f; - else - /* Use a valid MSI-X vector */ - efx->irq_level = 0; - - /* Enable all the genuinely fatal interrupts. (They are still - * masked by the overall interrupt mask, controlled by - * falcon_interrupts()). - * - * Note: All other fatal interrupts are enabled - */ - EFX_POPULATE_OWORD_3(temp, - FRF_AZ_ILL_ADR_INT_KER_EN, 1, - FRF_AZ_RBUF_OWN_INT_KER_EN, 1, - FRF_AZ_TBUF_OWN_INT_KER_EN, 1); - EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); - EFX_INVERT_OWORD(temp); - efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); - - /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be - * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
- */ - efx_reado(efx, &temp, FR_AZ_TX_RESERVED); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); - /* Enable SW_EV to inherit in char driver - assume harmless here */ - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); - /* Prefetch threshold 2 => fetch when descriptor cache half empty */ - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); - /* Disable hardware watchdog which can misfire */ - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); - /* Squash TX of packets of 16 bytes or less */ - EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); - efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); - - EFX_POPULATE_OWORD_4(temp, - /* Default values */ - FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, - FRF_BZ_TX_PACE_SB_AF, 0xb, - FRF_BZ_TX_PACE_FB_BASE, 0, - /* Allow large pace values in the fast bin. */ - FRF_BZ_TX_PACE_BIN_TH, - FFE_BZ_TX_PACE_RESERVED); - efx_writeo(efx, &temp, FR_BZ_TX_PACE); -} - -/************************************************************************** - * - * Filter tables - * - ************************************************************************** - */ - -/* "Fudge factors" - difference between programmed value and actual depth. - * Due to pipelined implementation we need to program H/W with a value that - * is larger than the hop limit we want. - */ -#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 -#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 - -/* Hard maximum search limit. Hardware will time-out beyond 200-something. - * We also need to avoid infinite loops in efx_farch_filter_search() when the - * table is full. - */ -#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 - -/* Don't try very hard to find space for performance hints, as this is - * counter-productive. 
*/ -#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 - -enum efx_farch_filter_type { - EFX_FARCH_FILTER_TCP_FULL = 0, - EFX_FARCH_FILTER_TCP_WILD, - EFX_FARCH_FILTER_UDP_FULL, - EFX_FARCH_FILTER_UDP_WILD, - EFX_FARCH_FILTER_MAC_FULL = 4, - EFX_FARCH_FILTER_MAC_WILD, - EFX_FARCH_FILTER_UC_DEF = 8, - EFX_FARCH_FILTER_MC_DEF, - EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ -}; - -enum efx_farch_filter_table_id { - EFX_FARCH_FILTER_TABLE_RX_IP = 0, - EFX_FARCH_FILTER_TABLE_RX_MAC, - EFX_FARCH_FILTER_TABLE_RX_DEF, - EFX_FARCH_FILTER_TABLE_TX_MAC, - EFX_FARCH_FILTER_TABLE_COUNT, -}; - -enum efx_farch_filter_index { - EFX_FARCH_FILTER_INDEX_UC_DEF, - EFX_FARCH_FILTER_INDEX_MC_DEF, - EFX_FARCH_FILTER_SIZE_RX_DEF, -}; - -struct efx_farch_filter_spec { - u8 type:4; - u8 priority:4; - u8 flags; - u16 dmaq_id; - u32 data[3]; -}; - -struct efx_farch_filter_table { - enum efx_farch_filter_table_id id; - u32 offset; /* address of table relative to BAR */ - unsigned size; /* number of entries */ - unsigned step; /* step between entries */ - unsigned used; /* number currently used */ - unsigned long *used_bitmap; - struct efx_farch_filter_spec *spec; - unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; -}; - -struct efx_farch_filter_state { - struct rw_semaphore lock; /* Protects table contents */ - struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; -}; - -static void -efx_farch_filter_table_clear_entry(struct efx_nic *efx, - struct efx_farch_filter_table *table, - unsigned int filter_idx); - -/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit - * key derived from the n-tuple. The initial LFSR state is 0xffff. */ -static u16 efx_farch_filter_hash(u32 key) -{ - u16 tmp; - - /* First 16 rounds */ - tmp = 0x1fff ^ key >> 16; - tmp = tmp ^ tmp >> 3 ^ tmp >> 6; - tmp = tmp ^ tmp >> 9; - /* Last 16 rounds */ - tmp = tmp ^ tmp << 13 ^ key; - tmp = tmp ^ tmp >> 3 ^ tmp >> 6; - return tmp ^ tmp >> 9; -} - -/* To allow for hash collisions, filter search continues at these - * increments from the first possible entry selected by the hash. */ -static u16 efx_farch_filter_increment(u32 key) -{ - return key * 2 - 1; -} - -static enum efx_farch_filter_table_id -efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) -{ - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_TCP_FULL >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_TCP_WILD >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_UDP_FULL >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != - (EFX_FARCH_FILTER_UDP_WILD >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != - (EFX_FARCH_FILTER_MAC_FULL >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != - (EFX_FARCH_FILTER_MAC_WILD >> 2)); - BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != - EFX_FARCH_FILTER_TABLE_RX_MAC + 2); - return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); -} - -static void efx_farch_filter_push_rx_config(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table; - efx_oword_t filter_ctl; - - efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; - if (table->size) { - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, - table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - } - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; - if (table->size) { - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, - table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, - !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & - EFX_FILTER_FLAG_RX_RSS)); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, - table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, - !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & - EFX_FILTER_FLAG_RX_RSS)); - - /* There is a single bit to enable RX scatter for all - * unmatched packets. Only set it if scatter is - * enabled in both filter specs. - */ - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, - !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & - table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & - EFX_FILTER_FLAG_RX_SCATTER)); - } else { - /* We don't expose 'default' filters because unmatched - * packets always go to the queue number found in the - * RSS table. But we still need to set the RX scatter - * bit here. 
- */ - EFX_SET_OWORD_FIELD( - filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, - efx->rx_scatter); - } - - efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); -} - -static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table; - efx_oword_t tx_cfg; - - efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); - - table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; - if (table->size) { - EFX_SET_OWORD_FIELD( - tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, - table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); - EFX_SET_OWORD_FIELD( - tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, - table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + - EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); - } - - efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); -} - -static int -efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, - const struct efx_filter_spec *gen_spec) -{ - bool is_full = false; - - if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) - return -EINVAL; - - spec->priority = gen_spec->priority; - spec->flags = gen_spec->flags; - spec->dmaq_id = gen_spec->dmaq_id; - - switch (gen_spec->match_flags) { - case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | - EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): - is_full = true; - fallthrough; - case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { - __be32 rhost, host1, host2; - __be16 rport, port1, port2; - - EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); - - if (gen_spec->ether_type != htons(ETH_P_IP)) - return -EPROTONOSUPPORT; - if (gen_spec->loc_port == 0 || - (is_full && gen_spec->rem_port == 0)) - return -EADDRNOTAVAIL; - switch (gen_spec->ip_proto) { - case IPPROTO_TCP: - spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : - EFX_FARCH_FILTER_TCP_WILD); - break; - case IPPROTO_UDP: - spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : - EFX_FARCH_FILTER_UDP_WILD); - break; - default: - return -EPROTONOSUPPORT; - } - - /* Filter is constructed in terms of source and destination, - * with the odd wrinkle that the ports are swapped in a UDP - * wildcard filter. We need to convert from local and remote - * (= zero for wildcard) addresses. - */ - rhost = is_full ? gen_spec->rem_host[0] : 0; - rport = is_full ? gen_spec->rem_port : 0; - host1 = rhost; - host2 = gen_spec->loc_host[0]; - if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { - port1 = gen_spec->loc_port; - port2 = rport; - } else { - port1 = rport; - port2 = gen_spec->loc_port; - } - spec->data[0] = ntohl(host1) << 16 | ntohs(port1); - spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; - spec->data[2] = ntohl(host2); - - break; - } - - case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: - is_full = true; - fallthrough; - case EFX_FILTER_MATCH_LOC_MAC: - spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : - EFX_FARCH_FILTER_MAC_WILD); - spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; - spec->data[1] = (gen_spec->loc_mac[2] << 24 | - gen_spec->loc_mac[3] << 16 | - gen_spec->loc_mac[4] << 8 | - gen_spec->loc_mac[5]); - spec->data[2] = (gen_spec->loc_mac[0] << 8 | - gen_spec->loc_mac[1]); - break; - - case EFX_FILTER_MATCH_LOC_MAC_IG: - spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 
- EFX_FARCH_FILTER_MC_DEF : - EFX_FARCH_FILTER_UC_DEF); - memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ - break; - - default: - return -EPROTONOSUPPORT; - } - - return 0; -} - -static void -efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, - const struct efx_farch_filter_spec *spec) -{ - bool is_full = false; - - /* *gen_spec should be completely initialised, to be consistent - * with efx_filter_init_{rx,tx}() and in case we want to copy - * it back to userland. - */ - memset(gen_spec, 0, sizeof(*gen_spec)); - - gen_spec->priority = spec->priority; - gen_spec->flags = spec->flags; - gen_spec->dmaq_id = spec->dmaq_id; - - switch (spec->type) { - case EFX_FARCH_FILTER_TCP_FULL: - case EFX_FARCH_FILTER_UDP_FULL: - is_full = true; - fallthrough; - case EFX_FARCH_FILTER_TCP_WILD: - case EFX_FARCH_FILTER_UDP_WILD: { - __be32 host1, host2; - __be16 port1, port2; - - gen_spec->match_flags = - EFX_FILTER_MATCH_ETHER_TYPE | - EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; - if (is_full) - gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | - EFX_FILTER_MATCH_REM_PORT); - gen_spec->ether_type = htons(ETH_P_IP); - gen_spec->ip_proto = - (spec->type == EFX_FARCH_FILTER_TCP_FULL || - spec->type == EFX_FARCH_FILTER_TCP_WILD) ? - IPPROTO_TCP : IPPROTO_UDP; - - host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); - port1 = htons(spec->data[0]); - host2 = htonl(spec->data[2]); - port2 = htons(spec->data[1] >> 16); - if (spec->flags & EFX_FILTER_FLAG_TX) { - gen_spec->loc_host[0] = host1; - gen_spec->rem_host[0] = host2; - } else { - gen_spec->loc_host[0] = host2; - gen_spec->rem_host[0] = host1; - } - if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ - (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { - gen_spec->loc_port = port1; - gen_spec->rem_port = port2; - } else { - gen_spec->loc_port = port2; - gen_spec->rem_port = port1; - } - - break; - } - - case EFX_FARCH_FILTER_MAC_FULL: - is_full = true; - fallthrough; - case EFX_FARCH_FILTER_MAC_WILD: - gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; - if (is_full) - gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; - gen_spec->loc_mac[0] = spec->data[2] >> 8; - gen_spec->loc_mac[1] = spec->data[2]; - gen_spec->loc_mac[2] = spec->data[1] >> 24; - gen_spec->loc_mac[3] = spec->data[1] >> 16; - gen_spec->loc_mac[4] = spec->data[1] >> 8; - gen_spec->loc_mac[5] = spec->data[1]; - gen_spec->outer_vid = htons(spec->data[0]); - break; - - case EFX_FARCH_FILTER_UC_DEF: - case EFX_FARCH_FILTER_MC_DEF: - gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; - gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; - break; - - default: - WARN_ON(1); - break; - } -} - -static void -efx_farch_filter_init_rx_auto(struct efx_nic *efx, - struct efx_farch_filter_spec *spec) -{ - /* If there's only one channel then disable RSS for non VF - * traffic, thereby allowing VFs to use RSS when the PF can't. - */ - spec->priority = EFX_FILTER_PRI_AUTO; - spec->flags = (EFX_FILTER_FLAG_RX | - (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | - (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); - spec->dmaq_id = 0; -} - -/* Build a filter entry and return its n-tuple key. 
*/ -static u32 efx_farch_filter_build(efx_oword_t *filter, - struct efx_farch_filter_spec *spec) -{ - u32 data3; - - switch (efx_farch_filter_spec_table_id(spec)) { - case EFX_FARCH_FILTER_TABLE_RX_IP: { - bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || - spec->type == EFX_FARCH_FILTER_UDP_WILD); - EFX_POPULATE_OWORD_7( - *filter, - FRF_BZ_RSS_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), - FRF_BZ_SCATTER_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), - FRF_BZ_TCP_UDP, is_udp, - FRF_BZ_RXQ_ID, spec->dmaq_id, - EFX_DWORD_2, spec->data[2], - EFX_DWORD_1, spec->data[1], - EFX_DWORD_0, spec->data[0]); - data3 = is_udp; - break; - } - - case EFX_FARCH_FILTER_TABLE_RX_MAC: { - bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; - EFX_POPULATE_OWORD_7( - *filter, - FRF_CZ_RMFT_RSS_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), - FRF_CZ_RMFT_SCATTER_EN, - !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), - FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, - FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, - FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], - FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], - FRF_CZ_RMFT_VLAN_ID, spec->data[0]); - data3 = is_wild; - break; - } - - case EFX_FARCH_FILTER_TABLE_TX_MAC: { - bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; - EFX_POPULATE_OWORD_5(*filter, - FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, - FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, - FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], - FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], - FRF_CZ_TMFT_VLAN_ID, spec->data[0]); - data3 = is_wild | spec->dmaq_id << 1; - break; - } - - default: - BUG(); - } - - return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; -} - -static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, - const struct efx_farch_filter_spec *right) -{ - if (left->type != right->type || - memcmp(left->data, right->data, sizeof(left->data))) - return false; - - if (left->flags & EFX_FILTER_FLAG_TX && - left->dmaq_id != right->dmaq_id) - return false; - - return true; -} - -/* - * Construct/deconstruct external filter IDs. At least the RX filter - * IDs must be ordered by matching priority, for RX NFC semantics. - * - * Deconstruction needs to be robust against invalid IDs so that - * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can - * accept user-provided IDs. 
- */ - -#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 - -static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { - [EFX_FARCH_FILTER_TCP_FULL] = 0, - [EFX_FARCH_FILTER_UDP_FULL] = 0, - [EFX_FARCH_FILTER_TCP_WILD] = 1, - [EFX_FARCH_FILTER_UDP_WILD] = 1, - [EFX_FARCH_FILTER_MAC_FULL] = 2, - [EFX_FARCH_FILTER_MAC_WILD] = 3, - [EFX_FARCH_FILTER_UC_DEF] = 4, - [EFX_FARCH_FILTER_MC_DEF] = 4, -}; - -static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { - EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ - EFX_FARCH_FILTER_TABLE_RX_IP, - EFX_FARCH_FILTER_TABLE_RX_MAC, - EFX_FARCH_FILTER_TABLE_RX_MAC, - EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ - EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ - EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ -}; - -#define EFX_FARCH_FILTER_INDEX_WIDTH 13 -#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) - -static inline u32 -efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, - unsigned int index) -{ - unsigned int range; - - range = efx_farch_filter_type_match_pri[spec->type]; - if (!(spec->flags & EFX_FILTER_FLAG_RX)) - range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; - - return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; -} - -static inline enum efx_farch_filter_table_id -efx_farch_filter_id_table_id(u32 id) -{ - unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; - - if (range < ARRAY_SIZE(efx_farch_filter_range_table)) - return efx_farch_filter_range_table[range]; - else - return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ -} - -static inline unsigned int efx_farch_filter_id_index(u32 id) -{ - return id & EFX_FARCH_FILTER_INDEX_MASK; -} - -u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; - enum efx_farch_filter_table_id table_id; - - do { - table_id = efx_farch_filter_range_table[range]; - if (state->table[table_id].size != 0) - return range << EFX_FARCH_FILTER_INDEX_WIDTH | - state->table[table_id].size; - } while (range--); - - return 0; -} - -s32 efx_farch_filter_insert(struct efx_nic *efx, - struct efx_filter_spec *gen_spec, - bool replace_equal) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table; - struct efx_farch_filter_spec spec; - efx_oword_t filter; - int rep_index, ins_index; - unsigned int depth = 0; - int rc; - - rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); - if (rc) - return rc; - - down_write(&state->lock); - - table = &state->table[efx_farch_filter_spec_table_id(&spec)]; - if (table->size == 0) { - rc = -EINVAL; - goto out_unlock; - } - - netif_vdbg(efx, hw, efx->net_dev, - "%s: type %d search_limit=%d", __func__, spec.type, - table->search_limit[spec.type]); - - if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { - /* One filter spec per type */ - BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); - BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != - EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); - rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; - ins_index = rep_index; - } else { - /* Search concurrently for - * (1) a filter to be replaced (rep_index): any filter - * with the same match values, up to the current - * search depth for this type, and - * (2) the insertion point (ins_index): (1) or any - * free slot before it or up to the maximum search - * depth for this priority - * We fail if we cannot find (2). 
- * - * We can stop once either - * (a) we find (1), in which case we have definitely - * found (2) as well; or - * (b) we have searched exhaustively for (1), and have - * either found (2) or searched exhaustively for it - */ - u32 key = efx_farch_filter_build(&filter, &spec); - unsigned int hash = efx_farch_filter_hash(key); - unsigned int incr = efx_farch_filter_increment(key); - unsigned int max_rep_depth = table->search_limit[spec.type]; - unsigned int max_ins_depth = - spec.priority <= EFX_FILTER_PRI_HINT ? - EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : - EFX_FARCH_FILTER_CTL_SRCH_MAX; - unsigned int i = hash & (table->size - 1); - - ins_index = -1; - depth = 1; - - for (;;) { - if (!test_bit(i, table->used_bitmap)) { - if (ins_index < 0) - ins_index = i; - } else if (efx_farch_filter_equal(&spec, - &table->spec[i])) { - /* Case (a) */ - if (ins_index < 0) - ins_index = i; - rep_index = i; - break; - } - - if (depth >= max_rep_depth && - (ins_index >= 0 || depth >= max_ins_depth)) { - /* Case (b) */ - if (ins_index < 0) { - rc = -EBUSY; - goto out_unlock; - } - rep_index = -1; - break; - } - - i = (i + incr) & (table->size - 1); - ++depth; - } - } - - /* If we found a filter to be replaced, check whether we - * should do so - */ - if (rep_index >= 0) { - struct efx_farch_filter_spec *saved_spec = - &table->spec[rep_index]; - - if (spec.priority == saved_spec->priority && !replace_equal) { - rc = -EEXIST; - goto out_unlock; - } - if (spec.priority < saved_spec->priority) { - rc = -EPERM; - goto out_unlock; - } - if (saved_spec->priority == EFX_FILTER_PRI_AUTO || - saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) - spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; - } - - /* Insert the filter */ - if (ins_index != rep_index) { - __set_bit(ins_index, table->used_bitmap); - ++table->used; - } - table->spec[ins_index] = spec; - - if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { - efx_farch_filter_push_rx_config(efx); - } else { - if (table->search_limit[spec.type] < depth) { - table->search_limit[spec.type] = depth; - if (spec.flags & EFX_FILTER_FLAG_TX) - efx_farch_filter_push_tx_limits(efx); - else - efx_farch_filter_push_rx_config(efx); - } - - efx_writeo(efx, &filter, - table->offset + table->step * ins_index); - - /* If we were able to replace a filter by inserting - * at a lower depth, clear the replaced filter - */ - if (ins_index != rep_index && rep_index >= 0) - efx_farch_filter_table_clear_entry(efx, table, - rep_index); - } - - netif_vdbg(efx, hw, efx->net_dev, - "%s: filter type %d index %d rxq %u set", - __func__, spec.type, ins_index, spec.dmaq_id); - rc = efx_farch_filter_make_id(&spec, ins_index); - -out_unlock: - up_write(&state->lock); - return rc; -} - -static void -efx_farch_filter_table_clear_entry(struct efx_nic *efx, - struct efx_farch_filter_table *table, - unsigned int filter_idx) -{ - static efx_oword_t filter; - - EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); - BUG_ON(table->offset == 0); /* can't clear MAC default filters */ - - __clear_bit(filter_idx, table->used_bitmap); - --table->used; - memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); - - efx_writeo(efx, &filter, table->offset + table->step * filter_idx); - - /* If this filter required a greater search depth than - * any other, the search limit for its type can now be - * decreased. However, it is hard to determine that - * unless the table has become completely empty - in - * which case, all its search limits can be set to 0. 
- */ - if (unlikely(table->used == 0)) { - memset(table->search_limit, 0, sizeof(table->search_limit)); - if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) - efx_farch_filter_push_tx_limits(efx); - else - efx_farch_filter_push_rx_config(efx); - } -} - -static int efx_farch_filter_remove(struct efx_nic *efx, - struct efx_farch_filter_table *table, - unsigned int filter_idx, - enum efx_filter_priority priority) -{ - struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; - - if (!test_bit(filter_idx, table->used_bitmap) || - spec->priority != priority) - return -ENOENT; - - if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { - efx_farch_filter_init_rx_auto(efx, spec); - efx_farch_filter_push_rx_config(efx); - } else { - efx_farch_filter_table_clear_entry(efx, table, filter_idx); - } - - return 0; -} - -int efx_farch_filter_remove_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - unsigned int filter_idx; - int rc; - - table_id = efx_farch_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) - return -ENOENT; - table = &state->table[table_id]; - - filter_idx = efx_farch_filter_id_index(filter_id); - if (filter_idx >= table->size) - return -ENOENT; - down_write(&state->lock); - - rc = efx_farch_filter_remove(efx, table, filter_idx, priority); - up_write(&state->lock); - - return rc; -} - -int efx_farch_filter_get_safe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id, struct efx_filter_spec *spec_buf) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - struct efx_farch_filter_spec *spec; - unsigned int filter_idx; - int rc = -ENOENT; - - down_read(&state->lock); - - table_id = efx_farch_filter_id_table_id(filter_id); - if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) - goto out_unlock; - table = &state->table[table_id]; - - filter_idx = efx_farch_filter_id_index(filter_id); - if (filter_idx >= table->size) - goto out_unlock; - spec = &table->spec[filter_idx]; - - if (test_bit(filter_idx, table->used_bitmap) && - spec->priority == priority) { - efx_farch_filter_to_gen_spec(spec_buf, spec); - rc = 0; - } - -out_unlock: - up_read(&state->lock); - return rc; -} - -static void -efx_farch_filter_table_clear(struct efx_nic *efx, - enum efx_farch_filter_table_id table_id, - enum efx_filter_priority priority) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table = &state->table[table_id]; - unsigned int filter_idx; - - down_write(&state->lock); - for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { - if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) - efx_farch_filter_remove(efx, table, - filter_idx, priority); - } - up_write(&state->lock); -} - -int efx_farch_filter_clear_rx(struct efx_nic *efx, - enum efx_filter_priority priority) -{ - efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, - priority); - efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, - priority); - efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, - priority); - return 0; -} - -u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, - enum efx_filter_priority priority) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - 
struct efx_farch_filter_table *table; - unsigned int filter_idx; - u32 count = 0; - - down_read(&state->lock); - - for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; - table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; - table_id++) { - table = &state->table[table_id]; - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (test_bit(filter_idx, table->used_bitmap) && - table->spec[filter_idx].priority == priority) - ++count; - } - } - - up_read(&state->lock); - - return count; -} - -s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 *buf, u32 size) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - unsigned int filter_idx; - s32 count = 0; - - down_read(&state->lock); - - for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; - table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; - table_id++) { - table = &state->table[table_id]; - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (test_bit(filter_idx, table->used_bitmap) && - table->spec[filter_idx].priority == priority) { - if (count == size) { - count = -EMSGSIZE; - goto out; - } - buf[count++] = efx_farch_filter_make_id( - &table->spec[filter_idx], filter_idx); - } - } - } -out: - up_read(&state->lock); - - return count; -} - -/* Restore filter stater after reset */ -void efx_farch_filter_table_restore(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - efx_oword_t filter; - unsigned int filter_idx; - - down_write(&state->lock); - - for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { - table = &state->table[table_id]; - - /* Check whether this is a regular register table */ - if (table->step == 0) - continue; - - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (!test_bit(filter_idx, table->used_bitmap)) - continue; - efx_farch_filter_build(&filter, &table->spec[filter_idx]); - efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); - } - } - - efx_farch_filter_push_rx_config(efx); - efx_farch_filter_push_tx_limits(efx); - - up_write(&state->lock); -} - -void efx_farch_filter_table_remove(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - - for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { - kfree(state->table[table_id].used_bitmap); - vfree(state->table[table_id].spec); - } - kfree(state); -} - -int efx_farch_filter_table_probe(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state; - struct efx_farch_filter_table *table; - unsigned table_id; - - state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL); - if (!state) - return -ENOMEM; - efx->filter_state = state; - init_rwsem(&state->lock); - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; - table->id = EFX_FARCH_FILTER_TABLE_RX_IP; - table->offset = FR_BZ_RX_FILTER_TBL0; - table->size = FR_BZ_RX_FILTER_TBL0_ROWS; - table->step = FR_BZ_RX_FILTER_TBL0_STEP; - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; - table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; - table->offset = FR_CZ_RX_MAC_FILTER_TBL0; - table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; - table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; - table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; - table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; - - table = 
&state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; - table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; - table->offset = FR_CZ_TX_MAC_FILTER_TBL0; - table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; - table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; - - for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { - table = &state->table[table_id]; - if (table->size == 0) - continue; - table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), - sizeof(unsigned long), - GFP_KERNEL); - if (!table->used_bitmap) - goto fail; - table->spec = vzalloc(array_size(sizeof(*table->spec), - table->size)); - if (!table->spec) - goto fail; - } - - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; - if (table->size) { - /* RX default filters must always exist */ - struct efx_farch_filter_spec *spec; - unsigned i; - - for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { - spec = &table->spec[i]; - spec->type = EFX_FARCH_FILTER_UC_DEF + i; - efx_farch_filter_init_rx_auto(efx, spec); - __set_bit(i, table->used_bitmap); - } - } - - efx_farch_filter_push_rx_config(efx); - - return 0; - -fail: - efx_farch_filter_table_remove(efx); - return -ENOMEM; -} - -/* Update scatter enable flags for filters pointing to our own RX queues */ -void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) -{ - struct efx_farch_filter_state *state = efx->filter_state; - enum efx_farch_filter_table_id table_id; - struct efx_farch_filter_table *table; - efx_oword_t filter; - unsigned int filter_idx; - - down_write(&state->lock); - - for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; - table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; - table_id++) { - table = &state->table[table_id]; - - for (filter_idx = 0; filter_idx < table->size; filter_idx++) { - if (!test_bit(filter_idx, table->used_bitmap) || - table->spec[filter_idx].dmaq_id >= - efx->n_rx_channels) - continue; - - if (efx->rx_scatter) - table->spec[filter_idx].flags |= - EFX_FILTER_FLAG_RX_SCATTER; - else - table->spec[filter_idx].flags &= - ~EFX_FILTER_FLAG_RX_SCATTER; - - if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) - /* Pushed by efx_farch_filter_push_rx_config() */ - continue; - - efx_farch_filter_build(&filter, &table->spec[filter_idx]); - efx_writeo(efx, &filter, - table->offset + table->step * filter_idx); - } - } - - efx_farch_filter_push_rx_config(efx); - - up_write(&state->lock); -} - -#ifdef CONFIG_RFS_ACCEL - -bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, - unsigned int index) -{ - struct efx_farch_filter_state *state = efx->filter_state; - struct efx_farch_filter_table *table; - bool ret = false, force = false; - u16 arfs_id; - - down_write(&state->lock); - spin_lock_bh(&efx->rps_hash_lock); - table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; - if (test_bit(index, table->used_bitmap) && - table->spec[index].priority == EFX_FILTER_PRI_HINT) { - struct efx_arfs_rule *rule = NULL; - struct efx_filter_spec spec; - - efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); - if (!efx->rps_hash_table) { - /* In the absence of the table, we always returned 0 to - * ARFS, so use the same to query it. 
- */ - arfs_id = 0; - } else { - rule = efx_rps_hash_find(efx, &spec); - if (!rule) { - /* ARFS table doesn't know of this filter, remove it */ - force = true; - } else { - arfs_id = rule->arfs_id; - if (!efx_rps_check_rule(rule, index, &force)) - goto out_unlock; - } - } - if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, - flow_id, arfs_id)) { - if (rule) - rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; - efx_rps_hash_del(efx, &spec); - efx_farch_filter_table_clear_entry(efx, table, index); - ret = true; - } - } -out_unlock: - spin_unlock_bh(&efx->rps_hash_lock); - up_write(&state->lock); - return ret; -} - -#endif /* CONFIG_RFS_ACCEL */ - -void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) -{ - struct net_device *net_dev = efx->net_dev; - struct netdev_hw_addr *ha; - union efx_multicast_hash *mc_hash = &efx->multicast_hash; - u32 crc; - int bit; - - if (!efx_dev_registered(efx)) - return; - - netif_addr_lock_bh(net_dev); - - efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); - - /* Build multicast hash table */ - if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { - memset(mc_hash, 0xff, sizeof(*mc_hash)); - } else { - memset(mc_hash, 0x00, sizeof(*mc_hash)); - netdev_for_each_mc_addr(ha, net_dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); - __set_bit_le(bit, mc_hash); - } - - /* Broadcast packets go through the multicast hash filter. - * ether_crc_le() of the broadcast address is 0xbe2612ff - * so we always add bit 0xff to the mask. - */ - __set_bit_le(0xff, mc_hash); - } - - netif_addr_unlock_bh(net_dev); -} diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c deleted file mode 100644 index ce3060e15b54..000000000000 --- a/drivers/net/ethernet/sfc/siena.c +++ /dev/null @@ -1,1109 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/**************************************************************************** - * Driver for Solarflare network controllers and boards - * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2013 Solarflare Communications Inc. - */ - -#include -#include -#include -#include -#include -#include -#include "net_driver.h" -#include "bitfield.h" -#include "efx.h" -#include "efx_common.h" -#include "nic.h" -#include "farch_regs.h" -#include "io.h" -#include "workarounds.h" -#include "mcdi.h" -#include "mcdi_pcol.h" -#include "mcdi_port.h" -#include "mcdi_port_common.h" -#include "selftest.h" -#include "siena_sriov.h" -#include "rx_common.h" - -/* Hardware control for SFC9000 family including SFL9021 (aka Siena). 
*/ - -static void siena_init_wol(struct efx_nic *efx); - - -static void siena_push_irq_moderation(struct efx_channel *channel) -{ - struct efx_nic *efx = channel->efx; - efx_dword_t timer_cmd; - - if (channel->irq_moderation_us) { - unsigned int ticks; - - ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); - EFX_POPULATE_DWORD_2(timer_cmd, - FRF_CZ_TC_TIMER_MODE, - FFE_CZ_TIMER_MODE_INT_HLDOFF, - FRF_CZ_TC_TIMER_VAL, - ticks - 1); - } else { - EFX_POPULATE_DWORD_2(timer_cmd, - FRF_CZ_TC_TIMER_MODE, - FFE_CZ_TIMER_MODE_DIS, - FRF_CZ_TC_TIMER_VAL, 0); - } - efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, - channel->channel); -} - -void siena_prepare_flush(struct efx_nic *efx) -{ - if (efx->fc_disable++ == 0) - efx_mcdi_set_mac(efx); -} - -void siena_finish_flush(struct efx_nic *efx) -{ - if (--efx->fc_disable == 0) - efx_mcdi_set_mac(efx); -} - -static const struct efx_farch_register_test siena_register_tests[] = { - { FR_AZ_ADR_REGION, - EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, - { FR_CZ_USR_EV_CFG, - EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, - { FR_AZ_RX_CFG, - EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, - { FR_AZ_TX_CFG, - EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, - { FR_AZ_TX_RESERVED, - EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, - { FR_AZ_SRM_TX_DC_CFG, - EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, - { FR_AZ_RX_DC_CFG, - EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, - { FR_AZ_RX_DC_PF_WM, - EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, - { FR_BZ_DP_CTRL, - EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, - { FR_BZ_RX_RSS_TKEY, - EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, - { FR_CZ_RX_RSS_IPV6_REG1, - EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, - { FR_CZ_RX_RSS_IPV6_REG2, - EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, - { FR_CZ_RX_RSS_IPV6_REG3, - EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, -}; - -static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) -{ - enum reset_type reset_method = RESET_TYPE_ALL; - int rc, rc2; - - efx_reset_down(efx, reset_method); - - /* Reset the chip immediately so that it is completely - * quiescent regardless of what any VF driver does. - */ - rc = efx_mcdi_reset(efx, reset_method); - if (rc) - goto out; - - tests->registers = - efx_farch_test_registers(efx, siena_register_tests, - ARRAY_SIZE(siena_register_tests)) - ? -1 : 1; - - rc = efx_mcdi_reset(efx, reset_method); -out: - rc2 = efx_reset_up(efx, reset_method, rc == 0); - return rc ? 
rc : rc2; -} - -/************************************************************************** - * - * PTP - * - ************************************************************************** - */ - -static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time) -{ - _efx_writed(efx, cpu_to_le32(host_time), - FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); -} - -static int siena_ptp_set_ts_config(struct efx_nic *efx, - struct hwtstamp_config *init) -{ - int rc; - - switch (init->rx_filter) { - case HWTSTAMP_FILTER_NONE: - /* if TX timestamping is still requested then leave PTP on */ - return efx_ptp_change_mode(efx, - init->tx_type != HWTSTAMP_TX_OFF, - efx_ptp_get_mode(efx)); - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1); - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; - rc = efx_ptp_change_mode(efx, true, - MC_CMD_PTP_MODE_V2_ENHANCED); - /* bug 33070 - old versions of the firmware do not support the - * improved UUID filtering option. Similarly old versions of the - * application do not expect it to be enabled. If the firmware - * does not accept the enhanced mode, fall back to the standard - * PTP v2 UUID filtering. */ - if (rc != 0) - rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2); - return rc; - default: - return -ERANGE; - } -} - -/************************************************************************** - * - * Device reset - * - ************************************************************************** - */ - -static int siena_map_reset_flags(u32 *flags) -{ - enum { - SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER | - ETH_RESET_OFFLOAD | ETH_RESET_MAC | - ETH_RESET_PHY), - SIENA_RESET_MC = (SIENA_RESET_PORT | - ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT), - }; - - if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) { - *flags &= ~SIENA_RESET_MC; - return RESET_TYPE_WORLD; - } - - if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) { - *flags &= ~SIENA_RESET_PORT; - return RESET_TYPE_ALL; - } - - /* no invisible reset implemented */ - - return -EINVAL; -} - -#ifdef CONFIG_EEH -/* When a PCI device is isolated from the bus, a subsequent MMIO read is - * required for the kernel EEH mechanisms to notice. As the Solarflare driver - * was written to minimise MMIO read (for latency) then a periodic call to check - * the EEH status of the device is required so that device recovery can happen - * in a timely fashion. - */ -static void siena_monitor(struct efx_nic *efx) -{ - struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); - - eeh_dev_check_failure(eehdev); -} -#endif - -static int siena_probe_nvconfig(struct efx_nic *efx) -{ - u32 caps = 0; - int rc; - - rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps); - - efx->timer_quantum_ns = - (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? - 3072 : 6144; /* 768 cycles */ - efx->timer_max_ns = efx->type->timer_period_max * - efx->timer_quantum_ns; - - return rc; -} - -static int siena_dimension_resources(struct efx_nic *efx) -{ - /* Each port has a small block of internal SRAM dedicated to - * the buffer table and descriptor caches. In theory we can - * map both blocks to one port, but we don't. 
- */
- efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
- return 0;
-}
-
-/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3)
- * for memory.
- */
-static unsigned int siena_mem_bar(struct efx_nic *efx)
-{
- return 2;
-}
-
-static unsigned int siena_mem_map_size(struct efx_nic *efx)
-{
- return FR_CZ_MC_TREG_SMEM +
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
-}
-
-static int siena_probe_nic(struct efx_nic *efx)
-{
- struct siena_nic_data *nic_data;
- efx_oword_t reg;
- int rc;
-
- /* Allocate storage for hardware specific data */
- nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
- if (!nic_data)
- return -ENOMEM;
- nic_data->efx = efx;
- efx->nic_data = nic_data;
-
- if (efx_farch_fpga_ver(efx) != 0) {
- netif_err(efx, probe, efx->net_dev,
- "Siena FPGA not supported\n");
- rc = -ENODEV;
- goto fail1;
- }
-
- efx->max_channels = EFX_MAX_CHANNELS;
- efx->max_vis = EFX_MAX_CHANNELS;
- efx->max_tx_channels = EFX_MAX_CHANNELS;
- efx->tx_queues_per_channel = 4;
-
- efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
- efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
-
- rc = efx_mcdi_init(efx);
- if (rc)
- goto fail1;
-
- /* Now we can reset the NIC */
- rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
- if (rc) {
- netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
- goto fail3;
- }
-
- siena_init_wol(efx);
-
- /* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
- GFP_KERNEL);
- if (rc)
- goto fail4;
- BUG_ON(efx->irq_status.dma_addr & 0x0f);
-
- netif_dbg(efx, probe, efx->net_dev,
- "INT_KER at %llx (virt %p phys %llx)\n",
- (unsigned long long)efx->irq_status.dma_addr,
- efx->irq_status.addr,
- (unsigned long long)virt_to_phys(efx->irq_status.addr));
-
- /* Read in the non-volatile configuration */
- rc = siena_probe_nvconfig(efx);
- if (rc == -EINVAL) {
- netif_err(efx, probe, efx->net_dev,
- "NVRAM is invalid therefore using defaults\n");
- efx->phy_type = PHY_TYPE_NONE;
- efx->mdio.prtad = MDIO_PRTAD_NONE;
- } else if (rc) {
- goto fail5;
- }
-
- rc = efx_mcdi_mon_probe(efx);
- if (rc)
- goto fail5;
-
-#ifdef CONFIG_SFC_SRIOV
- efx_siena_sriov_probe(efx);
-#endif
- efx_ptp_defer_probe_with_channel(efx);
-
- return 0;
-
-fail5:
- efx_nic_free_buffer(efx, &efx->irq_status);
-fail4:
-fail3:
- efx_mcdi_detach(efx);
- efx_mcdi_fini(efx);
-fail1:
- kfree(efx->nic_data);
- return rc;
-}
-
-static int siena_rx_pull_rss_config(struct efx_nic *efx)
-{
- efx_oword_t temp;
-
- /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the
- * first 128 bits of the same key, assuming it's been set by
- * siena_rx_push_rss_config, below)
- */
- efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
- memcpy(efx->rss_context.rx_hash_key, &temp, sizeof(temp));
- efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
- memcpy(efx->rss_context.rx_hash_key + sizeof(temp), &temp, sizeof(temp));
- efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
- memcpy(efx->rss_context.rx_hash_key + 2 * sizeof(temp), &temp,
- FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
- efx_farch_rx_pull_indir_table(efx);
- return 0;
-}
-
-static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table, const u8 *key)
-{
- efx_oword_t temp;
-
- /* Set hash key for IPv4 */
- if (key)
- memcpy(efx->rss_context.rx_hash_key, key, sizeof(temp));
- memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp));
- efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
-
- /* Enable IPv6 RSS */
- 
BUILD_BUG_ON(sizeof(efx->rss_context.rx_hash_key) < - 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || - FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); - memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp)); - efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); - memcpy(&temp, efx->rss_context.rx_hash_key + sizeof(temp), sizeof(temp)); - efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); - EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, - FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); - memcpy(&temp, efx->rss_context.rx_hash_key + 2 * sizeof(temp), - FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); - efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); - - memcpy(efx->rss_context.rx_indir_table, rx_indir_table, - sizeof(efx->rss_context.rx_indir_table)); - efx_farch_rx_push_indir_table(efx); - - return 0; -} - -/* This call performs hardware-specific global initialisation, such as - * defining the descriptor cache sizes and number of RSS channels. - * It does not set up any buffers, descriptor rings or event queues. - */ -static int siena_init_nic(struct efx_nic *efx) -{ - efx_oword_t temp; - int rc; - - /* Recover from a failed assertion post-reset */ - rc = efx_mcdi_handle_assertion(efx); - if (rc) - return rc; - - /* Squash TX of packets of 16 bytes or less */ - efx_reado(efx, &temp, FR_AZ_TX_RESERVED); - EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); - efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); - - /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 - * descriptors (which is bad). - */ - efx_reado(efx, &temp, FR_AZ_TX_CFG); - EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); - EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); - efx_writeo(efx, &temp, FR_AZ_TX_CFG); - - efx_reado(efx, &temp, FR_AZ_RX_CFG); - EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); - EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); - /* Enable hash insertion. This is broken for the 'Falcon' hash - * if IPv6 hashing is also enabled, so also select Toeplitz - * TCP/IPv4 and IPv4 hashes. 
*/ - EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); - EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); - EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); - EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE, - EFX_RX_USR_BUF_SIZE >> 5); - efx_writeo(efx, &temp, FR_AZ_RX_CFG); - - siena_rx_push_rss_config(efx, false, efx->rss_context.rx_indir_table, NULL); - efx->rss_context.context_id = 0; /* indicates RSS is active */ - - /* Enable event logging */ - rc = efx_mcdi_log_ctrl(efx, true, false, 0); - if (rc) - return rc; - - /* Set destination of both TX and RX Flush events */ - EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); - efx_writeo(efx, &temp, FR_BZ_DP_CTRL); - - EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); - efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); - - efx_farch_init_common(efx); - return 0; -} - -static void siena_remove_nic(struct efx_nic *efx) -{ - efx_mcdi_mon_remove(efx); - - efx_nic_free_buffer(efx, &efx->irq_status); - - efx_mcdi_reset(efx, RESET_TYPE_ALL); - - efx_mcdi_detach(efx); - efx_mcdi_fini(efx); - - /* Tear down the private nic state */ - kfree(efx->nic_data); - efx->nic_data = NULL; -} - -#define SIENA_DMA_STAT(ext_name, mcdi_name) \ - [SIENA_STAT_ ## ext_name] = \ - { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } -#define SIENA_OTHER_STAT(ext_name) \ - [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 } -#define GENERIC_SW_STAT(ext_name) \ - [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } - -static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = { - SIENA_DMA_STAT(tx_bytes, TX_BYTES), - SIENA_OTHER_STAT(tx_good_bytes), - SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES), - SIENA_DMA_STAT(tx_packets, TX_PKTS), - SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS), - SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS), - SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS), - SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), - SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), - SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), - SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS), - SIENA_DMA_STAT(tx_64, TX_64_PKTS), - SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), - SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), - SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), - SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), - SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), - SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), - SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS), - SIENA_OTHER_STAT(tx_collision), - SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS), - SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS), - SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS), - SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS), - SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS), - SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS), - SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS), - SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS), - SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS), - SIENA_DMA_STAT(rx_bytes, RX_BYTES), - SIENA_OTHER_STAT(rx_good_bytes), - SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES), - SIENA_DMA_STAT(rx_packets, RX_PKTS), - SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS), - SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), - SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS), - SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS), - SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), - SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), - SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), - SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), - SIENA_DMA_STAT(rx_64, 
RX_64_PKTS), - SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), - SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), - SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), - SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), - SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), - SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), - SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), - SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), - SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), - SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS), - SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS), - SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), - SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), - SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS), - SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS), - GENERIC_SW_STAT(rx_nodesc_trunc), - GENERIC_SW_STAT(rx_noskb_drops), -}; -static const unsigned long siena_stat_mask[] = { - [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL, -}; - -static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names) -{ - return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT, - siena_stat_mask, names); -} - -static int siena_try_update_nic_stats(struct efx_nic *efx) -{ - struct siena_nic_data *nic_data = efx->nic_data; - u64 *stats = nic_data->stats; - __le64 *dma_stats; - __le64 generation_start, generation_end; - - dma_stats = efx->stats_buffer.addr; - - generation_end = dma_stats[efx->num_mac_stats - 1]; - if (generation_end == EFX_MC_STATS_GENERATION_INVALID) - return 0; - rmb(); - efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask, - stats, efx->stats_buffer.addr, false); - rmb(); - generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; - if (generation_end != generation_start) - return -EAGAIN; - - /* Update derived statistics */ - efx_nic_fix_nodesc_drop_stat(efx, - &stats[SIENA_STAT_rx_nodesc_drop_cnt]); - efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes], - stats[SIENA_STAT_tx_bytes] - - stats[SIENA_STAT_tx_bad_bytes]); - stats[SIENA_STAT_tx_collision] = - stats[SIENA_STAT_tx_single_collision] + - stats[SIENA_STAT_tx_multiple_collision] + - stats[SIENA_STAT_tx_excessive_collision] + - stats[SIENA_STAT_tx_late_collision]; - efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes], - stats[SIENA_STAT_rx_bytes] - - stats[SIENA_STAT_rx_bad_bytes]); - efx_update_sw_stats(efx, stats); - return 0; -} - -static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats, - struct rtnl_link_stats64 *core_stats) -{ - struct siena_nic_data *nic_data = efx->nic_data; - u64 *stats = nic_data->stats; - int retry; - - /* If we're unlucky enough to read statistics wduring the DMA, wait - * up to 10ms for it to finish (typically takes <500us) */ - for (retry = 0; retry < 100; ++retry) { - if (siena_try_update_nic_stats(efx) == 0) - break; - udelay(100); - } - - if (full_stats) - memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT); - - if (core_stats) { - core_stats->rx_packets = stats[SIENA_STAT_rx_packets]; - core_stats->tx_packets = stats[SIENA_STAT_tx_packets]; - core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes]; - core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes]; - core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt] + - stats[GENERIC_STAT_rx_nodesc_trunc] + - stats[GENERIC_STAT_rx_noskb_drops]; - core_stats->multicast = stats[SIENA_STAT_rx_multicast]; - core_stats->collisions = stats[SIENA_STAT_tx_collision]; - core_stats->rx_length_errors = - stats[SIENA_STAT_rx_gtjumbo] + - 
stats[SIENA_STAT_rx_length_error]; - core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad]; - core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error]; - core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow]; - core_stats->tx_window_errors = - stats[SIENA_STAT_tx_late_collision]; - - core_stats->rx_errors = (core_stats->rx_length_errors + - core_stats->rx_crc_errors + - core_stats->rx_frame_errors + - stats[SIENA_STAT_rx_symbol_error]); - core_stats->tx_errors = (core_stats->tx_window_errors + - stats[SIENA_STAT_tx_bad]); - } - - return SIENA_STAT_COUNT; -} - -static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unused) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); - int rc; - - BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN != - MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + - sizeof(efx->multicast_hash)); - - efx_farch_filter_sync_rx_mode(efx); - - WARN_ON(!mutex_is_locked(&efx->mac_lock)); - - rc = efx_mcdi_set_mac(efx); - if (rc != 0) - return rc; - - memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), - efx->multicast_hash.byte, sizeof(efx->multicast_hash)); - return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, - inbuf, sizeof(inbuf), NULL, 0, NULL); -} - -/************************************************************************** - * - * Wake on LAN - * - ************************************************************************** - */ - -static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) -{ - struct siena_nic_data *nic_data = efx->nic_data; - - wol->supported = WAKE_MAGIC; - if (nic_data->wol_filter_id != -1) - wol->wolopts = WAKE_MAGIC; - else - wol->wolopts = 0; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - - -static int siena_set_wol(struct efx_nic *efx, u32 type) -{ - struct siena_nic_data *nic_data = efx->nic_data; - int rc; - - if (type & ~WAKE_MAGIC) - return -EINVAL; - - if (type & WAKE_MAGIC) { - if (nic_data->wol_filter_id != -1) - efx_mcdi_wol_filter_remove(efx, - nic_data->wol_filter_id); - rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, - &nic_data->wol_filter_id); - if (rc) - goto fail; - - pci_wake_from_d3(efx->pci_dev, true); - } else { - rc = efx_mcdi_wol_filter_reset(efx); - nic_data->wol_filter_id = -1; - pci_wake_from_d3(efx->pci_dev, false); - if (rc) - goto fail; - } - - return 0; - fail: - netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", - __func__, type, rc); - return rc; -} - - -static void siena_init_wol(struct efx_nic *efx) -{ - struct siena_nic_data *nic_data = efx->nic_data; - int rc; - - rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); - - if (rc != 0) { - /* If it failed, attempt to get into a synchronised - * state with MC by resetting any set WoL filters */ - efx_mcdi_wol_filter_reset(efx); - nic_data->wol_filter_id = -1; - } else if (nic_data->wol_filter_id != -1) { - pci_wake_from_d3(efx->pci_dev, true); - } -} - -/************************************************************************** - * - * MCDI - * - ************************************************************************** - */ - -#define MCDI_PDU(efx) \ - (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) -#define MCDI_DOORBELL(efx) \ - (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) -#define MCDI_STATUS(efx) \ - (efx_port_num(efx) ? 
MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
-
-static void siena_mcdi_request(struct efx_nic *efx,
- const efx_dword_t *hdr, size_t hdr_len,
- const efx_dword_t *sdu, size_t sdu_len)
-{
- unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
- unsigned int i;
- unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
-
- EFX_WARN_ON_PARANOID(hdr_len != 4);
-
- efx_writed(efx, hdr, pdu);
-
- for (i = 0; i < inlen_dw; i++)
- efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
-
- /* Ensure the request is written out before the doorbell */
- wmb();
-
- /* ring the doorbell with a distinctive value */
- _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
-}
-
-static bool siena_mcdi_poll_response(struct efx_nic *efx)
-{
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- efx_dword_t hdr;
-
- efx_readd(efx, &hdr, pdu);
-
- /* All 1's indicates that shared memory is in reset (and is
- * not a valid hdr). Wait for it to come out reset before
- * completing the command
- */
- return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
- EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
-}
-
-static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
- size_t offset, size_t outlen)
-{
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
- int i;
-
- for (i = 0; i < outlen_dw; i++)
- efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
-}
-
-static int siena_mcdi_poll_reboot(struct efx_nic *efx)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
- efx_dword_t reg;
- u32 value;
-
- efx_readd(efx, &reg, addr);
- value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
-
- if (value == 0)
- return 0;
-
- EFX_ZERO_DWORD(reg);
- efx_writed(efx, &reg, addr);
-
- /* MAC statistics have been cleared on the NIC; clear the local
- * copies that we update with efx_update_diff_stat().
- */ - nic_data->stats[SIENA_STAT_tx_good_bytes] = 0; - nic_data->stats[SIENA_STAT_rx_good_bytes] = 0; - - if (value == MC_STATUS_DWORD_ASSERT) - return -EINTR; - else - return -EIO; -} - -/************************************************************************** - * - * MTD - * - ************************************************************************** - */ - -#ifdef CONFIG_SFC_MTD - -struct siena_nvram_type_info { - int port; - const char *name; -}; - -static const struct siena_nvram_type_info siena_nvram_types[] = { - [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, - [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, - [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, - [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" }, - [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" }, - [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" }, - [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" }, - [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" }, - [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" }, - [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, - [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, - [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, - [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" }, -}; - -static int siena_mtd_probe_partition(struct efx_nic *efx, - struct efx_mcdi_mtd_partition *part, - unsigned int type) -{ - const struct siena_nvram_type_info *info; - size_t size, erase_size; - bool protected; - int rc; - - if (type >= ARRAY_SIZE(siena_nvram_types) || - siena_nvram_types[type].name == NULL) - return -ENODEV; - - info = &siena_nvram_types[type]; - - if (info->port != efx_port_num(efx)) - return -ENODEV; - - rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); - if (rc) - return rc; - if (protected) - return -ENODEV; /* hide it */ - - part->nvram_type = type; - part->common.dev_type_name = "Siena NVRAM manager"; - part->common.type_name = info->name; - - part->common.mtd.type = MTD_NORFLASH; - part->common.mtd.flags = MTD_CAP_NORFLASH; - part->common.mtd.size = size; - part->common.mtd.erasesize = erase_size; - - return 0; -} - -static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, - struct efx_mcdi_mtd_partition *parts, - size_t n_parts) -{ - uint16_t fw_subtype_list[ - MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; - size_t i; - int rc; - - rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); - if (rc) - return rc; - - for (i = 0; i < n_parts; i++) - parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type]; - - return 0; -} - -static int siena_mtd_probe(struct efx_nic *efx) -{ - struct efx_mcdi_mtd_partition *parts; - u32 nvram_types; - unsigned int type; - size_t n_parts; - int rc; - - ASSERT_RTNL(); - - rc = efx_mcdi_nvram_types(efx, &nvram_types); - if (rc) - return rc; - - parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL); - if (!parts) - return -ENOMEM; - - type = 0; - n_parts = 0; - - while (nvram_types != 0) { - if (nvram_types & 1) { - rc = siena_mtd_probe_partition(efx, &parts[n_parts], - type); - if (rc == 0) - n_parts++; - else if (rc != -ENODEV) - goto fail; - } - type++; - nvram_types >>= 1; - } - - rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts); - if (rc) - goto fail; - - rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); -fail: - if (rc) - kfree(parts); - return rc; -} - -#endif /* CONFIG_SFC_MTD */ - -static unsigned int siena_check_caps(const struct efx_nic *efx, - u8 flag, u32 
offset) -{ - /* Siena did not support MC_CMD_GET_CAPABILITIES */ - return 0; -} - -static unsigned int efx_siena_recycle_ring_size(const struct efx_nic *efx) -{ - /* Maximum link speed is 10G */ - return EFX_RECYCLE_RING_SIZE_10G; -} - -/************************************************************************** - * - * Revision-dependent attributes used by efx.c and nic.c - * - ************************************************************************** - */ - -const struct efx_nic_type siena_a0_nic_type = { - .is_vf = false, - .mem_bar = siena_mem_bar, - .mem_map_size = siena_mem_map_size, - .probe = siena_probe_nic, - .remove = siena_remove_nic, - .init = siena_init_nic, - .dimension_resources = siena_dimension_resources, - .fini = efx_port_dummy_op_void, -#ifdef CONFIG_EEH - .monitor = siena_monitor, -#else - .monitor = NULL, -#endif - .map_reset_reason = efx_mcdi_map_reset_reason, - .map_reset_flags = siena_map_reset_flags, - .reset = efx_mcdi_reset, - .probe_port = efx_mcdi_port_probe, - .remove_port = efx_mcdi_port_remove, - .fini_dmaq = efx_farch_fini_dmaq, - .prepare_flush = siena_prepare_flush, - .finish_flush = siena_finish_flush, - .prepare_flr = efx_port_dummy_op_void, - .finish_flr = efx_farch_finish_flr, - .describe_stats = siena_describe_nic_stats, - .update_stats = siena_update_nic_stats, - .start_stats = efx_mcdi_mac_start_stats, - .pull_stats = efx_mcdi_mac_pull_stats, - .stop_stats = efx_mcdi_mac_stop_stats, - .push_irq_moderation = siena_push_irq_moderation, - .reconfigure_mac = siena_mac_reconfigure, - .check_mac_fault = efx_mcdi_mac_check_fault, - .reconfigure_port = efx_mcdi_port_reconfigure, - .get_wol = siena_get_wol, - .set_wol = siena_set_wol, - .resume_wol = siena_init_wol, - .test_chip = siena_test_chip, - .test_nvram = efx_mcdi_nvram_test_all, - .mcdi_request = siena_mcdi_request, - .mcdi_poll_response = siena_mcdi_poll_response, - .mcdi_read_response = siena_mcdi_read_response, - .mcdi_poll_reboot = siena_mcdi_poll_reboot, - .irq_enable_master = efx_farch_irq_enable_master, - .irq_test_generate = efx_farch_irq_test_generate, - .irq_disable_non_ev = efx_farch_irq_disable_master, - .irq_handle_msi = efx_farch_msi_interrupt, - .irq_handle_legacy = efx_farch_legacy_interrupt, - .tx_probe = efx_farch_tx_probe, - .tx_init = efx_farch_tx_init, - .tx_remove = efx_farch_tx_remove, - .tx_write = efx_farch_tx_write, - .tx_limit_len = efx_farch_tx_limit_len, - .tx_enqueue = __efx_enqueue_skb, - .rx_push_rss_config = siena_rx_push_rss_config, - .rx_pull_rss_config = siena_rx_pull_rss_config, - .rx_probe = efx_farch_rx_probe, - .rx_init = efx_farch_rx_init, - .rx_remove = efx_farch_rx_remove, - .rx_write = efx_farch_rx_write, - .rx_defer_refill = efx_farch_rx_defer_refill, - .rx_packet = __efx_rx_packet, - .ev_probe = efx_farch_ev_probe, - .ev_init = efx_farch_ev_init, - .ev_fini = efx_farch_ev_fini, - .ev_remove = efx_farch_ev_remove, - .ev_process = efx_farch_ev_process, - .ev_read_ack = efx_farch_ev_read_ack, - .ev_test_generate = efx_farch_ev_test_generate, - .filter_table_probe = efx_farch_filter_table_probe, - .filter_table_restore = efx_farch_filter_table_restore, - .filter_table_remove = efx_farch_filter_table_remove, - .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter, - .filter_insert = efx_farch_filter_insert, - .filter_remove_safe = efx_farch_filter_remove_safe, - .filter_get_safe = efx_farch_filter_get_safe, - .filter_clear_rx = efx_farch_filter_clear_rx, - .filter_count_rx_used = efx_farch_filter_count_rx_used, - .filter_get_rx_id_limit = 
efx_farch_filter_get_rx_id_limit, - .filter_get_rx_ids = efx_farch_filter_get_rx_ids, -#ifdef CONFIG_RFS_ACCEL - .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, -#endif -#ifdef CONFIG_SFC_MTD - .mtd_probe = siena_mtd_probe, - .mtd_rename = efx_mcdi_mtd_rename, - .mtd_read = efx_mcdi_mtd_read, - .mtd_erase = efx_mcdi_mtd_erase, - .mtd_write = efx_mcdi_mtd_write, - .mtd_sync = efx_mcdi_mtd_sync, -#endif - .ptp_write_host_time = siena_ptp_write_host_time, - .ptp_set_ts_config = siena_ptp_set_ts_config, -#ifdef CONFIG_SFC_SRIOV - .sriov_configure = efx_siena_sriov_configure, - .sriov_init = efx_siena_sriov_init, - .sriov_fini = efx_siena_sriov_fini, - .sriov_wanted = efx_siena_sriov_wanted, - .sriov_reset = efx_siena_sriov_reset, - .sriov_flr = efx_siena_sriov_flr, - .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac, - .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan, - .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, - .sriov_get_vf_config = efx_siena_sriov_get_vf_config, - .vswitching_probe = efx_port_dummy_op_int, - .vswitching_restore = efx_port_dummy_op_int, - .vswitching_remove = efx_port_dummy_op_void, - .set_mac_address = efx_siena_sriov_mac_address_changed, -#endif - - .revision = EFX_REV_SIENA_A0, - .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, - .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, - .buf_tbl_base = FR_BZ_BUF_FULL_TBL, - .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, - .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, - .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), - .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE, - .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, - .rx_buffer_padding = 0, - .can_rx_scatter = true, - .option_descriptors = false, - .min_interrupt_mode = EFX_INT_MODE_LEGACY, - .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, - .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXHASH | NETIF_F_NTUPLE), - .mcdi_max_ver = 1, - .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, - .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), - .rx_hash_key_size = 16, - .check_caps = siena_check_caps, - .sensor_event = efx_mcdi_sensor_event, - .rx_recycle_ring_size = efx_siena_recycle_ring_size, -}; diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c new file mode 100644 index 000000000000..9599123bc28d --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/farch.c @@ -0,0 +1,2988 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "rx_common.h" +#include "tx_common.h" +#include "nic.h" +#include "farch_regs.h" +#include "sriov.h" +#include "siena_sriov.h" +#include "io.h" +#include "workarounds.h" + +/* Falcon-architecture (SFC9000-family) support */ + +/************************************************************************** + * + * Configurable values + * + ************************************************************************** + */ + +/* This is set to 16 for a good reason. In summary, if larger than + * 16, the descriptor cache holds more than a default socket + * buffer's worth of packets (for UDP we can only have at most one + * socket buffer's worth outstanding). 
This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Driver generated events */
+#define _EFX_CHANNEL_MAGIC_TEST 0x000101
+#define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
+#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
+
+/**************************************************************************
+ *
+ * Hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0;
+ int i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ * + *************************************************************************/ + +/* + * Initialise a special buffer + * + * This will define a buffer (previously allocated via + * efx_alloc_special_buffer()) in the buffer table, allowing + * it to be used for event queues, descriptor rings etc. + */ +static void +efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_qword_t buf_desc; + unsigned int index; + dma_addr_t dma_addr; + int i; + + EFX_WARN_ON_PARANOID(!buffer->buf.addr); + + /* Write buffer descriptors to NIC */ + for (i = 0; i < buffer->entries; i++) { + index = buffer->index + i; + dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); + netif_dbg(efx, probe, efx->net_dev, + "mapping special buffer %d at %llx\n", + index, (unsigned long long)dma_addr); + EFX_POPULATE_QWORD_3(buf_desc, + FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + efx_write_buf_tbl(efx, &buf_desc, index); + } +} + +/* Unmaps a buffer and clears the buffer table entries */ +static void +efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_oword_t buf_tbl_upd; + unsigned int start = buffer->index; + unsigned int end = (buffer->index + buffer->entries - 1); + + if (!buffer->entries) + return; + + netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", + buffer->index, buffer->index + buffer->entries - 1); + + EFX_POPULATE_OWORD_4(buf_tbl_upd, + FRF_AZ_BUF_UPD_CMD, 0, + FRF_AZ_BUF_CLR_CMD, 1, + FRF_AZ_BUF_CLR_END_ID, end, + FRF_AZ_BUF_CLR_START_ID, start); + efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); +} + +/* + * Allocate a new special buffer + * + * This allocates memory for a new buffer, clears it and allocates a + * new buffer ID range. It does not write into the buffer table. + * + * This call will allocate 4KB buffers, since 8KB buffers can't be + * used for event queues and descriptor rings. 
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+#ifdef CONFIG_SFC_SRIOV
+ struct siena_nic_data *nic_data = efx->nic_data;
+#endif
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ return -ENOMEM;
+ buffer->entries = len / EFX_BUF_SIZE;
+ BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+#ifdef CONFIG_SFC_SRIOV
+ BUG_ON(efx_siena_sriov_enabled(efx) &&
+ nic_data->vf_buftbl_base < efx->next_buffer_table);
+#endif
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ return 0;
+}
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->buf.addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, buffer->buf.len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ efx_nic_free_buffer(efx, &buffer->buf);
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ tx_queue->xmit_pending = false;
+ if (unlikely(tx_queue->write_count == tx_queue->insert_count))
+ return;
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
+
+ /* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_farch_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_farch_notify_tx_desc(tx_queue);
+ }
+}
+
+unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned int len)
+{
+ /* Don't cross 4K boundaries with descriptors. */
+ unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
+
+ len = min(limit, len);
+
+ return len;
+}
+
+
+/* Allocate hardware resources for a TX queue */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
+ ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
+{
+ int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF : + FFE_BZ_TX_PACE_RESERVED); + efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, tx_queue->queue); + + tx_queue->tso_version = 1; +} + +static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_flush_descq; + + WARN_ON(atomic_read(&tx_queue->flush_outstanding)); + atomic_set(&tx_queue->flush_outstanding, 1); + + EFX_POPULATE_OWORD_2(tx_flush_descq, + FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); + efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); +} + +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_desc_ptr; + + /* Remove TX descriptor ring from card */ + EFX_ZERO_OWORD(tx_desc_ptr); + efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, + tx_queue->queue); + + /* Unpin TX descriptor ring */ + efx_fini_special_buffer(efx, &tx_queue->txd); +} + +/* Free buffers backing TX queue */ +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) +{ + efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); +} + +/************************************************************************** + * + * RX path + * + **************************************************************************/ + +/* This creates an entry in the RX descriptor queue */ +static inline void +efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_rx_buffer *rx_buf; + efx_qword_t *rxd; + + rxd = efx_rx_desc(rx_queue, index); + rx_buf = efx_rx_buffer(rx_queue, index); + EFX_POPULATE_QWORD_3(*rxd, + FSF_AZ_RX_KER_BUF_SIZE, + rx_buf->len - + rx_queue->efx->type->rx_buffer_padding, + FSF_AZ_RX_KER_BUF_REGION, 0, + FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); +} + +/* This writes to the RX_DESC_WPTR register for the specified receive + * descriptor ring. + */ +void efx_farch_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_dword_t reg; + unsigned write_ptr; + + while (rx_queue->notified_count != rx_queue->added_count) { + efx_farch_build_rx_desc( + rx_queue, + rx_queue->notified_count & rx_queue->ptr_mask); + ++rx_queue->notified_count; + } + + wmb(); + write_ptr = rx_queue->added_count & rx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, + efx_rx_queue_index(rx_queue)); +} + +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned entries; + + entries = rx_queue->ptr_mask + 1; + return efx_alloc_special_buffer(efx, &rx_queue->rxd, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_rx_init(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + bool jumbo_en; + + /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. 
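+ * The jumbo_en value written to FRF_AZ_RX_DESCQ_JUMBO below is simply
+ * efx->rx_scatter, so scatter is enabled for all kernel-mode RX queues
+ * on the NIC or for none of them.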
*/ + jumbo_en = efx->rx_scatter; + + netif_dbg(efx, hw, efx->net_dev, + "RX queue %d ring in special buffers %d-%d\n", + efx_rx_queue_index(rx_queue), rx_queue->rxd.index, + rx_queue->rxd.index + rx_queue->rxd.entries - 1); + + rx_queue->scatter_n = 0; + + /* Pin RX descriptor ring */ + efx_init_special_buffer(efx, &rx_queue->rxd); + + /* Push RX descriptor ring to card */ + EFX_POPULATE_OWORD_10(rx_desc_ptr, + FRF_AZ_RX_ISCSI_DDIG_EN, true, + FRF_AZ_RX_ISCSI_HDIG_EN, true, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, + FRF_AZ_RX_DESCQ_EVQ_ID, + efx_rx_queue_channel(rx_queue)->channel, + FRF_AZ_RX_DESCQ_OWNER_ID, 0, + FRF_AZ_RX_DESCQ_LABEL, + efx_rx_queue_index(rx_queue), + FRF_AZ_RX_DESCQ_SIZE, + __ffs(rx_queue->rxd.entries), + FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , + FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, + FRF_AZ_RX_DESCQ_EN, 1); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); +} + +static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_oword_t rx_flush_descq; + + EFX_POPULATE_OWORD_2(rx_flush_descq, + FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_RX_FLUSH_DESCQ, + efx_rx_queue_index(rx_queue)); + efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); +} + +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + + /* Remove RX descriptor ring from card */ + EFX_ZERO_OWORD(rx_desc_ptr); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); + + /* Unpin RX descriptor ring */ + efx_fini_special_buffer(efx, &rx_queue->rxd); +} + +/* Free buffers backing RX queue */ +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) +{ + efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); +} + +/************************************************************************** + * + * Flush handling + * + **************************************************************************/ + +/* efx_farch_flush_queues() must be woken up when all flushes are completed, + * or more RX flushes can be kicked off. 
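+ *
+ * Reading the condition below: the waiter wakes either because
+ * active_queues has dropped to zero, or because an RX flush slot has
+ * freed up (fewer than EFX_RX_FLUSH_COUNT outstanding) while further RX
+ * flushes are still pending and can now be issued.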
+ */ +static bool efx_farch_flush_wake(struct efx_nic *efx) +{ + /* Ensure that all updates are visible to efx_farch_flush_queues() */ + smp_mb(); + + return (atomic_read(&efx->active_queues) == 0 || + (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT + && atomic_read(&efx->rxq_flush_pending) > 0)); +} + +static bool efx_check_tx_flush_complete(struct efx_nic *efx) +{ + bool i = true; + efx_oword_t txd_ptr_tbl; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_reado_table(efx, &txd_ptr_tbl, + FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); + if (EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_FLUSH) || + EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_EN)) { + netif_dbg(efx, hw, efx->net_dev, + "flush did not complete on TXQ %d\n", + tx_queue->queue); + i = false; + } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, + 1, 0)) { + /* The flush is complete, but we didn't + * receive a flush completion event + */ + netif_dbg(efx, hw, efx->net_dev, + "flush complete on TXQ %d, so drain " + "the queue\n", tx_queue->queue); + /* Don't need to increment active_queues as it + * has already been incremented for the queues + * which did not drain + */ + efx_farch_magic_event(channel, + EFX_CHANNEL_MAGIC_TX_DRAIN( + tx_queue)); + } + } + } + + return i; +} + +/* Flush all the transmit queues, and continue flushing receive queues until + * they're all flushed. Wait for the DRAIN events to be received so that there + * are no more RX and TX events left on any channel. */ +static int efx_farch_do_flush(struct efx_nic *efx) +{ + unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int rc = 0; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_farch_flush_tx_queue(tx_queue); + } + efx_for_each_channel_rx_queue(rx_queue, channel) { + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } + } + + while (timeout && atomic_read(&efx->active_queues) > 0) { + /* If SRIOV is enabled, then offload receive queue flushing to + * the firmware (though we will still have to poll for + * completion). If that fails, fall back to the old scheme. 
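+ * A zero return from efx_mcdi_flush_rxqs() means the firmware accepted
+ * the request and we only wait for completion events; a non-zero return
+ * falls through to the direct per-queue flush writes below.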
+ */ + if (efx_siena_sriov_enabled(efx)) { + rc = efx_mcdi_flush_rxqs(efx); + if (!rc) + goto wait; + } + + /* The hardware supports four concurrent rx flushes, each of + * which may need to be retried if there is an outstanding + * descriptor fetch + */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + if (atomic_read(&efx->rxq_flush_outstanding) >= + EFX_RX_FLUSH_COUNT) + break; + + if (rx_queue->flush_pending) { + rx_queue->flush_pending = false; + atomic_dec(&efx->rxq_flush_pending); + atomic_inc(&efx->rxq_flush_outstanding); + efx_farch_flush_rx_queue(rx_queue); + } + } + } + + wait: + timeout = wait_event_timeout(efx->flush_wq, + efx_farch_flush_wake(efx), + timeout); + } + + if (atomic_read(&efx->active_queues) && + !efx_check_tx_flush_complete(efx)) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " + "(rx %d+%d)\n", atomic_read(&efx->active_queues), + atomic_read(&efx->rxq_flush_outstanding), + atomic_read(&efx->rxq_flush_pending)); + rc = -ETIMEDOUT; + + atomic_set(&efx->active_queues, 0); + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + } + + return rc; +} + +int efx_farch_fini_dmaq(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc = 0; + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + /* Only perform flush if DMA is enabled */ + if (efx->pci_dev->is_busmaster) { + efx->type->prepare_flush(efx); + rc = efx_farch_do_flush(efx); + efx->type->finish_flush(efx); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_farch_rx_fini(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_farch_tx_fini(tx_queue); + } + } + + return rc; +} + +/* Reset queue and flush accounting after FLR + * + * One possible cause of FLR recovery is that DMA may be failing (eg. if bus + * mastering was disabled), in which case we don't receive (RXQ) flush + * completion events. This means that efx->rxq_flush_outstanding remained at 4 + * after the FLR; also, efx->active_queues was non-zero (as no flush completion + * events were received, and we didn't go through efx_check_tx_flush_complete()) + * If we don't fix this up, on the next call to efx_realloc_channels() we won't + * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 + * for batched flush requests; and the efx->active_queues gets messed up because + * we keep incrementing for the newly initialised queues, but it never went to + * zero previously. Then we get a timeout every time we try to restart the + * queues, as it doesn't go back to zero when we should be flushing the queues. + */ +void efx_farch_finish_flr(struct efx_nic *efx) +{ + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + atomic_set(&efx->active_queues, 0); +} + + +/************************************************************************** + * + * Event queue processing + * Event queues are processed by per-channel tasklets. + * + **************************************************************************/ + +/* Update a channel's event queue's read pointer (RPTR) register + * + * This writes the EVQ_RPTR_REG register for the specified channel's + * event queue. 
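+ *
+ * The pointer is masked before being written: with a 4096-entry event
+ * queue (eventq_mask == 4095), for example, a read pointer of 4097 is
+ * acknowledged as 4097 & 4095 == 1.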
+ */ +void efx_farch_ev_read_ack(struct efx_channel *channel) +{ + efx_dword_t reg; + struct efx_nic *efx = channel->efx; + + EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, + channel->eventq_read_ptr & channel->eventq_mask); + + /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size + * of 4 bytes, but it is really 16 bytes just like later revisions. + */ + efx_writed(efx, ®, + efx->type->evq_rptr_tbl_base + + FR_BZ_EVQ_RPTR_STEP * channel->channel); +} + +/* Use HW to insert a SW defined event */ +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event) +{ + efx_oword_t drv_ev_reg; + + BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || + FRF_AZ_DRV_EV_DATA_WIDTH != 64); + drv_ev_reg.u32[0] = event->u32[0]; + drv_ev_reg.u32[1] = event->u32[1]; + drv_ev_reg.u32[2] = 0; + drv_ev_reg.u32[3] = 0; + EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); + efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); +} + +static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) +{ + efx_qword_t event; + + EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, + FSE_AZ_EV_CODE_DRV_GEN_EV, + FSF_AZ_DRV_GEN_EV_MAGIC, magic); + efx_farch_generate_event(channel->efx, channel->channel, &event); +} + +/* Handle a transmit completion event + * + * The NIC batches TX completion events; the message we receive is of + * the form "complete all TX events up to this index". + */ +static void +efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) +{ + unsigned int tx_ev_desc_ptr; + unsigned int tx_ev_q_label; + struct efx_tx_queue *tx_queue; + struct efx_nic *efx = channel->efx; + + if (unlikely(READ_ONCE(efx->reset_pending))) + return; + + if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { + /* Transmit completion */ + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = channel->tx_queue + + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); + efx_xmit_done(tx_queue, tx_ev_desc_ptr); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { + /* Rewrite the FIFO write pointer */ + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = channel->tx_queue + + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); + + netif_tx_lock(efx->net_dev); + efx_farch_notify_tx_desc(tx_queue); + netif_tx_unlock(efx->net_dev); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } else { + netif_err(efx, tx_err, efx->net_dev, + "channel %d unexpected TX event " + EFX_QWORD_FMT"\n", channel->channel, + EFX_QWORD_VAL(*event)); + } +} + +/* Detect errors included in the rx_evt_pkt_ok bit. 
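+ *
+ * Most of the flags decoded below are only counted for statistics; the
+ * packet is discarded only for CRC errors, truncated frames, TOBE_DISC
+ * or pause frames, and the CRC case is waived when the netdev has
+ * NETIF_F_RXALL set.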
*/ +static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, + const efx_qword_t *event) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; + bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; + bool rx_ev_frm_trunc, rx_ev_tobe_disc; + bool rx_ev_other_err, rx_ev_pause_frm; + + rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); + rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); + rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); + rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); + rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); + rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); + rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); + + /* Every error apart from tobe_disc and pause_frm */ + rx_ev_other_err = (rx_ev_tcp_udp_chksum_err | + rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | + rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); + + /* Count errors that are not in MAC stats. Ignore expected + * checksum errors during self-test. */ + if (rx_ev_frm_trunc) + ++channel->n_rx_frm_trunc; + else if (rx_ev_tobe_disc) + ++channel->n_rx_tobe_disc; + else if (!efx->loopback_selftest) { + if (rx_ev_ip_hdr_chksum_err) + ++channel->n_rx_ip_hdr_chksum_err; + else if (rx_ev_tcp_udp_chksum_err) + ++channel->n_rx_tcp_udp_chksum_err; + } + + /* TOBE_DISC is expected on unicast mismatches; don't print out an + * error message. FRM_TRUNC indicates RXDP dropped the packet due + * to a FIFO overflow. + */ +#ifdef DEBUG + if (rx_ev_other_err && net_ratelimit()) { + netif_dbg(efx, rx_err, efx->net_dev, + " RX queue %d unexpected RX event " + EFX_QWORD_FMT "%s%s%s%s%s%s%s\n", + efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), + rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", + rx_ev_ip_hdr_chksum_err ? + " [IP_HDR_CHKSUM_ERR]" : "", + rx_ev_tcp_udp_chksum_err ? + " [TCP_UDP_CHKSUM_ERR]" : "", + rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", + rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", + rx_ev_tobe_disc ? " [TOBE_DISC]" : "", + rx_ev_pause_frm ? " [PAUSE]" : ""); + } +#else + (void) rx_ev_other_err; +#endif + + if (efx->net_dev->features & NETIF_F_RXALL) + /* don't discard frame for CRC error */ + rx_ev_eth_crc_err = false; + + /* The frame must be discarded if any of these are true. */ + return (rx_ev_eth_crc_err | rx_ev_frm_trunc | + rx_ev_tobe_disc | rx_ev_pause_frm) ? + EFX_RX_PKT_DISCARD : 0; +} + +/* Handle receive events that are not in-order. Return true if this + * can be handled as a partial packet discard, false if it's more + * serious. 
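+ *
+ * The dropped-event count below wraps modulo the ring size: for example,
+ * with ptr_mask == 511, index == 3 and expected == 510, the calculation
+ * (3 - 510) & 511 reports 5 dropped events.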
+ */ +static bool +efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + unsigned expected, dropped; + + if (rx_queue->scatter_n && + index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & + rx_queue->ptr_mask)) { + ++channel->n_rx_nodesc_trunc; + return true; + } + + expected = rx_queue->removed_count & rx_queue->ptr_mask; + dropped = (index - expected) & rx_queue->ptr_mask; + netif_info(efx, rx_err, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, index, expected); + + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + return false; +} + +/* Handle a packet received event + * + * The NIC gives a "discard" flag if it's a unicast packet with the + * wrong destination address + * Also "is multicast" and "matches multicast filter" flags can be used to + * discard non-matching multicast packets. + */ +static void +efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) +{ + unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; + unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; + unsigned expected_ptr; + bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; + u16 flags; + struct efx_rx_queue *rx_queue; + struct efx_nic *efx = channel->efx; + + if (unlikely(READ_ONCE(efx->reset_pending))) + return; + + rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); + rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); + WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != + channel->channel); + + rx_queue = efx_channel_get_rx_queue(channel); + + rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); + expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & + rx_queue->ptr_mask); + + /* Check for partial drops and other errors */ + if (unlikely(rx_ev_desc_ptr != expected_ptr) || + unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { + if (rx_ev_desc_ptr != expected_ptr && + !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) + return; + + /* Discard all pending fragments */ + if (rx_queue->scatter_n) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + } + + /* Return if there is no new fragment */ + if (rx_ev_desc_ptr != expected_ptr) + return; + + /* Discard new fragment if not SOP */ + if (!rx_ev_sop) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + 1, 0, EFX_RX_PKT_DISCARD); + ++rx_queue->removed_count; + return; + } + } + + ++rx_queue->scatter_n; + if (rx_ev_cont) + return; + + rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); + rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); + + if (likely(rx_ev_pkt_ok)) { + /* If packet is marked as OK then we can rely on the + * hardware checksum and classification. 
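+ * The switch below relies on fallthrough: the IPv4/v6 TCP header type
+ * sets both EFX_RX_PKT_TCP and EFX_RX_PKT_CSUMMED, the UDP type sets
+ * only EFX_RX_PKT_CSUMMED, and the remaining types set neither flag.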
+ */ + flags = 0; + switch (rx_ev_hdr_type) { + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: + flags |= EFX_RX_PKT_TCP; + fallthrough; + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: + flags |= EFX_RX_PKT_CSUMMED; + fallthrough; + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: + case FSE_AZ_RX_EV_HDR_TYPE_OTHER: + break; + } + } else { + flags = efx_farch_handle_rx_not_ok(rx_queue, event); + } + + /* Detect multicast packets that didn't match the filter */ + rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); + if (rx_ev_mcast_pkt) { + unsigned int rx_ev_mcast_hash_match = + EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); + + if (unlikely(!rx_ev_mcast_hash_match)) { + ++channel->n_rx_mcast_mismatch; + flags |= EFX_RX_PKT_DISCARD; + } + } + + channel->irq_mod_score += 2; + + /* Handle received packet */ + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_ev_byte_cnt, flags); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; +} + +/* If this flush done event corresponds to a &struct efx_tx_queue, then + * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue + * of all transmit completions. + */ +static void +efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int qid; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) { + channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL); + tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL); + if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) + efx_farch_magic_event(tx_queue->channel, + EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); + } +} + +/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush + * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add + * the RX queue back to the mask of RX queues in need of flushing. + */ +static void +efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + int qid; + bool failed; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + if (qid >= efx->n_channels) + return; + channel = efx_get_channel(efx, qid); + if (!efx_channel_has_rx_queue(channel)) + return; + rx_queue = efx_channel_get_rx_queue(channel); + + if (failed) { + netif_info(efx, hw, efx->net_dev, + "RXQ %d flush retry\n", qid); + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } else { + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); + } + atomic_dec(&efx->rxq_flush_outstanding); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void +efx_farch_handle_drain_event(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + WARN_ON(atomic_read(&efx->active_queues) == 0); + atomic_dec(&efx->active_queues); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void efx_farch_handle_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue = + efx_channel_has_rx_queue(channel) ? 
+ efx_channel_get_rx_queue(channel) : NULL; + unsigned magic, code; + + magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); + code = _EFX_CHANNEL_MAGIC_CODE(magic); + + if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { + channel->event_test_cpu = raw_smp_processor_id(); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. Refill it here */ + efx_fast_push_rx_descriptors(rx_queue, true); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { + efx_farch_handle_drain_event(channel); + } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { + efx_farch_handle_drain_event(channel); + } else { + netif_dbg(efx, hw, efx->net_dev, "channel %d received " + "generated event "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(*event)); + } +} + +static void +efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + unsigned int ev_sub_code; + unsigned int ev_sub_data; + + ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); + ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + + switch (ev_sub_code) { + case FSE_AZ_TX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_tx_flush_done(efx, event); +#ifdef CONFIG_SFC_SRIOV + efx_siena_sriov_tx_flush_done(efx, event); +#endif + break; + case FSE_AZ_RX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_rx_flush_done(efx, event); +#ifdef CONFIG_SFC_SRIOV + efx_siena_sriov_rx_flush_done(efx, event); +#endif + break; + case FSE_AZ_EVQ_INIT_DONE_EV: + netif_dbg(efx, hw, efx->net_dev, + "channel %d EVQ %d initialised\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_SRM_UPD_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d SRAM update done\n", channel->channel); + break; + case FSE_AZ_WAKE_UP_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RXQ %d wakeup event\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_TIMER_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RX queue %d timer expired\n", + channel->channel, ev_sub_data); + break; + case FSE_AA_RX_RECOVER_EV: + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen DRIVER RX_RESET event. " + "Resetting.\n", channel->channel); + atomic_inc(&efx->rx_reset); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + break; + case FSE_BZ_RX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, rx_err, efx->net_dev, + "RX DMA Q %d reports descriptor fetch error." + " RX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } +#ifdef CONFIG_SFC_SRIOV + else + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); +#endif + break; + case FSE_BZ_TX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, tx_err, efx->net_dev, + "TX DMA Q %d reports descriptor fetch error." 
+ " TX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } +#ifdef CONFIG_SFC_SRIOV + else + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); +#endif + break; + default: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d unknown driver event code %d " + "data %04x\n", channel->channel, ev_sub_code, + ev_sub_data); + break; + } +} + +int efx_farch_ev_process(struct efx_channel *channel, int budget) +{ + struct efx_nic *efx = channel->efx; + unsigned int read_ptr; + efx_qword_t event, *p_event; + int ev_code; + int spent = 0; + + if (budget <= 0) + return spent; + + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr); + event = *p_event; + + if (!efx_event_present(&event)) + /* End of events */ + break; + + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d event is "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(event)); + + /* Clear this event by marking it all ones */ + EFX_SET_QWORD(*p_event); + + ++read_ptr; + + ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); + + switch (ev_code) { + case FSE_AZ_EV_CODE_RX_EV: + efx_farch_handle_rx_event(channel, &event); + if (++spent == budget) + goto out; + break; + case FSE_AZ_EV_CODE_TX_EV: + efx_farch_handle_tx_event(channel, &event); + break; + case FSE_AZ_EV_CODE_DRV_GEN_EV: + efx_farch_handle_generated_event(channel, &event); + break; + case FSE_AZ_EV_CODE_DRIVER_EV: + efx_farch_handle_driver_event(channel, &event); + break; +#ifdef CONFIG_SFC_SRIOV + case FSE_CZ_EV_CODE_USER_EV: + efx_siena_sriov_event(channel, &event); + break; +#endif + case FSE_CZ_EV_CODE_MCDI_EV: + efx_mcdi_process_event(channel, &event); + break; + case FSE_AZ_EV_CODE_GLOBAL_EV: + if (efx->type->handle_global_event && + efx->type->handle_global_event(channel, &event)) + break; + fallthrough; + default: + netif_err(channel->efx, hw, channel->efx->net_dev, + "channel %d unknown event type %d (data " + EFX_QWORD_FMT ")\n", channel->channel, + ev_code, EFX_QWORD_VAL(event)); + } + } + +out: + channel->eventq_read_ptr = read_ptr; + return spent; +} + +/* Allocate buffer table entries for event queue */ +int efx_farch_ev_probe(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned entries; + + entries = channel->eventq_mask + 1; + return efx_alloc_special_buffer(efx, &channel->eventq, + entries * sizeof(efx_qword_t)); +} + +int efx_farch_ev_init(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + netif_dbg(efx, hw, efx->net_dev, + "channel %d event queue in special buffers %d-%d\n", + channel->channel, channel->eventq.index, + channel->eventq.index + channel->eventq.entries - 1); + + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); + + /* Pin event queue buffer */ + efx_init_special_buffer(efx, &channel->eventq); + + /* Fill event queue with all ones (i.e. 
empty events) */ + memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); + + /* Push event queue to card */ + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), + FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); + efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, + channel->channel); + + return 0; +} + +void efx_farch_ev_fini(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + /* Remove event queue from card */ + EFX_ZERO_OWORD(reg); + efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, + channel->channel); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); + + /* Unpin event queue */ + efx_fini_special_buffer(efx, &channel->eventq); +} + +/* Free buffers backing event queue */ +void efx_farch_ev_remove(struct efx_channel *channel) +{ + efx_free_special_buffer(channel->efx, &channel->eventq); +} + + +void efx_farch_ev_test_generate(struct efx_channel *channel) +{ + efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); +} + +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) +{ + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_FILL(rx_queue)); +} + +/************************************************************************** + * + * Hardware interrupts + * The hardware interrupt handler does very little work; all the event + * queue processing is carried out by per-channel tasklets. + * + **************************************************************************/ + +/* Enable/disable/generate interrupts */ +static inline void efx_farch_interrupts(struct efx_nic *efx, + bool enabled, bool force) +{ + efx_oword_t int_en_reg_ker; + + EFX_POPULATE_OWORD_3(int_en_reg_ker, + FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, + FRF_AZ_KER_INT_KER, force, + FRF_AZ_DRV_INT_EN_KER, enabled); + efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); +} + +void efx_farch_irq_enable_master(struct efx_nic *efx) +{ + EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); + wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ + + efx_farch_interrupts(efx, true, false); +} + +void efx_farch_irq_disable_master(struct efx_nic *efx) +{ + /* Disable interrupts */ + efx_farch_interrupts(efx, false, false); +} + +/* Generate a test interrupt + * Interrupt must already have been enabled, otherwise nasty things + * may happen. + */ +int efx_farch_irq_test_generate(struct efx_nic *efx) +{ + efx_farch_interrupts(efx, true, true); + return 0; +} + +/* Process a fatal interrupt + * Disable bus mastering ASAP and schedule a reset + */ +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) +{ + efx_oword_t *int_ker = efx->irq_status.addr; + efx_oword_t fatal_intr; + int error, mem_perr; + + efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); + error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); + + netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " + EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), + EFX_OWORD_VAL(fatal_intr), + error ? 
"disabling bus mastering" : "no recognised error"); + + /* If this is a memory parity error dump which blocks are offending */ + mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || + EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); + if (mem_perr) { + efx_oword_t reg; + efx_reado(efx, ®, FR_AZ_MEM_STAT); + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(reg)); + } + + /* Disable both devices */ + pci_clear_master(efx->pci_dev); + efx_farch_irq_disable_master(efx); + + /* Count errors and reset or disable the NIC accordingly */ + if (efx->int_error_count == 0 || + time_after(jiffies, efx->int_error_expire)) { + efx->int_error_count = 0; + efx->int_error_expire = + jiffies + EFX_INT_ERROR_EXPIRE * HZ; + } + if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - reset scheduled\n"); + efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); + } else { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - max number of errors seen." + "NIC will be disabled\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + } + + return IRQ_HANDLED; +} + +/* Handle a legacy interrupt + * Acknowledges the interrupt and schedule event queue processing. + */ +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) +{ + struct efx_nic *efx = dev_id; + bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); + efx_oword_t *int_ker = efx->irq_status.addr; + irqreturn_t result = IRQ_NONE; + struct efx_channel *channel; + efx_dword_t reg; + u32 queues; + int syserr; + + /* Read the ISR which also ACKs the interrupts */ + efx_readd(efx, ®, FR_BZ_INT_ISR0); + queues = EFX_EXTRACT_DWORD(reg, 0, 31); + + /* Legacy interrupts are disabled too late by the EEH kernel + * code. Disable them earlier. + * If an EEH error occurred, the read will have returned all ones. + */ + if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && + !efx->eeh_disabled_legacy_irq) { + disable_irq_nosync(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = true; + } + + /* Handle non-event-queue sources */ + if (queues & (1U << efx->irq_level) && soft_enabled) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + if (queues != 0) { + efx->irq_zero_count = 0; + + /* Schedule processing of any interrupting queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + if (queues & 1) + efx_schedule_channel_irq(channel); + queues >>= 1; + } + } + result = IRQ_HANDLED; + + } else { + efx_qword_t *event; + + /* Legacy ISR read can return zero once (SF bug 15783) */ + + /* We can't return IRQ_HANDLED more than once on seeing ISR=0 + * because this might be a shared interrupt. */ + if (efx->irq_zero_count++ == 0) + result = IRQ_HANDLED; + + /* Ensure we schedule or rearm all event queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + event = efx_event(channel, + channel->eventq_read_ptr); + if (efx_event_present(event)) + efx_schedule_channel_irq(channel); + else + efx_farch_ev_read_ack(channel); + } + } + } + + if (result == IRQ_HANDLED) + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); + + return result; +} + +/* Handle an MSI interrupt + * + * Handle an MSI hardware interrupt. This routine schedules event + * queue processing. 
No interrupt acknowledgement cycle is necessary. + * Also, we never need to check that the interrupt is for us, since + * MSI interrupts cannot be shared. + */ +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + efx_oword_t *int_ker = efx->irq_status.addr; + int syserr; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + + if (!likely(READ_ONCE(efx->irq_soft_enabled))) + return IRQ_HANDLED; + + /* Handle non-event-queue sources */ + if (context->index == efx->irq_level) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(efx->channel[context->index]); + + return IRQ_HANDLED; +} + +/* Setup RSS indirection table. + * This maps from the hash value of the packet to RXQ + */ +void efx_farch_rx_push_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, + efx->rss_context.rx_indir_table[i]); + efx_writed(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + } +} + +void efx_farch_rx_pull_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + efx_readd(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE); + } +} + +/* Looks at available SRAM resources and works out how many queues we + * can support, and where things like descriptor caches should live. + * + * SRAM is split up as follows: + * 0 buftbl entries for channels + * efx->vf_buftbl_base buftbl entries for SR-IOV + * efx->rx_dc_base RX descriptor caches + * efx->tx_dc_base TX descriptor caches + */ +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) +{ + unsigned vi_count, total_tx_channels; +#ifdef CONFIG_SFC_SRIOV + struct siena_nic_data *nic_data; + unsigned buftbl_min; +#endif + + total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; + vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL); + +#ifdef CONFIG_SFC_SRIOV + nic_data = efx->nic_data; + /* Account for the buffer table entries backing the datapath channels + * and the descriptor caches for those channels. 
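+ * Each descriptor and event is an efx_qword_t, so the expression below
+ * converts the per-channel RX, TX and event queue sizes into the number
+ * of EFX_BUF_SIZE-sized buffer table entries that back them.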
+ */ + buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + + total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE + + efx->n_channels * EFX_MAX_EVQ_SIZE) + * sizeof(efx_qword_t) / EFX_BUF_SIZE); + if (efx->type->sriov_wanted) { + if (efx->type->sriov_wanted(efx)) { + unsigned vi_dc_entries, buftbl_free; + unsigned entries_per_vf, vf_limit; + + nic_data->vf_buftbl_base = buftbl_min; + + vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; + vi_count = max(vi_count, EFX_VI_BASE); + buftbl_free = (sram_lim_qw - buftbl_min - + vi_count * vi_dc_entries); + + entries_per_vf = ((vi_dc_entries + + EFX_VF_BUFTBL_PER_VI) * + efx_vf_size(efx)); + vf_limit = min(buftbl_free / entries_per_vf, + (1024U - EFX_VI_BASE) >> efx->vi_scale); + + if (efx->vf_count > vf_limit) { + netif_err(efx, probe, efx->net_dev, + "Reducing VF count from from %d to %d\n", + efx->vf_count, vf_limit); + efx->vf_count = vf_limit; + } + vi_count += efx->vf_count * efx_vf_size(efx); + } + } +#endif + + efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; + efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; +} + +u32 efx_farch_fpga_ver(struct efx_nic *efx) +{ + efx_oword_t altera_build; + efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); + return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); +} + +void efx_farch_init_common(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Set positions of descriptor caches in SRAM. */ + EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); + efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); + efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); + + /* Set TX descriptor cache size. */ + BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); + efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); + + /* Set RX descriptor cache size. Set low watermark to size-8, as + * this allows most efficient prefetching. + */ + BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); + efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); + efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); + + /* Program INT_KER address */ + EFX_POPULATE_OWORD_2(temp, + FRF_AZ_NORM_INT_VEC_DIS_KER, + EFX_INT_MODE_USE_MSI(efx), + FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); + efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); + + if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) + /* Use an interrupt level unused by event queues */ + efx->irq_level = 0x1f; + else + /* Use a valid MSI-X vector */ + efx->irq_level = 0; + + /* Enable all the genuinely fatal interrupts. (They are still + * masked by the overall interrupt mask, controlled by + * falcon_interrupts()). + * + * Note: All other fatal interrupts are enabled + */ + EFX_POPULATE_OWORD_3(temp, + FRF_AZ_ILL_ADR_INT_KER_EN, 1, + FRF_AZ_RBUF_OWN_INT_KER_EN, 1, + FRF_AZ_TBUF_OWN_INT_KER_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); + EFX_INVERT_OWORD(temp); + efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); + + /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be + * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
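+ * What follows is a read-modify-write of FR_AZ_TX_RESERVED: the register
+ * is read once, individual fields are adjusted with EFX_SET_OWORD_FIELD()
+ * and the result is written back with a single efx_writeo() at the end.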
+ */ + efx_reado(efx, &temp, FR_AZ_TX_RESERVED); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); + /* Enable SW_EV to inherit in char driver - assume harmless here */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); + /* Prefetch threshold 2 => fetch when descriptor cache half empty */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); + /* Disable hardware watchdog which can misfire */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); + /* Squash TX of packets of 16 bytes or less */ + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + EFX_POPULATE_OWORD_4(temp, + /* Default values */ + FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, + FRF_BZ_TX_PACE_SB_AF, 0xb, + FRF_BZ_TX_PACE_FB_BASE, 0, + /* Allow large pace values in the fast bin. */ + FRF_BZ_TX_PACE_BIN_TH, + FFE_BZ_TX_PACE_RESERVED); + efx_writeo(efx, &temp, FR_BZ_TX_PACE); +} + +/************************************************************************** + * + * Filter tables + * + ************************************************************************** + */ + +/* "Fudge factors" - difference between programmed value and actual depth. + * Due to pipelined implementation we need to program H/W with a value that + * is larger than the hop limit we want. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 + +/* Hard maximum search limit. Hardware will time-out beyond 200-something. + * We also need to avoid infinite loops in efx_farch_filter_search() when the + * table is full. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 + +/* Don't try very hard to find space for performance hints, as this is + * counter-productive. 
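+ * Hence the hint search limit below is only 5 probes, against the
+ * 200-probe EFX_FARCH_FILTER_CTL_SRCH_MAX used for ordinary insertions.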
*/ +#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 + +enum efx_farch_filter_type { + EFX_FARCH_FILTER_TCP_FULL = 0, + EFX_FARCH_FILTER_TCP_WILD, + EFX_FARCH_FILTER_UDP_FULL, + EFX_FARCH_FILTER_UDP_WILD, + EFX_FARCH_FILTER_MAC_FULL = 4, + EFX_FARCH_FILTER_MAC_WILD, + EFX_FARCH_FILTER_UC_DEF = 8, + EFX_FARCH_FILTER_MC_DEF, + EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ +}; + +enum efx_farch_filter_table_id { + EFX_FARCH_FILTER_TABLE_RX_IP = 0, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, + EFX_FARCH_FILTER_TABLE_TX_MAC, + EFX_FARCH_FILTER_TABLE_COUNT, +}; + +enum efx_farch_filter_index { + EFX_FARCH_FILTER_INDEX_UC_DEF, + EFX_FARCH_FILTER_INDEX_MC_DEF, + EFX_FARCH_FILTER_SIZE_RX_DEF, +}; + +struct efx_farch_filter_spec { + u8 type:4; + u8 priority:4; + u8 flags; + u16 dmaq_id; + u32 data[3]; +}; + +struct efx_farch_filter_table { + enum efx_farch_filter_table_id id; + u32 offset; /* address of table relative to BAR */ + unsigned size; /* number of entries */ + unsigned step; /* step between entries */ + unsigned used; /* number currently used */ + unsigned long *used_bitmap; + struct efx_farch_filter_spec *spec; + unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; +}; + +struct efx_farch_filter_state { + struct rw_semaphore lock; /* Protects table contents */ + struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; +}; + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx); + +/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit + * key derived from the n-tuple. The initial LFSR state is 0xffff. */ +static u16 efx_farch_filter_hash(u32 key) +{ + u16 tmp; + + /* First 16 rounds */ + tmp = 0x1fff ^ key >> 16; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + tmp = tmp ^ tmp >> 9; + /* Last 16 rounds */ + tmp = tmp ^ tmp << 13 ^ key; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + return tmp ^ tmp >> 9; +} + +/* To allow for hash collisions, filter search continues at these + * increments from the first possible entry selected by the hash. */ +static u16 efx_farch_filter_increment(u32 key) +{ + return key * 2 - 1; +} + +static enum efx_farch_filter_table_id +efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) +{ + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != + EFX_FARCH_FILTER_TABLE_RX_MAC + 2); + return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); +} + +static void efx_farch_filter_push_rx_config(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t filter_ctl; + + efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + + /* There is a single bit to enable RX scatter for all + * unmatched packets. Only set it if scatter is + * enabled in both filter specs. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_SCATTER)); + } else { + /* We don't expose 'default' filters because unmatched + * packets always go to the queue number found in the + * RSS table. But we still need to set the RX scatter + * bit here. 
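+ * In that case the no-match scatter enable written below simply tracks
+ * efx->rx_scatter rather than any per-filter flag.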
+ */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + efx->rx_scatter); + } + + efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); +} + +static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t tx_cfg; + + efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); + + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); +} + +static int +efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, + const struct efx_filter_spec *gen_spec) +{ + bool is_full = false; + + if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context) + return -EINVAL; + + spec->priority = gen_spec->priority; + spec->flags = gen_spec->flags; + spec->dmaq_id = gen_spec->dmaq_id; + + switch (gen_spec->match_flags) { + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): + is_full = true; + fallthrough; + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { + __be32 rhost, host1, host2; + __be16 rport, port1, port2; + + EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); + + if (gen_spec->ether_type != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + if (gen_spec->loc_port == 0 || + (is_full && gen_spec->rem_port == 0)) + return -EADDRNOTAVAIL; + switch (gen_spec->ip_proto) { + case IPPROTO_TCP: + spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : + EFX_FARCH_FILTER_TCP_WILD); + break; + case IPPROTO_UDP: + spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : + EFX_FARCH_FILTER_UDP_WILD); + break; + default: + return -EPROTONOSUPPORT; + } + + /* Filter is constructed in terms of source and destination, + * with the odd wrinkle that the ports are swapped in a UDP + * wildcard filter. We need to convert from local and remote + * (= zero for wildcard) addresses. + */ + rhost = is_full ? gen_spec->rem_host[0] : 0; + rport = is_full ? gen_spec->rem_port : 0; + host1 = rhost; + host2 = gen_spec->loc_host[0]; + if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { + port1 = gen_spec->loc_port; + port2 = rport; + } else { + port1 = rport; + port2 = gen_spec->loc_port; + } + spec->data[0] = ntohl(host1) << 16 | ntohs(port1); + spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; + spec->data[2] = ntohl(host2); + + break; + } + + case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: + is_full = true; + fallthrough; + case EFX_FILTER_MATCH_LOC_MAC: + spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : + EFX_FARCH_FILTER_MAC_WILD); + spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; + spec->data[1] = (gen_spec->loc_mac[2] << 24 | + gen_spec->loc_mac[3] << 16 | + gen_spec->loc_mac[4] << 8 | + gen_spec->loc_mac[5]); + spec->data[2] = (gen_spec->loc_mac[0] << 8 | + gen_spec->loc_mac[1]); + break; + + case EFX_FILTER_MATCH_LOC_MAC_IG: + spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 
+ EFX_FARCH_FILTER_MC_DEF : + EFX_FARCH_FILTER_UC_DEF); + memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ + break; + + default: + return -EPROTONOSUPPORT; + } + + return 0; +} + +static void +efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, + const struct efx_farch_filter_spec *spec) +{ + bool is_full = false; + + /* *gen_spec should be completely initialised, to be consistent + * with efx_filter_init_{rx,tx}() and in case we want to copy + * it back to userland. + */ + memset(gen_spec, 0, sizeof(*gen_spec)); + + gen_spec->priority = spec->priority; + gen_spec->flags = spec->flags; + gen_spec->dmaq_id = spec->dmaq_id; + + switch (spec->type) { + case EFX_FARCH_FILTER_TCP_FULL: + case EFX_FARCH_FILTER_UDP_FULL: + is_full = true; + fallthrough; + case EFX_FARCH_FILTER_TCP_WILD: + case EFX_FARCH_FILTER_UDP_WILD: { + __be32 host1, host2; + __be16 port1, port2; + + gen_spec->match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + if (is_full) + gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_REM_PORT); + gen_spec->ether_type = htons(ETH_P_IP); + gen_spec->ip_proto = + (spec->type == EFX_FARCH_FILTER_TCP_FULL || + spec->type == EFX_FARCH_FILTER_TCP_WILD) ? + IPPROTO_TCP : IPPROTO_UDP; + + host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); + port1 = htons(spec->data[0]); + host2 = htonl(spec->data[2]); + port2 = htons(spec->data[1] >> 16); + if (spec->flags & EFX_FILTER_FLAG_TX) { + gen_spec->loc_host[0] = host1; + gen_spec->rem_host[0] = host2; + } else { + gen_spec->loc_host[0] = host2; + gen_spec->rem_host[0] = host1; + } + if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ + (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { + gen_spec->loc_port = port1; + gen_spec->rem_port = port2; + } else { + gen_spec->loc_port = port2; + gen_spec->rem_port = port1; + } + + break; + } + + case EFX_FARCH_FILTER_MAC_FULL: + is_full = true; + fallthrough; + case EFX_FARCH_FILTER_MAC_WILD: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; + if (is_full) + gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + gen_spec->loc_mac[0] = spec->data[2] >> 8; + gen_spec->loc_mac[1] = spec->data[2]; + gen_spec->loc_mac[2] = spec->data[1] >> 24; + gen_spec->loc_mac[3] = spec->data[1] >> 16; + gen_spec->loc_mac[4] = spec->data[1] >> 8; + gen_spec->loc_mac[5] = spec->data[1]; + gen_spec->outer_vid = htons(spec->data[0]); + break; + + case EFX_FARCH_FILTER_UC_DEF: + case EFX_FARCH_FILTER_MC_DEF: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; + gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; + break; + + default: + WARN_ON(1); + break; + } +} + +static void +efx_farch_filter_init_rx_auto(struct efx_nic *efx, + struct efx_farch_filter_spec *spec) +{ + /* If there's only one channel then disable RSS for non VF + * traffic, thereby allowing VFs to use RSS when the PF can't. + */ + spec->priority = EFX_FILTER_PRI_AUTO; + spec->flags = (EFX_FILTER_FLAG_RX | + (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); + spec->dmaq_id = 0; +} + +/* Build a filter entry and return its n-tuple key. 
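+ *
+ * The key returned below is data[0] ^ data[1] ^ data[2] ^ data3, where
+ * data3 depends on the table type; efx_farch_filter_insert() feeds it
+ * through efx_farch_filter_hash() and efx_farch_filter_increment() to
+ * derive the probe sequence in the hash table.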
*/ +static u32 efx_farch_filter_build(efx_oword_t *filter, + struct efx_farch_filter_spec *spec) +{ + u32 data3; + + switch (efx_farch_filter_spec_table_id(spec)) { + case EFX_FARCH_FILTER_TABLE_RX_IP: { + bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || + spec->type == EFX_FARCH_FILTER_UDP_WILD); + EFX_POPULATE_OWORD_7( + *filter, + FRF_BZ_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_BZ_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_BZ_TCP_UDP, is_udp, + FRF_BZ_RXQ_ID, spec->dmaq_id, + EFX_DWORD_2, spec->data[2], + EFX_DWORD_1, spec->data[1], + EFX_DWORD_0, spec->data[0]); + data3 = is_udp; + break; + } + + case EFX_FARCH_FILTER_TABLE_RX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_7( + *filter, + FRF_CZ_RMFT_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_CZ_RMFT_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, + FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], + FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], + FRF_CZ_RMFT_VLAN_ID, spec->data[0]); + data3 = is_wild; + break; + } + + case EFX_FARCH_FILTER_TABLE_TX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_5(*filter, + FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, + FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], + FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], + FRF_CZ_TMFT_VLAN_ID, spec->data[0]); + data3 = is_wild | spec->dmaq_id << 1; + break; + } + + default: + BUG(); + } + + return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; +} + +static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, + const struct efx_farch_filter_spec *right) +{ + if (left->type != right->type || + memcmp(left->data, right->data, sizeof(left->data))) + return false; + + if (left->flags & EFX_FILTER_FLAG_TX && + left->dmaq_id != right->dmaq_id) + return false; + + return true; +} + +/* + * Construct/deconstruct external filter IDs. At least the RX filter + * IDs must be ordered by matching priority, for RX NFC semantics. + * + * Deconstruction needs to be robust against invalid IDs so that + * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can + * accept user-provided IDs. 
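+ *
+ * An external ID is (match-priority range << 13) | index within the
+ * table; for illustration, a TCP wildcard RX filter at table index 7
+ * becomes (1 << 13) | 7 == 8199.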
+ */ + +#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 + +static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { + [EFX_FARCH_FILTER_TCP_FULL] = 0, + [EFX_FARCH_FILTER_UDP_FULL] = 0, + [EFX_FARCH_FILTER_TCP_WILD] = 1, + [EFX_FARCH_FILTER_UDP_WILD] = 1, + [EFX_FARCH_FILTER_MAC_FULL] = 2, + [EFX_FARCH_FILTER_MAC_WILD] = 3, + [EFX_FARCH_FILTER_UC_DEF] = 4, + [EFX_FARCH_FILTER_MC_DEF] = 4, +}; + +static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { + EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ + EFX_FARCH_FILTER_TABLE_RX_IP, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ +}; + +#define EFX_FARCH_FILTER_INDEX_WIDTH 13 +#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) + +static inline u32 +efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, + unsigned int index) +{ + unsigned int range; + + range = efx_farch_filter_type_match_pri[spec->type]; + if (!(spec->flags & EFX_FILTER_FLAG_RX)) + range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; + + return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; +} + +static inline enum efx_farch_filter_table_id +efx_farch_filter_id_table_id(u32 id) +{ + unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; + + if (range < ARRAY_SIZE(efx_farch_filter_range_table)) + return efx_farch_filter_range_table[range]; + else + return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ +} + +static inline unsigned int efx_farch_filter_id_index(u32 id) +{ + return id & EFX_FARCH_FILTER_INDEX_MASK; +} + +u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; + enum efx_farch_filter_table_id table_id; + + do { + table_id = efx_farch_filter_range_table[range]; + if (state->table[table_id].size != 0) + return range << EFX_FARCH_FILTER_INDEX_WIDTH | + state->table[table_id].size; + } while (range--); + + return 0; +} + +s32 efx_farch_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *gen_spec, + bool replace_equal) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec spec; + efx_oword_t filter; + int rep_index, ins_index; + unsigned int depth = 0; + int rc; + + rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); + if (rc) + return rc; + + down_write(&state->lock); + + table = &state->table[efx_farch_filter_spec_table_id(&spec)]; + if (table->size == 0) { + rc = -EINVAL; + goto out_unlock; + } + + netif_vdbg(efx, hw, efx->net_dev, + "%s: type %d search_limit=%d", __func__, spec.type, + table->search_limit[spec.type]); + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + /* One filter spec per type */ + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != + EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); + rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; + ins_index = rep_index; + } else { + /* Search concurrently for + * (1) a filter to be replaced (rep_index): any filter + * with the same match values, up to the current + * search depth for this type, and + * (2) the insertion point (ins_index): (1) or any + * free slot before it or up to the maximum search + * depth for this priority + * We fail if we cannot find (2). 
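+ * (Editor's illustrative note, not part of the original patch: the
+ * probe sequence used below is i, i + incr, i + 2 * incr, ... masked
+ * to the table size, so for a 256-entry table with hash 0x12 and
+ * increment 0x05 the slots visited are 0x12, 0x17, 0x1c, ...)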
+ * + * We can stop once either + * (a) we find (1), in which case we have definitely + * found (2) as well; or + * (b) we have searched exhaustively for (1), and have + * either found (2) or searched exhaustively for it + */ + u32 key = efx_farch_filter_build(&filter, &spec); + unsigned int hash = efx_farch_filter_hash(key); + unsigned int incr = efx_farch_filter_increment(key); + unsigned int max_rep_depth = table->search_limit[spec.type]; + unsigned int max_ins_depth = + spec.priority <= EFX_FILTER_PRI_HINT ? + EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : + EFX_FARCH_FILTER_CTL_SRCH_MAX; + unsigned int i = hash & (table->size - 1); + + ins_index = -1; + depth = 1; + + for (;;) { + if (!test_bit(i, table->used_bitmap)) { + if (ins_index < 0) + ins_index = i; + } else if (efx_farch_filter_equal(&spec, + &table->spec[i])) { + /* Case (a) */ + if (ins_index < 0) + ins_index = i; + rep_index = i; + break; + } + + if (depth >= max_rep_depth && + (ins_index >= 0 || depth >= max_ins_depth)) { + /* Case (b) */ + if (ins_index < 0) { + rc = -EBUSY; + goto out_unlock; + } + rep_index = -1; + break; + } + + i = (i + incr) & (table->size - 1); + ++depth; + } + } + + /* If we found a filter to be replaced, check whether we + * should do so + */ + if (rep_index >= 0) { + struct efx_farch_filter_spec *saved_spec = + &table->spec[rep_index]; + + if (spec.priority == saved_spec->priority && !replace_equal) { + rc = -EEXIST; + goto out_unlock; + } + if (spec.priority < saved_spec->priority) { + rc = -EPERM; + goto out_unlock; + } + if (saved_spec->priority == EFX_FILTER_PRI_AUTO || + saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) + spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; + } + + /* Insert the filter */ + if (ins_index != rep_index) { + __set_bit(ins_index, table->used_bitmap); + ++table->used; + } + table->spec[ins_index] = spec; + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + efx_farch_filter_push_rx_config(efx); + } else { + if (table->search_limit[spec.type] < depth) { + table->search_limit[spec.type] = depth; + if (spec.flags & EFX_FILTER_FLAG_TX) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } + + efx_writeo(efx, &filter, + table->offset + table->step * ins_index); + + /* If we were able to replace a filter by inserting + * at a lower depth, clear the replaced filter + */ + if (ins_index != rep_index && rep_index >= 0) + efx_farch_filter_table_clear_entry(efx, table, + rep_index); + } + + netif_vdbg(efx, hw, efx->net_dev, + "%s: filter type %d index %d rxq %u set", + __func__, spec.type, ins_index, spec.dmaq_id); + rc = efx_farch_filter_make_id(&spec, ins_index); + +out_unlock: + up_write(&state->lock); + return rc; +} + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx) +{ + static efx_oword_t filter; + + EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); + BUG_ON(table->offset == 0); /* can't clear MAC default filters */ + + __clear_bit(filter_idx, table->used_bitmap); + --table->used; + memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); + + efx_writeo(efx, &filter, table->offset + table->step * filter_idx); + + /* If this filter required a greater search depth than + * any other, the search limit for its type can now be + * decreased. However, it is hard to determine that + * unless the table has become completely empty - in + * which case, all its search limits can be set to 0. 
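+ *
+ * (Editor's illustrative note, not part of the original patch: if an
+ * earlier insertion pushed search_limit[type] up to, say, 5, removing
+ * that one entry does not let us lower the limit, because another
+ * live filter of the same type may also sit at depth 5.)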
+ */ + if (unlikely(table->used == 0)) { + memset(table->search_limit, 0, sizeof(table->search_limit)); + if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } +} + +static int efx_farch_filter_remove(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; + + if (!test_bit(filter_idx, table->used_bitmap) || + spec->priority != priority) + return -ENOENT; + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { + efx_farch_filter_init_rx_auto(efx, spec); + efx_farch_filter_push_rx_config(efx); + } else { + efx_farch_filter_table_clear_entry(efx, table, filter_idx); + } + + return 0; +} + +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + int rc; + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + down_write(&state->lock); + + rc = efx_farch_filter_remove(efx, table, filter_idx, priority); + up_write(&state->lock); + + return rc; +} + +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec_buf) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec *spec; + unsigned int filter_idx; + int rc = -ENOENT; + + down_read(&state->lock); + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + goto out_unlock; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + goto out_unlock; + spec = &table->spec[filter_idx]; + + if (test_bit(filter_idx, table->used_bitmap) && + spec->priority == priority) { + efx_farch_filter_to_gen_spec(spec_buf, spec); + rc = 0; + } + +out_unlock: + up_read(&state->lock); + return rc; +} + +static void +efx_farch_filter_table_clear(struct efx_nic *efx, + enum efx_farch_filter_table_id table_id, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table = &state->table[table_id]; + unsigned int filter_idx; + + down_write(&state->lock); + for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { + if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) + efx_farch_filter_remove(efx, table, + filter_idx, priority); + } + up_write(&state->lock); +} + +int efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, + priority); + return 0; +} + +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + 
struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ down_read(&state->lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ up_read(&state->lock);
+
+ return count;
+}
+
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ down_read(&state->lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_farch_filter_make_id(
+ &table->spec[filter_idx], filter_idx);
+ }
+ }
+ }
+out:
+ up_read(&state->lock);
+
+ return count;
+}
+
+/* Restore filter state after reset */
+void efx_farch_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ down_write(&state->lock);
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+ efx_farch_filter_push_tx_limits(efx);
+
+ up_write(&state->lock);
+}
+
+void efx_farch_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
+
+int efx_farch_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state;
+ struct efx_farch_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+ init_rwsem(&state->lock);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
+ table =
&state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; + table->offset = FR_CZ_TX_MAC_FILTER_TBL0; + table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; + table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; + + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + table = &state->table[table_id]; + if (table->size == 0) + continue; + table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), + sizeof(unsigned long), + GFP_KERNEL); + if (!table->used_bitmap) + goto fail; + table->spec = vzalloc(array_size(sizeof(*table->spec), + table->size)); + if (!table->spec) + goto fail; + } + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { + /* RX default filters must always exist */ + struct efx_farch_filter_spec *spec; + unsigned i; + + for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { + spec = &table->spec[i]; + spec->type = EFX_FARCH_FILTER_UC_DEF + i; + efx_farch_filter_init_rx_auto(efx, spec); + __set_bit(i, table->used_bitmap); + } + } + + efx_farch_filter_push_rx_config(efx); + + return 0; + +fail: + efx_farch_filter_table_remove(efx); + return -ENOMEM; +} + +/* Update scatter enable flags for filters pointing to our own RX queues */ +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + down_write(&state->lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap) || + table->spec[filter_idx].dmaq_id >= + efx->n_rx_channels) + continue; + + if (efx->rx_scatter) + table->spec[filter_idx].flags |= + EFX_FILTER_FLAG_RX_SCATTER; + else + table->spec[filter_idx].flags &= + ~EFX_FILTER_FLAG_RX_SCATTER; + + if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) + /* Pushed by efx_farch_filter_push_rx_config() */ + continue; + + efx_farch_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_farch_filter_push_rx_config(efx); + + up_write(&state->lock); +} + +#ifdef CONFIG_RFS_ACCEL + +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + bool ret = false, force = false; + u16 arfs_id; + + down_write(&state->lock); + spin_lock_bh(&efx->rps_hash_lock); + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + if (test_bit(index, table->used_bitmap) && + table->spec[index].priority == EFX_FILTER_PRI_HINT) { + struct efx_arfs_rule *rule = NULL; + struct efx_filter_spec spec; + + efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always returned 0 to + * ARFS, so use the same to query it. 
+ */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, &spec); + if (!rule) { + /* ARFS table doesn't know of this filter, remove it */ + force = true; + } else { + arfs_id = rule->arfs_id; + if (!efx_rps_check_rule(rule, index, &force)) + goto out_unlock; + } + } + if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, + flow_id, arfs_id)) { + if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + efx_rps_hash_del(efx, &spec); + efx_farch_filter_table_clear_entry(efx, table, index); + ret = true; + } + } +out_unlock: + spin_unlock_bh(&efx->rps_hash_lock); + up_write(&state->lock); + return ret; +} + +#endif /* CONFIG_RFS_ACCEL */ + +void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *ha; + union efx_multicast_hash *mc_hash = &efx->multicast_hash; + u32 crc; + int bit; + + if (!efx_dev_registered(efx)) + return; + + netif_addr_lock_bh(net_dev); + + efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); + + /* Build multicast hash table */ + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + memset(mc_hash, 0xff, sizeof(*mc_hash)); + } else { + memset(mc_hash, 0x00, sizeof(*mc_hash)); + netdev_for_each_mc_addr(ha, net_dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); + __set_bit_le(bit, mc_hash); + } + + /* Broadcast packets go through the multicast hash filter. + * ether_crc_le() of the broadcast address is 0xbe2612ff + * so we always add bit 0xff to the mask. + */ + __set_bit_le(0xff, mc_hash); + } + + netif_addr_unlock_bh(net_dev); +} diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c new file mode 100644 index 000000000000..ce3060e15b54 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -0,0 +1,1109 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "efx_common.h" +#include "nic.h" +#include "farch_regs.h" +#include "io.h" +#include "workarounds.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "mcdi_port.h" +#include "mcdi_port_common.h" +#include "selftest.h" +#include "siena_sriov.h" +#include "rx_common.h" + +/* Hardware control for SFC9000 family including SFL9021 (aka Siena). 
*/ + +static void siena_init_wol(struct efx_nic *efx); + + +static void siena_push_irq_moderation(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + efx_dword_t timer_cmd; + + if (channel->irq_moderation_us) { + unsigned int ticks; + + ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_CZ_TC_TIMER_MODE, + FFE_CZ_TIMER_MODE_INT_HLDOFF, + FRF_CZ_TC_TIMER_VAL, + ticks - 1); + } else { + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_CZ_TC_TIMER_MODE, + FFE_CZ_TIMER_MODE_DIS, + FRF_CZ_TC_TIMER_VAL, 0); + } + efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, + channel->channel); +} + +void siena_prepare_flush(struct efx_nic *efx) +{ + if (efx->fc_disable++ == 0) + efx_mcdi_set_mac(efx); +} + +void siena_finish_flush(struct efx_nic *efx) +{ + if (--efx->fc_disable == 0) + efx_mcdi_set_mac(efx); +} + +static const struct efx_farch_register_test siena_register_tests[] = { + { FR_AZ_ADR_REGION, + EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, + { FR_CZ_USR_EV_CFG, + EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_CFG, + EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, + { FR_AZ_TX_CFG, + EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, + { FR_AZ_TX_RESERVED, + EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, + { FR_AZ_SRM_TX_DC_CFG, + EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_CFG, + EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_PF_WM, + EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_BZ_DP_CTRL, + EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_BZ_RX_RSS_TKEY, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG1, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG2, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG3, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, +}; + +static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) +{ + enum reset_type reset_method = RESET_TYPE_ALL; + int rc, rc2; + + efx_reset_down(efx, reset_method); + + /* Reset the chip immediately so that it is completely + * quiescent regardless of what any VF driver does. + */ + rc = efx_mcdi_reset(efx, reset_method); + if (rc) + goto out; + + tests->registers = + efx_farch_test_registers(efx, siena_register_tests, + ARRAY_SIZE(siena_register_tests)) + ? -1 : 1; + + rc = efx_mcdi_reset(efx, reset_method); +out: + rc2 = efx_reset_up(efx, reset_method, rc == 0); + return rc ? 
rc : rc2; +} + +/************************************************************************** + * + * PTP + * + ************************************************************************** + */ + +static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), + FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); +} + +static int siena_ptp_set_ts_config(struct efx_nic *efx, + struct hwtstamp_config *init) +{ + int rc; + + switch (init->rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* if TX timestamping is still requested then leave PTP on */ + return efx_ptp_change_mode(efx, + init->tx_type != HWTSTAMP_TX_OFF, + efx_ptp_get_mode(efx)); + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1); + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + rc = efx_ptp_change_mode(efx, true, + MC_CMD_PTP_MODE_V2_ENHANCED); + /* bug 33070 - old versions of the firmware do not support the + * improved UUID filtering option. Similarly old versions of the + * application do not expect it to be enabled. If the firmware + * does not accept the enhanced mode, fall back to the standard + * PTP v2 UUID filtering. */ + if (rc != 0) + rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2); + return rc; + default: + return -ERANGE; + } +} + +/************************************************************************** + * + * Device reset + * + ************************************************************************** + */ + +static int siena_map_reset_flags(u32 *flags) +{ + enum { + SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER | + ETH_RESET_OFFLOAD | ETH_RESET_MAC | + ETH_RESET_PHY), + SIENA_RESET_MC = (SIENA_RESET_PORT | + ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT), + }; + + if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) { + *flags &= ~SIENA_RESET_MC; + return RESET_TYPE_WORLD; + } + + if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) { + *flags &= ~SIENA_RESET_PORT; + return RESET_TYPE_ALL; + } + + /* no invisible reset implemented */ + + return -EINVAL; +} + +#ifdef CONFIG_EEH +/* When a PCI device is isolated from the bus, a subsequent MMIO read is + * required for the kernel EEH mechanisms to notice. As the Solarflare driver + * was written to minimise MMIO read (for latency) then a periodic call to check + * the EEH status of the device is required so that device recovery can happen + * in a timely fashion. + */ +static void siena_monitor(struct efx_nic *efx) +{ + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + + eeh_dev_check_failure(eehdev); +} +#endif + +static int siena_probe_nvconfig(struct efx_nic *efx) +{ + u32 caps = 0; + int rc; + + rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps); + + efx->timer_quantum_ns = + (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? + 3072 : 6144; /* 768 cycles */ + efx->timer_max_ns = efx->type->timer_period_max * + efx->timer_quantum_ns; + + return rc; +} + +static int siena_dimension_resources(struct efx_nic *efx) +{ + /* Each port has a small block of internal SRAM dedicated to + * the buffer table and descriptor caches. In theory we can + * map both blocks to one port, but we don't. 
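+ *
+ * (Editor's illustrative note, not part of the original patch: the
+ * buffer table rows are split evenly between the two ports, which is
+ * why the call below passes FR_CZ_BUF_FULL_TBL_ROWS / 2.)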
+ */ + efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); + return 0; +} + +/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3) + * for memory. + */ +static unsigned int siena_mem_bar(struct efx_nic *efx) +{ + return 2; +} + +static unsigned int siena_mem_map_size(struct efx_nic *efx) +{ + return FR_CZ_MC_TREG_SMEM + + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS; +} + +static int siena_probe_nic(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data; + efx_oword_t reg; + int rc; + + /* Allocate storage for hardware specific data */ + nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + nic_data->efx = efx; + efx->nic_data = nic_data; + + if (efx_farch_fpga_ver(efx) != 0) { + netif_err(efx, probe, efx->net_dev, + "Siena FPGA not supported\n"); + rc = -ENODEV; + goto fail1; + } + + efx->max_channels = EFX_MAX_CHANNELS; + efx->max_vis = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; + efx->tx_queues_per_channel = 4; + + efx_reado(efx, ®, FR_AZ_CS_DEBUG); + efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; + + rc = efx_mcdi_init(efx); + if (rc) + goto fail1; + + /* Now we can reset the NIC */ + rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); + goto fail3; + } + + siena_init_wol(efx); + + /* Allocate memory for INT_KER */ + rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), + GFP_KERNEL); + if (rc) + goto fail4; + BUG_ON(efx->irq_status.dma_addr & 0x0f); + + netif_dbg(efx, probe, efx->net_dev, + "INT_KER at %llx (virt %p phys %llx)\n", + (unsigned long long)efx->irq_status.dma_addr, + efx->irq_status.addr, + (unsigned long long)virt_to_phys(efx->irq_status.addr)); + + /* Read in the non-volatile configuration */ + rc = siena_probe_nvconfig(efx); + if (rc == -EINVAL) { + netif_err(efx, probe, efx->net_dev, + "NVRAM is invalid therefore using defaults\n"); + efx->phy_type = PHY_TYPE_NONE; + efx->mdio.prtad = MDIO_PRTAD_NONE; + } else if (rc) { + goto fail5; + } + + rc = efx_mcdi_mon_probe(efx); + if (rc) + goto fail5; + +#ifdef CONFIG_SFC_SRIOV + efx_siena_sriov_probe(efx); +#endif + efx_ptp_defer_probe_with_channel(efx); + + return 0; + +fail5: + efx_nic_free_buffer(efx, &efx->irq_status); +fail4: +fail3: + efx_mcdi_detach(efx); + efx_mcdi_fini(efx); +fail1: + kfree(efx->nic_data); + return rc; +} + +static int siena_rx_pull_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the + * first 128 bits of the same key, assuming it's been set by + * siena_rx_push_rss_config, below) + */ + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(efx->rss_context.rx_hash_key, &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + memcpy(efx->rss_context.rx_hash_key + sizeof(temp), &temp, sizeof(temp)); + efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + memcpy(efx->rss_context.rx_hash_key + 2 * sizeof(temp), &temp, + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_farch_rx_pull_indir_table(efx); + return 0; +} + +static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, const u8 *key) +{ + efx_oword_t temp; + + /* Set hash key for IPv4 */ + if (key) + memcpy(efx->rss_context.rx_hash_key, key, sizeof(temp)); + memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + /* Enable IPv6 RSS */ + 
BUILD_BUG_ON(sizeof(efx->rss_context.rx_hash_key) < + 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || + FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); + memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(&temp, efx->rss_context.rx_hash_key + sizeof(temp), sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, + FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); + memcpy(&temp, efx->rss_context.rx_hash_key + 2 * sizeof(temp), + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + + memcpy(efx->rss_context.rx_indir_table, rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + efx_farch_rx_push_indir_table(efx); + + return 0; +} + +/* This call performs hardware-specific global initialisation, such as + * defining the descriptor cache sizes and number of RSS channels. + * It does not set up any buffers, descriptor rings or event queues. + */ +static int siena_init_nic(struct efx_nic *efx) +{ + efx_oword_t temp; + int rc; + + /* Recover from a failed assertion post-reset */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + /* Squash TX of packets of 16 bytes or less */ + efx_reado(efx, &temp, FR_AZ_TX_RESERVED); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 + * descriptors (which is bad). + */ + efx_reado(efx, &temp, FR_AZ_TX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); + EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); + efx_writeo(efx, &temp, FR_AZ_TX_CFG); + + efx_reado(efx, &temp, FR_AZ_RX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); + /* Enable hash insertion. This is broken for the 'Falcon' hash + * if IPv6 hashing is also enabled, so also select Toeplitz + * TCP/IPv4 and IPv4 hashes. 
*/ + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE, + EFX_RX_USR_BUF_SIZE >> 5); + efx_writeo(efx, &temp, FR_AZ_RX_CFG); + + siena_rx_push_rss_config(efx, false, efx->rss_context.rx_indir_table, NULL); + efx->rss_context.context_id = 0; /* indicates RSS is active */ + + /* Enable event logging */ + rc = efx_mcdi_log_ctrl(efx, true, false, 0); + if (rc) + return rc; + + /* Set destination of both TX and RX Flush events */ + EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); + efx_writeo(efx, &temp, FR_BZ_DP_CTRL); + + EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); + efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); + + efx_farch_init_common(efx); + return 0; +} + +static void siena_remove_nic(struct efx_nic *efx) +{ + efx_mcdi_mon_remove(efx); + + efx_nic_free_buffer(efx, &efx->irq_status); + + efx_mcdi_reset(efx, RESET_TYPE_ALL); + + efx_mcdi_detach(efx); + efx_mcdi_fini(efx); + + /* Tear down the private nic state */ + kfree(efx->nic_data); + efx->nic_data = NULL; +} + +#define SIENA_DMA_STAT(ext_name, mcdi_name) \ + [SIENA_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define SIENA_OTHER_STAT(ext_name) \ + [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 } +#define GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = { + SIENA_DMA_STAT(tx_bytes, TX_BYTES), + SIENA_OTHER_STAT(tx_good_bytes), + SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES), + SIENA_DMA_STAT(tx_packets, TX_PKTS), + SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS), + SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS), + SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS), + SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), + SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), + SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), + SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS), + SIENA_DMA_STAT(tx_64, TX_64_PKTS), + SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), + SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), + SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), + SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), + SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS), + SIENA_OTHER_STAT(tx_collision), + SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS), + SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS), + SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS), + SIENA_DMA_STAT(rx_bytes, RX_BYTES), + SIENA_OTHER_STAT(rx_good_bytes), + SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES), + SIENA_DMA_STAT(rx_packets, RX_PKTS), + SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS), + SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), + SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS), + SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS), + SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), + SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), + SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), + SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), + SIENA_DMA_STAT(rx_64, 
RX_64_PKTS),
+ SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
+ SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+ GENERIC_SW_STAT(rx_nodesc_trunc),
+ GENERIC_SW_STAT(rx_noskb_drops),
+};
+static const unsigned long siena_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
+};
+
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+ siena_stat_mask, names);
+}
+
+static int siena_try_update_nic_stats(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ __le64 *dma_stats;
+ __le64 generation_start, generation_end;
+
+ dma_stats = efx->stats_buffer.addr;
+
+ generation_end = dma_stats[efx->num_mac_stats - 1];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+ efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+ stats, efx->stats_buffer.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ /* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
+ efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
+ stats[SIENA_STAT_tx_bytes] -
+ stats[SIENA_STAT_tx_bad_bytes]);
+ stats[SIENA_STAT_tx_collision] =
+ stats[SIENA_STAT_tx_single_collision] +
+ stats[SIENA_STAT_tx_multiple_collision] +
+ stats[SIENA_STAT_tx_excessive_collision] +
+ stats[SIENA_STAT_tx_late_collision];
+ efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
+ stats[SIENA_STAT_rx_bytes] -
+ stats[SIENA_STAT_rx_bad_bytes]);
+ efx_update_sw_stats(efx, stats);
+ return 0;
+}
+
+static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us) */
+ for (retry = 0; retry < 100; ++retry) {
+ if (siena_try_update_nic_stats(efx) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
+ core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
+ core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[SIENA_STAT_rx_multicast];
+ core_stats->collisions = stats[SIENA_STAT_tx_collision];
+ core_stats->rx_length_errors =
+ stats[SIENA_STAT_rx_gtjumbo] +
+
stats[SIENA_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow]; + core_stats->tx_window_errors = + stats[SIENA_STAT_tx_late_collision]; + + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors + + stats[SIENA_STAT_rx_symbol_error]); + core_stats->tx_errors = (core_stats->tx_window_errors + + stats[SIENA_STAT_tx_bad]); + } + + return SIENA_STAT_COUNT; +} + +static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unused) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN != + MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + + sizeof(efx->multicast_hash)); + + efx_farch_filter_sync_rx_mode(efx); + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + rc = efx_mcdi_set_mac(efx); + if (rc != 0) + return rc; + + memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), + efx->multicast_hash.byte, sizeof(efx->multicast_hash)); + return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +/************************************************************************** + * + * Wake on LAN + * + ************************************************************************** + */ + +static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) +{ + struct siena_nic_data *nic_data = efx->nic_data; + + wol->supported = WAKE_MAGIC; + if (nic_data->wol_filter_id != -1) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + + +static int siena_set_wol(struct efx_nic *efx, u32 type) +{ + struct siena_nic_data *nic_data = efx->nic_data; + int rc; + + if (type & ~WAKE_MAGIC) + return -EINVAL; + + if (type & WAKE_MAGIC) { + if (nic_data->wol_filter_id != -1) + efx_mcdi_wol_filter_remove(efx, + nic_data->wol_filter_id); + rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, + &nic_data->wol_filter_id); + if (rc) + goto fail; + + pci_wake_from_d3(efx->pci_dev, true); + } else { + rc = efx_mcdi_wol_filter_reset(efx); + nic_data->wol_filter_id = -1; + pci_wake_from_d3(efx->pci_dev, false); + if (rc) + goto fail; + } + + return 0; + fail: + netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", + __func__, type, rc); + return rc; +} + + +static void siena_init_wol(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); + + if (rc != 0) { + /* If it failed, attempt to get into a synchronised + * state with MC by resetting any set WoL filters */ + efx_mcdi_wol_filter_reset(efx); + nic_data->wol_filter_id = -1; + } else if (nic_data->wol_filter_id != -1) { + pci_wake_from_d3(efx->pci_dev, true); + } +} + +/************************************************************************** + * + * MCDI + * + ************************************************************************** + */ + +#define MCDI_PDU(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) +#define MCDI_DOORBELL(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) +#define MCDI_STATUS(efx) \ + (efx_port_num(efx) ? 
MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST) + +static void siena_mcdi_request(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); + unsigned int i; + unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4); + + EFX_WARN_ON_PARANOID(hdr_len != 4); + + efx_writed(efx, hdr, pdu); + + for (i = 0; i < inlen_dw; i++) + efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i); + + /* Ensure the request is written out before the doorbell */ + wmb(); + + /* ring the doorbell with a distinctive value */ + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); +} + +static bool siena_mcdi_poll_response(struct efx_nic *efx) +{ + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + efx_dword_t hdr; + + efx_readd(efx, &hdr, pdu); + + /* All 1's indicates that shared memory is in reset (and is + * not a valid hdr). Wait for it to come out reset before + * completing the command + */ + return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff && + EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, + size_t offset, size_t outlen) +{ + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4); + int i; + + for (i = 0; i < outlen_dw; i++) + efx_readd(efx, &outbuf[i], pdu + offset + 4 * i); +} + +static int siena_mcdi_poll_reboot(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); + efx_dword_t reg; + u32 value; + + efx_readd(efx, ®, addr); + value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + + if (value == 0) + return 0; + + EFX_ZERO_DWORD(reg); + efx_writed(efx, ®, addr); + + /* MAC statistics have been cleared on the NIC; clear the local + * copies that we update with efx_update_diff_stat(). 
+ */ + nic_data->stats[SIENA_STAT_tx_good_bytes] = 0; + nic_data->stats[SIENA_STAT_rx_good_bytes] = 0; + + if (value == MC_STATUS_DWORD_ASSERT) + return -EINTR; + else + return -EIO; +} + +/************************************************************************** + * + * MTD + * + ************************************************************************** + */ + +#ifdef CONFIG_SFC_MTD + +struct siena_nvram_type_info { + int port; + const char *name; +}; + +static const struct siena_nvram_type_info siena_nvram_types[] = { + [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, + [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, + [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" }, +}; + +static int siena_mtd_probe_partition(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *part, + unsigned int type) +{ + const struct siena_nvram_type_info *info; + size_t size, erase_size; + bool protected; + int rc; + + if (type >= ARRAY_SIZE(siena_nvram_types) || + siena_nvram_types[type].name == NULL) + return -ENODEV; + + info = &siena_nvram_types[type]; + + if (info->port != efx_port_num(efx)) + return -ENODEV; + + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); + if (rc) + return rc; + if (protected) + return -ENODEV; /* hide it */ + + part->nvram_type = type; + part->common.dev_type_name = "Siena NVRAM manager"; + part->common.type_name = info->name; + + part->common.mtd.type = MTD_NORFLASH; + part->common.mtd.flags = MTD_CAP_NORFLASH; + part->common.mtd.size = size; + part->common.mtd.erasesize = erase_size; + + return 0; +} + +static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *parts, + size_t n_parts) +{ + uint16_t fw_subtype_list[ + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; + size_t i; + int rc; + + rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); + if (rc) + return rc; + + for (i = 0; i < n_parts; i++) + parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type]; + + return 0; +} + +static int siena_mtd_probe(struct efx_nic *efx) +{ + struct efx_mcdi_mtd_partition *parts; + u32 nvram_types; + unsigned int type; + size_t n_parts; + int rc; + + ASSERT_RTNL(); + + rc = efx_mcdi_nvram_types(efx, &nvram_types); + if (rc) + return rc; + + parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + type = 0; + n_parts = 0; + + while (nvram_types != 0) { + if (nvram_types & 1) { + rc = siena_mtd_probe_partition(efx, &parts[n_parts], + type); + if (rc == 0) + n_parts++; + else if (rc != -ENODEV) + goto fail; + } + type++; + nvram_types >>= 1; + } + + rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts); + if (rc) + goto fail; + + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); +fail: + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + +static unsigned int siena_check_caps(const struct efx_nic *efx, + u8 flag, u32 
offset) +{ + /* Siena did not support MC_CMD_GET_CAPABILITIES */ + return 0; +} + +static unsigned int efx_siena_recycle_ring_size(const struct efx_nic *efx) +{ + /* Maximum link speed is 10G */ + return EFX_RECYCLE_RING_SIZE_10G; +} + +/************************************************************************** + * + * Revision-dependent attributes used by efx.c and nic.c + * + ************************************************************************** + */ + +const struct efx_nic_type siena_a0_nic_type = { + .is_vf = false, + .mem_bar = siena_mem_bar, + .mem_map_size = siena_mem_map_size, + .probe = siena_probe_nic, + .remove = siena_remove_nic, + .init = siena_init_nic, + .dimension_resources = siena_dimension_resources, + .fini = efx_port_dummy_op_void, +#ifdef CONFIG_EEH + .monitor = siena_monitor, +#else + .monitor = NULL, +#endif + .map_reset_reason = efx_mcdi_map_reset_reason, + .map_reset_flags = siena_map_reset_flags, + .reset = efx_mcdi_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_farch_fini_dmaq, + .prepare_flush = siena_prepare_flush, + .finish_flush = siena_finish_flush, + .prepare_flr = efx_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, + .describe_stats = siena_describe_nic_stats, + .update_stats = siena_update_nic_stats, + .start_stats = efx_mcdi_mac_start_stats, + .pull_stats = efx_mcdi_mac_pull_stats, + .stop_stats = efx_mcdi_mac_stop_stats, + .push_irq_moderation = siena_push_irq_moderation, + .reconfigure_mac = siena_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = siena_get_wol, + .set_wol = siena_set_wol, + .resume_wol = siena_init_wol, + .test_chip = siena_test_chip, + .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = siena_mcdi_request, + .mcdi_poll_response = siena_mcdi_poll_response, + .mcdi_read_response = siena_mcdi_read_response, + .mcdi_poll_reboot = siena_mcdi_poll_reboot, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = efx_farch_legacy_interrupt, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .tx_limit_len = efx_farch_tx_limit_len, + .tx_enqueue = __efx_enqueue_skb, + .rx_push_rss_config = siena_rx_push_rss_config, + .rx_pull_rss_config = siena_rx_pull_rss_config, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = efx_farch_rx_defer_refill, + .rx_packet = __efx_rx_packet, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, + .filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx = efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = 
efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = siena_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif + .ptp_write_host_time = siena_ptp_write_host_time, + .ptp_set_ts_config = siena_ptp_set_ts_config, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_siena_sriov_configure, + .sriov_init = efx_siena_sriov_init, + .sriov_fini = efx_siena_sriov_fini, + .sriov_wanted = efx_siena_sriov_wanted, + .sriov_reset = efx_siena_sriov_reset, + .sriov_flr = efx_siena_sriov_flr, + .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac, + .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan, + .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, + .sriov_get_vf_config = efx_siena_sriov_get_vf_config, + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, + .set_mac_address = efx_siena_sriov_mac_address_changed, +#endif + + .revision = EFX_REV_SIENA_A0, + .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, + .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, + .buf_tbl_base = FR_BZ_BUF_FULL_TBL, + .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, + .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, + .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE, + .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, + .rx_buffer_padding = 0, + .can_rx_scatter = true, + .option_descriptors = false, + .min_interrupt_mode = EFX_INT_MODE_LEGACY, + .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, + .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXHASH | NETIF_F_NTUPLE), + .mcdi_max_ver = 1, + .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, + .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), + .rx_hash_key_size = 16, + .check_caps = siena_check_caps, + .sensor_event = efx_mcdi_sensor_event, + .rx_recycle_ring_size = efx_siena_recycle_ring_size, +}; diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.c b/drivers/net/ethernet/sfc/siena/siena_sriov.c new file mode 100644 index 000000000000..f12851a527d9 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.c @@ -0,0 +1,1686 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2010-2012 Solarflare Communications Inc. + */ +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "efx_channels.h" +#include "nic.h" +#include "io.h" +#include "mcdi.h" +#include "filter.h" +#include "mcdi_pcol.h" +#include "farch_regs.h" +#include "siena_sriov.h" +#include "vfdi.h" + +/* Number of longs required to track all the VIs in a VF */ +#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX) + +/* Maximum number of RX queues supported */ +#define VF_MAX_RX_QUEUES 63 + +/** + * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour + * @VF_TX_FILTER_OFF: Disabled + * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only + * 2 TX queues allowed per VF. 
+ * @VF_TX_FILTER_ON: Enabled
+ */
+enum efx_vf_tx_filter_mode {
+ VF_TX_FILTER_OFF,
+ VF_TX_FILTER_AUTO,
+ VF_TX_FILTER_ON,
+};
+
+/**
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
+ * @efx: The Efx NIC owning this VF
+ * @pci_rid: The PCI requester ID for this VF
+ * @pci_name: The PCI name (formatted address) of this VF
+ * @index: Index of VF within its port and PF.
+ * @req: VFDI incoming request work item. Incoming USR_EV events are received
+ * by the NAPI handler, but must be handled by executing MCDI requests
+ * inside a work item.
+ * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
+ * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
+ * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
+ * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
+ * @status_lock
+ * @busy: VFDI request queued to be processed or being processed. Receiving
+ * a VFDI request when @busy is set is an error condition.
+ * @buf: Incoming VFDI requests are DMAed from the VF into this buffer.
+ * @buftbl_base: Buffer table entries for this VF start at this index.
+ * @rx_filtering: Receive filtering has been requested by the VF driver.
+ * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
+ * @rx_filter_qid: VF relative qid for RX filter requested by VF.
+ * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
+ * @tx_filter_mode: Transmit MAC filtering mode.
+ * @tx_filter_id: Transmit MAC filter ID.
+ * @addr: The MAC address and outer vlan tag of the VF.
+ * @status_addr: VF DMA address of page for &struct vfdi_status updates.
+ * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
+ * @peer_page_addrs and @peer_page_count from simultaneous
+ * updates by the VM and consumption by
+ * efx_siena_sriov_update_vf_addr()
+ * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
+ * @peer_page_count: Number of entries in @peer_page_addrs.
+ * @evq0_addrs: Array of guest pages backing evq0.
+ * @evq0_count: Number of entries in @evq0_addrs.
+ * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
+ * to wait for flush completions.
+ * @txq_lock: Mutex for TX queue allocation.
+ * @txq_mask: Mask of initialized transmit queues.
+ * @txq_count: Number of initialized transmit queues.
+ * @rxq_mask: Mask of initialized receive queues.
+ * @rxq_count: Number of initialized receive queues.
+ * @rxq_retry_mask: Mask of receive queues that need to be flushed again
+ * due to flush failure.
+ * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
+ * @reset_work: Work item to schedule a VF reset.
+ */ +struct siena_vf { + struct efx_nic *efx; + unsigned int pci_rid; + char pci_name[13]; /* dddd:bb:dd.f */ + unsigned int index; + struct work_struct req; + u64 req_addr; + int req_type; + unsigned req_seqno; + unsigned msg_seqno; + bool busy; + struct efx_buffer buf; + unsigned buftbl_base; + bool rx_filtering; + enum efx_filter_flags rx_filter_flags; + unsigned rx_filter_qid; + int rx_filter_id; + enum efx_vf_tx_filter_mode tx_filter_mode; + int tx_filter_id; + struct vfdi_endpoint addr; + u64 status_addr; + struct mutex status_lock; + u64 *peer_page_addrs; + unsigned peer_page_count; + u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) / + EFX_BUF_SIZE]; + unsigned evq0_count; + wait_queue_head_t flush_waitq; + struct mutex txq_lock; + unsigned long txq_mask[VI_MASK_LENGTH]; + unsigned txq_count; + unsigned long rxq_mask[VI_MASK_LENGTH]; + unsigned rxq_count; + unsigned long rxq_retry_mask[VI_MASK_LENGTH]; + atomic_t rxq_retry_count; + struct work_struct reset_work; +}; + +struct efx_memcpy_req { + unsigned int from_rid; + void *from_buf; + u64 from_addr; + unsigned int to_rid; + u64 to_addr; + unsigned length; +}; + +/** + * struct efx_local_addr - A MAC address on the vswitch without a VF. + * + * Siena does not have a switch, so VFs can't transmit data to each + * other. Instead the VFs must be made aware of the local addresses + * on the vswitch, so that they can arrange for an alternative + * software datapath to be used. + * + * @link: List head for insertion into efx->local_addr_list. + * @addr: Ethernet address + */ +struct efx_local_addr { + struct list_head link; + u8 addr[ETH_ALEN]; +}; + +/** + * struct efx_endpoint_page - Page of vfdi_endpoint structures + * + * @link: List head for insertion into efx->local_page_list. + * @ptr: Pointer to page. + * @addr: DMA address of page. + */ +struct efx_endpoint_page { + struct list_head link; + void *ptr; + dma_addr_t addr; +}; + +/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */ +#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \ + ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid)) +#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \ + (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ + (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) +#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \ + (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ + (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) + +#define EFX_FIELD_MASK(_field) \ + ((1 << _field ## _WIDTH) - 1) + +/* VFs can only use this many transmit channels */ +static unsigned int vf_max_tx_channels = 2; +module_param(vf_max_tx_channels, uint, 0444); +MODULE_PARM_DESC(vf_max_tx_channels, + "Limit the number of TX channels VFs can use"); + +static int max_vfs = -1; +module_param(max_vfs, int, 0444); +MODULE_PARM_DESC(max_vfs, + "Reduce the number of VFs initialized by the driver"); + +/* Workqueue used by VFDI communication. We can't use the global + * workqueue because it may be running the VF driver's probe() + * routine, which will be blocked there waiting for a VFDI response. + */ +static struct workqueue_struct *vfdi_workqueue; + +static unsigned abs_index(struct siena_vf *vf, unsigned index) +{ + return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; +} + +static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, + unsigned *vi_scale_out, unsigned *vf_total_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); + unsigned vi_scale, vf_total; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 
1 : 0); + MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE); + MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN, + outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_SRIOV_OUT_LEN) + return -EIO; + + vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL); + vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE); + if (vi_scale > EFX_VI_SCALE_MAX) + return -EOPNOTSUPP; + + if (vi_scale_out) + *vi_scale_out = vi_scale; + if (vf_total_out) + *vf_total_out = vf_total; + + return 0; +} + +static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled) +{ + struct siena_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + + EFX_POPULATE_OWORD_2(reg, + FRF_CZ_USREV_DIS, enabled ? 0 : 1, + FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel); + efx_writeo(efx, ®, FR_CZ_USR_EV_CFG); +} + +static int efx_siena_sriov_memcpy(struct efx_nic *efx, + struct efx_memcpy_req *req, + unsigned int count) +{ + MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); + MCDI_DECLARE_STRUCT_PTR(record); + unsigned int index, used; + u64 from_addr; + u32 from_rid; + int rc; + + mb(); /* Finish writing source/reading dest before DMA starts */ + + if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM)) + return -ENOBUFS; + used = MC_CMD_MEMCPY_IN_LEN(count); + + for (index = 0; index < count; index++) { + record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS, + count); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, + req->to_rid); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR, + req->to_addr); + if (req->from_buf == NULL) { + from_rid = req->from_rid; + from_addr = req->from_addr; + } else { + if (WARN_ON(used + req->length > + MCDI_CTL_SDU_LEN_MAX_V1)) { + rc = -ENOBUFS; + goto out; + } + + from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; + from_addr = used; + memcpy(_MCDI_PTR(inbuf, used), req->from_buf, + req->length); + used += req->length; + } + + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR, + from_addr); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, + req->length); + + ++req; + } + + rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); +out: + mb(); /* Don't write source/read dest before DMA is complete */ + + return rc; +} + +/* The TX filter is entirely controlled by this driver, and is modified + * underneath the feet of the VF + */ +static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct efx_filter_spec filter; + u16 vlan; + int rc; + + if (vf->tx_filter_id != -1) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + vf->tx_filter_id); + netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n", + vf->pci_name, vf->tx_filter_id); + vf->tx_filter_id = -1; + } + + if (is_zero_ether_addr(vf->addr.mac_addr)) + return; + + /* Turn on TX filtering automatically if not explicitly + * enabled or disabled. + */ + if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) + vf->tx_filter_mode = VF_TX_FILTER_ON; + + vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; + efx_filter_init_tx(&filter, abs_index(vf, 0)); + rc = efx_filter_set_eth_local(&filter, + vlan ? 
vlan : EFX_FILTER_VID_UNSPEC, + vf->addr.mac_addr); + BUG_ON(rc); + + rc = efx_filter_insert_filter(efx, &filter, true); + if (rc < 0) { + netif_warn(efx, hw, efx->net_dev, + "Unable to migrate tx filter for vf %s\n", + vf->pci_name); + } else { + netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n", + vf->pci_name, rc); + vf->tx_filter_id = rc; + } +} + +/* The RX filter is managed here on behalf of the VF driver */ +static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct efx_filter_spec filter; + u16 vlan; + int rc; + + if (vf->rx_filter_id != -1) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + vf->rx_filter_id); + netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n", + vf->pci_name, vf->rx_filter_id); + vf->rx_filter_id = -1; + } + + if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr)) + return; + + vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; + efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED, + vf->rx_filter_flags, + abs_index(vf, vf->rx_filter_qid)); + rc = efx_filter_set_eth_local(&filter, + vlan ? vlan : EFX_FILTER_VID_UNSPEC, + vf->addr.mac_addr); + BUG_ON(rc); + + rc = efx_filter_insert_filter(efx, &filter, true); + if (rc < 0) { + netif_warn(efx, hw, efx->net_dev, + "Unable to insert rx filter for vf %s\n", + vf->pci_name); + } else { + netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n", + vf->pci_name, rc); + vf->rx_filter_id = rc; + } +} + +static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + + efx_siena_sriov_reset_tx_filter(vf); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); +} + +/* Push the peer list to this VF. The caller must hold status_lock to interlock + * with VFDI requests, and they must be serialised against manipulation of + * local_page_list, either by acquiring local_lock or by running from + * efx_siena_sriov_peer_work() + */ +static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *status = nic_data->vfdi_status.addr; + struct efx_memcpy_req copy[4]; + struct efx_endpoint_page *epp; + unsigned int pos, count; + unsigned data_offset; + efx_qword_t event; + + WARN_ON(!mutex_is_locked(&vf->status_lock)); + WARN_ON(!vf->status_addr); + + status->local = vf->addr; + status->generation_end = ++status->generation_start; + + memset(copy, '\0', sizeof(copy)); + /* Write generation_start */ + copy[0].from_buf = &status->generation_start; + copy[0].to_rid = vf->pci_rid; + copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status, + generation_start); + copy[0].length = sizeof(status->generation_start); + /* DMA the rest of the structure (excluding the generations). This + * assumes that the non-generation portion of vfdi_status is in + * one chunk starting at the version member. 
+ */ + data_offset = offsetof(struct vfdi_status, version); + copy[1].from_rid = efx->pci_dev->devfn; + copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset; + copy[1].to_rid = vf->pci_rid; + copy[1].to_addr = vf->status_addr + data_offset; + copy[1].length = status->length - data_offset; + + /* Copy the peer pages */ + pos = 2; + count = 0; + list_for_each_entry(epp, &nic_data->local_page_list, link) { + if (count == vf->peer_page_count) { + /* The VF driver will know they need to provide more + * pages because peer_addr_count is too large. + */ + break; + } + copy[pos].from_buf = NULL; + copy[pos].from_rid = efx->pci_dev->devfn; + copy[pos].from_addr = epp->addr; + copy[pos].to_rid = vf->pci_rid; + copy[pos].to_addr = vf->peer_page_addrs[count]; + copy[pos].length = EFX_PAGE_SIZE; + + if (++pos == ARRAY_SIZE(copy)) { + efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); + pos = 0; + } + ++count; + } + + /* Write generation_end */ + copy[pos].from_buf = &status->generation_end; + copy[pos].to_rid = vf->pci_rid; + copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, + generation_end); + copy[pos].length = sizeof(status->generation_end); + efx_siena_sriov_memcpy(efx, copy, pos + 1); + + /* Notify the guest */ + EFX_POPULATE_QWORD_3(event, + FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, + VFDI_EV_SEQ, (vf->msg_seqno & 0xff), + VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); + ++vf->msg_seqno; + efx_farch_generate_event(efx, + EFX_VI_BASE + vf->index * efx_vf_size(efx), + &event); +} + +static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset, + u64 *addr, unsigned count) +{ + efx_qword_t buf; + unsigned pos; + + for (pos = 0; pos < count; ++pos) { + EFX_POPULATE_QWORD_3(buf, + FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF, + addr ? addr[pos] >> 12 : 0, + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL, + &buf, offset + pos); + } +} + +static bool bad_vf_index(struct efx_nic *efx, unsigned index) +{ + return index >= efx_vf_size(efx); +} + +static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count) +{ + unsigned max_buf_count = max_entry_count * + sizeof(efx_qword_t) / EFX_BUF_SIZE; + + return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count); +} + +/* Check that VI specified by per-port index belongs to a VF. + * Optionally set VF index and VI index within the VF. 
+ */ +static bool map_vi_index(struct efx_nic *efx, unsigned abs_index, + struct siena_vf **vf_out, unsigned *rel_index_out) +{ + struct siena_nic_data *nic_data = efx->nic_data; + unsigned vf_i; + + if (abs_index < EFX_VI_BASE) + return true; + vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx); + if (vf_i >= efx->vf_init_count) + return true; + + if (vf_out) + *vf_out = nic_data->vf + vf_i; + if (rel_index_out) + *rel_index_out = abs_index % efx_vf_size(efx); + return false; +} + +static int efx_vfdi_init_evq(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_evq = req->u.init_evq.index; + unsigned buf_count = req->u.init_evq.buf_count; + unsigned abs_evq = abs_index(vf, vf_evq); + unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq); + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || + bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n", + vf->pci_name, vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + + efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); + + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, abs_evq); + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(buf_count), + FRF_AZ_EVQ_BUF_BASE_ID, buftbl); + efx_writeo_table(efx, ®, FR_BZ_EVQ_PTR_TBL, abs_evq); + + if (vf_evq == 0) { + memcpy(vf->evq0_addrs, req->u.init_evq.addr, + buf_count * sizeof(u64)); + vf->evq0_count = buf_count; + } + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_init_rxq(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_rxq = req->u.init_rxq.index; + unsigned vf_evq = req->u.init_rxq.evq; + unsigned buf_count = req->u.init_rxq.buf_count; + unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq); + unsigned label; + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || + vf_rxq >= VF_MAX_RX_QUEUES || + bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d " + "buf_count %d\n", vf->pci_name, vf_rxq, + vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) + ++vf->rxq_count; + efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); + + label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); + EFX_POPULATE_OWORD_6(reg, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl, + FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), + FRF_AZ_RX_DESCQ_LABEL, label, + FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count), + FRF_AZ_RX_DESCQ_JUMBO, + !!(req->u.init_rxq.flags & + VFDI_RXQ_FLAG_SCATTER_EN), + FRF_AZ_RX_DESCQ_EN, 1); + efx_writeo_table(efx, ®, FR_BZ_RX_DESC_PTR_TBL, + abs_index(vf, vf_rxq)); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_init_txq(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_txq = req->u.init_txq.index; + unsigned vf_evq = req->u.init_txq.evq; + unsigned buf_count = req->u.init_txq.buf_count; + unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq); + unsigned label, eth_filt_en; + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) || + vf_txq >= vf_max_tx_channels || + bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { + if (net_ratelimit()) 
+ netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d " + "buf_count %d\n", vf->pci_name, vf_txq, + vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + + mutex_lock(&vf->txq_lock); + if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) + ++vf->txq_count; + mutex_unlock(&vf->txq_lock); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); + + eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; + + label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL); + EFX_POPULATE_OWORD_8(reg, + FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U), + FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en, + FRF_AZ_TX_DESCQ_EN, 1, + FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl, + FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), + FRF_AZ_TX_DESCQ_LABEL, label, + FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count), + FRF_BZ_TX_NON_IP_DROP_DIS, 1); + efx_writeo_table(efx, ®, FR_BZ_TX_DESC_PTR_TBL, + abs_index(vf, vf_txq)); + + return VFDI_RC_SUCCESS; +} + +/* Returns true when efx_vfdi_fini_all_queues should wake */ +static bool efx_vfdi_flush_wake(struct siena_vf *vf) +{ + /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */ + smp_mb(); + + return (!vf->txq_count && !vf->rxq_count) || + atomic_read(&vf->rxq_retry_count); +} + +static void efx_vfdi_flush_clear(struct siena_vf *vf) +{ + memset(vf->txq_mask, 0, sizeof(vf->txq_mask)); + vf->txq_count = 0; + memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask)); + vf->rxq_count = 0; + memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask)); + atomic_set(&vf->rxq_retry_count, 0); +} + +static int efx_vfdi_fini_all_queues(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + efx_oword_t reg; + unsigned count = efx_vf_size(efx); + unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx); + unsigned timeout = HZ; + unsigned index, rxqs_count; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX); + int rc; + + BUILD_BUG_ON(VF_MAX_RX_QUEUES > + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); + + rtnl_lock(); + siena_prepare_flush(efx); + rtnl_unlock(); + + /* Flush all the initialized queues */ + rxqs_count = 0; + for (index = 0; index < count; ++index) { + if (test_bit(index, vf->txq_mask)) { + EFX_POPULATE_OWORD_2(reg, + FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_TX_FLUSH_DESCQ, + vf_offset + index); + efx_writeo(efx, ®, FR_AZ_TX_FLUSH_DESCQ); + } + if (test_bit(index, vf->rxq_mask)) { + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + rxqs_count, vf_offset + index); + rxqs_count++; + } + } + + atomic_set(&vf->rxq_retry_count, 0); + while (timeout && (vf->rxq_count || vf->txq_count)) { + rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count), + NULL, 0, NULL); + WARN_ON(rc < 0); + + timeout = wait_event_timeout(vf->flush_waitq, + efx_vfdi_flush_wake(vf), + timeout); + rxqs_count = 0; + for (index = 0; index < count; ++index) { + if (test_and_clear_bit(index, vf->rxq_retry_mask)) { + atomic_dec(&vf->rxq_retry_count); + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + rxqs_count, vf_offset + index); + rxqs_count++; + } + } + } + + rtnl_lock(); + siena_finish_flush(efx); + rtnl_unlock(); + + /* Irrespective of success/failure, fini the queues */ + EFX_ZERO_OWORD(reg); + for (index = 0; index < count; ++index) { + efx_writeo_table(efx, ®, FR_BZ_RX_DESC_PTR_TBL, + vf_offset + index); + efx_writeo_table(efx, ®, FR_BZ_TX_DESC_PTR_TBL, + vf_offset + index); + efx_writeo_table(efx, ®, FR_BZ_EVQ_PTR_TBL, + vf_offset + index); + efx_writeo_table(efx, ®, 
FR_BZ_TIMER_TBL, + vf_offset + index); + } + efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL, + EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); + efx_vfdi_flush_clear(vf); + + vf->evq0_count = 0; + + return timeout ? 0 : VFDI_RC_ETIMEDOUT; +} + +static int efx_vfdi_insert_filter(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_rxq = req->u.mac_filter.rxq; + unsigned flags; + + if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INSERT_FILTER from %s: rxq %d " + "flags 0x%x\n", vf->pci_name, vf_rxq, + req->u.mac_filter.flags); + return VFDI_RC_EINVAL; + } + + flags = 0; + if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS) + flags |= EFX_FILTER_FLAG_RX_RSS; + if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + vf->rx_filter_flags = flags; + vf->rx_filter_qid = vf_rxq; + vf->rx_filtering = true; + + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_remove_all_filters(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + + vf->rx_filtering = false; + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_set_status_page(struct siena_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_req *req = vf->buf.addr; + u64 page_count = req->u.set_status_page.peer_page_count; + u64 max_page_count = + (EFX_PAGE_SIZE - + offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0])) + / sizeof(req->u.set_status_page.peer_page_addr[0]); + + if (!req->u.set_status_page.dma_addr || page_count > max_page_count) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid SET_STATUS_PAGE from %s\n", + vf->pci_name); + return VFDI_RC_EINVAL; + } + + mutex_lock(&nic_data->local_lock); + mutex_lock(&vf->status_lock); + vf->status_addr = req->u.set_status_page.dma_addr; + + kfree(vf->peer_page_addrs); + vf->peer_page_addrs = NULL; + vf->peer_page_count = 0; + + if (page_count) { + vf->peer_page_addrs = kcalloc(page_count, sizeof(u64), + GFP_KERNEL); + if (vf->peer_page_addrs) { + memcpy(vf->peer_page_addrs, + req->u.set_status_page.peer_page_addr, + page_count * sizeof(u64)); + vf->peer_page_count = page_count; + } + } + + __efx_siena_sriov_push_vf_status(vf); + mutex_unlock(&vf->status_lock); + mutex_unlock(&nic_data->local_lock); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_clear_status_page(struct siena_vf *vf) +{ + mutex_lock(&vf->status_lock); + vf->status_addr = 0; + mutex_unlock(&vf->status_lock); + + return VFDI_RC_SUCCESS; +} + +typedef int (*efx_vfdi_op_t)(struct siena_vf *vf); + +static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { + [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq, + [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq, + [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq, + [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues, + [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter, + [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters, + [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page, + [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, +}; + +static void efx_siena_sriov_vfdi(struct work_struct *work) +{ + struct siena_vf *vf = container_of(work, struct 
siena_vf, req); + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + struct efx_memcpy_req copy[2]; + int rc; + + /* Copy this page into the local address space */ + memset(copy, '\0', sizeof(copy)); + copy[0].from_rid = vf->pci_rid; + copy[0].from_addr = vf->req_addr; + copy[0].to_rid = efx->pci_dev->devfn; + copy[0].to_addr = vf->buf.dma_addr; + copy[0].length = EFX_PAGE_SIZE; + rc = efx_siena_sriov_memcpy(efx, copy, 1); + if (rc) { + /* If we can't get the request, we can't reply to the caller */ + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Unable to fetch VFDI request from %s rc %d\n", + vf->pci_name, -rc); + vf->busy = false; + return; + } + + if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) { + rc = vfdi_ops[req->op](vf); + if (rc == 0) { + netif_dbg(efx, hw, efx->net_dev, + "vfdi request %d from %s ok\n", + req->op, vf->pci_name); + } + } else { + netif_dbg(efx, hw, efx->net_dev, + "ERROR: Unrecognised request %d from VF %s addr " + "%llx\n", req->op, vf->pci_name, + (unsigned long long)vf->req_addr); + rc = VFDI_RC_EOPNOTSUPP; + } + + /* Allow subsequent VF requests */ + vf->busy = false; + smp_wmb(); + + /* Respond to the request */ + req->rc = rc; + req->op = VFDI_OP_RESPONSE; + + memset(copy, '\0', sizeof(copy)); + copy[0].from_buf = &req->rc; + copy[0].to_rid = vf->pci_rid; + copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc); + copy[0].length = sizeof(req->rc); + copy[1].from_buf = &req->op; + copy[1].to_rid = vf->pci_rid; + copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); + copy[1].length = sizeof(req->op); + + (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); +} + + + +/* After a reset the event queues inside the guests no longer exist. Fill the + * event ring in guest memory with VFDI reset events, then (re-initialise) the + * event queue to raise an interrupt. The guest driver will then recover. 
+ */ + +static void efx_siena_sriov_reset_vf(struct siena_vf *vf, + struct efx_buffer *buffer) +{ + struct efx_nic *efx = vf->efx; + struct efx_memcpy_req copy_req[4]; + efx_qword_t event; + unsigned int pos, count, k, buftbl, abs_evq; + efx_oword_t reg; + efx_dword_t ptr; + int rc; + + BUG_ON(buffer->len != EFX_PAGE_SIZE); + + if (!vf->evq0_count) + return; + BUG_ON(vf->evq0_count & (vf->evq0_count - 1)); + + mutex_lock(&vf->status_lock); + EFX_POPULATE_QWORD_3(event, + FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, + VFDI_EV_SEQ, vf->msg_seqno, + VFDI_EV_TYPE, VFDI_EV_TYPE_RESET); + vf->msg_seqno++; + for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event)) + memcpy(buffer->addr + pos, &event, sizeof(event)); + + for (pos = 0; pos < vf->evq0_count; pos += count) { + count = min_t(unsigned, vf->evq0_count - pos, + ARRAY_SIZE(copy_req)); + for (k = 0; k < count; k++) { + copy_req[k].from_buf = NULL; + copy_req[k].from_rid = efx->pci_dev->devfn; + copy_req[k].from_addr = buffer->dma_addr; + copy_req[k].to_rid = vf->pci_rid; + copy_req[k].to_addr = vf->evq0_addrs[pos + k]; + copy_req[k].length = EFX_PAGE_SIZE; + } + rc = efx_siena_sriov_memcpy(efx, copy_req, count); + if (rc) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Unable to notify %s of reset" + ": %d\n", vf->pci_name, -rc); + break; + } + } + + /* Reinitialise, arm and trigger evq0 */ + abs_evq = abs_index(vf, 0); + buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); + efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); + + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, abs_evq); + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count), + FRF_AZ_EVQ_BUF_BASE_ID, buftbl); + efx_writeo_table(efx, ®, FR_BZ_EVQ_PTR_TBL, abs_evq); + EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0); + efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq); + + mutex_unlock(&vf->status_lock); +} + +static void efx_siena_sriov_reset_vf_work(struct work_struct *work) +{ + struct siena_vf *vf = container_of(work, struct siena_vf, req); + struct efx_nic *efx = vf->efx; + struct efx_buffer buf; + + if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { + efx_siena_sriov_reset_vf(vf, &buf); + efx_nic_free_buffer(efx, &buf); + } +} + +static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx) +{ + netif_err(efx, drv, efx->net_dev, + "ERROR: IOV requires MSI-X and 1 additional interrupt" + "vector. 
IOV disabled\n"); + efx->vf_count = 0; +} + +static int efx_siena_sriov_probe_channel(struct efx_channel *channel) +{ + struct siena_nic_data *nic_data = channel->efx->nic_data; + nic_data->vfdi_channel = channel; + + return 0; +} + +static void +efx_siena_sriov_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-iov", channel->efx->name); +} + +static const struct efx_channel_type efx_siena_sriov_channel_type = { + .handle_no_channel = efx_siena_sriov_handle_no_channel, + .pre_probe = efx_siena_sriov_probe_channel, + .post_remove = efx_channel_dummy_op_void, + .get_name = efx_siena_sriov_get_channel_name, + /* no copy operation; channel must not be reallocated */ + .keep_eventq = true, +}; + +void efx_siena_sriov_probe(struct efx_nic *efx) +{ + unsigned count; + + if (!max_vfs) + return; + + if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) { + pci_info(efx->pci_dev, "no SR-IOV VFs probed\n"); + return; + } + if (count > 0 && count > max_vfs) + count = max_vfs; + + /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ + efx->vf_count = count; + + efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type; +} + +/* Copy the list of individual addresses into the vfdi_status.peers + * array and auxiliary pages, protected by %local_lock. Drop that lock + * and then broadcast the address list to every VF. + */ +static void efx_siena_sriov_peer_work(struct work_struct *data) +{ + struct siena_nic_data *nic_data = container_of(data, + struct siena_nic_data, + peer_work); + struct efx_nic *efx = nic_data->efx; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; + struct siena_vf *vf; + struct efx_local_addr *local_addr; + struct vfdi_endpoint *peer; + struct efx_endpoint_page *epp; + struct list_head pages; + unsigned int peer_space; + unsigned int peer_count; + unsigned int pos; + + mutex_lock(&nic_data->local_lock); + + /* Move the existing peer pages off %local_page_list */ + INIT_LIST_HEAD(&pages); + list_splice_tail_init(&nic_data->local_page_list, &pages); + + /* Populate the VF addresses starting from entry 1 (entry 0 is + * the PF address) + */ + peer = vfdi_status->peers + 1; + peer_space = ARRAY_SIZE(vfdi_status->peers) - 1; + peer_count = 1; + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + + mutex_lock(&vf->status_lock); + if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) { + *peer++ = vf->addr; + ++peer_count; + --peer_space; + BUG_ON(peer_space == 0); + } + mutex_unlock(&vf->status_lock); + } + + /* Fill the remaining addresses */ + list_for_each_entry(local_addr, &nic_data->local_addr_list, link) { + ether_addr_copy(peer->mac_addr, local_addr->addr); + peer->tci = 0; + ++peer; + ++peer_count; + if (--peer_space == 0) { + if (list_empty(&pages)) { + epp = kmalloc(sizeof(*epp), GFP_KERNEL); + if (!epp) + break; + epp->ptr = dma_alloc_coherent( + &efx->pci_dev->dev, EFX_PAGE_SIZE, + &epp->addr, GFP_KERNEL); + if (!epp->ptr) { + kfree(epp); + break; + } + } else { + epp = list_first_entry( + &pages, struct efx_endpoint_page, link); + list_del(&epp->link); + } + + list_add_tail(&epp->link, &nic_data->local_page_list); + peer = (struct vfdi_endpoint *)epp->ptr; + peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); + } + } + vfdi_status->peer_count = peer_count; + mutex_unlock(&nic_data->local_lock); + + /* Free any now unused endpoint pages */ + while (!list_empty(&pages)) { + epp = list_first_entry( + &pages, struct efx_endpoint_page, 
link); + list_del(&epp->link); + dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, + epp->ptr, epp->addr); + kfree(epp); + } + + /* Finally, push the pages */ + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + + mutex_lock(&vf->status_lock); + if (vf->status_addr) + __efx_siena_sriov_push_vf_status(vf); + mutex_unlock(&vf->status_lock); + } +} + +static void efx_siena_sriov_free_local(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct efx_local_addr *local_addr; + struct efx_endpoint_page *epp; + + while (!list_empty(&nic_data->local_addr_list)) { + local_addr = list_first_entry(&nic_data->local_addr_list, + struct efx_local_addr, link); + list_del(&local_addr->link); + kfree(local_addr); + } + + while (!list_empty(&nic_data->local_page_list)) { + epp = list_first_entry(&nic_data->local_page_list, + struct efx_endpoint_page, link); + list_del(&epp->link); + dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, + epp->ptr, epp->addr); + kfree(epp); + } +} + +static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) +{ + unsigned index; + struct siena_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + + nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf), + GFP_KERNEL); + if (!nic_data->vf) + return -ENOMEM; + + for (index = 0; index < efx->vf_count; ++index) { + vf = nic_data->vf + index; + + vf->efx = efx; + vf->index = index; + vf->rx_filter_id = -1; + vf->tx_filter_mode = VF_TX_FILTER_AUTO; + vf->tx_filter_id = -1; + INIT_WORK(&vf->req, efx_siena_sriov_vfdi); + INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); + init_waitqueue_head(&vf->flush_waitq); + mutex_init(&vf->status_lock); + mutex_init(&vf->txq_lock); + } + + return 0; +} + +static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + unsigned int pos; + + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + + efx_nic_free_buffer(efx, &vf->buf); + kfree(vf->peer_page_addrs); + vf->peer_page_addrs = NULL; + vf->peer_page_count = 0; + + vf->evq0_count = 0; + } +} + +static int efx_siena_sriov_vfs_init(struct efx_nic *efx) +{ + struct pci_dev *pci_dev = efx->pci_dev; + struct siena_nic_data *nic_data = efx->nic_data; + unsigned index, devfn, sriov, buftbl_base; + u16 offset, stride; + struct siena_vf *vf; + int rc; + + sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV); + if (!sriov) + return -ENOENT; + + pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); + + buftbl_base = nic_data->vf_buftbl_base; + devfn = pci_dev->devfn + offset; + for (index = 0; index < efx->vf_count; ++index) { + vf = nic_data->vf + index; + + /* Reserve buffer entries */ + vf->buftbl_base = buftbl_base; + buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx); + + vf->pci_rid = devfn; + snprintf(vf->pci_name, sizeof(vf->pci_name), + "%04x:%02x:%02x.%d", + pci_domain_nr(pci_dev->bus), pci_dev->bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn)); + + rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, + GFP_KERNEL); + if (rc) + goto fail; + + devfn += stride; + } + + return 0; + +fail: + efx_siena_sriov_vfs_fini(efx); + return rc; +} + +int efx_siena_sriov_init(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status; + int rc; + + /* Ensure there's room for vf_channel */ + 
BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE); + /* Ensure that VI_BASE is aligned on VI_SCALE */ + BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1)); + + if (efx->vf_count == 0) + return 0; + + rc = efx_siena_sriov_cmd(efx, true, NULL, NULL); + if (rc) + goto fail_cmd; + + rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status, + sizeof(*vfdi_status), GFP_KERNEL); + if (rc) + goto fail_status; + vfdi_status = nic_data->vfdi_status.addr; + memset(vfdi_status, 0, sizeof(*vfdi_status)); + vfdi_status->version = 1; + vfdi_status->length = sizeof(*vfdi_status); + vfdi_status->max_tx_channels = vf_max_tx_channels; + vfdi_status->vi_scale = efx->vi_scale; + vfdi_status->rss_rxq_count = efx->rss_spread; + vfdi_status->peer_count = 1 + efx->vf_count; + vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; + + rc = efx_siena_sriov_vf_alloc(efx); + if (rc) + goto fail_alloc; + + mutex_init(&nic_data->local_lock); + INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work); + INIT_LIST_HEAD(&nic_data->local_addr_list); + INIT_LIST_HEAD(&nic_data->local_page_list); + + rc = efx_siena_sriov_vfs_init(efx); + if (rc) + goto fail_vfs; + + rtnl_lock(); + ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr); + efx->vf_init_count = efx->vf_count; + rtnl_unlock(); + + efx_siena_sriov_usrev(efx, true); + + /* At this point we must be ready to accept VFDI requests */ + + rc = pci_enable_sriov(efx->pci_dev, efx->vf_count); + if (rc) + goto fail_pci; + + netif_info(efx, probe, net_dev, + "enabled SR-IOV for %d VFs, %d VI per VF\n", + efx->vf_count, efx_vf_size(efx)); + return 0; + +fail_pci: + efx_siena_sriov_usrev(efx, false); + rtnl_lock(); + efx->vf_init_count = 0; + rtnl_unlock(); + efx_siena_sriov_vfs_fini(efx); +fail_vfs: + cancel_work_sync(&nic_data->peer_work); + efx_siena_sriov_free_local(efx); + kfree(nic_data->vf); +fail_alloc: + efx_nic_free_buffer(efx, &nic_data->vfdi_status); +fail_status: + efx_siena_sriov_cmd(efx, false, NULL, NULL); +fail_cmd: + return rc; +} + +void efx_siena_sriov_fini(struct efx_nic *efx) +{ + struct siena_vf *vf; + unsigned int pos; + struct siena_nic_data *nic_data = efx->nic_data; + + if (efx->vf_init_count == 0) + return; + + /* Disable all interfaces to reconfiguration */ + BUG_ON(nic_data->vfdi_channel->enabled); + efx_siena_sriov_usrev(efx, false); + rtnl_lock(); + efx->vf_init_count = 0; + rtnl_unlock(); + + /* Flush all reconfiguration work */ + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = nic_data->vf + pos; + cancel_work_sync(&vf->req); + cancel_work_sync(&vf->reset_work); + } + cancel_work_sync(&nic_data->peer_work); + + pci_disable_sriov(efx->pci_dev); + + /* Tear down back-end state */ + efx_siena_sriov_vfs_fini(efx); + efx_siena_sriov_free_local(efx); + kfree(nic_data->vf); + efx_nic_free_buffer(efx, &nic_data->vfdi_status); + efx_siena_sriov_cmd(efx, false, NULL, NULL); +} + +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct siena_vf *vf; + unsigned qid, seq, type, data; + + qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); + + /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */ + BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0); + seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ); + type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE); + data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA); + + netif_vdbg(efx, hw, efx->net_dev, + "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n", + qid, seq, type, data); + + if (map_vi_index(efx, qid, &vf, NULL)) + 
return; + if (vf->busy) + goto error; + + if (type == VFDI_EV_TYPE_REQ_WORD0) { + /* Resynchronise */ + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->req_seqno = seq + 1; + vf->req_addr = 0; + } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type) + goto error; + + switch (vf->req_type) { + case VFDI_EV_TYPE_REQ_WORD0: + case VFDI_EV_TYPE_REQ_WORD1: + case VFDI_EV_TYPE_REQ_WORD2: + vf->req_addr |= (u64)data << (vf->req_type << 4); + ++vf->req_type; + return; + + case VFDI_EV_TYPE_REQ_WORD3: + vf->req_addr |= (u64)data << 48; + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->busy = true; + queue_work(vfdi_workqueue, &vf->req); + return; + } + +error: + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Screaming VFDI request from %s\n", + vf->pci_name); + /* Reset the request and sequence number */ + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->req_seqno = seq + 1; +} + +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + + if (vf_i > efx->vf_init_count) + return; + vf = nic_data->vf + vf_i; + netif_info(efx, hw, efx->net_dev, + "FLR on VF %s\n", vf->pci_name); + + vf->status_addr = 0; + efx_vfdi_remove_all_filters(vf); + efx_vfdi_flush_clear(vf); + + vf->evq0_count = 0; +} + +int efx_siena_sriov_mac_address_changed(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; + + if (!efx->vf_init_count) + return 0; + ether_addr_copy(vfdi_status->peers[0].mac_addr, + efx->net_dev->dev_addr); + queue_work(vfdi_workqueue, &nic_data->peer_work); + + return 0; +} + +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct siena_vf *vf; + unsigned queue, qid; + + queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + if (map_vi_index(efx, queue, &vf, &qid)) + return; + /* Ignore flush completions triggered by an FLR */ + if (!test_bit(qid, vf->txq_mask)) + return; + + __clear_bit(qid, vf->txq_mask); + --vf->txq_count; + + if (efx_vfdi_flush_wake(vf)) + wake_up(&vf->flush_waitq); +} + +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct siena_vf *vf; + unsigned ev_failed, queue, qid; + + queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + ev_failed = EFX_QWORD_FIELD(*event, + FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + if (map_vi_index(efx, queue, &vf, &qid)) + return; + if (!test_bit(qid, vf->rxq_mask)) + return; + + if (ev_failed) { + set_bit(qid, vf->rxq_retry_mask); + atomic_inc(&vf->rxq_retry_count); + } else { + __clear_bit(qid, vf->rxq_mask); + --vf->rxq_count; + } + if (efx_vfdi_flush_wake(vf)) + wake_up(&vf->flush_waitq); +} + +/* Called from napi. 
Schedule the reset work item */ +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) +{ + struct siena_vf *vf; + unsigned int rel; + + if (map_vi_index(efx, dmaq, &vf, &rel)) + return; + + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "VF %d DMA Q %d reports descriptor fetch error.\n", + vf->index, rel); + queue_work(vfdi_workqueue, &vf->reset_work); +} + +/* Reset all VFs */ +void efx_siena_sriov_reset(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + unsigned int vf_i; + struct efx_buffer buf; + struct siena_vf *vf; + + ASSERT_RTNL(); + + if (efx->vf_init_count == 0) + return; + + efx_siena_sriov_usrev(efx, true); + (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); + + if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) + return; + + for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { + vf = nic_data->vf + vf_i; + efx_siena_sriov_reset_vf(vf, &buf); + } + + efx_nic_free_buffer(efx, &buf); +} + +int efx_init_sriov(void) +{ + /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and + * efx_siena_sriov_peer_work() spend almost all their time sleeping for + * MCDI to complete anyway + */ + vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); + if (!vfdi_workqueue) + return -ENOMEM; + return 0; +} + +void efx_fini_sriov(void) +{ + destroy_workqueue(vfdi_workqueue); +} + +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + mutex_lock(&vf->status_lock); + ether_addr_copy(vf->addr.mac_addr, mac); + __efx_siena_sriov_update_vf_addr(vf); + mutex_unlock(&vf->status_lock); + + return 0; +} + +int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, + u16 vlan, u8 qos) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + u16 tci; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + mutex_lock(&vf->status_lock); + tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); + vf->addr.tci = htons(tci); + __efx_siena_sriov_update_vf_addr(vf); + mutex_unlock(&vf->status_lock); + + return 0; +} + +int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, + bool spoofchk) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + int rc; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + mutex_lock(&vf->txq_lock); + if (vf->txq_count == 0) { + vf->tx_filter_mode = + spoofchk ? 
VF_TX_FILTER_ON : VF_TX_FILTER_OFF; + rc = 0; + } else { + /* This cannot be changed while TX queues are running */ + rc = -EBUSY; + } + mutex_unlock(&vf->txq_lock); + return rc; +} + +int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivi) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; + u16 tci; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + ivi->vf = vf_i; + ether_addr_copy(ivi->mac, vf->addr.mac_addr); + ivi->max_tx_rate = 0; + ivi->min_tx_rate = 0; + tci = ntohs(vf->addr.tci); + ivi->vlan = tci & VLAN_VID_MASK; + ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; + ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON; + + return 0; +} + +bool efx_siena_sriov_wanted(struct efx_nic *efx) +{ + return efx->vf_count != 0; +} + +int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + return 0; +} diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.h b/drivers/net/ethernet/sfc/siena/siena_sriov.h new file mode 100644 index 000000000000..e548c4daf189 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2015 Solarflare Communications Inc. + */ + +#ifndef SIENA_SRIOV_H +#define SIENA_SRIOV_H + +#include "net_driver.h" + +/* On the SFC9000 family each port is associated with 1 PCI physical + * function (PF) handled by sfc and a configurable number of virtual + * functions (VFs) that may be handled by some other driver, often in + * a VM guest. The queue pointer registers are mapped in both PF and + * VF BARs such that an 8K region provides access to a single RX, TX + * and event queue (collectively a Virtual Interface, VI or VNIC). + * + * The PF has access to all 1024 VIs while VFs are mapped to VIs + * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered + * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE). + * The number of VIs and the VI_SCALE value are configurable but must + * be established at boot time by firmware. + */ + +/* Maximum VI_SCALE parameter supported by Siena */ +#define EFX_VI_SCALE_MAX 6 +/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX), + * so this is the smallest allowed value. 
+ */ +#define EFX_VI_BASE 128U +/* Maximum number of VFs allowed */ +#define EFX_VF_COUNT_MAX 127 +/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */ +#define EFX_MAX_VF_EVQ_SIZE 8192UL +/* The number of buffer table entries reserved for each VI on a VF */ +#define EFX_VF_BUFTBL_PER_VI \ + ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \ + sizeof(efx_qword_t) / EFX_BUF_SIZE) + +int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_siena_sriov_init(struct efx_nic *efx); +void efx_siena_sriov_fini(struct efx_nic *efx); +int efx_siena_sriov_mac_address_changed(struct efx_nic *efx); +bool efx_siena_sriov_wanted(struct efx_nic *efx); +void efx_siena_sriov_reset(struct efx_nic *efx); +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); + +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); +int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf, + u16 vlan, u8 qos); +int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, + bool spoofchk); +int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf, + struct ifla_vf_info *ivf); + +#ifdef CONFIG_SFC_SRIOV + +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) +{ + return efx->vf_init_count != 0; +} +#else /* !CONFIG_SFC_SRIOV */ +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) +{ + return false; +} +#endif /* CONFIG_SFC_SRIOV */ + +void efx_siena_sriov_probe(struct efx_nic *efx); +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); + +#endif /* SIENA_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c deleted file mode 100644 index f12851a527d9..000000000000 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ /dev/null @@ -1,1686 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/**************************************************************************** - * Driver for Solarflare network controllers and boards - * Copyright 2010-2012 Solarflare Communications Inc. - */ -#include -#include -#include "net_driver.h" -#include "efx.h" -#include "efx_channels.h" -#include "nic.h" -#include "io.h" -#include "mcdi.h" -#include "filter.h" -#include "mcdi_pcol.h" -#include "farch_regs.h" -#include "siena_sriov.h" -#include "vfdi.h" - -/* Number of longs required to track all the VIs in a VF */ -#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX) - -/* Maximum number of RX queues supported */ -#define VF_MAX_RX_QUEUES 63 - -/** - * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour - * @VF_TX_FILTER_OFF: Disabled - * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only - * 2 TX queues allowed per VF. - * @VF_TX_FILTER_ON: Enabled - */ -enum efx_vf_tx_filter_mode { - VF_TX_FILTER_OFF, - VF_TX_FILTER_AUTO, - VF_TX_FILTER_ON, -}; - -/** - * struct siena_vf - Back-end resource and protocol state for a PCI VF - * @efx: The Efx NIC owning this VF - * @pci_rid: The PCI requester ID for this VF - * @pci_name: The PCI name (formatted address) of this VF - * @index: Index of VF within its port and PF. - * @req: VFDI incoming request work item. Incoming USR_EV events are received - * by the NAPI handler, but must be handled by executing MCDI requests - * inside a work item. 
- * @req_addr: VFDI incoming request DMA address (in VF's PCI address space). - * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member. - * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member. - * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by - * @status_lock - * @busy: VFDI request queued to be processed or being processed. Receiving - * a VFDI request when @busy is set is an error condition. - * @buf: Incoming VFDI requests are DMA from the VF into this buffer. - * @buftbl_base: Buffer table entries for this VF start at this index. - * @rx_filtering: Receive filtering has been requested by the VF driver. - * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request. - * @rx_filter_qid: VF relative qid for RX filter requested by VF. - * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported. - * @tx_filter_mode: Transmit MAC filtering mode. - * @tx_filter_id: Transmit MAC filter ID. - * @addr: The MAC address and outer vlan tag of the VF. - * @status_addr: VF DMA address of page for &struct vfdi_status updates. - * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr, - * @peer_page_addrs and @peer_page_count from simultaneous - * updates by the VM and consumption by - * efx_siena_sriov_update_vf_addr() - * @peer_page_addrs: Pointer to an array of guest pages for local addresses. - * @peer_page_count: Number of entries in @peer_page_count. - * @evq0_addrs: Array of guest pages backing evq0. - * @evq0_count: Number of entries in @evq0_addrs. - * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler - * to wait for flush completions. - * @txq_lock: Mutex for TX queue allocation. - * @txq_mask: Mask of initialized transmit queues. - * @txq_count: Number of initialized transmit queues. - * @rxq_mask: Mask of initialized receive queues. - * @rxq_count: Number of initialized receive queues. - * @rxq_retry_mask: Mask or receive queues that need to be flushed again - * due to flush failure. - * @rxq_retry_count: Number of receive queues in @rxq_retry_mask. - * @reset_work: Work item to schedule a VF reset. - */ -struct siena_vf { - struct efx_nic *efx; - unsigned int pci_rid; - char pci_name[13]; /* dddd:bb:dd.f */ - unsigned int index; - struct work_struct req; - u64 req_addr; - int req_type; - unsigned req_seqno; - unsigned msg_seqno; - bool busy; - struct efx_buffer buf; - unsigned buftbl_base; - bool rx_filtering; - enum efx_filter_flags rx_filter_flags; - unsigned rx_filter_qid; - int rx_filter_id; - enum efx_vf_tx_filter_mode tx_filter_mode; - int tx_filter_id; - struct vfdi_endpoint addr; - u64 status_addr; - struct mutex status_lock; - u64 *peer_page_addrs; - unsigned peer_page_count; - u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) / - EFX_BUF_SIZE]; - unsigned evq0_count; - wait_queue_head_t flush_waitq; - struct mutex txq_lock; - unsigned long txq_mask[VI_MASK_LENGTH]; - unsigned txq_count; - unsigned long rxq_mask[VI_MASK_LENGTH]; - unsigned rxq_count; - unsigned long rxq_retry_mask[VI_MASK_LENGTH]; - atomic_t rxq_retry_count; - struct work_struct reset_work; -}; - -struct efx_memcpy_req { - unsigned int from_rid; - void *from_buf; - u64 from_addr; - unsigned int to_rid; - u64 to_addr; - unsigned length; -}; - -/** - * struct efx_local_addr - A MAC address on the vswitch without a VF. - * - * Siena does not have a switch, so VFs can't transmit data to each - * other. 
Instead the VFs must be made aware of the local addresses - * on the vswitch, so that they can arrange for an alternative - * software datapath to be used. - * - * @link: List head for insertion into efx->local_addr_list. - * @addr: Ethernet address - */ -struct efx_local_addr { - struct list_head link; - u8 addr[ETH_ALEN]; -}; - -/** - * struct efx_endpoint_page - Page of vfdi_endpoint structures - * - * @link: List head for insertion into efx->local_page_list. - * @ptr: Pointer to page. - * @addr: DMA address of page. - */ -struct efx_endpoint_page { - struct list_head link; - void *ptr; - dma_addr_t addr; -}; - -/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */ -#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \ - ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid)) -#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \ - (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ - (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) -#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \ - (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ - (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) - -#define EFX_FIELD_MASK(_field) \ - ((1 << _field ## _WIDTH) - 1) - -/* VFs can only use this many transmit channels */ -static unsigned int vf_max_tx_channels = 2; -module_param(vf_max_tx_channels, uint, 0444); -MODULE_PARM_DESC(vf_max_tx_channels, - "Limit the number of TX channels VFs can use"); - -static int max_vfs = -1; -module_param(max_vfs, int, 0444); -MODULE_PARM_DESC(max_vfs, - "Reduce the number of VFs initialized by the driver"); - -/* Workqueue used by VFDI communication. We can't use the global - * workqueue because it may be running the VF driver's probe() - * routine, which will be blocked there waiting for a VFDI response. - */ -static struct workqueue_struct *vfdi_workqueue; - -static unsigned abs_index(struct siena_vf *vf, unsigned index) -{ - return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; -} - -static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, - unsigned *vi_scale_out, unsigned *vf_total_out) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); - unsigned vi_scale, vf_total; - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0); - MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE); - MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count); - - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN, - outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen); - if (rc) - return rc; - if (outlen < MC_CMD_SRIOV_OUT_LEN) - return -EIO; - - vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL); - vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE); - if (vi_scale > EFX_VI_SCALE_MAX) - return -EOPNOTSUPP; - - if (vi_scale_out) - *vi_scale_out = vi_scale; - if (vf_total_out) - *vf_total_out = vf_total; - - return 0; -} - -static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled) -{ - struct siena_nic_data *nic_data = efx->nic_data; - efx_oword_t reg; - - EFX_POPULATE_OWORD_2(reg, - FRF_CZ_USREV_DIS, enabled ? 
0 : 1, - FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel); - efx_writeo(efx, ®, FR_CZ_USR_EV_CFG); -} - -static int efx_siena_sriov_memcpy(struct efx_nic *efx, - struct efx_memcpy_req *req, - unsigned int count) -{ - MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); - MCDI_DECLARE_STRUCT_PTR(record); - unsigned int index, used; - u64 from_addr; - u32 from_rid; - int rc; - - mb(); /* Finish writing source/reading dest before DMA starts */ - - if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM)) - return -ENOBUFS; - used = MC_CMD_MEMCPY_IN_LEN(count); - - for (index = 0; index < count; index++) { - record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index); - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS, - count); - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, - req->to_rid); - MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR, - req->to_addr); - if (req->from_buf == NULL) { - from_rid = req->from_rid; - from_addr = req->from_addr; - } else { - if (WARN_ON(used + req->length > - MCDI_CTL_SDU_LEN_MAX_V1)) { - rc = -ENOBUFS; - goto out; - } - - from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; - from_addr = used; - memcpy(_MCDI_PTR(inbuf, used), req->from_buf, - req->length); - used += req->length; - } - - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid); - MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR, - from_addr); - MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, - req->length); - - ++req; - } - - rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); -out: - mb(); /* Don't write source/read dest before DMA is complete */ - - return rc; -} - -/* The TX filter is entirely controlled by this driver, and is modified - * underneath the feet of the VF - */ -static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct efx_filter_spec filter; - u16 vlan; - int rc; - - if (vf->tx_filter_id != -1) { - efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, - vf->tx_filter_id); - netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n", - vf->pci_name, vf->tx_filter_id); - vf->tx_filter_id = -1; - } - - if (is_zero_ether_addr(vf->addr.mac_addr)) - return; - - /* Turn on TX filtering automatically if not explicitly - * enabled or disabled. - */ - if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) - vf->tx_filter_mode = VF_TX_FILTER_ON; - - vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; - efx_filter_init_tx(&filter, abs_index(vf, 0)); - rc = efx_filter_set_eth_local(&filter, - vlan ? 
vlan : EFX_FILTER_VID_UNSPEC, - vf->addr.mac_addr); - BUG_ON(rc); - - rc = efx_filter_insert_filter(efx, &filter, true); - if (rc < 0) { - netif_warn(efx, hw, efx->net_dev, - "Unable to migrate tx filter for vf %s\n", - vf->pci_name); - } else { - netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n", - vf->pci_name, rc); - vf->tx_filter_id = rc; - } -} - -/* The RX filter is managed here on behalf of the VF driver */ -static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct efx_filter_spec filter; - u16 vlan; - int rc; - - if (vf->rx_filter_id != -1) { - efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, - vf->rx_filter_id); - netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n", - vf->pci_name, vf->rx_filter_id); - vf->rx_filter_id = -1; - } - - if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr)) - return; - - vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; - efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED, - vf->rx_filter_flags, - abs_index(vf, vf->rx_filter_qid)); - rc = efx_filter_set_eth_local(&filter, - vlan ? vlan : EFX_FILTER_VID_UNSPEC, - vf->addr.mac_addr); - BUG_ON(rc); - - rc = efx_filter_insert_filter(efx, &filter, true); - if (rc < 0) { - netif_warn(efx, hw, efx->net_dev, - "Unable to insert rx filter for vf %s\n", - vf->pci_name); - } else { - netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n", - vf->pci_name, rc); - vf->rx_filter_id = rc; - } -} - -static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct siena_nic_data *nic_data = efx->nic_data; - - efx_siena_sriov_reset_tx_filter(vf); - efx_siena_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &nic_data->peer_work); -} - -/* Push the peer list to this VF. The caller must hold status_lock to interlock - * with VFDI requests, and they must be serialised against manipulation of - * local_page_list, either by acquiring local_lock or by running from - * efx_siena_sriov_peer_work() - */ -static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct siena_nic_data *nic_data = efx->nic_data; - struct vfdi_status *status = nic_data->vfdi_status.addr; - struct efx_memcpy_req copy[4]; - struct efx_endpoint_page *epp; - unsigned int pos, count; - unsigned data_offset; - efx_qword_t event; - - WARN_ON(!mutex_is_locked(&vf->status_lock)); - WARN_ON(!vf->status_addr); - - status->local = vf->addr; - status->generation_end = ++status->generation_start; - - memset(copy, '\0', sizeof(copy)); - /* Write generation_start */ - copy[0].from_buf = &status->generation_start; - copy[0].to_rid = vf->pci_rid; - copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status, - generation_start); - copy[0].length = sizeof(status->generation_start); - /* DMA the rest of the structure (excluding the generations). This - * assumes that the non-generation portion of vfdi_status is in - * one chunk starting at the version member. 
- */ - data_offset = offsetof(struct vfdi_status, version); - copy[1].from_rid = efx->pci_dev->devfn; - copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset; - copy[1].to_rid = vf->pci_rid; - copy[1].to_addr = vf->status_addr + data_offset; - copy[1].length = status->length - data_offset; - - /* Copy the peer pages */ - pos = 2; - count = 0; - list_for_each_entry(epp, &nic_data->local_page_list, link) { - if (count == vf->peer_page_count) { - /* The VF driver will know they need to provide more - * pages because peer_addr_count is too large. - */ - break; - } - copy[pos].from_buf = NULL; - copy[pos].from_rid = efx->pci_dev->devfn; - copy[pos].from_addr = epp->addr; - copy[pos].to_rid = vf->pci_rid; - copy[pos].to_addr = vf->peer_page_addrs[count]; - copy[pos].length = EFX_PAGE_SIZE; - - if (++pos == ARRAY_SIZE(copy)) { - efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); - pos = 0; - } - ++count; - } - - /* Write generation_end */ - copy[pos].from_buf = &status->generation_end; - copy[pos].to_rid = vf->pci_rid; - copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, - generation_end); - copy[pos].length = sizeof(status->generation_end); - efx_siena_sriov_memcpy(efx, copy, pos + 1); - - /* Notify the guest */ - EFX_POPULATE_QWORD_3(event, - FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, - VFDI_EV_SEQ, (vf->msg_seqno & 0xff), - VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); - ++vf->msg_seqno; - efx_farch_generate_event(efx, - EFX_VI_BASE + vf->index * efx_vf_size(efx), - &event); -} - -static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset, - u64 *addr, unsigned count) -{ - efx_qword_t buf; - unsigned pos; - - for (pos = 0; pos < count; ++pos) { - EFX_POPULATE_QWORD_3(buf, - FRF_AZ_BUF_ADR_REGION, 0, - FRF_AZ_BUF_ADR_FBUF, - addr ? addr[pos] >> 12 : 0, - FRF_AZ_BUF_OWNER_ID_FBUF, 0); - efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL, - &buf, offset + pos); - } -} - -static bool bad_vf_index(struct efx_nic *efx, unsigned index) -{ - return index >= efx_vf_size(efx); -} - -static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count) -{ - unsigned max_buf_count = max_entry_count * - sizeof(efx_qword_t) / EFX_BUF_SIZE; - - return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count); -} - -/* Check that VI specified by per-port index belongs to a VF. - * Optionally set VF index and VI index within the VF. 
- */ -static bool map_vi_index(struct efx_nic *efx, unsigned abs_index, - struct siena_vf **vf_out, unsigned *rel_index_out) -{ - struct siena_nic_data *nic_data = efx->nic_data; - unsigned vf_i; - - if (abs_index < EFX_VI_BASE) - return true; - vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx); - if (vf_i >= efx->vf_init_count) - return true; - - if (vf_out) - *vf_out = nic_data->vf + vf_i; - if (rel_index_out) - *rel_index_out = abs_index % efx_vf_size(efx); - return false; -} - -static int efx_vfdi_init_evq(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct vfdi_req *req = vf->buf.addr; - unsigned vf_evq = req->u.init_evq.index; - unsigned buf_count = req->u.init_evq.buf_count; - unsigned abs_evq = abs_index(vf, vf_evq); - unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq); - efx_oword_t reg; - - if (bad_vf_index(efx, vf_evq) || - bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) { - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n", - vf->pci_name, vf_evq, buf_count); - return VFDI_RC_EINVAL; - } - - efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); - - EFX_POPULATE_OWORD_3(reg, - FRF_CZ_TIMER_Q_EN, 1, - FRF_CZ_HOST_NOTIFY_MODE, 0, - FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); - efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, abs_evq); - EFX_POPULATE_OWORD_3(reg, - FRF_AZ_EVQ_EN, 1, - FRF_AZ_EVQ_SIZE, __ffs(buf_count), - FRF_AZ_EVQ_BUF_BASE_ID, buftbl); - efx_writeo_table(efx, ®, FR_BZ_EVQ_PTR_TBL, abs_evq); - - if (vf_evq == 0) { - memcpy(vf->evq0_addrs, req->u.init_evq.addr, - buf_count * sizeof(u64)); - vf->evq0_count = buf_count; - } - - return VFDI_RC_SUCCESS; -} - -static int efx_vfdi_init_rxq(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct vfdi_req *req = vf->buf.addr; - unsigned vf_rxq = req->u.init_rxq.index; - unsigned vf_evq = req->u.init_rxq.evq; - unsigned buf_count = req->u.init_rxq.buf_count; - unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq); - unsigned label; - efx_oword_t reg; - - if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || - vf_rxq >= VF_MAX_RX_QUEUES || - bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d " - "buf_count %d\n", vf->pci_name, vf_rxq, - vf_evq, buf_count); - return VFDI_RC_EINVAL; - } - if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) - ++vf->rxq_count; - efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); - - label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); - EFX_POPULATE_OWORD_6(reg, - FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl, - FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), - FRF_AZ_RX_DESCQ_LABEL, label, - FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count), - FRF_AZ_RX_DESCQ_JUMBO, - !!(req->u.init_rxq.flags & - VFDI_RXQ_FLAG_SCATTER_EN), - FRF_AZ_RX_DESCQ_EN, 1); - efx_writeo_table(efx, ®, FR_BZ_RX_DESC_PTR_TBL, - abs_index(vf, vf_rxq)); - - return VFDI_RC_SUCCESS; -} - -static int efx_vfdi_init_txq(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct vfdi_req *req = vf->buf.addr; - unsigned vf_txq = req->u.init_txq.index; - unsigned vf_evq = req->u.init_txq.evq; - unsigned buf_count = req->u.init_txq.buf_count; - unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq); - unsigned label, eth_filt_en; - efx_oword_t reg; - - if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) || - vf_txq >= vf_max_tx_channels || - bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { - if (net_ratelimit()) 
- netif_err(efx, hw, efx->net_dev, - "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d " - "buf_count %d\n", vf->pci_name, vf_txq, - vf_evq, buf_count); - return VFDI_RC_EINVAL; - } - - mutex_lock(&vf->txq_lock); - if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) - ++vf->txq_count; - mutex_unlock(&vf->txq_lock); - efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); - - eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; - - label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL); - EFX_POPULATE_OWORD_8(reg, - FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U), - FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en, - FRF_AZ_TX_DESCQ_EN, 1, - FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl, - FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), - FRF_AZ_TX_DESCQ_LABEL, label, - FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count), - FRF_BZ_TX_NON_IP_DROP_DIS, 1); - efx_writeo_table(efx, ®, FR_BZ_TX_DESC_PTR_TBL, - abs_index(vf, vf_txq)); - - return VFDI_RC_SUCCESS; -} - -/* Returns true when efx_vfdi_fini_all_queues should wake */ -static bool efx_vfdi_flush_wake(struct siena_vf *vf) -{ - /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */ - smp_mb(); - - return (!vf->txq_count && !vf->rxq_count) || - atomic_read(&vf->rxq_retry_count); -} - -static void efx_vfdi_flush_clear(struct siena_vf *vf) -{ - memset(vf->txq_mask, 0, sizeof(vf->txq_mask)); - vf->txq_count = 0; - memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask)); - vf->rxq_count = 0; - memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask)); - atomic_set(&vf->rxq_retry_count, 0); -} - -static int efx_vfdi_fini_all_queues(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - efx_oword_t reg; - unsigned count = efx_vf_size(efx); - unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx); - unsigned timeout = HZ; - unsigned index, rxqs_count; - MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX); - int rc; - - BUILD_BUG_ON(VF_MAX_RX_QUEUES > - MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); - - rtnl_lock(); - siena_prepare_flush(efx); - rtnl_unlock(); - - /* Flush all the initialized queues */ - rxqs_count = 0; - for (index = 0; index < count; ++index) { - if (test_bit(index, vf->txq_mask)) { - EFX_POPULATE_OWORD_2(reg, - FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, - FRF_AZ_TX_FLUSH_DESCQ, - vf_offset + index); - efx_writeo(efx, ®, FR_AZ_TX_FLUSH_DESCQ); - } - if (test_bit(index, vf->rxq_mask)) { - MCDI_SET_ARRAY_DWORD( - inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, - rxqs_count, vf_offset + index); - rxqs_count++; - } - } - - atomic_set(&vf->rxq_retry_count, 0); - while (timeout && (vf->rxq_count || vf->txq_count)) { - rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, - MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count), - NULL, 0, NULL); - WARN_ON(rc < 0); - - timeout = wait_event_timeout(vf->flush_waitq, - efx_vfdi_flush_wake(vf), - timeout); - rxqs_count = 0; - for (index = 0; index < count; ++index) { - if (test_and_clear_bit(index, vf->rxq_retry_mask)) { - atomic_dec(&vf->rxq_retry_count); - MCDI_SET_ARRAY_DWORD( - inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, - rxqs_count, vf_offset + index); - rxqs_count++; - } - } - } - - rtnl_lock(); - siena_finish_flush(efx); - rtnl_unlock(); - - /* Irrespective of success/failure, fini the queues */ - EFX_ZERO_OWORD(reg); - for (index = 0; index < count; ++index) { - efx_writeo_table(efx, ®, FR_BZ_RX_DESC_PTR_TBL, - vf_offset + index); - efx_writeo_table(efx, ®, FR_BZ_TX_DESC_PTR_TBL, - vf_offset + index); - efx_writeo_table(efx, ®, FR_BZ_EVQ_PTR_TBL, - vf_offset + index); - efx_writeo_table(efx, ®, 
FR_BZ_TIMER_TBL, - vf_offset + index); - } - efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL, - EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); - efx_vfdi_flush_clear(vf); - - vf->evq0_count = 0; - - return timeout ? 0 : VFDI_RC_ETIMEDOUT; -} - -static int efx_vfdi_insert_filter(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct siena_nic_data *nic_data = efx->nic_data; - struct vfdi_req *req = vf->buf.addr; - unsigned vf_rxq = req->u.mac_filter.rxq; - unsigned flags; - - if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) { - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "ERROR: Invalid INSERT_FILTER from %s: rxq %d " - "flags 0x%x\n", vf->pci_name, vf_rxq, - req->u.mac_filter.flags); - return VFDI_RC_EINVAL; - } - - flags = 0; - if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS) - flags |= EFX_FILTER_FLAG_RX_RSS; - if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER) - flags |= EFX_FILTER_FLAG_RX_SCATTER; - vf->rx_filter_flags = flags; - vf->rx_filter_qid = vf_rxq; - vf->rx_filtering = true; - - efx_siena_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &nic_data->peer_work); - - return VFDI_RC_SUCCESS; -} - -static int efx_vfdi_remove_all_filters(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct siena_nic_data *nic_data = efx->nic_data; - - vf->rx_filtering = false; - efx_siena_sriov_reset_rx_filter(vf); - queue_work(vfdi_workqueue, &nic_data->peer_work); - - return VFDI_RC_SUCCESS; -} - -static int efx_vfdi_set_status_page(struct siena_vf *vf) -{ - struct efx_nic *efx = vf->efx; - struct siena_nic_data *nic_data = efx->nic_data; - struct vfdi_req *req = vf->buf.addr; - u64 page_count = req->u.set_status_page.peer_page_count; - u64 max_page_count = - (EFX_PAGE_SIZE - - offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0])) - / sizeof(req->u.set_status_page.peer_page_addr[0]); - - if (!req->u.set_status_page.dma_addr || page_count > max_page_count) { - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "ERROR: Invalid SET_STATUS_PAGE from %s\n", - vf->pci_name); - return VFDI_RC_EINVAL; - } - - mutex_lock(&nic_data->local_lock); - mutex_lock(&vf->status_lock); - vf->status_addr = req->u.set_status_page.dma_addr; - - kfree(vf->peer_page_addrs); - vf->peer_page_addrs = NULL; - vf->peer_page_count = 0; - - if (page_count) { - vf->peer_page_addrs = kcalloc(page_count, sizeof(u64), - GFP_KERNEL); - if (vf->peer_page_addrs) { - memcpy(vf->peer_page_addrs, - req->u.set_status_page.peer_page_addr, - page_count * sizeof(u64)); - vf->peer_page_count = page_count; - } - } - - __efx_siena_sriov_push_vf_status(vf); - mutex_unlock(&vf->status_lock); - mutex_unlock(&nic_data->local_lock); - - return VFDI_RC_SUCCESS; -} - -static int efx_vfdi_clear_status_page(struct siena_vf *vf) -{ - mutex_lock(&vf->status_lock); - vf->status_addr = 0; - mutex_unlock(&vf->status_lock); - - return VFDI_RC_SUCCESS; -} - -typedef int (*efx_vfdi_op_t)(struct siena_vf *vf); - -static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { - [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq, - [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq, - [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq, - [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues, - [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter, - [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters, - [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page, - [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, -}; - -static void efx_siena_sriov_vfdi(struct work_struct *work) -{ - struct siena_vf *vf = container_of(work, struct 
siena_vf, req); - struct efx_nic *efx = vf->efx; - struct vfdi_req *req = vf->buf.addr; - struct efx_memcpy_req copy[2]; - int rc; - - /* Copy this page into the local address space */ - memset(copy, '\0', sizeof(copy)); - copy[0].from_rid = vf->pci_rid; - copy[0].from_addr = vf->req_addr; - copy[0].to_rid = efx->pci_dev->devfn; - copy[0].to_addr = vf->buf.dma_addr; - copy[0].length = EFX_PAGE_SIZE; - rc = efx_siena_sriov_memcpy(efx, copy, 1); - if (rc) { - /* If we can't get the request, we can't reply to the caller */ - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "ERROR: Unable to fetch VFDI request from %s rc %d\n", - vf->pci_name, -rc); - vf->busy = false; - return; - } - - if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) { - rc = vfdi_ops[req->op](vf); - if (rc == 0) { - netif_dbg(efx, hw, efx->net_dev, - "vfdi request %d from %s ok\n", - req->op, vf->pci_name); - } - } else { - netif_dbg(efx, hw, efx->net_dev, - "ERROR: Unrecognised request %d from VF %s addr " - "%llx\n", req->op, vf->pci_name, - (unsigned long long)vf->req_addr); - rc = VFDI_RC_EOPNOTSUPP; - } - - /* Allow subsequent VF requests */ - vf->busy = false; - smp_wmb(); - - /* Respond to the request */ - req->rc = rc; - req->op = VFDI_OP_RESPONSE; - - memset(copy, '\0', sizeof(copy)); - copy[0].from_buf = &req->rc; - copy[0].to_rid = vf->pci_rid; - copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc); - copy[0].length = sizeof(req->rc); - copy[1].from_buf = &req->op; - copy[1].to_rid = vf->pci_rid; - copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); - copy[1].length = sizeof(req->op); - - (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); -} - - - -/* After a reset the event queues inside the guests no longer exist. Fill the - * event ring in guest memory with VFDI reset events, then (re-initialise) the - * event queue to raise an interrupt. The guest driver will then recover. 
- */ - -static void efx_siena_sriov_reset_vf(struct siena_vf *vf, - struct efx_buffer *buffer) -{ - struct efx_nic *efx = vf->efx; - struct efx_memcpy_req copy_req[4]; - efx_qword_t event; - unsigned int pos, count, k, buftbl, abs_evq; - efx_oword_t reg; - efx_dword_t ptr; - int rc; - - BUG_ON(buffer->len != EFX_PAGE_SIZE); - - if (!vf->evq0_count) - return; - BUG_ON(vf->evq0_count & (vf->evq0_count - 1)); - - mutex_lock(&vf->status_lock); - EFX_POPULATE_QWORD_3(event, - FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, - VFDI_EV_SEQ, vf->msg_seqno, - VFDI_EV_TYPE, VFDI_EV_TYPE_RESET); - vf->msg_seqno++; - for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event)) - memcpy(buffer->addr + pos, &event, sizeof(event)); - - for (pos = 0; pos < vf->evq0_count; pos += count) { - count = min_t(unsigned, vf->evq0_count - pos, - ARRAY_SIZE(copy_req)); - for (k = 0; k < count; k++) { - copy_req[k].from_buf = NULL; - copy_req[k].from_rid = efx->pci_dev->devfn; - copy_req[k].from_addr = buffer->dma_addr; - copy_req[k].to_rid = vf->pci_rid; - copy_req[k].to_addr = vf->evq0_addrs[pos + k]; - copy_req[k].length = EFX_PAGE_SIZE; - } - rc = efx_siena_sriov_memcpy(efx, copy_req, count); - if (rc) { - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "ERROR: Unable to notify %s of reset" - ": %d\n", vf->pci_name, -rc); - break; - } - } - - /* Reinitialise, arm and trigger evq0 */ - abs_evq = abs_index(vf, 0); - buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); - efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); - - EFX_POPULATE_OWORD_3(reg, - FRF_CZ_TIMER_Q_EN, 1, - FRF_CZ_HOST_NOTIFY_MODE, 0, - FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); - efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, abs_evq); - EFX_POPULATE_OWORD_3(reg, - FRF_AZ_EVQ_EN, 1, - FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count), - FRF_AZ_EVQ_BUF_BASE_ID, buftbl); - efx_writeo_table(efx, ®, FR_BZ_EVQ_PTR_TBL, abs_evq); - EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0); - efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq); - - mutex_unlock(&vf->status_lock); -} - -static void efx_siena_sriov_reset_vf_work(struct work_struct *work) -{ - struct siena_vf *vf = container_of(work, struct siena_vf, req); - struct efx_nic *efx = vf->efx; - struct efx_buffer buf; - - if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { - efx_siena_sriov_reset_vf(vf, &buf); - efx_nic_free_buffer(efx, &buf); - } -} - -static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx) -{ - netif_err(efx, drv, efx->net_dev, - "ERROR: IOV requires MSI-X and 1 additional interrupt" - "vector. 
IOV disabled\n"); - efx->vf_count = 0; -} - -static int efx_siena_sriov_probe_channel(struct efx_channel *channel) -{ - struct siena_nic_data *nic_data = channel->efx->nic_data; - nic_data->vfdi_channel = channel; - - return 0; -} - -static void -efx_siena_sriov_get_channel_name(struct efx_channel *channel, - char *buf, size_t len) -{ - snprintf(buf, len, "%s-iov", channel->efx->name); -} - -static const struct efx_channel_type efx_siena_sriov_channel_type = { - .handle_no_channel = efx_siena_sriov_handle_no_channel, - .pre_probe = efx_siena_sriov_probe_channel, - .post_remove = efx_channel_dummy_op_void, - .get_name = efx_siena_sriov_get_channel_name, - /* no copy operation; channel must not be reallocated */ - .keep_eventq = true, -}; - -void efx_siena_sriov_probe(struct efx_nic *efx) -{ - unsigned count; - - if (!max_vfs) - return; - - if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) { - pci_info(efx->pci_dev, "no SR-IOV VFs probed\n"); - return; - } - if (count > 0 && count > max_vfs) - count = max_vfs; - - /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ - efx->vf_count = count; - - efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type; -} - -/* Copy the list of individual addresses into the vfdi_status.peers - * array and auxiliary pages, protected by %local_lock. Drop that lock - * and then broadcast the address list to every VF. - */ -static void efx_siena_sriov_peer_work(struct work_struct *data) -{ - struct siena_nic_data *nic_data = container_of(data, - struct siena_nic_data, - peer_work); - struct efx_nic *efx = nic_data->efx; - struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; - struct siena_vf *vf; - struct efx_local_addr *local_addr; - struct vfdi_endpoint *peer; - struct efx_endpoint_page *epp; - struct list_head pages; - unsigned int peer_space; - unsigned int peer_count; - unsigned int pos; - - mutex_lock(&nic_data->local_lock); - - /* Move the existing peer pages off %local_page_list */ - INIT_LIST_HEAD(&pages); - list_splice_tail_init(&nic_data->local_page_list, &pages); - - /* Populate the VF addresses starting from entry 1 (entry 0 is - * the PF address) - */ - peer = vfdi_status->peers + 1; - peer_space = ARRAY_SIZE(vfdi_status->peers) - 1; - peer_count = 1; - for (pos = 0; pos < efx->vf_count; ++pos) { - vf = nic_data->vf + pos; - - mutex_lock(&vf->status_lock); - if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) { - *peer++ = vf->addr; - ++peer_count; - --peer_space; - BUG_ON(peer_space == 0); - } - mutex_unlock(&vf->status_lock); - } - - /* Fill the remaining addresses */ - list_for_each_entry(local_addr, &nic_data->local_addr_list, link) { - ether_addr_copy(peer->mac_addr, local_addr->addr); - peer->tci = 0; - ++peer; - ++peer_count; - if (--peer_space == 0) { - if (list_empty(&pages)) { - epp = kmalloc(sizeof(*epp), GFP_KERNEL); - if (!epp) - break; - epp->ptr = dma_alloc_coherent( - &efx->pci_dev->dev, EFX_PAGE_SIZE, - &epp->addr, GFP_KERNEL); - if (!epp->ptr) { - kfree(epp); - break; - } - } else { - epp = list_first_entry( - &pages, struct efx_endpoint_page, link); - list_del(&epp->link); - } - - list_add_tail(&epp->link, &nic_data->local_page_list); - peer = (struct vfdi_endpoint *)epp->ptr; - peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); - } - } - vfdi_status->peer_count = peer_count; - mutex_unlock(&nic_data->local_lock); - - /* Free any now unused endpoint pages */ - while (!list_empty(&pages)) { - epp = list_first_entry( - &pages, struct efx_endpoint_page, 
link); - list_del(&epp->link); - dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, - epp->ptr, epp->addr); - kfree(epp); - } - - /* Finally, push the pages */ - for (pos = 0; pos < efx->vf_count; ++pos) { - vf = nic_data->vf + pos; - - mutex_lock(&vf->status_lock); - if (vf->status_addr) - __efx_siena_sriov_push_vf_status(vf); - mutex_unlock(&vf->status_lock); - } -} - -static void efx_siena_sriov_free_local(struct efx_nic *efx) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct efx_local_addr *local_addr; - struct efx_endpoint_page *epp; - - while (!list_empty(&nic_data->local_addr_list)) { - local_addr = list_first_entry(&nic_data->local_addr_list, - struct efx_local_addr, link); - list_del(&local_addr->link); - kfree(local_addr); - } - - while (!list_empty(&nic_data->local_page_list)) { - epp = list_first_entry(&nic_data->local_page_list, - struct efx_endpoint_page, link); - list_del(&epp->link); - dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, - epp->ptr, epp->addr); - kfree(epp); - } -} - -static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) -{ - unsigned index; - struct siena_vf *vf; - struct siena_nic_data *nic_data = efx->nic_data; - - nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf), - GFP_KERNEL); - if (!nic_data->vf) - return -ENOMEM; - - for (index = 0; index < efx->vf_count; ++index) { - vf = nic_data->vf + index; - - vf->efx = efx; - vf->index = index; - vf->rx_filter_id = -1; - vf->tx_filter_mode = VF_TX_FILTER_AUTO; - vf->tx_filter_id = -1; - INIT_WORK(&vf->req, efx_siena_sriov_vfdi); - INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); - init_waitqueue_head(&vf->flush_waitq); - mutex_init(&vf->status_lock); - mutex_init(&vf->txq_lock); - } - - return 0; -} - -static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct siena_vf *vf; - unsigned int pos; - - for (pos = 0; pos < efx->vf_count; ++pos) { - vf = nic_data->vf + pos; - - efx_nic_free_buffer(efx, &vf->buf); - kfree(vf->peer_page_addrs); - vf->peer_page_addrs = NULL; - vf->peer_page_count = 0; - - vf->evq0_count = 0; - } -} - -static int efx_siena_sriov_vfs_init(struct efx_nic *efx) -{ - struct pci_dev *pci_dev = efx->pci_dev; - struct siena_nic_data *nic_data = efx->nic_data; - unsigned index, devfn, sriov, buftbl_base; - u16 offset, stride; - struct siena_vf *vf; - int rc; - - sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV); - if (!sriov) - return -ENOENT; - - pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); - pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); - - buftbl_base = nic_data->vf_buftbl_base; - devfn = pci_dev->devfn + offset; - for (index = 0; index < efx->vf_count; ++index) { - vf = nic_data->vf + index; - - /* Reserve buffer entries */ - vf->buftbl_base = buftbl_base; - buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx); - - vf->pci_rid = devfn; - snprintf(vf->pci_name, sizeof(vf->pci_name), - "%04x:%02x:%02x.%d", - pci_domain_nr(pci_dev->bus), pci_dev->bus->number, - PCI_SLOT(devfn), PCI_FUNC(devfn)); - - rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, - GFP_KERNEL); - if (rc) - goto fail; - - devfn += stride; - } - - return 0; - -fail: - efx_siena_sriov_vfs_fini(efx); - return rc; -} - -int efx_siena_sriov_init(struct efx_nic *efx) -{ - struct net_device *net_dev = efx->net_dev; - struct siena_nic_data *nic_data = efx->nic_data; - struct vfdi_status *vfdi_status; - int rc; - - /* Ensure there's room for vf_channel */ - 
BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE); - /* Ensure that VI_BASE is aligned on VI_SCALE */ - BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1)); - - if (efx->vf_count == 0) - return 0; - - rc = efx_siena_sriov_cmd(efx, true, NULL, NULL); - if (rc) - goto fail_cmd; - - rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status, - sizeof(*vfdi_status), GFP_KERNEL); - if (rc) - goto fail_status; - vfdi_status = nic_data->vfdi_status.addr; - memset(vfdi_status, 0, sizeof(*vfdi_status)); - vfdi_status->version = 1; - vfdi_status->length = sizeof(*vfdi_status); - vfdi_status->max_tx_channels = vf_max_tx_channels; - vfdi_status->vi_scale = efx->vi_scale; - vfdi_status->rss_rxq_count = efx->rss_spread; - vfdi_status->peer_count = 1 + efx->vf_count; - vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; - - rc = efx_siena_sriov_vf_alloc(efx); - if (rc) - goto fail_alloc; - - mutex_init(&nic_data->local_lock); - INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work); - INIT_LIST_HEAD(&nic_data->local_addr_list); - INIT_LIST_HEAD(&nic_data->local_page_list); - - rc = efx_siena_sriov_vfs_init(efx); - if (rc) - goto fail_vfs; - - rtnl_lock(); - ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr); - efx->vf_init_count = efx->vf_count; - rtnl_unlock(); - - efx_siena_sriov_usrev(efx, true); - - /* At this point we must be ready to accept VFDI requests */ - - rc = pci_enable_sriov(efx->pci_dev, efx->vf_count); - if (rc) - goto fail_pci; - - netif_info(efx, probe, net_dev, - "enabled SR-IOV for %d VFs, %d VI per VF\n", - efx->vf_count, efx_vf_size(efx)); - return 0; - -fail_pci: - efx_siena_sriov_usrev(efx, false); - rtnl_lock(); - efx->vf_init_count = 0; - rtnl_unlock(); - efx_siena_sriov_vfs_fini(efx); -fail_vfs: - cancel_work_sync(&nic_data->peer_work); - efx_siena_sriov_free_local(efx); - kfree(nic_data->vf); -fail_alloc: - efx_nic_free_buffer(efx, &nic_data->vfdi_status); -fail_status: - efx_siena_sriov_cmd(efx, false, NULL, NULL); -fail_cmd: - return rc; -} - -void efx_siena_sriov_fini(struct efx_nic *efx) -{ - struct siena_vf *vf; - unsigned int pos; - struct siena_nic_data *nic_data = efx->nic_data; - - if (efx->vf_init_count == 0) - return; - - /* Disable all interfaces to reconfiguration */ - BUG_ON(nic_data->vfdi_channel->enabled); - efx_siena_sriov_usrev(efx, false); - rtnl_lock(); - efx->vf_init_count = 0; - rtnl_unlock(); - - /* Flush all reconfiguration work */ - for (pos = 0; pos < efx->vf_count; ++pos) { - vf = nic_data->vf + pos; - cancel_work_sync(&vf->req); - cancel_work_sync(&vf->reset_work); - } - cancel_work_sync(&nic_data->peer_work); - - pci_disable_sriov(efx->pci_dev); - - /* Tear down back-end state */ - efx_siena_sriov_vfs_fini(efx); - efx_siena_sriov_free_local(efx); - kfree(nic_data->vf); - efx_nic_free_buffer(efx, &nic_data->vfdi_status); - efx_siena_sriov_cmd(efx, false, NULL, NULL); -} - -void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) -{ - struct efx_nic *efx = channel->efx; - struct siena_vf *vf; - unsigned qid, seq, type, data; - - qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); - - /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */ - BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0); - seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ); - type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE); - data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA); - - netif_vdbg(efx, hw, efx->net_dev, - "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n", - qid, seq, type, data); - - if (map_vi_index(efx, qid, &vf, NULL)) - 
return; - if (vf->busy) - goto error; - - if (type == VFDI_EV_TYPE_REQ_WORD0) { - /* Resynchronise */ - vf->req_type = VFDI_EV_TYPE_REQ_WORD0; - vf->req_seqno = seq + 1; - vf->req_addr = 0; - } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type) - goto error; - - switch (vf->req_type) { - case VFDI_EV_TYPE_REQ_WORD0: - case VFDI_EV_TYPE_REQ_WORD1: - case VFDI_EV_TYPE_REQ_WORD2: - vf->req_addr |= (u64)data << (vf->req_type << 4); - ++vf->req_type; - return; - - case VFDI_EV_TYPE_REQ_WORD3: - vf->req_addr |= (u64)data << 48; - vf->req_type = VFDI_EV_TYPE_REQ_WORD0; - vf->busy = true; - queue_work(vfdi_workqueue, &vf->req); - return; - } - -error: - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "ERROR: Screaming VFDI request from %s\n", - vf->pci_name); - /* Reset the request and sequence number */ - vf->req_type = VFDI_EV_TYPE_REQ_WORD0; - vf->req_seqno = seq + 1; -} - -void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct siena_vf *vf; - - if (vf_i > efx->vf_init_count) - return; - vf = nic_data->vf + vf_i; - netif_info(efx, hw, efx->net_dev, - "FLR on VF %s\n", vf->pci_name); - - vf->status_addr = 0; - efx_vfdi_remove_all_filters(vf); - efx_vfdi_flush_clear(vf); - - vf->evq0_count = 0; -} - -int efx_siena_sriov_mac_address_changed(struct efx_nic *efx) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; - - if (!efx->vf_init_count) - return 0; - ether_addr_copy(vfdi_status->peers[0].mac_addr, - efx->net_dev->dev_addr); - queue_work(vfdi_workqueue, &nic_data->peer_work); - - return 0; -} - -void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) -{ - struct siena_vf *vf; - unsigned queue, qid; - - queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); - if (map_vi_index(efx, queue, &vf, &qid)) - return; - /* Ignore flush completions triggered by an FLR */ - if (!test_bit(qid, vf->txq_mask)) - return; - - __clear_bit(qid, vf->txq_mask); - --vf->txq_count; - - if (efx_vfdi_flush_wake(vf)) - wake_up(&vf->flush_waitq); -} - -void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) -{ - struct siena_vf *vf; - unsigned ev_failed, queue, qid; - - queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); - ev_failed = EFX_QWORD_FIELD(*event, - FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); - if (map_vi_index(efx, queue, &vf, &qid)) - return; - if (!test_bit(qid, vf->rxq_mask)) - return; - - if (ev_failed) { - set_bit(qid, vf->rxq_retry_mask); - atomic_inc(&vf->rxq_retry_count); - } else { - __clear_bit(qid, vf->rxq_mask); - --vf->rxq_count; - } - if (efx_vfdi_flush_wake(vf)) - wake_up(&vf->flush_waitq); -} - -/* Called from napi. 
Schedule the reset work item */ -void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) -{ - struct siena_vf *vf; - unsigned int rel; - - if (map_vi_index(efx, dmaq, &vf, &rel)) - return; - - if (net_ratelimit()) - netif_err(efx, hw, efx->net_dev, - "VF %d DMA Q %d reports descriptor fetch error.\n", - vf->index, rel); - queue_work(vfdi_workqueue, &vf->reset_work); -} - -/* Reset all VFs */ -void efx_siena_sriov_reset(struct efx_nic *efx) -{ - struct siena_nic_data *nic_data = efx->nic_data; - unsigned int vf_i; - struct efx_buffer buf; - struct siena_vf *vf; - - ASSERT_RTNL(); - - if (efx->vf_init_count == 0) - return; - - efx_siena_sriov_usrev(efx, true); - (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); - - if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) - return; - - for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { - vf = nic_data->vf + vf_i; - efx_siena_sriov_reset_vf(vf, &buf); - } - - efx_nic_free_buffer(efx, &buf); -} - -int efx_init_sriov(void) -{ - /* A single threaded workqueue is sufficient. efx_siena_sriov_vfdi() and - * efx_siena_sriov_peer_work() spend almost all their time sleeping for - * MCDI to complete anyway - */ - vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); - if (!vfdi_workqueue) - return -ENOMEM; - return 0; -} - -void efx_fini_sriov(void) -{ - destroy_workqueue(vfdi_workqueue); -} - -int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct siena_vf *vf; - - if (vf_i >= efx->vf_init_count) - return -EINVAL; - vf = nic_data->vf + vf_i; - - mutex_lock(&vf->status_lock); - ether_addr_copy(vf->addr.mac_addr, mac); - __efx_siena_sriov_update_vf_addr(vf); - mutex_unlock(&vf->status_lock); - - return 0; -} - -int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, - u16 vlan, u8 qos) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct siena_vf *vf; - u16 tci; - - if (vf_i >= efx->vf_init_count) - return -EINVAL; - vf = nic_data->vf + vf_i; - - mutex_lock(&vf->status_lock); - tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); - vf->addr.tci = htons(tci); - __efx_siena_sriov_update_vf_addr(vf); - mutex_unlock(&vf->status_lock); - - return 0; -} - -int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, - bool spoofchk) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct siena_vf *vf; - int rc; - - if (vf_i >= efx->vf_init_count) - return -EINVAL; - vf = nic_data->vf + vf_i; - - mutex_lock(&vf->txq_lock); - if (vf->txq_count == 0) { - vf->tx_filter_mode = - spoofchk ? 
VF_TX_FILTER_ON : VF_TX_FILTER_OFF; - rc = 0; - } else { - /* This cannot be changed while TX queues are running */ - rc = -EBUSY; - } - mutex_unlock(&vf->txq_lock); - return rc; -} - -int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i, - struct ifla_vf_info *ivi) -{ - struct siena_nic_data *nic_data = efx->nic_data; - struct siena_vf *vf; - u16 tci; - - if (vf_i >= efx->vf_init_count) - return -EINVAL; - vf = nic_data->vf + vf_i; - - ivi->vf = vf_i; - ether_addr_copy(ivi->mac, vf->addr.mac_addr); - ivi->max_tx_rate = 0; - ivi->min_tx_rate = 0; - tci = ntohs(vf->addr.tci); - ivi->vlan = tci & VLAN_VID_MASK; - ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; - ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON; - - return 0; -} - -bool efx_siena_sriov_wanted(struct efx_nic *efx) -{ - return efx->vf_count != 0; -} - -int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs) -{ - return 0; -} diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h deleted file mode 100644 index e548c4daf189..000000000000 --- a/drivers/net/ethernet/sfc/siena_sriov.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/**************************************************************************** - * Driver for Solarflare network controllers and boards - * Copyright 2015 Solarflare Communications Inc. - */ - -#ifndef SIENA_SRIOV_H -#define SIENA_SRIOV_H - -#include "net_driver.h" - -/* On the SFC9000 family each port is associated with 1 PCI physical - * function (PF) handled by sfc and a configurable number of virtual - * functions (VFs) that may be handled by some other driver, often in - * a VM guest. The queue pointer registers are mapped in both PF and - * VF BARs such that an 8K region provides access to a single RX, TX - * and event queue (collectively a Virtual Interface, VI or VNIC). - * - * The PF has access to all 1024 VIs while VFs are mapped to VIs - * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered - * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE). - * The number of VIs and the VI_SCALE value are configurable but must - * be established at boot time by firmware. - */ - -/* Maximum VI_SCALE parameter supported by Siena */ -#define EFX_VI_SCALE_MAX 6 -/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX), - * so this is the smallest allowed value. 
- */ -#define EFX_VI_BASE 128U -/* Maximum number of VFs allowed */ -#define EFX_VF_COUNT_MAX 127 -/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */ -#define EFX_MAX_VF_EVQ_SIZE 8192UL -/* The number of buffer table entries reserved for each VI on a VF */ -#define EFX_VF_BUFTBL_PER_VI \ - ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \ - sizeof(efx_qword_t) / EFX_BUF_SIZE) - -int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs); -int efx_siena_sriov_init(struct efx_nic *efx); -void efx_siena_sriov_fini(struct efx_nic *efx); -int efx_siena_sriov_mac_address_changed(struct efx_nic *efx); -bool efx_siena_sriov_wanted(struct efx_nic *efx); -void efx_siena_sriov_reset(struct efx_nic *efx); -void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); - -int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac); -int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf, - u16 vlan, u8 qos); -int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, - bool spoofchk); -int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf, - struct ifla_vf_info *ivf); - -#ifdef CONFIG_SFC_SRIOV - -static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) -{ - return efx->vf_init_count != 0; -} -#else /* !CONFIG_SFC_SRIOV */ -static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) -{ - return false; -} -#endif /* CONFIG_SFC_SRIOV */ - -void efx_siena_sriov_probe(struct efx_nic *efx); -void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); -void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); - -#endif /* SIENA_SRIOV_H */ -- cgit v1.2.3 From 6e173d3b4af9e8804ebdbdb7a4afd7ed8f96220b Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:31:31 +0100 Subject: sfc: Copy shared files needed for Siena (part 1) These are the files starting with b through i. No changes are done, those will be done with subsequent commits. 
Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/bitfield.h | 614 +++++ drivers/net/ethernet/sfc/siena/efx.c | 1335 +++++++++++ drivers/net/ethernet/sfc/siena/efx.h | 236 ++ drivers/net/ethernet/sfc/siena/efx_channels.c | 1368 +++++++++++ drivers/net/ethernet/sfc/siena/efx_channels.h | 52 + drivers/net/ethernet/sfc/siena/efx_common.c | 1396 +++++++++++ drivers/net/ethernet/sfc/siena/efx_common.h | 116 + drivers/net/ethernet/sfc/siena/enum.h | 176 ++ drivers/net/ethernet/sfc/siena/ethtool.c | 282 +++ drivers/net/ethernet/sfc/siena/ethtool_common.c | 1338 +++++++++++ drivers/net/ethernet/sfc/siena/ethtool_common.h | 63 + drivers/net/ethernet/sfc/siena/farch_regs.h | 2929 +++++++++++++++++++++++ drivers/net/ethernet/sfc/siena/filter.h | 309 +++ drivers/net/ethernet/sfc/siena/io.h | 310 +++ 14 files changed, 10524 insertions(+) create mode 100644 drivers/net/ethernet/sfc/siena/bitfield.h create mode 100644 drivers/net/ethernet/sfc/siena/efx.c create mode 100644 drivers/net/ethernet/sfc/siena/efx.h create mode 100644 drivers/net/ethernet/sfc/siena/efx_channels.c create mode 100644 drivers/net/ethernet/sfc/siena/efx_channels.h create mode 100644 drivers/net/ethernet/sfc/siena/efx_common.c create mode 100644 drivers/net/ethernet/sfc/siena/efx_common.h create mode 100644 drivers/net/ethernet/sfc/siena/enum.h create mode 100644 drivers/net/ethernet/sfc/siena/ethtool.c create mode 100644 drivers/net/ethernet/sfc/siena/ethtool_common.c create mode 100644 drivers/net/ethernet/sfc/siena/ethtool_common.h create mode 100644 drivers/net/ethernet/sfc/siena/farch_regs.h create mode 100644 drivers/net/ethernet/sfc/siena/filter.h create mode 100644 drivers/net/ethernet/sfc/siena/io.h (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/bitfield.h b/drivers/net/ethernet/sfc/siena/bitfield.h new file mode 100644 index 000000000000..1f981dfe4bdc --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/bitfield.h @@ -0,0 +1,614 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_BITFIELD_H +#define EFX_BITFIELD_H + +/* + * Efx bitfield access + * + * Efx NICs make extensive use of bitfields up to 128 bits + * wide. Since there is no native 128-bit datatype on most systems, + * and since 64-bit datatypes are inefficient on 32-bit systems and + * vice versa, we wrap accesses in a way that uses the most efficient + * datatype. + * + * The NICs are PCI devices and therefore little-endian. Since most + * of the quantities that we deal with are DMAed to/from host memory, + * we define our datatypes (efx_oword_t, efx_qword_t and + * efx_dword_t) to be little-endian. + */ + +/* Lowest bit numbers and widths */ +#define EFX_DUMMY_FIELD_LBN 0 +#define EFX_DUMMY_FIELD_WIDTH 0 +#define EFX_WORD_0_LBN 0 +#define EFX_WORD_0_WIDTH 16 +#define EFX_WORD_1_LBN 16 +#define EFX_WORD_1_WIDTH 16 +#define EFX_DWORD_0_LBN 0 +#define EFX_DWORD_0_WIDTH 32 +#define EFX_DWORD_1_LBN 32 +#define EFX_DWORD_1_WIDTH 32 +#define EFX_DWORD_2_LBN 64 +#define EFX_DWORD_2_WIDTH 32 +#define EFX_DWORD_3_LBN 96 +#define EFX_DWORD_3_WIDTH 32 +#define EFX_QWORD_0_LBN 0 +#define EFX_QWORD_0_WIDTH 64 + +/* Specified attribute (e.g. 
LBN) of the specified field */ +#define EFX_VAL(field, attribute) field ## _ ## attribute +/* Low bit number of the specified field */ +#define EFX_LOW_BIT(field) EFX_VAL(field, LBN) +/* Bit width of the specified field */ +#define EFX_WIDTH(field) EFX_VAL(field, WIDTH) +/* High bit number of the specified field */ +#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1) +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 64 bits. + */ +#define EFX_MASK64(width) \ + ((width) == 64 ? ~((u64) 0) : \ + (((((u64) 1) << (width))) - 1)) + +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 32 bits. Use + * EFX_MASK64 for higher width fields. + */ +#define EFX_MASK32(width) \ + ((width) == 32 ? ~((u32) 0) : \ + (((((u32) 1) << (width))) - 1)) + +/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ +typedef union efx_dword { + __le32 u32[1]; +} efx_dword_t; + +/* A quadword (i.e. 8 byte) datatype - little-endian in HW */ +typedef union efx_qword { + __le64 u64[1]; + __le32 u32[2]; + efx_dword_t dword[2]; +} efx_qword_t; + +/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */ +typedef union efx_oword { + __le64 u64[2]; + efx_qword_t qword[2]; + __le32 u32[4]; + efx_dword_t dword[4]; +} efx_oword_t; + +/* Format string and value expanders for printk */ +#define EFX_DWORD_FMT "%08x" +#define EFX_QWORD_FMT "%08x:%08x" +#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x" +#define EFX_DWORD_VAL(dword) \ + ((unsigned int) le32_to_cpu((dword).u32[0])) +#define EFX_QWORD_VAL(qword) \ + ((unsigned int) le32_to_cpu((qword).u32[1])), \ + ((unsigned int) le32_to_cpu((qword).u32[0])) +#define EFX_OWORD_VAL(oword) \ + ((unsigned int) le32_to_cpu((oword).u32[3])), \ + ((unsigned int) le32_to_cpu((oword).u32[2])), \ + ((unsigned int) le32_to_cpu((oword).u32[1])), \ + ((unsigned int) le32_to_cpu((oword).u32[0])) + +/* + * Extract bit field portion [low,high) from the native-endian element + * which contains bits [min,max). + * + * For example, suppose "element" represents the high 32 bits of a + * 64-bit value, and we wish to extract the bits belonging to the bit + * field occupying bits 28-45 of this 64-bit value. + * + * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give + * + * ( element ) << 4 + * + * The result will contain the relevant bits filled in in the range + * [0,high-low), with garbage in bits [high-low+1,...). + */ +#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ + ((low) > (max) || (high) < (min) ? 0 : \ + (low) > (min) ? 
\ + (native_element) >> ((low) - (min)) : \ + (native_element) << ((min) - (low))) + +/* + * Extract bit field portion [low,high) from the 64-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT64(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high) + +/* + * Extract bit field portion [low,high) from the 32-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT32(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) + +#define EFX_EXTRACT_OWORD64(oword, low, high) \ + ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ + EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD64(qword, low, high) \ + (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_OWORD32(oword, low, high) \ + ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ + EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ + EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD32(qword, low, high) \ + ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_DWORD(dword, low, high) \ + (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_OWORD_FIELD64(oword, field) \ + EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD64(qword, field) \ + EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_FIELD32(oword, field) \ + EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD32(qword, field) \ + EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_DWORD_FIELD(dword, field) \ + EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_IS_ZERO64(oword) \ + (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) + +#define EFX_QWORD_IS_ZERO64(qword) \ + (((qword).u64[0]) == (__force __le64) 0) + +#define EFX_OWORD_IS_ZERO32(oword) \ + (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \ + == (__force __le32) 0) + +#define EFX_QWORD_IS_ZERO32(qword) \ + (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0) + +#define EFX_DWORD_IS_ZERO(dword) \ + (((dword).u32[0]) == (__force __le32) 0) + +#define EFX_OWORD_IS_ALL_ONES64(oword) \ + (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0)) + +#define EFX_QWORD_IS_ALL_ONES64(qword) \ + ((qword).u64[0] == ~((__force __le64) 0)) + +#define EFX_OWORD_IS_ALL_ONES32(oword) \ + (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \ + == ~((__force __le32) 0)) + +#define EFX_QWORD_IS_ALL_ONES32(qword) \ + (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0)) + +#define EFX_DWORD_IS_ALL_ONES(dword) \ + ((dword).u32[0] == ~((__force __le32) 0)) + +#if BITS_PER_LONG == 64 +#define EFX_OWORD_FIELD EFX_OWORD_FIELD64 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD64 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64 +#else +#define EFX_OWORD_FIELD EFX_OWORD_FIELD32 +#define 
EFX_QWORD_FIELD EFX_QWORD_FIELD32 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32 +#endif + +/* + * Construct bit field portion + * + * Creates the portion of the bit field [low,high) that lies within + * the range [min,max). + */ +#define EFX_INSERT_NATIVE64(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u64) (value)) << (low - min)) : \ + (((u64) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE32(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u32) (value)) << (low - min)) : \ + (((u32) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE(min, max, low, high, value) \ + ((((max - min) >= 32) || ((high - low) >= 32)) ? \ + EFX_INSERT_NATIVE64(min, max, low, high, value) : \ + EFX_INSERT_NATIVE32(min, max, low, high, value)) + +/* + * Construct bit field portion + * + * Creates the portion of the named bit field that lies within the + * range [min,max). + */ +#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \ + EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +/* + * Construct bit field + * + * Creates the portion of the named bit fields that lie within the + * range [min,max). + */ +#define EFX_INSERT_FIELDS_NATIVE(min, max, \ + field1, value1, \ + field2, value2, \ + field3, value3, \ + field4, value4, \ + field5, value5, \ + field6, value6, \ + field7, value7, \ + field8, value8, \ + field9, value9, \ + field10, value10, \ + field11, value11, \ + field12, value12, \ + field13, value13, \ + field14, value14, \ + field15, value15, \ + field16, value16, \ + field17, value17, \ + field18, value18, \ + field19, value19) \ + (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19))) + +#define EFX_INSERT_FIELDS64(...) \ + cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_INSERT_FIELDS32(...) \ + cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_POPULATE_OWORD64(oword, ...) do { \ + (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD64(qword, ...) 
do { \ + (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_OWORD32(oword, ...) do { \ + (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \ + (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD32(qword, ...) do { \ + (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_DWORD(dword, ...) do { \ + (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + } while (0) + +#if BITS_PER_LONG == 64 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 +#else +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 +#endif + +/* Populate an octword field with various numbers of arguments */ +#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_18(oword, ...) \ + EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_17(oword, ...) \ + EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_16(oword, ...) \ + EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_15(oword, ...) \ + EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_14(oword, ...) \ + EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_13(oword, ...) \ + EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_12(oword, ...) \ + EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_11(oword, ...) \ + EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_10(oword, ...) \ + EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_9(oword, ...) \ + EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_8(oword, ...) \ + EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_7(oword, ...) \ + EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_6(oword, ...) \ + EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_5(oword, ...) \ + EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_4(oword, ...) \ + EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_3(oword, ...) \ + EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_2(oword, ...) \ + EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_1(oword, ...) \ + EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_OWORD(oword) \ + EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_OWORD(oword) \ + EFX_POPULATE_OWORD_4(oword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff, \ + EFX_DWORD_2, 0xffffffff, \ + EFX_DWORD_3, 0xffffffff) + +/* Populate a quadword field with various numbers of arguments */ +#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_18(qword, ...) \ + EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_17(qword, ...) 
\ + EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_16(qword, ...) \ + EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_15(qword, ...) \ + EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_14(qword, ...) \ + EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_13(qword, ...) \ + EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_12(qword, ...) \ + EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_11(qword, ...) \ + EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_10(qword, ...) \ + EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_9(qword, ...) \ + EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_8(qword, ...) \ + EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_7(qword, ...) \ + EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_6(qword, ...) \ + EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_5(qword, ...) \ + EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_4(qword, ...) \ + EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_3(qword, ...) \ + EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_2(qword, ...) \ + EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_1(qword, ...) \ + EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_QWORD(qword) \ + EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_QWORD(qword) \ + EFX_POPULATE_QWORD_2(qword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff) + +/* Populate a dword field with various numbers of arguments */ +#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_18(dword, ...) \ + EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_17(dword, ...) \ + EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_16(dword, ...) \ + EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_15(dword, ...) \ + EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_14(dword, ...) \ + EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_13(dword, ...) \ + EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_12(dword, ...) \ + EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_11(dword, ...) \ + EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_10(dword, ...) \ + EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_9(dword, ...) \ + EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_8(dword, ...) \ + EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_7(dword, ...) \ + EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_6(dword, ...) 
\ + EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_5(dword, ...) \ + EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_4(dword, ...) \ + EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_3(dword, ...) \ + EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_2(dword, ...) \ + EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_1(dword, ...) \ + EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff) + +/* + * Modify a named field within an already-populated structure. Used + * for read-modify-write operations. + * + */ +#define EFX_INVERT_OWORD(oword) do { \ + (oword).u64[0] = ~((oword).u64[0]); \ + (oword).u64[1] = ~((oword).u64[1]); \ + } while (0) + +#define EFX_AND_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ + } while (0) + +#define EFX_AND_QWORD(qword, from, mask) \ + (qword).u64[0] = (from).u64[0] & (mask).u64[0] + +#define EFX_OR_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ + } while (0) + +#define EFX_INSERT64(min, max, low, high, value) \ + cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INSERT32(min, max, low, high, value) \ + cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INPLACE_MASK64(min, max, low, high) \ + EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low))) + +#define EFX_INPLACE_MASK32(min, max, low, high) \ + EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low))) + +#define EFX_SET_OWORD64(oword, low, high, value) do { \ + (oword).u64[0] = (((oword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + (oword).u64[1] = (((oword).u64[1] \ + & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ + | EFX_INSERT64(64, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD64(qword, low, high, value) do { \ + (qword).u64[0] = (((qword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD32(oword, low, high, value) do { \ + (oword).u32[0] = (((oword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (oword).u32[1] = (((oword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + (oword).u32[2] = (((oword).u32[2] \ + & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ + | EFX_INSERT32(64, 95, low, high, value)); \ + (oword).u32[3] = (((oword).u32[3] \ + & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ + | EFX_INSERT32(96, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD32(qword, low, high, value) do { \ + (qword).u32[0] = (((qword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (qword).u32[1] = (((qword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_DWORD32(dword, low, high, value) do { \ + (dword).u32[0] = (((dword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | 
EFX_INSERT32(0, 31, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD_FIELD64(oword, field, value) \ + EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD64(qword, field, value) \ + EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_OWORD_FIELD32(oword, field, value) \ + EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD32(qword, field, value) \ + EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_DWORD_FIELD(dword, field, value) \ + EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + + + +#if BITS_PER_LONG == 64 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 +#else +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 +#endif + +/* Used to avoid compiler warnings about shift range exceeding width + * of the data types when dma_addr_t is only 32 bits wide. + */ +#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) +#define EFX_DMA_TYPE_WIDTH(width) \ + (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) + + +/* Static initialiser */ +#define EFX_OWORD32(a, b, c, d) \ + { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ + cpu_to_le32(c), cpu_to_le32(d) } } + +#endif /* EFX_BITFIELD_H */ diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c new file mode 100644 index 000000000000..5a772354da83 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -0,0 +1,1335 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include +#include +#include "efx.h" +#include "efx_common.h" +#include "efx_channels.h" +#include "ef100.h" +#include "rx_common.h" +#include "tx_common.h" +#include "nic.h" +#include "io.h" +#include "selftest.h" +#include "sriov.h" + +#include "mcdi_port_common.h" +#include "mcdi_pcol.h" +#include "workarounds.h" + +/************************************************************************** + * + * Configurable values + * + *************************************************************************/ + +module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444); +MODULE_PARM_DESC(interrupt_mode, + "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); + +module_param(rss_cpus, uint, 0444); +MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); + +/* + * Use separate channels for TX and RX events + * + * Set this to 1 to use separate channels for TX and RX. It allows us + * to control interrupt affinity separately for TX and RX. + * + * This is only used in MSI-X interrupt mode + */ +bool efx_separate_tx_channels; +module_param(efx_separate_tx_channels, bool, 0444); +MODULE_PARM_DESC(efx_separate_tx_channels, + "Use separate channels for TX and RX"); + +/* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * The default for RX should strike a balance between increasing the + * round-trip latency and reducing overhead. 
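+ *
+ * For illustration: with a 60 usec moderation period, each channel can
+ * raise at most roughly 1000000 / 60 ~= 16700 interrupts per second.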
+ */ +static unsigned int rx_irq_mod_usec = 60; + +/* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * This default is chosen to ensure that a 10G link does not go idle + * while a TX queue is stopped after it has become full. A queue is + * restarted when it drops below half full. The time this takes (assuming + * worst case 3 descriptors per packet and 1024 descriptors) is + * 512 / 3 * 1.2 = 205 usec. + */ +static unsigned int tx_irq_mod_usec = 150; + +static bool phy_flash_cfg; +module_param(phy_flash_cfg, bool, 0644); +MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); + +static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW); +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); + +/************************************************************************** + * + * Utility functions and prototypes + * + *************************************************************************/ + +static void efx_remove_port(struct efx_nic *efx); +static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog); +static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp); +static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, + u32 flags); + +#define EFX_ASSERT_RESET_SERIALISED(efx) \ + do { \ + if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ + (efx->state == STATE_DISABLED)) \ + ASSERT_RTNL(); \ + } while (0) + +/************************************************************************** + * + * Port handling + * + **************************************************************************/ + +static void efx_fini_port(struct efx_nic *efx); + +static int efx_probe_port(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, probe, efx->net_dev, "create port\n"); + + if (phy_flash_cfg) + efx->phy_mode = PHY_MODE_SPECIAL; + + /* Connect up MAC/PHY operations table */ + rc = efx->type->probe_port(efx); + if (rc) + return rc; + + /* Initialise MAC address to permanent address */ + eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr); + + return 0; +} + +static int efx_init_port(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, drv, efx->net_dev, "init port\n"); + + mutex_lock(&efx->mac_lock); + + efx->port_initialized = true; + + /* Ensure the PHY advertises the correct flow control settings */ + rc = efx_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + goto fail; + + mutex_unlock(&efx->mac_lock); + return 0; + +fail: + mutex_unlock(&efx->mac_lock); + return rc; +} + +static void efx_fini_port(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); + + if (!efx->port_initialized) + return; + + efx->port_initialized = false; + + efx->link_state.up = false; + efx_link_status_changed(efx); +} + +static void efx_remove_port(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); + + efx->type->remove_port(efx); +} + +/************************************************************************** + * + * NIC handling + * + **************************************************************************/ + +static LIST_HEAD(efx_primary_list); +static LIST_HEAD(efx_unassociated_list); + +static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right) +{ + return left->type == right->type && + left->vpd_sn && right->vpd_sn && + !strcmp(left->vpd_sn, 
right->vpd_sn); +} + +static void efx_associate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + if (efx->primary == efx) { + /* Adding primary function; look for secondaries */ + + netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); + list_add_tail(&efx->node, &efx_primary_list); + + list_for_each_entry_safe(other, next, &efx_unassociated_list, + node) { + if (efx_same_controller(efx, other)) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to secondary list of %s %s\n", + pci_name(efx->pci_dev), + efx->net_dev->name); + list_add_tail(&other->node, + &efx->secondary_list); + other->primary = efx; + } + } + } else { + /* Adding secondary function; look for primary */ + + list_for_each_entry(other, &efx_primary_list, node) { + if (efx_same_controller(efx, other)) { + netif_dbg(efx, probe, efx->net_dev, + "adding to secondary list of %s %s\n", + pci_name(other->pci_dev), + other->net_dev->name); + list_add_tail(&efx->node, + &other->secondary_list); + efx->primary = other; + return; + } + } + + netif_dbg(efx, probe, efx->net_dev, + "adding to unassociated list\n"); + list_add_tail(&efx->node, &efx_unassociated_list); + } +} + +static void efx_dissociate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + list_del(&efx->node); + efx->primary = NULL; + + list_for_each_entry_safe(other, next, &efx->secondary_list, node) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to unassociated list\n"); + list_add_tail(&other->node, &efx_unassociated_list); + other->primary = NULL; + } +} + +static int efx_probe_nic(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); + + /* Carry out hardware-type specific initialisation */ + rc = efx->type->probe(efx); + if (rc) + return rc; + + do { + if (!efx->max_channels || !efx->max_tx_channels) { + netif_err(efx, drv, efx->net_dev, + "Insufficient resources to allocate" + " any channels\n"); + rc = -ENOSPC; + goto fail1; + } + + /* Determine the number of channels and queues by trying + * to hook in MSI-X interrupts. 
+ */ + rc = efx_probe_interrupts(efx); + if (rc) + goto fail1; + + rc = efx_set_channels(efx); + if (rc) + goto fail1; + + /* dimension_resources can fail with EAGAIN */ + rc = efx->type->dimension_resources(efx); + if (rc != 0 && rc != -EAGAIN) + goto fail2; + + if (rc == -EAGAIN) + /* try again with new max_channels */ + efx_remove_interrupts(efx); + + } while (rc == -EAGAIN); + + if (efx->n_channels > 1) + netdev_rss_key_fill(efx->rss_context.rx_hash_key, + sizeof(efx->rss_context.rx_hash_key)); + efx_set_default_rx_indir_table(efx, &efx->rss_context); + + /* Initialise the interrupt moderation settings */ + efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); + efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, + true); + + return 0; + +fail2: + efx_remove_interrupts(efx); +fail1: + efx->type->remove(efx); + return rc; +} + +static void efx_remove_nic(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); + + efx_remove_interrupts(efx); + efx->type->remove(efx); +} + +/************************************************************************** + * + * NIC startup/shutdown + * + *************************************************************************/ + +static int efx_probe_all(struct efx_nic *efx) +{ + int rc; + + rc = efx_probe_nic(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); + goto fail1; + } + + rc = efx_probe_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create port\n"); + goto fail2; + } + + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } + +#ifdef CONFIG_SFC_SRIOV + rc = efx->type->vswitching_probe(efx); + if (rc) /* not fatal; the PF will still work fine */ + netif_warn(efx, probe, efx->net_dev, + "failed to setup vswitching rc=%d;" + " VFs may not function\n", rc); +#endif + + rc = efx_probe_filters(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create filter tables\n"); + goto fail4; + } + + rc = efx_probe_channels(efx); + if (rc) + goto fail5; + + return 0; + + fail5: + efx_remove_filters(efx); + fail4: +#ifdef CONFIG_SFC_SRIOV + efx->type->vswitching_remove(efx); +#endif + fail3: + efx_remove_port(efx); + fail2: + efx_remove_nic(efx); + fail1: + return rc; +} + +static void efx_remove_all(struct efx_nic *efx) +{ + rtnl_lock(); + efx_xdp_setup_prog(efx, NULL); + rtnl_unlock(); + + efx_remove_channels(efx); + efx_remove_filters(efx); +#ifdef CONFIG_SFC_SRIOV + efx->type->vswitching_remove(efx); +#endif + efx_remove_port(efx); + efx_remove_nic(efx); +} + +/************************************************************************** + * + * Interrupt moderation + * + **************************************************************************/ +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) +{ + if (usecs == 0) + return 0; + if (usecs * 1000 < efx->timer_quantum_ns) + return 1; /* never round down to 0 */ + return usecs * 1000 / efx->timer_quantum_ns; +} + +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) +{ + /* We must round up when converting ticks to microseconds + * because we round down when converting the other way. 
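+ *
+ * Worked example, assuming a hypothetical timer_quantum_ns of 5000:
+ * 7 usec converts down to 7000 / 5000 = 1 tick, and converting that
+ * tick back gives DIV_ROUND_UP(1 * 5000, 1000) = 5 usec.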
+ */ + return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); +} + +/* Set interrupt moderation parameters */ +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx) +{ + struct efx_channel *channel; + unsigned int timer_max_us; + + EFX_ASSERT_RESET_SERIALISED(efx); + + timer_max_us = efx->timer_max_ns / 1000; + + if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) + return -EINVAL; + + if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && + !rx_may_override_tx) { + netif_err(efx, drv, efx->net_dev, "Channels are shared. " + "RX and TX IRQ moderation must be equal\n"); + return -EINVAL; + } + + efx->irq_rx_adaptive = rx_adaptive; + efx->irq_rx_moderation_us = rx_usecs; + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) + channel->irq_moderation_us = rx_usecs; + else if (efx_channel_has_tx_queues(channel)) + channel->irq_moderation_us = tx_usecs; + else if (efx_channel_is_xdp_tx(channel)) + channel->irq_moderation_us = tx_usecs; + } + + return 0; +} + +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive) +{ + *rx_adaptive = efx->irq_rx_adaptive; + *rx_usecs = efx->irq_rx_moderation_us; + + /* If channels are shared between RX and TX, so is IRQ + * moderation. Otherwise, IRQ moderation is the same for all + * TX channels and is not adaptive. + */ + if (efx->tx_channel_offset == 0) { + *tx_usecs = *rx_usecs; + } else { + struct efx_channel *tx_channel; + + tx_channel = efx->channel[efx->tx_channel_offset]; + *tx_usecs = tx_channel->irq_moderation_us; + } +} + +/************************************************************************** + * + * ioctls + * + *************************************************************************/ + +/* Net device ioctl + * Context: process, rtnl_lock() held. + */ +static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (cmd == SIOCSHWTSTAMP) + return efx_ptp_set_ts_config(efx, ifr); + if (cmd == SIOCGHWTSTAMP) + return efx_ptp_get_ts_config(efx, ifr); + + /* Convert phy_id from older PRTAD/DEVAD format */ + if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && + (data->phy_id & 0xfc00) == 0x0400) + data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; + + return mdio_mii_ioctl(&efx->mdio, data, cmd); +} + +/************************************************************************** + * + * Kernel net device interface + * + *************************************************************************/ + +/* Context: process, rtnl_lock() held. */ +int efx_net_open(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); + + rc = efx_check_disabled(efx); + if (rc) + return rc; + if (efx->phy_mode & PHY_MODE_SPECIAL) + return -EBUSY; + if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) + return -EIO; + + /* Notify the kernel of the link state polled during driver load, + * before the monitor starts running */ + efx_link_status_changed(efx); + + efx_start_all(efx); + if (efx->state == STATE_DISABLED || efx->reset_pending) + netif_device_detach(efx->net_dev); + efx_selftest_async_start(efx); + return 0; +} + +/* Context: process, rtnl_lock() held. + * Note that the kernel will ignore our return code; this method + * should really be a void. 
+ */ +int efx_net_stop(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", + raw_smp_processor_id()); + + /* Stop the device and flush all the channels */ + efx_stop_all(efx); + + return 0; +} + +static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_add_vid) + return efx->type->vlan_rx_add_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + +static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_kill_vid) + return efx->type->vlan_rx_kill_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + +static const struct net_device_ops efx_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, + .ndo_get_stats64 = efx_net_stats, + .ndo_tx_timeout = efx_watchdog, + .ndo_start_xmit = efx_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_eth_ioctl = efx_ioctl, + .ndo_change_mtu = efx_change_mtu, + .ndo_set_mac_address = efx_set_mac_address, + .ndo_set_rx_mode = efx_set_rx_mode, + .ndo_set_features = efx_set_features, + .ndo_features_check = efx_features_check, + .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, +#ifdef CONFIG_SFC_SRIOV + .ndo_set_vf_mac = efx_sriov_set_vf_mac, + .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, + .ndo_get_vf_config = efx_sriov_get_vf_config, + .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, +#endif + .ndo_get_phys_port_id = efx_get_phys_port_id, + .ndo_get_phys_port_name = efx_get_phys_port_name, + .ndo_setup_tc = efx_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif + .ndo_xdp_xmit = efx_xdp_xmit, + .ndo_bpf = efx_xdp +}; + +static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + + if (efx->xdp_rxq_info_failed) { + netif_err(efx, drv, efx->net_dev, + "Unable to bind XDP program due to previous failure of rxq_info\n"); + return -EINVAL; + } + + if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) { + netif_err(efx, drv, efx->net_dev, + "Unable to configure XDP with MTU of %d (max: %d)\n", + efx->net_dev->mtu, efx_xdp_max_mtu(efx)); + return -EINVAL; + } + + old_prog = rtnl_dereference(efx->xdp_prog); + rcu_assign_pointer(efx->xdp_prog, prog); + /* Release the reference that was originally passed by the caller. */ + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +/* Context: process, rtnl_lock() held. 
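+ * An XDP program normally reaches this handler from userspace via
+ * something like "ip link set dev <ifname> xdp obj <prog.o>" (the object
+ * file name here is purely illustrative), which the stack delivers as an
+ * XDP_SETUP_PROG command.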
*/ +static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct efx_nic *efx = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return efx_xdp_setup_prog(efx, xdp->prog); + default: + return -EINVAL; + } +} + +static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, + u32 flags) +{ + struct efx_nic *efx = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); +} + +static void efx_update_name(struct efx_nic *efx) +{ + strcpy(efx->name, efx->net_dev->name); + efx_mtd_rename(efx); + efx_set_channel_names(efx); +} + +static int efx_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); + + if ((net_dev->netdev_ops == &efx_netdev_ops) && + event == NETDEV_CHANGENAME) + efx_update_name(netdev_priv(net_dev)); + + return NOTIFY_DONE; +} + +static struct notifier_block efx_netdev_notifier = { + .notifier_call = efx_netdev_event, +}; + +static ssize_t phy_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", efx->phy_type); +} +static DEVICE_ATTR_RO(phy_type); + +static int efx_register_netdev(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct efx_channel *channel; + int rc; + + net_dev->watchdog_timeo = 5 * HZ; + net_dev->irq = efx->pci_dev->irq; + net_dev->netdev_ops = &efx_netdev_ops; + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + net_dev->priv_flags |= IFF_UNICAST_FLT; + net_dev->ethtool_ops = &efx_ethtool_ops; + netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS); + net_dev->min_mtu = EFX_MIN_MTU; + net_dev->max_mtu = EFX_MAX_MTU; + + rtnl_lock(); + + /* Enable resets to be scheduled and check whether any were + * already requested. If so, the NIC is probably hosed so we + * abort. 
+ */ + efx->state = STATE_READY; + smp_mb(); /* ensure we change state before checking reset_pending */ + if (efx->reset_pending) { + pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n"); + rc = -EIO; + goto fail_locked; + } + + rc = dev_alloc_name(net_dev, net_dev->name); + if (rc < 0) + goto fail_locked; + efx_update_name(efx); + + /* Always start with carrier off; PHY events will detect the link */ + netif_carrier_off(net_dev); + + rc = register_netdevice(net_dev); + if (rc) + goto fail_locked; + + efx_for_each_channel(channel, efx) { + struct efx_tx_queue *tx_queue; + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_init_tx_queue_core_txq(tx_queue); + } + + efx_associate(efx); + + rtnl_unlock(); + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail_registered; + } + + efx_init_mcdi_logging(efx); + + return 0; + +fail_registered: + rtnl_lock(); + efx_dissociate(efx); + unregister_netdevice(net_dev); +fail_locked: + efx->state = STATE_UNINIT; + rtnl_unlock(); + netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); + return rc; +} + +static void efx_unregister_netdev(struct efx_nic *efx) +{ + if (!efx->net_dev) + return; + + BUG_ON(netdev_priv(efx->net_dev) != efx); + + if (efx_dev_registered(efx)) { + strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); + efx_fini_mcdi_logging(efx); + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + unregister_netdev(efx->net_dev); + } +} + +/************************************************************************** + * + * List of NICs we support + * + **************************************************************************/ + +/* PCI device ID table */ +static const struct pci_device_id efx_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {0} /* end of list */ +}; + +/************************************************************************** + * + * Data housekeeping + * + **************************************************************************/ + +void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) +{ + u64 n_rx_nodesc_trunc = 0; + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); +} + +/************************************************************************** + * + * PCI interface + * + 
**************************************************************************/ + +/* Main body of final NIC shutdown code + * This is called only at module unload (or hotplug removal). + */ +static void efx_pci_remove_main(struct efx_nic *efx) +{ + /* Flush reset_work. It can no longer be scheduled since we + * are not READY. + */ + BUG_ON(efx->state == STATE_READY); + efx_flush_reset_workqueue(efx); + + efx_disable_interrupts(efx); + efx_clear_interrupt_affinity(efx); + efx_nic_fini_interrupt(efx); + efx_fini_port(efx); + efx->type->fini(efx); + efx_fini_napi(efx); + efx_remove_all(efx); +} + +/* Final NIC shutdown + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. + */ +static void efx_pci_remove(struct pci_dev *pci_dev) +{ + struct efx_nic *efx; + + efx = pci_get_drvdata(pci_dev); + if (!efx) + return; + + /* Mark the NIC as fini, then stop the interface */ + rtnl_lock(); + efx_dissociate(efx); + dev_close(efx->net_dev); + efx_disable_interrupts(efx); + efx->state = STATE_UNINIT; + rtnl_unlock(); + + if (efx->type->sriov_fini) + efx->type->sriov_fini(efx); + + efx_unregister_netdev(efx); + + efx_mtd_remove(efx); + + efx_pci_remove_main(efx); + + efx_fini_io(efx); + netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); + + efx_fini_struct(efx); + free_netdev(efx->net_dev); + + pci_disable_pcie_error_reporting(pci_dev); +}; + +/* NIC VPD information + * Called during probe to display the part number of the + * installed NIC. + */ +static void efx_probe_vpd_strings(struct efx_nic *efx) +{ + struct pci_dev *dev = efx->pci_dev; + unsigned int vpd_size, kw_len; + u8 *vpd_data; + int start; + + vpd_data = pci_vpd_alloc(dev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_warn(dev, "Unable to read VPD\n"); + return; + } + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); + if (start < 0) + pci_err(dev, "Part number not found or incomplete\n"); + else + pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start); + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); + if (start < 0) + pci_err(dev, "Serial number not found or incomplete\n"); + else + efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL); + + kfree(vpd_data); +} + + +/* Main body of NIC initialisation + * This is called at module load (or hotplug insertion, theoretically). 
+ */ +static int efx_pci_probe_main(struct efx_nic *efx) +{ + int rc; + + /* Do start-of-day initialisation */ + rc = efx_probe_all(efx); + if (rc) + goto fail1; + + efx_init_napi(efx); + + down_write(&efx->filter_sem); + rc = efx->type->init(efx); + up_write(&efx->filter_sem); + if (rc) { + pci_err(efx->pci_dev, "failed to initialise NIC\n"); + goto fail3; + } + + rc = efx_init_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise port\n"); + goto fail4; + } + + rc = efx_nic_init_interrupt(efx); + if (rc) + goto fail5; + + efx_set_interrupt_affinity(efx); + rc = efx_enable_interrupts(efx); + if (rc) + goto fail6; + + return 0; + + fail6: + efx_clear_interrupt_affinity(efx); + efx_nic_fini_interrupt(efx); + fail5: + efx_fini_port(efx); + fail4: + efx->type->fini(efx); + fail3: + efx_fini_napi(efx); + efx_remove_all(efx); + fail1: + return rc; +} + +static int efx_pci_probe_post_io(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + int rc = efx_pci_probe_main(efx); + + if (rc) + return rc; + + if (efx->type->sriov_init) { + rc = efx->type->sriov_init(efx); + if (rc) + pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n", + rc); + } + + /* Determine netdevice features */ + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL); + if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) + net_dev->features |= NETIF_F_TSO6; + /* Check whether device supports TSO */ + if (!efx->type->tso_versions || !efx->type->tso_versions(efx)) + net_dev->features &= ~NETIF_F_ALL_TSO; + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + NETIF_F_RXCSUM); + + net_dev->hw_features |= net_dev->features & ~efx->fixed_features; + + /* Disable receiving frames with bad FCS, by default. */ + net_dev->features &= ~NETIF_F_RXALL; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). + */ + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->features |= efx->fixed_features; + + rc = efx_register_netdev(efx); + if (!rc) + return 0; + + efx_pci_remove_main(efx); + return rc; +} + +/* NIC initialisation + * + * This is called at module load (or hotplug insertion, + * theoretically). It sets up PCI mappings, resets the NIC, + * sets up and registers the network devices with the kernel and hooks + * the interrupt service routine. It does not prepare the device for + * transmission; this is left to the first time one of the network + * interfaces is brought up (i.e. efx_net_open). 
+ */ +static int efx_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) +{ + struct net_device *net_dev; + struct efx_nic *efx; + int rc; + + /* Allocate and initialise a struct net_device and struct efx_nic */ + net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, + EFX_MAX_RX_QUEUES); + if (!net_dev) + return -ENOMEM; + efx = netdev_priv(net_dev); + efx->type = (const struct efx_nic_type *) entry->driver_data; + efx->fixed_features |= NETIF_F_HIGHDMA; + + pci_set_drvdata(pci_dev, efx); + SET_NETDEV_DEV(net_dev, &pci_dev->dev); + rc = efx_init_struct(efx, pci_dev, net_dev); + if (rc) + goto fail1; + + pci_info(pci_dev, "Solarflare NIC detected\n"); + + if (!efx->type->is_vf) + efx_probe_vpd_strings(efx); + + /* Set up basic I/O (BAR mappings etc) */ + rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask, + efx->type->mem_map_size(efx)); + if (rc) + goto fail2; + + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On failure, retry once immediately. + * If we aborted probe due to a scheduled reset, dismiss it. + */ + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + if (rc) { + /* On another failure, retry once more + * after a 50-305ms delay. + */ + unsigned char r; + + get_random_bytes(&r, 1); + msleep((unsigned int)r + 50); + efx->reset_pending = 0; + rc = efx_pci_probe_post_io(efx); + } + } + if (rc) + goto fail3; + + netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); + + /* Try to create MTDs, but allow this to fail */ + rtnl_lock(); + rc = efx_mtd_probe(efx); + rtnl_unlock(); + if (rc && rc != -EPERM) + netif_warn(efx, probe, efx->net_dev, + "failed to create MTDs (%d)\n", rc); + + (void)pci_enable_pcie_error_reporting(pci_dev); + + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + + return 0; + + fail3: + efx_fini_io(efx); + fail2: + efx_fini_struct(efx); + fail1: + WARN_ON(rc > 0); + netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); + free_netdev(net_dev); + return rc; +} + +/* efx_pci_sriov_configure returns the actual number of Virtual Functions + * enabled on success + */ +#ifdef CONFIG_SFC_SRIOV +static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + int rc; + struct efx_nic *efx = pci_get_drvdata(dev); + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } else + return -EOPNOTSUPP; +} +#endif + +static int efx_pm_freeze(struct device *dev) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_UNINIT; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + } + + rtnl_unlock(); + + return 0; +} + +static int efx_pm_thaw(struct device *dev) +{ + int rc; + struct efx_nic *efx = dev_get_drvdata(dev); + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; + + mutex_lock(&efx->mac_lock); + efx_mcdi_port_reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + + efx_device_attach_if_not_resetting(efx); + + efx->state = STATE_READY; + + efx->type->resume_wol(efx); + } + + rtnl_unlock(); + + /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ + efx_queue_reset_work(efx); + + return 0; + +fail: + rtnl_unlock(); + + return rc; +} + +static int efx_pm_poweroff(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + efx->type->fini(efx); + + efx->reset_pending = 0; + + pci_save_state(pci_dev); + return pci_set_power_state(pci_dev, PCI_D3hot); +} + +/* Used for both resume and restore */ +static int efx_pm_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + int rc; + + rc = pci_set_power_state(pci_dev, PCI_D0); + if (rc) + return rc; + pci_restore_state(pci_dev); + rc = pci_enable_device(pci_dev); + if (rc) + return rc; + pci_set_master(efx->pci_dev); + rc = efx->type->reset(efx, RESET_TYPE_ALL); + if (rc) + return rc; + down_write(&efx->filter_sem); + rc = efx->type->init(efx); + up_write(&efx->filter_sem); + if (rc) + return rc; + rc = efx_pm_thaw(dev); + return rc; +} + +static int efx_pm_suspend(struct device *dev) +{ + int rc; + + efx_pm_freeze(dev); + rc = efx_pm_poweroff(dev); + if (rc) + efx_pm_resume(dev); + return rc; +} + +static const struct dev_pm_ops efx_pm_ops = { + .suspend = efx_pm_suspend, + .resume = efx_pm_resume, + .freeze = efx_pm_freeze, + .thaw = efx_pm_thaw, + .poweroff = efx_pm_poweroff, + .restore = efx_pm_resume, +}; + +static struct pci_driver efx_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = efx_pci_table, + .probe = efx_pci_probe, + .remove = efx_pci_remove, + .driver.pm = &efx_pm_ops, + .err_handler = &efx_err_handlers, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_pci_sriov_configure, +#endif +}; + +/************************************************************************** + * + * Kernel module interface + * + *************************************************************************/ + +static int __init efx_init_module(void) +{ + int rc; + + printk(KERN_INFO "Solarflare NET driver\n"); + + rc = register_netdevice_notifier(&efx_netdev_notifier); + if (rc) + goto err_notifier; + + rc = efx_create_reset_workqueue(); + if (rc) + goto err_reset; + + rc = pci_register_driver(&efx_pci_driver); + if (rc < 0) + goto err_pci; + + rc = 
pci_register_driver(&ef100_pci_driver); + if (rc < 0) + goto err_pci_ef100; + + return 0; + + err_pci_ef100: + pci_unregister_driver(&efx_pci_driver); + err_pci: + efx_destroy_reset_workqueue(); + err_reset: + unregister_netdevice_notifier(&efx_netdev_notifier); + err_notifier: + return rc; +} + +static void __exit efx_exit_module(void) +{ + printk(KERN_INFO "Solarflare NET driver unloading\n"); + + pci_unregister_driver(&ef100_pci_driver); + pci_unregister_driver(&efx_pci_driver); + efx_destroy_reset_workqueue(); + unregister_netdevice_notifier(&efx_netdev_notifier); + +} + +module_init(efx_init_module); +module_exit(efx_exit_module); + +MODULE_AUTHOR("Solarflare Communications and " + "Michael Brown "); +MODULE_DESCRIPTION("Solarflare network driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, efx_pci_table); diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h new file mode 100644 index 000000000000..c05a83da9e44 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_EFX_H +#define EFX_EFX_H + +#include +#include "net_driver.h" +#include "ef100_rx.h" +#include "ef100_tx.h" +#include "filter.h" + +int efx_net_open(struct net_device *net_dev); +int efx_net_stop(struct net_device *net_dev); + +/* TX */ +void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); +netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev); +netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue, + ef100_enqueue_skb, __efx_enqueue_skb, + tx_queue, skb); +} +void efx_xmit_done_single(struct efx_tx_queue *tx_queue); +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); +extern unsigned int efx_piobuf_size; + +/* RX */ +void __efx_rx_packet(struct efx_channel *channel); +void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags); +static inline void efx_rx_flush_packet(struct efx_channel *channel) +{ + if (channel->rx_pkt_n_frags) + INDIRECT_CALL_2(channel->efx->type->rx_packet, + __ef100_rx_packet, __efx_rx_packet, + channel); +} +static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix) +{ + if (efx->type->rx_buf_hash_valid) + return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid, + ef100_rx_buf_hash_valid, + prefix); + return true; +} + +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. + */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) + +/* All EF10 architecture NICs steal one bit of the DMAQ size for various + * other purposes when counting TxQ entries, so we halve the queue size. + */ +#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? 
\ + EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) + +static inline bool efx_rss_enabled(struct efx_nic *efx) +{ + return efx->rss_spread > 1; +} + +/* Filters */ + +/** + * efx_filter_insert_filter - add or replace a filter + * @efx: NIC in which to insert the filter + * @spec: Specification for the filter + * @replace_equal: Flag for whether the specified filter may replace an + * existing filter with equal priority + * + * On success, return the filter ID. + * On failure, return a negative error code. + * + * If existing filters have equal match values to the new filter spec, + * then the new filter might replace them or the function might fail, + * as follows. + * + * 1. If the existing filters have lower priority, or @replace_equal + * is set and they have equal priority, replace them. + * + * 2. If the existing filters have higher priority, return -%EPERM. + * + * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not + * support delivery to multiple recipients, return -%EEXIST. + * + * This implies that filters for multiple multicast recipients must + * all be inserted with the same priority and @replace_equal = %false. + */ +static inline s32 efx_filter_insert_filter(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) +{ + return efx->type->filter_insert(efx, spec, replace_equal); +} + +/** + * efx_filter_remove_id_safe - remove a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + */ +static inline int efx_filter_remove_id_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx->type->filter_remove_safe(efx, priority, filter_id); +} + +/** + * efx_filter_get_filter_safe - retrieve a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * @spec: Buffer in which to store filter specification + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. 
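+ *
+ * Illustrative usage sketch (the filter ID is assumed to come from an
+ * earlier efx_filter_insert_filter() call):
+ *
+ *	struct efx_filter_spec spec;
+ *	int rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+ *					    filter_id, &spec);
+ *
+ * A zero return code means @spec now describes the installed filter.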
+ */ +static inline int +efx_filter_get_filter_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + return efx->type->filter_get_safe(efx, priority, filter_id, spec); +} + +static inline u32 efx_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + return efx->type->filter_count_rx_used(efx, priority); +} +static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) +{ + return efx->type->filter_get_rx_id_limit(efx); +} +static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + return efx->type->filter_get_rx_ids(efx, priority, buf, size); +} + +/* RSS contexts */ +static inline bool efx_rss_active(struct efx_rss_context *ctx) +{ + return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID; +} + +/* Ethtool support */ +extern const struct ethtool_ops efx_ethtool_ops; + +/* Global */ +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks); +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx); +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive); + +/* Update the generic software stats in the passed stats array */ +void efx_update_sw_stats(struct efx_nic *efx, u64 *stats); + +/* MTD */ +#ifdef CONFIG_SFC_MTD +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part); +static inline int efx_mtd_probe(struct efx_nic *efx) +{ + return efx->type->mtd_probe(efx); +} +void efx_mtd_rename(struct efx_nic *efx); +void efx_mtd_remove(struct efx_nic *efx); +#else +static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } +static inline void efx_mtd_rename(struct efx_nic *efx) {} +static inline void efx_mtd_remove(struct efx_nic *efx) {} +#endif + +#ifdef CONFIG_SFC_SRIOV +static inline unsigned int efx_vf_size(struct efx_nic *efx) +{ + return 1 << efx->vi_scale; +} +#endif + +static inline void efx_device_detach_sync(struct efx_nic *efx) +{ + struct net_device *dev = efx->net_dev; + + /* Lock/freeze all TX queues so that we can be sure the + * TX scheduler is stopped when we're done and before + * netif_device_present() becomes false. + */ + netif_tx_lock_bh(dev); + netif_device_detach(dev); + netif_tx_unlock_bh(dev); +} + +static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx) +{ + if ((efx->state != STATE_DISABLED) && !efx->reset_pending) + netif_device_attach(efx->net_dev); +} + +static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) +{ + if (WARN_ON(down_read_trylock(sem))) { + up_read(sem); + return false; + } + return true; +} + +int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, + bool flush); + +#endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c new file mode 100644 index 000000000000..3f28f9861dfa --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -0,0 +1,1368 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include +#include +#include "efx_channels.h" +#include "efx.h" +#include "efx_common.h" +#include "tx_common.h" +#include "rx_common.h" +#include "nic.h" +#include "sriov.h" +#include "workarounds.h" + +/* This is the first interrupt mode to try out of: + * 0 => MSI-X + * 1 => MSI + * 2 => legacy + */ +unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX; + +/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), + * i.e. the number of CPUs among which we may distribute simultaneous + * interrupt handling. + * + * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. + * The default (0) means to assign an interrupt to each core. + */ +unsigned int rss_cpus; + +static unsigned int irq_adapt_low_thresh = 8000; +module_param(irq_adapt_low_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_low_thresh, + "Threshold score for reducing IRQ moderation"); + +static unsigned int irq_adapt_high_thresh = 16000; +module_param(irq_adapt_high_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_high_thresh, + "Threshold score for increasing IRQ moderation"); + +/* This is the weight assigned to each of the (per-channel) virtual + * NAPI devices. + */ +static int napi_weight = 64; + +static const struct efx_channel_type efx_default_channel_type; + +/************* + * INTERRUPTS + *************/ + +static unsigned int count_online_cores(struct efx_nic *efx, bool local_node) +{ + cpumask_var_t filter_mask; + unsigned int count; + int cpu; + + if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) { + netif_warn(efx, probe, efx->net_dev, + "RSS disabled due to allocation failure\n"); + return 1; + } + + cpumask_copy(filter_mask, cpu_online_mask); + if (local_node) + cpumask_and(filter_mask, filter_mask, + cpumask_of_pcibus(efx->pci_dev->bus)); + + count = 0; + for_each_cpu(cpu, filter_mask) { + ++count; + cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu)); + } + + free_cpumask_var(filter_mask); + + return count; +} + +static unsigned int efx_wanted_parallelism(struct efx_nic *efx) +{ + unsigned int count; + + if (rss_cpus) { + count = rss_cpus; + } else { + count = count_online_cores(efx, true); + + /* If no online CPUs in local node, fallback to any online CPUs */ + if (count == 0) + count = count_online_cores(efx, false); + } + + if (count > EFX_MAX_RX_QUEUES) { + netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn, + "Reducing number of rx queues from %u to %u.\n", + count, EFX_MAX_RX_QUEUES); + count = EFX_MAX_RX_QUEUES; + } + + /* If RSS is requested for the PF *and* VFs then we can't write RSS + * table entries that are inaccessible to VFs + */ +#ifdef CONFIG_SFC_SRIOV + if (efx->type->sriov_wanted) { + if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && + count > efx_vf_size(efx)) { + netif_warn(efx, probe, efx->net_dev, + "Reducing number of RSS channels from %u to %u for " + "VF support. 
Increase vf-msix-limit to use more " + "channels on the PF.\n", + count, efx_vf_size(efx)); + count = efx_vf_size(efx); + } + } +#endif + + return count; +} + +static int efx_allocate_msix_channels(struct efx_nic *efx, + unsigned int max_channels, + unsigned int extra_channels, + unsigned int parallelism) +{ + unsigned int n_channels = parallelism; + int vec_count; + int tx_per_ev; + int n_xdp_tx; + int n_xdp_ev; + + if (efx_separate_tx_channels) + n_channels *= 2; + n_channels += extra_channels; + + /* To allow XDP transmit to happen from arbitrary NAPI contexts + * we allocate a TX queue per CPU. We share event queues across + * multiple tx queues, assuming tx and ev queues are both + * maximum size. + */ + tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); + tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL); + n_xdp_tx = num_possible_cpus(); + n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev); + + vec_count = pci_msix_vec_count(efx->pci_dev); + if (vec_count < 0) + return vec_count; + + max_channels = min_t(unsigned int, vec_count, max_channels); + + /* Check resources. + * We need a channel per event queue, plus a VI per tx queue. + * This may be more pessimistic than it needs to be. + */ + if (n_channels >= max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); + } else if (n_channels + n_xdp_tx > efx->max_vis) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", + n_xdp_tx, n_channels, efx->max_vis); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); + } else if (n_channels + n_xdp_ev > max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + + n_xdp_ev = max_channels - n_channels; + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n", + DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev)); + } else { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; + } + + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { + efx->n_xdp_channels = n_xdp_ev; + efx->xdp_tx_per_channel = tx_per_ev; + efx->xdp_tx_queue_count = n_xdp_tx; + n_channels += n_xdp_ev; + netif_dbg(efx, drv, efx->net_dev, + "Allocating %d TX and %d event queues for XDP\n", + n_xdp_ev * tx_per_ev, n_xdp_ev); + } else { + efx->n_xdp_channels = 0; + efx->xdp_tx_per_channel = 0; + efx->xdp_tx_queue_count = n_xdp_tx; + } + + if (vec_count < n_channels) { + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors available (%d < %u).\n", + vec_count, n_channels); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); + n_channels = vec_count; + } + + n_channels = min(n_channels, max_channels); + + efx->n_channels = n_channels; + + /* Ignore XDP tx channels when creating rx channels. 
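+ *
+ * Worked example: with 10 channels of which 2 are XDP channels, the code
+ * below sees n_channels = 8; with efx_separate_tx_channels set (and
+ * max_tx_channels >= 4) that splits into 4 TX channels at offset 4 and
+ * 4 RX channels.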
*/ + n_channels -= efx->n_xdp_channels; + + if (efx_separate_tx_channels) { + efx->n_tx_channels = + min(max(n_channels / 2, 1U), + efx->max_tx_channels); + efx->tx_channel_offset = + n_channels - efx->n_tx_channels; + efx->n_rx_channels = + max(n_channels - + efx->n_tx_channels, 1U); + } else { + efx->n_tx_channels = min(n_channels, efx->max_tx_channels); + efx->tx_channel_offset = 0; + efx->n_rx_channels = n_channels; + } + + efx->n_rx_channels = min(efx->n_rx_channels, parallelism); + efx->n_tx_channels = min(efx->n_tx_channels, parallelism); + + efx->xdp_channel_offset = n_channels; + + netif_dbg(efx, drv, efx->net_dev, + "Allocating %u RX channels\n", + efx->n_rx_channels); + + return efx->n_channels; +} + +/* Probe the number and type of interrupts we are able to obtain, and + * the resulting numbers of channels and RX queues. + */ +int efx_probe_interrupts(struct efx_nic *efx) +{ + unsigned int extra_channels = 0; + unsigned int rss_spread; + unsigned int i, j; + int rc; + + for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) + if (efx->extra_channel_type[i]) + ++extra_channels; + + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + unsigned int parallelism = efx_wanted_parallelism(efx); + struct msix_entry xentries[EFX_MAX_CHANNELS]; + unsigned int n_channels; + + rc = efx_allocate_msix_channels(efx, efx->max_channels, + extra_channels, parallelism); + if (rc >= 0) { + n_channels = rc; + for (i = 0; i < n_channels; i++) + xentries[i].entry = i; + rc = pci_enable_msix_range(efx->pci_dev, xentries, 1, + n_channels); + } + if (rc < 0) { + /* Fall back to single channel MSI */ + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X\n"); + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI) + efx->interrupt_mode = EFX_INT_MODE_MSI; + else + return rc; + } else if (rc < n_channels) { + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors" + " available (%d < %u).\n", rc, n_channels); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); + n_channels = rc; + } + + if (rc > 0) { + for (i = 0; i < efx->n_channels; i++) + efx_get_channel(efx, i)->irq = + xentries[i].vector; + } + } + + /* Try single interrupt MSI */ + if (efx->interrupt_mode == EFX_INT_MODE_MSI) { + efx->n_channels = 1; + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; + rc = pci_enable_msi(efx->pci_dev); + if (rc == 0) { + efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; + } else { + netif_err(efx, drv, efx->net_dev, + "could not enable MSI\n"); + if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY) + efx->interrupt_mode = EFX_INT_MODE_LEGACY; + else + return rc; + } + } + + /* Assume legacy interrupts */ + if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { + efx->n_channels = 1 + (efx_separate_tx_channels ? 
1 : 0); + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + efx->n_xdp_channels = 0; + efx->xdp_channel_offset = efx->n_channels; + efx->legacy_irq = efx->pci_dev->irq; + } + + /* Assign extra channels if possible, before XDP channels */ + efx->n_extra_tx_channels = 0; + j = efx->xdp_channel_offset; + for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { + if (!efx->extra_channel_type[i]) + continue; + if (j <= efx->tx_channel_offset + efx->n_tx_channels) { + efx->extra_channel_type[i]->handle_no_channel(efx); + } else { + --j; + efx_get_channel(efx, j)->type = + efx->extra_channel_type[i]; + if (efx_channel_has_tx_queues(efx_get_channel(efx, j))) + efx->n_extra_tx_channels++; + } + } + + rss_spread = efx->n_rx_channels; + /* RSS might be usable on VFs even if it is disabled on the PF */ +#ifdef CONFIG_SFC_SRIOV + if (efx->type->sriov_wanted) { + efx->rss_spread = ((rss_spread > 1 || + !efx->type->sriov_wanted(efx)) ? + rss_spread : efx_vf_size(efx)); + return 0; + } +#endif + efx->rss_spread = rss_spread; + + return 0; +} + +#if defined(CONFIG_SMP) +void efx_set_interrupt_affinity(struct efx_nic *efx) +{ + const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus); + struct efx_channel *channel; + unsigned int cpu; + + /* If no online CPUs in local node, fallback to any online CPU */ + if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids) + numa_mask = cpu_online_mask; + + cpu = -1; + efx_for_each_channel(channel, efx) { + cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first_and(cpu_online_mask, numa_mask); + irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); + } +} + +void efx_clear_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + irq_set_affinity_hint(channel->irq, NULL); +} +#else +void +efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +{ +} + +void +efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +{ +} +#endif /* CONFIG_SMP */ + +void efx_remove_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + /* Remove MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + channel->irq = 0; + pci_disable_msi(efx->pci_dev); + pci_disable_msix(efx->pci_dev); + + /* Remove legacy interrupt */ + efx->legacy_irq = 0; +} + +/*************** + * EVENT QUEUES + ***************/ + +/* Create event queue + * Event queue memory allocations are done only once. If the channel + * is reset, the memory buffer will be reused; this guards against + * errors during channel reset and also simplifies interrupt handling. + */ +int efx_probe_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned long entries; + + netif_dbg(efx, probe, efx->net_dev, + "chan %d create event queue\n", channel->channel); + + /* Build an event queue with room for one event per tx and rx buffer, + * plus some extra for link state events and MCDI completions. 
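A minimal standalone sketch of the event queue sizing described in the comment above and computed just below: room for one event per RX and TX descriptor plus some slack, rounded up to a power of two so the ring pointer can wrap with a simple mask. The ring sizes here are example values and roundup_pow_of_two() is reimplemented locally.

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long rxq_entries = 1024, txq_entries = 1024;	/* examples */
	unsigned long min_evq_size = 512;			/* example floor */
	unsigned long entries, eventq_mask;

	/* One event per RX and TX buffer plus slack for link state events
	 * and MCDI completions, rounded up so wrapping is a cheap mask.
	 */
	entries = roundup_pow_of_two(rxq_entries + txq_entries + 128);
	eventq_mask = (entries > min_evq_size ? entries : min_evq_size) - 1;

	printf("evq entries=%lu mask=0x%lx\n", entries, eventq_mask);
	return 0;
}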
+ */ + entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); + EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); + channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; + + return efx_nic_probe_eventq(channel); +} + +/* Prepare channel's event queue */ +int efx_init_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + int rc; + + EFX_WARN_ON_PARANOID(channel->eventq_init); + + netif_dbg(efx, drv, efx->net_dev, + "chan %d init event queue\n", channel->channel); + + rc = efx_nic_init_eventq(channel); + if (rc == 0) { + efx->type->push_irq_moderation(channel); + channel->eventq_read_ptr = 0; + channel->eventq_init = true; + } + return rc; +} + +/* Enable event queue processing and NAPI */ +void efx_start_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, ifup, channel->efx->net_dev, + "chan %d start event queue\n", channel->channel); + + /* Make sure the NAPI handler sees the enabled flag set */ + channel->enabled = true; + smp_wmb(); + + napi_enable(&channel->napi_str); + efx_nic_eventq_read_ack(channel); +} + +/* Disable event queue processing and NAPI */ +void efx_stop_eventq(struct efx_channel *channel) +{ + if (!channel->enabled) + return; + + napi_disable(&channel->napi_str); + channel->enabled = false; +} + +void efx_fini_eventq(struct efx_channel *channel) +{ + if (!channel->eventq_init) + return; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d fini event queue\n", channel->channel); + + efx_nic_fini_eventq(channel); + channel->eventq_init = false; +} + +void efx_remove_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d remove event queue\n", channel->channel); + + efx_nic_remove_eventq(channel); +} + +/************************************************************************** + * + * Channel handling + * + *************************************************************************/ + +#ifdef CONFIG_RFS_ACCEL +static void efx_filter_rfs_expire(struct work_struct *data) +{ + struct delayed_work *dwork = to_delayed_work(data); + struct efx_channel *channel; + unsigned int time, quota; + + channel = container_of(dwork, struct efx_channel, filter_work); + time = jiffies - channel->rfs_last_expiry; + quota = channel->rfs_filter_count * time / (30 * HZ); + if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) + channel->rfs_last_expiry += time; + /* Ensure we do more work eventually even if NAPI poll is not happening */ + schedule_delayed_work(dwork, 30 * HZ); +} +#endif + +/* Allocate and initialise a channel structure. 
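A small userspace model of the ARFS expiry quota used in efx_filter_rfs_expire() above: scanning work is rationed so each installed filter is revisited roughly once every 30 seconds, and a scan only runs once at least 20 filters' worth of work has accumulated. HZ and the sample numbers are illustrative.

#include <stdio.h>

#define HZ 100	/* example tick rate */

int main(void)
{
	unsigned int filter_count = 300;	/* installed ARFS filters */
	unsigned int time = 10 * HZ;		/* jiffies since last scan */
	unsigned int quota = filter_count * time / (30 * HZ);

	/* Only scan once at least 20 filters' worth of work has built up
	 * (equivalently, filter_count * time >= 600 * HZ); otherwise wait.
	 */
	if (quota >= 20)
		printf("scan up to %u filters now\n",
		       quota < filter_count ? quota : filter_count);
	else
		printf("too little work (%u), defer\n", quota);
	return 0;
}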
*/ +static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) +{ + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int j; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->efx = efx; + channel->channel = i; + channel->type = &efx_default_channel_type; + + for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) { + tx_queue = &channel->tx_queue[j]; + tx_queue->efx = efx; + tx_queue->queue = -1; + tx_queue->label = j; + tx_queue->channel = channel; + } + +#ifdef CONFIG_RFS_ACCEL + INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); +#endif + + rx_queue = &channel->rx_queue; + rx_queue->efx = efx; + timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); + + return channel; +} + +int efx_init_channels(struct efx_nic *efx) +{ + unsigned int i; + + for (i = 0; i < EFX_MAX_CHANNELS; i++) { + efx->channel[i] = efx_alloc_channel(efx, i); + if (!efx->channel[i]) + return -ENOMEM; + efx->msi_context[i].efx = efx; + efx->msi_context[i].index = i; + } + + /* Higher numbered interrupt modes are less capable! */ + efx->interrupt_mode = min(efx->type->min_interrupt_mode, + efx_interrupt_mode); + + efx->max_channels = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; + + return 0; +} + +void efx_fini_channels(struct efx_nic *efx) +{ + unsigned int i; + + for (i = 0; i < EFX_MAX_CHANNELS; i++) + if (efx->channel[i]) { + kfree(efx->channel[i]); + efx->channel[i] = NULL; + } +} + +/* Allocate and initialise a channel structure, copying parameters + * (but not resources) from an old channel structure. + */ +static +struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) +{ + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int j; + + channel = kmalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + *channel = *old_channel; + + channel->napi_dev = NULL; + INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); + channel->napi_str.napi_id = 0; + channel->napi_str.state = 0; + memset(&channel->eventq, 0, sizeof(channel->eventq)); + + for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) { + tx_queue = &channel->tx_queue[j]; + if (tx_queue->channel) + tx_queue->channel = channel; + tx_queue->buffer = NULL; + tx_queue->cb_page = NULL; + memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); + } + + rx_queue = &channel->rx_queue; + rx_queue->buffer = NULL; + memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); + timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); +#ifdef CONFIG_RFS_ACCEL + INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); +#endif + + return channel; +} + +static int efx_probe_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc; + + netif_dbg(channel->efx, probe, channel->efx->net_dev, + "creating channel %d\n", channel->channel); + + rc = channel->type->pre_probe(channel); + if (rc) + goto fail; + + rc = efx_probe_eventq(channel); + if (rc) + goto fail; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + rc = efx_probe_tx_queue(tx_queue); + if (rc) + goto fail; + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_probe_rx_queue(rx_queue); + if (rc) + goto fail; + } + + channel->rx_list = NULL; + + return 0; + +fail: + efx_remove_channel(channel); + return rc; +} + +static void efx_get_channel_name(struct efx_channel *channel, char *buf, + size_t len) +{ + struct efx_nic *efx = channel->efx; + const char 
*type; + int number; + + number = channel->channel; + + if (number >= efx->xdp_channel_offset && + !WARN_ON_ONCE(!efx->n_xdp_channels)) { + type = "-xdp"; + number -= efx->xdp_channel_offset; + } else if (efx->tx_channel_offset == 0) { + type = ""; + } else if (number < efx->tx_channel_offset) { + type = "-rx"; + } else { + type = "-tx"; + number -= efx->tx_channel_offset; + } + snprintf(buf, len, "%s%s-%d", efx->name, type, number); +} + +void efx_set_channel_names(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + channel->type->get_name(channel, + efx->msi_context[channel->channel].name, + sizeof(efx->msi_context[0].name)); +} + +int efx_probe_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + int rc; + + /* Restart special buffer allocation */ + efx->next_buffer_table = 0; + + /* Probe channels in reverse, so that any 'extra' channels + * use the start of the buffer table. This allows the traffic + * channels to be resized without moving them or wasting the + * entries before them. + */ + efx_for_each_channel_rev(channel, efx) { + rc = efx_probe_channel(channel); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create channel %d\n", + channel->channel); + goto fail; + } + } + efx_set_channel_names(efx); + + return 0; + +fail: + efx_remove_channels(efx); + return rc; +} + +void efx_remove_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "destroy chan %d\n", channel->channel); + + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_remove_rx_queue(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_remove_tx_queue(tx_queue); + efx_remove_eventq(channel); + channel->type->post_remove(channel); +} + +void efx_remove_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_remove_channel(channel); + + kfree(efx->xdp_tx_queues); +} + +static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if (xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + +static void efx_set_xdp_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + unsigned int next_queue = 0; + int xdp_queue_number = 0; + int rc; + + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. 
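An illustrative rendering of the interrupt naming scheme in efx_get_channel_name() above, for a hypothetical interface "eth2" with separate RX, TX and XDP channels; the offsets and channel counts below are invented.

#include <stdio.h>

int main(void)
{
	const unsigned int n_channels = 8, tx_offset = 4, xdp_offset = 6;
	unsigned int i;

	for (i = 0; i < n_channels; i++) {
		unsigned int number = i;
		const char *type;

		if (i >= xdp_offset) {		/* XDP channels come last */
			type = "-xdp";
			number -= xdp_offset;
		} else if (i >= tx_offset) {	/* then TX-only channels */
			type = "-tx";
			number -= tx_offset;
		} else {			/* RX channels come first */
			type = "-rx";
		}
		printf("eth2%s-%u\n", type, number);
	}
	return 0;
}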
+ */ + efx_for_each_channel(channel, efx) { + if (channel->channel < efx->tx_channel_offset) + continue; + + if (efx_channel_is_xdp_tx(channel)) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } else { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + netif_dbg(efx, drv, efx->net_dev, + "Channel %u TXQ %u is HW %u\n", + channel->channel, tx_queue->label, + tx_queue->queue); + } + + /* If XDP is borrowing queues from net stack, it must + * use the queue with no csum offload, which is the + * first one of the channel + * (note: tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == + EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, + tx_queue); + if (rc == 0) + xdp_queue_number++; + } + } + } + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } +} + +int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) +{ + struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; + unsigned int i, next_buffer_table = 0; + u32 old_rxq_entries, old_txq_entries; + int rc, rc2; + + rc = efx_check_disabled(efx); + if (rc) + return rc; + + /* Not all channels should be reallocated. We must avoid + * reallocating their buffer table entries. 
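A toy model of the tail of efx_set_xdp_channels() above: when fewer XDP TX queues could be allocated than there are per-CPU slots, the remaining slots wrap around onto the queues that already exist, so every CPU still has a queue to use. The counts are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int xdp_tx_queue_count = 8;	/* one slot per possible CPU */
	unsigned int real_queues = 3;		/* queues actually allocated */
	unsigned int slot;

	/* Slots beyond the allocated queues reuse earlier queues in order */
	for (slot = 0; slot < xdp_tx_queue_count; slot++)
		printf("cpu slot %u -> hw queue %u\n",
		       slot, slot % real_queues);
	return 0;
}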
+ */ + efx_for_each_channel(channel, efx) { + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + + if (channel->type->copy) + continue; + next_buffer_table = max(next_buffer_table, + channel->eventq.index + + channel->eventq.entries); + efx_for_each_channel_rx_queue(rx_queue, channel) + next_buffer_table = max(next_buffer_table, + rx_queue->rxd.index + + rx_queue->rxd.entries); + efx_for_each_channel_tx_queue(tx_queue, channel) + next_buffer_table = max(next_buffer_table, + tx_queue->txd.index + + tx_queue->txd.entries); + } + + efx_device_detach_sync(efx); + efx_stop_all(efx); + efx_soft_disable_interrupts(efx); + + /* Clone channels (where possible) */ + memset(other_channel, 0, sizeof(other_channel)); + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + if (channel->type->copy) + channel = channel->type->copy(channel); + if (!channel) { + rc = -ENOMEM; + goto out; + } + other_channel[i] = channel; + } + + /* Swap entry counts and channel pointers */ + old_rxq_entries = efx->rxq_entries; + old_txq_entries = efx->txq_entries; + efx->rxq_entries = rxq_entries; + efx->txq_entries = txq_entries; + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); + + /* Restart buffer table allocation */ + efx->next_buffer_table = next_buffer_table; + + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + if (!channel->type->copy) + continue; + rc = efx_probe_channel(channel); + if (rc) + goto rollback; + efx_init_napi_channel(efx->channel[i]); + } + + efx_set_xdp_channels(efx); +out: + /* Destroy unused channel structures */ + for (i = 0; i < efx->n_channels; i++) { + channel = other_channel[i]; + if (channel && channel->type->copy) { + efx_fini_napi_channel(channel); + efx_remove_channel(channel); + kfree(channel); + } + } + + rc2 = efx_soft_enable_interrupts(efx); + if (rc2) { + rc = rc ? rc : rc2; + netif_err(efx, drv, efx->net_dev, + "unable to restart interrupts on channel reallocation\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + } else { + efx_start_all(efx); + efx_device_attach_if_not_resetting(efx); + } + return rc; + +rollback: + /* Swap back */ + efx->rxq_entries = old_rxq_entries; + efx->txq_entries = old_txq_entries; + for (i = 0; i < efx->n_channels; i++) + swap(efx->channel[i], other_channel[i]); + goto out; +} + +int efx_set_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + int rc; + + efx->tx_channel_offset = + efx_separate_tx_channels ? + efx->n_channels - efx->n_tx_channels : 0; + + if (efx->xdp_tx_queue_count) { + EFX_WARN_ON_PARANOID(efx->xdp_tx_queues); + + /* Allocate array for XDP TX queue lookup. 
*/ + efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count, + sizeof(*efx->xdp_tx_queues), + GFP_KERNEL); + if (!efx->xdp_tx_queues) + return -ENOMEM; + } + + efx_for_each_channel(channel, efx) { + if (channel->channel < efx->n_rx_channels) + channel->rx_queue.core_index = channel->channel; + else + channel->rx_queue.core_index = -1; + } + + efx_set_xdp_channels(efx); + + rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); + if (rc) + return rc; + return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); +} + +static bool efx_default_channel_want_txqs(struct efx_channel *channel) +{ + return channel->channel - channel->efx->tx_channel_offset < + channel->efx->n_tx_channels; +} + +/************* + * START/STOP + *************/ + +int efx_soft_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel, *end_channel; + int rc; + + BUG_ON(efx->state == STATE_DISABLED); + + efx->irq_soft_enabled = true; + smp_wmb(); + + efx_for_each_channel(channel, efx) { + if (!channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + efx_start_eventq(channel); + } + + efx_mcdi_mode_event(efx); + + return 0; +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + return rc; +} + +void efx_soft_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + if (efx->state == STATE_DISABLED) + return; + + efx_mcdi_mode_poll(efx); + + efx->irq_soft_enabled = false; + smp_wmb(); + + if (efx->legacy_irq) + synchronize_irq(efx->legacy_irq); + + efx_for_each_channel(channel, efx) { + if (channel->irq) + synchronize_irq(channel->irq); + + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + /* Flush the asynchronous MCDI request queue */ + efx_mcdi_flush_async(efx); +} + +int efx_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel, *end_channel; + int rc; + + /* TODO: Is this really a bug? 
*/ + BUG_ON(efx->state == STATE_DISABLED); + + if (efx->eeh_disabled_legacy_irq) { + enable_irq(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = false; + } + + efx->type->irq_enable_master(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + } + + rc = efx_soft_enable_interrupts(efx); + if (rc) + goto fail; + + return 0; + +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); + + return rc; +} + +void efx_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_soft_disable_interrupts(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); +} + +void efx_start_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + + efx_for_each_channel_rev(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_init_tx_queue(tx_queue); + atomic_inc(&efx->active_queues); + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + efx_init_rx_queue(rx_queue); + atomic_inc(&efx->active_queues); + efx_stop_eventq(channel); + efx_fast_push_rx_descriptors(rx_queue, false); + efx_start_eventq(channel); + } + + WARN_ON(channel->rx_pkt_n_frags); + } +} + +void efx_stop_channels(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + int rc = 0; + + /* Stop RX refill */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_queue->refill_enabled = false; + } + + efx_for_each_channel(channel, efx) { + /* RX packet processing is pipelined, so wait for the + * NAPI handler to complete. At least event queue 0 + * might be kept active by non-data events, so don't + * use napi_synchronize() but actually disable NAPI + * temporarily. + */ + if (efx_channel_has_rx_queue(channel)) { + efx_stop_eventq(channel); + efx_start_eventq(channel); + } + } + + if (efx->type->fini_dmaq) + rc = efx->type->fini_dmaq(efx); + + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_fini_rx_queue(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_fini_tx_queue(tx_queue); + } +} + +/************************************************************************** + * + * NAPI interface + * + *************************************************************************/ + +/* Process channel's event queue + * + * This function is responsible for processing the event queue of a + * single channel. The caller must guarantee that this function will + * never be concurrently called more than once on the same channel, + * though different channels may be being processed concurrently. 
+ */ +static int efx_process_channel(struct efx_channel *channel, int budget) +{ + struct efx_tx_queue *tx_queue; + struct list_head rx_list; + int spent; + + if (unlikely(!channel->enabled)) + return 0; + + /* Prepare the batch receive list */ + EFX_WARN_ON_PARANOID(channel->rx_list != NULL); + INIT_LIST_HEAD(&rx_list); + channel->rx_list = &rx_list; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->pkts_compl = 0; + tx_queue->bytes_compl = 0; + } + + spent = efx_nic_process_eventq(channel, budget); + if (spent && efx_channel_has_rx_queue(channel)) { + struct efx_rx_queue *rx_queue = + efx_channel_get_rx_queue(channel); + + efx_rx_flush_packet(channel); + efx_fast_push_rx_descriptors(rx_queue, true); + } + + /* Update BQL */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + if (tx_queue->bytes_compl) { + netdev_tx_completed_queue(tx_queue->core_txq, + tx_queue->pkts_compl, + tx_queue->bytes_compl); + } + } + + /* Receive any packets we queued up */ + netif_receive_skb_list(channel->rx_list); + channel->rx_list = NULL; + + return spent; +} + +static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) +{ + int step = efx->irq_mod_step_us; + + if (channel->irq_mod_score < irq_adapt_low_thresh) { + if (channel->irq_moderation_us > step) { + channel->irq_moderation_us -= step; + efx->type->push_irq_moderation(channel); + } + } else if (channel->irq_mod_score > irq_adapt_high_thresh) { + if (channel->irq_moderation_us < + efx->irq_rx_moderation_us) { + channel->irq_moderation_us += step; + efx->type->push_irq_moderation(channel); + } + } + + channel->irq_count = 0; + channel->irq_mod_score = 0; +} + +/* NAPI poll handler + * + * NAPI guarantees serialisation of polls of the same device, which + * provides the guarantee required by efx_process_channel(). + */ +static int efx_poll(struct napi_struct *napi, int budget) +{ + struct efx_channel *channel = + container_of(napi, struct efx_channel, napi_str); + struct efx_nic *efx = channel->efx; +#ifdef CONFIG_RFS_ACCEL + unsigned int time; +#endif + int spent; + + netif_vdbg(efx, intr, efx->net_dev, + "channel %d NAPI poll executing on CPU %d\n", + channel->channel, raw_smp_processor_id()); + + spent = efx_process_channel(channel, budget); + + xdp_do_flush_map(); + + if (spent < budget) { + if (efx_channel_has_rx_queue(channel) && + efx->irq_rx_adaptive && + unlikely(++channel->irq_count == 1000)) { + efx_update_irq_mod(efx, channel); + } + +#ifdef CONFIG_RFS_ACCEL + /* Perhaps expire some ARFS filters */ + time = jiffies - channel->rfs_last_expiry; + /* Would our quota be >= 20? */ + if (channel->rfs_filter_count * time >= 600 * HZ) + mod_delayed_work(system_wq, &channel->filter_work, 0); +#endif + + /* There is no race here; although napi_disable() will + * only wait for napi_complete(), this isn't a problem + * since efx_nic_eventq_read_ack() will have no effect if + * interrupts have already been disabled. 
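A toy model of the adaptive IRQ moderation in efx_update_irq_mod() above: after a batch of interrupts (1000 in the poll handler above) the event score is compared against the low and high thresholds, and the moderation delay is stepped down under light load or up, towards a ceiling, under heavy load. All numbers below are invented.

#include <stdio.h>

int main(void)
{
	unsigned int low = 8000, high = 16000;	/* threshold scores */
	unsigned int step = 10, max_us = 60;	/* moderation step / ceiling */
	unsigned int mod_us = 30;		/* current moderation delay */
	unsigned int scores[] = { 4000, 20000, 21000, 5000 };
	unsigned int i;

	for (i = 0; i < sizeof(scores) / sizeof(scores[0]); i++) {
		if (scores[i] < low && mod_us > step)
			mod_us -= step;		/* light load: react faster */
		else if (scores[i] > high && mod_us < max_us)
			mod_us += step;		/* heavy load: batch more */
		printf("score %5u -> irq moderation %u us\n",
		       scores[i], mod_us);
	}
	return 0;
}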
+ */ + if (napi_complete_done(napi, spent)) + efx_nic_eventq_read_ack(channel); + } + + return spent; +} + +void efx_init_napi_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + channel->napi_dev = efx->net_dev; + netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll, + napi_weight); +} + +void efx_init_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_init_napi_channel(channel); +} + +void efx_fini_napi_channel(struct efx_channel *channel) +{ + if (channel->napi_dev) + netif_napi_del(&channel->napi_str); + + channel->napi_dev = NULL; +} + +void efx_fini_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_fini_napi_channel(channel); +} + +/*************** + * Housekeeping + ***************/ + +static int efx_channel_dummy_op_int(struct efx_channel *channel) +{ + return 0; +} + +void efx_channel_dummy_op_void(struct efx_channel *channel) +{ +} + +static const struct efx_channel_type efx_default_channel_type = { + .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_channel_dummy_op_void, + .get_name = efx_get_channel_name, + .copy = efx_copy_channel, + .want_txqs = efx_default_channel_want_txqs, + .keep_eventq = false, + .want_pio = true, +}; diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.h b/drivers/net/ethernet/sfc/siena/efx_channels.h new file mode 100644 index 000000000000..64abb99a56b8 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx_channels.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_CHANNELS_H +#define EFX_CHANNELS_H + +extern unsigned int efx_interrupt_mode; +extern unsigned int rss_cpus; + +int efx_probe_interrupts(struct efx_nic *efx); +void efx_remove_interrupts(struct efx_nic *efx); +int efx_soft_enable_interrupts(struct efx_nic *efx); +void efx_soft_disable_interrupts(struct efx_nic *efx); +int efx_enable_interrupts(struct efx_nic *efx); +void efx_disable_interrupts(struct efx_nic *efx); + +void efx_set_interrupt_affinity(struct efx_nic *efx); +void efx_clear_interrupt_affinity(struct efx_nic *efx); + +int efx_probe_eventq(struct efx_channel *channel); +int efx_init_eventq(struct efx_channel *channel); +void efx_start_eventq(struct efx_channel *channel); +void efx_stop_eventq(struct efx_channel *channel); +void efx_fini_eventq(struct efx_channel *channel); +void efx_remove_eventq(struct efx_channel *channel); + +int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); +void efx_set_channel_names(struct efx_nic *efx); +int efx_init_channels(struct efx_nic *efx); +int efx_probe_channels(struct efx_nic *efx); +int efx_set_channels(struct efx_nic *efx); +void efx_remove_channel(struct efx_channel *channel); +void efx_remove_channels(struct efx_nic *efx); +void efx_fini_channels(struct efx_nic *efx); +void efx_start_channels(struct efx_nic *efx); +void efx_stop_channels(struct efx_nic *efx); + +void efx_init_napi_channel(struct efx_channel *channel); +void efx_init_napi(struct efx_nic *efx); +void efx_fini_napi_channel(struct efx_channel *channel); +void efx_fini_napi(struct efx_nic *efx); + +void efx_channel_dummy_op_void(struct efx_channel *channel); + +#endif diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c new file mode 100644 index 000000000000..f6577e74d6e6 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -0,0 +1,1396 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include +#include +#include +#include +#include "efx_common.h" +#include "efx_channels.h" +#include "efx.h" +#include "mcdi.h" +#include "selftest.h" +#include "rx_common.h" +#include "tx_common.h" +#include "nic.h" +#include "mcdi_port_common.h" +#include "io.h" +#include "mcdi_pcol.h" + +static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW); +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); + +/* This is the time (in jiffies) between invocations of the hardware + * monitor. + * On Falcon-based NICs, this will: + * - Check the on-board hardware monitor; + * - Poll the link state and reconfigure the hardware as necessary. + * On Siena-based NICs for power systems with EEH support, this will give EEH a + * chance to start. + */ +static unsigned int efx_monitor_interval = 1 * HZ; + +/* How often and how many times to poll for a reset while waiting for a + * BIST that another function started to complete. 
+ */ +#define BIST_WAIT_DELAY_MS 100 +#define BIST_WAIT_DELAY_COUNT 100 + +/* Default stats update time */ +#define STATS_PERIOD_MS_DEFAULT 1000 + +static const unsigned int efx_reset_type_max = RESET_TYPE_MAX; +static const char *const efx_reset_type_names[] = { + [RESET_TYPE_INVISIBLE] = "INVISIBLE", + [RESET_TYPE_ALL] = "ALL", + [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", + [RESET_TYPE_WORLD] = "WORLD", + [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_DATAPATH] = "DATAPATH", + [RESET_TYPE_MC_BIST] = "MC_BIST", + [RESET_TYPE_DISABLE] = "DISABLE", + [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", + [RESET_TYPE_INT_ERROR] = "INT_ERROR", + [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", + [RESET_TYPE_TX_SKIP] = "TX_SKIP", + [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", +}; + +#define RESET_TYPE(type) \ + STRING_TABLE_LOOKUP(type, efx_reset_type) + +/* Loopback mode names (see LOOPBACK_MODE()) */ +const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; +const char *const efx_loopback_mode_names[] = { + [LOOPBACK_NONE] = "NONE", + [LOOPBACK_DATA] = "DATAPATH", + [LOOPBACK_GMAC] = "GMAC", + [LOOPBACK_XGMII] = "XGMII", + [LOOPBACK_XGXS] = "XGXS", + [LOOPBACK_XAUI] = "XAUI", + [LOOPBACK_GMII] = "GMII", + [LOOPBACK_SGMII] = "SGMII", + [LOOPBACK_XGBR] = "XGBR", + [LOOPBACK_XFI] = "XFI", + [LOOPBACK_XAUI_FAR] = "XAUI_FAR", + [LOOPBACK_GMII_FAR] = "GMII_FAR", + [LOOPBACK_SGMII_FAR] = "SGMII_FAR", + [LOOPBACK_XFI_FAR] = "XFI_FAR", + [LOOPBACK_GPHY] = "GPHY", + [LOOPBACK_PHYXS] = "PHYXS", + [LOOPBACK_PCS] = "PCS", + [LOOPBACK_PMAPMD] = "PMA/PMD", + [LOOPBACK_XPORT] = "XPORT", + [LOOPBACK_XGMII_WS] = "XGMII_WS", + [LOOPBACK_XAUI_WS] = "XAUI_WS", + [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", + [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", + [LOOPBACK_GMII_WS] = "GMII_WS", + [LOOPBACK_XFI_WS] = "XFI_WS", + [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", + [LOOPBACK_PHYXS_WS] = "PHYXS_WS", +}; + +/* Reset workqueue. If any NIC has a hardware failure then a reset will be + * queued onto this work queue. This is not a per-nic work queue, because + * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. + */ +static struct workqueue_struct *reset_workqueue; + +int efx_create_reset_workqueue(void) +{ + reset_workqueue = create_singlethread_workqueue("sfc_reset"); + if (!reset_workqueue) { + printk(KERN_ERR "Failed to create reset workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +void efx_queue_reset_work(struct efx_nic *efx) +{ + queue_work(reset_workqueue, &efx->reset_work); +} + +void efx_flush_reset_workqueue(struct efx_nic *efx) +{ + cancel_work_sync(&efx->reset_work); +} + +void efx_destroy_reset_workqueue(void) +{ + if (reset_workqueue) { + destroy_workqueue(reset_workqueue); + reset_workqueue = NULL; + } +} + +/* We assume that efx->type->reconfigure_mac will always try to sync RX + * filters and therefore needs to read-lock the filter table against freeing + */ +void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only) +{ + if (efx->type->reconfigure_mac) { + down_read(&efx->filter_sem); + efx->type->reconfigure_mac(efx, mtu_only); + up_read(&efx->filter_sem); + } +} + +/* Asynchronous work item for changing MAC promiscuity and multicast + * hash. Avoid a drain/rx_ingress enable by reconfiguring the current + * MAC directly. 
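A sketch of the idea behind the RESET_TYPE()/STRING_TABLE_LOOKUP() usage above: map an enum value to a printable name with bounds and NULL checks so unknown values still print something sensible. The table below is a truncated, invented example rather than the driver's full list.

#include <stdio.h>

enum reset_type { RESET_ALL, RESET_WORLD, RESET_DISABLE, RESET_MAX };

static const char *const reset_names[RESET_MAX] = {
	[RESET_ALL]     = "ALL",
	[RESET_WORLD]   = "WORLD",
	[RESET_DISABLE] = "DISABLE",
};

static const char *reset_name(int type)
{
	/* Out-of-range or unnamed entries fall back to a placeholder */
	return (type >= 0 && type < RESET_MAX && reset_names[type]) ?
		reset_names[type] : "(unknown)";
}

int main(void)
{
	printf("%s %s\n", reset_name(RESET_WORLD), reset_name(42));
	return 0;
}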
+ */ +static void efx_mac_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); + + mutex_lock(&efx->mac_lock); + if (efx->port_enabled) + efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); +} + +int efx_set_mac_address(struct net_device *net_dev, void *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct sockaddr *addr = data; + u8 *new_addr = addr->sa_data; + u8 old_addr[6]; + int rc; + + if (!is_valid_ether_addr(new_addr)) { + netif_err(efx, drv, efx->net_dev, + "invalid ethernet MAC address requested: %pM\n", + new_addr); + return -EADDRNOTAVAIL; + } + + /* save old address */ + ether_addr_copy(old_addr, net_dev->dev_addr); + eth_hw_addr_set(net_dev, new_addr); + if (efx->type->set_mac_address) { + rc = efx->type->set_mac_address(efx); + if (rc) { + eth_hw_addr_set(net_dev, old_addr); + return rc; + } + } + + /* Reconfigure the MAC */ + mutex_lock(&efx->mac_lock); + efx_mac_reconfigure(efx, false); + mutex_unlock(&efx->mac_lock); + + return 0; +} + +/* Context: netif_addr_lock held, BHs disabled. */ +void efx_set_rx_mode(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->port_enabled) + queue_work(efx->workqueue, &efx->mac_work); + /* Otherwise efx_start_port() will do this */ +} + +int efx_set_features(struct net_device *net_dev, netdev_features_t data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + /* If disabling RX n-tuple filtering, clear existing filters */ + if (net_dev->features & ~data & NETIF_F_NTUPLE) { + rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (rc) + return rc; + } + + /* If Rx VLAN filter is changed, update filters via mac_reconfigure. + * If rx-fcs is changed, mac_reconfigure updates that too. + */ + if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXFCS)) { + /* efx_set_rx_mode() will schedule MAC work to update filters + * when a new features are finally set in net_dev. + */ + efx_set_rx_mode(net_dev); + } + + return 0; +} + +/* This ensures that the kernel is kept informed (via + * netif_carrier_on/off) of the link status, and also maintains the + * link status's stop on the port's TX queue. + */ +void efx_link_status_changed(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + + /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure + * that no events are triggered between unregister_netdev() and the + * driver unloading. A more general condition is that NETDEV_CHANGE + * can only be generated between NETDEV_UP and NETDEV_DOWN + */ + if (!netif_running(efx->net_dev)) + return; + + if (link_state->up != netif_carrier_ok(efx->net_dev)) { + efx->n_link_state_changes++; + + if (link_state->up) + netif_carrier_on(efx->net_dev); + else + netif_carrier_off(efx->net_dev); + } + + /* Status message for kernel log */ + if (link_state->up) + netif_info(efx, link, efx->net_dev, + "link up at %uMbps %s-duplex (MTU %d)\n", + link_state->speed, link_state->fd ? "full" : "half", + efx->net_dev->mtu); + else + netif_info(efx, link, efx->net_dev, "link down\n"); +} + +unsigned int efx_xdp_max_mtu(struct efx_nic *efx) +{ + /* The maximum MTU that we can fit in a single page, allowing for + * framing, overhead and XDP headroom + tailroom. 
+ */ + int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) + + efx->rx_prefix_size + efx->type->rx_buffer_padding + + efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM; + + return PAGE_SIZE - overhead; +} + +/* Context: process, rtnl_lock() held. */ +int efx_change_mtu(struct net_device *net_dev, int new_mtu) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx_check_disabled(efx); + if (rc) + return rc; + + if (rtnl_dereference(efx->xdp_prog) && + new_mtu > efx_xdp_max_mtu(efx)) { + netif_err(efx, drv, efx->net_dev, + "Requested MTU of %d too big for XDP (max: %d)\n", + new_mtu, efx_xdp_max_mtu(efx)); + return -EINVAL; + } + + netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + + efx_device_detach_sync(efx); + efx_stop_all(efx); + + mutex_lock(&efx->mac_lock); + net_dev->mtu = new_mtu; + efx_mac_reconfigure(efx, true); + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + efx_device_attach_if_not_resetting(efx); + return 0; +} + +/************************************************************************** + * + * Hardware monitor + * + **************************************************************************/ + +/* Run periodically off the general workqueue */ +static void efx_monitor(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + monitor_work.work); + + netif_vdbg(efx, timer, efx->net_dev, + "hardware monitor executing on CPU %d\n", + raw_smp_processor_id()); + BUG_ON(efx->type->monitor == NULL); + + /* If the mac_lock is already held then it is likely a port + * reconfiguration is already in place, which will likely do + * most of the work of monitor() anyway. + */ + if (mutex_trylock(&efx->mac_lock)) { + if (efx->port_enabled && efx->type->monitor) + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + efx_start_monitor(efx); +} + +void efx_start_monitor(struct efx_nic *efx) +{ + if (efx->type->monitor) + queue_delayed_work(efx->workqueue, &efx->monitor_work, + efx_monitor_interval); +} + +/************************************************************************** + * + * Event queue processing + * + *************************************************************************/ + +/* Channels are shutdown and reinitialised whilst the NIC is running + * to propagate configuration changes (mtu, checksum offload), or + * to clear hardware error conditions + */ +static void efx_start_datapath(struct efx_nic *efx) +{ + netdev_features_t old_features = efx->net_dev->features; + bool old_rx_scatter = efx->rx_scatter; + size_t rx_buf_len; + + /* Calculate the rx buffer allocation parameters required to + * support the current MTU, including padding for header + * alignment and overruns. 
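A back-of-envelope version of the XDP MTU bound computed above: an XDP frame must fit in a single page together with all per-buffer overheads, so the largest usable MTU is the page size minus that overhead. The overhead figures below are invented placeholders, not the hardware's real values.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;
	unsigned int frame_overhead = 18 + 4;	/* Ethernet header + FCS, say */
	unsigned int rx_prefix = 16, padding = 16, ip_align = 2;
	unsigned int xdp_headroom = 256, xdp_tailroom = 64;	/* examples */
	unsigned int overhead = frame_overhead + rx_prefix + padding +
				ip_align + xdp_headroom + xdp_tailroom;

	/* Everything that is not MTU payload must still fit in the page */
	printf("max XDP MTU on a %u-byte page: %u\n",
	       page_size, page_size - overhead);
	return 0;
}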
+ */ + efx->rx_dma_len = (efx->rx_prefix_size + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + rx_buf_len = (sizeof(struct efx_rx_page_state) + EFX_XDP_HEADROOM + + efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM); + + if (rx_buf_len <= PAGE_SIZE) { + efx->rx_scatter = efx->type->always_rx_scatter; + efx->rx_buffer_order = 0; + } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); + BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + + 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, + EFX_RX_BUF_ALIGNMENT) > + PAGE_SIZE); + efx->rx_scatter = true; + efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; + efx->rx_buffer_order = 0; + } else { + efx->rx_scatter = false; + efx->rx_buffer_order = get_order(rx_buf_len); + } + + efx_rx_config_page_split(efx); + if (efx->rx_buffer_order) + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u; page order=%u batch=%u\n", + efx->rx_dma_len, efx->rx_buffer_order, + efx->rx_pages_per_batch); + else + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u step=%u bpp=%u; page batch=%u\n", + efx->rx_dma_len, efx->rx_page_buf_step, + efx->rx_bufs_per_page, efx->rx_pages_per_batch); + + /* Restore previously fixed features in hw_features and remove + * features which are fixed now + */ + efx->net_dev->hw_features |= efx->net_dev->features; + efx->net_dev->hw_features &= ~efx->fixed_features; + efx->net_dev->features |= efx->fixed_features; + if (efx->net_dev->features != old_features) + netdev_features_change(efx->net_dev); + + /* RX filters may also have scatter-enabled flags */ + if ((efx->rx_scatter != old_rx_scatter) && + efx->type->filter_update_rx_scatter) + efx->type->filter_update_rx_scatter(efx); + + /* We must keep at least one descriptor in a TX ring empty. + * We could avoid this when the queue size does not exactly + * match the hardware ring size, but it's not that important. + * Therefore we stop the queue when one more skb might fill + * the ring completely. We wake it when half way back to + * empty. + */ + efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); + efx->txq_wake_thresh = efx->txq_stop_thresh / 2; + + /* Initialise the channels */ + efx_start_channels(efx); + + efx_ptp_start_datapath(efx); + + if (netif_device_present(efx->net_dev)) + netif_tx_wake_all_queues(efx->net_dev); +} + +static void efx_stop_datapath(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->port_enabled); + + efx_ptp_stop_datapath(efx); + + efx_stop_channels(efx); +} + +/************************************************************************** + * + * Port handling + * + **************************************************************************/ + +/* Equivalent to efx_link_set_advertising with all-zeroes, except does not + * force the Autoneg bit on. 
+ */ +void efx_link_clear_advertising(struct efx_nic *efx) +{ + bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); +} + +void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) +{ + efx->wanted_fc = wanted_fc; + if (efx->link_advertising[0]) { + if (wanted_fc & EFX_FC_RX) + efx->link_advertising[0] |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else + efx->link_advertising[0] &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + if (wanted_fc & EFX_FC_TX) + efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; + } +} + +static void efx_start_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifup, efx->net_dev, "start port\n"); + BUG_ON(efx->port_enabled); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = true; + + /* Ensure MAC ingress/egress is enabled */ + efx_mac_reconfigure(efx, false); + + mutex_unlock(&efx->mac_lock); +} + +/* Cancel work for MAC reconfiguration, periodic hardware monitoring + * and the async self-test, wait for them to finish and prevent them + * being scheduled again. This doesn't cover online resets, which + * should only be cancelled when removing the device. + */ +static void efx_stop_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = false; + mutex_unlock(&efx->mac_lock); + + /* Serialise against efx_set_multicast_list() */ + netif_addr_lock_bh(efx->net_dev); + netif_addr_unlock_bh(efx->net_dev); + + cancel_delayed_work_sync(&efx->monitor_work); + efx_selftest_async_cancel(efx); + cancel_work_sync(&efx->mac_work); +} + +/* If the interface is supposed to be running but is not, start + * the hardware and software data path, regular activity for the port + * (MAC statistics, link polling, etc.) and schedule the port to be + * reconfigured. Interrupts must already be enabled. This function + * is safe to call multiple times, so long as the NIC is not disabled. + * Requires the RTNL lock. + */ +void efx_start_all(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->state == STATE_DISABLED); + + /* Check that it is appropriate to restart the interface. All + * of these flags are safe to read under just the rtnl lock + */ + if (efx->port_enabled || !netif_running(efx->net_dev) || + efx->reset_pending) + return; + + efx_start_port(efx); + efx_start_datapath(efx); + + /* Start the hardware monitor if there is one */ + efx_start_monitor(efx); + + /* Link state detection is normally event-driven; we have + * to poll now because we could have missed a change + */ + mutex_lock(&efx->mac_lock); + if (efx_mcdi_phy_poll(efx)) + efx_link_status_changed(efx); + mutex_unlock(&efx->mac_lock); + + if (efx->type->start_stats) { + efx->type->start_stats(efx); + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); + } +} + +/* Quiesce the hardware and software data path, and regular activity + * for the port without bringing the link down. Safe to call multiple + * times with the NIC in almost any state, but interrupts should be + * enabled. Requires the RTNL lock. 
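A standalone sketch of the pause advertising rule in efx_link_set_wanted_fc() above: wanting RX pause sets both Pause and Asym_Pause, and wanting TX pause then toggles Asym_Pause, which reproduces the usual 802.3 advertisement combinations. The bit values below stand in for the ethtool ADVERTISED_* flags.

#include <stdio.h>

#define FC_RX  0x1
#define FC_TX  0x2
#define ADV_PAUSE      0x1
#define ADV_ASYM_PAUSE 0x2

static unsigned int pause_adv(unsigned int wanted_fc)
{
	unsigned int adv = 0;

	if (wanted_fc & FC_RX)
		adv |= ADV_PAUSE | ADV_ASYM_PAUSE;
	if (wanted_fc & FC_TX)
		adv ^= ADV_ASYM_PAUSE;	/* TX pause flips the asymmetric bit */
	return adv;
}

int main(void)
{
	unsigned int fc;

	for (fc = 0; fc <= (FC_RX | FC_TX); fc++)
		printf("rx=%u tx=%u -> Pause=%u Asym=%u\n",
		       !!(fc & FC_RX), !!(fc & FC_TX),
		       !!(pause_adv(fc) & ADV_PAUSE),
		       !!(pause_adv(fc) & ADV_ASYM_PAUSE));
	return 0;
}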
+ */ +void efx_stop_all(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + /* port_enabled can be read safely under the rtnl lock */ + if (!efx->port_enabled) + return; + + if (efx->type->update_stats) { + /* update stats before we go down so we can accurately count + * rx_nodesc_drops + */ + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); + efx->type->stop_stats(efx); + } + + efx_stop_port(efx); + + /* Stop the kernel transmit interface. This is only valid if + * the device is stopped or detached; otherwise the watchdog + * may fire immediately. + */ + WARN_ON(netif_running(efx->net_dev) && + netif_device_present(efx->net_dev)); + netif_tx_disable(efx->net_dev); + + efx_stop_datapath(efx); +} + +/* Context: process, dev_base_lock or RTNL held, non-blocking. */ +void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + spin_lock_bh(&efx->stats_lock); + efx_nic_update_stats_atomic(efx, NULL, stats); + spin_unlock_bh(&efx->stats_lock); +} + +/* Push loopback/power/transmit disable settings to the PHY, and reconfigure + * the MAC appropriately. All other PHY configuration changes are pushed + * through phy_op->set_settings(), and pushed asynchronously to the MAC + * through efx_monitor(). + * + * Callers must hold the mac_lock + */ +int __efx_reconfigure_port(struct efx_nic *efx) +{ + enum efx_phy_mode phy_mode; + int rc = 0; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + /* Disable PHY transmit in mac level loopbacks */ + phy_mode = efx->phy_mode; + if (LOOPBACK_INTERNAL(efx)) + efx->phy_mode |= PHY_MODE_TX_DISABLED; + else + efx->phy_mode &= ~PHY_MODE_TX_DISABLED; + + if (efx->type->reconfigure_port) + rc = efx->type->reconfigure_port(efx); + + if (rc) + efx->phy_mode = phy_mode; + + return rc; +} + +/* Reinitialise the MAC to pick up new PHY settings, even if the port is + * disabled. + */ +int efx_reconfigure_port(struct efx_nic *efx) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + rc = __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/************************************************************************** + * + * Device reset and suspend + * + **************************************************************************/ + +static void efx_wait_for_bist_end(struct efx_nic *efx) +{ + int i; + + for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { + if (efx_mcdi_poll_reboot(efx)) + goto out; + msleep(BIST_WAIT_DELAY_MS); + } + + netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); +out: + /* Either way unset the BIST flag. If we found no reboot we probably + * won't recover, but we should try. + */ + efx->mc_bist_for_other_fn = false; +} + +/* Try recovery mechanisms. + * For now only EEH is supported. + * Returns 0 if the recovery mechanisms are unsuccessful. + * Returns a non-zero value otherwise. + */ +int efx_try_recovery(struct efx_nic *efx) +{ +#ifdef CONFIG_EEH + /* A PCI error can occur and not be seen by EEH because nothing + * happens on the PCI bus. In this case the driver may fail and + * schedule a 'recover or reset', leading to this recovery handler. + * Manually call the eeh failure check function. + */ + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + if (eeh_dev_check_failure(eehdev)) { + /* The EEH mechanisms will handle the error and reset the + * device if necessary. 
+ */ + return 1; + } +#endif + return 0; +} + +/* Tears down the entire software state and most of the hardware state + * before reset. + */ +void efx_reset_down(struct efx_nic *efx, enum reset_type method) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->prepare_flr(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + mutex_lock(&efx->rss_lock); + efx->type->fini(efx); +} + +/* Context: netif_tx_lock held, BHs disabled. */ +void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_err(efx, tx_err, efx->net_dev, + "TX stuck with port_enabled=%d: resetting channels\n", + efx->port_enabled); + + efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); +} + +/* This function will always ensure that the locks acquired in + * efx_reset_down() are released. A failure return code indicates + * that we were unable to reinitialise the hardware, and the + * driver should be disabled. If ok is false, then the rx and tx + * engines are not restarted, pending a RESET_DISABLE. + */ +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->finish_flr(efx); + + /* Ensure that SRAM is initialised even if we're disabling the device */ + rc = efx->type->init(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); + goto fail; + } + + if (!ok) + goto fail; + + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && + method != RESET_TYPE_DATAPATH) { + rc = efx_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + netif_err(efx, drv, efx->net_dev, + "could not restore PHY settings\n"); + } + + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; + +#ifdef CONFIG_SFC_SRIOV + rc = efx->type->vswitching_restore(efx); + if (rc) /* not fatal; the PF will still work fine */ + netif_warn(efx, probe, efx->net_dev, + "failed to restore vswitching rc=%d;" + " VFs may not function\n", rc); +#endif + + if (efx->type->rx_restore_rss_contexts) + efx->type->rx_restore_rss_contexts(efx); + mutex_unlock(&efx->rss_lock); + efx->type->filter_table_restore(efx); + up_write(&efx->filter_sem); + if (efx->type->sriov_reset) + efx->type->sriov_reset(efx); + + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + + if (efx->type->udp_tnl_push_ports) + efx->type->udp_tnl_push_ports(efx); + + return 0; + +fail: + efx->port_initialized = false; + + mutex_unlock(&efx->rss_lock); + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/* Reset the NIC using the specified method. Note that the reset may + * fail, in which case the card will be left in an unusable state. + * + * Caller must hold the rtnl_lock. + */ +int efx_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc, rc2 = 0; + bool disabled; + + netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", + RESET_TYPE(method)); + + efx_device_detach_sync(efx); + /* efx_reset_down() grabs locks that prevent recovery on EF100. + * EF100 reset is handled in the efx_nic_type callback below. + */ + if (efx_nic_rev(efx) != EFX_REV_EF100) + efx_reset_down(efx, method); + + rc = efx->type->reset(efx, method); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); + goto out; + } + + /* Clear flags for the scopes we covered. 
We assume the NIC and + * driver are now quiescent so that there is no race here. + */ + if (method < RESET_TYPE_MAX_METHOD) + efx->reset_pending &= -(1 << (method + 1)); + else /* it doesn't fit into the well-ordered scope hierarchy */ + __clear_bit(method, &efx->reset_pending); + + /* Reinitialise bus-mastering, which may have been turned off before + * the reset was scheduled. This is still appropriate, even in the + * RESET_TYPE_DISABLE since this driver generally assumes the hardware + * can respond to requests. + */ + pci_set_master(efx->pci_dev); + +out: + /* Leave device stopped if necessary */ + disabled = rc || + method == RESET_TYPE_DISABLE || + method == RESET_TYPE_RECOVER_OR_DISABLE; + if (efx_nic_rev(efx) != EFX_REV_EF100) + rc2 = efx_reset_up(efx, method, !disabled); + if (rc2) { + disabled = true; + if (!rc) + rc = rc2; + } + + if (disabled) { + dev_close(efx->net_dev); + netif_err(efx, drv, efx->net_dev, "has been disabled\n"); + efx->state = STATE_DISABLED; + } else { + netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); + efx_device_attach_if_not_resetting(efx); + } + return rc; +} + +/* The worker thread exists so that code that cannot sleep can + * schedule a reset for later. + */ +static void efx_reset_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); + unsigned long pending; + enum reset_type method; + + pending = READ_ONCE(efx->reset_pending); + method = fls(pending) - 1; + + if (method == RESET_TYPE_MC_BIST) + efx_wait_for_bist_end(efx); + + if ((method == RESET_TYPE_RECOVER_OR_DISABLE || + method == RESET_TYPE_RECOVER_OR_ALL) && + efx_try_recovery(efx)) + return; + + if (!pending) + return; + + rtnl_lock(); + + /* We checked the state in efx_schedule_reset() but it may + * have changed by now. Now that we have the RTNL lock, + * it cannot change again. + */ + if (efx->state == STATE_READY) + (void)efx_reset(efx, method); + + rtnl_unlock(); +} + +void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) +{ + enum reset_type method; + + if (efx->state == STATE_RECOVERY) { + netif_dbg(efx, drv, efx->net_dev, + "recovering: skip scheduling %s reset\n", + RESET_TYPE(type)); + return; + } + + switch (type) { + case RESET_TYPE_INVISIBLE: + case RESET_TYPE_ALL: + case RESET_TYPE_RECOVER_OR_ALL: + case RESET_TYPE_WORLD: + case RESET_TYPE_DISABLE: + case RESET_TYPE_RECOVER_OR_DISABLE: + case RESET_TYPE_DATAPATH: + case RESET_TYPE_MC_BIST: + case RESET_TYPE_MCDI_TIMEOUT: + method = type; + netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", + RESET_TYPE(method)); + break; + default: + method = efx->type->map_reset_reason(type); + netif_dbg(efx, drv, efx->net_dev, + "scheduling %s reset for %s\n", + RESET_TYPE(method), RESET_TYPE(type)); + break; + } + + set_bit(method, &efx->reset_pending); + smp_mb(); /* ensure we change reset_pending before checking state */ + + /* If we're not READY then just leave the flags set as the cue + * to abort probing or reschedule the reset later. + */ + if (READ_ONCE(efx->state) != STATE_READY) + return; + + /* efx_process_channel() will no longer read events once a + * reset is scheduled. So switch back to poll'd MCDI completions. 
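The pending-reset mask trick from efx_reset() above in isolation: reset scopes are ordered so that performing a reset of type M also satisfies every pending reset of type M or below, and -(1 << (M + 1)) is exactly the mask with those low bits cleared. The bit positions used here are an invented example.

#include <stdio.h>

int main(void)
{
	unsigned long pending = (1UL << 0) | (1UL << 2) | (1UL << 5);
	unsigned int method = 2;	/* the reset type just performed */

	printf("before: 0x%lx\n", pending);
	pending &= -(1UL << (method + 1));	/* clears bits 0..method */
	printf("after:  0x%lx\n", pending);	/* only bit 5 remains */
	return 0;
}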
+ */ + efx_mcdi_mode_poll(efx); + + efx_queue_reset_work(efx); +} + +/************************************************************************** + * + * Dummy NIC operations + * + * Can be used for some unimplemented operations + * Needed so all function pointers are valid and do not have to be tested + * before use + * + **************************************************************************/ +int efx_port_dummy_op_int(struct efx_nic *efx) +{ + return 0; +} +void efx_port_dummy_op_void(struct efx_nic *efx) {} + +/************************************************************************** + * + * Data housekeeping + * + **************************************************************************/ + +/* This zeroes out and then fills in the invariants in a struct + * efx_nic (including all sub-structures). + */ +int efx_init_struct(struct efx_nic *efx, + struct pci_dev *pci_dev, struct net_device *net_dev) +{ + int rc = -ENOMEM; + + /* Initialise common structures */ + INIT_LIST_HEAD(&efx->node); + INIT_LIST_HEAD(&efx->secondary_list); + spin_lock_init(&efx->biu_lock); +#ifdef CONFIG_SFC_MTD + INIT_LIST_HEAD(&efx->mtd_list); +#endif + INIT_WORK(&efx->reset_work, efx_reset_work); + INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); + efx_selftest_async_init(efx); + efx->pci_dev = pci_dev; + efx->msg_enable = debug; + efx->state = STATE_UNINIT; + strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); + + efx->net_dev = net_dev; + efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; + efx->rx_packet_hash_offset = + efx->type->rx_hash_offset - efx->type->rx_prefix_size; + efx->rx_packet_ts_offset = + efx->type->rx_ts_offset - efx->type->rx_prefix_size; + INIT_LIST_HEAD(&efx->rss_context.list); + efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + mutex_init(&efx->rss_lock); + efx->vport_id = EVB_PORT_ID_ASSIGNED; + spin_lock_init(&efx->stats_lock); + efx->vi_stride = EFX_DEFAULT_VI_STRIDE; + efx->num_mac_stats = MC_CMD_MAC_NSTATS; + BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END); + mutex_init(&efx->mac_lock); + init_rwsem(&efx->filter_sem); +#ifdef CONFIG_RFS_ACCEL + mutex_init(&efx->rps_mutex); + spin_lock_init(&efx->rps_hash_lock); + /* Failure to allocate is not fatal, but may degrade ARFS performance */ + efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, + sizeof(*efx->rps_hash_table), GFP_KERNEL); +#endif + efx->mdio.dev = net_dev; + INIT_WORK(&efx->mac_work, efx_mac_work); + init_waitqueue_head(&efx->flush_wq); + + efx->tx_queues_per_channel = 1; + efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE; + efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; + + efx->mem_bar = UINT_MAX; + + rc = efx_init_channels(efx); + if (rc) + goto fail; + + /* Would be good to use the net_dev name, but we're too early */ + snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", + pci_name(pci_dev)); + efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); + if (!efx->workqueue) { + rc = -ENOMEM; + goto fail; + } + + return 0; + +fail: + efx_fini_struct(efx); + return rc; +} + +void efx_fini_struct(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_hash_table); +#endif + + efx_fini_channels(efx); + + kfree(efx->vpd_sn); + + if (efx->workqueue) { + destroy_workqueue(efx->workqueue); + efx->workqueue = NULL; + } +} + +/* This configures the PCI device to enable I/O and DMA. 
*/ +int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, + unsigned int mem_map_size) +{ + struct pci_dev *pci_dev = efx->pci_dev; + int rc; + + efx->mem_bar = UINT_MAX; + + netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar); + + rc = pci_enable_device(pci_dev); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to enable PCI device\n"); + goto fail1; + } + + pci_set_master(pci_dev); + + rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "could not find a suitable DMA mask\n"); + goto fail2; + } + netif_dbg(efx, probe, efx->net_dev, + "using DMA mask %llx\n", (unsigned long long)dma_mask); + + efx->membase_phys = pci_resource_start(efx->pci_dev, bar); + if (!efx->membase_phys) { + netif_err(efx, probe, efx->net_dev, + "ERROR: No BAR%d mapping from the BIOS. " + "Try pci=realloc on the kernel command line\n", bar); + rc = -ENODEV; + goto fail3; + } + + rc = pci_request_region(pci_dev, bar, "sfc"); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "request for memory BAR[%d] failed\n", bar); + rc = -EIO; + goto fail3; + } + efx->mem_bar = bar; + efx->membase = ioremap(efx->membase_phys, mem_map_size); + if (!efx->membase) { + netif_err(efx, probe, efx->net_dev, + "could not map memory BAR[%d] at %llx+%x\n", bar, + (unsigned long long)efx->membase_phys, mem_map_size); + rc = -ENOMEM; + goto fail4; + } + netif_dbg(efx, probe, efx->net_dev, + "memory BAR[%d] at %llx+%x (virtual %p)\n", bar, + (unsigned long long)efx->membase_phys, mem_map_size, + efx->membase); + + return 0; + +fail4: + pci_release_region(efx->pci_dev, bar); +fail3: + efx->membase_phys = 0; +fail2: + pci_disable_device(efx->pci_dev); +fail1: + return rc; +} + +void efx_fini_io(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); + + if (efx->membase) { + iounmap(efx->membase); + efx->membase = NULL; + } + + if (efx->membase_phys) { + pci_release_region(efx->pci_dev, efx->mem_bar); + efx->membase_phys = 0; + efx->mem_bar = UINT_MAX; + } + + /* Don't disable bus-mastering if VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + pci_disable_device(efx->pci_dev); +} + +#ifdef CONFIG_SFC_MCDI_LOGGING +static ssize_t mcdi_logging_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); +} + +static ssize_t mcdi_logging_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = dev_get_drvdata(dev); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool enable = count > 0 && *buf != '0'; + + mcdi->logging_enabled = enable; + return count; +} + +static DEVICE_ATTR_RW(mcdi_logging); + +void efx_init_mcdi_logging(struct efx_nic *efx) +{ + int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); + + if (rc) { + netif_warn(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + } +} + +void efx_fini_mcdi_logging(struct efx_nic *efx) +{ + device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); +} +#endif + +/* A PCI error affecting this device was detected. + * At this point MMIO and DMA may be disabled. + * Stop the software path and request a slot reset. 
+ */ +static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + struct efx_nic *efx = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_RECOVERY; + efx->reset_pending = 0; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + + status = PCI_ERS_RESULT_NEED_RESET; + } else { + /* If the interface is disabled we don't want to do anything + * with it. + */ + status = PCI_ERS_RESULT_RECOVERED; + } + + rtnl_unlock(); + + pci_disable_device(pdev); + + return status; +} + +/* Fake a successful reset, which will be performed later in efx_io_resume. */ +static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + + if (pci_enable_device(pdev)) { + netif_err(efx, hw, efx->net_dev, + "Cannot re-enable PCI device after reset.\n"); + status = PCI_ERS_RESULT_DISCONNECT; + } + + return status; +} + +/* Perform the actual reset and resume I/O operations. */ +static void efx_io_resume(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + int rc; + + rtnl_lock(); + + if (efx->state == STATE_DISABLED) + goto out; + + rc = efx_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "efx_reset failed after PCI error (%d)\n", rc); + } else { + efx->state = STATE_READY; + netif_dbg(efx, hw, efx->net_dev, + "Done resetting and resuming IO after PCI error.\n"); + } + +out: + rtnl_unlock(); +} + +/* For simplicity and reliability, we always require a slot reset and try to + * reset the hardware when a pci error affecting the device is detected. + * We leave both the link_reset and mmio_enabled callback unimplemented: + * with our request for slot reset the mmio_enabled callback will never be + * called, and the link_reset callback is not used by AER or EEH mechanisms. + */ +const struct pci_error_handlers efx_err_handlers = { + .error_detected = efx_io_error_detected, + .slot_reset = efx_io_slot_reset, + .resume = efx_io_resume, +}; + +/* Determine whether the NIC will be able to handle TX offloads for a given + * encapsulated packet. + */ +static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb) +{ + struct gre_base_hdr *greh; + __be16 dst_port; + u8 ipproto; + + /* Does the NIC support encap offloads? + * If not, we should never get here, because we shouldn't have + * advertised encap offload feature flags in the first place. + */ + if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port)) + return false; + + /* Determine encapsulation protocol in use */ + switch (skb->protocol) { + case htons(ETH_P_IP): + ipproto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + /* If there are extension headers, this will cause us to + * think we can't offload something that we maybe could have. + */ + ipproto = ipv6_hdr(skb)->nexthdr; + break; + default: + /* Not IP, so can't offload it */ + return false; + } + switch (ipproto) { + case IPPROTO_GRE: + /* We support NVGRE but not IP over GRE or random gretaps. + * Specifically, the NIC will accept GRE as encapsulated if + * the inner protocol is Ethernet, but only handle it + * correctly if the GRE header is 8 bytes long. Moreover, + * it will not update the Checksum or Sequence Number fields + * if they are present. 
(The Routing Present flag, + * GRE_ROUTING, cannot be set else the header would be more + * than 8 bytes long; so we don't have to worry about it.) + */ + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER) + return false; + if (ntohs(skb->inner_protocol) != ETH_P_TEB) + return false; + if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8) + return false; + greh = (struct gre_base_hdr *)skb_transport_header(skb); + return !(greh->flags & (GRE_CSUM | GRE_SEQ)); + case IPPROTO_UDP: + /* If the port is registered for a UDP tunnel, we assume the + * packet is for that tunnel, and the NIC will handle it as + * such. If not, the NIC won't know what to do with it. + */ + dst_port = udp_hdr(skb)->dest; + return efx->type->udp_tnl_has_port(efx, dst_port); + default: + return false; + } +} + +netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + struct efx_nic *efx = netdev_priv(dev); + + if (skb->encapsulation) { + if (features & NETIF_F_GSO_MASK) + /* Hardware can only do TSO with at most 208 bytes + * of headers. + */ + if (skb_inner_transport_offset(skb) > + EFX_TSO2_MAX_HDRLEN) + features &= ~(NETIF_F_GSO_MASK); + if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK)) + if (!efx_can_encap_offloads(efx, skb)) + features &= ~(NETIF_F_GSO_MASK | + NETIF_F_CSUM_MASK); + } + return features; +} + +int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_phys_port_id) + return efx->type->get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} + +int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (snprintf(name, len, "p%u", efx->port_num) >= len) + return -EINVAL; + return 0; +} diff --git a/drivers/net/ethernet/sfc/siena/efx_common.h b/drivers/net/ethernet/sfc/siena/efx_common.h new file mode 100644 index 000000000000..65513fd0cf6c --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/efx_common.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_COMMON_H +#define EFX_COMMON_H + +int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, + unsigned int mem_map_size); +void efx_fini_io(struct efx_nic *efx); +int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, + struct net_device *net_dev); +void efx_fini_struct(struct efx_nic *efx); + +#define EFX_MAX_DMAQ_SIZE 4096UL +#define EFX_DEFAULT_DMAQ_SIZE 1024UL +#define EFX_MIN_DMAQ_SIZE 512UL + +#define EFX_MAX_EVQ_SIZE 16384UL +#define EFX_MIN_EVQ_SIZE 512UL + +void efx_link_clear_advertising(struct efx_nic *efx); +void efx_link_set_wanted_fc(struct efx_nic *efx, u8); + +void efx_start_all(struct efx_nic *efx); +void efx_stop_all(struct efx_nic *efx); + +void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats); + +int efx_create_reset_workqueue(void); +void efx_queue_reset_work(struct efx_nic *efx); +void efx_flush_reset_workqueue(struct efx_nic *efx); +void efx_destroy_reset_workqueue(void); + +void efx_start_monitor(struct efx_nic *efx); + +int __efx_reconfigure_port(struct efx_nic *efx); +int efx_reconfigure_port(struct efx_nic *efx); + +#define EFX_ASSERT_RESET_SERIALISED(efx) \ + do { \ + if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ + (efx->state == STATE_DISABLED)) \ + ASSERT_RTNL(); \ + } while (0) + +int efx_try_recovery(struct efx_nic *efx); +void efx_reset_down(struct efx_nic *efx, enum reset_type method); +void efx_watchdog(struct net_device *net_dev, unsigned int txqueue); +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +int efx_reset(struct efx_nic *efx, enum reset_type method); +void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); + +/* Dummy PHY ops for PHY drivers */ +int efx_port_dummy_op_int(struct efx_nic *efx); +void efx_port_dummy_op_void(struct efx_nic *efx); + +static inline int efx_check_disabled(struct efx_nic *efx) +{ + if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { + netif_err(efx, drv, efx->net_dev, + "device is disabled due to earlier errors\n"); + return -EIO; + } + return 0; +} + +static inline void efx_schedule_channel(struct efx_channel *channel) +{ + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d scheduling NAPI poll on CPU%d\n", + channel->channel, raw_smp_processor_id()); + + napi_schedule(&channel->napi_str); +} + +static inline void efx_schedule_channel_irq(struct efx_channel *channel) +{ + channel->event_test_cpu = raw_smp_processor_id(); + efx_schedule_channel(channel); +} + +#ifdef CONFIG_SFC_MCDI_LOGGING +void efx_init_mcdi_logging(struct efx_nic *efx); +void efx_fini_mcdi_logging(struct efx_nic *efx); +#else +static inline void efx_init_mcdi_logging(struct efx_nic *efx) {} +static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {} +#endif + +void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only); +int efx_set_mac_address(struct net_device *net_dev, void *data); +void efx_set_rx_mode(struct net_device *net_dev); +int efx_set_features(struct net_device *net_dev, netdev_features_t data); +void efx_link_status_changed(struct efx_nic *efx); +unsigned int efx_xdp_max_mtu(struct efx_nic *efx); +int efx_change_mtu(struct net_device *net_dev, int new_mtu); + +extern const struct pci_error_handlers efx_err_handlers; + +netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features); + +int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); + +int efx_get_phys_port_name(struct net_device 
*net_dev, + char *name, size_t len); +#endif diff --git a/drivers/net/ethernet/sfc/siena/enum.h b/drivers/net/ethernet/sfc/siena/enum.h new file mode 100644 index 000000000000..cd590e0685e5 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/enum.h @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2007-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_ENUM_H +#define EFX_ENUM_H + +/** + * enum efx_loopback_mode - loopback modes + * @LOOPBACK_NONE: no loopback + * @LOOPBACK_DATA: data path loopback + * @LOOPBACK_GMAC: loopback within GMAC + * @LOOPBACK_XGMII: loopback after XMAC + * @LOOPBACK_XGXS: loopback within BPX after XGXS + * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes + * @LOOPBACK_GMII: loopback within BPX after GMAC + * @LOOPBACK_SGMII: loopback within BPX within SGMII + * @LOOPBACK_XGBR: loopback within BPX within XGBR + * @LOOPBACK_XFI: loopback within BPX before XFI serdes + * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes + * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII + * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII + * @LOOPBACK_XFI_FAR: loopback after XFI serdes + * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level + * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level + * @LOOPBACK_PCS: loopback within 10G PHY at PCS level + * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level + * @LOOPBACK_XPORT: cross port loopback + * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC + * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes + * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes + * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes + * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC + * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes + * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes + * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level + */ +/* Please keep up-to-date w.r.t the following two #defines */ +enum efx_loopback_mode { + LOOPBACK_NONE = 0, + LOOPBACK_DATA = 1, + LOOPBACK_GMAC = 2, + LOOPBACK_XGMII = 3, + LOOPBACK_XGXS = 4, + LOOPBACK_XAUI = 5, + LOOPBACK_GMII = 6, + LOOPBACK_SGMII = 7, + LOOPBACK_XGBR = 8, + LOOPBACK_XFI = 9, + LOOPBACK_XAUI_FAR = 10, + LOOPBACK_GMII_FAR = 11, + LOOPBACK_SGMII_FAR = 12, + LOOPBACK_XFI_FAR = 13, + LOOPBACK_GPHY = 14, + LOOPBACK_PHYXS = 15, + LOOPBACK_PCS = 16, + LOOPBACK_PMAPMD = 17, + LOOPBACK_XPORT = 18, + LOOPBACK_XGMII_WS = 19, + LOOPBACK_XAUI_WS = 20, + LOOPBACK_XAUI_WS_FAR = 21, + LOOPBACK_XAUI_WS_NEAR = 22, + LOOPBACK_GMII_WS = 23, + LOOPBACK_XFI_WS = 24, + LOOPBACK_XFI_WS_FAR = 25, + LOOPBACK_PHYXS_WS = 26, + LOOPBACK_MAX +}; +#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD + +/* These loopbacks occur within the controller */ +#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \ + (1 << LOOPBACK_GMAC) | \ + (1 << LOOPBACK_XGMII)| \ + (1 << LOOPBACK_XGXS) | \ + (1 << LOOPBACK_XAUI) | \ + (1 << LOOPBACK_GMII) | \ + (1 << LOOPBACK_SGMII) | \ + (1 << LOOPBACK_XGBR) | \ + (1 << LOOPBACK_XFI) | \ + (1 << LOOPBACK_XAUI_FAR) | \ + (1 << LOOPBACK_GMII_FAR) | \ + (1 << LOOPBACK_SGMII_FAR) | \ + (1 << LOOPBACK_XFI_FAR) | \ + (1 << LOOPBACK_XGMII_WS) | \ + (1 << LOOPBACK_XAUI_WS) | \ + (1 << LOOPBACK_XAUI_WS_FAR) | \ + (1 << LOOPBACK_XAUI_WS_NEAR) | \ + (1 << LOOPBACK_GMII_WS) | \ + (1 << LOOPBACK_XFI_WS) | 
\ + (1 << LOOPBACK_XFI_WS_FAR)) + +#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \ + (1 << LOOPBACK_XAUI_WS) | \ + (1 << LOOPBACK_XAUI_WS_FAR) | \ + (1 << LOOPBACK_XAUI_WS_NEAR) | \ + (1 << LOOPBACK_GMII_WS) | \ + (1 << LOOPBACK_XFI_WS) | \ + (1 << LOOPBACK_XFI_WS_FAR) | \ + (1 << LOOPBACK_PHYXS_WS)) + +#define LOOPBACKS_EXTERNAL(_efx) \ + ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \ + ~(1 << LOOPBACK_NONE)) + +#define LOOPBACK_MASK(_efx) \ + (1 << (_efx)->loopback_mode) + +#define LOOPBACK_INTERNAL(_efx) \ + (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx))) + +#define LOOPBACK_EXTERNAL(_efx) \ + (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx))) + +#define LOOPBACK_CHANGED(_from, _to, _mask) \ + (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask))) + +#define LOOPBACK_OUT_OF(_from, _to, _mask) \ + ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask))) + +/*****************************************************************************/ + +/** + * enum reset_type - reset types + * + * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and + * %RESET_TYPE_DISABLE specify the method/scope of the reset. The + * other values specify reasons, which efx_schedule_reset() will choose + * a method for. + * + * Reset methods are numbered in order of increasing scope. + * + * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) + * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL + * if unsuccessful. + * @RESET_TYPE_ALL: Reset datapath, MAC and PHY + * @RESET_TYPE_WORLD: Reset as much as possible + * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if + * unsuccessful. + * @RESET_TYPE_DATAPATH: Reset datapath only. + * @RESET_TYPE_MC_BIST: MC entering BIST mode. + * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled + * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog + * @RESET_TYPE_INT_ERROR: reset due to internal error + * @RESET_TYPE_DMA_ERROR: DMA error + * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors + * @RESET_TYPE_MC_FAILURE: MC reboot/assertion + * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout. + */ +enum reset_type { + RESET_TYPE_INVISIBLE, + RESET_TYPE_RECOVER_OR_ALL, + RESET_TYPE_ALL, + RESET_TYPE_WORLD, + RESET_TYPE_RECOVER_OR_DISABLE, + RESET_TYPE_DATAPATH, + RESET_TYPE_MC_BIST, + RESET_TYPE_DISABLE, + RESET_TYPE_MAX_METHOD, + RESET_TYPE_TX_WATCHDOG, + RESET_TYPE_INT_ERROR, + RESET_TYPE_DMA_ERROR, + RESET_TYPE_TX_SKIP, + RESET_TYPE_MC_FAILURE, + /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but + * it doesn't fit the scope hierarchy (not well-ordered by inclusion). + * We encode this by having its enum value be greater than + * RESET_TYPE_MAX_METHOD. + */ + RESET_TYPE_MCDI_TIMEOUT, + RESET_TYPE_MAX, +}; + +#endif /* EFX_ENUM_H */ diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c new file mode 100644 index 000000000000..48506373721a --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc.
+ */ + +#include +#include +#include +#include +#include "net_driver.h" +#include "workarounds.h" +#include "selftest.h" +#include "efx.h" +#include "efx_channels.h" +#include "rx_common.h" +#include "tx_common.h" +#include "ethtool_common.h" +#include "filter.h" +#include "nic.h" + +#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB + +/************************************************************************** + * + * Ethtool operations + * + ************************************************************************** + */ + +/* Identify device by flashing LEDs */ +static int efx_ethtool_phys_id(struct net_device *net_dev, + enum ethtool_phys_id_state state) +{ + struct efx_nic *efx = netdev_priv(net_dev); + enum efx_led_mode mode = EFX_LED_DEFAULT; + + switch (state) { + case ETHTOOL_ID_ON: + mode = EFX_LED_ON; + break; + case ETHTOOL_ID_OFF: + mode = EFX_LED_OFF; + break; + case ETHTOOL_ID_INACTIVE: + mode = EFX_LED_DEFAULT; + break; + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + } + + return efx_mcdi_set_id_led(efx, mode); +} + +static int efx_ethtool_get_regs_len(struct net_device *net_dev) +{ + return efx_nic_get_regs_len(netdev_priv(net_dev)); +} + +static void efx_ethtool_get_regs(struct net_device *net_dev, + struct ethtool_regs *regs, void *buf) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + regs->version = efx->type->revision; + efx_nic_get_regs(efx, buf); +} + +/* + * Each channel has a single IRQ and moderation timer, started by any + * completion (or other event). Unless the module parameter + * separate_tx_channels is set, IRQs and moderation are therefore + * shared between RX and TX completions. In this case, when RX IRQ + * moderation is explicitly changed then TX IRQ moderation is + * automatically changed too, but otherwise we fail if the two values + * are requested to be different. + * + * The hardware does not support a limit on the number of completions + * before an IRQ, so we do not use the max_frames fields. We should + * report and require that max_frames == (usecs != 0), but this would + * invalidate existing user documentation. + * + * The hardware does not have distinct settings for interrupt + * moderation while the previous IRQ is being handled, so we should + * not use the 'irq' fields. However, an earlier developer + * misunderstood the meaning of the 'irq' fields and the driver did + * not support the standard fields. To avoid invalidating existing + * user documentation, we report and accept changes through either the + * standard or 'irq' fields. If both are changed at the same time, we + * prefer the standard field. + * + * We implement adaptive IRQ moderation, but use a different algorithm + * from that assumed in the definition of struct ethtool_coalesce. + * Therefore we do not use any of the adaptive moderation parameters + * in it. 
+ */ + +static int efx_ethtool_get_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + unsigned int tx_usecs, rx_usecs; + bool rx_adaptive; + + efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); + + coalesce->tx_coalesce_usecs = tx_usecs; + coalesce->tx_coalesce_usecs_irq = tx_usecs; + coalesce->rx_coalesce_usecs = rx_usecs; + coalesce->rx_coalesce_usecs_irq = rx_usecs; + coalesce->use_adaptive_rx_coalesce = rx_adaptive; + + return 0; +} + +static int efx_ethtool_set_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + unsigned int tx_usecs, rx_usecs; + bool adaptive, rx_may_override_tx; + int rc; + + efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); + + if (coalesce->rx_coalesce_usecs != rx_usecs) + rx_usecs = coalesce->rx_coalesce_usecs; + else + rx_usecs = coalesce->rx_coalesce_usecs_irq; + + adaptive = coalesce->use_adaptive_rx_coalesce; + + /* If channels are shared, TX IRQ moderation can be quietly + * overridden unless it is changed from its old value. + */ + rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && + coalesce->tx_coalesce_usecs_irq == tx_usecs); + if (coalesce->tx_coalesce_usecs != tx_usecs) + tx_usecs = coalesce->tx_coalesce_usecs; + else + tx_usecs = coalesce->tx_coalesce_usecs_irq; + + rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, + rx_may_override_tx); + if (rc != 0) + return rc; + + efx_for_each_channel(channel, efx) + efx->type->push_irq_moderation(channel); + + return 0; +} + +static void +efx_ethtool_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; + ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx); + ring->rx_pending = efx->rxq_entries; + ring->tx_pending = efx->txq_entries; +} + +static int +efx_ethtool_set_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending || + ring->rx_pending > EFX_MAX_DMAQ_SIZE || + ring->tx_pending > EFX_TXQ_MAX_ENT(efx)) + return -EINVAL; + + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { + netif_err(efx, drv, efx->net_dev, + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); + return -EINVAL; + } + + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); +} + +static void efx_ethtool_get_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->type->get_wol(efx, wol); +} + + +static int efx_ethtool_set_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->type->set_wol(efx, wol->wolopts); +} + +static void efx_ethtool_get_fec_stats(struct net_device *net_dev, + struct 
ethtool_fec_stats *fec_stats) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_fec_stats) + efx->type->get_fec_stats(efx, fec_stats); +} + +static int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Software capabilities */ + ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE); + ts_info->phc_index = -1; + + efx_ptp_get_ts_info(efx, ts_info); + return 0; +} + +const struct ethtool_ops efx_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_drvinfo = efx_ethtool_get_drvinfo, + .get_regs_len = efx_ethtool_get_regs_len, + .get_regs = efx_ethtool_get_regs, + .get_msglevel = efx_ethtool_get_msglevel, + .set_msglevel = efx_ethtool_set_msglevel, + .get_link = ethtool_op_get_link, + .get_coalesce = efx_ethtool_get_coalesce, + .set_coalesce = efx_ethtool_set_coalesce, + .get_ringparam = efx_ethtool_get_ringparam, + .set_ringparam = efx_ethtool_set_ringparam, + .get_pauseparam = efx_ethtool_get_pauseparam, + .set_pauseparam = efx_ethtool_set_pauseparam, + .get_sset_count = efx_ethtool_get_sset_count, + .self_test = efx_ethtool_self_test, + .get_strings = efx_ethtool_get_strings, + .set_phys_id = efx_ethtool_phys_id, + .get_ethtool_stats = efx_ethtool_get_stats, + .get_wol = efx_ethtool_get_wol, + .set_wol = efx_ethtool_set_wol, + .reset = efx_ethtool_reset, + .get_rxnfc = efx_ethtool_get_rxnfc, + .set_rxnfc = efx_ethtool_set_rxnfc, + .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, + .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .get_rxfh = efx_ethtool_get_rxfh, + .set_rxfh = efx_ethtool_set_rxfh, + .get_rxfh_context = efx_ethtool_get_rxfh_context, + .set_rxfh_context = efx_ethtool_set_rxfh_context, + .get_ts_info = efx_ethtool_get_ts_info, + .get_module_info = efx_ethtool_get_module_info, + .get_module_eeprom = efx_ethtool_get_module_eeprom, + .get_link_ksettings = efx_ethtool_get_link_ksettings, + .set_link_ksettings = efx_ethtool_set_link_ksettings, + .get_fec_stats = efx_ethtool_get_fec_stats, + .get_fecparam = efx_ethtool_get_fecparam, + .set_fecparam = efx_ethtool_set_fecparam, +}; diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c new file mode 100644 index 000000000000..bd552c7dffcb --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -0,0 +1,1338 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ +#include +#include +#include "net_driver.h" +#include "mcdi.h" +#include "nic.h" +#include "selftest.h" +#include "rx_common.h" +#include "ethtool_common.h" +#include "mcdi_port_common.h" + +struct efx_sw_stat_desc { + const char *name; + enum { + EFX_ETHTOOL_STAT_SOURCE_nic, + EFX_ETHTOOL_STAT_SOURCE_channel, + EFX_ETHTOOL_STAT_SOURCE_tx_queue + } source; + unsigned int offset; + u64 (*get_stat)(void *field); /* Reader function */ +}; + +/* Initialiser for a struct efx_sw_stat_desc with type-checking */ +#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ + get_stat_function) { \ + .name = #stat_name, \ + .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ + .offset = ((((field_type *) 0) == \ + &((struct efx_##source_name *)0)->field) ? \ + offsetof(struct efx_##source_name, field) : \ + offsetof(struct efx_##source_name, field)), \ + .get_stat = get_stat_function, \ +} + +static u64 efx_get_uint_stat(void *field) +{ + return *(unsigned int *)field; +} + +static u64 efx_get_atomic_stat(void *field) +{ + return atomic_read((atomic_t *) field); +} + +#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ + EFX_ETHTOOL_STAT(field, nic, field, \ + atomic_t, efx_get_atomic_stat) + +#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ + EFX_ETHTOOL_STAT(field, channel, n_##field, \ + unsigned int, efx_get_uint_stat) +#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \ + EFX_ETHTOOL_STAT(field, channel, field, \ + unsigned int, efx_get_uint_stat) + +#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ + EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ + unsigned int, efx_get_uint_stat) + +static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { + EFX_ETHTOOL_UINT_TXQ_STAT(merge_events), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks), + EFX_ETHTOOL_UINT_TXQ_STAT(pushes), + EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets), + EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets), + EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect), +#ifdef CONFIG_RFS_ACCEL + EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed), +#endif +}; + +#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) + +void efx_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + efx_mcdi_print_fwver(efx, info->fw_version, + sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); +} + +u32 efx_ethtool_get_msglevel(struct 
net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->msg_enable; +} + +void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + efx->msg_enable = msg_enable; +} + +void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_self_tests *efx_tests; + bool already_up; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + + if (efx->state != STATE_READY) { + rc = -EBUSY; + goto out; + } + + netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up) { + rc = dev_open(efx->net_dev, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed opening device.\n"); + goto out; + } + } + + rc = efx_selftest(efx, efx_tests, test->flags); + + if (!already_up) + dev_close(efx->net_dev); + + netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", + rc == 0 ? "passed" : "failed", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + +out: + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + +void efx_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); + pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); + pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); +} + +int efx_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u8 wanted_fc, old_fc; + u32 old_adv; + int rc = 0; + + mutex_lock(&efx->mac_lock); + + wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | + (pause->tx_pause ? EFX_FC_TX : 0) | + (pause->autoneg ? EFX_FC_AUTO : 0)); + + if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { + netif_dbg(efx, drv, efx->net_dev, + "Flow control unsupported: tx ON rx OFF\n"); + rc = -EINVAL; + goto out; + } + + if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) { + netif_dbg(efx, drv, efx->net_dev, + "Autonegotiation is disabled\n"); + rc = -EINVAL; + goto out; + } + + /* Hook for Falcon bug 11482 workaround */ + if (efx->type->prepare_enable_fc_tx && + (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) + efx->type->prepare_enable_fc_tx(efx); + + old_adv = efx->link_advertising[0]; + old_fc = efx->wanted_fc; + efx_link_set_wanted_fc(efx, wanted_fc); + if (efx->link_advertising[0] != old_adv || + (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { + rc = efx_mcdi_port_reconfigure(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Unable to advertise requested flow " + "control setting\n"); + goto out; + } + } + + /* Reconfigure the MAC. The PHY *may* generate a link state change event + * if the user just changed the advertised capabilities, but there's no + * harm doing this twice */ + efx_mac_reconfigure(efx, false); + +out: + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/** + * efx_fill_test - fill in an individual self-test entry + * @test_index: Index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * @test: Pointer to test result (used only if data != %NULL) + * @unit_format: Unit name format (e.g. 
"chan\%d") + * @unit_id: Unit id (e.g. 0 for "chan0") + * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") + * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") + * + * Fill in an individual self-test entry. + */ +static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, + int *test, const char *unit_format, int unit_id, + const char *test_format, const char *test_id) +{ + char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN]; + + /* Fill data value, if applicable */ + if (data) + data[test_index] = *test; + + /* Fill string, if applicable */ + if (strings) { + if (strchr(unit_format, '%')) + snprintf(unit_str, sizeof(unit_str), + unit_format, unit_id); + else + strcpy(unit_str, unit_format); + snprintf(test_str, sizeof(test_str), test_format, test_id); + snprintf(strings + test_index * ETH_GSTRING_LEN, + ETH_GSTRING_LEN, + "%-6s %-24s", unit_str, test_str); + } +} + +#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel +#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label +#define EFX_LOOPBACK_NAME(_mode, _counter) \ + "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) + +/** + * efx_fill_loopback_test - fill in a block of loopback self-test entries + * @efx: Efx NIC + * @lb_tests: Efx loopback self-test results structure + * @mode: Loopback test mode + * @test_index: Starting index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Fill in a block of loopback self-test entries. Return new test + * index. + */ +static int efx_fill_loopback_test(struct efx_nic *efx, + struct efx_loopback_self_tests *lb_tests, + enum efx_loopback_mode mode, + unsigned int test_index, + u8 *strings, u64 *data) +{ + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_sent[tx_queue->label], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_sent")); + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_done[tx_queue->label], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_done")); + } + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_good, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_good")); + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_bad, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_bad")); + + return test_index; +} + +/** + * efx_ethtool_fill_self_tests - get self-test details + * @efx: Efx NIC + * @tests: Efx self-test results structure, or %NULL + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Get self-test number of strings, strings, and/or test results. + * Return number of strings (== number of test results). + * + * The reason for merging these three functions is to make sure that + * they can never be inconsistent. 
+ */ +int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + u8 *strings, u64 *data) +{ + struct efx_channel *channel; + unsigned int n = 0, i; + enum efx_loopback_mode mode; + + efx_fill_test(n++, strings, data, &tests->phy_alive, + "phy", 0, "alive", NULL); + efx_fill_test(n++, strings, data, &tests->nvram, + "core", 0, "nvram", NULL); + efx_fill_test(n++, strings, data, &tests->interrupt, + "core", 0, "interrupt", NULL); + + /* Event queues */ + efx_for_each_channel(channel, efx) { + efx_fill_test(n++, strings, data, + &tests->eventq_dma[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.dma", NULL); + efx_fill_test(n++, strings, data, + &tests->eventq_int[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.int", NULL); + } + + efx_fill_test(n++, strings, data, &tests->memory, + "core", 0, "memory", NULL); + efx_fill_test(n++, strings, data, &tests->registers, + "core", 0, "registers", NULL); + + for (i = 0; true; ++i) { + const char *name; + + EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); + name = efx_mcdi_phy_test_name(efx, i); + if (name == NULL) + break; + + efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL); + } + + /* Loopback tests */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(efx->loopback_modes & (1 << mode))) + continue; + n = efx_fill_loopback_test(efx, + &tests->loopback[mode], mode, n, + strings, data); + } + + return n; +} + +static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) +{ + size_t n_stats = 0; + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { + n_stats++; + if (strings != NULL) { + snprintf(strings, ETH_GSTRING_LEN, + "tx-%u.tx_packets", + channel->tx_queue[0].queue / + EFX_MAX_TXQ_PER_CHANNEL); + + strings += ETH_GSTRING_LEN; + } + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + n_stats++; + if (strings != NULL) { + snprintf(strings, ETH_GSTRING_LEN, + "rx-%d.rx_packets", channel->channel); + strings += ETH_GSTRING_LEN; + } + } + } + if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { + unsigned short xdp; + + for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { + n_stats++; + if (strings) { + snprintf(strings, ETH_GSTRING_LEN, + "tx-xdp-cpu-%hu.tx_packets", xdp); + strings += ETH_GSTRING_LEN; + } + } + } + + return n_stats; +} + +int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + switch (string_set) { + case ETH_SS_STATS: + return efx->type->describe_stats(efx, NULL) + + EFX_ETHTOOL_SW_STAT_COUNT + + efx_describe_per_queue_stats(efx, NULL) + + efx_ptp_describe_stats(efx, NULL); + case ETH_SS_TEST: + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); + default: + return -EINVAL; + } +} + +void efx_ethtool_get_strings(struct net_device *net_dev, + u32 string_set, u8 *strings) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int i; + + switch (string_set) { + case ETH_SS_STATS: + strings += (efx->type->describe_stats(efx, strings) * + ETH_GSTRING_LEN); + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) + strlcpy(strings + i * ETH_GSTRING_LEN, + efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); + strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; + strings += (efx_describe_per_queue_stats(efx, strings) * + ETH_GSTRING_LEN); + efx_ptp_describe_stats(efx, strings); + break; + case ETH_SS_TEST: + efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); + 
break; + default: + /* No other string sets */ + break; + } +} + +void efx_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + const struct efx_sw_stat_desc *stat; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int i; + + spin_lock_bh(&efx->stats_lock); + + /* Get NIC statistics */ + data += efx->type->update_stats(efx, data, NULL); + + /* Get software statistics */ + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) { + stat = &efx_sw_stat_desc[i]; + switch (stat->source) { + case EFX_ETHTOOL_STAT_SOURCE_nic: + data[i] = stat->get_stat((void *)efx + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_channel: + data[i] = 0; + efx_for_each_channel(channel, efx) + data[i] += stat->get_stat((void *)channel + + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_tx_queue: + data[i] = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + data[i] += + stat->get_stat((void *)tx_queue + + stat->offset); + } + break; + } + } + data += EFX_ETHTOOL_SW_STAT_COUNT; + + spin_unlock_bh(&efx->stats_lock); + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { + *data = 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + *data += tx_queue->tx_packets; + } + data++; + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + *data = 0; + efx_for_each_channel_rx_queue(rx_queue, channel) { + *data += rx_queue->rx_packets; + } + data++; + } + } + if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) { + int xdp; + + for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) { + data[0] = efx->xdp_tx_queues[xdp]->tx_packets; + data++; + } + } + + efx_ptp_update_stats(efx, data); +} + +/* This must be called with rtnl_lock held. */ +int efx_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_link_state *link_state = &efx->link_state; + + mutex_lock(&efx->mac_lock); + efx_mcdi_phy_get_link_ksettings(efx, cmd); + mutex_unlock(&efx->mac_lock); + + /* Both MACs support pause frames (bidirectional and respond-only) */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); + + if (LOOPBACK_INTERNAL(efx)) { + cmd->base.speed = link_state->speed; + cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + } + + return 0; +} + +/* This must be called with rtnl_lock held. 
*/ +int efx_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + /* GMAC does not support 1000Mbps HD */ + if ((cmd->base.speed == SPEED_1000) && + (cmd->base.duplex != DUPLEX_FULL)) { + netif_dbg(efx, drv, efx->net_dev, + "rejecting unsupported 1000Mbps HD setting\n"); + return -EINVAL; + } + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_set_link_ksettings(efx, cmd); + mutex_unlock(&efx->mac_lock); + return rc; +} + +int efx_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_get_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +int efx_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_set_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/* MAC address mask including only I/G bit */ +static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; + +#define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define IP_PROTO_FULL_MASK 0xFF +#define PORT_FULL_MASK ((__force __be16)~0) +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) + +static inline void ip6_fill_mask(__be32 *mask) +{ + mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0; +} + +static int efx_ethtool_get_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule, + u32 *rss_context) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + struct efx_filter_spec spec; + int rc; + + rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, + rule->location, &spec); + if (rc) + return rc; + + if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + rule->ring_cookie = RX_CLS_FLOW_DISC; + else + rule->ring_cookie = spec.dmaq_id; + + if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IP) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
+ TCP_V4_FLOW : UDP_V4_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + ip_entry->ip4dst = spec.loc_host[0]; + ip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + ip_entry->ip4src = spec.rem_host[0]; + ip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip_entry->pdst = spec.loc_port; + ip_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip_entry->psrc = spec.rem_port; + ip_mask->psrc = PORT_FULL_MASK; + } + } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IPV6) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? + TCP_V6_FLOW : UDP_V6_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(ip6_entry->ip6dst, spec.loc_host, + sizeof(ip6_entry->ip6dst)); + ip6_fill_mask(ip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(ip6_entry->ip6src, spec.rem_host, + sizeof(ip6_entry->ip6src)); + ip6_fill_mask(ip6_mask->ip6src); + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip6_entry->pdst = spec.loc_port; + ip6_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip6_entry->psrc = spec.rem_port; + ip6_mask->psrc = PORT_FULL_MASK; + } + } else if (!(spec.match_flags & + ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | + EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_OUTER_VID))) { + rule->flow_type = ETHER_FLOW; + if (spec.match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { + ether_addr_copy(mac_entry->h_dest, spec.loc_mac); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) + eth_broadcast_addr(mac_mask->h_dest); + else + ether_addr_copy(mac_mask->h_dest, + mac_addr_ig_mask); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { + ether_addr_copy(mac_entry->h_source, spec.rem_mac); + eth_broadcast_addr(mac_mask->h_source); + } + if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + mac_entry->h_proto = spec.ether_type; + mac_mask->h_proto = ETHER_TYPE_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV4_USER_FLOW; + uip_entry->ip_ver = ETH_RX_NFC_IP4; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip_mask->proto = IP_PROTO_FULL_MASK; + uip_entry->proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + uip_entry->ip4dst = spec.loc_host[0]; + uip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + uip_entry->ip4src = spec.rem_host[0]; + uip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IPV6) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + 
rule->flow_type = IPV6_USER_FLOW; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip6_mask->l4_proto = IP_PROTO_FULL_MASK; + uip6_entry->l4_proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(uip6_entry->ip6dst, spec.loc_host, + sizeof(uip6_entry->ip6dst)); + ip6_fill_mask(uip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(uip6_entry->ip6src, spec.rem_host, + sizeof(uip6_entry->ip6src)); + ip6_fill_mask(uip6_mask->ip6src); + } + } else { + /* The above should handle all filters that we insert */ + WARN_ON(1); + return -EINVAL; + } + + if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { + rule->flow_type |= FLOW_EXT; + rule->h_ext.vlan_tci = spec.outer_vid; + rule->m_ext.vlan_tci = htons(0xfff); + } + + if (spec.flags & EFX_FILTER_FLAG_RX_RSS) { + rule->flow_type |= FLOW_RSS; + *rss_context = spec.rss_context; + } + + return rc; +} + +int efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u32 rss_context = 0; + s32 rc = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = efx->n_rx_channels; + return 0; + + case ETHTOOL_GRXFH: { + struct efx_rss_context *ctx = &efx->rss_context; + __u64 data; + + mutex_lock(&efx->rss_lock); + if (info->flow_type & FLOW_RSS && info->rss_context) { + ctx = efx_find_rss_context_entry(efx, info->rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + data = 0; + if (!efx_rss_active(ctx)) /* No RSS */ + goto out_setdata_unlock; + + switch (info->flow_type & ~FLOW_RSS) { + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (ctx->rx_hash_udp_4tuple) + data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST); + else + data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V4_FLOW: + case TCP_V6_FLOW: + data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST); + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + data = RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } +out_setdata_unlock: + info->data = data; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; + } + + case ETHTOOL_GRXCLSRLCNT: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + info->data |= RX_CLS_LOC_SPECIAL; + info->rule_cnt = + efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); + return 0; + + case ETHTOOL_GRXCLSRULE: + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context); + if (rc < 0) + return rc; + if (info->fs.flow_type & FLOW_RSS) + info->rss_context = rss_context; + return 0; + + case ETHTOOL_GRXCLSRLALL: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, + rule_locs, info->rule_cnt); + if (rc < 0) + return rc; + info->rule_cnt = rc; + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static inline bool ip6_mask_is_full(__be32 mask[4]) +{ + return !~(mask[0] & mask[1] & mask[2] & mask[3]); +} + +static inline bool ip6_mask_is_empty(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + +static int efx_ethtool_set_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule, + u32 rss_context) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct 
ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; + u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS); + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + enum efx_filter_flags flags = 0; + struct efx_filter_spec spec; + int rc; + + /* Check that user wants us to choose the location */ + if (rule->location != RX_CLS_LOC_ANY) + return -EINVAL; + + /* Range-check ring_cookie */ + if (rule->ring_cookie >= efx->n_rx_channels && + rule->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + /* Check for unsupported extensions */ + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_etype || rule->m_ext.data[0] || + rule->m_ext.data[1])) + return -EINVAL; + + if (efx->rx_scatter) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + if (rule->flow_type & FLOW_RSS) + flags |= EFX_FILTER_FLAG_RX_RSS; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags, + (rule->ring_cookie == RX_CLS_FLOW_DISC) ? + EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); + + if (rule->flow_type & FLOW_RSS) + spec.rss_context = rss_context; + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IP); + spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP + : IPPROTO_UDP; + if (ip_mask->ip4dst) { + if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = ip_entry->ip4dst; + } + if (ip_mask->ip4src) { + if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = ip_entry->ip4src; + } + if (ip_mask->pdst) { + if (ip_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip_entry->pdst; + } + if (ip_mask->psrc) { + if (ip_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip_entry->psrc; + } + if (ip_mask->tos) + return -EINVAL; + break; + + case TCP_V6_FLOW: + case UDP_V6_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IPV6); + spec.ip_proto = flow_type == TCP_V6_FLOW ? 
IPPROTO_TCP + : IPPROTO_UDP; + if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { + if (!ip6_mask_is_full(ip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(ip6_mask->ip6src)) { + if (!ip6_mask_is_full(ip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (ip6_mask->pdst) { + if (ip6_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip6_entry->pdst; + } + if (ip6_mask->psrc) { + if (ip6_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip6_entry->psrc; + } + if (ip6_mask->tclass) + return -EINVAL; + break; + + case IPV4_USER_FLOW: + if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || + uip_entry->ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IP); + if (uip_mask->ip4dst) { + if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = uip_entry->ip4dst; + } + if (uip_mask->ip4src) { + if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = uip_entry->ip4src; + } + if (uip_mask->proto) { + if (uip_mask->proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip_entry->proto; + } + break; + + case IPV6_USER_FLOW: + if (uip6_mask->l4_4_bytes || uip6_mask->tclass) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IPV6); + if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { + if (!ip6_mask_is_full(uip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(uip6_mask->ip6src)) { + if (!ip6_mask_is_full(uip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (uip6_mask->l4_proto) { + if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip6_entry->l4_proto; + } + break; + + case ETHER_FLOW: + if (!is_zero_ether_addr(mac_mask->h_dest)) { + if (ether_addr_equal(mac_mask->h_dest, + mac_addr_ig_mask)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + else if (is_broadcast_ether_addr(mac_mask->h_dest)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; + else + return -EINVAL; + ether_addr_copy(spec.loc_mac, mac_entry->h_dest); + } + if (!is_zero_ether_addr(mac_mask->h_source)) { + if (!is_broadcast_ether_addr(mac_mask->h_source)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; + ether_addr_copy(spec.rem_mac, mac_entry->h_source); + } + if (mac_mask->h_proto) { + if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = mac_entry->h_proto; + } + break; + + default: + return -EINVAL; + } + + if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { + if (rule->m_ext.vlan_tci != htons(0xfff)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec.outer_vid = rule->h_ext.vlan_tci; + } + + rc = 
efx_filter_insert_filter(efx, &spec, true); + if (rc < 0) + return rc; + + rule->location = rc; + return 0; +} + +int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXCLSRLINS: + return efx_ethtool_set_class_rule(efx, &info->fs, + info->rss_context); + + case ETHTOOL_SRXCLSRLDEL: + return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, + info->fs.location); + + default: + return -EOPNOTSUPP; + } +} + +u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->n_rx_channels == 1) + return 0; + return ARRAY_SIZE(efx->rss_context.rx_indir_table); +} + +u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + +int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, efx->rss_context.rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + if (key) + memcpy(key, efx->rss_context.rx_hash_key, + efx->type->rx_hash_key_size); + return 0; +} + +int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if (!indir && !key) + return 0; + + if (!key) + key = efx->rss_context.rx_hash_key; + if (!indir) + indir = efx->rss_context.rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, indir, key); +} + +int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + int rc = 0; + + if (!efx->type->rx_pull_rss_context_config) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_find_rss_context_entry(efx, rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + rc = efx->type->rx_pull_rss_context_config(efx, ctx); + if (rc) + goto out_unlock; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table)); + if (key) + memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size); +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +int efx_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + bool allocated = false; + int rc; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + if (delete) { + /* alloc + delete == Nothing to do */ + rc = -EINVAL; + goto out_unlock; + } + ctx = efx_alloc_rss_context_entry(efx); + if (!ctx) { + rc = -ENOMEM; + goto out_unlock; + } + ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + /* Initialise indir table and key 
to defaults */ + efx_set_default_rx_indir_table(efx, ctx); + netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); + allocated = true; + } else { + ctx = efx_find_rss_context_entry(efx, *rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + if (delete) { + /* delete this context */ + rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); + if (!rc) + efx_free_rss_context_entry(ctx); + goto out_unlock; + } + + if (!key) + key = ctx->rx_hash_key; + if (!indir) + indir = ctx->rx_indir_table; + + rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); + if (rc && allocated) + efx_free_rss_context_entry(ctx); + else + *rss_context = ctx->user_id; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->map_reset_flags(flags); + if (rc < 0) + return rc; + + return efx_reset(efx, rc); +} + +int efx_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + mutex_lock(&efx->mac_lock); + ret = efx_mcdi_phy_get_module_eeprom(efx, ee, data); + mutex_unlock(&efx->mac_lock); + + return ret; +} + +int efx_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + mutex_lock(&efx->mac_lock); + ret = efx_mcdi_phy_get_module_info(efx, modinfo); + mutex_unlock(&efx->mac_lock); + + return ret; +} diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h new file mode 100644 index 000000000000..659491932101 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_ETHTOOL_COMMON_H +#define EFX_ETHTOOL_COMMON_H + +void efx_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info); +u32 efx_ethtool_get_msglevel(struct net_device *net_dev); +void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); +void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data); +void efx_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + u8 *strings, u64 *data); +int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set); +void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, + u8 *strings); +void efx_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats __attribute__ ((unused)), + u64 *data); +int efx_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *out); +int efx_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *settings); +int efx_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); +int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info); +u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev); +u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev); +int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc); +int efx_ethtool_set_rxfh(struct net_device *net_dev, + const u32 *indir, const u8 *key, const u8 hfunc); +int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context); +int efx_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete); +int efx_ethtool_reset(struct net_device *net_dev, u32 *flags); +int efx_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data); +int efx_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo); +#endif diff --git a/drivers/net/ethernet/sfc/siena/farch_regs.h b/drivers/net/ethernet/sfc/siena/farch_regs.h new file mode 100644 index 000000000000..d138be423e63 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/farch_regs.h @@ -0,0 +1,2929 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2012 Solarflare Communications Inc. 
+ */ + +#ifndef EFX_FARCH_REGS_H +#define EFX_FARCH_REGS_H + +/* + * Falcon hardware architecture definitions have a name prefix following + * the format: + * + * F<type>_<min_rev><max_rev>_ + * + * The following <type> strings are used: + * + * MMIO register MC register Host memory structure + * ------------------------------------------------------------- + * Address R MCR + * Bitfield RF MCRF SF + * Enumerator FE MCFE SE + * + * <min_rev> is the first revision to which the definition applies: + * + * A: Falcon A1 (SFC4000AB) + * B: Falcon B0 (SFC4000BA) + * C: Siena A0 (SFL9021AA) + * + * If the definition has been changed or removed in later revisions + * then <max_rev> is the last revision to which the definition applies; + * otherwise it is "Z". + */ + +/************************************************************************** + * + * Falcon/Siena registers and descriptors + * + ************************************************************************** + */ + +/* ADR_REGION_REG: Address region register */ +#define FR_AZ_ADR_REGION 0x00000000 +#define FRF_AZ_ADR_REGION3_LBN 96 +#define FRF_AZ_ADR_REGION3_WIDTH 18 +#define FRF_AZ_ADR_REGION2_LBN 64 +#define FRF_AZ_ADR_REGION2_WIDTH 18 +#define FRF_AZ_ADR_REGION1_LBN 32 +#define FRF_AZ_ADR_REGION1_WIDTH 18 +#define FRF_AZ_ADR_REGION0_LBN 0 +#define FRF_AZ_ADR_REGION0_WIDTH 18 + +/* INT_EN_REG_KER: Kernel driver Interrupt enable register */ +#define FR_AZ_INT_EN_KER 0x00000010 +#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 +#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 +#define FRF_AZ_KER_INT_CHAR_LBN 4 +#define FRF_AZ_KER_INT_CHAR_WIDTH 1 +#define FRF_AZ_KER_INT_KER_LBN 3 +#define FRF_AZ_KER_INT_KER_WIDTH 1 +#define FRF_AZ_DRV_INT_EN_KER_LBN 0 +#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 + +/* INT_EN_REG_CHAR: Char Driver interrupt enable register */ +#define FR_BZ_INT_EN_CHAR 0x00000020 +#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 +#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 +#define FRF_BZ_CHAR_INT_CHAR_LBN 4 +#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 +#define FRF_BZ_CHAR_INT_KER_LBN 3 +#define FRF_BZ_CHAR_INT_KER_WIDTH 1 +#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 +#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 + +/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ +#define FR_AZ_INT_ADR_KER 0x00000030 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 +#define FRF_AZ_INT_ADR_KER_LBN 0 +#define FRF_AZ_INT_ADR_KER_WIDTH 64 + +/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ +#define FR_BZ_INT_ADR_CHAR 0x00000040 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 +#define FRF_BZ_INT_ADR_CHAR_LBN 0 +#define FRF_BZ_INT_ADR_CHAR_WIDTH 64 + +/* INT_ACK_KER: Kernel interrupt acknowledge register */ +#define FR_AA_INT_ACK_KER 0x00000050 +#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 +#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 + +/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ +#define FR_BZ_INT_ISR0 0x00000090 +#define FRF_BZ_INT_ISR_REG_LBN 0 +#define FRF_BZ_INT_ISR_REG_WIDTH 64 + +/* HW_INIT_REG: Hardware initialization register */ +#define FR_AZ_HW_INIT 0x000000c0 +#define FRF_BB_BDMRD_CPLF_FULL_LBN 124 +#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 +#define FRF_CZ_TX_MRG_TAGS_LBN 120 +#define FRF_CZ_TX_MRG_TAGS_WIDTH 1 +#define FRF_AB_TRGT_MASK_ALL_LBN 100 +#define FRF_AB_TRGT_MASK_ALL_WIDTH 1 +#define FRF_AZ_DOORBELL_DROP_LBN 92 +#define FRF_AZ_DOORBELL_DROP_WIDTH 8 +#define FRF_AB_TX_RREQ_MASK_EN_LBN 76 +#define
FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 +#define FRF_AB_PE_EIDLE_DIS_LBN 75 +#define FRF_AB_PE_EIDLE_DIS_WIDTH 1 +#define FRF_AA_FC_BLOCKING_EN_LBN 45 +#define FRF_AA_FC_BLOCKING_EN_WIDTH 1 +#define FRF_BZ_B2B_REQ_EN_LBN 45 +#define FRF_BZ_B2B_REQ_EN_WIDTH 1 +#define FRF_AA_B2B_REQ_EN_LBN 44 +#define FRF_AA_B2B_REQ_EN_WIDTH 1 +#define FRF_BB_FC_BLOCKING_EN_LBN 44 +#define FRF_BB_FC_BLOCKING_EN_WIDTH 1 +#define FRF_AZ_POST_WR_MASK_LBN 40 +#define FRF_AZ_POST_WR_MASK_WIDTH 4 +#define FRF_AZ_TLP_TC_LBN 34 +#define FRF_AZ_TLP_TC_WIDTH 3 +#define FRF_AZ_TLP_ATTR_LBN 32 +#define FRF_AZ_TLP_ATTR_WIDTH 2 +#define FRF_AB_INTB_VEC_LBN 24 +#define FRF_AB_INTB_VEC_WIDTH 5 +#define FRF_AB_INTA_VEC_LBN 16 +#define FRF_AB_INTA_VEC_WIDTH 5 +#define FRF_AZ_WD_TIMER_LBN 8 +#define FRF_AZ_WD_TIMER_WIDTH 8 +#define FRF_AZ_US_DISABLE_LBN 5 +#define FRF_AZ_US_DISABLE_WIDTH 1 +#define FRF_AZ_TLP_EP_LBN 4 +#define FRF_AZ_TLP_EP_WIDTH 1 +#define FRF_AZ_ATTR_SEL_LBN 3 +#define FRF_AZ_ATTR_SEL_WIDTH 1 +#define FRF_AZ_TD_SEL_LBN 1 +#define FRF_AZ_TD_SEL_WIDTH 1 +#define FRF_AZ_TLP_TD_LBN 0 +#define FRF_AZ_TLP_TD_WIDTH 1 + +/* EE_SPI_HCMD_REG: SPI host command register */ +#define FR_AB_EE_SPI_HCMD 0x00000100 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 +#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 +#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 +#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 +#define FRF_AB_EE_SPI_HCMD_READ_LBN 15 +#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 +#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 + +/* USR_EV_CFG: User Level Event Configuration register */ +#define FR_CZ_USR_EV_CFG 0x00000100 +#define FRF_CZ_USREV_DIS_LBN 16 +#define FRF_CZ_USREV_DIS_WIDTH 1 +#define FRF_CZ_DFLT_EVQ_LBN 0 +#define FRF_CZ_DFLT_EVQ_WIDTH 10 + +/* EE_SPI_HADR_REG: SPI host address register */ +#define FR_AB_EE_SPI_HADR 0x00000110 +#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 +#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 +#define FRF_AB_EE_SPI_HADR_ADR_LBN 0 +#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 + +/* EE_SPI_HDATA_REG: SPI host data register */ +#define FR_AB_EE_SPI_HDATA 0x00000120 +#define FRF_AB_EE_SPI_HDATA3_LBN 96 +#define FRF_AB_EE_SPI_HDATA3_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA2_LBN 64 +#define FRF_AB_EE_SPI_HDATA2_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA1_LBN 32 +#define FRF_AB_EE_SPI_HDATA1_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA0_LBN 0 +#define FRF_AB_EE_SPI_HDATA0_WIDTH 32 + +/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ +#define FR_AB_EE_BASE_PAGE 0x00000130 +#define FRF_AB_EE_EXPROM_MASK_LBN 16 +#define FRF_AB_EE_EXPROM_MASK_WIDTH 13 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 + +/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ +#define FR_AB_EE_VPD_CFG0 0x00000140 +#define FRF_AB_EE_SF_FASTRD_EN_LBN 127 +#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 +#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 +#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_VPD_WIP_POLL_LBN 119 +#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 +#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 +#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 +#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 +#define FRF_AB_EE_VPDW_LENGTH_LBN 80 
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPDW_BASE_LBN 64 +#define FRF_AB_EE_VPDW_BASE_WIDTH 15 +#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 +#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 +#define FRF_AB_EE_VPD_BASE_LBN 32 +#define FRF_AB_EE_VPD_BASE_WIDTH 24 +#define FRF_AB_EE_VPD_LENGTH_LBN 16 +#define FRF_AB_EE_VPD_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPD_AD_SIZE_LBN 8 +#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 +#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 +#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 +#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 +#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 +#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 +#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 +#define FRF_AB_EE_VPD_EN_LBN 0 +#define FRF_AB_EE_VPD_EN_WIDTH 1 + +/* EE_VPD_SW_CNTL_REG: VPD access SW control register */ +#define FR_AB_EE_VPD_SW_CNTL 0x00000150 +#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 +#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 +#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_ADR_LBN 0 +#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 + +/* EE_VPD_SW_DATA_REG: VPD access SW data register */ +#define FR_AB_EE_VPD_SW_DATA 0x00000160 +#define FRF_AB_EE_VPD_CYC_DAT_LBN 0 +#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 + +/* PBMX_DBG_IADDR_REG: Capture Module address register */ +#define FR_CZ_PBMX_DBG_IADDR 0x000001f0 +#define FRF_CZ_PBMX_DBG_IADDR_LBN 0 +#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 + +/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ +#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 +#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 +#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 + +/* PBMX_DBG_IDATA_REG: Capture Module data register */ +#define FR_CZ_PBMX_DBG_IDATA 0x000001f8 +#define FRF_CZ_PBMX_DBG_IDATA_LBN 0 +#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 + +/* NIC_STAT_REG: NIC status register */ +#define FR_AB_NIC_STAT 0x00000200 +#define FRF_BB_AER_DIS_LBN 34 +#define FRF_BB_AER_DIS_WIDTH 1 +#define FRF_BB_EE_STRAP_EN_LBN 31 +#define FRF_BB_EE_STRAP_EN_WIDTH 1 +#define FRF_BB_EE_STRAP_LBN 24 +#define FRF_BB_EE_STRAP_WIDTH 4 +#define FRF_BB_REVISION_ID_LBN 17 +#define FRF_BB_REVISION_ID_WIDTH 7 +#define FRF_AB_ONCHIP_SRAM_LBN 16 +#define FRF_AB_ONCHIP_SRAM_WIDTH 1 +#define FRF_AB_SF_PRST_LBN 9 +#define FRF_AB_SF_PRST_WIDTH 1 +#define FRF_AB_EE_PRST_LBN 8 +#define FRF_AB_EE_PRST_WIDTH 1 +#define FRF_AB_ATE_MODE_LBN 3 +#define FRF_AB_ATE_MODE_WIDTH 1 +#define FRF_AB_STRAP_PINS_LBN 0 +#define FRF_AB_STRAP_PINS_WIDTH 3 + +/* GPIO_CTL_REG: GPIO control register */ +#define FR_AB_GPIO_CTL 0x00000210 +#define FRF_AB_GPIO_OUT3_LBN 112 +#define FRF_AB_GPIO_OUT3_WIDTH 16 +#define FRF_AB_GPIO_IN3_LBN 104 +#define FRF_AB_GPIO_IN3_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 +#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 +#define FRF_AB_GPIO_OUT2_LBN 80 +#define FRF_AB_GPIO_OUT2_WIDTH 16 +#define FRF_AB_GPIO_IN2_LBN 72 +#define FRF_AB_GPIO_IN2_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 +#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 +#define FRF_AB_GPIO15_OEN_LBN 63 +#define FRF_AB_GPIO15_OEN_WIDTH 1 +#define FRF_AB_GPIO14_OEN_LBN 62 +#define FRF_AB_GPIO14_OEN_WIDTH 1 +#define FRF_AB_GPIO13_OEN_LBN 61 +#define FRF_AB_GPIO13_OEN_WIDTH 1 +#define FRF_AB_GPIO12_OEN_LBN 60 +#define 
FRF_AB_GPIO12_OEN_WIDTH 1 +#define FRF_AB_GPIO11_OEN_LBN 59 +#define FRF_AB_GPIO11_OEN_WIDTH 1 +#define FRF_AB_GPIO10_OEN_LBN 58 +#define FRF_AB_GPIO10_OEN_WIDTH 1 +#define FRF_AB_GPIO9_OEN_LBN 57 +#define FRF_AB_GPIO9_OEN_WIDTH 1 +#define FRF_AB_GPIO8_OEN_LBN 56 +#define FRF_AB_GPIO8_OEN_WIDTH 1 +#define FRF_AB_GPIO15_OUT_LBN 55 +#define FRF_AB_GPIO15_OUT_WIDTH 1 +#define FRF_AB_GPIO14_OUT_LBN 54 +#define FRF_AB_GPIO14_OUT_WIDTH 1 +#define FRF_AB_GPIO13_OUT_LBN 53 +#define FRF_AB_GPIO13_OUT_WIDTH 1 +#define FRF_AB_GPIO12_OUT_LBN 52 +#define FRF_AB_GPIO12_OUT_WIDTH 1 +#define FRF_AB_GPIO11_OUT_LBN 51 +#define FRF_AB_GPIO11_OUT_WIDTH 1 +#define FRF_AB_GPIO10_OUT_LBN 50 +#define FRF_AB_GPIO10_OUT_WIDTH 1 +#define FRF_AB_GPIO9_OUT_LBN 49 +#define FRF_AB_GPIO9_OUT_WIDTH 1 +#define FRF_AB_GPIO8_OUT_LBN 48 +#define FRF_AB_GPIO8_OUT_WIDTH 1 +#define FRF_AB_GPIO15_IN_LBN 47 +#define FRF_AB_GPIO15_IN_WIDTH 1 +#define FRF_AB_GPIO14_IN_LBN 46 +#define FRF_AB_GPIO14_IN_WIDTH 1 +#define FRF_AB_GPIO13_IN_LBN 45 +#define FRF_AB_GPIO13_IN_WIDTH 1 +#define FRF_AB_GPIO12_IN_LBN 44 +#define FRF_AB_GPIO12_IN_WIDTH 1 +#define FRF_AB_GPIO11_IN_LBN 43 +#define FRF_AB_GPIO11_IN_WIDTH 1 +#define FRF_AB_GPIO10_IN_LBN 42 +#define FRF_AB_GPIO10_IN_WIDTH 1 +#define FRF_AB_GPIO9_IN_LBN 41 +#define FRF_AB_GPIO9_IN_WIDTH 1 +#define FRF_AB_GPIO8_IN_LBN 40 +#define FRF_AB_GPIO8_IN_WIDTH 1 +#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 +#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 +#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 +#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 +#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 +#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 +#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 +#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 +#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_CLK156_OUT_EN_LBN 31 +#define FRF_AB_CLK156_OUT_EN_WIDTH 1 +#define FRF_AB_USE_NIC_CLK_LBN 30 +#define FRF_AB_USE_NIC_CLK_WIDTH 1 +#define FRF_AB_GPIO5_OEN_LBN 29 +#define FRF_AB_GPIO5_OEN_WIDTH 1 +#define FRF_AB_GPIO4_OEN_LBN 28 +#define FRF_AB_GPIO4_OEN_WIDTH 1 +#define FRF_AB_GPIO3_OEN_LBN 27 +#define FRF_AB_GPIO3_OEN_WIDTH 1 +#define FRF_AB_GPIO2_OEN_LBN 26 +#define FRF_AB_GPIO2_OEN_WIDTH 1 +#define FRF_AB_GPIO1_OEN_LBN 25 +#define FRF_AB_GPIO1_OEN_WIDTH 1 +#define FRF_AB_GPIO0_OEN_LBN 24 +#define FRF_AB_GPIO0_OEN_WIDTH 1 +#define FRF_AB_GPIO7_OUT_LBN 23 +#define FRF_AB_GPIO7_OUT_WIDTH 1 +#define FRF_AB_GPIO6_OUT_LBN 22 +#define FRF_AB_GPIO6_OUT_WIDTH 1 +#define FRF_AB_GPIO5_OUT_LBN 21 +#define FRF_AB_GPIO5_OUT_WIDTH 1 +#define FRF_AB_GPIO4_OUT_LBN 20 +#define FRF_AB_GPIO4_OUT_WIDTH 1 +#define FRF_AB_GPIO3_OUT_LBN 19 +#define FRF_AB_GPIO3_OUT_WIDTH 1 +#define FRF_AB_GPIO2_OUT_LBN 18 +#define FRF_AB_GPIO2_OUT_WIDTH 1 +#define FRF_AB_GPIO1_OUT_LBN 17 +#define FRF_AB_GPIO1_OUT_WIDTH 1 +#define FRF_AB_GPIO0_OUT_LBN 16 +#define FRF_AB_GPIO0_OUT_WIDTH 1 +#define FRF_AB_GPIO7_IN_LBN 15 +#define FRF_AB_GPIO7_IN_WIDTH 1 +#define FRF_AB_GPIO6_IN_LBN 14 +#define FRF_AB_GPIO6_IN_WIDTH 1 +#define FRF_AB_GPIO5_IN_LBN 13 +#define FRF_AB_GPIO5_IN_WIDTH 1 +#define FRF_AB_GPIO4_IN_LBN 12 +#define FRF_AB_GPIO4_IN_WIDTH 1 +#define FRF_AB_GPIO3_IN_LBN 11 +#define FRF_AB_GPIO3_IN_WIDTH 1 +#define FRF_AB_GPIO2_IN_LBN 10 +#define FRF_AB_GPIO2_IN_WIDTH 1 +#define 
FRF_AB_GPIO1_IN_LBN 9 +#define FRF_AB_GPIO1_IN_WIDTH 1 +#define FRF_AB_GPIO0_IN_LBN 8 +#define FRF_AB_GPIO0_IN_WIDTH 1 +#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 +#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 +#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 +#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 +#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 +#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 +#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 +#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 + +/* GLB_CTL_REG: Global control register */ +#define FR_AB_GLB_CTL 0x00000220 +#define FRF_AB_EXT_PHY_RST_CTL_LBN 63 +#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 +#define FRF_AB_XAUI_SD_RST_CTL_LBN 62 +#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_SD_RST_CTL_LBN 61 +#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 +#define FRF_AA_PCIX_RST_CTL_LBN 60 +#define FRF_AA_PCIX_RST_CTL_WIDTH 1 +#define FRF_BB_BIU_RST_CTL_LBN 60 +#define FRF_BB_BIU_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 +#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 +#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 +#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 +#define FRF_AB_XGRX_RST_CTL_LBN 56 +#define FRF_AB_XGRX_RST_CTL_WIDTH 1 +#define FRF_AB_XGTX_RST_CTL_LBN 55 +#define FRF_AB_XGTX_RST_CTL_WIDTH 1 +#define FRF_AB_EM_RST_CTL_LBN 54 +#define FRF_AB_EM_RST_CTL_WIDTH 1 +#define FRF_AB_EV_RST_CTL_LBN 53 +#define FRF_AB_EV_RST_CTL_WIDTH 1 +#define FRF_AB_SR_RST_CTL_LBN 52 +#define FRF_AB_SR_RST_CTL_WIDTH 1 +#define FRF_AB_RX_RST_CTL_LBN 51 +#define FRF_AB_RX_RST_CTL_WIDTH 1 +#define FRF_AB_TX_RST_CTL_LBN 50 +#define FRF_AB_TX_RST_CTL_WIDTH 1 +#define FRF_AB_EE_RST_CTL_LBN 49 +#define FRF_AB_EE_RST_CTL_WIDTH 1 +#define FRF_AB_CS_RST_CTL_LBN 48 +#define FRF_AB_CS_RST_CTL_WIDTH 1 +#define FRF_AB_HOT_RST_CTL_LBN 40 +#define FRF_AB_HOT_RST_CTL_WIDTH 2 +#define FRF_AB_RST_EXT_PHY_LBN 31 +#define FRF_AB_RST_EXT_PHY_WIDTH 1 +#define FRF_AB_RST_XAUI_SD_LBN 30 +#define FRF_AB_RST_XAUI_SD_WIDTH 1 +#define FRF_AB_RST_PCIE_SD_LBN 29 +#define FRF_AB_RST_PCIE_SD_WIDTH 1 +#define FRF_AA_RST_PCIX_LBN 28 +#define FRF_AA_RST_PCIX_WIDTH 1 +#define FRF_BB_RST_BIU_LBN 28 +#define FRF_BB_RST_BIU_WIDTH 1 +#define FRF_AB_RST_PCIE_STKY_LBN 27 +#define FRF_AB_RST_PCIE_STKY_WIDTH 1 +#define FRF_AB_RST_PCIE_NSTKY_LBN 26 +#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 +#define FRF_AB_RST_PCIE_CORE_LBN 25 +#define FRF_AB_RST_PCIE_CORE_WIDTH 1 +#define FRF_AB_RST_XGRX_LBN 24 +#define FRF_AB_RST_XGRX_WIDTH 1 +#define FRF_AB_RST_XGTX_LBN 23 +#define FRF_AB_RST_XGTX_WIDTH 1 +#define FRF_AB_RST_EM_LBN 22 +#define FRF_AB_RST_EM_WIDTH 1 +#define FRF_AB_RST_EV_LBN 21 +#define FRF_AB_RST_EV_WIDTH 1 +#define FRF_AB_RST_SR_LBN 20 +#define FRF_AB_RST_SR_WIDTH 1 +#define FRF_AB_RST_RX_LBN 19 +#define FRF_AB_RST_RX_WIDTH 1 +#define FRF_AB_RST_TX_LBN 18 +#define FRF_AB_RST_TX_WIDTH 1 +#define FRF_AB_RST_SF_LBN 17 +#define FRF_AB_RST_SF_WIDTH 1 +#define FRF_AB_RST_CS_LBN 16 +#define FRF_AB_RST_CS_WIDTH 1 +#define FRF_AB_INT_RST_DUR_LBN 4 +#define FRF_AB_INT_RST_DUR_WIDTH 3 +#define FRF_AB_EXT_PHY_RST_DUR_LBN 1 +#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 +#define FFE_AB_EXT_PHY_RST_DUR_10240US 7 +#define FFE_AB_EXT_PHY_RST_DUR_5120US 6 +#define 
FFE_AB_EXT_PHY_RST_DUR_2560US 5 +#define FFE_AB_EXT_PHY_RST_DUR_1280US 4 +#define FFE_AB_EXT_PHY_RST_DUR_640US 3 +#define FFE_AB_EXT_PHY_RST_DUR_320US 2 +#define FFE_AB_EXT_PHY_RST_DUR_160US 1 +#define FFE_AB_EXT_PHY_RST_DUR_80US 0 +#define FRF_AB_SWRST_LBN 0 +#define FRF_AB_SWRST_WIDTH 1 + +/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ +#define FR_AZ_FATAL_INTR_KER 0x00000230 +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 +#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 +#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 +#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 +#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 +#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_LBN 11 +#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 +#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_LBN 8 +#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 +#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 +#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 +#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 +#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 +#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 +#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_LBN 1 +#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_LBN 0 +#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 + +/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */ +#define FR_BZ_FATAL_INTR_CHAR 0x00000240 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 +#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43 +#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42 +#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41 +#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40 +#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39 +#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define 
FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38 +#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35 +#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34 +#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33 +#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32 +#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 +#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11 +#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 +#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 +#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10 +#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1 +#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9 +#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 +#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8 +#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1 +#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7 +#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6 +#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3 +#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2 +#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1 +#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0 +#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1 + +/* DP_CTRL_REG: Datapath control register */ +#define FR_BZ_DP_CTRL 0x00000250 +#define FRF_BZ_FLS_EVQ_ID_LBN 0 +#define FRF_BZ_FLS_EVQ_ID_WIDTH 12 + +/* MEM_STAT_REG: Memory status register */ +#define FR_AZ_MEM_STAT 0x00000260 +#define FRF_AB_MEM_PERR_VEC_LBN 53 +#define FRF_AB_MEM_PERR_VEC_WIDTH 38 +#define FRF_AB_MBIST_CORR_LBN 38 +#define FRF_AB_MBIST_CORR_WIDTH 15 +#define FRF_AB_MBIST_ERR_LBN 0 +#define FRF_AB_MBIST_ERR_WIDTH 40 +#define FRF_CZ_MEM_PERR_VEC_LBN 0 +#define FRF_CZ_MEM_PERR_VEC_WIDTH 35 + +/* CS_DEBUG_REG: Debug register */ +#define FR_AZ_CS_DEBUG 0x00000270 +#define FRF_AB_GLB_DEBUG2_SEL_LBN 50 +#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL2_LBN 47 +#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL1_LBN 44 +#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL0_LBN 41 +#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 +#define FRF_CZ_CS_PORT_NUM_LBN 40 +#define FRF_CZ_CS_PORT_NUM_WIDTH 2 +#define FRF_AB_MISC_DEBUG_ADDR_LBN 36 +#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 +#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 +#define FRF_CZ_CS_PORT_FPE_LBN 1 +#define FRF_CZ_CS_PORT_FPE_WIDTH 35 +#define FRF_AB_EM_DEBUG_ADDR_LBN 26 +#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_SR_DEBUG_ADDR_LBN 21 +#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_EV_DEBUG_ADDR_LBN 16 +#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_RX_DEBUG_ADDR_LBN 11 +#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_TX_DEBUG_ADDR_LBN 6 +#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 +#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 +#define FRF_AZ_CS_DEBUG_EN_LBN 0 +#define FRF_AZ_CS_DEBUG_EN_WIDTH 1 + +/* DRIVER_REG: Driver scratch 
register [0-7] */ +#define FR_AZ_DRIVER 0x00000280 +#define FR_AZ_DRIVER_STEP 16 +#define FR_AZ_DRIVER_ROWS 8 +#define FRF_AZ_DRIVER_DW0_LBN 0 +#define FRF_AZ_DRIVER_DW0_WIDTH 32 + +/* ALTERA_BUILD_REG: Altera build register */ +#define FR_AZ_ALTERA_BUILD 0x00000300 +#define FRF_AZ_ALTERA_BUILD_VER_LBN 0 +#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 + +/* CSR_SPARE_REG: Spare register */ +#define FR_AZ_CSR_SPARE 0x00000310 +#define FRF_AB_MEM_PERR_EN_LBN 64 +#define FRF_AB_MEM_PERR_EN_WIDTH 38 +#define FRF_CZ_MEM_PERR_EN_LBN 64 +#define FRF_CZ_MEM_PERR_EN_WIDTH 35 +#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72 +#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2 +#define FRF_AZ_CSR_SPARE_BITS_LBN 0 +#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 + +/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */ +#define FR_AB_PCIE_SD_CTL0123 0x00000320 +#define FRF_AB_PCIE_TESTSIG_H_LBN 96 +#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 +#define FRF_AB_PCIE_TESTSIG_L_LBN 64 +#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 +#define FRF_AB_PCIE_OFFSET_LBN 56 +#define FRF_AB_PCIE_OFFSET_WIDTH 8 +#define FRF_AB_PCIE_OFFSETEN_H_LBN 55 +#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 +#define FRF_AB_PCIE_OFFSETEN_L_LBN 54 +#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_H_LBN 53 +#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_L_LBN 52 +#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_H_LBN 51 +#define FRF_AB_PCIE_PARRESET_H_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_L_LBN 50 +#define FRF_AB_PCIE_PARRESET_L_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 +#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 +#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 +#define FRF_AB_PCIE_LPBK_LBN 40 +#define FRF_AB_PCIE_LPBK_WIDTH 8 +#define FRF_AB_PCIE_PARLPBK_LBN 32 +#define FRF_AB_PCIE_PARLPBK_WIDTH 8 +#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 +#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 +#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 +#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 +#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 +#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 +#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_RXEQCTL_H_LBN 18 +#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 +#define FRF_AB_PCIE_RXEQCTL_L_LBN 16 +#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 +#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 +#define FFE_AB_PCIE_RXEQCTL_OFF 2 +#define FFE_AB_PCIE_RXEQCTL_MIN 1 +#define FFE_AB_PCIE_RXEQCTL_MAX 0 +#define FRF_AB_PCIE_HIDRV_LBN 8 +#define FRF_AB_PCIE_HIDRV_WIDTH 8 +#define FRF_AB_PCIE_LODRV_LBN 0 +#define FRF_AB_PCIE_LODRV_WIDTH 8 + +/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */ +#define FR_AB_PCIE_SD_CTL45 0x00000330 +#define FRF_AB_PCIE_DTX7_LBN 60 +#define FRF_AB_PCIE_DTX7_WIDTH 4 +#define FRF_AB_PCIE_DTX6_LBN 56 +#define FRF_AB_PCIE_DTX6_WIDTH 4 +#define FRF_AB_PCIE_DTX5_LBN 52 +#define FRF_AB_PCIE_DTX5_WIDTH 4 +#define FRF_AB_PCIE_DTX4_LBN 48 +#define FRF_AB_PCIE_DTX4_WIDTH 4 +#define FRF_AB_PCIE_DTX3_LBN 44 +#define FRF_AB_PCIE_DTX3_WIDTH 4 +#define FRF_AB_PCIE_DTX2_LBN 40 +#define FRF_AB_PCIE_DTX2_WIDTH 4 +#define FRF_AB_PCIE_DTX1_LBN 36 +#define FRF_AB_PCIE_DTX1_WIDTH 4 +#define FRF_AB_PCIE_DTX0_LBN 32 +#define 
FRF_AB_PCIE_DTX0_WIDTH 4 +#define FRF_AB_PCIE_DEQ7_LBN 28 +#define FRF_AB_PCIE_DEQ7_WIDTH 4 +#define FRF_AB_PCIE_DEQ6_LBN 24 +#define FRF_AB_PCIE_DEQ6_WIDTH 4 +#define FRF_AB_PCIE_DEQ5_LBN 20 +#define FRF_AB_PCIE_DEQ5_WIDTH 4 +#define FRF_AB_PCIE_DEQ4_LBN 16 +#define FRF_AB_PCIE_DEQ4_WIDTH 4 +#define FRF_AB_PCIE_DEQ3_LBN 12 +#define FRF_AB_PCIE_DEQ3_WIDTH 4 +#define FRF_AB_PCIE_DEQ2_LBN 8 +#define FRF_AB_PCIE_DEQ2_WIDTH 4 +#define FRF_AB_PCIE_DEQ1_LBN 4 +#define FRF_AB_PCIE_DEQ1_WIDTH 4 +#define FRF_AB_PCIE_DEQ0_LBN 0 +#define FRF_AB_PCIE_DEQ0_WIDTH 4 + +/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */ +#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340 +#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 +#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 +#define FRF_AB_PCIE_PRBSERR_LBN 40 +#define FRF_AB_PCIE_PRBSERR_WIDTH 8 +#define FRF_AB_PCIE_PRBSERRH0_LBN 32 +#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 +#define FRF_AB_PCIE_FASTINIT_H_LBN 15 +#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 +#define FRF_AB_PCIE_FASTINIT_L_LBN 14 +#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 +#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 +#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 +#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 +#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9 +#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 +#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSEL_LBN 0 +#define FRF_AB_PCIE_PRBSSEL_WIDTH 8 + +/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */ +#define FR_BB_DEBUG_DATA_OUT 0x00000350 +#define FRF_BB_DEBUG2_PORT_LBN 25 +#define FRF_BB_DEBUG2_PORT_WIDTH 15 +#define FRF_BB_DEBUG1_PORT_LBN 0 +#define FRF_BB_DEBUG1_PORT_WIDTH 25 + +/* EVQ_RPTR_REGP0: Event queue read pointer register */ +#define FR_BZ_EVQ_RPTR_P0 0x00000400 +#define FR_BZ_EVQ_RPTR_P0_STEP 8192 +#define FR_BZ_EVQ_RPTR_P0_ROWS 1024 +/* EVQ_RPTR_REG_KER: Event queue read pointer register */ +#define FR_AA_EVQ_RPTR_KER 0x00011b00 +#define FR_AA_EVQ_RPTR_KER_STEP 4 +#define FR_AA_EVQ_RPTR_KER_ROWS 4 +/* EVQ_RPTR_REG: Event queue read pointer register */ +#define FR_BZ_EVQ_RPTR 0x00fa0000 +#define FR_BZ_EVQ_RPTR_STEP 16 +#define FR_BB_EVQ_RPTR_ROWS 4096 +#define FR_CZ_EVQ_RPTR_ROWS 1024 +/* EVQ_RPTR_REGP123: Event queue read pointer register */ +#define FR_BB_EVQ_RPTR_P123 0x01000400 +#define FR_BB_EVQ_RPTR_P123_STEP 8192 +#define FR_BB_EVQ_RPTR_P123_ROWS 3072 +#define FRF_AZ_EVQ_RPTR_VLD_LBN 15 +#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 +#define FRF_AZ_EVQ_RPTR_LBN 0 +#define FRF_AZ_EVQ_RPTR_WIDTH 15 + +/* TIMER_COMMAND_REGP0: Timer Command Registers */ +#define FR_BZ_TIMER_COMMAND_P0 0x00000420 +#define FR_BZ_TIMER_COMMAND_P0_STEP 8192 +#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024 +/* TIMER_COMMAND_REG_KER: Timer Command Registers */ +#define FR_AA_TIMER_COMMAND_KER 0x00000420 +#define FR_AA_TIMER_COMMAND_KER_STEP 8192 +#define FR_AA_TIMER_COMMAND_KER_ROWS 4 +/* TIMER_COMMAND_REGP123: Timer Command Registers */ +#define FR_BB_TIMER_COMMAND_P123 0x01000420 +#define FR_BB_TIMER_COMMAND_P123_STEP 8192 +#define FR_BB_TIMER_COMMAND_P123_ROWS 3072 +#define FRF_CZ_TC_TIMER_MODE_LBN 14 +#define FRF_CZ_TC_TIMER_MODE_WIDTH 2 +#define FRF_AB_TC_TIMER_MODE_LBN 12 +#define FRF_AB_TC_TIMER_MODE_WIDTH 2 +#define FRF_CZ_TC_TIMER_VAL_LBN 0 +#define 
FRF_CZ_TC_TIMER_VAL_WIDTH 14 +#define FRF_AB_TC_TIMER_VAL_LBN 0 +#define FRF_AB_TC_TIMER_VAL_WIDTH 12 + +/* DRV_EV_REG: Driver generated event register */ +#define FR_AZ_DRV_EV 0x00000440 +#define FRF_AZ_DRV_EV_QID_LBN 64 +#define FRF_AZ_DRV_EV_QID_WIDTH 12 +#define FRF_AZ_DRV_EV_DATA_LBN 0 +#define FRF_AZ_DRV_EV_DATA_WIDTH 64 + +/* EVQ_CTL_REG: Event queue control register */ +#define FR_AZ_EVQ_CTL 0x00000450 +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 +#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 +#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 +#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 +#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 + +/* EVQ_CNT1_REG: Event counter 1 register */ +#define FR_AZ_EVQ_CNT1 0x00000460 +#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 +#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 +#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 +#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 +#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 +#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 + +/* EVQ_CNT2_REG: Event counter 2 register */ +#define FR_AZ_EVQ_CNT2 0x00000470 +#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 +#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 +#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RDY_CNT_LBN 80 +#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 +#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 + +/* USR_EV_REG: Event mailbox register */ +#define FR_CZ_USR_EV 0x00000540 +#define FR_CZ_USR_EV_STEP 8192 +#define FR_CZ_USR_EV_ROWS 1024 +#define FRF_CZ_USR_EV_DATA_LBN 0 +#define FRF_CZ_USR_EV_DATA_WIDTH 32 + +/* BUF_TBL_CFG_REG: Buffer table configuration register */ +#define FR_AZ_BUF_TBL_CFG 0x00000600 +#define FRF_AZ_BUF_TBL_MODE_LBN 3 +#define FRF_AZ_BUF_TBL_MODE_WIDTH 1 + +/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */ +#define FR_AZ_SRM_RX_DC_CFG 0x00000610 +#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 +#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 + +/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */ +#define FR_AZ_SRM_TX_DC_CFG 0x00000620 +#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 + +/* SRM_CFG_REG: SRAM configuration register */ +#define FR_AZ_SRM_CFG 0x00000630 +#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 +#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 +#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 +#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 +#define FRF_AZ_SRM_INIT_EN_LBN 3 +#define FRF_AZ_SRM_INIT_EN_WIDTH 1 +#define FRF_AZ_SRM_NUM_BANK_LBN 2 +#define FRF_AZ_SRM_NUM_BANK_WIDTH 1 +#define FRF_AZ_SRM_BANK_SIZE_LBN 0 +#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 + +/* BUF_TBL_UPD_REG: Buffer table update register */ +#define FR_AZ_BUF_TBL_UPD 0x00000650 +#define FRF_AZ_BUF_UPD_CMD_LBN 63 
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_CMD_LBN 62 +#define FRF_AZ_BUF_CLR_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_END_ID_LBN 32 +#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 +#define FRF_AZ_BUF_CLR_START_ID_LBN 0 +#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 + +/* SRM_UPD_EVQ_REG: Buffer table update register */ +#define FR_AZ_SRM_UPD_EVQ 0x00000660 +#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 +#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 + +/* SRAM_PARITY_REG: SRAM parity register. */ +#define FR_AZ_SRAM_PARITY 0x00000670 +#define FRF_CZ_BYPASS_ECC_LBN 3 +#define FRF_CZ_BYPASS_ECC_WIDTH 1 +#define FRF_CZ_SEC_INT_LBN 2 +#define FRF_CZ_SEC_INT_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 +#define FRF_AB_FORCE_SRAM_PERR_LBN 0 +#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 + +/* RX_CFG_REG: Receive configuration register */ +#define FR_AZ_RX_CFG 0x00000800 +#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72 +#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14 +#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 +#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 +#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 +#define FRF_BZ_RX_TCP_SUP_LBN 48 +#define FRF_BZ_RX_TCP_SUP_WIDTH 1 +#define FRF_BZ_RX_INGR_EN_LBN 47 +#define FRF_BZ_RX_INGR_EN_WIDTH 1 +#define FRF_BZ_RX_IP_HASH_LBN 46 +#define FRF_BZ_RX_IP_HASH_WIDTH 1 +#define FRF_BZ_RX_HASH_ALG_LBN 45 +#define FRF_BZ_RX_HASH_ALG_WIDTH 1 +#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 +#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 +#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 +#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 +#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 +#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_OWNERR_CTL_LBN 38 +#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 +#define FRF_BZ_RX_XON_TX_TH_LBN 33 +#define FRF_BZ_RX_XON_TX_TH_WIDTH 5 +#define FRF_AA_RX_DESC_PUSH_EN_LBN 35 +#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_AA_RX_RDW_PATCH_EN_LBN 34 +#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 +#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_XOFF_TX_TH_LBN 28 +#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_OWNERR_CTL_LBN 30 +#define FRF_AA_RX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_RX_XON_TX_TH_LBN 25 +#define FRF_AA_RX_XON_TX_TH_WIDTH 5 +#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 +#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_AA_RX_XOFF_TX_TH_LBN 20 +#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_USR_BUF_SIZE_LBN 11 +#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_BZ_RX_XON_MAC_TH_LBN 10 +#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XON_MAC_TH_LBN 6 +#define FRF_AA_RX_XON_MAC_TH_WIDTH 5 +#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 +#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XOFF_MAC_TH_LBN 1 +#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 +#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 +#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 + +/* RX_FILTER_CTL_REG: Receive filter control registers */ +#define FR_BZ_RX_FILTER_CTL 0x00000810 +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 +#define 
FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 +#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 +#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 +#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 +#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32 +#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_NUM_KER_LBN 24 +#define FRF_BZ_NUM_KER_WIDTH 2 +#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16 +#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8 +#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0 +#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 + +/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */ +#define FR_AZ_RX_FLUSH_DESCQ 0x00000820 +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 +#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 + +/* RX_DESC_UPD_REGP0: Receive descriptor update register. */ +#define FR_BZ_RX_DESC_UPD_P0 0x00000830 +#define FR_BZ_RX_DESC_UPD_P0_STEP 8192 +#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024 +/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */ +#define FR_AA_RX_DESC_UPD_KER 0x00000830 +#define FR_AA_RX_DESC_UPD_KER_STEP 8192 +#define FR_AA_RX_DESC_UPD_KER_ROWS 4 +/* RX_DESC_UPD_REGP123: Receive descriptor update register. 
*/ +#define FR_BB_RX_DESC_UPD_P123 0x01000830 +#define FR_BB_RX_DESC_UPD_P123_STEP 8192 +#define FR_BB_RX_DESC_UPD_P123_ROWS 3072 +#define FRF_AZ_RX_DESC_WPTR_LBN 96 +#define FRF_AZ_RX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_RX_DESC_LBN 0 +#define FRF_AZ_RX_DESC_WIDTH 64 + +/* RX_DC_CFG_REG: Receive descriptor cache configuration register */ +#define FR_AZ_RX_DC_CFG 0x00000840 +#define FRF_AB_RX_MAX_PF_LBN 2 +#define FRF_AB_RX_MAX_PF_WIDTH 2 +#define FRF_AZ_RX_DC_SIZE_LBN 0 +#define FRF_AZ_RX_DC_SIZE_WIDTH 2 +#define FFE_AZ_RX_DC_SIZE_64 3 +#define FFE_AZ_RX_DC_SIZE_32 2 +#define FFE_AZ_RX_DC_SIZE_16 1 +#define FFE_AZ_RX_DC_SIZE_8 0 + +/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */ +#define FR_AZ_RX_DC_PF_WM 0x00000850 +#define FRF_AZ_RX_DC_PF_HWM_LBN 6 +#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 +#define FRF_AZ_RX_DC_PF_LWM_LBN 0 +#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 + +/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */ +#define FR_BZ_RX_RSS_TKEY 0x00000860 +#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64 +#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64 +#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0 +#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64 + +/* RX_NODESC_DROP_REG: Receive dropped packet counter register */ +#define FR_AZ_RX_NODESC_DROP 0x00000880 +#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0 +#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32 +#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0 +#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16 + +/* RX_SELF_RST_REG: Receive self reset register */ +#define FR_AA_RX_SELF_RST 0x00000890 +#define FRF_AA_RX_ISCSI_DIS_LBN 17 +#define FRF_AA_RX_ISCSI_DIS_WIDTH 1 +#define FRF_AA_RX_SW_RST_REG_LBN 16 +#define FRF_AA_RX_SW_RST_REG_WIDTH 1 +#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9 +#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1 +#define FRF_AA_RX_SELF_RST_EN_LBN 8 +#define FRF_AA_RX_SELF_RST_EN_WIDTH 1 +#define FRF_AA_RX_MAX_PF_LAT_LBN 4 +#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4 +#define FRF_AA_RX_MAX_LU_LAT_LBN 0 +#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4 + +/* RX_DEBUG_REG: undocumented register */ +#define FR_AZ_RX_DEBUG 0x000008a0 +#define FRF_AZ_RX_DEBUG_LBN 0 +#define FRF_AZ_RX_DEBUG_WIDTH 64 + +/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */ +#define FR_AZ_RX_PUSH_DROP 0x000008b0 +#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 + +/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */ +#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 + +/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */ +#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 + +/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */ +#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0 +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 + +/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */ +#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00 +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 +#define 
FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 + +/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ +#define FR_BZ_TX_DESC_UPD_P0 0x00000a10 +#define FR_BZ_TX_DESC_UPD_P0_STEP 8192 +#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024 +/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */ +#define FR_AA_TX_DESC_UPD_KER 0x00000a10 +#define FR_AA_TX_DESC_UPD_KER_STEP 8192 +#define FR_AA_TX_DESC_UPD_KER_ROWS 8 +/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */ +#define FR_BB_TX_DESC_UPD_P123 0x01000a10 +#define FR_BB_TX_DESC_UPD_P123_STEP 8192 +#define FR_BB_TX_DESC_UPD_P123_ROWS 3072 +#define FRF_AZ_TX_DESC_WPTR_LBN 96 +#define FRF_AZ_TX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_TX_DESC_LBN 0 +#define FRF_AZ_TX_DESC_WIDTH 95 + +/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */ +#define FR_AZ_TX_DC_CFG 0x00000a20 +#define FRF_AZ_TX_DC_SIZE_LBN 0 +#define FRF_AZ_TX_DC_SIZE_WIDTH 2 +#define FFE_AZ_TX_DC_SIZE_32 2 +#define FFE_AZ_TX_DC_SIZE_16 1 +#define FFE_AZ_TX_DC_SIZE_8 0 + +/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */ +#define FR_AA_TX_CHKSM_CFG 0x00000a30 +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 + +/* TX_CFG_REG: Transmit configuration register */ +#define FR_AZ_TX_CFG 0x00000a50 +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 +#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 +#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 +#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 +#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 +#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 +#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 +#define FRF_AZ_TX_P1_PRI_EN_LBN 4 +#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 +#define FRF_AZ_TX_OWNERR_CTL_LBN 2 +#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 +#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 + +/* TX_PUSH_DROP_REG: Transmit push dropped register */ +#define FR_AZ_TX_PUSH_DROP 0x00000a60 +#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 + +/* TX_RESERVED_REG: Transmit configuration register */ +#define 
FR_AZ_TX_RESERVED 0x00000a80 +#define FRF_AZ_TX_EVT_CNT_LBN 121 +#define FRF_AZ_TX_EVT_CNT_WIDTH 7 +#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 +#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 +#define FRF_AZ_TX_RD_COMP_TMR_LBN 96 +#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 +#define FRF_AZ_TX_PUSH_EN_LBN 89 +#define FRF_AZ_TX_PUSH_EN_WIDTH 1 +#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 +#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 +#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 +#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 +#define FRF_AZ_TX_DMAR_ST_P0_LBN 81 +#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 +#define FRF_AZ_TX_DMAQ_ST_LBN 78 +#define FRF_AZ_TX_DMAQ_ST_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_LBN 64 +#define FRF_AZ_TX_RX_SPACER_WIDTH 8 +#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 +#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 +#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 +#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 +#define FRF_AZ_TX_PS_EVT_DIS_LBN 58 +#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_EN_LBN 57 +#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 +#define FRF_AZ_TX_XP_TIMER_LBN 52 +#define FRF_AZ_TX_XP_TIMER_WIDTH 5 +#define FRF_AZ_TX_PREF_SPACER_LBN 44 +#define FRF_AZ_TX_PREF_SPACER_WIDTH 8 +#define FRF_AZ_TX_PREF_WD_TMR_LBN 22 +#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 +#define FRF_AZ_TX_ONLY1TAG_LBN 21 +#define FRF_AZ_TX_ONLY1TAG_WIDTH 1 +#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 +#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 +#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 +#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 +#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 +#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 +#define FRF_AA_TX_DMA_FF_THR_LBN 16 +#define FRF_AA_TX_DMA_FF_THR_WIDTH 1 +#define FRF_AZ_TX_DMA_SPACER_LBN 8 +#define FRF_AZ_TX_DMA_SPACER_WIDTH 8 +#define FRF_AA_TX_TCP_DIS_LBN 7 +#define FRF_AA_TX_TCP_DIS_WIDTH 1 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 +#define FRF_AA_TX_IP_DIS_LBN 6 +#define FRF_AA_TX_IP_DIS_WIDTH 1 +#define FRF_AZ_TX_MAX_CPL_LBN 2 +#define FRF_AZ_TX_MAX_CPL_WIDTH 2 +#define FFE_AZ_TX_MAX_CPL_16 3 +#define FFE_AZ_TX_MAX_CPL_8 2 +#define FFE_AZ_TX_MAX_CPL_4 1 +#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 +#define FRF_AZ_TX_MAX_PREF_LBN 0 +#define FRF_AZ_TX_MAX_PREF_WIDTH 2 +#define FFE_AZ_TX_MAX_PREF_32 3 +#define FFE_AZ_TX_MAX_PREF_16 2 +#define FFE_AZ_TX_MAX_PREF_8 1 +#define FFE_AZ_TX_MAX_PREF_OFF 0 + +/* TX_PACE_REG: Transmit pace control register */ +#define FR_BZ_TX_PACE 0x00000a90 +#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19 +#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10 +#define FRF_BZ_TX_PACE_SB_AF_LBN 9 +#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10 +#define FRF_BZ_TX_PACE_FB_BASE_LBN 5 +#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4 +#define FRF_BZ_TX_PACE_BIN_TH_LBN 0 +#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5 + +/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */ +#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0 +#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0 +#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16 + +/* TX_VLAN_REG: Transmit VLAN tag register */ +#define FR_BB_TX_VLAN 0x00000ae0 +#define FRF_BB_TX_VLAN_EN_LBN 127 +#define FRF_BB_TX_VLAN_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125 +#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124 +#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_LBN 112 +#define FRF_BB_TX_VLAN7_WIDTH 12 +#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109 +#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108 +#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN6_LBN 96 +#define FRF_BB_TX_VLAN6_WIDTH 12 +#define 
FRF_BB_TX_VLAN5_PORT1_EN_LBN 93 +#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92 +#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN5_LBN 80 +#define FRF_BB_TX_VLAN5_WIDTH 12 +#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77 +#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76 +#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN4_LBN 64 +#define FRF_BB_TX_VLAN4_WIDTH 12 +#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61 +#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60 +#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN3_LBN 48 +#define FRF_BB_TX_VLAN3_WIDTH 12 +#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45 +#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44 +#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN2_LBN 32 +#define FRF_BB_TX_VLAN2_WIDTH 12 +#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29 +#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28 +#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN1_LBN 16 +#define FRF_BB_TX_VLAN1_WIDTH 12 +#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13 +#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12 +#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN0_LBN 0 +#define FRF_BB_TX_VLAN0_WIDTH 12 + +/* TX_IPFIL_PORTEN_REG: Transmit filter control register */ +#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0 +#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64 +#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62 +#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60 +#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58 +#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56 +#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54 +#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52 +#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50 +#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48 +#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46 +#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44 +#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42 +#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40 +#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38 +#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36 +#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34 +#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32 +#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30 +#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28 +#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26 +#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24 +#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22 +#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20 +#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18 +#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1 +#define 
FRF_BB_TX_IPFIL8_PORT_EN_LBN 16 +#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14 +#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12 +#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10 +#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8 +#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6 +#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4 +#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2 +#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0 +#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1 + +/* TX_IPFIL_TBL: Transmit IP source address filter table */ +#define FR_BB_TX_IPFIL_TBL 0x00000b00 +#define FR_BB_TX_IPFIL_TBL_STEP 16 +#define FR_BB_TX_IPFIL_TBL_ROWS 16 +#define FRF_BB_TX_IPFIL_MASK_1_LBN 96 +#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32 +#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64 +#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32 +#define FRF_BB_TX_IPFIL_MASK_0_LBN 32 +#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32 +#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0 +#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32 + +/* MD_TXD_REG: PHY management transmit data register */ +#define FR_AB_MD_TXD 0x00000c00 +#define FRF_AB_MD_TXD_LBN 0 +#define FRF_AB_MD_TXD_WIDTH 16 + +/* MD_RXD_REG: PHY management receive data register */ +#define FR_AB_MD_RXD 0x00000c10 +#define FRF_AB_MD_RXD_LBN 0 +#define FRF_AB_MD_RXD_WIDTH 16 + +/* MD_CS_REG: PHY management configuration & status register */ +#define FR_AB_MD_CS 0x00000c20 +#define FRF_AB_MD_RD_EN_CMD_LBN 15 +#define FRF_AB_MD_RD_EN_CMD_WIDTH 1 +#define FRF_AB_MD_WR_EN_CMD_LBN 14 +#define FRF_AB_MD_WR_EN_CMD_WIDTH 1 +#define FRF_AB_MD_ADDR_CMD_LBN 13 +#define FRF_AB_MD_ADDR_CMD_WIDTH 1 +#define FRF_AB_MD_PT_LBN 7 +#define FRF_AB_MD_PT_WIDTH 3 +#define FRF_AB_MD_PL_LBN 6 +#define FRF_AB_MD_PL_WIDTH 1 +#define FRF_AB_MD_INT_CLR_LBN 5 +#define FRF_AB_MD_INT_CLR_WIDTH 1 +#define FRF_AB_MD_GC_LBN 4 +#define FRF_AB_MD_GC_WIDTH 1 +#define FRF_AB_MD_PRSP_LBN 3 +#define FRF_AB_MD_PRSP_WIDTH 1 +#define FRF_AB_MD_RIC_LBN 2 +#define FRF_AB_MD_RIC_WIDTH 1 +#define FRF_AB_MD_RDC_LBN 1 +#define FRF_AB_MD_RDC_WIDTH 1 +#define FRF_AB_MD_WRC_LBN 0 +#define FRF_AB_MD_WRC_WIDTH 1 + +/* MD_PHY_ADR_REG: PHY management PHY address register */ +#define FR_AB_MD_PHY_ADR 0x00000c30 +#define FRF_AB_MD_PHY_ADR_LBN 0 +#define FRF_AB_MD_PHY_ADR_WIDTH 16 + +/* MD_ID_REG: PHY management ID register */ +#define FR_AB_MD_ID 0x00000c40 +#define FRF_AB_MD_PRT_ADR_LBN 11 +#define FRF_AB_MD_PRT_ADR_WIDTH 5 +#define FRF_AB_MD_DEV_ADR_LBN 6 +#define FRF_AB_MD_DEV_ADR_WIDTH 5 + +/* MD_STAT_REG: PHY management status & mask register */ +#define FR_AB_MD_STAT 0x00000c50 +#define FRF_AB_MD_PINT_LBN 4 +#define FRF_AB_MD_PINT_WIDTH 1 +#define FRF_AB_MD_DONE_LBN 3 +#define FRF_AB_MD_DONE_WIDTH 1 +#define FRF_AB_MD_BSERR_LBN 2 +#define FRF_AB_MD_BSERR_WIDTH 1 +#define FRF_AB_MD_LNFL_LBN 1 +#define FRF_AB_MD_LNFL_WIDTH 1 +#define FRF_AB_MD_BSY_LBN 0 +#define FRF_AB_MD_BSY_WIDTH 1 + +/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */ +#define FR_AB_MAC_STAT_DMA 0x00000c60 +#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 +#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 +#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 +#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 + +/* MAC_CTRL_REG: Port MAC control register */ +#define FR_AB_MAC_CTRL 0x00000c80 +#define FRF_AB_MAC_XOFF_VAL_LBN 16 +#define FRF_AB_MAC_XOFF_VAL_WIDTH 16 +#define 
FRF_BB_TXFIFO_DRAIN_EN_LBN 7 +#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 +#define FRF_AB_MAC_XG_DISTXCRC_LBN 5 +#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 +#define FRF_AB_MAC_BCAD_ACPT_LBN 4 +#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 +#define FRF_AB_MAC_UC_PROM_LBN 3 +#define FRF_AB_MAC_UC_PROM_WIDTH 1 +#define FRF_AB_MAC_LINK_STATUS_LBN 2 +#define FRF_AB_MAC_LINK_STATUS_WIDTH 1 +#define FRF_AB_MAC_SPEED_LBN 0 +#define FRF_AB_MAC_SPEED_WIDTH 2 +#define FFE_AB_MAC_SPEED_10G 3 +#define FFE_AB_MAC_SPEED_1G 2 +#define FFE_AB_MAC_SPEED_100M 1 +#define FFE_AB_MAC_SPEED_10M 0 + +/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */ +#define FR_BB_GEN_MODE 0x00000c90 +#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 +#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 +#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XFP_PHY_INT_MASK_LBN 1 +#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 +#define FRF_BB_XG_PHY_INT_MASK_LBN 0 +#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 + +/* MAC_MC_HASH_REG0: Multicast address hash table */ +#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0 +#define FRF_AB_MAC_MCAST_HASH0_LBN 0 +#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 + +/* MAC_MC_HASH_REG1: Multicast address hash table */ +#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0 +#define FRF_AB_MAC_MCAST_HASH1_LBN 0 +#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 + +/* GM_CFG1_REG: GMAC configuration register 1 */ +#define FR_AB_GM_CFG1 0x00000e00 +#define FRF_AB_GM_SW_RST_LBN 31 +#define FRF_AB_GM_SW_RST_WIDTH 1 +#define FRF_AB_GM_SIM_RST_LBN 30 +#define FRF_AB_GM_SIM_RST_WIDTH 1 +#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 +#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 +#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_RX_FUNC_LBN 17 +#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 +#define FRF_AB_GM_RST_TX_FUNC_LBN 16 +#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 +#define FRF_AB_GM_LOOP_LBN 8 +#define FRF_AB_GM_LOOP_WIDTH 1 +#define FRF_AB_GM_RX_FC_EN_LBN 5 +#define FRF_AB_GM_RX_FC_EN_WIDTH 1 +#define FRF_AB_GM_TX_FC_EN_LBN 4 +#define FRF_AB_GM_TX_FC_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_RXEN_LBN 3 +#define FRF_AB_GM_SYNC_RXEN_WIDTH 1 +#define FRF_AB_GM_RX_EN_LBN 2 +#define FRF_AB_GM_RX_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_TXEN_LBN 1 +#define FRF_AB_GM_SYNC_TXEN_WIDTH 1 +#define FRF_AB_GM_TX_EN_LBN 0 +#define FRF_AB_GM_TX_EN_WIDTH 1 + +/* GM_CFG2_REG: GMAC configuration register 2 */ +#define FR_AB_GM_CFG2 0x00000e10 +#define FRF_AB_GM_PAMBL_LEN_LBN 12 +#define FRF_AB_GM_PAMBL_LEN_WIDTH 4 +#define FRF_AB_GM_IF_MODE_LBN 8 +#define FRF_AB_GM_IF_MODE_WIDTH 2 +#define FFE_AB_IF_MODE_BYTE_MODE 2 +#define FFE_AB_IF_MODE_NIBBLE_MODE 1 +#define FRF_AB_GM_HUGE_FRM_EN_LBN 5 +#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 +#define FRF_AB_GM_LEN_CHK_LBN 4 +#define FRF_AB_GM_LEN_CHK_WIDTH 1 +#define FRF_AB_GM_PAD_CRC_EN_LBN 2 +#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 +#define FRF_AB_GM_CRC_EN_LBN 1 +#define FRF_AB_GM_CRC_EN_WIDTH 1 +#define FRF_AB_GM_FD_LBN 0 +#define FRF_AB_GM_FD_WIDTH 1 + +/* GM_IPG_REG: GMAC IPG register */ +#define FR_AB_GM_IPG 0x00000e20 +#define FRF_AB_GM_NONB2B_IPG1_LBN 24 +#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 +#define FRF_AB_GM_NONB2B_IPG2_LBN 16 +#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 +#define FRF_AB_GM_MIN_IPG_ENF_LBN 8 +#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 +#define FRF_AB_GM_B2B_IPG_LBN 0 +#define FRF_AB_GM_B2B_IPG_WIDTH 7 + +/* GM_HD_REG: GMAC half duplex register */ +#define FR_AB_GM_HD 0x00000e30 +#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 +#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19 +#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 +#define FRF_AB_GM_BP_NO_BOFF_LBN 18 +#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 +#define FRF_AB_GM_DIS_BOFF_LBN 17 +#define FRF_AB_GM_DIS_BOFF_WIDTH 1 +#define FRF_AB_GM_EXDEF_TX_EN_LBN 16 +#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 +#define FRF_AB_GM_RTRY_LIMIT_LBN 12 +#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 +#define FRF_AB_GM_COL_WIN_LBN 0 +#define FRF_AB_GM_COL_WIN_WIDTH 10 + +/* GM_MAX_FLEN_REG: GMAC maximum frame length register */ +#define FR_AB_GM_MAX_FLEN 0x00000e40 +#define FRF_AB_GM_MAX_FLEN_LBN 0 +#define FRF_AB_GM_MAX_FLEN_WIDTH 16 + +/* GM_TEST_REG: GMAC test register */ +#define FR_AB_GM_TEST 0x00000e70 +#define FRF_AB_GM_MAX_BOFF_LBN 3 +#define FRF_AB_GM_MAX_BOFF_WIDTH 1 +#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 +#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 +#define FRF_AB_GM_TEST_PAUSE_LBN 1 +#define FRF_AB_GM_TEST_PAUSE_WIDTH 1 +#define FRF_AB_GM_SHORT_SLOT_LBN 0 +#define FRF_AB_GM_SHORT_SLOT_WIDTH 1 + +/* GM_ADR1_REG: GMAC station address register 1 */ +#define FR_AB_GM_ADR1 0x00000f00 +#define FRF_AB_GM_ADR_B0_LBN 24 +#define FRF_AB_GM_ADR_B0_WIDTH 8 +#define FRF_AB_GM_ADR_B1_LBN 16 +#define FRF_AB_GM_ADR_B1_WIDTH 8 +#define FRF_AB_GM_ADR_B2_LBN 8 +#define FRF_AB_GM_ADR_B2_WIDTH 8 +#define FRF_AB_GM_ADR_B3_LBN 0 +#define FRF_AB_GM_ADR_B3_WIDTH 8 + +/* GM_ADR2_REG: GMAC station address register 2 */ +#define FR_AB_GM_ADR2 0x00000f10 +#define FRF_AB_GM_ADR_B4_LBN 24 +#define FRF_AB_GM_ADR_B4_WIDTH 8 +#define FRF_AB_GM_ADR_B5_LBN 16 +#define FRF_AB_GM_ADR_B5_WIDTH 8 + +/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */ +#define FR_AB_GMF_CFG0 0x00000f20 +#define FRF_AB_GMF_FTFENRPLY_LBN 20 +#define FRF_AB_GMF_FTFENRPLY_WIDTH 1 +#define FRF_AB_GMF_STFENRPLY_LBN 19 +#define FRF_AB_GMF_STFENRPLY_WIDTH 1 +#define FRF_AB_GMF_FRFENRPLY_LBN 18 +#define FRF_AB_GMF_FRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_SRFENRPLY_LBN 17 +#define FRF_AB_GMF_SRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_WTMENRPLY_LBN 16 +#define FRF_AB_GMF_WTMENRPLY_WIDTH 1 +#define FRF_AB_GMF_FTFENREQ_LBN 12 +#define FRF_AB_GMF_FTFENREQ_WIDTH 1 +#define FRF_AB_GMF_STFENREQ_LBN 11 +#define FRF_AB_GMF_STFENREQ_WIDTH 1 +#define FRF_AB_GMF_FRFENREQ_LBN 10 +#define FRF_AB_GMF_FRFENREQ_WIDTH 1 +#define FRF_AB_GMF_SRFENREQ_LBN 9 +#define FRF_AB_GMF_SRFENREQ_WIDTH 1 +#define FRF_AB_GMF_WTMENREQ_LBN 8 +#define FRF_AB_GMF_WTMENREQ_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFT_LBN 4 +#define FRF_AB_GMF_HSTRSTFT_WIDTH 1 +#define FRF_AB_GMF_HSTRSTST_LBN 3 +#define FRF_AB_GMF_HSTRSTST_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFR_LBN 2 +#define FRF_AB_GMF_HSTRSTFR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTSR_LBN 1 +#define FRF_AB_GMF_HSTRSTSR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTWT_LBN 0 +#define FRF_AB_GMF_HSTRSTWT_WIDTH 1 + +/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */ +#define FR_AB_GMF_CFG1 0x00000f30 +#define FRF_AB_GMF_CFGFRTH_LBN 16 +#define FRF_AB_GMF_CFGFRTH_WIDTH 5 +#define FRF_AB_GMF_CFGXOFFRTX_LBN 0 +#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 + +/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */ +#define FR_AB_GMF_CFG2 0x00000f40 +#define FRF_AB_GMF_CFGHWM_LBN 16 +#define FRF_AB_GMF_CFGHWM_WIDTH 6 +#define FRF_AB_GMF_CFGLWM_LBN 0 +#define FRF_AB_GMF_CFGLWM_WIDTH 6 + +/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */ +#define FR_AB_GMF_CFG3 0x00000f50 +#define FRF_AB_GMF_CFGHWMFT_LBN 16 +#define FRF_AB_GMF_CFGHWMFT_WIDTH 6 +#define FRF_AB_GMF_CFGFTTH_LBN 0 +#define FRF_AB_GMF_CFGFTTH_WIDTH 6 + +/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ +#define FR_AB_GMF_CFG4 
0x00000f60 +#define FRF_AB_GMF_HSTFLTRFRM_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 + +/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ +#define FR_AB_GMF_CFG5 0x00000f70 +#define FRF_AB_GMF_CFGHDPLX_LBN 22 +#define FRF_AB_GMF_CFGHDPLX_WIDTH 1 +#define FRF_AB_GMF_SRFULL_LBN 21 +#define FRF_AB_GMF_SRFULL_WIDTH 1 +#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 +#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 +#define FRF_AB_GMF_CFGBYTMODE_LBN 19 +#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 +#define FRF_AB_GMF_HSTDRPLT64_LBN 18 +#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 +#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 + +/* TX_SRC_MAC_TBL: Transmit IP source address filter table */ +#define FR_BB_TX_SRC_MAC_TBL 0x00001000 +#define FR_BB_TX_SRC_MAC_TBL_STEP 16 +#define FR_BB_TX_SRC_MAC_TBL_ROWS 16 +#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 +#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 +#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 +#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 + +/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */ +#define FR_BB_TX_SRC_MAC_CTL 0x00001100 +#define FRF_BB_TX_SRC_DROP_CTR_LBN 16 +#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 +#define FRF_BB_TX_SRC_FLTR_EN_LBN 15 +#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 +#define FRF_BB_TX_DROP_CTR_CLR_LBN 12 +#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 +#define FRF_BB_TX_MAC_QID_SEL_LBN 0 +#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 + +/* XM_ADR_LO_REG: XGMAC address register low */ +#define FR_AB_XM_ADR_LO 0x00001200 +#define FRF_AB_XM_ADR_LO_LBN 0 +#define FRF_AB_XM_ADR_LO_WIDTH 32 + +/* XM_ADR_HI_REG: XGMAC address register high */ +#define FR_AB_XM_ADR_HI 0x00001210 +#define FRF_AB_XM_ADR_HI_LBN 0 +#define FRF_AB_XM_ADR_HI_WIDTH 16 + +/* XM_GLB_CFG_REG: XGMAC global configuration */ +#define FR_AB_XM_GLB_CFG 0x00001220 +#define FRF_AB_XM_RMTFLT_GEN_LBN 17 +#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 +#define FRF_AB_XM_DEBUG_MODE_LBN 16 +#define FRF_AB_XM_DEBUG_MODE_WIDTH 1 +#define FRF_AB_XM_RX_STAT_EN_LBN 11 +#define FRF_AB_XM_RX_STAT_EN_WIDTH 1 +#define FRF_AB_XM_TX_STAT_EN_LBN 10 +#define FRF_AB_XM_TX_STAT_EN_WIDTH 1 +#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6 +#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_WAN_MODE_LBN 5 +#define FRF_AB_XM_WAN_MODE_WIDTH 1 +#define FRF_AB_XM_INTCLR_MODE_LBN 3 +#define FRF_AB_XM_INTCLR_MODE_WIDTH 1 +#define FRF_AB_XM_CORE_RST_LBN 0 +#define FRF_AB_XM_CORE_RST_WIDTH 1 + +/* XM_TX_CFG_REG: XGMAC transmit configuration */ +#define FR_AB_XM_TX_CFG 0x00001230 +#define FRF_AB_XM_TX_PROG_LBN 24 +#define FRF_AB_XM_TX_PROG_WIDTH 1 +#define FRF_AB_XM_IPG_LBN 16 +#define FRF_AB_XM_IPG_WIDTH 4 +#define FRF_AB_XM_FCNTL_LBN 10 +#define FRF_AB_XM_FCNTL_WIDTH 1 +#define FRF_AB_XM_TXCRC_LBN 8 +#define FRF_AB_XM_TXCRC_WIDTH 1 +#define FRF_AB_XM_EDRC_LBN 6 +#define FRF_AB_XM_EDRC_WIDTH 1 +#define FRF_AB_XM_AUTO_PAD_LBN 5 +#define FRF_AB_XM_AUTO_PAD_WIDTH 1 +#define FRF_AB_XM_TX_PRMBL_LBN 2 +#define FRF_AB_XM_TX_PRMBL_WIDTH 1 +#define FRF_AB_XM_TXEN_LBN 1 +#define FRF_AB_XM_TXEN_WIDTH 1 +#define FRF_AB_XM_TX_RST_LBN 0 +#define FRF_AB_XM_TX_RST_WIDTH 1 + +/* XM_RX_CFG_REG: XGMAC receive configuration */ +#define FR_AB_XM_RX_CFG 0x00001240 +#define FRF_AB_XM_PASS_LENERR_LBN 26 +#define FRF_AB_XM_PASS_LENERR_WIDTH 1 +#define FRF_AB_XM_PASS_CRC_ERR_LBN 25 +#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 +#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 +#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_REJ_BCAST_LBN 20 +#define FRF_AB_XM_REJ_BCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 +#define 
FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 +#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 +#define FRF_AB_XM_AUTO_DEPAD_LBN 8 +#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 +#define FRF_AB_XM_RXCRC_LBN 3 +#define FRF_AB_XM_RXCRC_WIDTH 1 +#define FRF_AB_XM_RX_PRMBL_LBN 2 +#define FRF_AB_XM_RX_PRMBL_WIDTH 1 +#define FRF_AB_XM_RXEN_LBN 1 +#define FRF_AB_XM_RXEN_WIDTH 1 +#define FRF_AB_XM_RX_RST_LBN 0 +#define FRF_AB_XM_RX_RST_WIDTH 1 + +/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */ +#define FR_AB_XM_MGT_INT_MASK 0x00001250 +#define FRF_AB_XM_MSK_STA_INTR_LBN 16 +#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_MSK_RMTFLT_LBN 1 +#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 +#define FRF_AB_XM_MSK_LCLFLT_LBN 0 +#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 + +/* XM_FC_REG: XGMAC flow control register */ +#define FR_AB_XM_FC 0x00001270 +#define FRF_AB_XM_PAUSE_TIME_LBN 16 +#define FRF_AB_XM_PAUSE_TIME_WIDTH 16 +#define FRF_AB_XM_RX_MAC_STAT_LBN 11 +#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_TX_MAC_STAT_LBN 10 +#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_MCNTL_PASS_LBN 8 +#define FRF_AB_XM_MCNTL_PASS_WIDTH 2 +#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 +#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 +#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 +#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 +#define FRF_AB_XM_ZPAUSE_LBN 2 +#define FRF_AB_XM_ZPAUSE_WIDTH 1 +#define FRF_AB_XM_XMIT_PAUSE_LBN 1 +#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 +#define FRF_AB_XM_DIS_FCNTL_LBN 0 +#define FRF_AB_XM_DIS_FCNTL_WIDTH 1 + +/* XM_PAUSE_TIME_REG: XGMAC pause time register */ +#define FR_AB_XM_PAUSE_TIME 0x00001290 +#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 +#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 +#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 +#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 + +/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ +#define FR_AB_XM_TX_PARAM 0x000012d0 +#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 +#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 +#define FRF_AB_XM_PAD_CHAR_LBN 0 +#define FRF_AB_XM_PAD_CHAR_WIDTH 8 + +/* XM_RX_PARAM_REG: XGMAC receive parameter register */ +#define FR_AB_XM_RX_PARAM 0x000012e0 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 + +/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */ +#define FR_AB_XM_MGT_INT_MSK 0x000012f0 +#define FRF_AB_XM_STAT_CNTR_OF_LBN 9 +#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_STAT_CNTR_HF_LBN 8 +#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_RMTFLT_LBN 1 +#define FRF_AB_XM_RMTFLT_WIDTH 1 +#define FRF_AB_XM_LCLFLT_LBN 0 +#define FRF_AB_XM_LCLFLT_WIDTH 1 + +/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */ +#define FR_AB_XX_PWR_RST 0x00001300 +#define FRF_AB_XX_PWRDND_SIG_LBN 31 +#define FRF_AB_XX_PWRDND_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNC_SIG_LBN 30 +#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNB_SIG_LBN 29 +#define 
FRF_AB_XX_PWRDNB_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNA_SIG_LBN 28 +#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 +#define FRF_AB_XX_SIM_MODE_LBN 27 +#define FRF_AB_XX_SIM_MODE_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 +#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 +#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 +#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETD_SIG_LBN 23 +#define FRF_AB_XX_RESETD_SIG_WIDTH 1 +#define FRF_AB_XX_RESETC_SIG_LBN 22 +#define FRF_AB_XX_RESETC_SIG_WIDTH 1 +#define FRF_AB_XX_RESETB_SIG_LBN 21 +#define FRF_AB_XX_RESETB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETA_SIG_LBN 20 +#define FRF_AB_XX_RESETA_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 +#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 +#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 +#define FRF_AB_XX_SD_RST_ACT_LBN 16 +#define FRF_AB_XX_SD_RST_ACT_WIDTH 1 +#define FRF_AB_XX_PWRDND_EN_LBN 15 +#define FRF_AB_XX_PWRDND_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNC_EN_LBN 14 +#define FRF_AB_XX_PWRDNC_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNB_EN_LBN 13 +#define FRF_AB_XX_PWRDNB_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNA_EN_LBN 12 +#define FRF_AB_XX_PWRDNA_EN_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_EN_LBN 9 +#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 +#define FRF_AB_XX_RSTPLLAB_EN_LBN 8 +#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 +#define FRF_AB_XX_RESETD_EN_LBN 7 +#define FRF_AB_XX_RESETD_EN_WIDTH 1 +#define FRF_AB_XX_RESETC_EN_LBN 6 +#define FRF_AB_XX_RESETC_EN_WIDTH 1 +#define FRF_AB_XX_RESETB_EN_LBN 5 +#define FRF_AB_XX_RESETB_EN_WIDTH 1 +#define FRF_AB_XX_RESETA_EN_LBN 4 +#define FRF_AB_XX_RESETA_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 +#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 +#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 +#define FRF_AB_XX_RST_XX_EN_LBN 0 +#define FRF_AB_XX_RST_XX_EN_WIDTH 1 + +/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */ +#define FR_AB_XX_SD_CTL 0x00001310 +#define FRF_AB_XX_TERMADJ1_LBN 17 +#define FRF_AB_XX_TERMADJ1_WIDTH 1 +#define FRF_AB_XX_TERMADJ0_LBN 16 +#define FRF_AB_XX_TERMADJ0_WIDTH 1 +#define FRF_AB_XX_HIDRVD_LBN 15 +#define FRF_AB_XX_HIDRVD_WIDTH 1 +#define FRF_AB_XX_LODRVD_LBN 14 +#define FRF_AB_XX_LODRVD_WIDTH 1 +#define FRF_AB_XX_HIDRVC_LBN 13 +#define FRF_AB_XX_HIDRVC_WIDTH 1 +#define FRF_AB_XX_LODRVC_LBN 12 +#define FRF_AB_XX_LODRVC_WIDTH 1 +#define FRF_AB_XX_HIDRVB_LBN 11 +#define FRF_AB_XX_HIDRVB_WIDTH 1 +#define FRF_AB_XX_LODRVB_LBN 10 +#define FRF_AB_XX_LODRVB_WIDTH 1 +#define FRF_AB_XX_HIDRVA_LBN 9 +#define FRF_AB_XX_HIDRVA_WIDTH 1 +#define FRF_AB_XX_LODRVA_LBN 8 +#define FRF_AB_XX_LODRVA_WIDTH 1 +#define FRF_AB_XX_LPBKD_LBN 3 +#define FRF_AB_XX_LPBKD_WIDTH 1 +#define FRF_AB_XX_LPBKC_LBN 2 +#define FRF_AB_XX_LPBKC_WIDTH 1 +#define FRF_AB_XX_LPBKB_LBN 1 +#define FRF_AB_XX_LPBKB_WIDTH 1 +#define FRF_AB_XX_LPBKA_LBN 0 +#define FRF_AB_XX_LPBKA_WIDTH 1 + +/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ +#define FR_AB_XX_TXDRV_CTL 0x00001320 +#define FRF_AB_XX_DEQD_LBN 28 +#define FRF_AB_XX_DEQD_WIDTH 4 +#define FRF_AB_XX_DEQC_LBN 24 +#define FRF_AB_XX_DEQC_WIDTH 4 +#define FRF_AB_XX_DEQB_LBN 20 +#define FRF_AB_XX_DEQB_WIDTH 4 +#define FRF_AB_XX_DEQA_LBN 16 +#define FRF_AB_XX_DEQA_WIDTH 4 +#define FRF_AB_XX_DTXD_LBN 12 +#define FRF_AB_XX_DTXD_WIDTH 4 +#define FRF_AB_XX_DTXC_LBN 8 +#define FRF_AB_XX_DTXC_WIDTH 4 +#define FRF_AB_XX_DTXB_LBN 4 +#define FRF_AB_XX_DTXB_WIDTH 4 +#define FRF_AB_XX_DTXA_LBN 0 +#define FRF_AB_XX_DTXA_WIDTH 4 + +/* XX_PRBS_CTL_REG: documentation to be written for sum_XX_PRBS_CTL_REG */ 
+#define FR_AB_XX_PRBS_CTL 0x00001330 +#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 +#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 +#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 +#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 +#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 +#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 +#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 +#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 +#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 +#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 +#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 + +/* XX_PRBS_CHK_REG: documentation to be written for sum_XX_PRBS_CHK_REG */ +#define FR_AB_XX_PRBS_CHK 0x00001340 +#define FRF_AB_XX_REV_LB_EN_LBN 16 +#define FRF_AB_XX_REV_LB_EN_WIDTH 1 +#define FRF_AB_XX_CH3_DEG_DET_LBN 15 +#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 +#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH3_ERR_CHK_LBN 12 +#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH2_DEG_DET_LBN 11 +#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 +#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH2_ERR_CHK_LBN 8 +#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH1_DEG_DET_LBN 7 +#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 +#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH1_ERR_CHK_LBN 4 +#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH0_DEG_DET_LBN 3 +#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH0_ERR_CHK_LBN 0 +#define 
FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 + +/* XX_PRBS_ERR_REG: documentation to be written for sum_XX_PRBS_ERR_REG */ +#define FR_AB_XX_PRBS_ERR 0x00001350 +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 + +/* XX_CORE_STAT_REG: XAUI XGXS core status register */ +#define FR_AB_XX_CORE_STAT 0x00001360 +#define FRF_AB_XX_FORCE_SIG3_LBN 31 +#define FRF_AB_XX_FORCE_SIG3_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 +#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_LBN 29 +#define FRF_AB_XX_FORCE_SIG2_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 +#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_LBN 27 +#define FRF_AB_XX_FORCE_SIG1_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 +#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_LBN 25 +#define FRF_AB_XX_FORCE_SIG0_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 +#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 +#define FRF_AB_XX_XGXS_LB_EN_LBN 23 +#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 +#define FRF_AB_XX_XGMII_LB_EN_LBN 22 +#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 +#define FRF_AB_XX_MATCH_FAULT_LBN 21 +#define FRF_AB_XX_MATCH_FAULT_WIDTH 1 +#define FRF_AB_XX_ALIGN_DONE_LBN 20 +#define FRF_AB_XX_ALIGN_DONE_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT3_LBN 19 +#define FRF_AB_XX_SYNC_STAT3_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT2_LBN 18 +#define FRF_AB_XX_SYNC_STAT2_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT1_LBN 17 +#define FRF_AB_XX_SYNC_STAT1_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT0_LBN 16 +#define FRF_AB_XX_SYNC_STAT0_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH3_LBN 15 +#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH2_LBN 14 +#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH1_LBN 13 +#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH0_LBN 12 +#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 +#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 +#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 +#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 +#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 +#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 +#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 +#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 +#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH3_LBN 3 +#define FRF_AB_XX_DISPERR_CH3_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH2_LBN 2 +#define FRF_AB_XX_DISPERR_CH2_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH1_LBN 1 +#define FRF_AB_XX_DISPERR_CH1_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH0_LBN 0 +#define FRF_AB_XX_DISPERR_CH0_WIDTH 1 + +/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */ +#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800 +#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 +/* RX_DESC_PTR_TBL: Receive descriptor pointer table */ +#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000 +#define FR_BZ_RX_DESC_PTR_TBL_STEP 16 +#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 +#define FRF_CZ_RX_HDR_SPLIT_LBN 90 +#define 
FRF_CZ_RX_HDR_SPLIT_WIDTH 1 +#define FRF_AA_RX_RESET_LBN 89 +#define FRF_AA_RX_RESET_WIDTH 1 +#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 +#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 +#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 +#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 +#define FRF_AZ_RX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 +#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_RX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_RX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_RX_DESCQ_SIZE_4K 3 +#define FFE_AZ_RX_DESCQ_SIZE_2K 2 +#define FFE_AZ_RX_DESCQ_SIZE_1K 1 +#define FFE_AZ_RX_DESCQ_SIZE_512 0 +#define FRF_AZ_RX_DESCQ_TYPE_LBN 2 +#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 +#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 +#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 +#define FRF_AZ_RX_DESCQ_EN_LBN 0 +#define FRF_AZ_RX_DESCQ_EN_WIDTH 1 + +/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */ +#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 +#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 +/* TX_DESC_PTR_TBL: Transmit descriptor pointer */ +#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 +#define FR_BZ_TX_DESC_PTR_TBL_STEP 16 +#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 +#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 +#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 +#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 +#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 +#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 +#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 +#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 +#define FRF_AZ_TX_DESCQ_EN_LBN 88 +#define FRF_AZ_TX_DESCQ_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 +#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 +#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_TX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_TX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_TX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_TX_DESCQ_SIZE_4K 3 +#define FFE_AZ_TX_DESCQ_SIZE_2K 2 +#define FFE_AZ_TX_DESCQ_SIZE_1K 1 +#define FFE_AZ_TX_DESCQ_SIZE_512 0 +#define FRF_AZ_TX_DESCQ_TYPE_LBN 1 +#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 +#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 +#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 + +/* EVQ_PTR_TBL_KER: Event queue pointer table */ +#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 +#define FR_AA_EVQ_PTR_TBL_KER_STEP 16 +#define 
FR_AA_EVQ_PTR_TBL_KER_ROWS 4 +/* EVQ_PTR_TBL: Event queue pointer table */ +#define FR_BZ_EVQ_PTR_TBL 0x00f60000 +#define FR_BZ_EVQ_PTR_TBL_STEP 16 +#define FR_CZ_EVQ_PTR_TBL_ROWS 1024 +#define FR_BB_EVQ_PTR_TBL_ROWS 4096 +#define FRF_BZ_EVQ_RPTR_IGN_LBN 40 +#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 +#define FRF_AZ_EVQ_NXT_WPTR_LBN 24 +#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 +#define FRF_AZ_EVQ_EN_LBN 23 +#define FRF_AZ_EVQ_EN_WIDTH 1 +#define FRF_AZ_EVQ_SIZE_LBN 20 +#define FRF_AZ_EVQ_SIZE_WIDTH 3 +#define FFE_AZ_EVQ_SIZE_32K 6 +#define FFE_AZ_EVQ_SIZE_16K 5 +#define FFE_AZ_EVQ_SIZE_8K 4 +#define FFE_AZ_EVQ_SIZE_4K 3 +#define FFE_AZ_EVQ_SIZE_2K 2 +#define FFE_AZ_EVQ_SIZE_1K 1 +#define FFE_AZ_EVQ_SIZE_512 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 + +/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */ +#define FR_AA_BUF_HALF_TBL_KER 0x00018000 +#define FR_AA_BUF_HALF_TBL_KER_STEP 8 +#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 +/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */ +#define FR_BZ_BUF_HALF_TBL 0x00800000 +#define FR_BZ_BUF_HALF_TBL_STEP 8 +#define FR_CZ_BUF_HALF_TBL_ROWS 147456 +#define FR_BB_BUF_HALF_TBL_ROWS 524288 +#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 +#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 + +/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */ +#define FR_AA_BUF_FULL_TBL_KER 0x00018000 +#define FR_AA_BUF_FULL_TBL_KER_STEP 8 +#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 +/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */ +#define FR_BZ_BUF_FULL_TBL 0x00800000 +#define FR_BZ_BUF_FULL_TBL_STEP 8 +#define FR_CZ_BUF_FULL_TBL_ROWS 147456 +#define FR_BB_BUF_FULL_TBL_ROWS 917504 +#define FRF_AZ_BUF_FULL_UNUSED_LBN 51 +#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 +#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 +#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 +#define FRF_AZ_BUF_ADR_REGION_LBN 48 +#define FRF_AZ_BUF_ADR_REGION_WIDTH 2 +#define FFE_AZ_BUF_ADR_REGN3 3 +#define FFE_AZ_BUF_ADR_REGN2 2 +#define FFE_AZ_BUF_ADR_REGN1 1 +#define FFE_AZ_BUF_ADR_REGN0 0 +#define FRF_AZ_BUF_ADR_FBUF_LBN 14 +#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 +#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 + +/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ +#define FR_BZ_RX_FILTER_TBL0 0x00f00000 +#define FR_BZ_RX_FILTER_TBL0_STEP 32 +#define FR_BZ_RX_FILTER_TBL0_ROWS 8192 +/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ +#define FR_BB_RX_FILTER_TBL1 0x00f00010 +#define FR_BB_RX_FILTER_TBL1_STEP 32 +#define FR_BB_RX_FILTER_TBL1_ROWS 8192 +#define FRF_BZ_RSS_EN_LBN 110 +#define FRF_BZ_RSS_EN_WIDTH 1 +#define FRF_BZ_SCATTER_EN_LBN 109 +#define FRF_BZ_SCATTER_EN_WIDTH 1 +#define FRF_BZ_TCP_UDP_LBN 108 +#define FRF_BZ_TCP_UDP_WIDTH 1 +#define FRF_BZ_RXQ_ID_LBN 96 +#define FRF_BZ_RXQ_ID_WIDTH 12 +#define FRF_BZ_DEST_IP_LBN 64 +#define FRF_BZ_DEST_IP_WIDTH 32 +#define FRF_BZ_DEST_PORT_TCP_LBN 48 +#define FRF_BZ_DEST_PORT_TCP_WIDTH 16 +#define FRF_BZ_SRC_IP_LBN 16 +#define FRF_BZ_SRC_IP_WIDTH 32 +#define 
FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 + +/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ +#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 +#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 +#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_RMFT_RSS_EN_LBN 75 +#define FRF_CZ_RMFT_RSS_EN_WIDTH 1 +#define FRF_CZ_RMFT_SCATTER_EN_LBN 74 +#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 +#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 +#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 +#define FRF_CZ_RMFT_RXQ_ID_LBN 61 +#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 +#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_RMFT_DEST_MAC_LBN 12 +#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48 +#define FRF_CZ_RMFT_VLAN_ID_LBN 0 +#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 + +/* TIMER_TBL: Timer table */ +#define FR_BZ_TIMER_TBL 0x00f70000 +#define FR_BZ_TIMER_TBL_STEP 16 +#define FR_CZ_TIMER_TBL_ROWS 1024 +#define FR_BB_TIMER_TBL_ROWS 4096 +#define FRF_CZ_TIMER_Q_EN_LBN 33 +#define FRF_CZ_TIMER_Q_EN_WIDTH 1 +#define FRF_CZ_INT_ARMD_LBN 32 +#define FRF_CZ_INT_ARMD_WIDTH 1 +#define FRF_CZ_INT_PEND_LBN 31 +#define FRF_CZ_INT_PEND_WIDTH 1 +#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 +#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 +#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 +#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 +#define FRF_CZ_TIMER_MODE_LBN 14 +#define FRF_CZ_TIMER_MODE_WIDTH 2 +#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 +#define FFE_CZ_TIMER_MODE_TRIG_START 2 +#define FFE_CZ_TIMER_MODE_IMMED_START 1 +#define FFE_CZ_TIMER_MODE_DIS 0 +#define FRF_BB_TIMER_MODE_LBN 12 +#define FRF_BB_TIMER_MODE_WIDTH 2 +#define FFE_BB_TIMER_MODE_INT_HLDOFF 2 +#define FFE_BB_TIMER_MODE_TRIG_START 2 +#define FFE_BB_TIMER_MODE_IMMED_START 1 +#define FFE_BB_TIMER_MODE_DIS 0 +#define FRF_CZ_TIMER_VAL_LBN 0 +#define FRF_CZ_TIMER_VAL_WIDTH 14 +#define FRF_BB_TIMER_VAL_LBN 0 +#define FRF_BB_TIMER_VAL_WIDTH 12 + +/* TX_PACE_TBL: Transmit pacing table */ +#define FR_BZ_TX_PACE_TBL 0x00f80000 +#define FR_BZ_TX_PACE_TBL_STEP 16 +#define FR_CZ_TX_PACE_TBL_ROWS 1024 +#define FR_BB_TX_PACE_TBL_ROWS 4096 +#define FRF_BZ_TX_PACE_LBN 0 +#define FRF_BZ_TX_PACE_WIDTH 5 + +/* RX_INDIRECTION_TBL: RX Indirection Table */ +#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 +#define FR_BZ_RX_INDIRECTION_TBL_STEP 16 +#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 +#define FRF_BZ_IT_QUEUE_LBN 0 +#define FRF_BZ_IT_QUEUE_WIDTH 6 + +/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ +#define FR_CZ_TX_FILTER_TBL0 0x00fc0000 +#define FR_CZ_TX_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_FILTER_TBL0_ROWS 8192 +#define FRF_CZ_TIFT_TCP_UDP_LBN 108 +#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 +#define FRF_CZ_TIFT_TXQ_ID_LBN 96 +#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TIFT_DEST_IP_LBN 64 +#define FRF_CZ_TIFT_DEST_IP_WIDTH 32 +#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 +#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 +#define FRF_CZ_TIFT_SRC_IP_LBN 16 +#define FRF_CZ_TIFT_SRC_IP_WIDTH 32 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 + +/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ +#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 +#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_TMFT_TXQ_ID_LBN 61 +#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_TMFT_SRC_MAC_LBN 12 +#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48 +#define FRF_CZ_TMFT_VLAN_ID_LBN 0 +#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 + 
+/* MC_TREG_SMEM: MC Shared Memory */ +#define FR_CZ_MC_TREG_SMEM 0x00ff0000 +#define FR_CZ_MC_TREG_SMEM_STEP 4 +#define FR_CZ_MC_TREG_SMEM_ROWS 512 +#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 +#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 + +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 +#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 +#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 +/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ +#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 +#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 +#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 +#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 +#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 +#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 +#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 + +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_BB_MSIX_PBA_TABLE 0x00ff2000 +#define FR_BZ_MSIX_PBA_TABLE_STEP 4 +#define FR_BB_MSIX_PBA_TABLE_ROWS 2 +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_MSIX_PBA_TABLE 0x00008000 +/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ +#define FR_CZ_MSIX_PBA_TABLE_ROWS 32 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* SRM_DBG_REG: SRAM debug access */ +#define FR_BZ_SRM_DBG 0x03000000 +#define FR_BZ_SRM_DBG_STEP 8 +#define FR_CZ_SRM_DBG_ROWS 262144 +#define FR_BB_SRM_DBG_ROWS 2097152 +#define FRF_BZ_SRM_DBG_LBN 0 +#define FRF_BZ_SRM_DBG_WIDTH 64 + +/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 +#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 +#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* DRIVER_EV */ +#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 +#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 +#define FSE_BZ_TX_DSC_ERROR_EV 15 +#define FSE_BZ_RX_DSC_ERROR_EV 14 +#define FSE_AA_RX_RECOVER_EV 11 +#define FSE_AZ_TIMER_EV 10 +#define FSE_AZ_TX_PKT_NON_TCP_UDP 9 +#define FSE_AZ_WAKE_UP_EV 6 +#define FSE_AZ_SRM_UPD_DONE_EV 5 +#define FSE_AB_EVQ_NOT_EN_EV 3 +#define FSE_AZ_EVQ_INIT_DONE_EV 2 +#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 +#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 + +/* EVENT_ENTRY */ +#define FSF_AZ_EV_CODE_LBN 60 +#define FSF_AZ_EV_CODE_WIDTH 4 +#define FSE_CZ_EV_CODE_MCDI_EV 12 +#define FSE_CZ_EV_CODE_USER_EV 8 +#define FSE_AZ_EV_CODE_DRV_GEN_EV 7 +#define FSE_AZ_EV_CODE_GLOBAL_EV 6 +#define FSE_AZ_EV_CODE_DRIVER_EV 5 +#define FSE_AZ_EV_CODE_TX_EV 2 +#define FSE_AZ_EV_CODE_RX_EV 0 +#define FSF_AZ_EV_DATA_LBN 0 +#define FSF_AZ_EV_DATA_WIDTH 60 + +/* GLOBAL_EV */ +#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 +#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 +#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 +#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 +#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 + +/* LEGACY_INT_VEC */ +#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 +#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 +#define 
FSF_AZ_NET_IVEC_INT_Q_LBN 40 +#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 +#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 +#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 + +/* MC_XGMAC_FLTR_RULE_DEF */ +#define FSF_CZ_MC_XFRC_MODE_LBN 416 +#define FSF_CZ_MC_XFRC_MODE_WIDTH 1 +#define FSE_CZ_MC_XFRC_MODE_LAYERED 1 +#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 +#define FSF_CZ_MC_XFRC_HASH_LBN 384 +#define FSF_CZ_MC_XFRC_HASH_WIDTH 32 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 + +/* RX_EV */ +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 +#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 +#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_OK_LBN 56 +#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 +#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 +#define FSF_AA_RX_EV_DRIB_NIB_LBN 49 +#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 +#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 +#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 +#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 +#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 +#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 +#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 +#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 +#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 +#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 +#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 +#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 +#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 +#define FSF_AZ_RX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 +#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 +#define FSF_AZ_RX_EV_PORT_LBN 30 +#define FSF_AZ_RX_EV_PORT_WIDTH 1 +#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 +#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 +#define FSF_AZ_RX_EV_SOP_LBN 15 +#define FSF_AZ_RX_EV_SOP_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 + +/* RX_KER_DESC */ +#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 +#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 +#define FSF_AZ_RX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 + +/* RX_USER_DESC */ +#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 +#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 +#define FSF_AZ_RX_USER_BUF_ID_LBN 0 +#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 + +/* TX_EV */ +#define FSF_AZ_TX_EV_PKT_ERR_LBN 38 +#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 +#define FSF_AZ_TX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_TX_EV_PORT_LBN 16 +#define FSF_AZ_TX_EV_PORT_WIDTH 1 +#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 +#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_COMP_LBN 12 +#define FSF_AZ_TX_EV_COMP_WIDTH 1 +#define FSF_AZ_TX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 + +/* TX_KER_DESC */ +#define FSF_AZ_TX_KER_CONT_LBN 62 +#define FSF_AZ_TX_KER_CONT_WIDTH 1 +#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 +#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 +#define FSF_AZ_TX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 + +/* TX_USER_DESC */ +#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 +#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 +#define FSF_AZ_TX_USER_CONT_LBN 46 +#define FSF_AZ_TX_USER_CONT_WIDTH 1 +#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 +#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 +#define FSF_AZ_TX_USER_BUF_ID_LBN 13 +#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 +#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 +#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 + +/* USER_EV */ +#define FSF_CZ_USER_QID_LBN 32 +#define FSF_CZ_USER_QID_WIDTH 10 +#define FSF_CZ_USER_EV_REG_VALUE_LBN 0 +#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 + +/************************************************************************** + * + * Falcon B0 PCIe core indirect registers + * + ************************************************************************** + */ + +#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 + +#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 + +#define FPCR_BB_ACK_RPL_TIMER 0x700 +#define FPCRF_BB_ACK_TL_LBN 0 +#define FPCRF_BB_ACK_TL_WIDTH 16 +#define FPCRF_BB_RPL_TL_LBN 16 +#define FPCRF_BB_RPL_TL_WIDTH 16 + +#define FPCR_BB_ACK_FREQ 0x70C +#define FPCRF_BB_ACK_FREQ_LBN 0 +#define FPCRF_BB_ACK_FREQ_WIDTH 7 + +/************************************************************************** + * + * Pseudo-registers and fields + * + ************************************************************************** + */ + +/* Interrupt acknowledge work-around register (A0/A1 only) */ +#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 + +/* EE_SPI_HCMD_REG: SPI host command register */ +/* Values for the EE_SPI_HCMD_SF_SEL register field */ +#define FFE_AB_SPI_DEVICE_EEPROM 0 +#define FFE_AB_SPI_DEVICE_FLASH 1 + +/* NIC_STAT_REG: NIC status register */ +#define FRF_AB_STRAP_10G_LBN 2 +#define FRF_AB_STRAP_10G_WIDTH 1 +#define FRF_AA_STRAP_PCIE_LBN 0 +#define FRF_AA_STRAP_PCIE_WIDTH 1 + +/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ +#define FRF_AZ_FATAL_INTR_LBN 0 +#define FRF_AZ_FATAL_INTR_WIDTH 12 + +/* SRM_CFG_REG: SRAM configuration 
register */ +/* We treat the number of SRAM banks and bank size as a single field */ +#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN +#define FRF_AZ_SRM_NB_SZ_WIDTH \ + (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) +#define FFE_AB_SRM_NB1_SZ2M 0 +#define FFE_AB_SRM_NB1_SZ4M 1 +#define FFE_AB_SRM_NB1_SZ8M 2 +#define FFE_AB_SRM_NB_SZ_DEF 3 +#define FFE_AB_SRM_NB2_SZ4M 4 +#define FFE_AB_SRM_NB2_SZ8M 5 +#define FFE_AB_SRM_NB2_SZ16M 6 +#define FFE_AB_SRM_NB_SZ_RES 7 + +/* RX_DESC_UPD_REGP0: Receive descriptor update register. */ +/* We write just the last dword of these registers */ +#define FR_AZ_RX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ + FR_BZ_RX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH + +/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ +#define FR_AZ_TX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ + FR_BZ_TX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH + +/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 + +/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 + +/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) + +/* XM_RX_PARAM_REG: XGMAC receive parameter register */ +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) + +/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ +/* Default values */ +#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ +#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ +#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ + +/* XX_CORE_STAT_REG: XAUI XGXS core status register */ +/* XGXS all-lanes status fields */ +#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN +#define FRF_AB_XX_SYNC_STAT_WIDTH 4 +#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN +#define FRF_AB_XX_COMMA_DET_WIDTH 4 +#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN +#define FRF_AB_XX_CHAR_ERR_WIDTH 4 +#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN +#define FRF_AB_XX_DISPERR_WIDTH 4 +#define FFE_AB_XX_STAT_ALL_LANES 0xf +#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN +#define FRF_AB_XX_FORCE_SIG_WIDTH 8 +#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff + +/* RX_MAC_FILTER_TBL0 */ +/* RMFT_DEST_MAC is wider than 32 bits */ +#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN +#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 +#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32) +#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32) + +/* TX_MAC_FILTER_TBL0 */ +/* TMFT_SRC_MAC is wider than 32 bits */ +#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN +#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 +#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32) +#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32) + 
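For reference, the *_LBN/*_WIDTH pairs in this header follow one convention: LBN is the lowest bit number of a field within the register or event word, WIDTH is its size in bits, and fields wider than 32 bits (such as RMFT_DEST_MAC and TMFT_SRC_MAC above) are additionally split into _LO/_HI halves so that each half fits a single dword access. The driver itself consumes these pairs through its EFX_*_FIELD bitfield macros (EFX_DWORD_FIELD is used that way in mcdi.c later in this series); the snippet below is only a minimal standalone sketch of the same shift-and-mask idea, reusing the FSF_AZ_RX_EV_BYTE_CNT values defined above — the get_field() helper is illustrative, not a driver API.

#include <stdint.h>
#include <stdio.h>

#define FSF_AZ_RX_EV_BYTE_CNT_LBN   16
#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14

/* Extract a bitfield described by an (LBN, WIDTH) pair from a 64-bit word. */
static uint64_t get_field(uint64_t word, unsigned int lbn, unsigned int width)
{
	uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

	return (word >> lbn) & mask;
}

int main(void)
{
	/* Place a 14-bit byte count of 1500 at bit 16, as the RX_EV layout
	 * above describes, then pull it back out with the same LBN/WIDTH pair.
	 */
	uint64_t rx_ev = (uint64_t)1500 << FSF_AZ_RX_EV_BYTE_CNT_LBN;

	printf("byte count = %llu\n",
	       (unsigned long long)get_field(rx_ev, FSF_AZ_RX_EV_BYTE_CNT_LBN,
					     FSF_AZ_RX_EV_BYTE_CNT_WIDTH));
	return 0;
}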
+/* TX_PACE_TBL */ +/* Values >20 are documented as reserved, but will result in a queue going + * into the fast bin with a pace value of zero. */ +#define FFE_BZ_TX_PACE_OFF 0 +#define FFE_BZ_TX_PACE_RESERVED 21 + +/* DRIVER_EV */ +/* Sub-fields of an RX flush completion event */ +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 + +/* EVENT_ENTRY */ +/* Magic number field for event test */ +#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 +#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 + +/* RX packet prefix */ +#define FS_BZ_RX_PREFIX_HASH_OFST 12 +#define FS_BZ_RX_PREFIX_SIZE 16 + +#endif /* EFX_FARCH_REGS_H */ diff --git a/drivers/net/ethernet/sfc/siena/filter.h b/drivers/net/ethernet/sfc/siena/filter.h new file mode 100644 index 000000000000..40b2af8bfb81 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/filter.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_FILTER_H +#define EFX_FILTER_H + +#include +#include +#include + +/** + * enum efx_filter_match_flags - Flags for hardware filter match type + * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address + * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address + * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address + * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port + * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address + * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port + * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type + * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID + * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID + * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol + * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type. + * Used for RX default unicast and multicast/broadcast filters. + * + * Only some combinations are supported, depending on NIC type: + * + * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or + * local 2-tuple (only implemented for Falcon B0) + * + * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple + * or local 2-tuple, or local MAC with or without outer VID, and RX + * default filters + * + * - Huntington supports filter matching controlled by firmware, potentially + * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit, + * with or without outer and inner VID + */ +enum efx_filter_match_flags { + EFX_FILTER_MATCH_REM_HOST = 0x0001, + EFX_FILTER_MATCH_LOC_HOST = 0x0002, + EFX_FILTER_MATCH_REM_MAC = 0x0004, + EFX_FILTER_MATCH_REM_PORT = 0x0008, + EFX_FILTER_MATCH_LOC_MAC = 0x0010, + EFX_FILTER_MATCH_LOC_PORT = 0x0020, + EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, + EFX_FILTER_MATCH_INNER_VID = 0x0080, + EFX_FILTER_MATCH_OUTER_VID = 0x0100, + EFX_FILTER_MATCH_IP_PROTO = 0x0200, + EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, + EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800, +}; + +/** + * enum efx_filter_priority - priority of a hardware filter specification + * @EFX_FILTER_PRI_HINT: Performance hint + * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list + * or hardware requirements. This may only be used by the filter + * implementation for each NIC type. 
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter + * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level + * networking and SR-IOV) + */ +enum efx_filter_priority { + EFX_FILTER_PRI_HINT = 0, + EFX_FILTER_PRI_AUTO, + EFX_FILTER_PRI_MANUAL, + EFX_FILTER_PRI_REQUIRED, +}; + +/** + * enum efx_filter_flags - flags for hardware filter specifications + * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues. + * By default, matching packets will be delivered only to the + * specified queue. If this flag is set, they will be delivered + * to a range of queues offset from the specified queue number + * according to the indirection table. + * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving + * queue. + * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is + * overriding an automatic filter (priority + * %EFX_FILTER_PRI_AUTO). This may only be set by the filter + * implementation for each type. A removal request will restore + * the automatic filter in its place. + * @EFX_FILTER_FLAG_RX: Filter is for RX + * @EFX_FILTER_FLAG_TX: Filter is for TX + */ +enum efx_filter_flags { + EFX_FILTER_FLAG_RX_RSS = 0x01, + EFX_FILTER_FLAG_RX_SCATTER = 0x02, + EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04, + EFX_FILTER_FLAG_RX = 0x08, + EFX_FILTER_FLAG_TX = 0x10, +}; + +/** enum efx_encap_type - types of encapsulation + * @EFX_ENCAP_TYPE_NONE: no encapsulation + * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation + * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation + * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation + * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame + * + * Contains both enumerated types and flags. + * To get just the type, OR with @EFX_ENCAP_TYPES_MASK. + */ +enum efx_encap_type { + EFX_ENCAP_TYPE_NONE = 0, + EFX_ENCAP_TYPE_VXLAN = 1, + EFX_ENCAP_TYPE_NVGRE = 2, + EFX_ENCAP_TYPE_GENEVE = 3, + + EFX_ENCAP_TYPES_MASK = 7, + EFX_ENCAP_FLAG_IPV6 = 8, +}; + +/** + * struct efx_filter_spec - specification for a hardware filter + * @match_flags: Match type flags, from &enum efx_filter_match_flags + * @priority: Priority of the filter, from &enum efx_filter_priority + * @flags: Miscellaneous flags, from &enum efx_filter_flags + * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set. This + * is a user_id (with 0 meaning the driver/default RSS context), not an + * MCFW context_id. + * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for + * an RX drop filter + * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set + * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set + * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or + * %EFX_FILTER_MATCH_LOC_MAC_IG is set + * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set + * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set + * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO + * is set + * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set + * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set + * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set + * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set + * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if + * %EFX_FILTER_MATCH_ENCAP_TYPE is set + * + * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be + * used to initialise the structure. 
The efx_filter_set_*() functions + * may then be used to set @rss_context, @match_flags and related + * fields. + * + * The @priority field is used by software to determine whether a new + * filter may replace an old one. The hardware priority of a filter + * depends on which fields are matched. + */ +struct efx_filter_spec { + u32 match_flags:12; + u32 priority:2; + u32 flags:6; + u32 dmaq_id:12; + u32 rss_context; + __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + __be16 inner_vid; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + __be16 ether_type; + u8 ip_proto; + __be32 loc_host[4]; + __be32 rem_host[4]; + __be16 loc_port; + __be16 rem_port; + u32 encap_type:4; + /* total 65 bytes */ +}; + +enum { + EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff +}; + +static inline void efx_filter_init_rx(struct efx_filter_spec *spec, + enum efx_filter_priority priority, + enum efx_filter_flags flags, + unsigned rxq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = priority; + spec->flags = EFX_FILTER_FLAG_RX | flags; + spec->rss_context = 0; + spec->dmaq_id = rxq_id; +} + +static inline void efx_filter_init_tx(struct efx_filter_spec *spec, + unsigned txq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = EFX_FILTER_PRI_REQUIRED; + spec->flags = EFX_FILTER_FLAG_TX; + spec->dmaq_id = txq_id; +} + +/** + * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @host: Local host address (network byte order) + * @port: Local port (network byte order) + */ +static inline int +efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, + __be32 host, __be16 port) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = host; + spec->loc_port = port; + return 0; +} + +/** + * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @lhost: Local host address (network byte order) + * @lport: Local port (network byte order) + * @rhost: Remote host address (network byte order) + * @rport: Remote port (network byte order) + */ +static inline int +efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, + __be32 lhost, __be16 lport, + __be32 rhost, __be16 rport) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = lhost; + spec->loc_port = lport; + spec->rem_host[0] = rhost; + spec->rem_port = rport; + return 0; +} + +enum { + EFX_FILTER_VID_UNSPEC = 0xffff, +}; + +/** + * efx_filter_set_eth_local - specify local Ethernet address and/or VID + * @spec: Specification to initialise + * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC + * @addr: Local Ethernet MAC address, or %NULL + */ +static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec, + u16 vid, const u8 *addr) +{ + if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL) + return -EINVAL; + + if (vid != EFX_FILTER_VID_UNSPEC) { + spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec->outer_vid = htons(vid); + } + if (addr != NULL) { + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; + 
ether_addr_copy(spec->loc_mac, addr); + } + return 0; +} + +/** + * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast + * @spec: Specification to initialise + */ +static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + return 0; +} + +/** + * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast + * @spec: Specification to initialise + */ +static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + spec->loc_mac[0] = 1; + return 0; +} + +static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec, + enum efx_encap_type encap_type) +{ + spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; + spec->encap_type = encap_type; +} + +static inline enum efx_encap_type efx_filter_get_encap_type( + const struct efx_filter_spec *spec) +{ + if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE) + return spec->encap_type; + return EFX_ENCAP_TYPE_NONE; +} +#endif /* EFX_FILTER_H */ diff --git a/drivers/net/ethernet/sfc/siena/io.h b/drivers/net/ethernet/sfc/siena/io.h new file mode 100644 index 000000000000..30439cc83a89 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/io.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_IO_H +#define EFX_IO_H + +#include +#include + +/************************************************************************** + * + * NIC register I/O + * + ************************************************************************** + * + * Notes on locking strategy for the Falcon architecture: + * + * Many CSRs are very wide and cannot be read or written atomically. + * Writes from the host are buffered by the Bus Interface Unit (BIU) + * up to 128 bits. Whenever the host writes part of such a register, + * the BIU collects the written value and does not write to the + * underlying register until all 4 dwords have been written. A + * similar buffering scheme applies to host access to the NIC's 64-bit + * SRAM. + * + * Writes to different CSRs and 64-bit SRAM words must be serialised, + * since interleaved access can result in lost writes. We use + * efx_nic::biu_lock for this. + * + * We also serialise reads from 128-bit CSRs and SRAM with the same + * spinlock. This may not be necessary, but it doesn't really matter + * as there are no such reads on the fast path. + * + * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are + * 128-bit but are special-cased in the BIU to avoid the need for + * locking in the host: + * + * - They are write-only. + * - The semantics of writing to these registers are such that + * replacing the low 96 bits with zero does not affect functionality. + * - If the host writes to the last dword address of such a register + * (i.e. the high 32 bits) the underlying register will always be + * written. If the collector and the current write together do not + * provide values for all 128 bits of the register, the low 96 bits + * will be written as zero. + * - If the host writes to the address of any other part of such a + * register while the collector already holds values for some other + * register, the write is discarded and the collector maintains its + * current state. 
+ * + * The EF10 architecture exposes very few registers to the host and + * most of them are only 32 bits wide. The only exceptions are the MC + * doorbell register pair, which has its own latching, and + * TX_DESC_UPD, which works in a similar way to the Falcon + * architecture. + */ + +#if BITS_PER_LONG == 64 +#define EFX_USE_QWORD_IO 1 +#endif + +/* Hardware issue requires that only 64-bit naturally aligned writes + * are seen by hardware. Its not strictly necessary to restrict to + * x86_64 arch, but done for safety since unusual write combining behaviour + * can break PIO. + */ +#ifdef CONFIG_X86_64 +/* PIO is a win only if write-combining is possible */ +#ifdef ARCH_HAS_IOREMAP_WC +#define EFX_USE_PIO 1 +#endif +#endif + +static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg) +{ + return efx->reg_base + reg; +} + +#ifdef EFX_USE_QWORD_IO +static inline void _efx_writeq(struct efx_nic *efx, __le64 value, + unsigned int reg) +{ + __raw_writeq((__force u64)value, efx->membase + reg); +} +static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg) +{ + return (__force __le64)__raw_readq(efx->membase + reg); +} +#endif + +static inline void _efx_writed(struct efx_nic *efx, __le32 value, + unsigned int reg) +{ + __raw_writel((__force u32)value, efx->membase + reg); +} +static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg) +{ + return (__force __le32)__raw_readl(efx->membase + reg); +} + +/* Write a normal 128-bit CSR, locking as appropriate. */ +static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value, + unsigned int reg) +{ + unsigned long flags __attribute__ ((unused)); + + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + _efx_writeq(efx, value->u64[0], reg + 0); + _efx_writeq(efx, value->u64[1], reg + 8); +#else + _efx_writed(efx, value->u32[0], reg + 0); + _efx_writed(efx, value->u32[1], reg + 4); + _efx_writed(efx, value->u32[2], reg + 8); + _efx_writed(efx, value->u32[3], reg + 12); +#endif + spin_unlock_irqrestore(&efx->biu_lock, flags); +} + +/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ +static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, + const efx_qword_t *value, unsigned int index) +{ + unsigned int addr = index * sizeof(*value); + unsigned long flags __attribute__ ((unused)); + + netif_vdbg(efx, hw, efx->net_dev, + "writing SRAM address %x with " EFX_QWORD_FMT "\n", + addr, EFX_QWORD_VAL(*value)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + __raw_writeq((__force u64)value->u64[0], membase + addr); +#else + __raw_writel((__force u32)value->u32[0], membase + addr); + __raw_writel((__force u32)value->u32[1], membase + addr + 4); +#endif + spin_unlock_irqrestore(&efx->biu_lock, flags); +} + +/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ +static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg) +{ + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); + + /* No lock required */ + _efx_writed(efx, value->u32[0], reg); +} + +/* Read a 128-bit CSR, locking as appropriate. 
*/ +static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg) +{ + unsigned long flags __attribute__ ((unused)); + + spin_lock_irqsave(&efx->biu_lock, flags); + value->u32[0] = _efx_readd(efx, reg + 0); + value->u32[1] = _efx_readd(efx, reg + 4); + value->u32[2] = _efx_readd(efx, reg + 8); + value->u32[3] = _efx_readd(efx, reg + 12); + spin_unlock_irqrestore(&efx->biu_lock, flags); + + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); +} + +/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */ +static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, + efx_qword_t *value, unsigned int index) +{ + unsigned int addr = index * sizeof(*value); + unsigned long flags __attribute__ ((unused)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + value->u64[0] = (__force __le64)__raw_readq(membase + addr); +#else + value->u32[0] = (__force __le32)__raw_readl(membase + addr); + value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); +#endif + spin_unlock_irqrestore(&efx->biu_lock, flags); + + netif_vdbg(efx, hw, efx->net_dev, + "read from SRAM address %x, got "EFX_QWORD_FMT"\n", + addr, EFX_QWORD_VAL(*value)); +} + +/* Read a 32-bit CSR or SRAM */ +static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, + unsigned int reg) +{ + value->u32[0] = _efx_readd(efx, reg); + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); +} + +/* Write a 128-bit CSR forming part of a table */ +static inline void +efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* Read a 128-bit CSR forming part of a table */ +static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* default VI stride (step between per-VI registers) is 8K on EF10 and + * 64K on EF100 + */ +#define EFX_DEFAULT_VI_STRIDE 0x2000 +#define EF100_DEFAULT_VI_STRIDE 0x10000 + +/* Calculate offset to page-mapped register */ +static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page, + unsigned int reg) +{ + return page * efx->vi_stride + reg; +} + +/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ +static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int page) +{ + reg = efx_paged_reg(efx, page, reg); + + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); + +#ifdef EFX_USE_QWORD_IO + _efx_writeq(efx, value->u64[0], reg + 0); + _efx_writeq(efx, value->u64[1], reg + 8); +#else + _efx_writed(efx, value->u32[0], reg + 0); + _efx_writed(efx, value->u32[1], reg + 4); + _efx_writed(efx, value->u32[2], reg + 8); + _efx_writed(efx, value->u32[3], reg + 12); +#endif +} +#define efx_writeo_page(efx, value, reg, page) \ + _efx_writeo_page(efx, value, \ + reg + \ + BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ + page) + +/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the + * high bits of RX_DESC_UPD or TX_DESC_UPD) + */ +static inline void +_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg, unsigned int page) +{ + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); +} +#define 
efx_writed_page(efx, value, reg, page) \ + _efx_writed_page(efx, value, \ + reg + \ + BUILD_BUG_ON_ZERO((reg) != 0x180 && \ + (reg) != 0x200 && \ + (reg) != 0x400 && \ + (reg) != 0x420 && \ + (reg) != 0x830 && \ + (reg) != 0x83c && \ + (reg) != 0xa18 && \ + (reg) != 0xa1c), \ + page) + +/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug + * in the BIU means that writes to TIMER_COMMAND[0] invalidate the + * collector register. + */ +static inline void _efx_writed_page_locked(struct efx_nic *efx, + const efx_dword_t *value, + unsigned int reg, + unsigned int page) +{ + unsigned long flags __attribute__ ((unused)); + + if (page == 0) { + spin_lock_irqsave(&efx->biu_lock, flags); + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); + spin_unlock_irqrestore(&efx->biu_lock, flags); + } else { + efx_writed(efx, value, efx_paged_reg(efx, page, reg)); + } +} +#define efx_writed_page_locked(efx, value, reg, page) \ + _efx_writed_page_locked(efx, value, \ + reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \ + page) + +#endif /* EFX_IO_H */ -- cgit v1.2.3 From d48523cb88e0703055c1b33e61eb644a7976f92b Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:31:43 +0100 Subject: sfc: Copy shared files needed for Siena (part 2) These are the files starting with m through w. No changes are done, those will be done with subsequent commits. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/mcdi.c | 2375 +++++++++++++++++++++ drivers/net/ethernet/sfc/siena/mcdi.h | 388 ++++ drivers/net/ethernet/sfc/siena/mcdi_mon.c | 531 +++++ drivers/net/ethernet/sfc/siena/mcdi_port.c | 117 + drivers/net/ethernet/sfc/siena/mcdi_port.h | 18 + drivers/net/ethernet/sfc/siena/mcdi_port_common.c | 1301 +++++++++++ drivers/net/ethernet/sfc/siena/mcdi_port_common.h | 67 + drivers/net/ethernet/sfc/siena/mtd.c | 124 ++ drivers/net/ethernet/sfc/siena/net_driver.h | 1716 +++++++++++++++ drivers/net/ethernet/sfc/siena/nic.c | 580 +++++ drivers/net/ethernet/sfc/siena/nic.h | 392 ++++ drivers/net/ethernet/sfc/siena/nic_common.h | 262 +++ drivers/net/ethernet/sfc/siena/ptp.c | 2210 +++++++++++++++++++ drivers/net/ethernet/sfc/siena/ptp.h | 45 + drivers/net/ethernet/sfc/siena/rx.c | 399 ++++ drivers/net/ethernet/sfc/siena/rx_common.c | 1086 ++++++++++ drivers/net/ethernet/sfc/siena/rx_common.h | 116 + drivers/net/ethernet/sfc/siena/selftest.c | 807 +++++++ drivers/net/ethernet/sfc/siena/selftest.h | 52 + drivers/net/ethernet/sfc/siena/sriov.c | 72 + drivers/net/ethernet/sfc/siena/sriov.h | 25 + drivers/net/ethernet/sfc/siena/tx.c | 643 ++++++ drivers/net/ethernet/sfc/siena/tx.h | 47 + drivers/net/ethernet/sfc/siena/tx_common.c | 449 ++++ drivers/net/ethernet/sfc/siena/tx_common.h | 45 + drivers/net/ethernet/sfc/siena/vfdi.h | 252 +++ drivers/net/ethernet/sfc/siena/workarounds.h | 34 + 27 files changed, 14153 insertions(+) create mode 100644 drivers/net/ethernet/sfc/siena/mcdi.c create mode 100644 drivers/net/ethernet/sfc/siena/mcdi.h create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_mon.c create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port.c create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port.h create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port_common.c create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port_common.h create mode 100644 drivers/net/ethernet/sfc/siena/mtd.c create mode 100644 drivers/net/ethernet/sfc/siena/net_driver.h create mode 100644 drivers/net/ethernet/sfc/siena/nic.c create mode 100644 
drivers/net/ethernet/sfc/siena/nic.h create mode 100644 drivers/net/ethernet/sfc/siena/nic_common.h create mode 100644 drivers/net/ethernet/sfc/siena/ptp.c create mode 100644 drivers/net/ethernet/sfc/siena/ptp.h create mode 100644 drivers/net/ethernet/sfc/siena/rx.c create mode 100644 drivers/net/ethernet/sfc/siena/rx_common.c create mode 100644 drivers/net/ethernet/sfc/siena/rx_common.h create mode 100644 drivers/net/ethernet/sfc/siena/selftest.c create mode 100644 drivers/net/ethernet/sfc/siena/selftest.h create mode 100644 drivers/net/ethernet/sfc/siena/sriov.c create mode 100644 drivers/net/ethernet/sfc/siena/sriov.h create mode 100644 drivers/net/ethernet/sfc/siena/tx.c create mode 100644 drivers/net/ethernet/sfc/siena/tx.h create mode 100644 drivers/net/ethernet/sfc/siena/tx_common.c create mode 100644 drivers/net/ethernet/sfc/siena/tx_common.h create mode 100644 drivers/net/ethernet/sfc/siena/vfdi.h create mode 100644 drivers/net/ethernet/sfc/siena/workarounds.h (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c new file mode 100644 index 000000000000..50baf62b2cbc --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi.c @@ -0,0 +1,2375 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include "net_driver.h" +#include "nic.h" +#include "io.h" +#include "farch_regs.h" +#include "mcdi_pcol.h" + +/************************************************************************** + * + * Management-Controller-to-Driver Interface + * + ************************************************************************** + */ + +#define MCDI_RPC_TIMEOUT (10 * HZ) + +/* A reboot/assertion causes the MCDI status word to be set after the + * command word is set or a REBOOT event is sent. If we notice a reboot + * via these mechanisms then wait 250ms for the status word to be set. 
+ */ +#define MCDI_STATUS_DELAY_US 100 +#define MCDI_STATUS_DELAY_COUNT 2500 +#define MCDI_STATUS_SLEEP_MS \ + (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) + +#define SEQ_MASK \ + EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) + +struct efx_mcdi_async_param { + struct list_head list; + unsigned int cmd; + size_t inlen; + size_t outlen; + bool quiet; + efx_mcdi_async_completer *complete; + unsigned long cookie; + /* followed by request/response buffer */ +}; + +static void efx_mcdi_timeout_async(struct timer_list *t); +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached_out); +static bool efx_mcdi_poll_once(struct efx_nic *efx); +static void efx_mcdi_abandon(struct efx_nic *efx); + +#ifdef CONFIG_SFC_MCDI_LOGGING +static bool mcdi_logging_default; +module_param(mcdi_logging_default, bool, 0644); +MODULE_PARM_DESC(mcdi_logging_default, + "Enable MCDI logging on newly-probed functions"); +#endif + +int efx_mcdi_init(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + bool already_attached; + int rc = -ENOMEM; + + efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); + if (!efx->mcdi) + goto fail; + + mcdi = efx_mcdi(efx); + mcdi->efx = efx; +#ifdef CONFIG_SFC_MCDI_LOGGING + /* consuming code assumes buffer is page-sized */ + mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL); + if (!mcdi->logging_buffer) + goto fail1; + mcdi->logging_enabled = mcdi_logging_default; +#endif + init_waitqueue_head(&mcdi->wq); + init_waitqueue_head(&mcdi->proxy_rx_wq); + spin_lock_init(&mcdi->iface_lock); + mcdi->state = MCDI_STATE_QUIESCENT; + mcdi->mode = MCDI_MODE_POLL; + spin_lock_init(&mcdi->async_lock); + INIT_LIST_HEAD(&mcdi->async_list); + timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0); + + (void) efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + + /* Recover from a failed assertion before probing */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + goto fail2; + + /* Let the MC (and BMC, if this is a LOM) know that the driver + * is loaded. We should do this before we reset the NIC. 
+ */ + rc = efx_mcdi_drv_attach(efx, true, &already_attached); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Unable to register driver with MCPU\n"); + goto fail2; + } + if (already_attached) + /* Not a fatal error */ + netif_err(efx, probe, efx->net_dev, + "Host already registered with MCPU\n"); + + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + efx->primary = efx; + + return 0; +fail2: +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)mcdi->logging_buffer); +fail1: +#endif + kfree(efx->mcdi); + efx->mcdi = NULL; +fail: + return rc; +} + +void efx_mcdi_detach(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; + + BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); + + /* Relinquish the device (back to the BMC, if this is a LOM) */ + efx_mcdi_drv_attach(efx, false, NULL); +} + +void efx_mcdi_fini(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; + +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)efx->mcdi->iface.logging_buffer); +#endif + + kfree(efx->mcdi); +} + +static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif + efx_dword_t hdr[2]; + size_t hdr_len; + u32 xflags, seqno; + + BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT); + + /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + seqno = mcdi->seqno & SEQ_MASK; + spin_unlock_bh(&mcdi->iface_lock); + + xflags = 0; + if (mcdi->mode == MCDI_MODE_EVENTS) + xflags |= MCDI_HEADER_XFLAGS_EVREQ; + + if (efx->type->mcdi_max_ver == 1) { + /* MCDI v1 */ + EFX_POPULATE_DWORD_7(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, cmd, + MCDI_HEADER_DATALEN, inlen, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); + hdr_len = 4; + } else { + /* MCDI v2 */ + BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); + EFX_POPULATE_DWORD_7(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, MC_CMD_V2_EXTN, + MCDI_HEADER_DATALEN, 0, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); + EFX_POPULATE_DWORD_2(hdr[1], + MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, + MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); + hdr_len = 8; + } + +#ifdef CONFIG_SFC_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + int bytes = 0; + int i; + /* Lengths should always be a whole number of dwords, so scream + * if they're not. + */ + WARN_ON_ONCE(hdr_len % 4); + WARN_ON_ONCE(inlen % 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. 
+ */ + for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++) + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(hdr[i].u32[0])); + + for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++) + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", + le32_to_cpu(inbuf[i].u32[0])); + + netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); + } +#endif + + efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); + + mcdi->new_epoch = false; +} + +static int efx_mcdi_errno(unsigned int mcdi_err) +{ + switch (mcdi_err) { + case 0: + return 0; +#define TRANSLATE_ERROR(name) \ + case MC_CMD_ERR_ ## name: \ + return -name; + TRANSLATE_ERROR(EPERM); + TRANSLATE_ERROR(ENOENT); + TRANSLATE_ERROR(EINTR); + TRANSLATE_ERROR(EAGAIN); + TRANSLATE_ERROR(EACCES); + TRANSLATE_ERROR(EBUSY); + TRANSLATE_ERROR(EINVAL); + TRANSLATE_ERROR(EDEADLK); + TRANSLATE_ERROR(ENOSYS); + TRANSLATE_ERROR(ETIME); + TRANSLATE_ERROR(EALREADY); + TRANSLATE_ERROR(ENOSPC); +#undef TRANSLATE_ERROR + case MC_CMD_ERR_ENOTSUP: + return -EOPNOTSUPP; + case MC_CMD_ERR_ALLOC_FAIL: + return -ENOBUFS; + case MC_CMD_ERR_MAC_EXIST: + return -EADDRINUSE; + default: + return -EPROTO; + } +} + +static void efx_mcdi_read_response_header(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + unsigned int respseq, respcmd, error; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif + efx_dword_t hdr; + + efx->type->mcdi_read_response(efx, &hdr, 0, 4); + respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ); + respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); + error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); + + if (respcmd != MC_CMD_V2_EXTN) { + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); + } else { + efx->type->mcdi_read_response(efx, &hdr, 4, 4); + mcdi->resp_hdr_len = 8; + mcdi->resp_data_len = + EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); + } + +#ifdef CONFIG_SFC_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + size_t hdr_len, data_len; + int bytes = 0; + int i; + + WARN_ON_ONCE(mcdi->resp_hdr_len % 4); + hdr_len = mcdi->resp_hdr_len / 4; + /* MCDI_DECLARE_BUF ensures that underlying buffer is padded + * to dword size, and the MCDI buffer is always dword size + */ + data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. 
+ */ + for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, + mcdi->resp_hdr_len + (i * 4), 4); + bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf); + } +#endif + + mcdi->resprc_raw = 0; + if (error && mcdi->resp_data_len == 0) { + netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); + mcdi->resprc = -EIO; + } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx seq 0x%x\n", + respseq, mcdi->seqno); + mcdi->resprc = -EIO; + } else if (error) { + efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); + mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0); + mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw); + } else { + mcdi->resprc = 0; + } +} + +static bool efx_mcdi_poll_once(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + rmb(); + if (!efx->type->mcdi_poll_response(efx)) + return false; + + spin_lock_bh(&mcdi->iface_lock); + efx_mcdi_read_response_header(efx); + spin_unlock_bh(&mcdi->iface_lock); + + return true; +} + +static int efx_mcdi_poll(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + unsigned long time, finish; + unsigned int spins; + int rc; + + /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ + rc = efx_mcdi_poll_reboot(efx); + if (rc) { + spin_lock_bh(&mcdi->iface_lock); + mcdi->resprc = rc; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + spin_unlock_bh(&mcdi->iface_lock); + return 0; + } + + /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, + * because generally mcdi responses are fast. After that, back off + * and poll once a jiffy (approximately) + */ + spins = USER_TICK_USEC; + finish = jiffies + MCDI_RPC_TIMEOUT; + + while (1) { + if (spins != 0) { + --spins; + udelay(1); + } else { + schedule_timeout_uninterruptible(1); + } + + time = jiffies; + + if (efx_mcdi_poll_once(efx)) + break; + + if (time_after(time, finish)) + return -ETIMEDOUT; + } + + /* Return rc=0 like wait_event_timeout() */ + return 0; +} + +/* Test and clear MC-rebooted flag for this port/function; reset + * software state as necessary. + */ +int efx_mcdi_poll_reboot(struct efx_nic *efx) +{ + if (!efx->mcdi) + return 0; + + return efx->type->mcdi_poll_reboot(efx); +} + +static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi) +{ + return cmpxchg(&mcdi->state, + MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) == + MCDI_STATE_QUIESCENT; +} + +static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi) +{ + /* Wait until the interface becomes QUIESCENT and we win the race + * to mark it RUNNING_SYNC. + */ + wait_event(mcdi->wq, + cmpxchg(&mcdi->state, + MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) == + MCDI_STATE_QUIESCENT); +} + +static int efx_mcdi_await_completion(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED, + MCDI_RPC_TIMEOUT) == 0) + return -ETIMEDOUT; + + /* Check if efx_mcdi_set_mode() switched us back to polled completions. + * In which case, poll for completions directly. 
If efx_mcdi_ev_cpl() + * completed the request first, then we'll just end up completing the + * request again, which is safe. + * + * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which + * wait_event_timeout() implicitly provides. + */ + if (mcdi->mode == MCDI_MODE_POLL) + return efx_mcdi_poll(efx); + + return 0; +} + +/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the + * requester. Return whether this was done. Does not take any locks. + */ +static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi) +{ + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) == + MCDI_STATE_RUNNING_SYNC) { + wake_up(&mcdi->wq); + return true; + } + + return false; +} + +static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) +{ + if (mcdi->mode == MCDI_MODE_EVENTS) { + struct efx_mcdi_async_param *async; + struct efx_nic *efx = mcdi->efx; + + /* Process the asynchronous request queue */ + spin_lock_bh(&mcdi->async_lock); + async = list_first_entry_or_null( + &mcdi->async_list, struct efx_mcdi_async_param, list); + if (async) { + mcdi->state = MCDI_STATE_RUNNING_ASYNC; + efx_mcdi_send_request(efx, async->cmd, + (const efx_dword_t *)(async + 1), + async->inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + spin_unlock_bh(&mcdi->async_lock); + + if (async) + return; + } + + mcdi->state = MCDI_STATE_QUIESCENT; + wake_up(&mcdi->wq); +} + +/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the + * asynchronous completion function, and release the interface. + * Return whether this was done. Must be called in bh-disabled + * context. Will take iface_lock and async_lock. + */ +static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) +{ + struct efx_nic *efx = mcdi->efx; + struct efx_mcdi_async_param *async; + size_t hdr_len, data_len, err_len; + efx_dword_t *outbuf; + MCDI_DECLARE_BUF_ERR(errbuf); + int rc; + + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) != + MCDI_STATE_RUNNING_ASYNC) + return false; + + spin_lock(&mcdi->iface_lock); + if (timeout) { + /* Ensure that if the completion event arrives later, + * the seqno check in efx_mcdi_ev_cpl() will fail + */ + ++mcdi->seqno; + ++mcdi->credits; + rc = -ETIMEDOUT; + hdr_len = 0; + data_len = 0; + } else { + rc = mcdi->resprc; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + } + spin_unlock(&mcdi->iface_lock); + + /* Stop the timer. In case the timer function is running, we + * must wait for it to return so that there is no possibility + * of it aborting the next request. 
+ */ + if (!timeout) + del_timer_sync(&mcdi->async_timer); + + spin_lock(&mcdi->async_lock); + async = list_first_entry(&mcdi->async_list, + struct efx_mcdi_async_param, list); + list_del(&async->list); + spin_unlock(&mcdi->async_lock); + + outbuf = (efx_dword_t *)(async + 1); + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(async->outlen, data_len)); + if (!timeout && rc && !async->quiet) { + err_len = min(sizeof(errbuf), data_len); + efx->type->mcdi_read_response(efx, errbuf, hdr_len, + sizeof(errbuf)); + efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf, + err_len, rc); + } + + if (async->complete) + async->complete(efx, async->cookie, rc, outbuf, + min(async->outlen, data_len)); + kfree(async); + + efx_mcdi_release(mcdi); + + return true; +} + +static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, + unsigned int datalen, unsigned int mcdi_err) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool wake = false; + + spin_lock(&mcdi->iface_lock); + + if ((seqno ^ mcdi->seqno) & SEQ_MASK) { + if (mcdi->credits) + /* The request has been cancelled */ + --mcdi->credits; + else + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx " + "seq 0x%x\n", seqno, mcdi->seqno); + } else { + if (efx->type->mcdi_max_ver >= 2) { + /* MCDI v2 responses don't fit in an event */ + efx_mcdi_read_response_header(efx); + } else { + mcdi->resprc = efx_mcdi_errno(mcdi_err); + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = datalen; + } + + wake = true; + } + + spin_unlock(&mcdi->iface_lock); + + if (wake) { + if (!efx_mcdi_complete_async(mcdi, false)) + (void) efx_mcdi_complete_sync(mcdi); + + /* If the interface isn't RUNNING_ASYNC or + * RUNNING_SYNC then we've received a duplicate + * completion after we've already transitioned back to + * QUIESCENT. [A subsequent invocation would increment + * seqno, so would have failed the seqno check]. 
+ */ + } +} + +static void efx_mcdi_timeout_async(struct timer_list *t) +{ + struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer); + + efx_mcdi_complete_async(mcdi, true); +} + +static int +efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) +{ + if (efx->type->mcdi_max_ver < 0 || + (efx->type->mcdi_max_ver < 2 && + cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) + return -EINVAL; + + if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || + (efx->type->mcdi_max_ver < 2 && + inlen > MCDI_CTL_SDU_LEN_MAX_V1)) + return -EMSGSIZE; + + return 0; +} + +static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx, + size_t hdr_len, size_t data_len, + u32 *proxy_handle) +{ + MCDI_DECLARE_BUF_ERR(testbuf); + const size_t buflen = sizeof(testbuf); + + if (!proxy_handle || data_len < buflen) + return false; + + efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen); + if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) { + *proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE); + return true; + } + + return false; +} + +static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet, + u32 *proxy_handle, int *raw_rc) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + MCDI_DECLARE_BUF_ERR(errbuf); + int rc; + + if (mcdi->mode == MCDI_MODE_POLL) + rc = efx_mcdi_poll(efx); + else + rc = efx_mcdi_await_completion(efx); + + if (rc != 0) { + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); + + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { + netif_err(efx, hw, efx->net_dev, + "MCDI request was completed without an event\n"); + rc = 0; + } + + efx_mcdi_abandon(efx); + + /* Close the race with efx_mcdi_ev_cpl() executing just too late + * and completing a request we've just cancelled, by ensuring + * that the seqno check therein fails. + */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + ++mcdi->credits; + spin_unlock_bh(&mcdi->iface_lock); + } + + if (proxy_handle) + *proxy_handle = 0; + + if (rc != 0) { + if (outlen_actual) + *outlen_actual = 0; + } else { + size_t hdr_len, data_len, err_len; + + /* At the very least we need a memory barrier here to ensure + * we pick up changes from efx_mcdi_ev_cpl(). Protect against + * a spurious efx_mcdi_ev_cpl() running concurrently by + * acquiring the iface_lock. 
*/ + spin_lock_bh(&mcdi->iface_lock); + rc = mcdi->resprc; + if (raw_rc) + *raw_rc = mcdi->resprc_raw; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + err_len = min(sizeof(errbuf), data_len); + spin_unlock_bh(&mcdi->iface_lock); + + BUG_ON(rc > 0); + + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(outlen, data_len)); + if (outlen_actual) + *outlen_actual = data_len; + + efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len); + + if (cmd == MC_CMD_REBOOT && rc == -EIO) { + /* Don't reset if MC_CMD_REBOOT returns EIO */ + } else if (rc == -EIO || rc == -EINTR) { + netif_err(efx, hw, efx->net_dev, "MC reboot detected\n"); + netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n", + cmd, -rc); + if (efx->type->mcdi_reboot_detected) + efx->type->mcdi_reboot_detected(efx); + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } else if (proxy_handle && (rc == -EPROTO) && + efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, + proxy_handle)) { + mcdi->proxy_rx_status = 0; + mcdi->proxy_rx_handle = 0; + mcdi->state = MCDI_STATE_PROXY_WAIT; + } else if (rc && !quiet) { + efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len, + rc); + } + + if (rc == -EIO || rc == -EINTR) { + msleep(MCDI_STATUS_SLEEP_MS); + efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + } + } + + if (!proxy_handle || !*proxy_handle) + efx_mcdi_release(mcdi); + return rc; +} + +static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi) +{ + if (mcdi->state == MCDI_STATE_PROXY_WAIT) { + /* Interrupt the proxy wait. */ + mcdi->proxy_rx_status = -EINTR; + wake_up(&mcdi->proxy_rx_wq); + } +} + +static void efx_mcdi_ev_proxy_response(struct efx_nic *efx, + u32 handle, int status) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT); + + mcdi->proxy_rx_status = efx_mcdi_errno(status); + /* Ensure the status is written before we update the handle, since the + * latter is used to check if we've finished. + */ + wmb(); + mcdi->proxy_rx_handle = handle; + wake_up(&mcdi->proxy_rx_wq); +} + +static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + /* Wait for a proxy event, or timeout. */ + rc = wait_event_timeout(mcdi->proxy_rx_wq, + mcdi->proxy_rx_handle != 0 || + mcdi->proxy_rx_status == -EINTR, + MCDI_RPC_TIMEOUT); + + if (rc <= 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy timeout %d\n", handle); + return -ETIMEDOUT; + } else if (mcdi->proxy_rx_handle != handle) { + netif_warn(efx, hw, efx->net_dev, + "MCDI proxy unexpected handle %d (expected %d)\n", + mcdi->proxy_rx_handle, handle); + return -EINVAL; + } + + return mcdi->proxy_rx_status; +} + +static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet, int *raw_rc) +{ + u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */ + int rc; + + if (inbuf && inlen && (inbuf == outbuf)) { + /* The input buffer can't be aliased with the output. */ + WARN_ON(1); + return -EINVAL; + } + + rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); + if (rc) + return rc; + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, quiet, &proxy_handle, raw_rc); + + if (proxy_handle) { + /* Handle proxy authorisation. This allows approval of MCDI + * operations to be delegated to the admin function, allowing + * fine control over (eg) multicast subscriptions. 
+ */ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + netif_dbg(efx, hw, efx->net_dev, + "MCDI waiting for proxy auth %d\n", + proxy_handle); + rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet); + + if (rc == 0) { + netif_dbg(efx, hw, efx->net_dev, + "MCDI proxy retry %d\n", proxy_handle); + + /* We now retry the original request. */ + mcdi->state = MCDI_STATE_RUNNING_SYNC; + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + + rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, + outbuf, outlen, outlen_actual, + quiet, NULL, raw_rc); + } else { + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x failed after proxy auth rc=%d\n", + cmd, rc); + + if (rc == -EINTR || rc == -EIO) + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + efx_mcdi_release(mcdi); + } + } + + return rc; +} + +static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + int raw_rc = 0; + int rc; + + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, true, &raw_rc); + + if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + efx->type->is_vf) { + /* If the EVB port isn't available within a VF this may + * mean the PF is still bringing the switch up. We should + * retry our request shortly. + */ + unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT; + unsigned int delay_us = 10000; + + netif_dbg(efx, hw, efx->net_dev, + "%s: NO_EVB_PORT; will retry request\n", + __func__); + + do { + usleep_range(delay_us, delay_us + 10000); + rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen, + outbuf, outlen, outlen_actual, + true, &raw_rc); + if (delay_us < 100000) + delay_us <<= 1; + } while ((rc == -EPROTO) && + (raw_rc == MC_CMD_ERR_NO_EVB_PORT) && + time_before(jiffies, abort_time)); + } + + if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO)) + efx_mcdi_display_error(efx, cmd, inlen, + outbuf, outlen, rc); + + return rc; +} + +/** + * efx_mcdi_rpc - Issue an MCDI command and wait for completion + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes. Must be a multiple + * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1. + * @outbuf: Response buffer. May be %NULL if @outlen is 0. + * @outlen: Length of response buffer, in bytes. If the actual + * response is longer than @outlen & ~3, it will be truncated + * to that length. + * @outlen_actual: Pointer through which to return the actual response + * length. May be %NULL if this is not needed. + * + * This function may sleep and therefore must be called in an appropriate + * context. + * + * Return: A negative error code, or zero if successful. The error + * code may come from the MCDI response or may indicate a failure + * to communicate with the MC. In the former case, the response + * will still be copied to @outbuf and *@outlen_actual will be + * set accordingly. In the latter case, *@outlen_actual will be + * set to zero. + */ +int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, false); +} + +/* Normally, on receiving an error code in the MCDI response, + * efx_mcdi_rpc will log an error message containing (among other + * things) the raw error code, by means of efx_mcdi_display_error. 
+ * This _quiet version suppresses that; if the caller wishes to log + * the error conditionally on the return code, it should call this + * function and is then responsible for calling efx_mcdi_display_error + * as needed. + */ +int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, true); +} + +int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; + + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + + if (mcdi->mode == MCDI_MODE_FAIL) + return -ENETDOWN; + + efx_mcdi_acquire_sync(mcdi); + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + return 0; +} + +static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + struct efx_mcdi_async_param *async; + int rc; + + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; + + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + + async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), + GFP_ATOMIC); + if (!async) + return -ENOMEM; + + async->cmd = cmd; + async->inlen = inlen; + async->outlen = outlen; + async->quiet = quiet; + async->complete = complete; + async->cookie = cookie; + memcpy(async + 1, inbuf, inlen); + + spin_lock_bh(&mcdi->async_lock); + + if (mcdi->mode == MCDI_MODE_EVENTS) { + list_add_tail(&async->list, &mcdi->async_list); + + /* If this is at the front of the queue, try to start it + * immediately + */ + if (mcdi->async_list.next == &async->list && + efx_mcdi_acquire_async(mcdi)) { + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + } else { + kfree(async); + rc = -ENETDOWN; + } + + spin_unlock_bh(&mcdi->async_lock); + + return rc; +} + +/** + * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes + * @outlen: Length to allocate for response buffer, in bytes + * @complete: Function to be called on completion or cancellation. + * @cookie: Arbitrary value to be passed to @complete. + * + * This function does not sleep and therefore may be called in atomic + * context. It will fail if event queues are disabled or if MCDI + * event completions have been disabled due to an error. 
+ * + * If it succeeds, the @complete function will be called exactly once + * in atomic context, when one of the following occurs: + * (a) the completion event is received (in NAPI context) + * (b) event queues are disabled (in the process that disables them) + * (c) the request times-out (in timer context) + */ +int +efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, false); +} + +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, efx_mcdi_async_completer *complete, + unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, true); +} + +int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, false, NULL, NULL); +} + +int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, true, NULL, NULL); +} + +void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc) +{ + int code = 0, err_arg = 0; + + if (outlen >= MC_CMD_ERR_CODE_OFST + 4) + code = MCDI_DWORD(outbuf, ERR_CODE); + if (outlen >= MC_CMD_ERR_ARG_OFST + 4) + err_arg = MCDI_DWORD(outbuf, ERR_ARG); + netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err, + "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n", + cmd, inlen, rc, code, err_arg); +} + +/* Switch to polled MCDI completions. This can be called in various + * error conditions with various locks held, so it must be lockless. + * Caller is responsible for flushing asynchronous requests later. + */ +void efx_mcdi_mode_poll(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + /* If already in polling mode, nothing to do. + * If in fail-fast state, don't switch to polled completion. + * FLR recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL) + return; + + /* We can switch from event completion to polled completion, because + * mcdi requests are always completed in shared memory. We do this by + * switching the mode to POLL'd then completing the request. + * efx_mcdi_await_completion() will then call efx_mcdi_poll(). + * + * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), + * which efx_mcdi_complete_sync() provides for us. + */ + mcdi->mode = MCDI_MODE_POLL; + + efx_mcdi_complete_sync(mcdi); +} + +/* Flush any running or queued asynchronous requests, after event processing + * is stopped + */ +void efx_mcdi_flush_async(struct efx_nic *efx) +{ + struct efx_mcdi_async_param *async, *next; + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + + /* We must be in poll or fail mode so no more requests can be queued */ + BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); + + del_timer_sync(&mcdi->async_timer); + + /* If a request is still running, make sure we give the MC + * time to complete it so that the response won't overwrite our + * next request. 
+ */ + if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) { + efx_mcdi_poll(efx); + mcdi->state = MCDI_STATE_QUIESCENT; + } + + /* Nothing else will access the async list now, so it is safe + * to walk it without holding async_lock. If we hold it while + * calling a completer then lockdep may warn that we have + * acquired locks in the wrong order. + */ + list_for_each_entry_safe(async, next, &mcdi->async_list, list) { + if (async->complete) + async->complete(efx, async->cookie, -ENETDOWN, NULL, 0); + list_del(&async->list); + kfree(async); + } +} + +void efx_mcdi_mode_event(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + /* If already in event completion mode, nothing to do. + * If in fail-fast state, don't switch to event completion. FLR + * recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL) + return; + + /* We can't switch from polled to event completion in the middle of a + * request, because the completion method is specified in the request. + * So acquire the interface to serialise the requestors. We don't need + * to acquire the iface_lock to change the mode here, but we do need a + * write memory barrier ensure that efx_mcdi_rpc() sees it, which + * efx_mcdi_acquire() provides. + */ + efx_mcdi_acquire_sync(mcdi); + mcdi->mode = MCDI_MODE_EVENTS; + efx_mcdi_release(mcdi); +} + +static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + /* If there is an outstanding MCDI request, it has been terminated + * either by a BADASSERT or REBOOT event. If the mcdi interface is + * in polled mode, then do nothing because the MC reboot handler will + * set the header correctly. However, if the mcdi interface is waiting + * for a CMDDONE event it won't receive it [and since all MCDI events + * are sent to the same queue, we can't be racing with + * efx_mcdi_ev_cpl()] + * + * If there is an outstanding asynchronous request, we can't + * complete it now (efx_mcdi_complete() would deadlock). The + * reset process will take care of this. + * + * There's a race here with efx_mcdi_send_request(), because + * we might receive a REBOOT event *before* the request has + * been copied out. In polled mode (during startup) this is + * irrelevant, because efx_mcdi_complete_sync() is ignored. In + * event mode, this condition is just an edge-case of + * receiving a REBOOT event after posting the MCDI + * request. Did the mc reboot before or after the copyout? The + * best we can do always is just return failure. + * + * If there is an outstanding proxy response expected it is not going + * to arrive. We should thus abort it. + */ + spin_lock(&mcdi->iface_lock); + efx_mcdi_proxy_abort(mcdi); + + if (efx_mcdi_complete_sync(mcdi)) { + if (mcdi->mode == MCDI_MODE_EVENTS) { + mcdi->resprc = rc; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + ++mcdi->credits; + } + } else { + int count; + + /* Consume the status word since efx_mcdi_rpc_finish() won't */ + for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { + rc = efx_mcdi_poll_reboot(efx); + if (rc) + break; + udelay(MCDI_STATUS_DELAY_US); + } + + /* On EF10, a CODE_MC_REBOOT event can be received without the + * reboot detection in efx_mcdi_poll_reboot() being triggered. + * If zero was returned from the final call to + * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the + * MC has definitely rebooted so prepare for the reset. 
+ */ + if (!rc && efx->type->mcdi_reboot_detected) + efx->type->mcdi_reboot_detected(efx); + + mcdi->new_epoch = true; + + /* Nobody was waiting for an MCDI request, so trigger a reset */ + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } + + spin_unlock(&mcdi->iface_lock); +} + +/* The MC is going down in to BIST mode. set the BIST flag to block + * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset + * (which doesn't actually execute a reset, it waits for the controlling + * function to reset it). + */ +static void efx_mcdi_ev_bist(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + spin_lock(&mcdi->iface_lock); + efx->mc_bist_for_other_fn = true; + efx_mcdi_proxy_abort(mcdi); + + if (efx_mcdi_complete_sync(mcdi)) { + if (mcdi->mode == MCDI_MODE_EVENTS) { + mcdi->resprc = -EIO; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + ++mcdi->credits; + } + } + mcdi->new_epoch = true; + efx_schedule_reset(efx, RESET_TYPE_MC_BIST); + spin_unlock(&mcdi->iface_lock); +} + +/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try + * to recover. + */ +static void efx_mcdi_abandon(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL) + return; /* it had already been done */ + netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n"); + efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT); +} + +static void efx_handle_drain_event(struct efx_nic *efx) +{ + if (atomic_dec_and_test(&efx->active_queues)) + wake_up(&efx->flush_wq); + + WARN_ON(atomic_read(&efx->active_queues) < 0); +} + +/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */ +void efx_mcdi_process_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); + u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); + + switch (code) { + case MCDI_EVENT_CODE_BADSSERT: + netif_err(efx, hw, efx->net_dev, + "MC watchdog or assertion failure at 0x%x\n", data); + efx_mcdi_ev_death(efx, -EINTR); + break; + + case MCDI_EVENT_CODE_PMNOTICE: + netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); + break; + + case MCDI_EVENT_CODE_CMDDONE: + efx_mcdi_ev_cpl(efx, + MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), + MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), + MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); + break; + + case MCDI_EVENT_CODE_LINKCHANGE: + efx_mcdi_process_link_change(efx, event); + break; + case MCDI_EVENT_CODE_SENSOREVT: + efx_sensor_event(efx, event); + break; + case MCDI_EVENT_CODE_SCHEDERR: + netif_dbg(efx, hw, efx->net_dev, + "MC Scheduler alert (0x%x)\n", data); + break; + case MCDI_EVENT_CODE_REBOOT: + case MCDI_EVENT_CODE_MC_REBOOT: + netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); + efx_mcdi_ev_death(efx, -EIO); + break; + case MCDI_EVENT_CODE_MC_BIST: + netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n"); + efx_mcdi_ev_bist(efx); + break; + case MCDI_EVENT_CODE_MAC_STATS_DMA: + /* MAC stats are gather lazily. We can ignore this. 
*/ + break; + case MCDI_EVENT_CODE_FLR: + if (efx->type->sriov_flr) + efx->type->sriov_flr(efx, + MCDI_EVENT_FIELD(*event, FLR_VF)); + break; + case MCDI_EVENT_CODE_PTP_RX: + case MCDI_EVENT_CODE_PTP_FAULT: + case MCDI_EVENT_CODE_PTP_PPS: + efx_ptp_event(efx, event); + break; + case MCDI_EVENT_CODE_PTP_TIME: + efx_time_sync_event(channel, event); + break; + case MCDI_EVENT_CODE_TX_FLUSH: + case MCDI_EVENT_CODE_RX_FLUSH: + /* Two flush events will be sent: one to the same event + * queue as completions, and one to event queue 0. + * In the latter case the {RX,TX}_FLUSH_TO_DRIVER + * flag will be set, and we should ignore the event + * because we want to wait for all completions. + */ + BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != + MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); + if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) + efx_handle_drain_event(efx); + break; + case MCDI_EVENT_CODE_TX_ERR: + case MCDI_EVENT_CODE_RX_ERR: + netif_err(efx, hw, efx->net_dev, + "%s DMA error (event: "EFX_QWORD_FMT")\n", + code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX", + EFX_QWORD_VAL(*event)); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + break; + case MCDI_EVENT_CODE_PROXY_RESPONSE: + efx_mcdi_ev_proxy_response(efx, + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE), + MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC)); + break; + default: + netif_err(efx, hw, efx->net_dev, + "Unknown MCDI event " EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + } +} + +/************************************************************************** + * + * Specific request functions + * + ************************************************************************** + */ + +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); + size_t outlength; + const __le16 *ver_words; + size_t offset; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc) + goto fail; + if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { + rc = -EIO; + goto fail; + } + + ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); + offset = scnprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), + le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), + le16_to_cpu(ver_words[3])); + + if (efx->type->print_additional_fwver) + offset += efx->type->print_additional_fwver(efx, buf + offset, + len - offset); + + /* It's theoretically possible for the string to exceed 31 + * characters, though in practice the first three version + * components are short enough that this doesn't happen. + */ + if (WARN_ON(offset >= len)) + buf[0] = 0; + + return; + +fail: + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + buf[0] = 0; +} + +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, + driver_operating ? 1 : 0); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID + * specified will fail with EPERM, and we have to tell the MC we don't + * care what firmware we get. 
+ */ + if (rc == -EPERM) { + netif_dbg(efx, probe, efx->net_dev, + "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n"); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, + MC_CMD_FW_DONT_CARE); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + } + if (rc) { + efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf), + outbuf, outlen, rc); + goto fail; + } + if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { + rc = -EIO; + goto fail; + } + + if (driver_operating) { + if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) { + efx->mcdi->fn_flags = + MCDI_DWORD(outbuf, + DRV_ATTACH_EXT_OUT_FUNC_FLAGS); + } else { + /* Synthesise flags for Siena */ + efx->mcdi->fn_flags = + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED | + (efx_port_num(efx) == 0) << + MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY; + } + } + + /* We currently assume we have control of the external link + * and are completely trusted by firmware. Abort probing + * if that's not true for this function. + */ + + if (was_attached != NULL) + *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); + return 0; + +fail: + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); + size_t outlen, i; + int port_num = efx_port_num(efx); + int rc; + + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); + /* we need __aligned(2) for ether_addr_copy */ + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1); + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + if (mac_address) + ether_addr_copy(mac_address, + port_num ? 
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0)); + if (fw_subtype_list) { + for (i = 0; + i < MCDI_VAR_ARRAY_LEN(outlen, + GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); + i++) + fw_subtype_list[i] = MCDI_ARRAY_WORD( + outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i); + for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++) + fw_subtype_list[i] = 0; + } + if (capabilities) { + if (port_num) + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); + else + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); + } + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", + __func__, rc, (int)outlen); + + return rc; +} + +int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); + u32 dest = 0; + int rc; + + if (uart) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; + if (evq) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; + + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); + + BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); + return rc; +} + +/* This function finds types using the new NVRAM_PARTITIONS mcdi. 
*/ +static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number, + u32 *nvram_types) +{ + efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, + GFP_KERNEL); + size_t outlen; + int rc; + + if (!outbuf) + return -ENOMEM; + + BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, + outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen); + if (rc) + goto fail; + + *number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); + + memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID), + *number * sizeof(u32)); + +fail: + kfree(outbuf); + return rc; +} + +int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); + *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); + *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & + (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { + case MC_CMD_NVRAM_TEST_PASS: + case MC_CMD_NVRAM_TEST_NOTSUPP: + return 0; + default: + return -EIO; + } +} + +/* This function tests nvram partitions using the new mcdi partition lookup scheme */ +int efx_new_mcdi_nvram_test_all(struct efx_nic *efx) +{ + u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, + GFP_KERNEL); + unsigned int number; + int rc, i; + + if (!nvram_types) + return -ENOMEM; + + rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types); + if (rc) + goto fail; + + /* Require at least one check */ + rc = -EAGAIN; + + for (i = 0; i < number; i++) { + if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP || + nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG) + continue; + + rc = efx_mcdi_nvram_test(efx, nvram_types[i]); + if (rc) + goto fail; + } + +fail: + kfree(nvram_types); + return rc; +} + +int efx_mcdi_nvram_test_all(struct efx_nic *efx) +{ + u32 nvram_types; + unsigned int type; + int rc; + + rc = efx_mcdi_nvram_types(efx, &nvram_types); + if (rc) + goto fail1; + + type = 0; + while (nvram_types != 0) { + if (nvram_types & 1) { + rc = efx_mcdi_nvram_test(efx, type); + if (rc) + goto fail2; + } + type++; + nvram_types >>= 1; + } + + return 0; + +fail2: + netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", + __func__, type); +fail1: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +/* Returns 1 if an assertion was read, 0 if no assertion had fired, + * negative on error. 
+ */ +static int efx_mcdi_read_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); + unsigned int flags, index; + const char *reason; + size_t outlen; + int retry; + int rc; + + /* Attempt to read any stored assertion state before we reboot + * the mcfw out of the assertion handler. Retry twice, once + * because a boot-time assertion might cause this command to fail + * with EINTR. And once again because GET_ASSERTS can race with + * MC_CMD_REBOOT running on the other port. */ + retry = 2; + do { + MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, + inbuf, MC_CMD_GET_ASSERTS_IN_LEN, + outbuf, sizeof(outbuf), &outlen); + if (rc == -EPERM) + return 0; + } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); + + if (rc) { + efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS, + MC_CMD_GET_ASSERTS_IN_LEN, outbuf, + outlen, rc); + return rc; + } + if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) + return -EIO; + + /* Print out any recorded assertion state */ + flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); + if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) + return 0; + + reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) + ? "system-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) + ? "thread-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) + ? "watchdog reset" + : "unknown assertion"; + netif_err(efx, hw, efx->net_dev, + "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); + + /* Print out the registers */ + for (index = 0; + index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; + index++) + netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", + 1 + index, + MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, + index)); + + return 1; +} + +static int efx_mcdi_exit_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; + + /* If the MC is running debug firmware, it might now be + * waiting for a debugger to attach, but we just want it to + * reboot. We set a flag that makes the command a no-op if it + * has already done so. + * The MCDI will thus return either 0 or -EIO. 
+ */ + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, + MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, + NULL, 0, NULL); + if (rc == -EIO) + rc = 0; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN, + NULL, 0, rc); + return rc; +} + +int efx_mcdi_handle_assertion(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_read_assertion(efx); + if (rc <= 0) + return rc; + + return efx_mcdi_exit_assertion(efx); +} + +int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); + + BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); + BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); + BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); + + BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); + + return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +static int efx_mcdi_reset_func(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0); + MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG, + ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_reset_mc(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* White is black, and up is down */ + if (rc == -EIO) + return 0; + if (rc == 0) + rc = -EIO; + return rc; +} + +enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason) +{ + return RESET_TYPE_RECOVER_OR_ALL; +} + +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc; + + /* If MCDI is down, we can't handle_assertion */ + if (method == RESET_TYPE_MCDI_TIMEOUT) { + rc = pci_reset_function(efx->pci_dev); + if (rc) + return rc; + /* Re-enable polled MCDI completion */ + if (efx->mcdi) { + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + mcdi->mode = MCDI_MODE_POLL; + } + return 0; + } + + /* Recover from a failed assertion pre-reset */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + if (method == RESET_TYPE_DATAPATH) + return 0; + else if (method == RESET_TYPE_WORLD) + return efx_mcdi_reset_mc(efx); + else + return efx_mcdi_reset_func(efx); +} + +static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, + const u8 *mac, int *id_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); + MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, + MC_CMD_FILTER_MODE_SIMPLE); + ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac); + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; + +} + + +int +efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) +{ + return 
efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); +} + + +int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + + +int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_flush_rxqs(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); + int rc, count; + + BUILD_BUG_ON(EFX_MAX_CHANNELS > + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); + + count = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + if (rx_queue->flush_pending) { + rx_queue->flush_pending = false; + atomic_dec(&efx->rxq_flush_pending); + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + count, efx_rx_queue_index(rx_queue)); + count++; + } + } + } + + rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); + WARN_ON(rc < 0); + + return rc; +} + +int efx_mcdi_wol_filter_reset(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); + return rc; +} + +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); + rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (!flags) + return 0; + + if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) + *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS); + else + *flags = 0; + + return 0; +} + +int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, + unsigned int *enabled_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) { + rc = -EIO; + goto fail; + } + + if (impl_out) + *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED); + + if (enabled_out) + *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED); + + return 0; + +fail: + /* Older firmware lacks GET_WORKAROUNDS and this isn't especially + * terrifying. The call site will have to deal with it though. 
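A hypothetical call site (an illustration, not taken from the patch) showing how the -ENOSYS case from older firmware mentioned above might be tolerated:

	unsigned int implemented, enabled;
	int rc;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
	if (rc == -ENOSYS) {
		/* Old firmware lacks GET_WORKAROUNDS: treat all
		 * workarounds as absent and carry on.
		 */
		implemented = 0;
		enabled = 0;
		rc = 0;
	}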
+ */ + netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err, + "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +#ifdef CONFIG_SFC_MTD + +#define EFX_MCDI_NVRAM_LEN_MAX 128 + +static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); + MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS, + NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, + 1); + + BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + return rc; +} + +static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, + loff_t offset, u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN); + MCDI_DECLARE_BUF(outbuf, + MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE, + MC_CMD_NVRAM_READ_IN_V2_DEFAULT); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); + return 0; +} + +static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, + MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); + memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, + ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, + loff_t offset, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN); + size_t outlen; + int rc, rc2; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); + /* Always set this flag. 
Old firmware ignores it */ + MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS, + NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, + 1); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) { + rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE); + if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) + netif_err(efx, drv, efx->net_dev, + "NVRAM update failed verification with code 0x%x\n", + rc2); + switch (rc2) { + case MC_CMD_NVRAM_VERIFY_RC_SUCCESS: + break; + case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED: + rc = -EIO; + break; + case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT: + case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST: + rc = -EINVAL; + break; + case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES: + case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS: + case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH: + rc = -EPERM; + break; + default: + netif_err(efx, drv, efx->net_dev, + "Unknown response to NVRAM_UPDATE_FINISH\n"); + rc = -EIO; + } + } + + return rc; +} + +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, + buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk = part->common.mtd.erasesize; + int rc = 0; + + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); + if (rc) + goto out; + part->updating = true; + } + + /* The MCDI interface can in fact do multiple erase blocks at once; + * but erasing may be slow, so we make multiple calls here to avoid + * tripping the MCDI RPC timeout. 
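As a rough illustration (numbers not taken from the patch): with a 4 KiB erase block, a 64 KiB erase request from MTD becomes 16 separate MC_CMD_NVRAM_ERASE calls in the loop below, each of which is expected to complete within a single MCDI_RPC_TIMEOUT.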
*/ + while (offset < end) { + rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, + chunk); + if (rc) + goto out; + offset += chunk; + } +out: + return rc; +} + +int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, const u8 *buffer) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); + if (rc) + goto out; + part->updating = true; + } + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, + buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_mcdi_mtd_sync(struct mtd_info *mtd) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + int rc = 0; + + if (part->updating) { + part->updating = false; + rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); + } + + return rc; +} + +void efx_mcdi_mtd_rename(struct efx_mtd_partition *part) +{ + struct efx_mcdi_mtd_partition *mcdi_part = + container_of(part, struct efx_mcdi_mtd_partition, common); + struct efx_nic *efx = part->mtd.priv; + + snprintf(part->name, sizeof(part->name), "%s %s:%02x", + efx->name, part->type_name, mcdi_part->fw_subtype); +} + +#endif /* CONFIG_SFC_MTD */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h new file mode 100644 index 000000000000..69c2924a147c --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi.h @@ -0,0 +1,388 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_MCDI_H +#define EFX_MCDI_H + +/** + * enum efx_mcdi_state - MCDI request handling state + * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the + * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING + * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending. + * Only the thread that moved into this state is allowed to move out of it. + * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending. + * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that + * indicates we must wait for a proxy try again message. + * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread + * has not yet consumed the result. For all other threads, equivalent to + * %MCDI_STATE_RUNNING. + */ +enum efx_mcdi_state { + MCDI_STATE_QUIESCENT, + MCDI_STATE_RUNNING_SYNC, + MCDI_STATE_RUNNING_ASYNC, + MCDI_STATE_PROXY_WAIT, + MCDI_STATE_COMPLETED, +}; + +/** + * enum efx_mcdi_mode - MCDI transaction mode + * @MCDI_MODE_POLL: poll for MCDI completion, until timeout + * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once + * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls + */ +enum efx_mcdi_mode { + MCDI_MODE_POLL, + MCDI_MODE_EVENTS, + MCDI_MODE_FAIL, +}; + +/** + * struct efx_mcdi_iface - MCDI protocol context + * @efx: The associated NIC. + * @state: Request handling state. Waited for by @wq. + * @mode: Poll for mcdi completion, or wait for an mcdi_event. 
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING + * @new_epoch: Indicates start of day or start of MC reboot recovery + * @iface_lock: Serialises access to @seqno, @credits and response metadata + * @seqno: The next sequence number to use for mcdi requests. + * @credits: Number of spurious MCDI completion events allowed before we + * trigger a fatal error + * @resprc: Response error/success code (Linux numbering) + * @resp_hdr_len: Response header length + * @resp_data_len: Response data (SDU or error) length + * @async_lock: Serialises access to @async_list while event processing is + * enabled + * @async_list: Queue of asynchronous requests + * @async_timer: Timer for asynchronous request timeout + * @logging_buffer: buffer that may be used to build MCDI tracing messages + * @logging_enabled: whether to trace MCDI + * @proxy_rx_handle: Most recently received proxy authorisation handle + * @proxy_rx_status: Status of most recent proxy authorisation + * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle + */ +struct efx_mcdi_iface { + struct efx_nic *efx; + enum efx_mcdi_state state; + enum efx_mcdi_mode mode; + wait_queue_head_t wq; + spinlock_t iface_lock; + bool new_epoch; + unsigned int credits; + unsigned int seqno; + int resprc; + int resprc_raw; + size_t resp_hdr_len; + size_t resp_data_len; + spinlock_t async_lock; + struct list_head async_list; + struct timer_list async_timer; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *logging_buffer; + bool logging_enabled; +#endif + unsigned int proxy_rx_handle; + int proxy_rx_status; + wait_queue_head_t proxy_rx_wq; +}; + +struct efx_mcdi_mon { + struct efx_buffer dma_buf; + struct mutex update_lock; + unsigned long last_update; + struct device *device; + struct efx_mcdi_mon_attribute *attrs; + struct attribute_group group; + const struct attribute_group *groups[2]; + unsigned int n_attrs; +}; + +struct efx_mcdi_mtd_partition { + struct efx_mtd_partition common; + bool updating; + u16 nvram_type; + u16 fw_subtype; +}; + +#define to_efx_mcdi_mtd_partition(mtd) \ + container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd) + +/** + * struct efx_mcdi_data - extra state for NICs that implement MCDI + * @iface: Interface/protocol state + * @hwmon: Hardware monitor state + * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH. 
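To make the @fn_flags description concrete, a hedged sketch (not part of the diff): the field is a bitmask of MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_* positions, as stored by efx_mcdi_drv_attach earlier in this patch, so a caller could test it along these lines; the specific check is hypothetical:

	bool link_control = efx->mcdi->fn_flags &
			    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL);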
+ */ +struct efx_mcdi_data { + struct efx_mcdi_iface iface; +#ifdef CONFIG_SFC_MCDI_MON + struct efx_mcdi_mon hwmon; +#endif + u32 fn_flags; +}; + +static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) +{ + EFX_WARN_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->iface; +} + +#ifdef CONFIG_SFC_MCDI_MON +static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) +{ + EFX_WARN_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->hwmon; +} +#endif + +int efx_mcdi_init(struct efx_nic *efx); +void efx_mcdi_detach(struct efx_nic *efx); +void efx_mcdi_fini(struct efx_nic *efx); + +int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); +int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); + +int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen); +int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); +int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual); + +typedef void efx_mcdi_async_completer(struct efx_nic *efx, + unsigned long cookie, int rc, + efx_dword_t *outbuf, + size_t outlen_actual); +int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); + +void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc); + +int efx_mcdi_poll_reboot(struct efx_nic *efx); +void efx_mcdi_mode_poll(struct efx_nic *efx); +void efx_mcdi_mode_event(struct efx_nic *efx); +void efx_mcdi_flush_async(struct efx_nic *efx); + +void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); +void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); + +/* We expect that 16- and 32-bit fields in MCDI requests and responses + * are appropriately aligned, but 64-bit fields are only + * 32-bit-aligned. Also, on Siena we must copy to the MC shared + * memory strictly 32 bits at a time, so add any necessary padding. 
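A short illustrative sketch (not part of the diff) of how the request-buffer macros defined just below are typically used, mirroring efx_mcdi_log_ctrl earlier in this patch:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST,
		       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);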
+ */ +#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4) +#define _MCDI_DECLARE_BUF(_name, _len) \ + efx_dword_t _name[DIV_ROUND_UP(_len, 4)] +#define MCDI_DECLARE_BUF(_name, _len) \ + _MCDI_DECLARE_BUF(_name, _len) = {{{0}}} +#define MCDI_DECLARE_BUF_ERR(_name) \ + MCDI_DECLARE_BUF(_name, 8) +#define _MCDI_PTR(_buf, _offset) \ + ((u8 *)(_buf) + (_offset)) +#define MCDI_PTR(_buf, _field) \ + _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST) +#define _MCDI_CHECK_ALIGN(_ofst, _align) \ + ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1))) +#define _MCDI_DWORD(_buf, _field) \ + ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) + +#define MCDI_BYTE(_buf, _field) \ + ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \ + *MCDI_PTR(_buf, _field)) +#define MCDI_WORD(_buf, _field) \ + ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) +#define MCDI_SET_DWORD(_buf, _field, _value) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) +#define MCDI_DWORD(_buf, _field) \ + EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1) +#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \ + _name2, _value2) \ + EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2) +#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3) \ + EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3) +#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4) \ + EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4) +#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5) \ + EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5) +#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6) \ + EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6) +#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7) \ + EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7) +#define MCDI_SET_QWORD(_buf, _field, _value) \ + do { \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ + EFX_DWORD_0, (u32)(_value)); \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_QWORD(_buf, _field) \ + 
(EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ + (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) +#define MCDI_FIELD(_ptr, _type, _field) \ + EFX_EXTRACT_DWORD( \ + *(efx_dword_t *) \ + _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\ + MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \ + (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \ + MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1) + +#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \ + (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\ + + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align)) +#define MCDI_DECLARE_STRUCT_PTR(_name) \ + efx_dword_t *_name +#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \ + ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_VAR_ARRAY_LEN(_len, _field) \ + min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \ + ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN) +#define MCDI_ARRAY_WORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *) \ + _MCDI_ARRAY_PTR(_buf, _field, _index, 2))) +#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \ + EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \ + EFX_DWORD_0, _value) +#define MCDI_ARRAY_DWORD(_buf, _field, _index) \ + EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0) +#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \ + do { \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\ + EFX_DWORD_0, (u32)(_value)); \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ + MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \ + _type ## _TYPEDEF, _field2) + +#define MCDI_EVENT_FIELD(_ev, _field) \ + EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) + +#define MCDI_CAPABILITY(field) \ + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN + +#define MCDI_CAPABILITY_OFST(field) \ + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST + +#define efx_has_cap(efx, field) \ + efx->type->check_caps(efx, \ + MCDI_CAPABILITY(field), \ + MCDI_CAPABILITY_OFST(field)) + +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); +int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities); +int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq); +int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); +int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out); +int efx_new_mcdi_nvram_test_all(struct efx_nic *efx); +int efx_mcdi_nvram_test_all(struct efx_nic *efx); +int efx_mcdi_handle_assertion(struct efx_nic *efx); +int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); +int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out); +int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); +int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); 
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx); +int efx_mcdi_flush_rxqs(struct efx_nic *efx); +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); +void efx_mcdi_mac_start_stats(struct efx_nic *efx); +void efx_mcdi_mac_stop_stats(struct efx_nic *efx); +void efx_mcdi_mac_pull_stats(struct efx_nic *efx); +enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags); +int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, + unsigned int *enabled_out); + +#ifdef CONFIG_SFC_MCDI_MON +int efx_mcdi_mon_probe(struct efx_nic *efx); +void efx_mcdi_mon_remove(struct efx_nic *efx); +#else +static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } +static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} +#endif + +#ifdef CONFIG_SFC_MTD +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); +int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); +int efx_mcdi_mtd_sync(struct mtd_info *mtd); +void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); +#endif + +#endif /* EFX_MCDI_H */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c new file mode 100644 index 000000000000..5954fcfee2b1 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c @@ -0,0 +1,531 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2011-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include + +#include "net_driver.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "nic.h" + +enum efx_hwmon_type { + EFX_HWMON_UNKNOWN, + EFX_HWMON_TEMP, /* temperature */ + EFX_HWMON_COOL, /* cooling device, probably a heatsink */ + EFX_HWMON_IN, /* voltage */ + EFX_HWMON_CURR, /* current */ + EFX_HWMON_POWER, /* power */ + EFX_HWMON_TYPES_COUNT +}; + +static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = { + [EFX_HWMON_TEMP] = " degC", + [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */ + [EFX_HWMON_IN] = " mV", + [EFX_HWMON_CURR] = " mA", + [EFX_HWMON_POWER] = " W", +}; + +static const struct { + const char *label; + enum efx_hwmon_type hwmon_type; + int port; +} efx_mcdi_sensor_type[] = { +#define SENSOR(name, label, hwmon_type, port) \ + [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port } + SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1), + SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1), + SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1), + SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0), + SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0), + SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1), + SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1), + SENSOR(IN_1V0, "1.0V supply", IN, -1), + SENSOR(IN_1V2, "1.2V supply", IN, -1), + SENSOR(IN_1V8, "1.8V supply", IN, -1), + SENSOR(IN_2V5, "2.5V supply", IN, -1), + SENSOR(IN_3V3, "3.3V supply", IN, -1), + SENSOR(IN_12V0, "12.0V supply", IN, -1), + SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1), + SENSOR(IN_VREF, "Ref. 
voltage", IN, -1), + SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1), + SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1), + SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1), + SENSOR(PSU_TEMP, "Controller regulator temp.", + TEMP, -1), + SENSOR(FAN_0, "Fan 0", COOL, -1), + SENSOR(FAN_1, "Fan 1", COOL, -1), + SENSOR(FAN_2, "Fan 2", COOL, -1), + SENSOR(FAN_3, "Fan 3", COOL, -1), + SENSOR(FAN_4, "Fan 4", COOL, -1), + SENSOR(IN_VAOE, "AOE input supply", IN, -1), + SENSOR(OUT_IAOE, "AOE output current", CURR, -1), + SENSOR(IN_IAOE, "AOE input current", CURR, -1), + SENSOR(NIC_POWER, "Board power use", POWER, -1), + SENSOR(IN_0V9, "0.9V supply", IN, -1), + SENSOR(IN_I0V9, "0.9V supply current", CURR, -1), + SENSOR(IN_I1V2, "1.2V supply current", CURR, -1), + SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1), + SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1), + SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1), + SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1), + SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1), + SENSOR(CONTROLLER_VPTAT, + "Controller PTAT voltage (int. ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP, + "Controller die temp. (int. ADC)", TEMP, -1), + SENSOR(CONTROLLER_VPTAT_EXTADC, + "Controller PTAT voltage (ext. ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC, + "Controller die temp. (ext. ADC)", TEMP, -1), + SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1), + SENSOR(AIRFLOW, "Air flow raw", IN, -1), + SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1), + SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1), + SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1), +#undef SENSOR +}; + +static const char *const sensor_status_names[] = { + [MC_CMD_SENSOR_STATE_OK] = "OK", + [MC_CMD_SENSOR_STATE_WARNING] = "Warning", + [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", + [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", + [MC_CMD_SENSOR_STATE_NO_READING] = "No reading", +}; + +void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ + unsigned int type, state, value; + enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN; + const char *name = NULL, *state_txt, *unit; + + type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); + state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); + value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); + + /* Deal gracefully with the board having more drivers than we + * know about, but do not expect new sensor states. 
*/ + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { + name = efx_mcdi_sensor_type[type].label; + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + } + if (!name) + name = "No sensor name available"; + EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); + state_txt = sensor_status_names[state]; + EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT); + unit = efx_hwmon_unit[hwmon_type]; + if (!unit) + unit = ""; + + netif_err(efx, hw, efx->net_dev, + "Sensor %d (%s) reports condition '%s' for value %d%s\n", + type, name, state_txt, value, unit); +} + +#ifdef CONFIG_SFC_MCDI_MON + +struct efx_mcdi_mon_attribute { + struct device_attribute dev_attr; + unsigned int index; + unsigned int type; + enum efx_hwmon_type hwmon_type; + unsigned int limit_value; + char name[12]; +}; + +static int efx_mcdi_mon_update(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN); + int rc; + + MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR, + hwmon->dma_buf.dma_addr); + MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len); + + rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, + inbuf, sizeof(inbuf), NULL, 0, NULL); + if (rc == 0) + hwmon->last_update = jiffies; + return rc; +} + +static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, + efx_dword_t *entry) +{ + struct efx_nic *efx = dev_get_drvdata(dev->parent); + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + int rc; + + BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0); + + mutex_lock(&hwmon->update_lock); + + /* Use cached value if last update was < 1 s ago */ + if (time_before(jiffies, hwmon->last_update + HZ)) + rc = 0; + else + rc = efx_mcdi_mon_update(efx); + + /* Copy out the requested entry */ + *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index]; + + mutex_unlock(&hwmon->update_lock); + + return rc; +} + +static ssize_t efx_mcdi_mon_show_value(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + efx_dword_t entry; + unsigned int value, state; + int rc; + + rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); + if (rc) + return rc; + + state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + if (state == MC_CMD_SENSOR_STATE_NO_READING) + return -EBUSY; + + value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); + + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ + value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + + return sprintf(buf, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_limit(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + unsigned int value; + + value = mon_attr->limit_value; + + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ + value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + + return sprintf(buf, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_alarm(struct device *dev, + struct device_attribute *attr, + char 
*buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + efx_dword_t entry; + int state; + int rc; + + rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); + if (rc) + return rc; + + state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK); +} + +static ssize_t efx_mcdi_mon_show_label(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + return sprintf(buf, "%s\n", + efx_mcdi_sensor_type[mon_attr->type].label); +} + +static void +efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, + ssize_t (*reader)(struct device *, + struct device_attribute *, char *), + unsigned int index, unsigned int type, + unsigned int limit_value) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; + + strlcpy(attr->name, name, sizeof(attr->name)); + attr->index = index; + attr->type = type; + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) + attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + else + attr->hwmon_type = EFX_HWMON_UNKNOWN; + attr->limit_value = limit_value; + sysfs_attr_init(&attr->dev_attr.attr); + attr->dev_attr.attr.name = attr->name; + attr->dev_attr.attr.mode = 0444; + attr->dev_attr.show = reader; + hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; +} + +int efx_mcdi_mon_probe(struct efx_nic *efx) +{ + unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0; + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX); + unsigned int n_pages, n_sensors, n_attrs, page; + size_t outlen; + char name[12]; + u32 mask; + int rc, i, j, type; + + /* Find out how many sensors are present */ + n_sensors = 0; + page = 0; + do { + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); + + rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) + return -EIO; + + mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK); + n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + ++page; + } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT)); + n_pages = page; + + /* Don't create a device if there are none */ + if (n_sensors == 0) + return 0; + + rc = efx_nic_alloc_buffer( + efx, &hwmon->dma_buf, + n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN, + GFP_KERNEL); + if (rc) + return rc; + + mutex_init(&hwmon->update_lock); + efx_mcdi_mon_update(efx); + + /* Allocate space for the maximum possible number of + * attributes for this set of sensors: + * value, min, max, crit, alarm and label for each sensor. 
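
(Illustrative aside, not part of this patch: for a single temperature sensor the probe loop below creates at most the six standard hwmon nodes

	temp1_input  temp1_min  temp1_max  temp1_crit  temp1_alarm  temp1_label

which is where the factor of six comes from. Power sensors get no _min node, the _crit node only appears when the firmware reports a second limit pair, and cooling, voltage, current and power sensors use the fan%u, in%u, curr%u and power%u prefixes instead.)
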
+ */ + n_attrs = 6 * n_sensors; + hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); + if (!hwmon->attrs) { + rc = -ENOMEM; + goto fail; + } + hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *), + GFP_KERNEL); + if (!hwmon->group.attrs) { + rc = -ENOMEM; + goto fail; + } + + for (i = 0, j = -1, type = -1; ; i++) { + enum efx_hwmon_type hwmon_type; + const char *hwmon_prefix; + unsigned hwmon_index; + u16 min1, max1, min2, max2; + + /* Find next sensor type or exit if there is none */ + do { + type++; + + if ((type % 32) == 0) { + page = type / 32; + j = -1; + if (page == n_pages) + goto hwmon_register; + + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, + page); + rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + mask = (MCDI_DWORD(outbuf, + SENSOR_INFO_OUT_MASK) & + ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + + /* Check again for short response */ + if (outlen < + MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) { + rc = -EIO; + goto fail; + } + } + } while (!(mask & (1 << type % 32))); + j++; + + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + + /* Skip sensors specific to a different port */ + if (hwmon_type != EFX_HWMON_UNKNOWN && + efx_mcdi_sensor_type[type].port >= 0 && + efx_mcdi_sensor_type[type].port != + efx_port_num(efx)) + continue; + } else { + hwmon_type = EFX_HWMON_UNKNOWN; + } + + switch (hwmon_type) { + case EFX_HWMON_TEMP: + hwmon_prefix = "temp"; + hwmon_index = ++n_temp; /* 1-based */ + break; + case EFX_HWMON_COOL: + /* This is likely to be a heatsink, but there + * is no convention for representing cooling + * devices other than fans. + */ + hwmon_prefix = "fan"; + hwmon_index = ++n_cool; /* 1-based */ + break; + default: + hwmon_prefix = "in"; + hwmon_index = n_in++; /* 0-based */ + break; + case EFX_HWMON_CURR: + hwmon_prefix = "curr"; + hwmon_index = ++n_curr; /* 1-based */ + break; + case EFX_HWMON_POWER: + hwmon_prefix = "power"; + hwmon_index = ++n_power; /* 1-based */ + break; + } + + min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MIN1); + max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MAX1); + min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MIN2); + max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MAX2); + + if (min1 != max1) { + snprintf(name, sizeof(name), "%s%u_input", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_value, i, type, 0); + + if (hwmon_type != EFX_HWMON_POWER) { + snprintf(name, sizeof(name), "%s%u_min", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, min1); + } + + snprintf(name, sizeof(name), "%s%u_max", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, max1); + + if (min2 != max2) { + /* Assume max2 is critical value. + * But we have no good way to expose min2. 
+ */ + snprintf(name, sizeof(name), "%s%u_crit", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, max2); + } + } + + snprintf(name, sizeof(name), "%s%u_alarm", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_alarm, i, type, 0); + + if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && + efx_mcdi_sensor_type[type].label) { + snprintf(name, sizeof(name), "%s%u_label", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_label, i, type, 0); + } + } + +hwmon_register: + hwmon->groups[0] = &hwmon->group; + hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev, + KBUILD_MODNAME, NULL, + hwmon->groups); + if (IS_ERR(hwmon->device)) { + rc = PTR_ERR(hwmon->device); + goto fail; + } + + return 0; + +fail: + efx_mcdi_mon_remove(efx); + return rc; +} + +void efx_mcdi_mon_remove(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + + if (hwmon->device) + hwmon_device_unregister(hwmon->device); + kfree(hwmon->attrs); + kfree(hwmon->group.attrs); + efx_nic_free_buffer(efx, &hwmon->dma_buf); +} + +#endif /* CONFIG_SFC_MCDI_MON */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.c b/drivers/net/ethernet/sfc/siena/mcdi_port.c new file mode 100644 index 000000000000..94c6a345c0b1 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_port.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2009-2013 Solarflare Communications Inc. + */ + +/* + * Driver for PHY related operations via MCDI. + */ + +#include +#include "efx.h" +#include "mcdi_port.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "nic.h" +#include "selftest.h" +#include "mcdi_port_common.h" + +static int efx_mcdi_mdio_read(struct net_device *net_dev, + int prtad, int devad, u16 addr) +{ + struct efx_nic *efx = netdev_priv(net_dev); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); + + rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != + MC_CMD_MDIO_STATUS_GOOD) + return -EIO; + + return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); +} + +static int efx_mcdi_mdio_write(struct net_device *net_dev, + int prtad, int devad, u16 addr, u16 value) +{ + struct efx_nic *efx = netdev_priv(net_dev); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); + + rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) != + MC_CMD_MDIO_STATUS_GOOD) + return -EIO; + + return 0; +} + +u32 efx_mcdi_phy_get_caps(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = 
efx->phy_data; + + return phy_data->supported_cap; +} + +bool efx_mcdi_mac_check_fault(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + size_t outlength; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc) + return true; + + return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0; +} + +int efx_mcdi_port_probe(struct efx_nic *efx) +{ + int rc; + + /* Set up MDIO structure for PHY */ + efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + efx->mdio.mdio_read = efx_mcdi_mdio_read; + efx->mdio.mdio_write = efx_mcdi_mdio_write; + + /* Fill out MDIO structure, loopback modes, and initial link state */ + rc = efx_mcdi_phy_probe(efx); + if (rc != 0) + return rc; + + return efx_mcdi_mac_init_stats(efx); +} + +void efx_mcdi_port_remove(struct efx_nic *efx) +{ + efx_mcdi_phy_remove(efx); + efx_mcdi_mac_fini_stats(efx); +} diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.h b/drivers/net/ethernet/sfc/siena/mcdi_port.h new file mode 100644 index 000000000000..07863ddbe740 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_port.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + */ + +#ifndef EFX_MCDI_PORT_H +#define EFX_MCDI_PORT_H + +#include "net_driver.h" + +u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); +bool efx_mcdi_mac_check_fault(struct efx_nic *efx); +int efx_mcdi_port_probe(struct efx_nic *efx); +void efx_mcdi_port_remove(struct efx_nic *efx); + +#endif /* EFX_MCDI_PORT_H */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c new file mode 100644 index 000000000000..899cc1671004 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c @@ -0,0 +1,1301 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
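
A minimal sketch (illustration only, not part of this patch) of how the MCDI-backed MDIO accessors registered by efx_mcdi_port_probe() earlier in this patch are typically consumed through the struct mdio_if_info callbacks; the register chosen here (PMA/PMD STAT1) is just an example:

	#include <linux/mdio.h>

	/* Read the PMA/PMD status register via the MCDI MDIO accessor. */
	static int example_read_pma_stat1(struct efx_nic *efx)
	{
		struct mdio_if_info *mdio = &efx->mdio;

		return mdio->mdio_read(efx->net_dev, mdio->prtad,
				       MDIO_MMD_PMAPMD, MDIO_STAT1);
	}
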
+ */ + +#include "mcdi_port_common.h" +#include "efx_common.h" +#include "nic.h" + +int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { + rc = -EIO; + goto fail; + } + + cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); + cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); + cfg->supported_cap = + MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); + cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); + cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); + cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); + memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), + sizeof(cfg->name)); + cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); + cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); + memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), + sizeof(cfg->revision)); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +void efx_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising) +{ + memcpy(efx->link_advertising, advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + efx->link_advertising[0] |= ADVERTISED_Autoneg; + if (advertising[0] & ADVERTISED_Pause) + efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); + else + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); + if (advertising[0] & ADVERTISED_Asym_Pause) + efx->wanted_fc ^= EFX_FC_TX; +} + +int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, u32 loopback_speed) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); + + return efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) { + rc = -EIO; + goto fail; + } + + *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset) +{ + #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + + bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS); + switch (media) { + case MC_CMD_MEDIA_KX4: + SET_BIT(Backplane); + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + SET_BIT(1000baseKX_Full); + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + SET_BIT(10000baseKX4_Full); + if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + SET_BIT(40000baseKR4_Full); + break; + + case MC_CMD_MEDIA_XFP: + case 
MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + SET_BIT(FIBRE); + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) { + SET_BIT(1000baseT_Full); + SET_BIT(1000baseX_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) { + SET_BIT(10000baseCR_Full); + SET_BIT(10000baseLR_Full); + SET_BIT(10000baseSR_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { + SET_BIT(40000baseCR4_Full); + SET_BIT(40000baseSR4_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) { + SET_BIT(100000baseCR4_Full); + SET_BIT(100000baseSR4_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) { + SET_BIT(25000baseCR_Full); + SET_BIT(25000baseSR_Full); + } + if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + SET_BIT(50000baseCR2_Full); + break; + + case MC_CMD_MEDIA_BASE_T: + SET_BIT(TP); + if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) + SET_BIT(10baseT_Half); + if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) + SET_BIT(10baseT_Full); + if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) + SET_BIT(100baseT_Half); + if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) + SET_BIT(100baseT_Full); + if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) + SET_BIT(1000baseT_Half); + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + SET_BIT(1000baseT_Full); + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + SET_BIT(10000baseT_Full); + break; + } + + if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + SET_BIT(Pause); + if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + SET_BIT(Asym_Pause); + if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + SET_BIT(Autoneg); + + #undef SET_BIT +} + +u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) +{ + u32 result = 0; + + #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + + if (TEST_BIT(10baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); + if (TEST_BIT(10baseT_Full)) + result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); + if (TEST_BIT(100baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); + if (TEST_BIT(100baseT_Full)) + result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); + if (TEST_BIT(1000baseT_Half)) + result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); + if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) || + TEST_BIT(1000baseX_Full)) + result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); + if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) || + TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) || + TEST_BIT(10000baseSR_Full)) + result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); + if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) || + TEST_BIT(40000baseSR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); + if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN); + if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full)) + result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN); + if (TEST_BIT(50000baseCR2_Full)) + result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN); + if (TEST_BIT(Pause)) + result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); + if (TEST_BIT(Asym_Pause)) + result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); + if (TEST_BIT(Autoneg)) + result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + + #undef TEST_BIT + + return result; +} + +u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + enum efx_phy_mode mode, supported; + u32 flags; + + /* TODO: Advertise the capabilities supported by this PHY */ + supported = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN)) + supported |= PHY_MODE_TX_DISABLED; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN)) + 
supported |= PHY_MODE_LOW_POWER; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN)) + supported |= PHY_MODE_OFF; + + mode = efx->phy_mode & supported; + + flags = 0; + if (mode & PHY_MODE_TX_DISABLED) + flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN); + if (mode & PHY_MODE_LOW_POWER) + flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN); + if (mode & PHY_MODE_OFF) + flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); + + return flags; +} + +u8 mcdi_to_ethtool_media(u32 media) +{ + switch (media) { + case MC_CMD_MEDIA_XAUI: + case MC_CMD_MEDIA_CX4: + case MC_CMD_MEDIA_KX4: + return PORT_OTHER; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + return PORT_FIBRE; + + case MC_CMD_MEDIA_BASE_T: + return PORT_TP; + + default: + return PORT_OTHER; + } +} + +void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl) +{ + switch (fcntl) { + case MC_CMD_FCNTL_AUTO: + WARN_ON(1); /* This is not a link mode */ + link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_BIDIR: + link_state->fc = EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_RESPOND: + link_state->fc = EFX_FC_RX; + break; + default: + WARN_ON(1); + fallthrough; + case MC_CMD_FCNTL_OFF: + link_state->fc = 0; + break; + } + + link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); + link_state->speed = speed; +} + +/* The semantics of the ethtool FEC mode bitmask are not well defined, + * particularly the meaning of combinations of bits. Which means we get to + * define our own semantics, as follows: + * OFF overrides any other bits, and means "disable all FEC" (with the + * exception of 25G KR4/CR4, where it is not possible to reject it if AN + * partner requests it). + * AUTO on its own means use cable requirements and link partner autoneg with + * fw-default preferences for the cable type. + * AUTO and either RS or BASER means use the specified FEC type if cable and + * link partner support it, otherwise autoneg/fw-default. + * RS or BASER alone means use the specified FEC type if cable and link partner + * support it and either requests it, otherwise no FEC. + * Both RS and BASER (whether AUTO or not) means use FEC if cable and link + * partner support it, preferring RS to BASER. + */ +u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap) +{ + u32 ret = 0; + + if (ethtool_cap & ETHTOOL_FEC_OFF) + return 0; + + if (ethtool_cap & ETHTOOL_FEC_AUTO) + ret |= ((1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) & supported_cap; + if (ethtool_cap & ETHTOOL_FEC_RS && + supported_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) + ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN); + if (ethtool_cap & ETHTOOL_FEC_BASER) { + if (supported_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN)) + ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN); + if (supported_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)) + ret |= (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) | + (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN); + } + return ret; +} + +/* Invert ethtool_fec_caps_to_mcdi. There are two combinations that function + * can never produce, (baser xor rs) and neither req; the implementation below + * maps both of those to AUTO. 
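
(Worked example, not part of this patch, applying the semantics spelled out above ethtool_fec_caps_to_mcdi(): on a link whose supported_cap advertises both RS and BASER ability, ETHTOOL_FEC_RS alone maps to (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) | (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN), i.e. "use RS if the cable and partner allow it, otherwise no FEC", while ETHTOOL_FEC_AUTO | ETHTOOL_FEC_RS additionally keeps the supported BASER ability bits set so the firmware can still fall back to its own preference.)
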
This should never matter, and it's not clear + * what a better mapping would be anyway. + */ +u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g) +{ + bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN), + rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN), + baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) + : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN), + baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN) + : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN); + + if (!baser && !rs) + return ETHTOOL_FEC_OFF; + return (rs_req ? ETHTOOL_FEC_RS : 0) | + (baser_req ? ETHTOOL_FEC_BASER : 0) | + (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO); +} + +/* Verify that the forced flow control settings (!EFX_FC_AUTO) are + * supported by the link partner. Warn the user if this isn't the case + */ +void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 rmtadv; + + /* The link partner capabilities are only relevant if the + * link supports flow control autonegotiation + */ + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + return; + + /* If flow control autoneg is supported and enabled, then fine */ + if (efx->wanted_fc & EFX_FC_AUTO) + return; + + rmtadv = 0; + if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + rmtadv |= ADVERTISED_Pause; + if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + rmtadv |= ADVERTISED_Asym_Pause; + + if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) + netif_err(efx, link, efx->net_dev, + "warning: link partner doesn't support pause frames"); +} + +bool efx_mcdi_phy_poll(struct efx_nic *efx) +{ + struct efx_link_state old_state = efx->link_state; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + efx->link_state.up = false; + else + efx_mcdi_phy_decode_link( + efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + + return !efx_link_state_equal(&efx->link_state, &old_state); +} + +int efx_mcdi_phy_probe(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + u32 caps; + int rc; + + /* Initialise and populate phy_data */ + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (phy_data == NULL) + return -ENOMEM; + + rc = efx_mcdi_get_phy_cfg(efx, phy_data); + if (rc != 0) + goto fail; + + /* Read initial link advertisement */ + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + goto fail; + + /* Fill out nic state */ + efx->phy_data = phy_data; + efx->phy_type = phy_data->type; + + efx->mdio_bus = phy_data->channel; + efx->mdio.prtad = phy_data->port; + efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); + efx->mdio.mode_support = 0; + if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C22; + if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); + if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) + mcdi_to_ethtool_linkset(phy_data->media, caps, + efx->link_advertising); + else + phy_data->forced_cap = caps; + + /* Assert 
that we can map efx -> mcdi loopback modes */ + BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); + BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); + BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); + BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); + BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); + BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); + BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); + BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); + BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); + BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); + BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); + BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); + BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); + BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); + BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); + BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); + BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); + BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); + BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); + BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); + BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); + + rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); + if (rc != 0) + goto fail; + /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, + * but by convention we don't + */ + efx->loopback_modes &= ~(1 << LOOPBACK_NONE); + + /* Set the initial link mode */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + + /* Record the initial FEC configuration (or nearest approximation + * representable in the ethtool configuration space) + */ + efx->fec_config = mcdi_fec_caps_to_ethtool(caps, + efx->link_state.speed == 25000 || + efx->link_state.speed == 50000); + + /* Default to Autonegotiated flow control if the PHY supports it */ + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->wanted_fc |= EFX_FC_AUTO; + efx_link_set_wanted_fc(efx, efx->wanted_fc); + + return 0; + +fail: + kfree(phy_data); + return rc; +} + +void efx_mcdi_phy_remove(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + efx->phy_data = NULL; + kfree(phy_data); +} + +void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + cmd->base.speed = efx->link_state.speed; + cmd->base.duplex = efx->link_state.fd; + cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media); + cmd->base.phy_address = phy_cfg->port; + cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg); + cmd->base.mdio_support = (efx->mdio.mode_support & + (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); + + mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap, + cmd->link_modes.supported); + memcpy(cmd->link_modes.advertising, efx->link_advertising, + 
sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return; + mcdi_to_ethtool_linkset(phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP), + cmd->link_modes.lp_advertising); +} + +int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + if (cmd->base.autoneg) { + caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); + } else if (cmd->base.duplex) { + switch (cmd->base.speed) { + case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; + case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; + case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; + case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break; + case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break; + case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break; + default: return -EINVAL; + } + } else { + switch (cmd->base.speed) { + case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; + default: return -EINVAL; + } + } + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config); + + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); + if (rc) + return rc; + + if (cmd->base.autoneg) { + efx_link_set_advertising(efx, cmd->link_modes.advertising); + phy_cfg->forced_cap = 0; + } else { + efx_link_clear_advertising(efx); + phy_cfg->forced_cap = caps; + } + return 0; +} + +int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN); + u32 caps, active, speed; /* MCDI format */ + bool is_25g = false; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN) + return -EOPNOTSUPP; + + /* behaviour for 25G/50G links depends on 25G BASER bit */ + speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED); + is_25g = speed == 25000 || speed == 50000; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP); + fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g); + /* BASER is never supported on 100G */ + if (speed == 100000) + fec->fec &= ~ETHTOOL_FEC_BASER; + + active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE); + switch (active) { + case MC_CMD_FEC_NONE: + fec->active_fec = ETHTOOL_FEC_OFF; + break; + case MC_CMD_FEC_BASER: + fec->active_fec = ETHTOOL_FEC_BASER; + break; + case MC_CMD_FEC_RS: + fec->active_fec = ETHTOOL_FEC_RS; + break; + default: + netif_warn(efx, hw, efx->net_dev, + "Firmware reports unrecognised FEC_TYPE %u\n", + active); + /* We don't know what firmware has picked. AUTO is as good a + * "can't happen" value as any other. + */ + fec->active_fec = ETHTOOL_FEC_AUTO; + break; + } + + return 0; +} + +/* Basic validation to ensure that the caps we are going to attempt to set are + * in fact supported by the adapter. Note that 'no FEC' is always supported. 
+ */ +static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap) +{ + if (ethtool_cap & ETHTOOL_FEC_OFF) + return 0; + + if (ethtool_cap && + !ethtool_fec_caps_to_mcdi(supported_cap, ethtool_cap)) + return -EINVAL; + return 0; +} + +int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + rc = ethtool_fec_supported(phy_cfg->supported_cap, fec->fec); + if (rc) + return rc; + + /* Work out what efx_mcdi_phy_set_link_ksettings() would produce from + * saved advertising bits + */ + if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising)) + caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); + else + caps = phy_cfg->forced_cap; + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, fec->fec); + rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); + if (rc) + return rc; + + /* Record the new FEC setting for subsequent set_link calls */ + efx->fec_config = fec->fec; + return 0; +} + +int efx_mcdi_phy_test_alive(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) + return -EIO; + if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK) + return -EINVAL; + + return 0; +} + +int efx_mcdi_port_reconfigure(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps = (efx->link_advertising[0] ? + ethtool_linkset_to_mcdi_cap(efx->link_advertising) : + phy_cfg->forced_cap); + + caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config); + + return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); +} + +static const char *const mcdi_sft9001_cable_diag_names[] = { + "cable.pairA.length", + "cable.pairB.length", + "cable.pairC.length", + "cable.pairD.length", + "cable.pairA.status", + "cable.pairB.status", + "cable.pairC.status", + "cable.pairD.status", +}; + +static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, + int *results) +{ + unsigned int retry, i, count = 0; + size_t outlen; + u32 status; + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN); + u8 *ptr; + int rc; + + BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); + rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, + inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); + if (rc) + goto out; + + /* Wait up to 10s for BIST to finish */ + for (retry = 0; retry < 100; ++retry) { + BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto out; + + status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + if (status != MC_CMD_POLL_BIST_RUNNING) + goto finished; + + msleep(100); + } + + rc = -ETIMEDOUT; + goto out; + +finished: + results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 
1 : -1; + + /* SFT9001 specific cable diagnostics output */ + if (efx->phy_type == PHY_TYPE_SFT9001B && + (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || + bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { + ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); + if (status == MC_CMD_POLL_BIST_PASSED && + outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { + for (i = 0; i < 8; i++) { + results[count + i] = + EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i], + EFX_DWORD_0); + } + } + count += 8; + } + rc = count; + +out: + return rc; +} + +int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 mode; + int rc; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); + if (rc < 0) + return rc; + + results += rc; + } + + /* If we support both LONG and SHORT, then run each in response to + * break or not. Otherwise, run the one we support + */ + mode = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) { + if ((flags & ETH_TEST_FL_OFFLINE) && + (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + else + mode = MC_CMD_PHY_BIST_CABLE_SHORT; + } else if (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + + if (mode != 0) { + rc = efx_mcdi_bist(efx, mode, results); + if (rc < 0) + return rc; + results += rc; + } + + return 0; +} + +const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + if (index == 0) + return "bist"; + --index; + } + + if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) | + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) { + if (index == 0) + return "cable"; + --index; + + if (efx->phy_type == PHY_TYPE_SFT9001B) { + if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) + return mcdi_sft9001_cable_diag_names[index]; + index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); + } + } + + return NULL; +} + +#define SFP_PAGE_SIZE 128 +#define SFF_DIAG_TYPE_OFFSET 92 +#define SFF_DIAG_ADDR_CHANGE BIT(2) +#define SFF_8079_NUM_PAGES 2 +#define SFF_8472_NUM_PAGES 4 +#define SFF_8436_NUM_PAGES 5 +#define SFF_DMT_LEVEL_OFFSET 94 + +/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom + * @efx: NIC context + * @page: EEPROM page number + * @data: Destination data pointer + * @offset: Offset in page to copy from in to data + * @space: Space available in data + * + * Return: + * >=0 - amount of data copied + * <0 - error + */ +static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx, + unsigned int page, + u8 *data, ssize_t offset, + ssize_t space) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); + unsigned int payload_len; + unsigned int to_copy; + size_t outlen; + int rc; + + if (offset > SFP_PAGE_SIZE) + return -EINVAL; + + to_copy = min(space, SFP_PAGE_SIZE - offset); + + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + return -EIO; + + payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN); + if 
(payload_len != SFP_PAGE_SIZE) + return -EIO; + + memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, + to_copy); + + return to_copy; +} + +static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx, + unsigned int page, + u8 byte) +{ + u8 data; + int rc; + + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1); + if (rc == 1) + return data; + + return rc; +} + +static int efx_mcdi_phy_diag_type(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the diagnostic type at byte 92. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DIAG_TYPE_OFFSET); +} + +static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the DMT level at byte 94. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DMT_LEVEL_OFFSET); +} + +static u32 efx_mcdi_phy_module_type(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS) + return phy_data->media; + + /* A QSFP+ NIC may actually have an SFP+ module attached. + * The ID is page 0, byte 0. + */ + switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) { + case 0x3: + return MC_CMD_MEDIA_SFP_PLUS; + case 0xc: + case 0xd: + return MC_CMD_MEDIA_QSFP_PLUS; + default: + return 0; + } +} + +int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data) +{ + int rc; + ssize_t space_remaining = ee->len; + unsigned int page_off; + bool ignore_missing; + int num_pages; + int page; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ? + SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES; + page = 0; + ignore_missing = false; + break; + case MC_CMD_MEDIA_QSFP_PLUS: + num_pages = SFF_8436_NUM_PAGES; + page = -1; /* We obtain the lower page by asking for -1. */ + ignore_missing = true; /* Ignore missing pages after page 0. */ + break; + default: + return -EOPNOTSUPP; + } + + page_off = ee->offset % SFP_PAGE_SIZE; + page += ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < num_pages)) { + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, + data, page_off, + space_remaining); + + if (rc > 0) { + space_remaining -= rc; + data += rc; + page_off = 0; + page++; + } else if (rc == 0) { + space_remaining = 0; + } else if (ignore_missing && (page > 0)) { + int intended_size = SFP_PAGE_SIZE - page_off; + + space_remaining -= intended_size; + if (space_remaining < 0) { + space_remaining = 0; + } else { + memset(data, 0, intended_size); + data += intended_size; + page_off = 0; + page++; + rc = 0; + } + } else { + return rc; + } + } + + return 0; +} + +int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo) +{ + int sff_8472_level; + int diag_type; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + sff_8472_level = efx_mcdi_phy_sff_8472_level(efx); + + /* If we can't read the diagnostics level we have none. */ + if (sff_8472_level < 0) + return -EOPNOTSUPP; + + /* Check if this module requires the (unsupported) address + * change operation. 
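
(Worked example, not part of this patch: for a QSFP+ module the page counter in efx_mcdi_phy_get_module_eeprom() above starts at -1 so that MCDI page -1 supplies the lower I2C page. An ethtool request with ee->offset == 300 therefore starts at page_off = 300 % 128 = 44 within MCDI page -1 + 300 / 128 = 1, and subsequent pages are read from offset 0 until the requested length is satisfied or SFF_8436_NUM_PAGES is reached; pages the module does not implement are zero-filled rather than failing the whole read.)
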
+ */ + diag_type = efx_mcdi_phy_diag_type(efx); + + if (sff_8472_level == 0 || + (diag_type & SFF_DIAG_ADDR_CHANGE)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + + case MC_CMD_MEDIA_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static unsigned int efx_calc_mac_mtu(struct efx_nic *efx) +{ + return EFX_MAX_FRAME_LEN(efx->net_dev->mtu); +} + +int efx_mcdi_set_mac(struct efx_nic *efx) +{ + u32 fcntl; + MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + /* This has no effect on EF10 */ + ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), + efx->net_dev->dev_addr); + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx)); + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); + + /* Set simple MAC filter for Siena */ + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT, + SET_MAC_IN_REJECT_UNCST, efx->unicast_filter); + + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS, + SET_MAC_IN_FLAG_INCLUDE_FCS, + !!(efx->net_dev->features & NETIF_F_RXFCS)); + + switch (efx->wanted_fc) { + case EFX_FC_RX | EFX_FC_TX: + fcntl = MC_CMD_FCNTL_BIDIR; + break; + case EFX_FC_RX: + fcntl = MC_CMD_FCNTL_RESPOND; + break; + default: + fcntl = MC_CMD_FCNTL_OFF; + break; + } + if (efx->wanted_fc & EFX_FC_AUTO) + fcntl = MC_CMD_FCNTL_AUTO; + if (efx->fc_disable) + fcntl = MC_CMD_FCNTL_OFF; + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); + + return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), + NULL, 0, NULL); +} + +int efx_mcdi_set_mtu(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, efx_calc_mac_mtu(efx)); + + MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL, + SET_MAC_EXT_IN_CFG_MTU, 1); + + return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +enum efx_stats_action { + EFX_STATS_ENABLE, + EFX_STATS_DISABLE, + EFX_STATS_PULL, +}; + +static int efx_mcdi_mac_stats(struct efx_nic *efx, + enum efx_stats_action action, int clear) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + int rc; + int change = action == EFX_STATS_PULL ? 0 : 1; + int enable = action == EFX_STATS_ENABLE ? 1 : 0; + int period = action == EFX_STATS_ENABLE ? 1000 : 0; + dma_addr_t dma_addr = efx->stats_buffer.dma_addr; + u32 dma_len = action != EFX_STATS_DISABLE ? 
+ efx->num_mac_stats * sizeof(u64) : 0; + + BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); + MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, !!enable, + MAC_STATS_IN_CLEAR, clear, + MAC_STATS_IN_PERIODIC_CHANGE, change, + MAC_STATS_IN_PERIODIC_ENABLE, enable, + MAC_STATS_IN_PERIODIC_CLEAR, 0, + MAC_STATS_IN_PERIODIC_NOEVENT, 1, + MAC_STATS_IN_PERIOD_MS, period); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* Expect ENOENT if DMA queues have not been set up */ + if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), + NULL, 0, rc); + return rc; +} + +void efx_mcdi_mac_start_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + + efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); +} + +void efx_mcdi_mac_stop_stats(struct efx_nic *efx) +{ + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); +} + +#define EFX_MAC_STATS_WAIT_US 100 +#define EFX_MAC_STATS_WAIT_ATTEMPTS 10 + +void efx_mcdi_mac_pull_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); + + while (dma_stats[efx->num_mac_stats - 1] == + EFX_MC_STATS_GENERATION_INVALID && + attempts-- != 0) + udelay(EFX_MAC_STATS_WAIT_US); +} + +int efx_mcdi_mac_init_stats(struct efx_nic *efx) +{ + int rc; + + if (!efx->num_mac_stats) + return 0; + + /* Allocate buffer for stats */ + rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, + efx->num_mac_stats * sizeof(u64), GFP_KERNEL); + if (rc) { + netif_warn(efx, probe, efx->net_dev, + "failed to allocate DMA buffer: %d\n", rc); + return rc; + } + + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64) efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64) virt_to_phys(efx->stats_buffer.addr)); + + return 0; +} + +void efx_mcdi_mac_fini_stats(struct efx_nic *efx) +{ + efx_nic_free_buffer(efx, &efx->stats_buffer); +} + +/* Get physical port number (EF10 only; on Siena it is same as PF number) */ +int efx_mcdi_port_get_number(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT); +} + +static unsigned int efx_mcdi_event_link_speed[] = { + [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, + [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, + [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, + [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000, + [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000, + [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000, + [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000, +}; + +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) +{ + u32 flags, fcntl, speed, lpa; + + speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); + EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); + speed = efx_mcdi_event_link_speed[speed]; + + flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); + fcntl = 
EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); + lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); + + /* efx->link_state is only modified by efx_mcdi_phy_get_link(), + * which is only run after flushing the event queues. Therefore, it + * is safe to modify the link state outside of the mac_lock here. + */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); + + efx_mcdi_phy_check_fcntl(efx, lpa); + + efx_link_status_changed(efx); +} diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.h b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h new file mode 100644 index 000000000000..ed31690e591c --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#ifndef EFX_MCDI_PORT_COMMON_H +#define EFX_MCDI_PORT_COMMON_H + +#include "net_driver.h" +#include "mcdi.h" +#include "mcdi_pcol.h" + +struct efx_mcdi_phy_data { + u32 flags; + u32 type; + u32 supported_cap; + u32 channel; + u32 port; + u32 stats_mask; + u8 name[20]; + u32 media; + u32 mmd_mask; + u8 revision[20]; + u32 forced_cap; +}; + +int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg); +void efx_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising); +int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, u32 loopback_speed); +int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes); +void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset); +u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset); +u32 efx_get_mcdi_phy_flags(struct efx_nic *efx); +u8 mcdi_to_ethtool_media(u32 media); +void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl); +u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap); +u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g); +void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa); +bool efx_mcdi_phy_poll(struct efx_nic *efx); +int efx_mcdi_phy_probe(struct efx_nic *efx); +void efx_mcdi_phy_remove(struct efx_nic *efx); +void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd); +int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd); +int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec); +int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec); +int efx_mcdi_phy_test_alive(struct efx_nic *efx); +int efx_mcdi_port_reconfigure(struct efx_nic *efx); +int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags); +const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index); +int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data); +int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo); +int efx_mcdi_set_mac(struct efx_nic *efx); +int efx_mcdi_set_mtu(struct efx_nic *efx); +int efx_mcdi_mac_init_stats(struct efx_nic *efx); +void 
efx_mcdi_mac_fini_stats(struct efx_nic *efx); +int efx_mcdi_port_get_number(struct efx_nic *efx); +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); + +#endif diff --git a/drivers/net/ethernet/sfc/siena/mtd.c b/drivers/net/ethernet/sfc/siena/mtd.c new file mode 100644 index 000000000000..273c08e5455f --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/mtd.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include + +#include "net_driver.h" +#include "efx.h" + +#define to_efx_mtd_partition(mtd) \ + container_of(mtd, struct efx_mtd_partition, mtd) + +/* MTD interface */ + +static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) +{ + struct efx_nic *efx = mtd->priv; + + return efx->type->mtd_erase(mtd, erase->addr, erase->len); +} + +static void efx_mtd_sync(struct mtd_info *mtd) +{ + struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + int rc; + + rc = efx->type->mtd_sync(mtd); + if (rc) + pr_err("%s: %s sync failed (%d)\n", + part->name, part->dev_type_name, rc); +} + +static void efx_mtd_remove_partition(struct efx_mtd_partition *part) +{ + int rc; + + for (;;) { + rc = mtd_device_unregister(&part->mtd); + if (rc != -EBUSY) + break; + ssleep(1); + } + WARN_ON(rc); + list_del(&part->node); +} + +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part) +{ + struct efx_mtd_partition *part; + size_t i; + + for (i = 0; i < n_parts; i++) { + part = (struct efx_mtd_partition *)((char *)parts + + i * sizeof_part); + + part->mtd.writesize = 1; + + if (!(part->mtd.flags & MTD_NO_ERASE)) + part->mtd.flags |= MTD_WRITEABLE; + + part->mtd.owner = THIS_MODULE; + part->mtd.priv = efx; + part->mtd.name = part->name; + part->mtd._erase = efx_mtd_erase; + part->mtd._read = efx->type->mtd_read; + part->mtd._write = efx->type->mtd_write; + part->mtd._sync = efx_mtd_sync; + + efx->type->mtd_rename(part); + + if (mtd_device_register(&part->mtd, NULL, 0)) + goto fail; + + /* Add to list in order - efx_mtd_remove() depends on this */ + list_add_tail(&part->node, &efx->mtd_list); + } + + return 0; + +fail: + while (i--) { + part = (struct efx_mtd_partition *)((char *)parts + + i * sizeof_part); + efx_mtd_remove_partition(part); + } + /* Failure is unlikely here, but probably means we're out of memory */ + return -ENOMEM; +} + +void efx_mtd_remove(struct efx_nic *efx) +{ + struct efx_mtd_partition *parts, *part, *next; + + WARN_ON(efx_dev_registered(efx)); + + if (list_empty(&efx->mtd_list)) + return; + + parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition, + node); + + list_for_each_entry_safe(part, next, &efx->mtd_list, node) + efx_mtd_remove_partition(part); + + kfree(parts); +} + +void efx_mtd_rename(struct efx_nic *efx) +{ + struct efx_mtd_partition *part; + + ASSERT_RTNL(); + + list_for_each_entry(part, &efx->mtd_list, node) + efx->type->mtd_rename(part); +} diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h new file mode 100644 index 000000000000..318db906a154 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -0,0 +1,1716 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ 
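
A minimal sketch (illustration only, not part of this patch) of why efx_mtd_add() above takes a sizeof_part argument: callers normally embed struct efx_mtd_partition inside a larger, NIC-specific record and let the byte-wise stride walk that array without the core knowing the outer type. The wrapper struct and function names below are hypothetical, used only for this example:

	struct example_mtd_partition {
		struct efx_mtd_partition common;	/* shared MTD state */
		unsigned int nvram_type;		/* device-specific extra */
	};

	static int example_register_parts(struct efx_nic *efx,
					  struct example_mtd_partition *parts,
					  size_t n_parts)
	{
		/* efx_mtd_add() steps with (char *)parts + i * sizeof_part,
		 * so the size of the outer record is passed as the stride.
		 */
		return efx_mtd_add(efx, &parts[0].common, n_parts,
				   sizeof(parts[0]));
	}
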
+/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. + */ + +/* Common definitions for all Efx net driver code */ + +#ifndef EFX_NET_DRIVER_H +#define EFX_NET_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "enum.h" +#include "bitfield.h" +#include "filter.h" + +/************************************************************************** + * + * Build definitions + * + **************************************************************************/ + +#ifdef DEBUG +#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x) +#define EFX_WARN_ON_PARANOID(x) WARN_ON(x) +#else +#define EFX_WARN_ON_ONCE_PARANOID(x) do {} while (0) +#define EFX_WARN_ON_PARANOID(x) do {} while (0) +#endif + +/************************************************************************** + * + * Efx data structures + * + **************************************************************************/ + +#define EFX_MAX_CHANNELS 32U +#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS +#define EFX_EXTRA_CHANNEL_IOV 0 +#define EFX_EXTRA_CHANNEL_PTP 1 +#define EFX_MAX_EXTRA_CHANNELS 2U + +/* Checksum generation is a per-queue option in hardware, so each + * queue visible to the networking core is backed by two hardware TX + * queues. */ +#define EFX_MAX_TX_TC 2 +#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) +#define EFX_TXQ_TYPE_OUTER_CSUM 1 /* Outer checksum offload */ +#define EFX_TXQ_TYPE_INNER_CSUM 2 /* Inner checksum offload */ +#define EFX_TXQ_TYPE_HIGHPRI 4 /* High-priority (for TC) */ +#define EFX_TXQ_TYPES 8 +/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */ +#define EFX_MAX_TXQ_PER_CHANNEL 4 +#define EFX_MAX_TX_QUEUES (EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS) + +/* Maximum possible MTU the driver supports */ +#define EFX_MAX_MTU (9 * 1024) + +/* Minimum MTU, from RFC791 (IP) */ +#define EFX_MIN_MTU 68 + +/* Maximum total header length for TSOv2 */ +#define EFX_TSO2_MAX_HDRLEN 208 + +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, + * and should be a multiple of the cache line size. + */ +#define EFX_RX_USR_BUF_SIZE (2048 - 256) + +/* If possible, we should ensure cache line alignment at start and end + * of every buffer. Otherwise, we just need to ensure 4-byte + * alignment of the network header. + */ +#if NET_IP_ALIGN == 0 +#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES +#else +#define EFX_RX_BUF_ALIGNMENT 4 +#endif + +/* Non-standard XDP_PACKET_HEADROOM and tailroom to satisfy XDP_REDIRECT and + * still fit two standard MTU size packets into a single 4K page. + */ +#define EFX_XDP_HEADROOM 128 +#define EFX_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +/* Forward declare Precision Time Protocol (PTP) support structure. */ +struct efx_ptp_data; +struct hwtstamp_config; + +struct efx_self_tests; + +/** + * struct efx_buffer - A general-purpose DMA buffer + * @addr: host base address of the buffer + * @dma_addr: DMA base address of the buffer + * @len: Buffer length, in bytes + * + * The NIC uses these buffers for its interrupt status registers and + * MAC stats dumps. 
+ */ +struct efx_buffer { + void *addr; + dma_addr_t dma_addr; + unsigned int len; +}; + +/** + * struct efx_special_buffer - DMA buffer entered into buffer table + * @buf: Standard &struct efx_buffer + * @index: Buffer index within controller;s buffer table + * @entries: Number of buffer table entries + * + * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE. + * Event and descriptor rings are addressed via one or more buffer + * table entries (and so can be physically non-contiguous, although we + * currently do not take advantage of that). On Falcon and Siena we + * have to take care of allocating and initialising the entries + * ourselves. On later hardware this is managed by the firmware and + * @index and @entries are left as 0. + */ +struct efx_special_buffer { + struct efx_buffer buf; + unsigned int index; + unsigned int entries; +}; + +/** + * struct efx_tx_buffer - buffer state for a TX descriptor + * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be + * freed when descriptor completes + * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data + * member is the associated buffer to drop a page reference on. + * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option + * descriptor. + * @dma_addr: DMA address of the fragment. + * @flags: Flags for allocation and DMA mapping type + * @len: Length of this fragment. + * This field is zero when the queue slot is empty. + * @unmap_len: Length of this fragment to unmap + * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping. + * Only valid if @unmap_len != 0. + */ +struct efx_tx_buffer { + union { + const struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + union { + efx_qword_t option; /* EF10 */ + dma_addr_t dma_addr; + }; + unsigned short flags; + unsigned short len; + unsigned short unmap_len; + unsigned short dma_offset; +}; +#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */ +#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ +#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ +#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */ +#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */ +#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */ + +/** + * struct efx_tx_queue - An Efx TX queue + * + * This is a ring buffer of TX fragments. + * Since the TX completion path always executes on the same + * CPU and the xmit path can operate on different CPUs, + * performance is increased by ensuring that the completion + * path and the xmit path operate on different cache lines. + * This is particularly important if the xmit path is always + * executing on one CPU which is different from the completion + * path. There is also a cache line for members which are + * read but not written on the fast path. + * + * @efx: The associated Efx NIC + * @queue: DMA queue number + * @label: Label for TX completion events. + * Is our index within @channel->tx_queue array. + * @type: configuration type of this TX queue. A bitmask of %EFX_TXQ_TYPE_* flags. + * @tso_version: Version of TSO in use for this queue. + * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series. + * @channel: The associated channel + * @core_txq: The networking core TX queue structure + * @buffer: The software buffer ring + * @cb_page: Array of pages of copy buffers. Carved up according to + * %EFX_TX_CB_ORDER into %EFX_TX_CB_SIZE-sized chunks. 
+ * @txd: The hardware descriptor ring + * @ptr_mask: The size of the ring minus 1. + * @piobuf: PIO buffer region for this TX queue (shared with its partner). + * Size of the region is efx_piobuf_size. + * @piobuf_offset: Buffer offset to be specified in PIO descriptors + * @initialised: Has hardware queue been initialised? + * @timestamping: Is timestamping enabled for this channel? + * @xdp_tx: Is this an XDP tx queue? + * @read_count: Current read pointer. + * This is the number of buffers that have been removed from both rings. + * @old_write_count: The value of @write_count when last checked. + * This is here for performance reasons. The xmit path will + * only get the up-to-date value of @write_count if this + * variable indicates that the queue is empty. This is to + * avoid cache-line ping-pong between the xmit path and the + * completion path. + * @merge_events: Number of TX merged completion events + * @completed_timestamp_major: Top part of the most recent tx timestamp. + * @completed_timestamp_minor: Low part of the most recent tx timestamp. + * @insert_count: Current insert pointer + * This is the number of buffers that have been added to the + * software ring. + * @write_count: Current write pointer + * This is the number of buffers that have been added to the + * hardware ring. + * @packet_write_count: Completable write pointer + * This is the write pointer of the last packet written. + * Normally this will equal @write_count, but as option descriptors + * don't produce completion events, they won't update this. + * Filled in iff @efx->type->option_descriptors; only used for PIO. + * Thus, this is written and used on EF10, and neither on farch. + * @old_read_count: The value of read_count when last checked. + * This is here for performance reasons. The xmit path will + * only get the up-to-date value of read_count if this + * variable indicates that the queue is full. This is to + * avoid cache-line ping-pong between the xmit path and the + * completion path. + * @tso_bursts: Number of times TSO xmit invoked by kernel + * @tso_long_headers: Number of packets with headers too long for standard + * blocks + * @tso_packets: Number of packets via the TSO xmit path + * @tso_fallbacks: Number of times TSO fallback used + * @pushes: Number of times the TX push feature has been used + * @pio_packets: Number of times the TX PIO feature has been used + * @xmit_pending: Are any packets waiting to be pushed to the NIC + * @cb_packets: Number of times the TX copybreak feature has been used + * @notify_count: Count of notified descriptors to the NIC + * @empty_read_count: If the completion path has seen the queue as empty + * and the transmission path has not yet checked this, the value of + * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. 
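The @empty_read_count field documented above is a small lock-free handshake: when the completion path observes the queue drain, it publishes the current @read_count with %EFX_EMPTY_COUNT_VALID ORed in, and the xmit path later consumes that value to learn that the queue went empty behind its back (used, for example, by the TX push path). A much-simplified, purely illustrative sketch of the two halves follows; these helpers are not the driver's own and omit its extra checks.

/* Illustrative only: simplified empty-queue handshake, see the field
 * documentation above.  The real driver folds this into its TX completion
 * and descriptor-push logic with additional ordering and checks.
 */
static void example_completion_mark_empty(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->read_count == READ_ONCE(tx_queue->write_count)) {
		smp_mb();	/* completions visible before the flag */
		tx_queue->empty_read_count =
			tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
	}
}

static bool example_xmit_saw_empty(struct efx_tx_queue *tx_queue)
{
	unsigned int empty = READ_ONCE(tx_queue->empty_read_count);

	if (!(empty & EFX_EMPTY_COUNT_VALID))
		return false;
	tx_queue->empty_read_count = 0;		/* consume the notification */
	return (empty & ~EFX_EMPTY_COUNT_VALID) == tx_queue->write_count;
}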
+ */ +struct efx_tx_queue { + /* Members which don't change on the fast path */ + struct efx_nic *efx ____cacheline_aligned_in_smp; + unsigned int queue; + unsigned int label; + unsigned int type; + unsigned int tso_version; + bool tso_encap; + struct efx_channel *channel; + struct netdev_queue *core_txq; + struct efx_tx_buffer *buffer; + struct efx_buffer *cb_page; + struct efx_special_buffer txd; + unsigned int ptr_mask; + void __iomem *piobuf; + unsigned int piobuf_offset; + bool initialised; + bool timestamping; + bool xdp_tx; + + /* Members used mainly on the completion path */ + unsigned int read_count ____cacheline_aligned_in_smp; + unsigned int old_write_count; + unsigned int merge_events; + unsigned int bytes_compl; + unsigned int pkts_compl; + u32 completed_timestamp_major; + u32 completed_timestamp_minor; + + /* Members used only on the xmit path */ + unsigned int insert_count ____cacheline_aligned_in_smp; + unsigned int write_count; + unsigned int packet_write_count; + unsigned int old_read_count; + unsigned int tso_bursts; + unsigned int tso_long_headers; + unsigned int tso_packets; + unsigned int tso_fallbacks; + unsigned int pushes; + unsigned int pio_packets; + bool xmit_pending; + unsigned int cb_packets; + unsigned int notify_count; + /* Statistics to supplement MAC stats */ + unsigned long tx_packets; + + /* Members shared between paths and sometimes updated */ + unsigned int empty_read_count ____cacheline_aligned_in_smp; +#define EFX_EMPTY_COUNT_VALID 0x80000000 + atomic_t flush_outstanding; +}; + +#define EFX_TX_CB_ORDER 7 +#define EFX_TX_CB_SIZE (1 << EFX_TX_CB_ORDER) - NET_IP_ALIGN + +/** + * struct efx_rx_buffer - An Efx RX data buffer + * @dma_addr: DMA base address of the buffer + * @page: The associated page buffer. + * Will be %NULL if the buffer slot is currently free. + * @page_offset: If pending: offset in @page of DMA base address. + * If completed: offset in @page of Ethernet header. + * @len: If pending: length for DMA descriptor. + * If completed: received length, excluding hash prefix. + * @flags: Flags for buffer and packet state. These are only set on the + * first buffer of a scattered packet. + */ +struct efx_rx_buffer { + dma_addr_t dma_addr; + struct page *page; + u16 page_offset; + u16 len; + u16 flags; +}; +#define EFX_RX_BUF_LAST_IN_PAGE 0x0001 +#define EFX_RX_PKT_CSUMMED 0x0002 +#define EFX_RX_PKT_DISCARD 0x0004 +#define EFX_RX_PKT_TCP 0x0040 +#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */ +#define EFX_RX_PKT_CSUM_LEVEL 0x0200 + +/** + * struct efx_rx_page_state - Page-based rx buffer state + * + * Inserted at the start of every page allocated for receive buffers. + * Used to facilitate sharing dma mappings between recycled rx buffers + * and those passed up to the kernel. + * + * @dma_addr: The dma address of this page. + */ +struct efx_rx_page_state { + dma_addr_t dma_addr; + + unsigned int __pad[] ____cacheline_aligned; +}; + +/** + * struct efx_rx_queue - An Efx RX queue + * @efx: The associated Efx NIC + * @core_index: Index of network core RX queue. Will be >= 0 iff this + * is associated with a real RX queue. + * @buffer: The software buffer ring + * @rxd: The hardware descriptor ring + * @ptr_mask: The size of the ring minus 1. + * @refill_enabled: Enable refill whenever fill level is low + * @flush_pending: Set when a RX flush is pending. Has the same lifetime as + * @rxq_flush_pending. + * @added_count: Number of buffers added to the receive queue. 
+ * @notified_count: Number of buffers given to NIC (<= @added_count). + * @removed_count: Number of buffers removed from the receive queue. + * @scatter_n: Used by NIC specific receive code. + * @scatter_len: Used by NIC specific receive code. + * @page_ring: The ring to store DMA mapped pages for reuse. + * @page_add: Counter to calculate the write pointer for the recycle ring. + * @page_remove: Counter to calculate the read pointer for the recycle ring. + * @page_recycle_count: The number of pages that have been recycled. + * @page_recycle_failed: The number of pages that couldn't be recycled because + * the kernel still held a reference to them. + * @page_recycle_full: The number of pages that were released because the + * recycle ring was full. + * @page_ptr_mask: The number of pages in the RX recycle ring minus 1. + * @max_fill: RX descriptor maximum fill level (<= ring size) + * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill + * (<= @max_fill) + * @min_fill: RX descriptor minimum non-zero fill level. + * This records the minimum fill level observed when a ring + * refill was triggered. + * @recycle_count: RX buffer recycle counter. + * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). + * @xdp_rxq_info: XDP specific RX queue information. + * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?. + */ +struct efx_rx_queue { + struct efx_nic *efx; + int core_index; + struct efx_rx_buffer *buffer; + struct efx_special_buffer rxd; + unsigned int ptr_mask; + bool refill_enabled; + bool flush_pending; + + unsigned int added_count; + unsigned int notified_count; + unsigned int removed_count; + unsigned int scatter_n; + unsigned int scatter_len; + struct page **page_ring; + unsigned int page_add; + unsigned int page_remove; + unsigned int page_recycle_count; + unsigned int page_recycle_failed; + unsigned int page_recycle_full; + unsigned int page_ptr_mask; + unsigned int max_fill; + unsigned int fast_fill_trigger; + unsigned int min_fill; + unsigned int min_overfill; + unsigned int recycle_count; + struct timer_list slow_fill; + unsigned int slow_fill_count; + /* Statistics to supplement MAC stats */ + unsigned long rx_packets; + struct xdp_rxq_info xdp_rxq_info; + bool xdp_rxq_info_valid; +}; + +enum efx_sync_events_state { + SYNC_EVENTS_DISABLED = 0, + SYNC_EVENTS_QUIESCENT, + SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID, +}; + +/** + * struct efx_channel - An Efx channel + * + * A channel comprises an event queue, at least one TX queue, at least + * one RX queue, and an associated tasklet for processing the event + * queue. 
+ * + * @efx: Associated Efx NIC + * @channel: Channel instance number + * @type: Channel type definition + * @eventq_init: Event queue initialised flag + * @enabled: Channel enabled indicator + * @irq: IRQ number (MSI and MSI-X only) + * @irq_moderation_us: IRQ moderation value (in microseconds) + * @napi_dev: Net device used with NAPI + * @napi_str: NAPI control structure + * @state: state for NAPI vs busy polling + * @state_lock: lock protecting @state + * @eventq: Event queue buffer + * @eventq_mask: Event queue pointer mask + * @eventq_read_ptr: Event queue read pointer + * @event_test_cpu: Last CPU to handle interrupt or test event for this channel + * @irq_count: Number of IRQs since last adaptive moderation decision + * @irq_mod_score: IRQ moderation score + * @rfs_filter_count: number of accelerated RFS filters currently in place; + * equals the count of @rps_flow_id slots filled + * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters + * were checked for expiry + * @rfs_expire_index: next accelerated RFS filter ID to check for expiry + * @n_rfs_succeeded: number of successful accelerated RFS filter insertions + * @n_rfs_failed: number of failed accelerated RFS filter insertions + * @filter_work: Work item for efx_filter_rfs_expire() + * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, + * indexed by filter ID + * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors + * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors + * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors + * @n_rx_mcast_mismatch: Count of unmatched multicast frames + * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors + * @n_rx_overlength: Count of RX_OVERLENGTH errors + * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun + * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to + * lack of descriptors + * @n_rx_merge_events: Number of RX merged completion events + * @n_rx_merge_packets: Number of RX packets completed by merged events + * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP + * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors + * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP + * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP + * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by + * __efx_rx_packet(), or zero if there is none + * @rx_pkt_index: Ring index of first buffer for next packet to be delivered + * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 + * @rx_list: list of SKBs from current RX, awaiting processing + * @rx_queue: RX queue for this channel + * @tx_queue: TX queues for this channel + * @tx_queue_by_type: pointers into @tx_queue, or %NULL, indexed by txq type + * @sync_events_state: Current state of sync events on this channel + * @sync_timestamp_major: Major part of the last ptp sync event + * @sync_timestamp_minor: Minor part of the last ptp sync event + */ +struct efx_channel { + struct efx_nic *efx; + int channel; + const struct efx_channel_type *type; + bool eventq_init; + bool enabled; + int irq; + unsigned int irq_moderation_us; + struct net_device *napi_dev; + struct napi_struct napi_str; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned long busy_poll_state; +#endif + struct efx_special_buffer eventq; + unsigned int eventq_mask; + unsigned int eventq_read_ptr; + int event_test_cpu; + + unsigned int irq_count; + unsigned int irq_mod_score; +#ifdef CONFIG_RFS_ACCEL + unsigned int rfs_filter_count; + 
unsigned int rfs_last_expiry; + unsigned int rfs_expire_index; + unsigned int n_rfs_succeeded; + unsigned int n_rfs_failed; + struct delayed_work filter_work; +#define RPS_FLOW_ID_INVALID 0xFFFFFFFF + u32 *rps_flow_id; +#endif + + unsigned int n_rx_tobe_disc; + unsigned int n_rx_ip_hdr_chksum_err; + unsigned int n_rx_tcp_udp_chksum_err; + unsigned int n_rx_outer_ip_hdr_chksum_err; + unsigned int n_rx_outer_tcp_udp_chksum_err; + unsigned int n_rx_inner_ip_hdr_chksum_err; + unsigned int n_rx_inner_tcp_udp_chksum_err; + unsigned int n_rx_eth_crc_err; + unsigned int n_rx_mcast_mismatch; + unsigned int n_rx_frm_trunc; + unsigned int n_rx_overlength; + unsigned int n_skbuff_leaks; + unsigned int n_rx_nodesc_trunc; + unsigned int n_rx_merge_events; + unsigned int n_rx_merge_packets; + unsigned int n_rx_xdp_drops; + unsigned int n_rx_xdp_bad_drops; + unsigned int n_rx_xdp_tx; + unsigned int n_rx_xdp_redirect; + + unsigned int rx_pkt_n_frags; + unsigned int rx_pkt_index; + + struct list_head *rx_list; + + struct efx_rx_queue rx_queue; + struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL]; + struct efx_tx_queue *tx_queue_by_type[EFX_TXQ_TYPES]; + + enum efx_sync_events_state sync_events_state; + u32 sync_timestamp_major; + u32 sync_timestamp_minor; +}; + +/** + * struct efx_msi_context - Context for each MSI + * @efx: The associated NIC + * @index: Index of the channel/IRQ + * @name: Name of the channel/IRQ + * + * Unlike &struct efx_channel, this is never reallocated and is always + * safe for the IRQ handler to access. + */ +struct efx_msi_context { + struct efx_nic *efx; + unsigned int index; + char name[IFNAMSIZ + 6]; +}; + +/** + * struct efx_channel_type - distinguishes traffic and extra channels + * @handle_no_channel: Handle failure to allocate an extra channel + * @pre_probe: Set up extra state prior to initialisation + * @post_remove: Tear down extra state after finalisation, if allocated. + * May be called on channels that have not been probed. + * @get_name: Generate the channel's name (used for its IRQ handler) + * @copy: Copy the channel state prior to reallocation. May be %NULL if + * reallocation is not supported. + * @receive_skb: Handle an skb ready to be passed to netif_receive_skb() + * @want_txqs: Determine whether this channel should have TX queues + * created. If %NULL, TX queues are not created. + * @keep_eventq: Flag for whether event queue should be kept initialised + * while the device is stopped + * @want_pio: Flag for whether PIO buffers should be linked to this + * channel's TX queues. + */ +struct efx_channel_type { + void (*handle_no_channel)(struct efx_nic *); + int (*pre_probe)(struct efx_channel *); + void (*post_remove)(struct efx_channel *); + void (*get_name)(struct efx_channel *, char *buf, size_t len); + struct efx_channel *(*copy)(const struct efx_channel *); + bool (*receive_skb)(struct efx_channel *, struct sk_buff *); + bool (*want_txqs)(struct efx_channel *); + bool keep_eventq; + bool want_pio; +}; + +enum efx_led_mode { + EFX_LED_OFF = 0, + EFX_LED_ON = 1, + EFX_LED_DEFAULT = 2 +}; + +#define STRING_TABLE_LOOKUP(val, member) \ + ((val) < member ## _max) ? 
member ## _names[val] : "(invalid)" + +extern const char *const efx_loopback_mode_names[]; +extern const unsigned int efx_loopback_mode_max; +#define LOOPBACK_MODE(efx) \ + STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) + +enum efx_int_mode { + /* Be careful if altering to correct macro below */ + EFX_INT_MODE_MSIX = 0, + EFX_INT_MODE_MSI = 1, + EFX_INT_MODE_LEGACY = 2, + EFX_INT_MODE_MAX /* Insert any new items before this */ +}; +#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) + +enum nic_state { + STATE_UNINIT = 0, /* device being probed/removed or is frozen */ + STATE_READY = 1, /* hardware ready and netdev registered */ + STATE_DISABLED = 2, /* device disabled due to hardware errors */ + STATE_RECOVERY = 3, /* device recovering from PCI error */ +}; + +/* Forward declaration */ +struct efx_nic; + +/* Pseudo bit-mask flow control field */ +#define EFX_FC_RX FLOW_CTRL_RX +#define EFX_FC_TX FLOW_CTRL_TX +#define EFX_FC_AUTO 4 + +/** + * struct efx_link_state - Current state of the link + * @up: Link is up + * @fd: Link is full-duplex + * @fc: Actual flow control flags + * @speed: Link speed (Mbps) + */ +struct efx_link_state { + bool up; + bool fd; + u8 fc; + unsigned int speed; +}; + +static inline bool efx_link_state_equal(const struct efx_link_state *left, + const struct efx_link_state *right) +{ + return left->up == right->up && left->fd == right->fd && + left->fc == right->fc && left->speed == right->speed; +} + +/** + * enum efx_phy_mode - PHY operating mode flags + * @PHY_MODE_NORMAL: on and should pass traffic + * @PHY_MODE_TX_DISABLED: on with TX disabled + * @PHY_MODE_LOW_POWER: set to low power through MDIO + * @PHY_MODE_OFF: switched off through external control + * @PHY_MODE_SPECIAL: on but will not pass traffic + */ +enum efx_phy_mode { + PHY_MODE_NORMAL = 0, + PHY_MODE_TX_DISABLED = 1, + PHY_MODE_LOW_POWER = 2, + PHY_MODE_OFF = 4, + PHY_MODE_SPECIAL = 8, +}; + +static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) +{ + return !!(mode & ~PHY_MODE_TX_DISABLED); +} + +/** + * struct efx_hw_stat_desc - Description of a hardware statistic + * @name: Name of the statistic as visible through ethtool, or %NULL if + * it should not be exposed + * @dma_width: Width in bits (0 for non-DMA statistics) + * @offset: Offset within stats (ignored for non-DMA statistics) + */ +struct efx_hw_stat_desc { + const char *name; + u16 dma_width; + u16 offset; +}; + +/* Number of bits used in a multicast filter hash address */ +#define EFX_MCAST_HASH_BITS 8 + +/* Number of (single-bit) entries in a multicast filter hash */ +#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) + +/* An Efx multicast filter hash */ +union efx_multicast_hash { + u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; + efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; +}; + +struct vfdi_status; + +/* The reserved RSS context value */ +#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff +/** + * struct efx_rss_context - A user-defined RSS context for filtering + * @list: node of linked list on which this struct is stored + * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or + * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC. + * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID. + * @user_id: the rss_context ID exposed to userspace over ethtool. 
+ * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled + * @rx_hash_key: Toeplitz hash key for this RSS context + * @indir_table: Indirection table for this RSS context + */ +struct efx_rss_context { + struct list_head list; + u32 context_id; + u32 user_id; + bool rx_hash_udp_4tuple; + u8 rx_hash_key[40]; + u32 rx_indir_table[128]; +}; + +#ifdef CONFIG_RFS_ACCEL +/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING + * is used to test if filter does or will exist. + */ +#define EFX_ARFS_FILTER_ID_PENDING -1 +#define EFX_ARFS_FILTER_ID_ERROR -2 +#define EFX_ARFS_FILTER_ID_REMOVING -3 +/** + * struct efx_arfs_rule - record of an ARFS filter and its IDs + * @node: linkage into hash table + * @spec: details of the filter (used as key for hash table). Use efx->type to + * determine which member to use. + * @rxq_index: channel to which the filter will steer traffic. + * @arfs_id: filter ID which was returned to ARFS + * @filter_id: index in software filter table. May be + * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet, + * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or + * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter. + */ +struct efx_arfs_rule { + struct hlist_node node; + struct efx_filter_spec spec; + u16 rxq_index; + u16 arfs_id; + s32 filter_id; +}; + +/* Size chosen so that the table is one page (4kB) */ +#define EFX_ARFS_HASH_TABLE_SIZE 512 + +/** + * struct efx_async_filter_insertion - Request to asynchronously insert a filter + * @net_dev: Reference to the netdevice + * @spec: The filter to insert + * @work: Workitem for this request + * @rxq_index: Identifies the channel for which this request was made + * @flow_id: Identifies the kernel-side flow for which this request was made + */ +struct efx_async_filter_insertion { + struct net_device *net_dev; + struct efx_filter_spec spec; + struct work_struct work; + u16 rxq_index; + u32 flow_id; +}; + +/* Maximum number of ARFS workitems that may be in flight on an efx_nic */ +#define EFX_RPS_MAX_IN_FLIGHT 8 +#endif /* CONFIG_RFS_ACCEL */ + +enum efx_xdp_tx_queues_mode { + EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */ + EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */ + EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */ +}; + +/** + * struct efx_nic - an Efx NIC + * @name: Device name (net device name or bus id before net device registered) + * @pci_dev: The PCI device + * @node: List node for maintaning primary/secondary function lists + * @primary: &struct efx_nic instance for the primary function of this + * controller. May be the same structure, and may be %NULL if no + * primary function is bound. Serialised by rtnl_lock. + * @secondary_list: List of &struct efx_nic instances for the secondary PCI + * functions of the controller, if this is for the primary function. + * Serialised by rtnl_lock. + * @type: Controller type attributes + * @legacy_irq: IRQ number + * @workqueue: Workqueue for port reconfigures and the HW monitor. + * Work items do not hold and must not acquire RTNL. 
+ * @workqueue_name: Name of workqueue + * @reset_work: Scheduled reset workitem + * @membase_phys: Memory BAR value as physical address + * @membase: Memory BAR value + * @vi_stride: step between per-VI registers / memory regions + * @interrupt_mode: Interrupt mode + * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds + * @timer_max_ns: Interrupt timer maximum value, in nanoseconds + * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues + * @irqs_hooked: Channel interrupts are hooked + * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues + * @irq_rx_moderation_us: IRQ moderation time for RX event queues + * @msg_enable: Log message enable flags + * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. + * @reset_pending: Bitmask for pending resets + * @tx_queue: TX DMA queues + * @rx_queue: RX DMA queues + * @channel: Channels + * @msi_context: Context for each MSI + * @extra_channel_types: Types of extra (non-traffic) channels that + * should be allocated for this NIC + * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues. + * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit. + * @xdp_txq_queues_mode: XDP TX queues sharing strategy. + * @rxq_entries: Size of receive queues requested by user. + * @txq_entries: Size of transmit queues requested by user. + * @txq_stop_thresh: TX queue fill level at or above which we stop it. + * @txq_wake_thresh: TX queue fill level at or below which we wake it. + * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches + * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches + * @sram_lim_qw: Qword address limit of SRAM + * @next_buffer_table: First available buffer table id + * @n_channels: Number of channels in use + * @n_rx_channels: Number of channels used for RX (= number of RX queues) + * @n_tx_channels: Number of channels used for TX + * @n_extra_tx_channels: Number of extra channels with TX queues + * @tx_queues_per_channel: number of TX queues probed on each channel + * @n_xdp_channels: Number of channels used for XDP TX + * @xdp_channel_offset: Offset of zeroth channel used for XPD TX. + * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel. + * @rx_ip_align: RX DMA address offset to have IP header aligned in + * in accordance with NET_IP_ALIGN + * @rx_dma_len: Current maximum RX DMA length + * @rx_buffer_order: Order (log2) of number of pages for each RX buffer + * @rx_buffer_truesize: Amortised allocation size of an RX buffer, + * for use in sk_buff::truesize + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data + * (valid only if @rx_prefix_size != 0; always negative) + * @rx_packet_len_offset: Offset of RX packet length from start of packet data + * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative) + * @rx_packet_ts_offset: Offset of timestamp from start of packet data + * (valid only if channel->sync_timestamps_enabled; always negative) + * @rx_scatter: Scatter mode enabled for receives + * @rss_context: Main RSS context. 
Its @list member is the head of the list of + * RSS contexts created by user requests + * @rss_lock: Protects custom RSS context software state in @rss_context.list + * @vport_id: The function's vport ID, only relevant for PFs + * @int_error_count: Number of internal errors seen recently + * @int_error_expire: Time at which error count will be expired + * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot + * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will + * acknowledge but do nothing else. + * @irq_status: Interrupt status buffer + * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 + * @irq_level: IRQ level/index for IRQs not triggered by an event queue + * @selftest_work: Work item for asynchronous self-test + * @mtd_list: List of MTDs attached to the NIC + * @nic_data: Hardware dependent state + * @mcdi: Management-Controller-to-Driver Interface state + * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, + * efx_monitor() and efx_reconfigure_port() + * @port_enabled: Port enabled indicator. + * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and + * efx_mac_work() with kernel interfaces. Safe to read under any + * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must + * be held to modify it. + * @port_initialized: Port initialized? + * @net_dev: Operating system network device. Consider holding the rtnl lock + * @fixed_features: Features which cannot be turned off + * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS + * field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS) + * @stats_buffer: DMA buffer for statistics + * @phy_type: PHY type + * @phy_data: PHY private data (including PHY-specific stats) + * @mdio: PHY MDIO interface + * @mdio_bus: PHY MDIO bus ID (only used by Siena) + * @phy_mode: PHY operating mode. Serialised by @mac_lock. + * @link_advertising: Autonegotiation advertising flags + * @fec_config: Forward Error Correction configuration flags. For bit positions + * see &enum ethtool_fec_config_bits. + * @link_state: Current state of the link + * @n_link_state_changes: Number of times the link has changed state + * @unicast_filter: Flag for Falcon-arch simple unicast filter. + * Protected by @mac_lock. + * @multicast_hash: Multicast hash table for Falcon-arch. + * Protected by @mac_lock. + * @wanted_fc: Wanted flow control flags + * @fc_disable: When non-zero flow control is disabled. Typically used to + * ensure that network back pressure doesn't delay dma queue flushes. + * Serialised by the rtnl lock. + * @mac_work: Work item for changing MAC promiscuity and multicast hash + * @loopback_mode: Loopback status + * @loopback_modes: Supported loopback mode bitmask + * @loopback_selftest: Offline self-test private state + * @xdp_prog: Current XDP programme for this interface + * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state + * @filter_state: Architecture-dependent filter table state + * @rps_mutex: Protects RPS state of all channels + * @rps_slot_map: bitmap of in-flight entries in @rps_slot + * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() + * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and + * @rps_next_id). + * @rps_hash_table: Mapping between ARFS filters and their various IDs + * @rps_next_id: next arfs_id for an ARFS filter + * @active_queues: Count of RX and TX queues that haven't been flushed and drained. 
+ * @rxq_flush_pending: Count of number of receive queues that need to be flushed. + * Decremented when the efx_flush_rx_queue() is called. + * @rxq_flush_outstanding: Count of number of RX flushes started but not yet + * completed (either success or failure). Not used when MCDI is used to + * flush receive queues. + * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions. + * @vf_count: Number of VFs intended to be enabled. + * @vf_init_count: Number of VFs that have been fully initialised. + * @vi_scale: log2 number of vnics per VF. + * @ptp_data: PTP state data + * @ptp_warned: has this NIC seen and warned about unexpected PTP events? + * @vpd_sn: Serial number read from VPD + * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their + * xdp_rxq_info structures? + * @netdev_notifier: Netdevice notifier. + * @mem_bar: The BAR that is mapped into membase. + * @reg_base: Offset from the start of the bar to the function control window. + * @monitor_work: Hardware monitor workitem + * @biu_lock: BIU (bus interface unit) lock + * @last_irq_cpu: Last CPU to handle a possible test interrupt. This + * field is used by efx_test_interrupts() to verify that an + * interrupt has occurred. + * @stats_lock: Statistics update lock. Must be held when calling + * efx_nic_type::{update,start,stop}_stats. + * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb + * + * This is stored in the private area of the &struct net_device. + */ +struct efx_nic { + /* The following fields should be written very rarely */ + + char name[IFNAMSIZ]; + struct list_head node; + struct efx_nic *primary; + struct list_head secondary_list; + struct pci_dev *pci_dev; + unsigned int port_num; + const struct efx_nic_type *type; + int legacy_irq; + bool eeh_disabled_legacy_irq; + struct workqueue_struct *workqueue; + char workqueue_name[16]; + struct work_struct reset_work; + resource_size_t membase_phys; + void __iomem *membase; + + unsigned int vi_stride; + + enum efx_int_mode interrupt_mode; + unsigned int timer_quantum_ns; + unsigned int timer_max_ns; + bool irq_rx_adaptive; + bool irqs_hooked; + unsigned int irq_mod_step_us; + unsigned int irq_rx_moderation_us; + u32 msg_enable; + + enum nic_state state; + unsigned long reset_pending; + + struct efx_channel *channel[EFX_MAX_CHANNELS]; + struct efx_msi_context msi_context[EFX_MAX_CHANNELS]; + const struct efx_channel_type * + extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; + + unsigned int xdp_tx_queue_count; + struct efx_tx_queue **xdp_tx_queues; + enum efx_xdp_tx_queues_mode xdp_txq_queues_mode; + + unsigned rxq_entries; + unsigned txq_entries; + unsigned int txq_stop_thresh; + unsigned int txq_wake_thresh; + + unsigned tx_dc_base; + unsigned rx_dc_base; + unsigned sram_lim_qw; + unsigned next_buffer_table; + + unsigned int max_channels; + unsigned int max_vis; + unsigned int max_tx_channels; + unsigned n_channels; + unsigned n_rx_channels; + unsigned rss_spread; + unsigned tx_channel_offset; + unsigned n_tx_channels; + unsigned n_extra_tx_channels; + unsigned int tx_queues_per_channel; + unsigned int n_xdp_channels; + unsigned int xdp_channel_offset; + unsigned int xdp_tx_per_channel; + unsigned int rx_ip_align; + unsigned int rx_dma_len; + unsigned int rx_buffer_order; + unsigned int rx_buffer_truesize; + unsigned int rx_page_buf_step; + unsigned int rx_bufs_per_page; + unsigned int rx_pages_per_batch; + unsigned int rx_prefix_size; + int rx_packet_hash_offset; + int rx_packet_len_offset; + int 
rx_packet_ts_offset; + bool rx_scatter; + struct efx_rss_context rss_context; + struct mutex rss_lock; + u32 vport_id; + + unsigned int_error_count; + unsigned long int_error_expire; + + bool must_realloc_vis; + bool irq_soft_enabled; + struct efx_buffer irq_status; + unsigned irq_zero_count; + unsigned irq_level; + struct delayed_work selftest_work; + +#ifdef CONFIG_SFC_MTD + struct list_head mtd_list; +#endif + + void *nic_data; + struct efx_mcdi_data *mcdi; + + struct mutex mac_lock; + struct work_struct mac_work; + bool port_enabled; + + bool mc_bist_for_other_fn; + bool port_initialized; + struct net_device *net_dev; + + netdev_features_t fixed_features; + + u16 num_mac_stats; + struct efx_buffer stats_buffer; + u64 rx_nodesc_drops_total; + u64 rx_nodesc_drops_while_down; + bool rx_nodesc_drops_prev_state; + + unsigned int phy_type; + void *phy_data; + struct mdio_if_info mdio; + unsigned int mdio_bus; + enum efx_phy_mode phy_mode; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); + u32 fec_config; + struct efx_link_state link_state; + unsigned int n_link_state_changes; + + bool unicast_filter; + union efx_multicast_hash multicast_hash; + u8 wanted_fc; + unsigned fc_disable; + + atomic_t rx_reset; + enum efx_loopback_mode loopback_mode; + u64 loopback_modes; + + void *loopback_selftest; + /* We access loopback_selftest immediately before running XDP, + * so we want them next to each other. + */ + struct bpf_prog __rcu *xdp_prog; + + struct rw_semaphore filter_sem; + void *filter_state; +#ifdef CONFIG_RFS_ACCEL + struct mutex rps_mutex; + unsigned long rps_slot_map; + struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; + spinlock_t rps_hash_lock; + struct hlist_head *rps_hash_table; + u32 rps_next_id; +#endif + + atomic_t active_queues; + atomic_t rxq_flush_pending; + atomic_t rxq_flush_outstanding; + wait_queue_head_t flush_wq; + +#ifdef CONFIG_SFC_SRIOV + unsigned vf_count; + unsigned vf_init_count; + unsigned vi_scale; +#endif + + struct efx_ptp_data *ptp_data; + bool ptp_warned; + + char *vpd_sn; + bool xdp_rxq_info_failed; + + struct notifier_block netdev_notifier; + + unsigned int mem_bar; + u32 reg_base; + + /* The following fields may be written more often */ + + struct delayed_work monitor_work ____cacheline_aligned_in_smp; + spinlock_t biu_lock; + int last_irq_cpu; + spinlock_t stats_lock; + atomic_t n_rx_noskb_drops; +}; + +static inline int efx_dev_registered(struct efx_nic *efx) +{ + return efx->net_dev->reg_state == NETREG_REGISTERED; +} + +static inline unsigned int efx_port_num(struct efx_nic *efx) +{ + return efx->port_num; +} + +struct efx_mtd_partition { + struct list_head node; + struct mtd_info mtd; + const char *dev_type_name; + const char *type_name; + char name[IFNAMSIZ + 20]; +}; + +struct efx_udp_tunnel { +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID 0xffff + u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */ + __be16 port; +}; + +/** + * struct efx_nic_type - Efx device type definition + * @mem_bar: Get the memory BAR + * @mem_map_size: Get memory BAR mapped size + * @probe: Probe the controller + * @remove: Free resources allocated by probe() + * @init: Initialise the controller + * @dimension_resources: Dimension controller resources (buffer table, + * and VIs once the available interrupt resources are clear) + * @fini: Shut down the controller + * @monitor: Periodic function for polling link state and hardware monitor + * @map_reset_reason: Map ethtool reset reason to a reset method + * @map_reset_flags: Map ethtool reset 
flags to a reset method, if possible + * @reset: Reset the controller hardware and possibly the PHY. This will + * be called while the controller is uninitialised. + * @probe_port: Probe the MAC and PHY + * @remove_port: Free resources allocated by probe_port() + * @handle_global_event: Handle a "global" event (may be %NULL) + * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) + * @prepare_flush: Prepare the hardware for flushing the DMA queues + * (for Falcon architecture) + * @finish_flush: Clean up after flushing the DMA queues (for Falcon + * architecture) + * @prepare_flr: Prepare for an FLR + * @finish_flr: Clean up after an FLR + * @describe_stats: Describe statistics for ethtool + * @update_stats: Update statistics not provided by event handling. + * Either argument may be %NULL. + * @update_stats_atomic: Update statistics while in atomic context, if that + * is more limiting than @update_stats. Otherwise, leave %NULL and + * driver core will call @update_stats. + * @start_stats: Start the regular fetching of statistics + * @pull_stats: Pull stats from the NIC and wait until they arrive. + * @stop_stats: Stop the regular fetching of statistics + * @push_irq_moderation: Apply interrupt moderation value + * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY + * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL) + * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings + * to the hardware. Serialised by the mac_lock. + * @check_mac_fault: Check MAC fault state. True if fault present. + * @get_wol: Get WoL configuration from driver state + * @set_wol: Push WoL configuration to the NIC + * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) + * @get_fec_stats: Get standard FEC statistics. + * @test_chip: Test registers. May use efx_farch_test_registers(), and is + * expected to reset the NIC. + * @test_nvram: Test validity of NVRAM contents + * @mcdi_request: Send an MCDI request with the given header and SDU. + * The SDU length may be any value from 0 up to the protocol- + * defined maximum, but its buffer will be padded to a multiple + * of 4 bytes. + * @mcdi_poll_response: Test whether an MCDI response is available. + * @mcdi_read_response: Read the MCDI response PDU. The offset will + * be a multiple of 4. The length may not be, but the buffer + * will be padded so it is safe to round up. + * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, + * return an appropriate error code for aborting any current + * request; otherwise return 0. + * @irq_enable_master: Enable IRQs on the NIC. Each event queue must + * be separately enabled after this. + * @irq_test_generate: Generate a test IRQ + * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event + * queue must be separately disabled before this. + * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is + * a pointer to the &struct efx_msi_context for the channel. + * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument + * is a pointer to the &struct efx_nic. 
+ * @tx_probe: Allocate resources for TX queue (and select TXQ type) + * @tx_init: Initialise TX queue on the NIC + * @tx_remove: Free resources for TX queue + * @tx_write: Write TX descriptors and doorbell + * @tx_enqueue: Add an SKB to TX queue + * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC + * @rx_push_rss_context_config: Write RSS hash key and indirection table for + * user RSS context to the NIC + * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user + * RSS context back from the NIC + * @rx_probe: Allocate resources for RX queue + * @rx_init: Initialise RX queue on the NIC + * @rx_remove: Free resources for RX queue + * @rx_write: Write RX descriptors and doorbell + * @rx_defer_refill: Generate a refill reminder event + * @rx_packet: Receive the queued RX buffer on a channel + * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash + * @ev_probe: Allocate resources for event queue + * @ev_init: Initialise event queue on the NIC + * @ev_fini: Deinitialise event queue on the NIC + * @ev_remove: Free resources for event queue + * @ev_process: Process events for a queue, up to the given NAPI quota + * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ + * @ev_test_generate: Generate a test event + * @filter_table_probe: Probe filter capabilities and set up filter software state + * @filter_table_restore: Restore filters removed from hardware + * @filter_table_remove: Remove filters from hardware and tear down software state + * @filter_update_rx_scatter: Update filters after change to rx scatter setting + * @filter_insert: add or replace a filter + * @filter_remove_safe: remove a filter by ID, carefully + * @filter_get_safe: retrieve a filter by ID, carefully + * @filter_clear_rx: Remove all RX filters whose priority is less than or + * equal to the given priority and is not %EFX_FILTER_PRI_AUTO + * @filter_count_rx_used: Get the number of filters in use at a given priority + * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 + * @filter_get_rx_ids: Get list of RX filters at a given priority + * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. + * This must check whether the specified table entry is used by RFS + * and that rps_may_expire_flow() returns true for it. + * @mtd_probe: Probe and add MTD partitions associated with this net device, + * using efx_mtd_add() + * @mtd_rename: Set an MTD partition name using the net device name + * @mtd_read: Read from an MTD partition + * @mtd_erase: Erase part of an MTD partition + * @mtd_write: Write to an MTD partition + * @mtd_sync: Wait for write-back to complete on MTD partition. This + * also notifies the driver that a writer has finished using this + * partition. + * @ptp_write_host_time: Send host time to MC as part of sync protocol + * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX + * timestamping, possibly only temporarily for the purposes of a reset. + * @ptp_set_ts_config: Set hardware timestamp configuration. The flags + * and tx_type will already have been validated but this operation + * must validate and update rx_filter. + * @get_phys_port_id: Get the underlying physical port id. + * @set_mac_address: Set the MAC address of the device + * @tso_versions: Returns mask of firmware-assisted TSO versions supported. + * If %NULL, then device does not support any TSO version. 
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required. + * @udp_tnl_has_port: Check if a port has been added as UDP tunnel + * @print_additional_fwver: Dump NIC-specific additional FW version info + * @sensor_event: Handle a sensor event from MCDI + * @rx_recycle_ring_size: Size of the RX recycle ring + * @revision: Hardware architecture revision + * @txd_ptr_tbl_base: TX descriptor ring base address + * @rxd_ptr_tbl_base: RX descriptor ring base address + * @buf_tbl_base: Buffer table base address + * @evq_ptr_tbl_base: Event queue pointer table base address + * @evq_rptr_tbl_base: Event queue read-pointer table base address + * @max_dma_mask: Maximum possible DMA mask + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_hash_offset: Offset of RX flow hash within prefix + * @rx_ts_offset: Offset of timestamp within prefix + * @rx_buffer_padding: Size of padding at end of RX packet + * @can_rx_scatter: NIC is able to scatter packets to multiple buffers + * @always_rx_scatter: NIC will always scatter packets to multiple buffers + * @option_descriptors: NIC supports TX option descriptors + * @min_interrupt_mode: Lowest capability interrupt mode supported + * from &enum efx_int_mode. + * @timer_period_max: Maximum period of interrupt timer (in ticks) + * @offload_features: net_device feature flags for protocol offload + * features implemented in hardware + * @mcdi_max_ver: Maximum MCDI version supported + * @hwtstamp_filters: Mask of hardware timestamp filter types supported + */ +struct efx_nic_type { + bool is_vf; + unsigned int (*mem_bar)(struct efx_nic *efx); + unsigned int (*mem_map_size)(struct efx_nic *efx); + int (*probe)(struct efx_nic *efx); + void (*remove)(struct efx_nic *efx); + int (*init)(struct efx_nic *efx); + int (*dimension_resources)(struct efx_nic *efx); + void (*fini)(struct efx_nic *efx); + void (*monitor)(struct efx_nic *efx); + enum reset_type (*map_reset_reason)(enum reset_type reason); + int (*map_reset_flags)(u32 *flags); + int (*reset)(struct efx_nic *efx, enum reset_type method); + int (*probe_port)(struct efx_nic *efx); + void (*remove_port)(struct efx_nic *efx); + bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); + int (*fini_dmaq)(struct efx_nic *efx); + void (*prepare_flush)(struct efx_nic *efx); + void (*finish_flush)(struct efx_nic *efx); + void (*prepare_flr)(struct efx_nic *efx); + void (*finish_flr)(struct efx_nic *efx); + size_t (*describe_stats)(struct efx_nic *efx, u8 *names); + size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats); + size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats); + void (*start_stats)(struct efx_nic *efx); + void (*pull_stats)(struct efx_nic *efx); + void (*stop_stats)(struct efx_nic *efx); + void (*push_irq_moderation)(struct efx_channel *channel); + int (*reconfigure_port)(struct efx_nic *efx); + void (*prepare_enable_fc_tx)(struct efx_nic *efx); + int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only); + bool (*check_mac_fault)(struct efx_nic *efx); + void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); + int (*set_wol)(struct efx_nic *efx, u32 type); + void (*resume_wol)(struct efx_nic *efx); + void (*get_fec_stats)(struct efx_nic *efx, + struct ethtool_fec_stats *fec_stats); + unsigned int (*check_caps)(const struct efx_nic *efx, + u8 flag, + u32 offset); + int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); + int 
(*test_nvram)(struct efx_nic *efx); + void (*mcdi_request)(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len); + bool (*mcdi_poll_response)(struct efx_nic *efx); + void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, + size_t pdu_offset, size_t pdu_len); + int (*mcdi_poll_reboot)(struct efx_nic *efx); + void (*mcdi_reboot_detected)(struct efx_nic *efx); + void (*irq_enable_master)(struct efx_nic *efx); + int (*irq_test_generate)(struct efx_nic *efx); + void (*irq_disable_non_ev)(struct efx_nic *efx); + irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); + irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); + int (*tx_probe)(struct efx_tx_queue *tx_queue); + void (*tx_init)(struct efx_tx_queue *tx_queue); + void (*tx_remove)(struct efx_tx_queue *tx_queue); + void (*tx_write)(struct efx_tx_queue *tx_queue); + netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len); + int (*rx_push_rss_config)(struct efx_nic *efx, bool user, + const u32 *rx_indir_table, const u8 *key); + int (*rx_pull_rss_config)(struct efx_nic *efx); + int (*rx_push_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx, + const u32 *rx_indir_table, + const u8 *key); + int (*rx_pull_rss_context_config)(struct efx_nic *efx, + struct efx_rss_context *ctx); + void (*rx_restore_rss_contexts)(struct efx_nic *efx); + int (*rx_probe)(struct efx_rx_queue *rx_queue); + void (*rx_init)(struct efx_rx_queue *rx_queue); + void (*rx_remove)(struct efx_rx_queue *rx_queue); + void (*rx_write)(struct efx_rx_queue *rx_queue); + void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); + void (*rx_packet)(struct efx_channel *channel); + bool (*rx_buf_hash_valid)(const u8 *prefix); + int (*ev_probe)(struct efx_channel *channel); + int (*ev_init)(struct efx_channel *channel); + void (*ev_fini)(struct efx_channel *channel); + void (*ev_remove)(struct efx_channel *channel); + int (*ev_process)(struct efx_channel *channel, int quota); + void (*ev_read_ack)(struct efx_channel *channel); + void (*ev_test_generate)(struct efx_channel *channel); + int (*filter_table_probe)(struct efx_nic *efx); + void (*filter_table_restore)(struct efx_nic *efx); + void (*filter_table_remove)(struct efx_nic *efx); + void (*filter_update_rx_scatter)(struct efx_nic *efx); + s32 (*filter_insert)(struct efx_nic *efx, + struct efx_filter_spec *spec, bool replace); + int (*filter_remove_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); + int (*filter_get_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *); + int (*filter_clear_rx)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_count_rx_used)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_get_rx_id_limit)(struct efx_nic *efx); + s32 (*filter_get_rx_ids)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); +#ifdef CONFIG_RFS_ACCEL + bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif +#ifdef CONFIG_SFC_MTD + int (*mtd_probe)(struct efx_nic *efx); + void (*mtd_rename)(struct efx_mtd_partition *part); + int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); + int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); + int (*mtd_write)(struct mtd_info *mtd, loff_t 
start, size_t len, + size_t *retlen, const u8 *buffer); + int (*mtd_sync)(struct mtd_info *mtd); +#endif + void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time); + int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); + int (*ptp_set_ts_config)(struct efx_nic *efx, + struct hwtstamp_config *init); + int (*sriov_configure)(struct efx_nic *efx, int num_vfs); + int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); + int (*sriov_init)(struct efx_nic *efx); + void (*sriov_fini)(struct efx_nic *efx); + bool (*sriov_wanted)(struct efx_nic *efx); + void (*sriov_reset)(struct efx_nic *efx); + void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i); + int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac); + int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, + u8 qos); + int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i, + bool spoofchk); + int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivi); + int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, + int link_state); + int (*vswitching_probe)(struct efx_nic *efx); + int (*vswitching_restore)(struct efx_nic *efx); + void (*vswitching_remove)(struct efx_nic *efx); + int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr); + int (*set_mac_address)(struct efx_nic *efx); + u32 (*tso_versions)(struct efx_nic *efx); + int (*udp_tnl_push_ports)(struct efx_nic *efx); + bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); + size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf, + size_t len); + void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev); + unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx); + + int revision; + unsigned int txd_ptr_tbl_base; + unsigned int rxd_ptr_tbl_base; + unsigned int buf_tbl_base; + unsigned int evq_ptr_tbl_base; + unsigned int evq_rptr_tbl_base; + u64 max_dma_mask; + unsigned int rx_prefix_size; + unsigned int rx_hash_offset; + unsigned int rx_ts_offset; + unsigned int rx_buffer_padding; + bool can_rx_scatter; + bool always_rx_scatter; + bool option_descriptors; + unsigned int min_interrupt_mode; + unsigned int timer_period_max; + netdev_features_t offload_features; + int mcdi_max_ver; + unsigned int max_rx_ip_filters; + u32 hwtstamp_filters; + unsigned int rx_hash_key_size; +}; + +/************************************************************************** + * + * Prototypes and inline functions + * + *************************************************************************/ + +static inline struct efx_channel * +efx_get_channel(struct efx_nic *efx, unsigned index) +{ + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_channels); + return efx->channel[index]; +} + +/* Iterate over all used channels */ +#define efx_for_each_channel(_channel, _efx) \ + for (_channel = (_efx)->channel[0]; \ + _channel; \ + _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ + (_efx)->channel[_channel->channel + 1] : NULL) + +/* Iterate over all used channels in reverse */ +#define efx_for_each_channel_rev(_channel, _efx) \ + for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \ + _channel; \ + _channel = _channel->channel ? 
\ + (_efx)->channel[_channel->channel - 1] : NULL) + +static inline struct efx_channel * +efx_get_tx_channel(struct efx_nic *efx, unsigned int index) +{ + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels); + return efx->channel[efx->tx_channel_offset + index]; +} + +static inline struct efx_channel * +efx_get_xdp_channel(struct efx_nic *efx, unsigned int index) +{ + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels); + return efx->channel[efx->xdp_channel_offset + index]; +} + +static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel) +{ + return channel->channel - channel->efx->xdp_channel_offset < + channel->efx->n_xdp_channels; +} + +static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) +{ + return true; +} + +static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel) +{ + if (efx_channel_is_xdp_tx(channel)) + return channel->efx->xdp_tx_per_channel; + return channel->efx->tx_queues_per_channel; +} + +static inline struct efx_tx_queue * +efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int type) +{ + EFX_WARN_ON_ONCE_PARANOID(type >= EFX_TXQ_TYPES); + return channel->tx_queue_by_type[type]; +} + +static inline struct efx_tx_queue * +efx_get_tx_queue(struct efx_nic *efx, unsigned int index, unsigned int type) +{ + struct efx_channel *channel = efx_get_tx_channel(efx, index); + + return efx_channel_get_tx_queue(channel, type); +} + +/* Iterate over all TX queues belonging to a channel */ +#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ + if (!efx_channel_has_tx_queues(_channel)) \ + ; \ + else \ + for (_tx_queue = (_channel)->tx_queue; \ + _tx_queue < (_channel)->tx_queue + \ + efx_channel_num_tx_queues(_channel); \ + _tx_queue++) + +static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) +{ + return channel->rx_queue.core_index >= 0; +} + +static inline struct efx_rx_queue * +efx_channel_get_rx_queue(struct efx_channel *channel) +{ + EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_rx_queue(channel)); + return &channel->rx_queue; +} + +/* Iterate over all RX queues belonging to a channel */ +#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ + if (!efx_channel_has_rx_queue(_channel)) \ + ; \ + else \ + for (_rx_queue = &(_channel)->rx_queue; \ + _rx_queue; \ + _rx_queue = NULL) + +static inline struct efx_channel * +efx_rx_queue_channel(struct efx_rx_queue *rx_queue) +{ + return container_of(rx_queue, struct efx_channel, rx_queue); +} + +static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) +{ + return efx_rx_queue_channel(rx_queue)->channel; +} + +/* Returns a pointer to the specified receive buffer in the RX + * descriptor queue. + */ +static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, + unsigned int index) +{ + return &rx_queue->buffer[index]; +} + +static inline struct efx_rx_buffer * +efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) +{ + if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask))) + return efx_rx_buffer(rx_queue, 0); + else + return rx_buf + 1; +} + +/** + * EFX_MAX_FRAME_LEN - calculate maximum frame length + * + * This calculates the maximum frame length that will be used for a + * given MTU. The frame length will be equal to the MTU plus a + * constant amount of header space and padding. This is the quantity + * that the net driver will program into the MAC as the maximum frame + * length. 
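+ * + * As a rough worked example (assuming the usual ETH_HLEN = 14, VLAN_HLEN = 4 + * and ETH_FCS_LEN = 4 byte values), a 1500-byte MTU gives + * EFX_MAX_FRAME_LEN(1500) = ALIGN(1500 + 14 + 4 + 4 + EFX_FRAME_PAD, 8) = + * ALIGN(1538, 8) = 1544 bytes.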
+ * + * The 10G MAC requires 8-byte alignment on the frame + * length, so we round up to the nearest 8. + * + * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an + * XGMII cycle). If the frame length reaches the maximum value in the + * same cycle, the XMAC can miss the IPG altogether. We work around + * this by adding a further 16 bytes. + */ +#define EFX_FRAME_PAD 16 +#define EFX_MAX_FRAME_LEN(mtu) \ + (ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8)) + +static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb) +{ + return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; +} +static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) +{ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +} + +/* Get the max fill level of the TX queues on this channel */ +static inline unsigned int +efx_channel_tx_fill_level(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + unsigned int fill_level = 0; + + efx_for_each_channel_tx_queue(tx_queue, channel) + fill_level = max(fill_level, + tx_queue->insert_count - tx_queue->read_count); + + return fill_level; +} + +/* Conservative approximation of efx_channel_tx_fill_level using cached value */ +static inline unsigned int +efx_channel_tx_old_fill_level(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + unsigned int fill_level = 0; + + efx_for_each_channel_tx_queue(tx_queue, channel) + fill_level = max(fill_level, + tx_queue->insert_count - tx_queue->old_read_count); + + return fill_level; +} + +/* Get all supported features. + * If a feature is not fixed, it is present in hw_features. + * If a feature is fixed, it is not present in hw_features, but + * is always present in features. + */ +static inline netdev_features_t efx_supported_features(const struct efx_nic *efx) +{ + const struct net_device *net_dev = efx->net_dev; + + return net_dev->features | net_dev->hw_features; +} + +/* Get the current TX queue insert index. */ +static inline unsigned int +efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue) +{ + return tx_queue->insert_count & tx_queue->ptr_mask; +} + +/* Get a TX buffer. */ +static inline struct efx_tx_buffer * +__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) +{ + return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)]; +} + +/* Get a TX buffer, checking it's not currently in use. */ +static inline struct efx_tx_buffer * +efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer = + __efx_tx_queue_get_insert_buffer(tx_queue); + + EFX_WARN_ON_ONCE_PARANOID(buffer->len); + EFX_WARN_ON_ONCE_PARANOID(buffer->flags); + EFX_WARN_ON_ONCE_PARANOID(buffer->unmap_len); + + return buffer; +} + +#endif /* EFX_NET_DRIVER_H */ diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c new file mode 100644 index 000000000000..22fbb0ae77fb --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/nic.c @@ -0,0 +1,580 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "nic.h" +#include "ef10_regs.h" +#include "farch_regs.h" +#include "io.h" +#include "workarounds.h" +#include "mcdi_pcol.h" + +/************************************************************************** + * + * Generic buffer handling + * These buffers are used for interrupt status, MAC stats, etc. + * + **************************************************************************/ + +int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags) +{ + buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, + &buffer->dma_addr, gfp_flags); + if (!buffer->addr) + return -ENOMEM; + buffer->len = len; + return 0; +} + +void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) +{ + if (buffer->addr) { + dma_free_coherent(&efx->pci_dev->dev, buffer->len, + buffer->addr, buffer->dma_addr); + buffer->addr = NULL; + } +} + +/* Check whether an event is present in the eventq at the current + * read pointer. Only useful for self-test. + */ +bool efx_nic_event_present(struct efx_channel *channel) +{ + return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); +} + +void efx_nic_event_test_start(struct efx_channel *channel) +{ + channel->event_test_cpu = -1; + smp_wmb(); + channel->efx->type->ev_test_generate(channel); +} + +int efx_nic_irq_test_start(struct efx_nic *efx) +{ + efx->last_irq_cpu = -1; + smp_wmb(); + return efx->type->irq_test_generate(efx); +} + +/* Hook interrupt handler(s) + * Try MSI and then legacy interrupts. + */ +int efx_nic_init_interrupt(struct efx_nic *efx) +{ + struct efx_channel *channel; + unsigned int n_irqs; + int rc; + + if (!EFX_INT_MODE_USE_MSI(efx)) { + rc = request_irq(efx->legacy_irq, + efx->type->irq_handle_legacy, IRQF_SHARED, + efx->name, efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to hook legacy IRQ %d\n", + efx->pci_dev->irq); + goto fail1; + } + efx->irqs_hooked = true; + return 0; + } + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + efx->net_dev->rx_cpu_rmap = + alloc_irq_cpu_rmap(efx->n_rx_channels); + if (!efx->net_dev->rx_cpu_rmap) { + rc = -ENOMEM; + goto fail1; + } + } +#endif + + /* Hook MSI or MSI-X interrupt */ + n_irqs = 0; + efx_for_each_channel(channel, efx) { + rc = request_irq(channel->irq, efx->type->irq_handle_msi, + IRQF_PROBE_SHARED, /* Not shared */ + efx->msi_context[channel->channel].name, + &efx->msi_context[channel->channel]); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to hook IRQ %d\n", channel->irq); + goto fail2; + } + ++n_irqs; + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX && + channel->channel < efx->n_rx_channels) { + rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, + channel->irq); + if (rc) + goto fail2; + } +#endif + } + + efx->irqs_hooked = true; + return 0; + + fail2: +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + efx_for_each_channel(channel, efx) { + if (n_irqs-- == 0) + break; + free_irq(channel->irq, &efx->msi_context[channel->channel]); + } + fail1: + return rc; +} + +void efx_nic_fini_interrupt(struct efx_nic *efx) +{ + struct efx_channel *channel; + +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + + if (!efx->irqs_hooked) + return; + if (EFX_INT_MODE_USE_MSI(efx)) { 
+ /* Disable MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + free_irq(channel->irq, + &efx->msi_context[channel->channel]); + } else { + /* Disable legacy interrupt */ + free_irq(efx->legacy_irq, efx); + } + efx->irqs_hooked = false; +} + +/* Register dump */ + +#define REGISTER_REVISION_FA 1 +#define REGISTER_REVISION_FB 2 +#define REGISTER_REVISION_FC 3 +#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */ +#define REGISTER_REVISION_ED 4 +#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */ + +struct efx_nic_reg { + u32 offset:24; + u32 min_revision:3, max_revision:3; +}; + +#define REGISTER(name, arch, min_rev, max_rev) { \ + arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev \ +} +#define REGISTER_AA(name) REGISTER(name, F, A, A) +#define REGISTER_AB(name) REGISTER(name, F, A, B) +#define REGISTER_AZ(name) REGISTER(name, F, A, Z) +#define REGISTER_BB(name) REGISTER(name, F, B, B) +#define REGISTER_BZ(name) REGISTER(name, F, B, Z) +#define REGISTER_CZ(name) REGISTER(name, F, C, Z) +#define REGISTER_DZ(name) REGISTER(name, E, D, Z) + +static const struct efx_nic_reg efx_nic_regs[] = { + REGISTER_AZ(ADR_REGION), + REGISTER_AZ(INT_EN_KER), + REGISTER_BZ(INT_EN_CHAR), + REGISTER_AZ(INT_ADR_KER), + REGISTER_BZ(INT_ADR_CHAR), + /* INT_ACK_KER is WO */ + /* INT_ISR0 is RC */ + REGISTER_AZ(HW_INIT), + REGISTER_CZ(USR_EV_CFG), + REGISTER_AB(EE_SPI_HCMD), + REGISTER_AB(EE_SPI_HADR), + REGISTER_AB(EE_SPI_HDATA), + REGISTER_AB(EE_BASE_PAGE), + REGISTER_AB(EE_VPD_CFG0), + /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ + /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ + /* PCIE_CORE_INDIRECT is indirect */ + REGISTER_AB(NIC_STAT), + REGISTER_AB(GPIO_CTL), + REGISTER_AB(GLB_CTL), + /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ + REGISTER_BZ(DP_CTRL), + REGISTER_AZ(MEM_STAT), + REGISTER_AZ(CS_DEBUG), + REGISTER_AZ(ALTERA_BUILD), + REGISTER_AZ(CSR_SPARE), + REGISTER_AB(PCIE_SD_CTL0123), + REGISTER_AB(PCIE_SD_CTL45), + REGISTER_AB(PCIE_PCS_CTL_STAT), + /* DEBUG_DATA_OUT is not used */ + /* DRV_EV is WO */ + REGISTER_AZ(EVQ_CTL), + REGISTER_AZ(EVQ_CNT1), + REGISTER_AZ(EVQ_CNT2), + REGISTER_AZ(BUF_TBL_CFG), + REGISTER_AZ(SRM_RX_DC_CFG), + REGISTER_AZ(SRM_TX_DC_CFG), + REGISTER_AZ(SRM_CFG), + /* BUF_TBL_UPD is WO */ + REGISTER_AZ(SRM_UPD_EVQ), + REGISTER_AZ(SRAM_PARITY), + REGISTER_AZ(RX_CFG), + REGISTER_BZ(RX_FILTER_CTL), + /* RX_FLUSH_DESCQ is WO */ + REGISTER_AZ(RX_DC_CFG), + REGISTER_AZ(RX_DC_PF_WM), + REGISTER_BZ(RX_RSS_TKEY), + /* RX_NODESC_DROP is RC */ + REGISTER_AA(RX_SELF_RST), + /* RX_DEBUG, RX_PUSH_DROP are not used */ + REGISTER_CZ(RX_RSS_IPV6_REG1), + REGISTER_CZ(RX_RSS_IPV6_REG2), + REGISTER_CZ(RX_RSS_IPV6_REG3), + /* TX_FLUSH_DESCQ is WO */ + REGISTER_AZ(TX_DC_CFG), + REGISTER_AA(TX_CHKSM_CFG), + REGISTER_AZ(TX_CFG), + /* TX_PUSH_DROP is not used */ + REGISTER_AZ(TX_RESERVED), + REGISTER_BZ(TX_PACE), + /* TX_PACE_DROP_QID is RC */ + REGISTER_BB(TX_VLAN), + REGISTER_BZ(TX_IPFIL_PORTEN), + REGISTER_AB(MD_TXD), + REGISTER_AB(MD_RXD), + REGISTER_AB(MD_CS), + REGISTER_AB(MD_PHY_ADR), + REGISTER_AB(MD_ID), + /* MD_STAT is RC */ + REGISTER_AB(MAC_STAT_DMA), + REGISTER_AB(MAC_CTRL), + REGISTER_BB(GEN_MODE), + REGISTER_AB(MAC_MC_HASH_REG0), + REGISTER_AB(MAC_MC_HASH_REG1), + REGISTER_AB(GM_CFG1), + REGISTER_AB(GM_CFG2), + /* GM_IPG and GM_HD are not used */ + REGISTER_AB(GM_MAX_FLEN), + /* GM_TEST is not used */ + REGISTER_AB(GM_ADR1), + REGISTER_AB(GM_ADR2), + 
REGISTER_AB(GMF_CFG0), + REGISTER_AB(GMF_CFG1), + REGISTER_AB(GMF_CFG2), + REGISTER_AB(GMF_CFG3), + REGISTER_AB(GMF_CFG4), + REGISTER_AB(GMF_CFG5), + REGISTER_BB(TX_SRC_MAC_CTL), + REGISTER_AB(XM_ADR_LO), + REGISTER_AB(XM_ADR_HI), + REGISTER_AB(XM_GLB_CFG), + REGISTER_AB(XM_TX_CFG), + REGISTER_AB(XM_RX_CFG), + REGISTER_AB(XM_MGT_INT_MASK), + REGISTER_AB(XM_FC), + REGISTER_AB(XM_PAUSE_TIME), + REGISTER_AB(XM_TX_PARAM), + REGISTER_AB(XM_RX_PARAM), + /* XM_MGT_INT_MSK (note no 'A') is RC */ + REGISTER_AB(XX_PWR_RST), + REGISTER_AB(XX_SD_CTL), + REGISTER_AB(XX_TXDRV_CTL), + /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ + /* XX_CORE_STAT is partly RC */ + REGISTER_DZ(BIU_HW_REV_ID), + REGISTER_DZ(MC_DB_LWRD), + REGISTER_DZ(MC_DB_HWRD), +}; + +struct efx_nic_reg_table { + u32 offset:24; + u32 min_revision:3, max_revision:3; + u32 step:6, rows:21; +}; + +#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \ + offset, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev, \ + step, rows \ +} +#define REGISTER_TABLE(name, arch, min_rev, max_rev) \ + REGISTER_TABLE_DIMENSIONS( \ + name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + arch, min_rev, max_rev, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) +#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A) +#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z) +#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B) +#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z) +#define REGISTER_TABLE_BB_CZ(name) \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \ + FR_BZ_ ## name ## _STEP, \ + FR_BB_ ## name ## _ROWS), \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \ + FR_BZ_ ## name ## _STEP, \ + FR_CZ_ ## name ## _ROWS) +#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z) +#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z) + +static const struct efx_nic_reg_table efx_nic_reg_tables[] = { + /* DRIVER is not used */ + /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ + REGISTER_TABLE_BB(TX_IPFIL_TBL), + REGISTER_TABLE_BB(TX_SRC_MAC_TBL), + REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), + REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), + REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), + /* We can't reasonably read all of the buffer table (up to 8MB!). + * However this driver will only use a few entries. Reading + * 1K entries allows for some expansion of queue count and + * size before we need to change the version. 
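+ * (With the 8-byte step used below, 1024 rows works out to an 8KB dump per + * table.)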
*/ + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, + F, A, A, 8, 1024), + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, + F, B, Z, 8, 1024), + REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), + REGISTER_TABLE_BB_CZ(TIMER_TBL), + REGISTER_TABLE_BB_CZ(TX_PACE_TBL), + REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), + /* TX_FILTER_TBL0 is huge and not used by this driver */ + REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), + REGISTER_TABLE_CZ(MC_TREG_SMEM), + /* MSIX_PBA_TABLE is not mapped */ + /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ + REGISTER_TABLE_BZ(RX_FILTER_TBL0), + REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS), +}; + +size_t efx_nic_get_regs_len(struct efx_nic *efx) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + size_t len = 0; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) + len += sizeof(efx_oword_t); + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) + if (efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision) + len += table->rows * min_t(size_t, table->step, 16); + + return len; +} + +void efx_nic_get_regs(struct efx_nic *efx, void *buf) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) { + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) { + efx_reado(efx, (efx_oword_t *)buf, reg->offset); + buf += sizeof(efx_oword_t); + } + } + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) { + size_t size, i; + + if (!(efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision)) + continue; + + size = min_t(size_t, table->step, 16); + + for (i = 0; i < table->rows; i++) { + switch (table->step) { + case 4: /* 32-bit SRAM */ + efx_readd(efx, buf, table->offset + 4 * i); + break; + case 8: /* 64-bit SRAM */ + efx_sram_readq(efx, + efx->membase + table->offset, + buf, i); + break; + case 16: /* 128-bit-readable register */ + efx_reado_table(efx, buf, table->offset, i); + break; + case 32: /* 128-bit register, interleaved */ + efx_reado_table(efx, buf, table->offset, 2 * i); + break; + default: + WARN_ON(1); + return; + } + buf += size; + } + } +} + +/** + * efx_nic_describe_stats - Describe supported statistics for ethtool + * @desc: Array of &struct efx_hw_stat_desc describing the statistics + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @names: Buffer to copy names to, or %NULL. The names are copied + * starting at intervals of %ETH_GSTRING_LEN bytes. + * + * Returns the number of visible statistics, i.e. the number of set + * bits in the first @count bits of @mask for which a name is defined. + */ +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names) +{ + size_t visible = 0; + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].name) { + if (names) { + strlcpy(names, desc[index].name, + ETH_GSTRING_LEN); + names += ETH_GSTRING_LEN; + } + ++visible; + } + } + + return visible; +} + +/** + * efx_nic_copy_stats - Copy stats from the DMA buffer in to an + * intermediate buffer. 
This is used to get a consistent + * set of stats while the DMA buffer can be written at any time + * by the NIC. + * @efx: The associated NIC. + * @dest: Destination buffer. Must be the same size as the DMA buffer. + */ +int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + __le64 generation_start, generation_end; + int rc = 0, retry; + + if (!dest) + return 0; + + if (!dma_stats) + goto return_zeroes; + + /* If we're unlucky enough to read statistics during the DMA, wait + * up to 10ms for it to finish (typically takes <500us) + */ + for (retry = 0; retry < 100; ++retry) { + generation_end = dma_stats[efx->num_mac_stats - 1]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + goto return_zeroes; + rmb(); + memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64)); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end == generation_start) + return 0; /* return good data */ + udelay(100); + } + + rc = -EIO; + +return_zeroes: + memset(dest, 0, efx->num_mac_stats * sizeof(u64)); + return rc; +} + +/** + * efx_nic_update_stats - Convert statistics DMA buffer to array of u64 + * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer + * layout. DMA widths of 0, 16, 32 and 64 are supported; where + * the width is specified as 0 the corresponding element of + * @stats is not updated. + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @stats: Buffer to update with the converted statistics. The length + * of this array must be at least @count. + * @dma_buf: DMA buffer containing hardware statistics + * @accumulate: If set, the converted values will be added rather than + * directly stored to the corresponding elements of @stats + */ +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, + u64 *stats, const void *dma_buf, bool accumulate) +{ + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].dma_width) { + const void *addr = dma_buf + desc[index].offset; + u64 val; + + switch (desc[index].dma_width) { + case 16: + val = le16_to_cpup((__le16 *)addr); + break; + case 32: + val = le32_to_cpup((__le32 *)addr); + break; + case 64: + val = le64_to_cpup((__le64 *)addr); + break; + default: + WARN_ON(1); + val = 0; + break; + } + + if (accumulate) + stats[index] += val; + else + stats[index] = val; + } + } +} + +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) +{ + /* if down, or this is the first update after coming up */ + if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state) + efx->rx_nodesc_drops_while_down += + *rx_nodesc_drops - efx->rx_nodesc_drops_total; + efx->rx_nodesc_drops_total = *rx_nodesc_drops; + efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); + *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; +} diff --git a/drivers/net/ethernet/sfc/siena/nic.h b/drivers/net/ethernet/sfc/siena/nic.h new file mode 100644 index 000000000000..251868235ae4 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/nic.h @@ -0,0 +1,392 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ */ + +#ifndef EFX_NIC_H +#define EFX_NIC_H + +#include "nic_common.h" +#include "efx.h" + +u32 efx_farch_fpga_ver(struct efx_nic *efx); + +enum { + PHY_TYPE_NONE = 0, + PHY_TYPE_TXC43128 = 1, + PHY_TYPE_88E1111 = 2, + PHY_TYPE_SFX7101 = 3, + PHY_TYPE_QT2022C2 = 4, + PHY_TYPE_PM8358 = 6, + PHY_TYPE_SFT9001A = 8, + PHY_TYPE_QT2025C = 9, + PHY_TYPE_SFT9001B = 10, +}; + +enum { + SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT, + SIENA_STAT_tx_good_bytes, + SIENA_STAT_tx_bad_bytes, + SIENA_STAT_tx_packets, + SIENA_STAT_tx_bad, + SIENA_STAT_tx_pause, + SIENA_STAT_tx_control, + SIENA_STAT_tx_unicast, + SIENA_STAT_tx_multicast, + SIENA_STAT_tx_broadcast, + SIENA_STAT_tx_lt64, + SIENA_STAT_tx_64, + SIENA_STAT_tx_65_to_127, + SIENA_STAT_tx_128_to_255, + SIENA_STAT_tx_256_to_511, + SIENA_STAT_tx_512_to_1023, + SIENA_STAT_tx_1024_to_15xx, + SIENA_STAT_tx_15xx_to_jumbo, + SIENA_STAT_tx_gtjumbo, + SIENA_STAT_tx_collision, + SIENA_STAT_tx_single_collision, + SIENA_STAT_tx_multiple_collision, + SIENA_STAT_tx_excessive_collision, + SIENA_STAT_tx_deferred, + SIENA_STAT_tx_late_collision, + SIENA_STAT_tx_excessive_deferred, + SIENA_STAT_tx_non_tcpudp, + SIENA_STAT_tx_mac_src_error, + SIENA_STAT_tx_ip_src_error, + SIENA_STAT_rx_bytes, + SIENA_STAT_rx_good_bytes, + SIENA_STAT_rx_bad_bytes, + SIENA_STAT_rx_packets, + SIENA_STAT_rx_good, + SIENA_STAT_rx_bad, + SIENA_STAT_rx_pause, + SIENA_STAT_rx_control, + SIENA_STAT_rx_unicast, + SIENA_STAT_rx_multicast, + SIENA_STAT_rx_broadcast, + SIENA_STAT_rx_lt64, + SIENA_STAT_rx_64, + SIENA_STAT_rx_65_to_127, + SIENA_STAT_rx_128_to_255, + SIENA_STAT_rx_256_to_511, + SIENA_STAT_rx_512_to_1023, + SIENA_STAT_rx_1024_to_15xx, + SIENA_STAT_rx_15xx_to_jumbo, + SIENA_STAT_rx_gtjumbo, + SIENA_STAT_rx_bad_gtjumbo, + SIENA_STAT_rx_overflow, + SIENA_STAT_rx_false_carrier, + SIENA_STAT_rx_symbol_error, + SIENA_STAT_rx_align_error, + SIENA_STAT_rx_length_error, + SIENA_STAT_rx_internal_error, + SIENA_STAT_rx_nodesc_drop_cnt, + SIENA_STAT_COUNT +}; + +/** + * struct siena_nic_data - Siena NIC state + * @efx: Pointer back to main interface structure + * @wol_filter_id: Wake-on-LAN packet filter id + * @stats: Hardware statistics + * @vf: Array of &struct siena_vf objects + * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. + * @vfdi_status: Common VFDI status page to be dmad to VF address space. + * @local_addr_list: List of local addresses. Protected by %local_lock. + * @local_page_list: List of DMA addressable pages used to broadcast + * %local_addr_list. Protected by %local_lock. + * @local_lock: Mutex protecting %local_addr_list and %local_page_list. + * @peer_work: Work item to broadcast peer addresses to VMs. 
+ */ +struct siena_nic_data { + struct efx_nic *efx; + int wol_filter_id; + u64 stats[SIENA_STAT_COUNT]; +#ifdef CONFIG_SFC_SRIOV + struct siena_vf *vf; + struct efx_channel *vfdi_channel; + unsigned vf_buftbl_base; + struct efx_buffer vfdi_status; + struct list_head local_addr_list; + struct list_head local_page_list; + struct mutex local_lock; + struct work_struct peer_work; +#endif +}; + +enum { + EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT, + EF10_STAT_port_tx_packets, + EF10_STAT_port_tx_pause, + EF10_STAT_port_tx_control, + EF10_STAT_port_tx_unicast, + EF10_STAT_port_tx_multicast, + EF10_STAT_port_tx_broadcast, + EF10_STAT_port_tx_lt64, + EF10_STAT_port_tx_64, + EF10_STAT_port_tx_65_to_127, + EF10_STAT_port_tx_128_to_255, + EF10_STAT_port_tx_256_to_511, + EF10_STAT_port_tx_512_to_1023, + EF10_STAT_port_tx_1024_to_15xx, + EF10_STAT_port_tx_15xx_to_jumbo, + EF10_STAT_port_rx_bytes, + EF10_STAT_port_rx_bytes_minus_good_bytes, + EF10_STAT_port_rx_good_bytes, + EF10_STAT_port_rx_bad_bytes, + EF10_STAT_port_rx_packets, + EF10_STAT_port_rx_good, + EF10_STAT_port_rx_bad, + EF10_STAT_port_rx_pause, + EF10_STAT_port_rx_control, + EF10_STAT_port_rx_unicast, + EF10_STAT_port_rx_multicast, + EF10_STAT_port_rx_broadcast, + EF10_STAT_port_rx_lt64, + EF10_STAT_port_rx_64, + EF10_STAT_port_rx_65_to_127, + EF10_STAT_port_rx_128_to_255, + EF10_STAT_port_rx_256_to_511, + EF10_STAT_port_rx_512_to_1023, + EF10_STAT_port_rx_1024_to_15xx, + EF10_STAT_port_rx_15xx_to_jumbo, + EF10_STAT_port_rx_gtjumbo, + EF10_STAT_port_rx_bad_gtjumbo, + EF10_STAT_port_rx_overflow, + EF10_STAT_port_rx_align_error, + EF10_STAT_port_rx_length_error, + EF10_STAT_port_rx_nodesc_drops, + EF10_STAT_port_rx_pm_trunc_bb_overflow, + EF10_STAT_port_rx_pm_discard_bb_overflow, + EF10_STAT_port_rx_pm_trunc_vfifo_full, + EF10_STAT_port_rx_pm_discard_vfifo_full, + EF10_STAT_port_rx_pm_trunc_qbb, + EF10_STAT_port_rx_pm_discard_qbb, + EF10_STAT_port_rx_pm_discard_mapping, + EF10_STAT_port_rx_dp_q_disabled_packets, + EF10_STAT_port_rx_dp_di_dropped_packets, + EF10_STAT_port_rx_dp_streaming_packets, + EF10_STAT_port_rx_dp_hlb_fetch, + EF10_STAT_port_rx_dp_hlb_wait, + EF10_STAT_rx_unicast, + EF10_STAT_rx_unicast_bytes, + EF10_STAT_rx_multicast, + EF10_STAT_rx_multicast_bytes, + EF10_STAT_rx_broadcast, + EF10_STAT_rx_broadcast_bytes, + EF10_STAT_rx_bad, + EF10_STAT_rx_bad_bytes, + EF10_STAT_rx_overflow, + EF10_STAT_tx_unicast, + EF10_STAT_tx_unicast_bytes, + EF10_STAT_tx_multicast, + EF10_STAT_tx_multicast_bytes, + EF10_STAT_tx_broadcast, + EF10_STAT_tx_broadcast_bytes, + EF10_STAT_tx_bad, + EF10_STAT_tx_bad_bytes, + EF10_STAT_tx_overflow, + EF10_STAT_V1_COUNT, + EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT, + EF10_STAT_fec_corrected_errors, + EF10_STAT_fec_corrected_symbols_lane0, + EF10_STAT_fec_corrected_symbols_lane1, + EF10_STAT_fec_corrected_symbols_lane2, + EF10_STAT_fec_corrected_symbols_lane3, + EF10_STAT_ctpio_vi_busy_fallback, + EF10_STAT_ctpio_long_write_success, + EF10_STAT_ctpio_missing_dbell_fail, + EF10_STAT_ctpio_overflow_fail, + EF10_STAT_ctpio_underflow_fail, + EF10_STAT_ctpio_timeout_fail, + EF10_STAT_ctpio_noncontig_wr_fail, + EF10_STAT_ctpio_frm_clobber_fail, + EF10_STAT_ctpio_invalid_wr_fail, + EF10_STAT_ctpio_vi_clobber_fallback, + EF10_STAT_ctpio_unqualified_fallback, + EF10_STAT_ctpio_runt_fallback, + EF10_STAT_ctpio_success, + EF10_STAT_ctpio_fallback, + EF10_STAT_ctpio_poison, + EF10_STAT_ctpio_erase, + EF10_STAT_COUNT +}; + +/* Maximum number of TX PIO buffers we may allocate to a function. 
+ * This matches the total number of buffers on each SFC9100-family + * controller. + */ +#define EF10_TX_PIOBUF_COUNT 16 + +/** + * struct efx_ef10_nic_data - EF10 architecture NIC state + * @mcdi_buf: DMA buffer for MCDI + * @warm_boot_count: Last seen MC warm boot count + * @vi_base: Absolute index of first VI in this function + * @n_allocated_vis: Number of VIs allocated to this function + * @n_piobufs: Number of PIO buffers allocated to this function + * @wc_membase: Base address of write-combining mapping of the memory BAR + * @pio_write_base: Base address for writing PIO buffers + * @pio_write_vi_base: Relative VI number for @pio_write_base + * @piobuf_handle: Handle of each PIO buffer allocated + * @piobuf_size: size of a single PIO buffer + * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC + * reboot + * @mc_stats: Scratch buffer for converting statistics to the kernel's format + * @stats: Hardware statistics + * @workaround_35388: Flag: firmware supports workaround for bug 35388 + * @workaround_26807: Flag: firmware supports workaround for bug 26807 + * @workaround_61265: Flag: firmware supports workaround for bug 61265 + * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated + * after MC reboot + * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of + * %MC_CMD_GET_CAPABILITIES response) + * @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of + * %MC_CMD_GET_CAPABILITIES response) + * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU + * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU + * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot + * @pf_index: The number for this PF, or the parent PF if this is a VF +#ifdef CONFIG_SFC_SRIOV + * @vf: Pointer to VF data structure +#endif + * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero + * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock. + * @vlan_lock: Lock to serialize access to vlan_list. + * @udp_tunnels: UDP tunnel port numbers and types. + * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing + * @udp_tunnels to hardware and thus the push must be re-done. + * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty. 
+ */ +struct efx_ef10_nic_data { + struct efx_buffer mcdi_buf; + u16 warm_boot_count; + unsigned int vi_base; + unsigned int n_allocated_vis; + unsigned int n_piobufs; + void __iomem *wc_membase, *pio_write_base; + unsigned int pio_write_vi_base; + unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; + u16 piobuf_size; + bool must_restore_piobufs; + __le64 *mc_stats; + u64 stats[EF10_STAT_COUNT]; + bool workaround_35388; + bool workaround_26807; + bool workaround_61265; + bool must_check_datapath_caps; + u32 datapath_caps; + u32 datapath_caps2; + unsigned int rx_dpcpu_fw_id; + unsigned int tx_dpcpu_fw_id; + bool must_probe_vswitching; + unsigned int pf_index; + u8 port_id[ETH_ALEN]; +#ifdef CONFIG_SFC_SRIOV + unsigned int vf_index; + struct ef10_vf *vf; +#endif + u8 vport_mac[ETH_ALEN]; + struct list_head vlan_list; + struct mutex vlan_lock; + struct efx_udp_tunnel udp_tunnels[16]; + bool udp_tunnels_dirty; + struct mutex udp_tunnels_lock; + u64 licensed_features; +}; + +/* TSOv2 */ +int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + bool *data_mapped); + +extern const struct efx_nic_type efx_hunt_a0_nic_type; +extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; + +int falcon_probe_board(struct efx_nic *efx, u16 revision_info); + +/* Falcon/Siena queue operations */ +int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); +void efx_farch_tx_init(struct efx_tx_queue *tx_queue); +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); +void efx_farch_tx_write(struct efx_tx_queue *tx_queue); +unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len); +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); +void efx_farch_rx_init(struct efx_rx_queue *rx_queue); +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); +void efx_farch_rx_write(struct efx_rx_queue *rx_queue); +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); +int efx_farch_ev_probe(struct efx_channel *channel); +int efx_farch_ev_init(struct efx_channel *channel); +void efx_farch_ev_fini(struct efx_channel *channel); +void efx_farch_ev_remove(struct efx_channel *channel); +int efx_farch_ev_process(struct efx_channel *channel, int quota); +void efx_farch_ev_read_ack(struct efx_channel *channel); +void efx_farch_ev_test_generate(struct efx_channel *channel); + +/* Falcon/Siena filter operations */ +int efx_farch_filter_table_probe(struct efx_nic *efx); +void efx_farch_filter_table_restore(struct efx_nic *efx); +void efx_farch_filter_table_remove(struct efx_nic *efx); +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); +s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, + bool replace); +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, u32 filter_id, + struct efx_filter_spec *); +int efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); +s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, u32 *buf, + u32 size); +#ifdef CONFIG_RFS_ACCEL +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int 
index); +#endif +void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); + +/* Falcon/Siena interrupts */ +void efx_farch_irq_enable_master(struct efx_nic *efx); +int efx_farch_irq_test_generate(struct efx_nic *efx); +void efx_farch_irq_disable_master(struct efx_nic *efx); +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); + +/* Global Resources */ +void siena_prepare_flush(struct efx_nic *efx); +int efx_farch_fini_dmaq(struct efx_nic *efx); +void efx_farch_finish_flr(struct efx_nic *efx); +void siena_finish_flush(struct efx_nic *efx); +void falcon_start_nic_stats(struct efx_nic *efx); +void falcon_stop_nic_stats(struct efx_nic *efx); +int falcon_reset_xaui(struct efx_nic *efx); +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); +void efx_farch_init_common(struct efx_nic *efx); +void efx_farch_rx_push_indir_table(struct efx_nic *efx); +void efx_farch_rx_pull_indir_table(struct efx_nic *efx); + +/* Tests */ +struct efx_farch_register_test { + unsigned address; + efx_oword_t mask; +}; + +int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs); + +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event); + +#endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h new file mode 100644 index 000000000000..0cef35c0c559 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/nic_common.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + */ + +#ifndef EFX_NIC_COMMON_H +#define EFX_NIC_COMMON_H + +#include "net_driver.h" +#include "efx_common.h" +#include "mcdi.h" +#include "ptp.h" + +enum { + /* Revisions 0-2 were Falcon A0, A1 and B0 respectively. + * They are not supported by this driver but these revision numbers + * form part of the ethtool API for register dumping. + */ + EFX_REV_SIENA_A0 = 3, + EFX_REV_HUNT_A0 = 4, + EFX_REV_EF100 = 5, +}; + +static inline int efx_nic_rev(struct efx_nic *efx) +{ + return efx->type->revision; +} + +/* Read the current event from the event queue */ +static inline efx_qword_t *efx_event(struct efx_channel *channel, + unsigned int index) +{ + return ((efx_qword_t *) (channel->eventq.buf.addr)) + + (index & channel->eventq_mask); +} + +/* See if an event is present + * + * We check both the high and low dword of the event for all ones. We + * wrote all ones when we cleared the event, and no valid event can + * have all ones in either its high or low dwords. This approach is + * robust against reordering. + * + * Note that using a single 64-bit comparison is incorrect; even + * though the CPU read will be atomic, the DMA write may not be. + */ +static inline int efx_event_present(efx_qword_t *event) +{ + return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | + EFX_DWORD_IS_ALL_ONES(event->dword[1])); +} + +/* Returns a pointer to the specified transmit descriptor in the TX + * descriptor queue belonging to the specified channel. 
+ */ +static inline efx_qword_t * +efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) +{ + return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; +} + +/* Report whether this TX queue would be empty for the given write_count. + * May return false negative. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count) +{ + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); + + if (empty_read_count == 0) + return false; + + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; +} + +int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + bool *data_mapped); + +/* Decide whether to push a TX descriptor to the NIC vs merely writing + * the doorbell. This can reduce latency when we are adding a single + * descriptor to an empty queue, but is otherwise pointless. Further, + * Falcon and Siena have hardware bugs (SF bug 33851) that may be + * triggered if we don't check this. + * We use the write_count used for the last doorbell push, to get the + * NIC's view of the tx queue. + */ +static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count); + + tx_queue->empty_read_count = 0; + return was_empty && tx_queue->write_count - write_count == 1; +} + +/* Returns a pointer to the specified descriptor in the RX descriptor queue */ +static inline efx_qword_t * +efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; +} + +/* Alignment of PCIe DMA boundaries (4KB) */ +#define EFX_PAGE_SIZE 4096 +/* Size and alignment of buffer table entries (same) */ +#define EFX_BUF_SIZE EFX_PAGE_SIZE + +/* NIC-generic software stats */ +enum { + GENERIC_STAT_rx_noskb_drops, + GENERIC_STAT_rx_nodesc_trunc, + GENERIC_STAT_COUNT +}; + +#define EFX_GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +/* TX data path */ +static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) +{ + return tx_queue->efx->type->tx_probe(tx_queue); +} +static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_init(tx_queue); +} +static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) +{ + if (tx_queue->efx->type->tx_remove) + tx_queue->efx->type->tx_remove(tx_queue); +} +static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_write(tx_queue); +} + +/* RX data path */ +static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_probe(rx_queue); +} +static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_init(rx_queue); +} +static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_remove(rx_queue); +} +static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_write(rx_queue); +} +static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_defer_refill(rx_queue); +} + +/* Event data path */ +static inline int efx_nic_probe_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_probe(channel); +} +static inline int efx_nic_init_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_init(channel); +} +static inline void efx_nic_fini_eventq(struct efx_channel *channel) +{ + 
channel->efx->type->ev_fini(channel); +} +static inline void efx_nic_remove_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_remove(channel); +} +static inline int +efx_nic_process_eventq(struct efx_channel *channel, int quota) +{ + return channel->efx->type->ev_process(channel, quota); +} +static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) +{ + channel->efx->type->ev_read_ack(channel); +} + +void efx_nic_event_test_start(struct efx_channel *channel); + +bool efx_nic_event_present(struct efx_channel *channel); + +static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ + if (efx->type->sensor_event) + efx->type->sensor_event(efx, ev); +} + +static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx) +{ + return efx->type->rx_recycle_ring_size(efx); +} + +/* Some statistics are computed as A - B where A and B each increase + * linearly with some hardware counter(s) and the counters are read + * asynchronously. If the counters contributing to B are always read + * after those contributing to A, the computed value may be lower than + * the true value by some variable amount, and may decrease between + * subsequent computations. + * + * We should never allow statistics to decrease or to exceed the true + * value. Since the computed value will never be greater than the + * true value, we can achieve this by only storing the computed value + * when it increases. + */ +static inline void efx_update_diff_stat(u64 *stat, u64 diff) +{ + if ((s64)(diff - *stat) > 0) + *stat = diff; +} + +/* Interrupts */ +int efx_nic_init_interrupt(struct efx_nic *efx); +int efx_nic_irq_test_start(struct efx_nic *efx); +void efx_nic_fini_interrupt(struct efx_nic *efx); + +static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) +{ + return READ_ONCE(channel->event_test_cpu); +} +static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) +{ + return READ_ONCE(efx->last_irq_cpu); +} + +/* Global Resources */ +int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags); +void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); + +size_t efx_nic_get_regs_len(struct efx_nic *efx); +void efx_nic_get_regs(struct efx_nic *efx, void *buf); + +#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) + +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names); +int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest); +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u64 *stats, + const void *dma_buf, bool accumulate); +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); +static inline size_t efx_nic_update_stats_atomic(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + if (efx->type->update_stats_atomic) + return efx->type->update_stats_atomic(efx, full_stats, core_stats); + return efx->type->update_stats(efx, full_stats, core_stats); +} + +#define EFX_MAX_FLUSH_TIME 5000 + +#endif /* EFX_NIC_COMMON_H */ diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c new file mode 100644 index 000000000000..f0ef515e2ade --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -0,0 +1,2210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards 
+ * Copyright 2011-2013 Solarflare Communications Inc. + */ + +/* Theory of operation: + * + * PTP support is assisted by firmware running on the MC, which provides + * the hardware timestamping capabilities. Both transmitted and received + * PTP event packets are queued onto internal queues for subsequent processing; + * this is because the MC operations are relatively long and would block + * NAPI/interrupt operation. + * + * Receive event processing: + * The event contains the packet's UUID and sequence number, together + * with the hardware timestamp. The PTP receive packet queue is searched + * for this UUID/sequence number and, if found, put on a pending queue. + * Packets not matching are delivered without timestamps (MCDI events will + * always arrive after the actual packet). + * It is important for the operation of the PTP protocol that the ordering + * of packets between the event and general port is maintained. + * + * Work queue processing: + * If work waiting, synchronise host/hardware time + * + * Transmit: send packet through MC, which returns the transmission time + * that is converted to an appropriate timestamp. + * + * Receive: the packet's reception time is converted to an appropriate + * timestamp. + */ +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "io.h" +#include "farch_regs.h" +#include "tx.h" +#include "nic.h" /* indirectly includes ptp.h */ + +/* Maximum number of events expected to make up a PTP event */ +#define MAX_EVENT_FRAGS 3 + +/* Maximum delay, ms, to begin synchronisation */ +#define MAX_SYNCHRONISE_WAIT_MS 2 + +/* How long, at most, to spend synchronising */ +#define SYNCHRONISE_PERIOD_NS 250000 + +/* How often to update the shared memory time */ +#define SYNCHRONISATION_GRANULARITY_NS 200 + +/* Minimum permitted length of a (corrected) synchronisation time */ +#define DEFAULT_MIN_SYNCHRONISATION_NS 120 + +/* Maximum permitted length of a (corrected) synchronisation time */ +#define MAX_SYNCHRONISATION_NS 1000 + +/* How many (MC) receive events can be queued */ +#define MAX_RECEIVE_EVENTS 8 + +/* Length of (modified) moving average. */ +#define AVERAGE_LENGTH 16 + +/* How long an unmatched event or packet can be held */ +#define PKT_EVENT_LIFETIME_MS 10 + +/* Offsets into PTP packet for identification. These offsets are from the + * start of the IP header, not the MAC header. Note that neither PTP V1 nor + * PTP V2 permit the use of IPV4 options. + */ +#define PTP_DPORT_OFFSET 22 + +#define PTP_V1_VERSION_LENGTH 2 +#define PTP_V1_VERSION_OFFSET 28 + +#define PTP_V1_UUID_LENGTH 6 +#define PTP_V1_UUID_OFFSET 50 + +#define PTP_V1_SEQUENCE_LENGTH 2 +#define PTP_V1_SEQUENCE_OFFSET 58 + +/* The minimum length of a PTP V1 packet for offsets, etc. to be valid: + * includes IP header. + */ +#define PTP_V1_MIN_LENGTH 64 + +#define PTP_V2_VERSION_LENGTH 1 +#define PTP_V2_VERSION_OFFSET 29 + +#define PTP_V2_UUID_LENGTH 8 +#define PTP_V2_UUID_OFFSET 48 + +/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and a PortNumber (2), + * the MC only captures the last six bytes of the clock identity. These values + * reflect those, not the ones used in the standard. The standard permits + * mapping of V1 UUIDs to V2 UUIDs with these same values. 
+ */ +#define PTP_V2_MC_UUID_LENGTH 6 +#define PTP_V2_MC_UUID_OFFSET 50 + +#define PTP_V2_SEQUENCE_LENGTH 2 +#define PTP_V2_SEQUENCE_OFFSET 58 + +/* The minimum length of a PTP V2 packet for offsets, etc. to be valid: + * includes IP header. + */ +#define PTP_V2_MIN_LENGTH 63 + +#define PTP_MIN_LENGTH 63 + +#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */ +#define PTP_EVENT_PORT 319 +#define PTP_GENERAL_PORT 320 + +/* Annoyingly the format of the version numbers is different between + * versions 1 and 2 so it isn't possible to simply look for 1 or 2. + */ +#define PTP_VERSION_V1 1 + +#define PTP_VERSION_V2 2 +#define PTP_VERSION_V2_MASK 0x0f + +enum ptp_packet_state { + PTP_PACKET_STATE_UNMATCHED = 0, + PTP_PACKET_STATE_MATCHED, + PTP_PACKET_STATE_TIMED_OUT, + PTP_PACKET_STATE_MATCH_UNWANTED +}; + +/* NIC synchronised with single word of time only comprising + * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds. + */ +#define MC_NANOSECOND_BITS 30 +#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1) +#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1) + +/* Maximum parts-per-billion adjustment that is acceptable */ +#define MAX_PPB 1000000 + +/* Precalculate scale word to avoid long long division at runtime */ +/* This is equivalent to 2^66 / 10^9. */ +#define PPB_SCALE_WORD ((1LL << (57)) / 1953125LL) + +/* How much to shift down after scaling to convert to FP40 */ +#define PPB_SHIFT_FP40 26 +/* ... and FP44. */ +#define PPB_SHIFT_FP44 22 + +#define PTP_SYNC_ATTEMPTS 4 + +/** + * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area. + * @words: UUID and (partial) sequence number + * @expiry: Time after which the packet should be delivered irrespective of + * event arrival. + * @state: The state of the packet - whether it is ready for processing or + * whether that is of no interest. 
+ */ +struct efx_ptp_match { + u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)]; + unsigned long expiry; + enum ptp_packet_state state; +}; + +/** + * struct efx_ptp_event_rx - A PTP receive event (from MC) + * @link: list of events + * @seq0: First part of (PTP) UUID + * @seq1: Second part of (PTP) UUID and sequence number + * @hwtimestamp: Event timestamp + * @expiry: Time which the packet arrived + */ +struct efx_ptp_event_rx { + struct list_head link; + u32 seq0; + u32 seq1; + ktime_t hwtimestamp; + unsigned long expiry; +}; + +/** + * struct efx_ptp_timeset - Synchronisation between host and MC + * @host_start: Host time immediately before hardware timestamp taken + * @major: Hardware timestamp, major + * @minor: Hardware timestamp, minor + * @host_end: Host time immediately after hardware timestamp taken + * @wait: Number of NIC clock ticks between hardware timestamp being read and + * host end time being seen + * @window: Difference of host_end and host_start + * @valid: Whether this timeset is valid + */ +struct efx_ptp_timeset { + u32 host_start; + u32 major; + u32 minor; + u32 host_end; + u32 wait; + u32 window; /* Derived: end - start, allowing for wrap */ +}; + +/** + * struct efx_ptp_data - Precision Time Protocol (PTP) state + * @efx: The NIC context + * @channel: The PTP channel (Siena only) + * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are + * separate events) + * @rxq: Receive SKB queue (awaiting timestamps) + * @txq: Transmit SKB queue + * @evt_list: List of MC receive events awaiting packets + * @evt_free_list: List of free events + * @evt_lock: Lock for manipulating evt_list and evt_free_list + * @rx_evts: Instantiated events (on evt_list and evt_free_list) + * @workwq: Work queue for processing pending PTP operations + * @work: Work task + * @reset_required: A serious error has occurred and the PTP task needs to be + * reset (disable, enable). + * @rxfilter_event: Receive filter when operating + * @rxfilter_general: Receive filter when operating + * @rxfilter_installed: Receive filter installed + * @config: Current timestamp configuration + * @enabled: PTP operation enabled + * @mode: Mode in which PTP operating (PTP version) + * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time + * @nic_to_kernel_time: Function to convert from NIC to kernel time + * @nic_time: contains time details + * @nic_time.minor_max: Wrap point for NIC minor times + * @nic_time.sync_event_diff_min: Minimum acceptable difference between time + * in packet prefix and last MCDI time sync event i.e. how much earlier than + * the last sync event time a packet timestamp can be. + * @nic_time.sync_event_diff_max: Maximum acceptable difference between time + * in packet prefix and last MCDI time sync event i.e. how much later than + * the last sync event time a packet timestamp can be. + * @nic_time.sync_event_minor_shift: Shift required to make minor time from + * field in MCDI time sync event. 
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window + * @capabilities: Capabilities flags from the NIC + * @ts_corrections: contains corrections details + * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit + * timestamps + * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive + * timestamps + * @ts_corrections.pps_out: PPS output error (information only) + * @ts_corrections.pps_in: Required driver correction of PPS input timestamps + * @ts_corrections.general_tx: Required driver correction of general packet + * transmit timestamps + * @ts_corrections.general_rx: Required driver correction of general packet + * receive timestamps + * @evt_frags: Partly assembled PTP events + * @evt_frag_idx: Current fragment number + * @evt_code: Last event code + * @start: Address at which MC indicates ready for synchronisation + * @host_time_pps: Host time at last PPS + * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion + * frequency adjustment into a fixed point fractional nanosecond format. + * @current_adjfreq: Current ppb adjustment. + * @phc_clock: Pointer to registered phc device (if primary function) + * @phc_clock_info: Registration structure for phc device + * @pps_work: pps work task for handling pps events + * @pps_workwq: pps work queue + * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled + * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids + * allocations in main data path). + * @good_syncs: Number of successful synchronisations. + * @fast_syncs: Number of synchronisations requiring short delay + * @bad_syncs: Number of failed synchronisations. + * @sync_timeouts: Number of synchronisation timeouts + * @no_time_syncs: Number of synchronisations with no good times. + * @invalid_sync_windows: Number of sync windows with bad durations. + * @undersize_sync_windows: Number of corrected sync windows that are too small + * @oversize_sync_windows: Number of corrected sync windows that are too large + * @rx_no_timestamp: Number of packets received without a timestamp. + * @timeset: Last set of synchronisation statistics. + * @xmit_skb: Transmit SKB function. 
+ */ +struct efx_ptp_data { + struct efx_nic *efx; + struct efx_channel *channel; + bool rx_ts_inline; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct list_head evt_list; + struct list_head evt_free_list; + spinlock_t evt_lock; + struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; + struct workqueue_struct *workwq; + struct work_struct work; + bool reset_required; + u32 rxfilter_event; + u32 rxfilter_general; + bool rxfilter_installed; + struct hwtstamp_config config; + bool enabled; + unsigned int mode; + void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor); + ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor, + s32 correction); + struct { + u32 minor_max; + u32 sync_event_diff_min; + u32 sync_event_diff_max; + unsigned int sync_event_minor_shift; + } nic_time; + unsigned int min_synchronisation_ns; + unsigned int capabilities; + struct { + s32 ptp_tx; + s32 ptp_rx; + s32 pps_out; + s32 pps_in; + s32 general_tx; + s32 general_rx; + } ts_corrections; + efx_qword_t evt_frags[MAX_EVENT_FRAGS]; + int evt_frag_idx; + int evt_code; + struct efx_buffer start; + struct pps_event_time host_time_pps; + unsigned int adjfreq_ppb_shift; + s64 current_adjfreq; + struct ptp_clock *phc_clock; + struct ptp_clock_info phc_clock_info; + struct work_struct pps_work; + struct workqueue_struct *pps_workwq; + bool nic_ts_enabled; + efx_dword_t txbuf[MCDI_TX_BUF_LEN(MC_CMD_PTP_IN_TRANSMIT_LENMAX)]; + + unsigned int good_syncs; + unsigned int fast_syncs; + unsigned int bad_syncs; + unsigned int sync_timeouts; + unsigned int no_time_syncs; + unsigned int invalid_sync_windows; + unsigned int undersize_sync_windows; + unsigned int oversize_sync_windows; + unsigned int rx_no_timestamp; + struct efx_ptp_timeset + timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; + void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb); +}; + +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts); +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); + +bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx) +{ + return efx_has_cap(efx, TX_MAC_TIMESTAMPING); +} + +/* PTP 'extra' channel is still a traffic channel, but we only create TX queues + * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit. 
+ */ +static bool efx_ptp_want_txqs(struct efx_channel *channel) +{ + return efx_ptp_use_mac_tx_timestamps(channel->efx); +} + +#define PTP_SW_STAT(ext_name, field_name) \ + { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) } +#define PTP_MC_STAT(ext_name, mcdi_name) \ + { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST } +static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = { + PTP_SW_STAT(ptp_good_syncs, good_syncs), + PTP_SW_STAT(ptp_fast_syncs, fast_syncs), + PTP_SW_STAT(ptp_bad_syncs, bad_syncs), + PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts), + PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs), + PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows), + PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows), + PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows), + PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp), + PTP_MC_STAT(ptp_tx_timestamp_packets, TX), + PTP_MC_STAT(ptp_rx_timestamp_packets, RX), + PTP_MC_STAT(ptp_timestamp_packets, TS), + PTP_MC_STAT(ptp_filter_matches, FM), + PTP_MC_STAT(ptp_non_filter_matches, NFM), +}; +#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc) +static const unsigned long efx_ptp_stat_mask[] = { + [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, +}; + +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +{ + if (!efx->ptp_data) + return 0; + + return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, strings); +} + +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN); + size_t i; + int rc; + + if (!efx->ptp_data) + return 0; + + /* Copy software statistics */ + for (i = 0; i < PTP_STAT_COUNT; i++) { + if (efx_ptp_stat_desc[i].dma_width) + continue; + stats[i] = *(unsigned int *)((char *)efx->ptp_data + + efx_ptp_stat_desc[i].offset); + } + + /* Fetch MC statistics. We *must* fill in all statistics or + * risk leaking kernel memory to userland, so if the MCDI + * request fails we pretend we got zeroes. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + memset(outbuf, 0, sizeof(outbuf)); + efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, + stats, _MCDI_PTR(outbuf, 0), false); + + return PTP_STAT_COUNT; +} + +/* For Siena platforms NIC time is s and ns */ +static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec64 ts = ns_to_timespec64(ns); + *nic_major = (u32)ts.tv_sec; + *nic_minor = ts.tv_nsec; +} + +static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt = ktime_set(nic_major, nic_minor); + if (correction >= 0) + kt = ktime_add_ns(kt, (u64)correction); + else + kt = ktime_sub_ns(kt, (u64)-correction); + return kt; +} + +/* To convert from s27 format to ns we multiply then divide by a power of 2. + * For the conversion from ns to s27, the operation is also converted to a + * multiply and shift. + */ +#define S27_TO_NS_SHIFT (27) +#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC) +#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT) +#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT) + +/* For Huntington platforms NIC time is in seconds and fractions of a second + * where the minor register only uses 27 bits in units of 2^-27s. 
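+ *
+ * As a worked example of the constants above: 0.5 s = 500000000 ns converts
+ * to a minor value of 2^27 / 2 = 67108864 (0x4000000), and converting that
+ * back with the 10^9 / 2^27 scaling recovers exactly 500000000 ns.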
+ */ +static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec64 ts = ns_to_timespec64(ns); + u32 maj = (u32)ts.tv_sec; + u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + + (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); + + /* The conversion can result in the minor value exceeding the maximum. + * In this case, round up to the next second. + */ + if (min >= S27_MINOR_MAX) { + min -= S27_MINOR_MAX; + maj++; + } + + *nic_major = maj; + *nic_minor = min; +} + +static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor) +{ + u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC + + (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT); + return ktime_set(nic_major, ns); +} + +static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + /* Apply the correction and deal with carry */ + nic_minor += correction; + if ((s32)nic_minor < 0) { + nic_minor += S27_MINOR_MAX; + nic_major--; + } else if (nic_minor >= S27_MINOR_MAX) { + nic_minor -= S27_MINOR_MAX; + nic_major++; + } + + return efx_ptp_s27_to_ktime(nic_major, nic_minor); +} + +/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */ +static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec64 ts = ns_to_timespec64(ns); + + *nic_major = (u32)ts.tv_sec; + *nic_minor = ts.tv_nsec * 4; +} + +static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt; + + nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4); + correction = DIV_ROUND_CLOSEST(correction, 4); + + kt = ktime_set(nic_major, nic_minor); + + if (correction >= 0) + kt = ktime_add_ns(kt, (u64)correction); + else + kt = ktime_sub_ns(kt, (u64)-correction); + return kt; +} + +struct efx_channel *efx_ptp_channel(struct efx_nic *efx) +{ + return efx->ptp_data ? efx->ptp_data->channel : NULL; +} + +static u32 last_sync_timestamp_major(struct efx_nic *efx) +{ + struct efx_channel *channel = efx_ptp_channel(efx); + u32 major = 0; + + if (channel) + major = channel->sync_timestamp_major; + return major; +} + +/* The 8000 series and later can provide the time from the MAC, which is only + * 48 bits long and provides meta-information in the top 2 bits. + */ +static ktime_t +efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx, + struct efx_ptp_data *ptp, + u32 nic_major, u32 nic_minor, + s32 correction) +{ + u32 sync_timestamp; + ktime_t kt = { 0 }; + s16 delta; + + if (!(nic_major & 0x80000000)) { + WARN_ON_ONCE(nic_major >> 16); + + /* Medford provides 48 bits of timestamp, so we must get the top + * 16 bits from the timesync event state. + * + * We only have the lower 16 bits of the time now, but we do + * have a full resolution timestamp at some point in past. As + * long as the difference between the (real) now and the sync + * is less than 2^15, then we can reconstruct the difference + * between those two numbers using only the lower 16 bits of + * each. + * + * Put another way + * + * a - b = ((a mod k) - b) mod k + * + * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know + * (a mod k) and b, so can calculate the delta, a - b. + * + */ + sync_timestamp = last_sync_timestamp_major(efx); + + /* Because delta is s16 this does an implicit mask down to + * 16 bits which is what we need, assuming + * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that + * we can deal with the (unlikely) case of sync timestamps + * arriving from the future. 
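+		 *
+		 * Worked example with k = 2^16: if the last sync major time
+		 * was 0x1fffe and the completion carries 0x0003 in its low
+		 * 16 bits, the u32 subtraction 0x0003 - 0x1fffe is
+		 * 0xfffe0005, whose low 16 bits give delta = +5, and
+		 * 0x1fffe + 5 = 0x20003 reconstructs the full major value.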
+ */ + delta = nic_major - sync_timestamp; + + /* Recover the fully specified time now, by applying the offset + * to the (fully specified) sync time. + */ + nic_major = sync_timestamp + delta; + + kt = ptp->nic_to_kernel_time(nic_major, nic_minor, + correction); + } + return kt; +} + +ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + ktime_t kt; + + if (efx_ptp_use_mac_tx_timestamps(efx)) + kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp, + tx_queue->completed_timestamp_major, + tx_queue->completed_timestamp_minor, + ptp->ts_corrections.general_tx); + else + kt = ptp->nic_to_kernel_time( + tx_queue->completed_timestamp_major, + tx_queue->completed_timestamp_minor, + ptp->ts_corrections.general_tx); + return kt; +} + +/* Get PTP attributes and set up time conversions */ +static int efx_ptp_get_attributes(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN); + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + u32 fmt; + size_t out_len; + + /* Get the PTP attributes. If the NIC doesn't support the operation we + * use the default format for compatibility with older NICs i.e. + * seconds and nanoseconds. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) { + fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT); + } else if (rc == -EINVAL) { + fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS; + } else if (rc == -EPERM) { + pci_info(efx->pci_dev, "no PTP support\n"); + return rc; + } else { + efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), + outbuf, sizeof(outbuf), rc); + return rc; + } + + switch (fmt) { + case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION: + ptp->ns_to_nic_time = efx_ptp_ns_to_s27; + ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction; + ptp->nic_time.minor_max = 1 << 27; + ptp->nic_time.sync_event_minor_shift = 19; + break; + case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS: + ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns; + ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction; + ptp->nic_time.minor_max = 1000000000; + ptp->nic_time.sync_event_minor_shift = 22; + break; + case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS: + ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns; + ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction; + ptp->nic_time.minor_max = 4000000000UL; + ptp->nic_time.sync_event_minor_shift = 24; + break; + default: + return -ERANGE; + } + + /* Precalculate acceptable difference between the minor time in the + * packet prefix and the last MCDI time sync event. We expect the + * packet prefix timestamp to be after of sync event by up to one + * sync event interval (0.25s) but we allow it to exceed this by a + * fuzz factor of (0.1s) + */ + ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max + - (ptp->nic_time.minor_max / 10); + ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4) + + (ptp->nic_time.minor_max / 10); + + /* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older + * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function now may return + * a value to use for the minimum acceptable corrected synchronization + * window and may return further capabilities. 
+ * If we have the extra information store it. For older firmware that + * does not implement the extended command use the default value. + */ + if (rc == 0 && + out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST) + ptp->min_synchronisation_ns = + MCDI_DWORD(outbuf, + PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN); + else + ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS; + + if (rc == 0 && + out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN) + ptp->capabilities = MCDI_DWORD(outbuf, + PTP_OUT_GET_ATTRIBUTES_CAPABILITIES); + else + ptp->capabilities = 0; + + /* Set up the shift for conversion between frequency + * adjustments in parts-per-billion and the fixed-point + * fractional ns format that the adapter uses. + */ + if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN)) + ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44; + else + ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40; + + return 0; +} + +/* Get PTP timestamp corrections */ +static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN); + int rc; + size_t out_len; + + /* Get the timestamp corrections from the NIC. If this operation is + * not supported (older NICs) then no correction is required. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, + MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) { + efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); + efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE); + efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT); + efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN); + + if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) { + efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX); + efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD( + outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX); + } else { + efx->ptp_data->ts_corrections.general_tx = + efx->ptp_data->ts_corrections.ptp_tx; + efx->ptp_data->ts_corrections.general_rx = + efx->ptp_data->ts_corrections.ptp_rx; + } + } else if (rc == -EINVAL) { + efx->ptp_data->ts_corrections.ptp_tx = 0; + efx->ptp_data->ts_corrections.ptp_rx = 0; + efx->ptp_data->ts_corrections.pps_out = 0; + efx->ptp_data->ts_corrections.pps_in = 0; + efx->ptp_data->ts_corrections.general_tx = 0; + efx->ptp_data->ts_corrections.general_rx = 0; + } else { + efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf, + sizeof(outbuf), rc); + return rc; + } + + return 0; +} + +/* Enable MCDI PTP support. */ +static int efx_ptp_enable(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, + efx->ptp_data->channel ? + efx->ptp_data->channel->channel : 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 
0 : rc; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_ENABLE_LEN, + outbuf, sizeof(outbuf), rc); + return rc; +} + +/* Disable MCDI PTP support. + * + * Note that this function should never rely on the presence of ptp_data - + * may be called before that exists. + */ +static int efx_ptp_disable(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 0 : rc; + /* If we get ENOSYS, the NIC doesn't support PTP, and thus this function + * should only have been called during probe. + */ + if (rc == -ENOSYS || rc == -EPERM) + pci_info(efx->pci_dev, "no PTP support\n"); + else if (rc) + efx_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_DISABLE_LEN, + outbuf, sizeof(outbuf), rc); + return rc; +} + +static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(q))) { + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); + } +} + +static void efx_ptp_handle_no_channel(struct efx_nic *efx) +{ + netif_err(efx, drv, efx->net_dev, + "ERROR: PTP requires MSI-X and 1 additional interrupt" + "vector. PTP disabled\n"); +} + +/* Repeatedly send the host time to the MC which will capture the hardware + * time. + */ +static void efx_ptp_send_times(struct efx_nic *efx, + struct pps_event_time *last_time) +{ + struct pps_event_time now; + struct timespec64 limit; + struct efx_ptp_data *ptp = efx->ptp_data; + int *mc_running = ptp->start.addr; + + pps_get_ts(&now); + limit = now.ts_real; + timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS); + + /* Write host time for specified period or until MC is done */ + while ((timespec64_compare(&now.ts_real, &limit) < 0) && + READ_ONCE(*mc_running)) { + struct timespec64 update_time; + unsigned int host_time; + + /* Don't update continuously to avoid saturating the PCIe bus */ + update_time = now.ts_real; + timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); + do { + pps_get_ts(&now); + } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && + READ_ONCE(*mc_running)); + + /* Synchronise NIC with single word of time only */ + host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | + now.ts_real.tv_nsec); + /* Update host time in NIC memory */ + efx->type->ptp_write_host_time(efx, host_time); + } + *last_time = now; +} + +/* Read a timeset from the MC's results and partial process. */ +static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), + struct efx_ptp_timeset *timeset) +{ + unsigned start_ns, end_ns; + + timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); + timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR); + timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR); + timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND), + timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); + + /* Ignore seconds */ + start_ns = timeset->host_start & MC_NANOSECOND_MASK; + end_ns = timeset->host_end & MC_NANOSECOND_MASK; + /* Allow for rollover */ + if (end_ns < start_ns) + end_ns += NSEC_PER_SEC; + /* Determine duration of operation */ + timeset->window = end_ns - start_ns; +} + +/* Process times received from MC. + * + * Extract times from returned results, and establish the minimum value + * seen. 
The minimum value represents the "best" possible time and events + * too much greater than this are rejected - the machine is, perhaps, too + * busy. A number of readings are taken so that, hopefully, at least one good + * synchronisation will be seen in the results. + */ +static int +efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), + size_t response_length, + const struct pps_event_time *last_time) +{ + unsigned number_readings = + MCDI_VAR_ARRAY_LEN(response_length, + PTP_OUT_SYNCHRONIZE_TIMESET); + unsigned i; + unsigned ngood = 0; + unsigned last_good = 0; + struct efx_ptp_data *ptp = efx->ptp_data; + u32 last_sec; + u32 start_sec; + struct timespec64 delta; + ktime_t mc_time; + + if (number_readings == 0) + return -EAGAIN; + + /* Read the set of results and find the last good host-MC + * synchronization result. The MC times when it finishes reading the + * host time so the corrected window time should be fairly constant + * for a given platform. Increment stats for any results that appear + * to be erroneous. + */ + for (i = 0; i < number_readings; i++) { + s32 window, corrected; + struct timespec64 wait; + + efx_ptp_read_timeset( + MCDI_ARRAY_STRUCT_PTR(synch_buf, + PTP_OUT_SYNCHRONIZE_TIMESET, i), + &ptp->timeset[i]); + + wait = ktime_to_timespec64( + ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0)); + window = ptp->timeset[i].window; + corrected = window - wait.tv_nsec; + + /* We expect the uncorrected synchronization window to be at + * least as large as the interval between host start and end + * times. If it is smaller than this then this is mostly likely + * to be a consequence of the host's time being adjusted. + * Check that the corrected sync window is in a reasonable + * range. If it is out of range it is likely to be because an + * interrupt or other delay occurred between reading the system + * time and writing it to MC memory. + */ + if (window < SYNCHRONISATION_GRANULARITY_NS) { + ++ptp->invalid_sync_windows; + } else if (corrected >= MAX_SYNCHRONISATION_NS) { + ++ptp->oversize_sync_windows; + } else if (corrected < ptp->min_synchronisation_ns) { + ++ptp->undersize_sync_windows; + } else { + ngood++; + last_good = i; + } + } + + if (ngood == 0) { + netif_warn(efx, drv, efx->net_dev, + "PTP no suitable synchronisations\n"); + return -EAGAIN; + } + + /* Calculate delay from last good sync (host time) to last_time. + * It is possible that the seconds rolled over between taking + * the start reading and the last value written by the host. The + * timescales are such that a gap of more than one second is never + * expected. delta is *not* normalised. + */ + start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS; + last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK; + if (start_sec != last_sec && + ((start_sec + 1) & MC_SECOND_MASK) != last_sec) { + netif_warn(efx, hw, efx->net_dev, + "PTP bad synchronisation seconds\n"); + return -EAGAIN; + } + delta.tv_sec = (last_sec - start_sec) & 1; + delta.tv_nsec = + last_time->ts_real.tv_nsec - + (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK); + + /* Convert the NIC time at last good sync into kernel time. + * No correction is required - this time is the output of a + * firmware process. 
+ */ + mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major, + ptp->timeset[last_good].minor, 0); + + /* Calculate delay from NIC top of second to last_time */ + delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec; + + /* Set PPS timestamp to match NIC top of second */ + ptp->host_time_pps = *last_time; + pps_sub_ts(&ptp->host_time_pps, delta); + + return 0; +} + +/* Synchronize times between the host and the MC */ +static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX); + size_t response_length; + int rc; + unsigned long timeout; + struct pps_event_time last_time = {}; + unsigned int loops = 0; + int *start = ptp->start.addr; + + MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); + MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, + num_readings); + MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR, + ptp->start.dma_addr); + + /* Clear flag that signals MC ready */ + WRITE_ONCE(*start, 0); + rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + EFX_WARN_ON_ONCE_PARANOID(rc); + + /* Wait for start from MCDI (or timeout) */ + timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); + while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) { + udelay(20); /* Usually start MCDI execution quickly */ + loops++; + } + + if (loops <= 1) + ++ptp->fast_syncs; + if (!time_before(jiffies, timeout)) + ++ptp->sync_timeouts; + + if (READ_ONCE(*start)) + efx_ptp_send_times(efx, &last_time); + + /* Collect results */ + rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN, + synch_buf, sizeof(synch_buf), + &response_length); + if (rc == 0) { + rc = efx_ptp_process_times(efx, synch_buf, response_length, + &last_time); + if (rc == 0) + ++ptp->good_syncs; + else + ++ptp->no_time_syncs; + } + + /* Increment the bad syncs counter if the synchronize fails, whatever + * the reason. + */ + if (rc != 0) + ++ptp->bad_syncs; + + return rc; +} + +/* Transmit a PTP packet via the dedicated hardware timestamped queue. */ +static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp_data = efx->ptp_data; + u8 type = efx_tx_csum_type_skb(skb); + struct efx_tx_queue *tx_queue; + + tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type); + if (tx_queue && tx_queue->timestamping) { + efx_enqueue_skb(tx_queue, skb); + } else { + WARN_ONCE(1, "PTP channel has no timestamped tx queue\n"); + dev_kfree_skb_any(skb); + } +} + +/* Transmit a PTP packet, via the MCDI interface, to the wire. 
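+ *
+ * The skb is linearised and software-checksummed if necessary, copied into
+ * the PTP_IN_TRANSMIT MCDI request, and the transmit timestamp returned by
+ * the MC is reported back to the stack through skb_tstamp_tx().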
 */
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
+	struct skb_shared_hwtstamps timestamps;
+	int rc = -EIO;
+	MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+	size_t len;
+
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+	if (skb_shinfo(skb)->nr_frags != 0) {
+		rc = skb_linearize(skb);
+		if (rc != 0)
+			goto fail;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		rc = skb_checksum_help(skb);
+		if (rc != 0)
+			goto fail;
+	}
+	skb_copy_from_linear_data(skb,
+				  MCDI_PTR(ptp_data->txbuf,
+					   PTP_IN_TRANSMIT_PACKET),
+				  skb->len);
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+			  ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+			  txtime, sizeof(txtime), &len);
+	if (rc != 0)
+		goto fail;
+
+	memset(&timestamps, 0, sizeof(timestamps));
+	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+		ptp_data->ts_corrections.ptp_tx);
+
+	skb_tstamp_tx(skb, &timestamps);
+
+	rc = 0;
+
+fail:
+	dev_kfree_skb_any(skb);
+
+	return;
+}
+
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct list_head *cursor;
+	struct list_head *next;
+
+	if (ptp->rx_ts_inline)
+		return;
+
+	/* Drop time-expired events */
+	spin_lock_bh(&ptp->evt_lock);
+	list_for_each_safe(cursor, next, &ptp->evt_list) {
+		struct efx_ptp_event_rx *evt;
+
+		evt = list_entry(cursor, struct efx_ptp_event_rx,
+				 link);
+		if (time_after(jiffies, evt->expiry)) {
+			list_move(&evt->link, &ptp->evt_free_list);
+			netif_warn(efx, hw, efx->net_dev,
+				   "PTP rx event dropped\n");
+		}
+	}
+	spin_unlock_bh(&ptp->evt_lock);
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+					      struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	bool evts_waiting;
+	struct list_head *cursor;
+	struct list_head *next;
+	struct efx_ptp_match *match;
+	enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+	WARN_ON_ONCE(ptp->rx_ts_inline);
+
+	spin_lock_bh(&ptp->evt_lock);
+	evts_waiting = !list_empty(&ptp->evt_list);
+	spin_unlock_bh(&ptp->evt_lock);
+
+	if (!evts_waiting)
+		return PTP_PACKET_STATE_UNMATCHED;
+
+	match = (struct efx_ptp_match *)skb->cb;
+	/* Look for a matching timestamp in the event queue */
+	spin_lock_bh(&ptp->evt_lock);
+	list_for_each_safe(cursor, next, &ptp->evt_list) {
+		struct efx_ptp_event_rx *evt;
+
+		evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+		if ((evt->seq0 == match->words[0]) &&
+		    (evt->seq1 == match->words[1])) {
+			struct skb_shared_hwtstamps *timestamps;
+
+			/* Match - add in hardware timestamp */
+			timestamps = skb_hwtstamps(skb);
+			timestamps->hwtstamp = evt->hwtimestamp;
+
+			match->state = PTP_PACKET_STATE_MATCHED;
+			rc = PTP_PACKET_STATE_MATCHED;
+			list_move(&evt->link, &ptp->evt_free_list);
+			break;
+		}
+	}
+	spin_unlock_bh(&ptp->evt_lock);
+
+	return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
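+ *
+ * Packets that need no timestamp, packets matched to a receive event and
+ * packets whose match window has expired are all moved to q; an unmatched
+ * packet that has not yet expired is put back at the head of the receive
+ * queue and processing stops until more events arrive.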
+ */ +static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&ptp->rxq))) { + struct efx_ptp_match *match; + + match = (struct efx_ptp_match *)skb->cb; + if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) { + __skb_queue_tail(q, skb); + } else if (efx_ptp_match_rx(efx, skb) == + PTP_PACKET_STATE_MATCHED) { + __skb_queue_tail(q, skb); + } else if (time_after(jiffies, match->expiry)) { + match->state = PTP_PACKET_STATE_TIMED_OUT; + ++ptp->rx_no_timestamp; + __skb_queue_tail(q, skb); + } else { + /* Replace unprocessed entry and stop */ + skb_queue_head(&ptp->rxq, skb); + break; + } + } +} + +/* Complete processing of a received packet */ +static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb) +{ + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); +} + +static void efx_ptp_remove_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + if (ptp->rxfilter_installed) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_general); + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + ptp->rxfilter_installed = false; + } +} + +static int efx_ptp_insert_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_filter_spec rxfilter; + int rc; + + if (!ptp->channel || ptp->rxfilter_installed) + return 0; + + /* Must filter on both event and general ports to ensure + * that there is no packet re-ordering. + */ + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_EVENT_PORT)); + if (rc != 0) + return rc; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + return rc; + ptp->rxfilter_event = rc; + + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_GENERAL_PORT)); + if (rc != 0) + goto fail; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + goto fail; + ptp->rxfilter_general = rc; + + ptp->rxfilter_installed = true; + return 0; + +fail: + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + return rc; +} + +static int efx_ptp_start(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + + ptp->reset_required = false; + + rc = efx_ptp_insert_multicast_filters(efx); + if (rc) + return rc; + + rc = efx_ptp_enable(efx); + if (rc != 0) + goto fail; + + ptp->evt_frag_idx = 0; + ptp->current_adjfreq = 0; + + return 0; + +fail: + efx_ptp_remove_multicast_filters(efx); + return rc; +} + +static int efx_ptp_stop(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct list_head *cursor; + struct list_head *next; + int rc; + + if (ptp == NULL) + return 0; + + rc = efx_ptp_disable(efx); + + efx_ptp_remove_multicast_filters(efx); + + /* Make sure RX packets are really delivered */ + efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); + + /* Drop any pending receive events */ + spin_lock_bh(&efx->ptp_data->evt_lock); + list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { + list_move(cursor, &efx->ptp_data->evt_free_list); + } + 
spin_unlock_bh(&efx->ptp_data->evt_lock); + + return rc; +} + +static int efx_ptp_restart(struct efx_nic *efx) +{ + if (efx->ptp_data && efx->ptp_data->enabled) + return efx_ptp_start(efx); + return 0; +} + +static void efx_ptp_pps_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp = + container_of(work, struct efx_ptp_data, pps_work); + struct efx_nic *efx = ptp->efx; + struct ptp_clock_event ptp_evt; + + if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) + return; + + ptp_evt.type = PTP_CLOCK_PPSUSR; + ptp_evt.pps_times = ptp->host_time_pps; + ptp_clock_event(ptp->phc_clock, &ptp_evt); +} + +static void efx_ptp_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp_data = + container_of(work, struct efx_ptp_data, work); + struct efx_nic *efx = ptp_data->efx; + struct sk_buff *skb; + struct sk_buff_head tempq; + + if (ptp_data->reset_required) { + efx_ptp_stop(efx); + efx_ptp_start(efx); + return; + } + + efx_ptp_drop_time_expired_events(efx); + + __skb_queue_head_init(&tempq); + efx_ptp_process_events(efx, &tempq); + + while ((skb = skb_dequeue(&ptp_data->txq))) + ptp_data->xmit_skb(efx, skb); + + while ((skb = __skb_dequeue(&tempq))) + efx_ptp_process_rx(efx, skb); +} + +static const struct ptp_clock_info efx_phc_clock_info = { + .owner = THIS_MODULE, + .name = "sfc", + .max_adj = MAX_PPB, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfreq = efx_phc_adjfreq, + .adjtime = efx_phc_adjtime, + .gettime64 = efx_phc_gettime, + .settime64 = efx_phc_settime, + .enable = efx_phc_enable, +}; + +/* Initialise PTP state. */ +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) +{ + struct efx_ptp_data *ptp; + int rc = 0; + unsigned int pos; + + ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL); + efx->ptp_data = ptp; + if (!efx->ptp_data) + return -ENOMEM; + + ptp->efx = efx; + ptp->channel = channel; + ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; + + rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); + if (rc != 0) + goto fail1; + + skb_queue_head_init(&ptp->rxq); + skb_queue_head_init(&ptp->txq); + ptp->workwq = create_singlethread_workqueue("sfc_ptp"); + if (!ptp->workwq) { + rc = -ENOMEM; + goto fail2; + } + + if (efx_ptp_use_mac_tx_timestamps(efx)) { + ptp->xmit_skb = efx_ptp_xmit_skb_queue; + /* Request sync events on this channel. 
*/ + channel->sync_events_state = SYNC_EVENTS_QUIESCENT; + } else { + ptp->xmit_skb = efx_ptp_xmit_skb_mc; + } + + INIT_WORK(&ptp->work, efx_ptp_worker); + ptp->config.flags = 0; + ptp->config.tx_type = HWTSTAMP_TX_OFF; + ptp->config.rx_filter = HWTSTAMP_FILTER_NONE; + INIT_LIST_HEAD(&ptp->evt_list); + INIT_LIST_HEAD(&ptp->evt_free_list); + spin_lock_init(&ptp->evt_lock); + for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) + list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); + + /* Get the NIC PTP attributes and set up time conversions */ + rc = efx_ptp_get_attributes(efx); + if (rc < 0) + goto fail3; + + /* Get the timestamp corrections */ + rc = efx_ptp_get_timestamp_corrections(efx); + if (rc < 0) + goto fail3; + + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) { + ptp->phc_clock_info = efx_phc_clock_info; + ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, + &efx->pci_dev->dev); + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); + goto fail3; + } else if (ptp->phc_clock) { + INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); + ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); + if (!ptp->pps_workwq) { + rc = -ENOMEM; + goto fail4; + } + } + } + ptp->nic_ts_enabled = false; + + return 0; +fail4: + ptp_clock_unregister(efx->ptp_data->phc_clock); + +fail3: + destroy_workqueue(efx->ptp_data->workwq); + +fail2: + efx_nic_free_buffer(efx, &ptp->start); + +fail1: + kfree(efx->ptp_data); + efx->ptp_data = NULL; + + return rc; +} + +/* Initialise PTP channel. + * + * Setting core_index to zero causes the queue to be initialised and doesn't + * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue. + */ +static int efx_ptp_probe_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + int rc; + + channel->irq_moderation_us = 0; + channel->rx_queue.core_index = 0; + + rc = efx_ptp_probe(efx, channel); + /* Failure to probe PTP is not fatal; this channel will just not be + * used for anything. + * In the case of EPERM, efx_ptp_probe will print its own message (in + * efx_ptp_get_attributes()), so we don't need to. + */ + if (rc && rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "Failed to probe PTP, rc=%d\n", rc); + return 0; +} + +void efx_ptp_remove(struct efx_nic *efx) +{ + if (!efx->ptp_data) + return; + + (void)efx_ptp_disable(efx); + + cancel_work_sync(&efx->ptp_data->work); + if (efx->ptp_data->pps_workwq) + cancel_work_sync(&efx->ptp_data->pps_work); + + skb_queue_purge(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); + + if (efx->ptp_data->phc_clock) { + destroy_workqueue(efx->ptp_data->pps_workwq); + ptp_clock_unregister(efx->ptp_data->phc_clock); + } + + destroy_workqueue(efx->ptp_data->workwq); + + efx_nic_free_buffer(efx, &efx->ptp_data->start); + kfree(efx->ptp_data); + efx->ptp_data = NULL; +} + +static void efx_ptp_remove_channel(struct efx_channel *channel) +{ + efx_ptp_remove(channel->efx); +} + +static void efx_ptp_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-ptp", channel->efx->name); +} + +/* Determine whether this packet should be processed by the PTP module + * or transmitted conventionally. 
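+ *
+ * Only IPv4/UDP frames addressed to the PTP event port, and short enough to
+ * fit in an MCDI transmit request, are diverted to the PTP path; everything
+ * else is transmitted normally.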
+ */ +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + return efx->ptp_data && + efx->ptp_data->enabled && + skb->len >= PTP_MIN_LENGTH && + skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && + likely(skb->protocol == htons(ETH_P_IP)) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) >= sizeof(struct iphdr) && + ip_hdr(skb)->protocol == IPPROTO_UDP && + skb_headlen(skb) >= + skb_transport_offset(skb) + sizeof(struct udphdr) && + udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); +} + +/* Receive a PTP packet. Packets are queued until the arrival of + * the receive timestamp from the MC - this will probably occur after the + * packet arrival because of the processing in the MC. + */ +static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; + u8 *match_data_012, *match_data_345; + unsigned int version; + u8 *data; + + match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + + /* Correct version? */ + if (ptp->mode == MC_CMD_PTP_MODE_V1) { + if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { + return false; + } + data = skb->data; + version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]); + if (version != PTP_VERSION_V1) { + return false; + } + + /* PTP V1 uses all six bytes of the UUID to match the packet + * to the timestamp + */ + match_data_012 = data + PTP_V1_UUID_OFFSET; + match_data_345 = data + PTP_V1_UUID_OFFSET + 3; + } else { + if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { + return false; + } + data = skb->data; + version = data[PTP_V2_VERSION_OFFSET]; + if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { + return false; + } + + /* The original V2 implementation uses bytes 2-7 of + * the UUID to match the packet to the timestamp. This + * discards two of the bytes of the MAC address used + * to create the UUID (SF bug 33070). The PTP V2 + * enhanced mode fixes this issue and uses bytes 0-2 + * and byte 5-7 of the UUID. + */ + match_data_345 = data + PTP_V2_UUID_OFFSET + 5; + if (ptp->mode == MC_CMD_PTP_MODE_V2) { + match_data_012 = data + PTP_V2_UUID_OFFSET + 2; + } else { + match_data_012 = data + PTP_V2_UUID_OFFSET + 0; + BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); + } + } + + /* Does this packet require timestamping? */ + if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { + match->state = PTP_PACKET_STATE_UNMATCHED; + + /* We expect the sequence number to be in the same position in + * the packet for PTP V1 and V2 + */ + BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); + BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); + + /* Extract UUID/Sequence information */ + match->words[0] = (match_data_012[0] | + (match_data_012[1] << 8) | + (match_data_012[2] << 16) | + (match_data_345[0] << 24)); + match->words[1] = (match_data_345[1] | + (match_data_345[2] << 8) | + (data[PTP_V1_SEQUENCE_OFFSET + + PTP_V1_SEQUENCE_LENGTH - 1] << + 16)); + } else { + match->state = PTP_PACKET_STATE_MATCH_UNWANTED; + } + + skb_queue_tail(&ptp->rxq, skb); + queue_work(ptp->workwq, &ptp->work); + + return true; +} + +/* Transmit a PTP packet. This has to be transmitted by the MC + * itself, through an MCDI call. MCDI calls aren't permitted + * in the transmit path so defer the actual transmission to a suitable worker. 
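+ *
+ * (On NICs that timestamp in the MAC, the worker instead hands the skb to
+ * the dedicated timestamping TX queue; see the xmit_skb selection in
+ * efx_ptp_probe().)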
+ */ +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + skb_queue_tail(&ptp->txq, skb); + + if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) && + (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM)) + efx_xmit_hwtstamp_pending(skb); + queue_work(ptp->workwq, &ptp->work); + + return NETDEV_TX_OK; +} + +int efx_ptp_get_mode(struct efx_nic *efx) +{ + return efx->ptp_data->mode; +} + +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode) +{ + if ((enable_wanted != efx->ptp_data->enabled) || + (enable_wanted && (efx->ptp_data->mode != new_mode))) { + int rc = 0; + + if (enable_wanted) { + /* Change of mode requires disable */ + if (efx->ptp_data->enabled && + (efx->ptp_data->mode != new_mode)) { + efx->ptp_data->enabled = false; + rc = efx_ptp_stop(efx); + if (rc != 0) + return rc; + } + + /* Set new operating mode and establish + * baseline synchronisation, which must + * succeed. + */ + efx->ptp_data->mode = new_mode; + if (netif_running(efx->net_dev)) + rc = efx_ptp_start(efx); + if (rc == 0) { + rc = efx_ptp_synchronize(efx, + PTP_SYNC_ATTEMPTS * 2); + if (rc != 0) + efx_ptp_stop(efx); + } + } else { + rc = efx_ptp_stop(efx); + } + + if (rc != 0) + return rc; + + efx->ptp_data->enabled = enable_wanted; + } + + return 0; +} + +static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) +{ + int rc; + + if ((init->tx_type != HWTSTAMP_TX_OFF) && + (init->tx_type != HWTSTAMP_TX_ON)) + return -ERANGE; + + rc = efx->type->ptp_set_ts_config(efx, init); + if (rc) + return rc; + + efx->ptp_data->config = *init; + return 0; +} + +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_nic *primary = efx->primary; + + ASSERT_RTNL(); + + if (!ptp) + return; + + ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); + /* Check licensed features. If we don't have the license for TX + * timestamps, the NIC will not support them. + */ + if (efx_ptp_use_mac_tx_timestamps(efx)) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!(nic_data->licensed_features & + (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) + ts_info->so_timestamping &= + ~SOF_TIMESTAMPING_TX_HARDWARE; + } + if (primary && primary->ptp_data && primary->ptp_data->phc_clock) + ts_info->phc_index = + ptp_clock_index(primary->ptp_data->phc_clock); + ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; + ts_info->rx_filters = ptp->efx->type->hwtstamp_filters; +} + +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int rc; + + /* Not a PTP enabled port */ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + rc = efx_ptp_ts_init(efx, &config); + if (rc != 0) + return rc; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) + ? -EFAULT : 0; +} + +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr) +{ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, &efx->ptp_data->config, + sizeof(efx->ptp_data->config)) ? 
-EFAULT : 0; +} + +static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + netif_err(efx, hw, efx->net_dev, + "PTP unexpected event length: got %d expected %d\n", + ptp->evt_frag_idx, expected_frag_len); + ptp->reset_required = true; + queue_work(ptp->workwq, &ptp->work); +} + +/* Process a completed receive event. Put it on the event queue and + * start worker thread. This is required because event and their + * correspoding packets may come in either order. + */ +static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + struct efx_ptp_event_rx *evt = NULL; + + if (WARN_ON_ONCE(ptp->rx_ts_inline)) + return; + + if (ptp->evt_frag_idx != 3) { + ptp_event_failure(efx, 3); + return; + } + + spin_lock_bh(&ptp->evt_lock); + if (!list_empty(&ptp->evt_free_list)) { + evt = list_first_entry(&ptp->evt_free_list, + struct efx_ptp_event_rx, link); + list_del(&evt->link); + + evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA); + evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2], + MCDI_EVENT_SRC) | + (EFX_QWORD_FIELD(ptp->evt_frags[1], + MCDI_EVENT_SRC) << 8) | + (EFX_QWORD_FIELD(ptp->evt_frags[0], + MCDI_EVENT_SRC) << 16)); + evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time( + EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), + EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA), + ptp->ts_corrections.ptp_rx); + evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + list_add_tail(&evt->link, &ptp->evt_list); + + queue_work(ptp->workwq, &ptp->work); + } else if (net_ratelimit()) { + /* Log a rate-limited warning message. */ + netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); + } + spin_unlock_bh(&ptp->evt_lock); +} + +static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA); + if (ptp->evt_frag_idx != 1) { + ptp_event_failure(efx, 1); + return; + } + + netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code); +} + +static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + if (ptp->nic_ts_enabled) + queue_work(ptp->pps_workwq, &ptp->pps_work); +} + +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); + + if (!ptp) { + if (!efx->ptp_warned) { + netif_warn(efx, drv, efx->net_dev, + "Received PTP event but PTP not set up\n"); + efx->ptp_warned = true; + } + return; + } + + if (!ptp->enabled) + return; + + if (ptp->evt_frag_idx == 0) { + ptp->evt_code = code; + } else if (ptp->evt_code != code) { + netif_err(efx, hw, efx->net_dev, + "PTP out of sequence event %d\n", code); + ptp->evt_frag_idx = 0; + } + + ptp->evt_frags[ptp->evt_frag_idx++] = *ev; + if (!MCDI_EVENT_FIELD(*ev, CONT)) { + /* Process resulting event */ + switch (code) { + case MCDI_EVENT_CODE_PTP_RX: + ptp_event_rx(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_FAULT: + ptp_event_fault(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_PPS: + ptp_event_pps(efx, ptp); + break; + default: + netif_err(efx, hw, efx->net_dev, + "PTP unknown event %d\n", code); + break; + } + ptp->evt_frag_idx = 0; + } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) { + netif_err(efx, hw, efx->net_dev, + "PTP too many event fragments\n"); + ptp->evt_frag_idx = 0; + } +} + +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + + 
/* When extracting the sync timestamp minor value, we should discard + * the least significant two bits. These are not required in order + * to reconstruct full-range timestamps and they are optionally used + * to report status depending on the options supplied when subscribing + * for sync events. + */ + channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR); + channel->sync_timestamp_minor = + (MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC) + << ptp->nic_time.sync_event_minor_shift; + + /* if sync events have been disabled then we want to silently ignore + * this event, so throw away result. + */ + (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID); +} + +static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset)); +#else + const u8 *data = eh + efx->rx_packet_ts_offset; + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + u32 pkt_timestamp_major, pkt_timestamp_minor; + u32 diff, carry; + struct skb_shared_hwtstamps *timestamps; + + if (channel->sync_events_state != SYNC_EVENTS_VALID) + return; + + pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb)); + + /* get the difference between the packet and sync timestamps, + * modulo one second + */ + diff = pkt_timestamp_minor - channel->sync_timestamp_minor; + if (pkt_timestamp_minor < channel->sync_timestamp_minor) + diff += ptp->nic_time.minor_max; + + /* do we roll over a second boundary and need to carry the one? */ + carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ? + 1 : 0; + + if (diff <= ptp->nic_time.sync_event_diff_max) { + /* packet is ahead of the sync event by a quarter of a second or + * less (allowing for fuzz) + */ + pkt_timestamp_major = channel->sync_timestamp_major + carry; + } else if (diff >= ptp->nic_time.sync_event_diff_min) { + /* packet is behind the sync event but within the fuzz factor. + * This means the RX packet and sync event crossed as they were + * placed on the event queue, which can sometimes happen. + */ + pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry; + } else { + /* it's outside tolerance in both directions. this might be + * indicative of us missing sync events for some reason, so + * we'll call it an error rather than risk giving a bogus + * timestamp. + */ + netif_vdbg(efx, drv, efx->net_dev, + "packet timestamp %x too far from sync event %x:%x\n", + pkt_timestamp_minor, channel->sync_timestamp_major, + channel->sync_timestamp_minor); + return; + } + + /* attach the timestamps to the skb */ + timestamps = skb_hwtstamps(skb); + timestamps->hwtstamp = + ptp->nic_to_kernel_time(pkt_timestamp_major, + pkt_timestamp_minor, + ptp->ts_corrections.general_rx); +} + +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); + s64 adjustment_ns; + int rc; + + if (delta > MAX_PPB) + delta = MAX_PPB; + else if (delta < -MAX_PPB) + delta = -MAX_PPB; + + /* Convert ppb to fixed point ns taking care to round correctly. 
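+	 * The (1 << (adjfreq_ppb_shift - 1)) term added below is half of the
+	 * divisor implied by the right shift, so the conversion rounds to the
+	 * nearest fixed-point value instead of always rounding down.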
*/ + adjustment_ns = ((s64)delta * PPB_SCALE_WORD + + (1 << (ptp_data->adjfreq_ppb_shift - 1))) >> + ptp_data->adjfreq_ppb_shift; + + MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), + NULL, 0, NULL); + if (rc != 0) + return rc; + + ptp_data->current_adjfreq = adjustment_ns; + return 0; +} + +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + u32 nic_major, nic_minor; + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); + + efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor); + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor); + return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); + int rc; + ktime_t kt; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc != 0) + return rc; + + kt = ptp_data->nic_to_kernel_time( + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR), + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0); + *ts = ktime_to_timespec64(kt); + return 0; +} + +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts) +{ + /* Get the current NIC time, efx_phc_gettime. + * Subtract from the desired time to get the offset + * call efx_phc_adjtime with the offset + */ + int rc; + struct timespec64 time_now; + struct timespec64 delta; + + rc = efx_phc_gettime(ptp, &time_now); + if (rc != 0) + return rc; + + delta = timespec64_sub(*e_ts, time_now); + + rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta)); + if (rc != 0) + return rc; + + return 0; +} + +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, + int enable) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + if (request->type != PTP_CLK_REQ_PPS) + return -EOPNOTSUPP; + + ptp_data->nic_ts_enabled = !!enable; + return 0; +} + +static const struct efx_channel_type efx_ptp_channel_type = { + .handle_no_channel = efx_ptp_handle_no_channel, + .pre_probe = efx_ptp_probe_channel, + .post_remove = efx_ptp_remove_channel, + .get_name = efx_ptp_get_channel_name, + /* no copy operation; there is no need to reallocate this channel */ + .receive_skb = efx_ptp_rx, + .want_txqs = efx_ptp_want_txqs, + .keep_eventq = false, +}; + +void efx_ptp_defer_probe_with_channel(struct efx_nic *efx) +{ + /* Check whether PTP is implemented on this NIC. The DISABLE + * operation will succeed if and only if it is implemented. 
+ */ + if (efx_ptp_disable(efx) == 0) + efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = + &efx_ptp_channel_type; +} + +void efx_ptp_start_datapath(struct efx_nic *efx) +{ + if (efx_ptp_restart(efx)) + netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); + /* re-enable timestamping if it was previously enabled */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, true, true); +} + +void efx_ptp_stop_datapath(struct efx_nic *efx) +{ + /* temporarily disable timestamping */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, false, true); + efx_ptp_stop(efx); +} diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h new file mode 100644 index 000000000000..9855e8c9e544 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/ptp.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + */ + +#ifndef EFX_PTP_H +#define EFX_PTP_H + +#include +#include "net_driver.h" + +struct ethtool_ts_info; +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); +void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); +struct efx_channel *efx_ptp_channel(struct efx_nic *efx); +void efx_ptp_remove(struct efx_nic *efx); +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +int efx_ptp_get_mode(struct efx_nic *efx); +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode); +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb); +static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + if (channel->sync_events_state == SYNC_EVENTS_VALID) + __efx_rx_skb_attach_timestamp(channel, skb); +} +void efx_ptp_start_datapath(struct efx_nic *efx); +void efx_ptp_stop_datapath(struct efx_nic *efx); +bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx); +ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue); + +#endif /* EFX_PTP_H */ diff --git a/drivers/net/ethernet/sfc/siena/rx.c b/drivers/net/ethernet/sfc/siena/rx.c new file mode 100644 index 000000000000..2375cef577e4 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/rx.c @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "rx_common.h" +#include "filter.h" +#include "nic.h" +#include "selftest.h" +#include "workarounds.h" + +/* Preferred number of descriptors to fill at once */ +#define EFX_RX_PREFERRED_BATCH 8U + +/* Maximum rx prefix used by any architecture. */ +#define EFX_MAX_RX_PREFIX_SIZE 16 + +/* Size of buffer allocated for skb header area. */ +#define EFX_SKB_HEADERS 128u + +/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */ +#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ + EFX_RX_USR_BUF_SIZE) + +static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + int len) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; + + if (likely(len <= max_len)) + return; + + /* The packet must be discarded, but this is only a fatal error + * if the caller indicated it was + */ + rx_buf->flags |= EFX_RX_PKT_DISCARD; + + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "RX queue %d overlength RX event (%#x > %#x)\n", + efx_rx_queue_index(rx_queue), len, max_len); + + efx_rx_queue_channel(rx_queue)->n_rx_overlength++; +} + +/* Allocate and construct an SKB around page fragments */ +static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags, + u8 *eh, int hdr_len) +{ + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; + + /* Allocate an SKB to store the headers */ + skb = netdev_alloc_skb(efx->net_dev, + efx->rx_ip_align + efx->rx_prefix_size + + hdr_len); + if (unlikely(skb == NULL)) { + atomic_inc(&efx->n_rx_noskb_drops); + return NULL; + } + + EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len); + + memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size, + efx->rx_prefix_size + hdr_len); + skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); + __skb_put(skb, hdr_len); + + /* Append the remaining page(s) onto the frag list */ + if (rx_buf->len > hdr_len) { + rx_buf->page_offset += hdr_len; + rx_buf->len -= hdr_len; + + for (;;) { + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len, efx->rx_buffer_truesize); + rx_buf->page = NULL; + + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + } else { + __free_pages(rx_buf->page, efx->rx_buffer_order); + rx_buf->page = NULL; + n_frags = 0; + } + + /* Move past the ethernet header */ + skb->protocol = eth_type_trans(skb, efx->net_dev); + + skb_mark_napi_id(skb, &channel->napi_str); + + return skb; +} + +void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags) +{ + struct efx_nic *efx = rx_queue->efx; + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_rx_buffer *rx_buf; + + rx_queue->rx_packets++; + + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->flags |= flags; + + /* Validate the number of fragments and completed length */ + if (n_frags == 1) { + if (!(flags & EFX_RX_PKT_PREFIX_LEN)) + efx_rx_packet__check_len(rx_queue, rx_buf, len); + } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || + unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || + unlikely(len > n_frags * efx->rx_dma_len) || + unlikely(!efx->rx_scatter)) { + /* If this 
isn't an explicit discard request, either + * the hardware or the driver is broken. + */ + WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD)); + rx_buf->flags |= EFX_RX_PKT_DISCARD; + } + + netif_vdbg(efx, rx_status, efx->net_dev, + "RX queue %d received ids %x-%x len %d %s%s\n", + efx_rx_queue_index(rx_queue), index, + (index + n_frags - 1) & rx_queue->ptr_mask, len, + (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", + (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); + + /* Discard packet, if instructed to do so. Process the + * previous receive first. + */ + if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { + efx_rx_flush_packet(channel); + efx_discard_rx_packet(channel, rx_buf, n_frags); + return; + } + + if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN)) + rx_buf->len = len; + + /* Release and/or sync the DMA mapping - assumes all RX buffers + * consumed in-order per RX queue. + */ + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + + /* Prefetch nice and early so data will (hopefully) be in cache by + * the time we look at it. + */ + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->rx_prefix_size; + rx_buf->len -= efx->rx_prefix_size; + + if (n_frags > 1) { + /* Release/sync DMA mapping for additional fragments. + * Fix length for last fragment. + */ + unsigned int tail_frags = n_frags - 1; + + for (;;) { + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + if (--tail_frags == 0) + break; + efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); + } + rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + } + + /* All fragments have been DMA-synced, so recycle pages. */ + rx_buf = efx_rx_buffer(rx_queue, index); + efx_recycle_rx_pages(channel, rx_buf, n_frags); + + /* Pipeline receives so that we give time for packet headers to be + * prefetched into cache. + */ + efx_rx_flush_packet(channel); + channel->rx_pkt_n_frags = n_frags; + channel->rx_pkt_index = index; +} + +static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct sk_buff *skb; + u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS); + + skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); + if (unlikely(skb == NULL)) { + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + return; + } + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + /* Set the SKB flags */ + skb_checksum_none_assert(skb); + if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); + } + + efx_rx_skb_attach_timestamp(channel, skb); + + if (channel->type->receive_skb) + if (channel->type->receive_skb(channel, skb)) + return; + + /* Pass the packet up */ + if (channel->rx_list != NULL) + /* Add to list, will pass up later */ + list_add_tail(&skb->list, channel->rx_list); + else + /* No list, so pass it up now */ + netif_receive_skb(skb); +} + +/** efx_do_xdp: perform XDP processing on a received packet + * + * Returns true if packet should still be delivered. 
+ */ +static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, u8 **ehp) +{ + u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE]; + struct efx_rx_queue *rx_queue; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + struct xdp_buff xdp; + u32 xdp_act; + s16 offset; + int err; + + xdp_prog = rcu_dereference_bh(efx->xdp_prog); + if (!xdp_prog) + return true; + + rx_queue = efx_channel_get_rx_queue(channel); + + if (unlikely(channel->rx_pkt_n_frags > 1)) { + /* We can't do XDP on fragmented packets - drop. */ + efx_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP is not possible with multiple receive fragments (%d)\n", + channel->rx_pkt_n_frags); + channel->n_rx_xdp_bad_drops++; + return false; + } + + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, + rx_buf->len, DMA_FROM_DEVICE); + + /* Save the rx prefix. */ + EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE); + memcpy(rx_prefix, *ehp - efx->rx_prefix_size, + efx->rx_prefix_size); + + xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info); + /* No support yet for XDP metadata */ + xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM, + rx_buf->len, false); + + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); + + offset = (u8 *)xdp.data - *ehp; + + switch (xdp_act) { + case XDP_PASS: + /* Fix up rx prefix. */ + if (offset) { + *ehp += offset; + rx_buf->page_offset += offset; + rx_buf->len -= offset; + memcpy(*ehp - efx->rx_prefix_size, rx_prefix, + efx->rx_prefix_size); + } + break; + + case XDP_TX: + /* Buffer ownership passes to tx on success. */ + xdpf = xdp_convert_buff_to_frame(&xdp); + err = efx_xdp_tx_buffers(efx, 1, &xdpf, true); + if (unlikely(err != 1)) { + efx_free_rx_buffers(rx_queue, rx_buf, 1); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP TX failed (%d)\n", err); + channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + } else { + channel->n_rx_xdp_tx++; + } + break; + + case XDP_REDIRECT: + err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog); + if (unlikely(err)) { + efx_free_rx_buffers(rx_queue, rx_buf, 1); + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + "XDP redirect failed (%d)\n", err); + channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + } else { + channel->n_rx_xdp_redirect++; + } + break; + + default: + bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act); + efx_free_rx_buffers(rx_queue, rx_buf, 1); + channel->n_rx_xdp_bad_drops++; + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + break; + + case XDP_ABORTED: + trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_DROP: + efx_free_rx_buffers(rx_queue, rx_buf, 1); + channel->n_rx_xdp_drops++; + break; + } + + return xdp_act == XDP_PASS; +} + +/* Handle a received packet. Second half: Touches packet payload. */ +void __efx_rx_packet(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_buffer *rx_buf = + efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); + u8 *eh = efx_rx_buf_va(rx_buf); + + /* Read length from the prefix if necessary. This already + * excludes the length of the prefix itself. 
+ */ + if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) + rx_buf->len = le16_to_cpup((__le16 *) + (eh + efx->rx_packet_len_offset)); + + /* If we're in loopback test, then pass the packet directly to the + * loopback layer, and free the rx_buf here + */ + if (unlikely(efx->loopback_selftest)) { + struct efx_rx_queue *rx_queue; + + efx_loopback_rx_packet(efx, eh, rx_buf->len); + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); + goto out; + } + + if (!efx_do_xdp(efx, channel, rx_buf, &eh)) + goto out; + + if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) + rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; + + if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) + efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0); + else + efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); +out: + channel->rx_pkt_n_frags = 0; +} diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c new file mode 100644 index 000000000000..fa8b9aacca11 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/rx_common.c @@ -0,0 +1,1086 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include +#include +#include "efx.h" +#include "nic.h" +#include "rx_common.h" + +/* This is the percentage fill level below which new RX descriptors + * will be added to the RX descriptor ring. + */ +static unsigned int rx_refill_threshold; +module_param(rx_refill_threshold, uint, 0444); +MODULE_PARM_DESC(rx_refill_threshold, + "RX descriptor ring refill threshold (%)"); + +/* RX maximum head room required. + * + * This must be at least 1 to prevent overflow, plus one packet-worth + * to allow pipelined receives. + */ +#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS) + +/* Check the RX page recycle ring for a page that can be reused. */ +static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + struct efx_rx_page_state *state; + unsigned int index; + struct page *page; + + if (unlikely(!rx_queue->page_ring)) + return NULL; + index = rx_queue->page_remove & rx_queue->page_ptr_mask; + page = rx_queue->page_ring[index]; + if (page == NULL) + return NULL; + + rx_queue->page_ring[index] = NULL; + /* page_remove cannot exceed page_add. */ + if (rx_queue->page_remove != rx_queue->page_add) + ++rx_queue->page_remove; + + /* If page_count is 1 then we hold the only reference to this page. */ + if (page_count(page) == 1) { + ++rx_queue->page_recycle_count; + return page; + } else { + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + ++rx_queue->page_recycle_failed; + } + + return NULL; +} + +/* Attempt to recycle the page if there is an RX recycle ring; the page can + * only be added if this is the final RX buffer, to prevent pages being used in + * the descriptor ring and appearing in the recycle ring simultaneously. 
+ */ +static void efx_recycle_rx_page(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_nic *efx = rx_queue->efx; + struct page *page = rx_buf->page; + unsigned int index; + + /* Only recycle the page after processing the final buffer. */ + if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) + return; + + index = rx_queue->page_add & rx_queue->page_ptr_mask; + if (rx_queue->page_ring[index] == NULL) { + unsigned int read_index = rx_queue->page_remove & + rx_queue->page_ptr_mask; + + /* The next slot in the recycle ring is available, but + * increment page_remove if the read pointer currently + * points here. + */ + if (read_index == index) + ++rx_queue->page_remove; + rx_queue->page_ring[index] = page; + ++rx_queue->page_add; + return; + } + ++rx_queue->page_recycle_full; + efx_unmap_rx_buffer(efx, rx_buf); + put_page(rx_buf->page); +} + +/* Recycle the pages that are used by buffers that have just been received. */ +void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + if (unlikely(!rx_queue->page_ring)) + return; + + do { + efx_recycle_rx_page(channel, rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); +} + +void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + efx_recycle_rx_pages(channel, rx_buf, n_frags); + + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); +} + +static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) +{ + unsigned int bufs_in_recycle_ring, page_ring_size; + struct efx_nic *efx = rx_queue->efx; + + bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx); + page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / + efx->rx_bufs_per_page); + rx_queue->page_ring = kcalloc(page_ring_size, + sizeof(*rx_queue->page_ring), GFP_KERNEL); + if (!rx_queue->page_ring) + rx_queue->page_ptr_mask = 0; + else + rx_queue->page_ptr_mask = page_ring_size - 1; +} + +static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + int i; + + if (unlikely(!rx_queue->page_ring)) + return; + + /* Unmap and release the pages in the recycle ring. Remove the ring. */ + for (i = 0; i <= rx_queue->page_ptr_mask; i++) { + struct page *page = rx_queue->page_ring[i]; + struct efx_rx_page_state *state; + + if (page == NULL) + continue; + + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + } + kfree(rx_queue->page_ring); + rx_queue->page_ring = NULL; +} + +static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ + /* Release the page reference we hold for the buffer. */ + if (rx_buf->page) + put_page(rx_buf->page); + + /* If this is the last buffer in a page, unmap and free it. 
*/ + if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { + efx_unmap_rx_buffer(rx_queue->efx, rx_buf); + efx_free_rx_buffers(rx_queue, rx_buf, 1); + } + rx_buf->page = NULL; +} + +int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int entries; + int rc; + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); + EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); + rx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating RX queue %d size %#x mask %#x\n", + efx_rx_queue_index(rx_queue), efx->rxq_entries, + rx_queue->ptr_mask); + + /* Allocate RX buffers */ + rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), + GFP_KERNEL); + if (!rx_queue->buffer) + return -ENOMEM; + + rc = efx_nic_probe_rx(rx_queue); + if (rc) { + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; + } + + return rc; +} + +void efx_init_rx_queue(struct efx_rx_queue *rx_queue) +{ + unsigned int max_fill, trigger, max_trigger; + struct efx_nic *efx = rx_queue->efx; + int rc = 0; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); + + /* Initialise ptr fields */ + rx_queue->added_count = 0; + rx_queue->notified_count = 0; + rx_queue->removed_count = 0; + rx_queue->min_fill = -1U; + efx_init_rx_recycle_ring(rx_queue); + + rx_queue->page_remove = 0; + rx_queue->page_add = rx_queue->page_ptr_mask + 1; + rx_queue->page_recycle_count = 0; + rx_queue->page_recycle_failed = 0; + rx_queue->page_recycle_full = 0; + + /* Initialise limit fields */ + max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; + max_trigger = + max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; + if (rx_refill_threshold != 0) { + trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; + if (trigger > max_trigger) + trigger = max_trigger; + } else { + trigger = max_trigger; + } + + rx_queue->max_fill = max_fill; + rx_queue->fast_fill_trigger = trigger; + rx_queue->refill_enabled = true; + + /* Initialise XDP queue information */ + rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, + rx_queue->core_index, 0); + + if (rc) { + netif_err(efx, rx_err, efx->net_dev, + "Failure to initialise XDP queue information rc=%d\n", + rc); + efx->xdp_rxq_info_failed = true; + } else { + rx_queue->xdp_rxq_info_valid = true; + } + + /* Set up RX descriptor ring */ + efx_nic_init_rx(rx_queue); +} + +void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_rx_buffer *rx_buf; + int i; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); + + del_timer_sync(&rx_queue->slow_fill); + + /* Release RX buffers from the current read ptr to the write ptr */ + if (rx_queue->buffer) { + for (i = rx_queue->removed_count; i < rx_queue->added_count; + i++) { + unsigned int index = i & rx_queue->ptr_mask; + + rx_buf = efx_rx_buffer(rx_queue, index); + efx_fini_rx_buffer(rx_queue, rx_buf); + } + } + + efx_fini_rx_recycle_ring(rx_queue); + + if (rx_queue->xdp_rxq_info_valid) + xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info); + + rx_queue->xdp_rxq_info_valid = false; +} + +void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) +{ + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); + + efx_nic_remove_rx(rx_queue); + + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; +} + +/* Unmap a DMA-mapped page. 
This function is only called for the final RX + * buffer in a page. + */ +void efx_unmap_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf) +{ + struct page *page = rx_buf->page; + + if (page) { + struct efx_rx_page_state *state = page_address(page); + + dma_unmap_page(&efx->pci_dev->dev, + state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + } +} + +void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs) +{ + do { + if (rx_buf->page) { + put_page(rx_buf->page); + rx_buf->page = NULL; + } + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--num_bufs); +} + +void efx_rx_slow_fill(struct timer_list *t) +{ + struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); + + /* Post an event to cause NAPI to run and refill the queue */ + efx_nic_generate_fill_event(rx_queue); + ++rx_queue->slow_fill_count; +} + +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) +{ + mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); +} + +/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers + * + * @rx_queue: Efx RX queue + * + * This allocates a batch of pages, maps them for DMA, and populates + * struct efx_rx_buffers for each one. Return a negative error code or + * 0 on success. If a single page can be used for multiple buffers, + * then the page will either be inserted fully, or not at all. + */ +static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) +{ + unsigned int page_offset, index, count; + struct efx_nic *efx = rx_queue->efx; + struct efx_rx_page_state *state; + struct efx_rx_buffer *rx_buf; + dma_addr_t dma_addr; + struct page *page; + + count = 0; + do { + page = efx_reuse_page(rx_queue); + if (page == NULL) { + page = alloc_pages(__GFP_COMP | + (atomic ? GFP_ATOMIC : GFP_KERNEL), + efx->rx_buffer_order); + if (unlikely(page == NULL)) + return -ENOMEM; + dma_addr = + dma_map_page(&efx->pci_dev->dev, page, 0, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&efx->pci_dev->dev, + dma_addr))) { + __free_pages(page, efx->rx_buffer_order); + return -EIO; + } + state = page_address(page); + state->dma_addr = dma_addr; + } else { + state = page_address(page); + dma_addr = state->dma_addr; + } + + dma_addr += sizeof(struct efx_rx_page_state); + page_offset = sizeof(struct efx_rx_page_state); + + do { + index = rx_queue->added_count & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->dma_addr = dma_addr + efx->rx_ip_align + + EFX_XDP_HEADROOM; + rx_buf->page = page; + rx_buf->page_offset = page_offset + efx->rx_ip_align + + EFX_XDP_HEADROOM; + rx_buf->len = efx->rx_dma_len; + rx_buf->flags = 0; + ++rx_queue->added_count; + get_page(page); + dma_addr += efx->rx_page_buf_step; + page_offset += efx->rx_page_buf_step; + } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); + + rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; + } while (++count < efx->rx_pages_per_batch); + + return 0; +} + +void efx_rx_config_page_split(struct efx_nic *efx) +{ + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM, + EFX_RX_BUF_ALIGNMENT); + efx->rx_bufs_per_page = efx->rx_buffer_order ? 
1 : + ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / + efx->rx_page_buf_step); + efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / + efx->rx_bufs_per_page; + efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, + efx->rx_bufs_per_page); +} + +/* efx_fast_push_rx_descriptors - push new RX descriptors quickly + * @rx_queue: RX descriptor queue + * + * This will aim to fill the RX descriptor queue up to + * @rx_queue->@max_fill. If there is insufficient atomic + * memory to do so, a slow fill will be scheduled. + * + * The caller must provide serialisation (none is used here). In practise, + * this means this function must run from the NAPI handler, or be called + * when NAPI is disabled. + */ +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int fill_level, batch_size; + int space, rc = 0; + + if (!rx_queue->refill_enabled) + return; + + /* Calculate current fill level, and exit if we don't need to fill */ + fill_level = (rx_queue->added_count - rx_queue->removed_count); + EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries); + if (fill_level >= rx_queue->fast_fill_trigger) + goto out; + + /* Record minimum fill level */ + if (unlikely(fill_level < rx_queue->min_fill)) { + if (fill_level) + rx_queue->min_fill = fill_level; + } + + batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; + space = rx_queue->max_fill - fill_level; + EFX_WARN_ON_ONCE_PARANOID(space < batch_size); + + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filling descriptor ring from" + " level %d to level %d\n", + efx_rx_queue_index(rx_queue), fill_level, + rx_queue->max_fill); + + do { + rc = efx_init_rx_buffers(rx_queue, atomic); + if (unlikely(rc)) { + /* Ensure that we don't leave the rx queue empty */ + efx_schedule_slow_fill(rx_queue); + goto out; + } + } while ((space -= batch_size) >= batch_size); + + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filled descriptor ring " + "to level %d\n", efx_rx_queue_index(rx_queue), + rx_queue->added_count - rx_queue->removed_count); + + out: + if (rx_queue->notified_count != rx_queue->added_count) + efx_nic_notify_rx_desc(rx_queue); +} + +/* Pass a received packet up through GRO. GRO can handle pages + * regardless of checksum state and skbs with a good checksum. + */ +void +efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh, __wsum csum) +{ + struct napi_struct *napi = &channel->napi_str; + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; + + skb = napi_get_frags(napi); + if (unlikely(!skb)) { + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + return; + } + + if (efx->net_dev->features & NETIF_F_RXHASH && + efx_rx_buf_hash_valid(efx, eh)) + skb_set_hash(skb, efx_rx_buf_hash(efx, eh), + PKT_HASH_TYPE_L3); + if (csum) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } else { + skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? 
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + } + skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); + + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + + skb->data_len = skb->len; + skb->truesize += n_frags * efx->rx_buffer_truesize; + + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + napi_gro_frags(napi); +} + +/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because + * (a) this is an infrequent control-plane operation and (b) n is small (max 64) + */ +struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx, *new; + u32 id = 1; /* Don't use zero, that refers to the master RSS context */ + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + /* Search for first gap in the numbering */ + list_for_each_entry(ctx, head, list) { + if (ctx->user_id != id) + break; + id++; + /* Check for wrap. If this happens, we have nearly 2^32 + * allocated RSS contexts, which seems unlikely. + */ + if (WARN_ON_ONCE(!id)) + return NULL; + } + + /* Create the new entry */ + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + new->rx_hash_udp_4tuple = false; + + /* Insert the new entry into the gap */ + new->user_id = id; + list_add_tail(&new->list, &ctx->list); + return new; +} + +struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) +{ + struct list_head *head = &efx->rss_context.list; + struct efx_rss_context *ctx; + + WARN_ON(!mutex_is_locked(&efx->rss_lock)); + + list_for_each_entry(ctx, head, list) + if (ctx->user_id == id) + return ctx; + return NULL; +} + +void efx_free_rss_context_entry(struct efx_rss_context *ctx) +{ + list_del(&ctx->list); + kfree(ctx); +} + +void efx_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) + ctx->rx_indir_table[i] = + ethtool_rxfh_indir_default(i, efx->rss_spread); +} + +/** + * efx_filter_is_mc_recipient - test whether spec is a multicast recipient + * @spec: Specification to test + * + * Return: %true if the specification is a non-drop RX filter that + * matches a local MAC address I/G bit value of 1 or matches a local + * IPv4 or IPv6 address value in the respective multicast address + * range. Otherwise %false. 
+ */ +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) +{ + if (!(spec->flags & EFX_FILTER_FLAG_RX) || + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + return false; + + if (spec->match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) && + is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + ipv4_is_multicast(spec->loc_host[0])) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] == 0xff) + return true; + } + + return false; +} + +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force) +{ + if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { + /* ARFS is currently updating this entry, leave it */ + return false; + } + if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { + /* ARFS tried and failed to update this, so it's probably out + * of date. Remove the filter and the ARFS rule entry. + */ + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + *force = true; + return true; + } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ + /* ARFS has moved on, so old filter is not needed. Since we did + * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will + * not be removed by efx_rps_hash_del() subsequently. + */ + *force = true; + return true; + } + /* Remove it iff ARFS wants to. 
*/ + return true; +} + +static +struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + u32 hash = efx_filter_spec_hash(spec); + + lockdep_assert_held(&efx->rps_hash_lock); + if (!efx->rps_hash_table) + return NULL; + return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; +} + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) + return rule; + } + return NULL; +} + +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + *new = false; + return rule; + } + } + rule = kmalloc(sizeof(*rule), GFP_ATOMIC); + *new = true; + if (rule) { + memcpy(&rule->spec, spec, sizeof(rule->spec)); + hlist_add_head(&rule->node, head); + } + return rule; +} + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (WARN_ON(!head)) + return; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + /* Someone already reused the entry. We know that if + * this check doesn't fire (i.e. filter_id == REMOVING) + * then the REMOVING mark was put there by our caller, + * because caller is holding a lock on filter table and + * only holders of that lock set REMOVING. + */ + if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) + return; + hlist_del(node); + kfree(rule); + return; + } + } + /* We didn't find it. 
*/ + WARN_ON(1); +} +#endif + +int efx_probe_filters(struct efx_nic *efx) +{ + int rc; + + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + rc = efx->type->filter_table_probe(efx); + if (rc) + goto out_unlock; + +#ifdef CONFIG_RFS_ACCEL + if (efx->type->offload_features & NETIF_F_NTUPLE) { + struct efx_channel *channel; + int i, success = 1; + + efx_for_each_channel(channel, efx) { + channel->rps_flow_id = + kcalloc(efx->type->max_rx_ip_filters, + sizeof(*channel->rps_flow_id), + GFP_KERNEL); + if (!channel->rps_flow_id) + success = 0; + else + for (i = 0; + i < efx->type->max_rx_ip_filters; + ++i) + channel->rps_flow_id[i] = + RPS_FLOW_ID_INVALID; + channel->rfs_expire_index = 0; + channel->rfs_filter_count = 0; + } + + if (!success) { + efx_for_each_channel(channel, efx) + kfree(channel->rps_flow_id); + efx->type->filter_table_remove(efx); + rc = -ENOMEM; + goto out_unlock; + } + } +#endif +out_unlock: + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + return rc; +} + +void efx_remove_filters(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + cancel_delayed_work_sync(&channel->filter_work); + kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; + } +#endif + down_write(&efx->filter_sem); + efx->type->filter_table_remove(efx); + up_write(&efx->filter_sem); +} + +#ifdef CONFIG_RFS_ACCEL + +static void efx_filter_rfs_work(struct work_struct *data) +{ + struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, + work); + struct efx_nic *efx = netdev_priv(req->net_dev); + struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); + int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; + int rc; + + rc = efx->type->filter_insert(efx, &req->spec, true); + if (rc >= 0) + /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ + rc %= efx->type->max_rx_ip_filters; + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_rps_hash_find(efx, &req->spec); + /* The rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. + */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } + if (rc >= 0) { + /* Remember this so we can check whether to expire the filter + * later. + */ + mutex_lock(&efx->rps_mutex); + if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) + channel->rfs_filter_count++; + channel->rps_flow_id[rc] = req->flow_id; + mutex_unlock(&efx->rps_mutex); + + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_info(efx, rx_status, efx->net_dev, + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? 
"TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_succeeded++; + } else { + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_failed++; + /* We're overloading the NIC's filter tables, so let's do a + * chunk of extra expiry work. + */ + __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, + 100u)); + } + + /* Release references */ + clear_bit(slot_idx, &efx->rps_slot_map); + dev_put(req->net_dev); +} + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; + struct flow_keys fk; + int slot_idx; + bool new; + int rc; + + /* find a free slot */ + for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) + if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) + break; + if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) + return -EBUSY; + + if (flow_id == RPS_FLOW_ID_INVALID) { + rc = -EINVAL; + goto out_clear; + } + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + + req = efx->rps_slot + slot_idx; + efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, + efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, + rxq_index); + req->spec.match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + req->spec.ether_type = fk.basic.n_proto; + req->spec.ip_proto = fk.basic.ip_proto; + + if (fk.basic.n_proto == htons(ETH_P_IP)) { + req->spec.rem_host[0] = fk.addrs.v4addrs.src; + req->spec.loc_host[0] = fk.addrs.v4addrs.dst; + } else { + memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, + sizeof(struct in6_addr)); + } + + req->spec.rem_port = fk.ports.src; + req->spec.loc_port = fk.ports.dst; + + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right thing */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. + */ + rc = 0; + } + + /* Queue the request */ + dev_hold(req->net_dev = net_dev); + INIT_WORK(&req->work, efx_filter_rfs_work); + req->rxq_index = rxq_index; + req->flow_id = flow_id; + schedule_work(&req->work); + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); +out_clear: + clear_bit(slot_idx, &efx->rps_slot_map); + return rc; +} + +bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) +{ + bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); + struct efx_nic *efx = channel->efx; + unsigned int index, size, start; + u32 flow_id; + + if (!mutex_trylock(&efx->rps_mutex)) + return false; + expire_one = efx->type->filter_rfs_expire_one; + index = channel->rfs_expire_index; + start = index; + size = efx->type->max_rx_ip_filters; + while (quota) { + flow_id = channel->rps_flow_id[index]; + + if (flow_id != RPS_FLOW_ID_INVALID) { + quota--; + if (expire_one(efx, flow_id, index)) { + netif_info(efx, rx_status, efx->net_dev, + "expired filter %d [channel %u flow %u]\n", + index, channel->channel, flow_id); + channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + channel->rfs_filter_count--; + } + } + if (++index == size) + index = 0; + /* If we were called with a quota that exceeds the total number + * of filters in the table (which shouldn't happen, but could + * if two callers race), ensure that we don't loop forever - + * stop when we've examined every row of the table. 
+ */ + if (index == start) + break; + } + + channel->rfs_expire_index = index; + mutex_unlock(&efx->rps_mutex); + return true; +} + +#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h new file mode 100644 index 000000000000..fbd2769307f9 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/rx_common.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_RX_COMMON_H +#define EFX_RX_COMMON_H + +/* Preferred number of descriptors to fill at once */ +#define EFX_RX_PREFERRED_BATCH 8U + +/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */ +#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ + EFX_RX_USR_BUF_SIZE) + +/* Number of RX buffers to recycle pages for. When creating the RX page recycle + * ring, this number is divided by the number of buffers per page to calculate + * the number of pages to store in the RX page recycle ring. + */ +#define EFX_RECYCLE_RING_SIZE_10G 256 + +static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf) +{ + return page_address(buf->page) + buf->page_offset; +} + +static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset)); +#else + const u8 *data = eh + efx->rx_packet_hash_offset; + + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void efx_rx_slow_fill(struct timer_list *t); + +void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); +void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); + +int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); +void efx_init_rx_queue(struct efx_rx_queue *rx_queue); +void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); +void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); +void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue); + +void efx_init_rx_buffer(struct efx_rx_queue *rx_queue, + struct page *page, + unsigned int page_offset, + u16 flags); +void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf); + +static inline void efx_sync_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf, + unsigned int len) +{ + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, + DMA_FROM_DEVICE); +} + +void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs); + +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); +void efx_rx_config_page_split(struct efx_nic *efx); +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); + +void +efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh, __wsum csum); + +struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); +struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); +void 
efx_free_rss_context_entry(struct efx_rss_context *ctx); +void efx_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx); + +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right); +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force); +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec); +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new); +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota); +#endif + +int efx_probe_filters(struct efx_nic *efx); +void efx_remove_filters(struct efx_nic *efx); + +#endif diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c new file mode 100644 index 000000000000..3c5227afd497 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/selftest.c @@ -0,0 +1,807 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2012 Solarflare Communications Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "efx_common.h" +#include "efx_channels.h" +#include "nic.h" +#include "mcdi_port_common.h" +#include "selftest.h" +#include "workarounds.h" + +/* IRQ latency can be enormous because: + * - All IRQs may be disabled on a CPU for a *long* time by e.g. a + * slow serial console or an old IDE driver doing error recovery + * - The PREEMPT_RT patches mostly deal with this, but also allow a + * tasklet or normal task to be given higher priority than our IRQ + * threads + * Try to avoid blaming the hardware for this. + */ +#define IRQ_TIMEOUT HZ + +/* + * Loopback test packet structure + * + * The self-test should stress every RSS vector, and unfortunately + * Falcon only performs RSS on TCP/UDP packets. + */ +struct efx_loopback_payload { + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + char msg[64]; +} __packed; + +/* Loopback test source MAC address */ +static const u8 payload_source[ETH_ALEN] __aligned(2) = { + 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, +}; + +static const char payload_msg[] = + "Hello world! 
This is an Efx loopback test in progress!"; + +/* Interrupt mode names */ +static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; +static const char *const efx_interrupt_mode_names[] = { + [EFX_INT_MODE_MSIX] = "MSI-X", + [EFX_INT_MODE_MSI] = "MSI", + [EFX_INT_MODE_LEGACY] = "legacy", +}; +#define INT_MODE(efx) \ + STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) + +/** + * struct efx_loopback_state - persistent state during a loopback selftest + * @flush: Drop all packets in efx_loopback_rx_packet + * @packet_count: Number of packets being used in this test + * @skbs: An array of skbs transmitted + * @offload_csum: Checksums are being offloaded + * @rx_good: RX good packet count + * @rx_bad: RX bad packet count + * @payload: Payload used in tests + */ +struct efx_loopback_state { + bool flush; + int packet_count; + struct sk_buff **skbs; + bool offload_csum; + atomic_t rx_good; + atomic_t rx_bad; + struct efx_loopback_payload payload; +}; + +/* How long to wait for all the packets to arrive (in ms) */ +#define LOOPBACK_TIMEOUT_MS 1000 + +/************************************************************************** + * + * MII, NVRAM and register tests + * + **************************************************************************/ + +static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + rc = efx_mcdi_phy_test_alive(efx); + tests->phy_alive = rc ? -1 : 1; + + return rc; +} + +static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + if (efx->type->test_nvram) { + rc = efx->type->test_nvram(efx); + if (rc == -EPERM) + rc = 0; + else + tests->nvram = rc ? -1 : 1; + } + + return rc; +} + +/************************************************************************** + * + * Interrupt and event queue testing + * + **************************************************************************/ + +/* Test generation and receipt of interrupts */ +static int efx_test_interrupts(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + unsigned long timeout, wait; + int cpu; + int rc; + + netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); + tests->interrupt = -1; + + rc = efx_nic_irq_test_start(efx); + if (rc == -ENOTSUPP) { + netif_dbg(efx, drv, efx->net_dev, + "direct interrupt testing not supported\n"); + tests->interrupt = 0; + return 0; + } + + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of test interrupt. 
*/ + netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); + do { + schedule_timeout_uninterruptible(wait); + cpu = efx_nic_irq_test_irq_cpu(efx); + if (cpu >= 0) + goto success; + wait *= 2; + } while (time_before(jiffies, timeout)); + + netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); + return -ETIMEDOUT; + + success: + netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", + INT_MODE(efx), cpu); + tests->interrupt = 1; + return 0; +} + +/* Test generation and receipt of interrupting events */ +static int efx_test_eventq_irq(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + struct efx_channel *channel; + unsigned int read_ptr[EFX_MAX_CHANNELS]; + unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0; + unsigned long timeout, wait; + + BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG); + + efx_for_each_channel(channel, efx) { + read_ptr[channel->channel] = channel->eventq_read_ptr; + set_bit(channel->channel, &dma_pend); + set_bit(channel->channel, &int_pend); + efx_nic_event_test_start(channel); + } + + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of interrupts. NAPI processing may or may + * not complete in time, but we can cope in any case. + */ + do { + schedule_timeout_uninterruptible(wait); + + efx_for_each_channel(channel, efx) { + efx_stop_eventq(channel); + if (channel->eventq_read_ptr != + read_ptr[channel->channel]) { + set_bit(channel->channel, &napi_ran); + clear_bit(channel->channel, &dma_pend); + clear_bit(channel->channel, &int_pend); + } else { + if (efx_nic_event_present(channel)) + clear_bit(channel->channel, &dma_pend); + if (efx_nic_event_test_irq_cpu(channel) >= 0) + clear_bit(channel->channel, &int_pend); + } + efx_start_eventq(channel); + } + + wait *= 2; + } while ((dma_pend || int_pend) && time_before(jiffies, timeout)); + + efx_for_each_channel(channel, efx) { + bool dma_seen = !test_bit(channel->channel, &dma_pend); + bool int_seen = !test_bit(channel->channel, &int_pend); + + tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; + tests->eventq_int[channel->channel] = int_seen ? 1 : -1; + + if (dma_seen && int_seen) { + netif_dbg(efx, drv, efx->net_dev, + "channel %d event queue passed (with%s NAPI)\n", + channel->channel, + test_bit(channel->channel, &napi_ran) ? + "" : "out"); + } else { + /* Report failure and whether either interrupt or DMA + * worked + */ + netif_err(efx, drv, efx->net_dev, + "channel %d timed out waiting for event queue\n", + channel->channel); + if (int_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d saw interrupt " + "during event queue test\n", + channel->channel); + if (dma_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d event was generated, but " + "failed to trigger an interrupt\n", + channel->channel); + } + } + + return (dma_pend || int_pend) ? -ETIMEDOUT : 0; +} + +static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags) +{ + int rc; + + mutex_lock(&efx->mac_lock); + rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags); + mutex_unlock(&efx->mac_lock); + if (rc == -EPERM) + rc = 0; + else + netif_info(efx, drv, efx->net_dev, + "%s phy selftest\n", rc ? "Failed" : "Passed"); + + return rc; +} + +/************************************************************************** + * + * Loopback testing + * NB Only one loopback test can be executing concurrently. 
+ * + **************************************************************************/ + +/* Loopback test RX callback + * This is called for each received packet during loopback testing. + */ +void efx_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *received; + struct efx_loopback_payload *payload; + + BUG_ON(!buf_ptr); + + /* If we are just flushing, then drop the packet */ + if ((state == NULL) || state->flush) + return; + + payload = &state->payload; + + received = (struct efx_loopback_payload *) buf_ptr; + received->ip.saddr = payload->ip.saddr; + if (state->offload_csum) + received->ip.check = payload->ip.check; + + /* Check that header exists */ + if (pkt_len < sizeof(received->header)) { + netif_err(efx, drv, efx->net_dev, + "saw runt RX packet (length %d) in %s loopback " + "test\n", pkt_len, LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that the ethernet header exists */ + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw non-loopback RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check packet length */ + if (pkt_len != sizeof(*payload)) { + netif_err(efx, drv, efx->net_dev, + "saw incorrect RX packet length %d (wanted %d) in " + "%s loopback test\n", pkt_len, (int)sizeof(*payload), + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that IP header matches */ + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted IP header in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that msg and padding matches */ + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that iteration matches */ + if (received->iteration != payload->iteration) { + netif_err(efx, drv, efx->net_dev, + "saw RX packet from iteration %d (wanted %d) in " + "%s loopback test\n", ntohs(received->iteration), + ntohs(payload->iteration), LOOPBACK_MODE(efx)); + goto err; + } + + /* Increase correct RX count */ + netif_vdbg(efx, drv, efx->net_dev, + "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); + + atomic_inc(&state->rx_good); + return; + + err: +#ifdef DEBUG + if (atomic_read(&state->rx_bad) == 0) { + netif_err(efx, drv, efx->net_dev, "received packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + buf_ptr, pkt_len, 0); + netif_err(efx, drv, efx->net_dev, "expected packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + &state->payload, sizeof(state->payload), 0); + } +#endif + atomic_inc(&state->rx_bad); +} + +/* Initialise an efx_selftest_state for a new iteration */ +static void efx_iterate_state(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct net_device *net_dev = efx->net_dev; + struct efx_loopback_payload *payload = &state->payload; + + /* Initialise the layerII header */ + ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr); + ether_addr_copy((u8 *)&payload->header.h_source, payload_source); + payload->header.h_proto = htons(ETH_P_IP); + + /* saddr set later and used as incrementing count */ + payload->ip.daddr = htonl(INADDR_LOOPBACK); + payload->ip.ihl = 5; + payload->ip.check = (__force __sum16) htons(0xdead); + payload->ip.tot_len = 
htons(sizeof(*payload) - sizeof(struct ethhdr)); + payload->ip.version = IPVERSION; + payload->ip.protocol = IPPROTO_UDP; + + /* Initialise udp header */ + payload->udp.source = 0; + payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - + sizeof(struct iphdr)); + payload->udp.check = 0; /* checksum ignored */ + + /* Fill out payload */ + payload->iteration = htons(ntohs(payload->iteration) + 1); + memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); + + /* Fill out remaining state members */ + atomic_set(&state->rx_good, 0); + atomic_set(&state->rx_bad, 0); + smp_wmb(); +} + +static int efx_begin_loopback(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *payload; + struct sk_buff *skb; + int i; + netdev_tx_t rc; + + /* Transmit N copies of buffer */ + for (i = 0; i < state->packet_count; i++) { + /* Allocate an skb, holding an extra reference for + * transmit completion counting */ + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); + if (!skb) + return -ENOMEM; + state->skbs[i] = skb; + skb_get(skb); + + /* Copy the payload in, incrementing the source address to + * exercise the rss vectors */ + payload = skb_put(skb, sizeof(state->payload)); + memcpy(payload, &state->payload, sizeof(state->payload)); + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); + + /* Ensure everything we've written is visible to the + * interrupt handler. */ + smp_wmb(); + + netif_tx_lock_bh(efx->net_dev); + rc = efx_enqueue_skb(tx_queue, skb); + netif_tx_unlock_bh(efx->net_dev); + + if (rc != NETDEV_TX_OK) { + netif_err(efx, drv, efx->net_dev, + "TX queue %d could not transmit packet %d of " + "%d in %s loopback test\n", tx_queue->label, + i + 1, state->packet_count, + LOOPBACK_MODE(efx)); + + /* Defer cleaning up the other skbs for the caller */ + kfree_skb(skb); + return -EPIPE; + } + } + + return 0; +} + +static int efx_poll_loopback(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + + return atomic_read(&state->rx_good) == state->packet_count; +} + +static int efx_end_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct sk_buff *skb; + int tx_done = 0, rx_good, rx_bad; + int i, rc = 0; + + netif_tx_lock_bh(efx->net_dev); + + /* Count the number of tx completions, and decrement the refcnt. Any + * skbs not already completed will be free'd when the queue is flushed */ + for (i = 0; i < state->packet_count; i++) { + skb = state->skbs[i]; + if (skb && !skb_shared(skb)) + ++tx_done; + dev_kfree_skb(skb); + } + + netif_tx_unlock_bh(efx->net_dev); + + /* Check TX completion and received packet counts */ + rx_good = atomic_read(&state->rx_good); + rx_bad = atomic_read(&state->rx_bad); + if (tx_done != state->packet_count) { + /* Don't free the skbs; they will be picked up on TX + * overflow or channel teardown. 
+ */ + netif_err(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "TX completion events in %s loopback test\n", + tx_queue->label, tx_done, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Allow to fall through so we see the RX errors as well */ + } + + /* We may always be up to a flush away from our desired packet total */ + if (rx_good != state->packet_count) { + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "received packets in %s loopback test\n", + tx_queue->label, rx_good, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Fall through */ + } + + /* Update loopback test structure */ + lb_tests->tx_sent[tx_queue->label] += state->packet_count; + lb_tests->tx_done[tx_queue->label] += tx_done; + lb_tests->rx_good += rx_good; + lb_tests->rx_bad += rx_bad; + + return rc; +} + +static int +efx_test_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + int i, begin_rc, end_rc; + + for (i = 0; i < 3; i++) { + /* Determine how many packets to send */ + state->packet_count = efx->txq_entries / 3; + state->packet_count = min(1 << (i << 2), state->packet_count); + state->skbs = kcalloc(state->packet_count, + sizeof(state->skbs[0]), GFP_KERNEL); + if (!state->skbs) + return -ENOMEM; + state->flush = false; + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d (hw %d) testing %s loopback with %d packets\n", + tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + efx_iterate_state(efx); + begin_rc = efx_begin_loopback(tx_queue); + + /* This will normally complete very quickly, but be + * prepared to wait much longer. */ + msleep(1); + if (!efx_poll_loopback(efx)) { + msleep(LOOPBACK_TIMEOUT_MS); + efx_poll_loopback(efx); + } + + end_rc = efx_end_loopback(tx_queue, lb_tests); + kfree(state->skbs); + + if (begin_rc || end_rc) { + /* Wait a while to ensure there are no packets + * floating around after a failure. */ + schedule_timeout_uninterruptible(HZ / 10); + return begin_rc ? begin_rc : end_rc; + } + } + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d passed %s loopback test with a burst length " + "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx), + state->packet_count); + + return 0; +} + +/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but + * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it + * to delay and retry. Therefore, it's safer to just poll directly. Wait + * for link up and any faults to dissipate. 
*/ +static int efx_wait_for_link(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + int count, link_up_count = 0; + bool link_up; + + for (count = 0; count < 40; count++) { + schedule_timeout_uninterruptible(HZ / 10); + + if (efx->type->monitor != NULL) { + mutex_lock(&efx->mac_lock); + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + mutex_lock(&efx->mac_lock); + link_up = link_state->up; + if (link_up) + link_up = !efx->type->check_mac_fault(efx); + mutex_unlock(&efx->mac_lock); + + if (link_up) { + if (++link_up_count == 2) + return 0; + } else { + link_up_count = 0; + } + } + + return -ETIMEDOUT; +} + +static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int loopback_modes) +{ + enum efx_loopback_mode mode; + struct efx_loopback_state *state; + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + int rc = 0; + + /* Set the port loopback_selftest member. From this point on + * all received packets will be dropped. Mark the state as + * "flushing" so all inflight packets are dropped */ + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) + return -ENOMEM; + BUG_ON(efx->loopback_selftest); + state->flush = true; + efx->loopback_selftest = state; + + /* Test all supported loopback modes */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(loopback_modes & (1 << mode))) + continue; + + /* Move the port into the specified loopback mode. */ + state->flush = true; + mutex_lock(&efx->mac_lock); + efx->loopback_mode = mode; + rc = __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "unable to move into %s loopback\n", + LOOPBACK_MODE(efx)); + goto out; + } + + rc = efx_wait_for_link(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "loopback %s never came up\n", + LOOPBACK_MODE(efx)); + goto out; + } + + /* Test all enabled types of TX queue */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + state->offload_csum = (tx_queue->type & + EFX_TXQ_TYPE_OUTER_CSUM); + rc = efx_test_loopback(tx_queue, + &tests->loopback[mode]); + if (rc) + goto out; + } + } + + out: + /* Remove the flush. The caller will remove the loopback setting */ + state->flush = true; + efx->loopback_selftest = NULL; + wmb(); + kfree(state); + + if (rc == -EPERM) + rc = 0; + + return rc; +} + +/************************************************************************** + * + * Entry point + * + *************************************************************************/ + +int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags) +{ + enum efx_loopback_mode loopback_mode = efx->loopback_mode; + int phy_mode = efx->phy_mode; + int rc_test = 0, rc_reset, rc; + + efx_selftest_async_cancel(efx); + + /* Online (i.e. non-disruptive) testing + * This checks interrupt generation, event delivery and PHY presence. */ + + rc = efx_test_phy_alive(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_nvram(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_interrupts(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_eventq_irq(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + if (rc_test) + return rc_test; + + if (!(flags & ETH_TEST_FL_OFFLINE)) + return efx_test_phy(efx, tests, flags); + + /* Offline (i.e. disruptive) testing + * This checks MAC and PHY loopback on the specified port. 
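+	 * This path is typically reached via 'ethtool -t <iface> offline',
+	 * which sets ETH_TEST_FL_OFFLINE in the flags argument.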
*/ + + /* Detach the device so the kernel doesn't transmit during the + * loopback test and the watchdog timeout doesn't fire. + */ + efx_device_detach_sync(efx); + + if (efx->type->test_chip) { + rc_reset = efx->type->test_chip(efx, tests); + if (rc_reset) { + netif_err(efx, hw, efx->net_dev, + "Unable to recover from chip test\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + return rc_reset; + } + + if ((tests->memory < 0 || tests->registers < 0) && !rc_test) + rc_test = -EIO; + } + + /* Ensure that the phy is powered and out of loopback + * for the bist and loopback tests */ + mutex_lock(&efx->mac_lock); + efx->phy_mode &= ~PHY_MODE_LOW_POWER; + efx->loopback_mode = LOOPBACK_NONE; + __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + rc = efx_test_phy(efx, tests, flags); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_loopbacks(efx, tests, efx->loopback_modes); + if (rc && !rc_test) + rc_test = rc; + + /* restore the PHY to the previous state */ + mutex_lock(&efx->mac_lock); + efx->phy_mode = phy_mode; + efx->loopback_mode = loopback_mode; + __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + efx_device_attach_if_not_resetting(efx); + + return rc_test; +} + +void efx_selftest_async_start(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_nic_event_test_start(channel); + schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT); +} + +void efx_selftest_async_cancel(struct efx_nic *efx) +{ + cancel_delayed_work_sync(&efx->selftest_work); +} + +static void efx_selftest_async_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + selftest_work.work); + struct efx_channel *channel; + int cpu; + + efx_for_each_channel(channel, efx) { + cpu = efx_nic_event_test_irq_cpu(channel); + if (cpu < 0) + netif_err(efx, ifup, efx->net_dev, + "channel %d failed to trigger an interrupt\n", + channel->channel); + else + netif_dbg(efx, ifup, efx->net_dev, + "channel %d triggered interrupt on CPU %d\n", + channel->channel, cpu); + } +} + +void efx_selftest_async_init(struct efx_nic *efx) +{ + INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); +} diff --git a/drivers/net/ethernet/sfc/siena/selftest.h b/drivers/net/ethernet/sfc/siena/selftest.h new file mode 100644 index 000000000000..a23f085bf298 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/selftest.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2012 Solarflare Communications Inc. + */ + +#ifndef EFX_SELFTEST_H +#define EFX_SELFTEST_H + +#include "net_driver.h" + +/* + * Self tests + */ + +struct efx_loopback_self_tests { + int tx_sent[EFX_MAX_TXQ_PER_CHANNEL]; + int tx_done[EFX_MAX_TXQ_PER_CHANNEL]; + int rx_good; + int rx_bad; +}; + +#define EFX_MAX_PHY_TESTS 20 + +/* Efx self test results + * For fields which are not counters, 1 indicates success and -1 + * indicates failure; 0 indicates test could not be run. 
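+ * For example, code reporting these results through ethtool might treat
+ * phy_alive == 1 as a pass, -1 as a failure and 0 as a skipped test.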
+ */ +struct efx_self_tests { + /* online tests */ + int phy_alive; + int nvram; + int interrupt; + int eventq_dma[EFX_MAX_CHANNELS]; + int eventq_int[EFX_MAX_CHANNELS]; + /* offline tests */ + int memory; + int registers; + int phy_ext[EFX_MAX_PHY_TESTS]; + struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; +}; + +void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr, + int pkt_len); +int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags); +void efx_selftest_async_init(struct efx_nic *efx); +void efx_selftest_async_start(struct efx_nic *efx); +void efx_selftest_async_cancel(struct efx_nic *efx); + +#endif /* EFX_SELFTEST_H */ diff --git a/drivers/net/ethernet/sfc/siena/sriov.c b/drivers/net/ethernet/sfc/siena/sriov.c new file mode 100644 index 000000000000..3f241e6c881a --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/sriov.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2015 Solarflare Communications Inc. + */ +#include +#include "net_driver.h" +#include "nic.h" +#include "sriov.h" + +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_mac) + return efx->type->sriov_set_vf_mac(efx, vf_i, mac); + else + return -EOPNOTSUPP; +} + +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_vlan) { + if ((vlan & ~VLAN_VID_MASK) || + (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos); + } else { + return -EOPNOTSUPP; + } +} + +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_spoofchk) + return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk); + else + return -EOPNOTSUPP; +} + +int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_get_vf_config) + return efx->type->sriov_get_vf_config(efx, vf_i, ivi); + else + return -EOPNOTSUPP; +} + +int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, + int link_state) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_link_state) + return efx->type->sriov_set_vf_link_state(efx, vf_i, + link_state); + else + return -EOPNOTSUPP; +} diff --git a/drivers/net/ethernet/sfc/siena/sriov.h b/drivers/net/ethernet/sfc/siena/sriov.h new file mode 100644 index 000000000000..747707bee483 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/sriov.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2015 Solarflare Communications Inc. 
+ */ + +#ifndef EFX_SRIOV_H +#define EFX_SRIOV_H + +#include "net_driver.h" + +#ifdef CONFIG_SFC_SRIOV + +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac); +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos, __be16 vlan_proto); +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk); +int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi); +int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, + int link_state); +#endif /* CONFIG_SFC_SRIOV */ + +#endif /* EFX_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c new file mode 100644 index 000000000000..138bca611341 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -0,0 +1,643 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "io.h" +#include "nic.h" +#include "tx.h" +#include "tx_common.h" +#include "workarounds.h" +#include "ef10_regs.h" + +#ifdef EFX_USE_PIO + +#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) +unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; + +#endif /* EFX_USE_PIO */ + +static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer) +{ + unsigned int index = efx_tx_queue_get_insert_index(tx_queue); + struct efx_buffer *page_buf = + &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)]; + unsigned int offset = + ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1); + + if (unlikely(!page_buf->addr) && + efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, + GFP_ATOMIC)) + return NULL; + buffer->dma_addr = page_buf->dma_addr + offset; + buffer->unmap_len = 0; + return (u8 *)page_buf->addr + offset; +} + +u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, size_t len) +{ + if (len > EFX_TX_CB_SIZE) + return NULL; + return efx_tx_get_copy_buffer(tx_queue, buffer); +} + +static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) +{ + /* We need to consider all queues that the net core sees as one */ + struct efx_nic *efx = txq1->efx; + struct efx_tx_queue *txq2; + unsigned int fill_level; + + fill_level = efx_channel_tx_old_fill_level(txq1->channel); + if (likely(fill_level < efx->txq_stop_thresh)) + return; + + /* We used the stale old_read_count above, which gives us a + * pessimistic estimate of the fill level (which may even + * validly be >= efx->txq_entries). Now try again using + * read_count (more likely to be a cache miss). + * + * If we read read_count and then conditionally stop the + * queue, it is possible for the completion path to race with + * us and complete all outstanding descriptors in the middle, + * after which there will be no more completions to wake it. + * Therefore we stop the queue first, then read read_count + * (with a memory barrier to ensure the ordering), then + * restart the queue if the fill level turns out to be low + * enough. 
+ */ + netif_tx_stop_queue(txq1->core_txq); + smp_mb(); + efx_for_each_channel_tx_queue(txq2, txq1->channel) + txq2->old_read_count = READ_ONCE(txq2->read_count); + + fill_level = efx_channel_tx_old_fill_level(txq1->channel); + EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries); + if (likely(fill_level < efx->txq_stop_thresh)) { + smp_mb(); + if (likely(!efx->loopback_selftest)) + netif_tx_start_queue(txq1->core_txq); + } +} + +static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + unsigned int copy_len = skb->len; + struct efx_tx_buffer *buffer; + u8 *copy_buffer; + int rc; + + EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE); + + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + + copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer); + if (unlikely(!copy_buffer)) + return -ENOMEM; + + rc = skb_copy_bits(skb, 0, copy_buffer, copy_len); + EFX_WARN_ON_PARANOID(rc); + buffer->len = copy_len; + + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB; + + ++tx_queue->insert_count; + return rc; +} + +#ifdef EFX_USE_PIO + +struct efx_short_copy_buffer { + int used; + u8 buf[L1_CACHE_BYTES]; +}; + +/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. + * Advances piobuf pointer. Leaves additional data in the copy buffer. + */ +static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + int block_len = len & ~(sizeof(copy_buf->buf) - 1); + + __iowrite64_copy(*piobuf, data, block_len >> 3); + *piobuf += block_len; + len -= block_len; + + if (len) { + data += block_len; + BUG_ON(copy_buf->used); + BUG_ON(len > sizeof(copy_buf->buf)); + memcpy(copy_buf->buf, data, len); + copy_buf->used = len; + } +} + +/* Copy to PIO, respecting dword alignment, popping data from copy buffer first. + * Advances piobuf pointer. Leaves additional data in the copy buffer. + */ +static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + if (copy_buf->used) { + /* if the copy buffer is partially full, fill it up and write */ + int copy_to_buf = + min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); + + memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); + copy_buf->used += copy_to_buf; + + /* if we didn't fill it up then we're done for now */ + if (copy_buf->used < sizeof(copy_buf->buf)) + return; + + __iowrite64_copy(*piobuf, copy_buf->buf, + sizeof(copy_buf->buf) >> 3); + *piobuf += sizeof(copy_buf->buf); + data += copy_to_buf; + len -= copy_to_buf; + copy_buf->used = 0; + } + + efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); +} + +static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + /* if there's anything in it, write the whole buffer, including junk */ + if (copy_buf->used) + __iowrite64_copy(piobuf, copy_buf->buf, + sizeof(copy_buf->buf) >> 3); +} + +/* Traverse skb structure and copy fragments in to PIO buffer. + * Advances piobuf pointer. 
+ */ +static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, + u8 __iomem **piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + int i; + + efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), + copy_buf); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + vaddr = kmap_atomic(skb_frag_page(f)); + + efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f), + skb_frag_size(f), copy_buf); + kunmap_atomic(vaddr); + } + + EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list); +} + +static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + struct efx_tx_buffer *buffer = + efx_tx_queue_get_insert_buffer(tx_queue); + u8 __iomem *piobuf = tx_queue->piobuf; + + /* Copy to PIO buffer. Ensure the writes are padded to the end + * of a cache line, as this is required for write-combining to be + * effective on at least x86. + */ + + if (skb_shinfo(skb)->nr_frags) { + /* The size of the copy buffer will ensure all writes + * are the size of a cache line. + */ + struct efx_short_copy_buffer copy_buf; + + copy_buf.used = 0; + + efx_skb_copy_bits_to_pio(tx_queue->efx, skb, + &piobuf, &copy_buf); + efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); + } else { + /* Pad the write to the size of a cache line. + * We can do this because we know the skb_shared_info struct is + * after the source, and the destination buffer is big enough. + */ + BUILD_BUG_ON(L1_CACHE_BYTES > + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + __iowrite64_copy(tx_queue->piobuf, skb->data, + ALIGN(skb->len, L1_CACHE_BYTES) >> 3); + } + + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION; + + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO, + ESF_DZ_TX_PIO_CONT, 0, + ESF_DZ_TX_PIO_BYTE_CNT, skb->len, + ESF_DZ_TX_PIO_BUF_ADDR, + tx_queue->piobuf_offset); + ++tx_queue->insert_count; + return 0; +} + +/* Decide whether we can use TX PIO, ie. write packet data directly into + * a buffer on the device. This can reduce latency at the expense of + * throughput, so we only do this if both hardware and software TX rings + * are empty, including all queues for the channel. This also ensures that + * only one packet at a time can be using the PIO buffer. If the xmit_more + * flag is set then we don't use this - there'll be another packet along + * shortly and we want to hold off the doorbell. + */ +static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue) +{ + struct efx_channel *channel = tx_queue->channel; + + if (!tx_queue->piobuf) + return false; + + EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors); + + efx_for_each_channel_tx_queue(tx_queue, channel) + if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count)) + return false; + + return true; +} +#endif /* EFX_USE_PIO */ + +/* Send any pending traffic for a channel. xmit_more is shared across all + * queues for a channel, so we must check all of them. + */ +static void efx_tx_send_pending(struct efx_channel *channel) +{ + struct efx_tx_queue *q; + + efx_for_each_channel_tx_queue(q, channel) { + if (q->xmit_pending) + efx_nic_push_buffers(q); + } +} + +/* + * Add a socket buffer to a TX queue + * + * This maps all fragments of a socket buffer for DMA and adds them to + * the TX queue. The queue's insert pointer will be incremented by + * the number of fragments in the socket buffer. 
+ * + * If any DMA mapping fails, any mapped fragments will be unmapped, + * the queue's insert pointer will be restored to its original value. + * + * This function is split out from efx_hard_start_xmit to allow the + * loopback test to direct packets via specific TX queues. + * + * Returns NETDEV_TX_OK. + * You must hold netif_tx_lock() to call this function. + */ +netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + unsigned int old_insert_count = tx_queue->insert_count; + bool xmit_more = netdev_xmit_more(); + bool data_mapped = false; + unsigned int segments; + unsigned int skb_len; + int rc; + + skb_len = skb->len; + segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; + if (segments == 1) + segments = 0; /* Don't use TSO for a single segment. */ + + /* Handle TSO first - it's *possible* (although unlikely) that we might + * be passed a packet to segment that's smaller than the copybreak/PIO + * size limit. + */ + if (segments) { + switch (tx_queue->tso_version) { + case 1: + rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped); + break; + case 2: + rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped); + break; + case 0: /* No TSO on this queue, SW fallback needed */ + default: + rc = -EINVAL; + break; + } + if (rc == -EINVAL) { + rc = efx_tx_tso_fallback(tx_queue, skb); + tx_queue->tso_fallbacks++; + if (rc == 0) + return 0; + } + if (rc) + goto err; +#ifdef EFX_USE_PIO + } else if (skb_len <= efx_piobuf_size && !xmit_more && + efx_tx_may_pio(tx_queue)) { + /* Use PIO for short packets with an empty queue. */ + if (efx_enqueue_skb_pio(tx_queue, skb)) + goto err; + tx_queue->pio_packets++; + data_mapped = true; +#endif + } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) { + /* Pad short packets or coalesce short fragmented packets. */ + if (efx_enqueue_skb_copy(tx_queue, skb)) + goto err; + tx_queue->cb_packets++; + data_mapped = true; + } + + /* Map for DMA and create descriptors if we haven't done so already. */ + if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) + goto err; + + efx_tx_maybe_stop_queue(tx_queue); + + tx_queue->xmit_pending = true; + + /* Pass off to hardware */ + if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) + efx_tx_send_pending(tx_queue->channel); + + if (segments) { + tx_queue->tso_bursts++; + tx_queue->tso_packets += segments; + tx_queue->tx_packets += segments; + } else { + tx_queue->tx_packets++; + } + + return NETDEV_TX_OK; + + +err: + efx_enqueue_unwind(tx_queue, old_insert_count); + dev_kfree_skb_any(skb); + + /* If we're not expecting another transmit and we had something to push + * on this queue or a partner queue then we need to push here to get the + * previous packets out. + */ + if (!xmit_more) + efx_tx_send_pending(tx_queue->channel); + + return NETDEV_TX_OK; +} + +/* Transmit a packet from an XDP buffer + * + * Returns number of packets sent on success, error code otherwise. + * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC + * (for XDP redirect). 
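+ * The caller is typically either our own RX path handling an XDP_TX
+ * verdict or the net core invoking the driver's ndo_xdp_xmit hook for
+ * XDP_REDIRECT.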
+ */ +int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, + bool flush) +{ + struct efx_tx_buffer *tx_buffer; + struct efx_tx_queue *tx_queue; + struct xdp_frame *xdpf; + dma_addr_t dma_addr; + unsigned int len; + int space; + int cpu; + int i = 0; + + if (unlikely(n && !xdpfs)) + return -EINVAL; + if (unlikely(!n)) + return 0; + + cpu = raw_smp_processor_id(); + if (unlikely(cpu >= efx->xdp_tx_queue_count)) + return -EINVAL; + + tx_queue = efx->xdp_tx_queues[cpu]; + if (unlikely(!tx_queue)) + return -EINVAL; + + if (!tx_queue->initialised) + return -EINVAL; + + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); + + /* If we're borrowing net stack queues we have to handle stop-restart + * or we might block the queue and it will be considered as frozen + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + if (netif_tx_queue_stopped(tx_queue->core_txq)) + goto unlock; + efx_tx_maybe_stop_queue(tx_queue); + } + + /* Check for available space. We should never need multiple + * descriptors per frame. + */ + space = efx->txq_entries + + tx_queue->read_count - tx_queue->insert_count; + + for (i = 0; i < n; i++) { + xdpf = xdpfs[i]; + + if (i >= space) + break; + + /* We'll want a descriptor for this tx. */ + prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue)); + + len = xdpf->len; + + /* Map for DMA. */ + dma_addr = dma_map_single(&efx->pci_dev->dev, + xdpf->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&efx->pci_dev->dev, dma_addr)) + break; + + /* Create descriptor and set up for unmapping DMA. */ + tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); + tx_buffer->xdpf = xdpf; + tx_buffer->flags = EFX_TX_BUF_XDP | + EFX_TX_BUF_MAP_SINGLE; + tx_buffer->dma_offset = 0; + tx_buffer->unmap_len = len; + tx_queue->tx_packets++; + } + + /* Pass mapped frames to hardware. */ + if (flush && i > 0) + efx_nic_push_buffers(tx_queue); + +unlock: + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); + + return i == 0 ? -EIO : i; +} + +/* Initiate a packet transmission. We use one channel per CPU + * (sharing when we have more CPUs than channels). + * + * Context: non-blocking. + * Should always return NETDEV_TX_OK and consume the skb. + */ +netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + unsigned index, type; + + EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); + + index = skb_get_queue_mapping(skb); + type = efx_tx_csum_type_skb(skb); + if (index >= efx->n_tx_channels) { + index -= efx->n_tx_channels; + type |= EFX_TXQ_TYPE_HIGHPRI; + } + + /* PTP "event" packet */ + if (unlikely(efx_xmit_with_hwtstamp(skb)) && + ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || + unlikely(efx_ptp_is_ptp_tx(efx, skb)))) { + /* There may be existing transmits on the channel that are + * waiting for this packet to trigger the doorbell write. + * We need to send the packets at this point. + */ + efx_tx_send_pending(efx_get_tx_channel(efx, index)); + return efx_ptp_tx(efx, skb); + } + + tx_queue = efx_get_tx_queue(efx, index, type); + if (WARN_ON_ONCE(!tx_queue)) { + /* We don't have a TXQ of the right type. + * This should never happen, as we don't advertise offload + * features unless we can support them. 
+ */ + dev_kfree_skb_any(skb); + /* If we're not expecting another transmit and we had something to push + * on this queue or a partner queue then we need to push here to get the + * previous packets out. + */ + if (!netdev_xmit_more()) + efx_tx_send_pending(tx_queue->channel); + return NETDEV_TX_OK; + } + + return __efx_enqueue_skb(tx_queue, skb); +} + +void efx_xmit_done_single(struct efx_tx_queue *tx_queue) +{ + unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int read_ptr; + bool finished = false; + + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (!finished) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!efx_tx_buffer_in_use(buffer)) { + struct efx_nic *efx = tx_queue->efx; + + netif_err(efx, hw, efx->net_dev, + "TX queue %d spurious single TX completion\n", + tx_queue->queue); + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + /* Need to check the flag before dequeueing. */ + if (buffer->flags & EFX_TX_BUF_SKB) + finished = true; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } + + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + EFX_WARN_ON_PARANOID(pkts_compl != 1); + + efx_xmit_done_check_empty(tx_queue); +} + +void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + + /* Must be inverse of queue lookup in efx_hard_start_xmit() */ + tx_queue->core_txq = + netdev_get_tx_queue(efx->net_dev, + tx_queue->channel->channel + + ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ? + efx->n_tx_channels : 0)); +} + +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; + unsigned tc, num_tc; + + if (type != TC_SETUP_QDISC_MQPRIO) + return -EOPNOTSUPP; + + /* Only Siena supported highpri queues */ + if (efx_nic_rev(efx) > EFX_REV_SIENA_A0) + return -EOPNOTSUPP; + + num_tc = mqprio->num_tc; + + if (num_tc > EFX_MAX_TX_TC) + return -EINVAL; + + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + if (num_tc == net_dev->num_tc) + return 0; + + for (tc = 0; tc < num_tc; tc++) { + net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; + net_dev->tc_to_txq[tc].count = efx->n_tx_channels; + } + + net_dev->num_tc = num_tc; + + return netif_set_real_num_tx_queues(net_dev, + max_t(int, num_tc, 1) * + efx->n_tx_channels); +} diff --git a/drivers/net/ethernet/sfc/siena/tx.h b/drivers/net/ethernet/sfc/siena/tx.h new file mode 100644 index 000000000000..f2c4d2f89919 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/tx.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2015 Solarflare Communications Inc. + */ + +#ifndef EFX_TX_H +#define EFX_TX_H + +#include + +/* Driver internal tx-path related declarations. */ + +unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned int len); + +u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, size_t len); + +/* What TXQ type will satisfy the checksum offloads required for this skb? 
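+ * For example (illustrative): a plain TCP skb with CHECKSUM_PARTIAL maps
+ * to EFX_TXQ_TYPE_OUTER_CSUM, while a VXLAN-encapsulated skb whose
+ * checksum start is the inner transport header maps to
+ * EFX_TXQ_TYPE_INNER_CSUM, additionally gaining EFX_TXQ_TYPE_OUTER_CSUM
+ * when it is a multi-segment GSO packet that needs the outer UDP checksum
+ * (SKB_GSO_UDP_TUNNEL_CSUM without SKB_GSO_PARTIAL).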
*/ +static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; /* no checksum offload */ + + if (skb->encapsulation && + skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) { + /* we only advertise features for IPv4 and IPv6 checksums on + * encapsulated packets, so if the checksum is for the inner + * packet, it must be one of them; no further checking required. + */ + + /* Do we also need to offload the outer header checksum? */ + if (skb_shinfo(skb)->gso_segs > 1 && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM; + return EFX_TXQ_TYPE_INNER_CSUM; + } + + /* similarly, we only advertise features for IPv4 and IPv6 checksums, + * so it must be one of them. No need for further checks. + */ + return EFX_TXQ_TYPE_OUTER_CSUM; +} +#endif /* EFX_TX_H */ diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c new file mode 100644 index 000000000000..9bc8281b7f5b --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/tx_common.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "efx.h" +#include "nic_common.h" +#include "tx_common.h" + +static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) +{ + return DIV_ROUND_UP(tx_queue->ptr_mask + 1, + PAGE_SIZE >> EFX_TX_CB_ORDER); +} + +int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int entries; + int rc; + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); + EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); + tx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating TX queue %d size %#x mask %#x\n", + tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); + + /* Allocate software ring */ + tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), + GFP_KERNEL); + if (!tx_queue->buffer) + return -ENOMEM; + + tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue), + sizeof(tx_queue->cb_page[0]), GFP_KERNEL); + if (!tx_queue->cb_page) { + rc = -ENOMEM; + goto fail1; + } + + /* Allocate hardware ring, determine TXQ type */ + rc = efx_nic_probe_tx(tx_queue); + if (rc) + goto fail2; + + tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue; + return 0; + +fail2: + kfree(tx_queue->cb_page); + tx_queue->cb_page = NULL; +fail1: + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; + return rc; +} + +void efx_init_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + + netif_dbg(efx, drv, efx->net_dev, + "initialising TX queue %d\n", tx_queue->queue); + + tx_queue->insert_count = 0; + tx_queue->notify_count = 0; + tx_queue->write_count = 0; + tx_queue->packet_write_count = 0; + tx_queue->old_write_count = 0; + tx_queue->read_count = 0; + tx_queue->old_read_count = 0; + tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; + 
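+	/* No doorbell writes are outstanding yet; TX timestamping is only
+	 * enabled on the PTP channel when MAC TX timestamps are in use.
+	 */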
tx_queue->xmit_pending = false; + tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) && + tx_queue->channel == efx_ptp_channel(efx)); + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; + + tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel); + tx_queue->tso_version = 0; + + /* Set up TX descriptor ring */ + efx_nic_init_tx(tx_queue); + + tx_queue->initialised = true; +} + +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "shutting down TX queue %d\n", tx_queue->queue); + + tx_queue->initialised = false; + + if (!tx_queue->buffer) + return; + + /* Free any buffers left in the ring */ + while (tx_queue->read_count != tx_queue->write_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; + + buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + } + tx_queue->xmit_pending = false; + netdev_tx_reset_queue(tx_queue->core_txq); +} + +void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) +{ + int i; + + if (!tx_queue->buffer) + return; + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "destroying TX queue %d\n", tx_queue->queue); + efx_nic_remove_tx(tx_queue); + + if (tx_queue->cb_page) { + for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++) + efx_nic_free_buffer(tx_queue->efx, + &tx_queue->cb_page[i]); + kfree(tx_queue->cb_page); + tx_queue->cb_page = NULL; + } + + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; + tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL; +} + +void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + if (buffer->unmap_len) { + struct device *dma_dev = &tx_queue->efx->pci_dev->dev; + dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset; + + if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) + dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + buffer->unmap_len = 0; + } + + if (buffer->flags & EFX_TX_BUF_SKB) { + struct sk_buff *skb = (struct sk_buff *)buffer->skb; + + EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl); + (*pkts_compl)++; + (*bytes_compl) += skb->len; + if (tx_queue->timestamping && + (tx_queue->completed_timestamp_major || + tx_queue->completed_timestamp_minor)) { + struct skb_shared_hwtstamps hwtstamp; + + hwtstamp.hwtstamp = + efx_ptp_nic_to_kernel_time(tx_queue); + skb_tstamp_tx(skb, &hwtstamp); + + tx_queue->completed_timestamp_major = 0; + tx_queue->completed_timestamp_minor = 0; + } + dev_consume_skb_any((struct sk_buff *)buffer->skb); + netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, + "TX queue %d transmission id %x complete\n", + tx_queue->queue, tx_queue->read_count); + } else if (buffer->flags & EFX_TX_BUF_XDP) { + xdp_return_frame_rx_napi(buffer->xdpf); + } + + buffer->len = 0; + buffer->flags = 0; +} + +/* Remove packets from the TX queue + * + * This removes packets from the TX queue, up to and including the + * specified index. 
+ */ +static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, + unsigned int index, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int stop_index, read_ptr; + + stop_index = (index + 1) & tx_queue->ptr_mask; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (read_ptr != stop_index) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!efx_tx_buffer_in_use(buffer)) { + netif_err(efx, tx_err, efx->net_dev, + "TX queue %d spurious TX completion id %d\n", + tx_queue->queue, read_ptr); + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } +} + +void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) +{ + if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); + if (tx_queue->read_count == tx_queue->old_write_count) { + /* Ensure that read_count is flushed. */ + smp_mb(); + tx_queue->empty_read_count = + tx_queue->read_count | EFX_EMPTY_COUNT_VALID; + } + } +} + +void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) +{ + unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; + struct efx_nic *efx = tx_queue->efx; + + EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask); + + efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; + + if (pkts_compl > 1) + ++tx_queue->merge_events; + + /* See if we need to restart the netif queue. This memory + * barrier ensures that we write read_count (inside + * efx_dequeue_buffers()) before reading the queue status. + */ + smp_mb(); + if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && + likely(efx->port_enabled) && + likely(netif_device_present(efx->net_dev))) { + fill_level = efx_channel_tx_fill_level(tx_queue->channel); + if (fill_level <= efx->txq_wake_thresh) + netif_tx_wake_queue(tx_queue->core_txq); + } + + efx_xmit_done_check_empty(tx_queue); +} + +/* Remove buffers put into a tx_queue for the current packet. + * None of the buffers must have an skb attached. + */ +void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count) +{ + struct efx_tx_buffer *buffer; + unsigned int bytes_compl = 0; + unsigned int pkts_compl = 0; + + /* Work backwards until we hit the original insert pointer value */ + while (tx_queue->insert_count != insert_count) { + --tx_queue->insert_count; + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + } +} + +struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len) +{ + const struct efx_nic_type *nic_type = tx_queue->efx->type; + struct efx_tx_buffer *buffer; + unsigned int dma_len; + + /* Map the fragment taking account of NIC-dependent DMA limits. 
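+ * For example (illustrative): assuming a 4096-byte per-descriptor limit
+ * and no additional boundary restrictions, a 10000-byte fragment would be
+ * split into descriptors of 4096, 4096 and 1808 bytes, each flagged
+ * EFX_TX_BUF_CONT.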
*/ + do { + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + + if (nic_type->tx_limit_len) + dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len); + else + dma_len = len; + + buffer->len = dma_len; + buffer->dma_addr = dma_addr; + buffer->flags = EFX_TX_BUF_CONT; + len -= dma_len; + dma_addr += dma_len; + ++tx_queue->insert_count; + } while (len); + + return buffer; +} + +int efx_tx_tso_header_length(struct sk_buff *skb) +{ + size_t header_len; + + if (skb->encapsulation) + header_len = skb_inner_transport_header(skb) - + skb->data + + (inner_tcp_hdr(skb)->doff << 2u); + else + header_len = skb_transport_header(skb) - skb->data + + (tcp_hdr(skb)->doff << 2u); + return header_len; +} + +/* Map all data from an SKB for DMA and create descriptors on the queue. */ +int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count) +{ + struct efx_nic *efx = tx_queue->efx; + struct device *dma_dev = &efx->pci_dev->dev; + unsigned int frag_index, nr_frags; + dma_addr_t dma_addr, unmap_addr; + unsigned short dma_flags; + size_t len, unmap_len; + + nr_frags = skb_shinfo(skb)->nr_frags; + frag_index = 0; + + /* Map header data. */ + len = skb_headlen(skb); + dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE); + dma_flags = EFX_TX_BUF_MAP_SINGLE; + unmap_len = len; + unmap_addr = dma_addr; + + if (unlikely(dma_mapping_error(dma_dev, dma_addr))) + return -EIO; + + if (segment_count) { + /* For TSO we need to put the header in to a separate + * descriptor. Map this separately if necessary. + */ + size_t header_len = efx_tx_tso_header_length(skb); + + if (header_len != len) { + tx_queue->tso_long_headers++; + efx_tx_map_chunk(tx_queue, dma_addr, header_len); + len -= header_len; + dma_addr += header_len; + } + } + + /* Add descriptors for each fragment. */ + do { + struct efx_tx_buffer *buffer; + skb_frag_t *fragment; + + buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); + + /* The final descriptor for a fragment is responsible for + * unmapping the whole fragment. + */ + buffer->flags = EFX_TX_BUF_CONT | dma_flags; + buffer->unmap_len = unmap_len; + buffer->dma_offset = buffer->dma_addr - unmap_addr; + + if (frag_index >= nr_frags) { + /* Store SKB details with the final buffer for + * the completion. + */ + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB | dma_flags; + return 0; + } + + /* Move on to the next fragment. */ + fragment = &skb_shinfo(skb)->frags[frag_index++]; + len = skb_frag_size(fragment); + dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, + DMA_TO_DEVICE); + dma_flags = 0; + unmap_len = len; + unmap_addr = dma_addr; + + if (unlikely(dma_mapping_error(dma_dev, dma_addr))) + return -EIO; + } while (1); +} + +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for option descriptors */ + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + +/* + * Fallback to software TSO. + * + * This is used if we are unable to send a GSO packet through hardware TSO. 
+ * This should only ever happen due to per-queue restrictions - unsupported + * packets should first be filtered by the feature flags. + * + * Returns 0 on success, error code otherwise. + */ +int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct sk_buff *segments, *next; + + segments = skb_gso_segment(skb, 0); + if (IS_ERR(segments)) + return PTR_ERR(segments); + + dev_consume_skb_any(skb); + + skb_list_walk_safe(segments, skb, next) { + skb_mark_not_on_list(skb); + efx_enqueue_skb(tx_queue, skb); + } + + return 0; +} diff --git a/drivers/net/ethernet/sfc/siena/tx_common.h b/drivers/net/ethernet/sfc/siena/tx_common.h new file mode 100644 index 000000000000..bbab7f248250 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/tx_common.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_TX_COMMON_H +#define EFX_TX_COMMON_H + +int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); +void efx_init_tx_queue(struct efx_tx_queue *tx_queue); +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); +void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); + +void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, + unsigned int *pkts_compl, + unsigned int *bytes_compl); + +static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) +{ + return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION); +} + +void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue); +void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); + +void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count); + +struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len); +int efx_tx_tso_header_length(struct sk_buff *skb); +int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count); + +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); +int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + +extern bool efx_separate_tx_channels; +#endif diff --git a/drivers/net/ethernet/sfc/siena/vfdi.h b/drivers/net/ethernet/sfc/siena/vfdi.h new file mode 100644 index 000000000000..480b872eb4d1 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/vfdi.h @@ -0,0 +1,252 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2010-2012 Solarflare Communications Inc. + */ +#ifndef _VFDI_H +#define _VFDI_H + +/** + * DOC: Virtual Function Driver Interface + * + * This file contains software structures used to form a two way + * communication channel between the VF driver and the PF driver, + * named Virtual Function Driver Interface (VFDI). + * + * For the purposes of VFDI, a page is a memory region with size and + * alignment of 4K. All addresses are DMA addresses to be used within + * the domain of the relevant VF. 
+ * + * The only hardware-defined channels for a VF driver to communicate + * with the PF driver are the event mailboxes (%FR_CZ_USR_EV + * registers). Writing to these registers generates an event with + * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox + * and USER_EV_REG_VALUE set to the value written. The PF driver may + * direct or disable delivery of these events by setting + * %FR_CZ_USR_EV_CFG. + * + * The PF driver can send arbitrary events to arbitrary event queues. + * However, for consistency, VFDI events from the PF are defined to + * follow the same form and be sent to the first event queue assigned + * to the VF while that queue is enabled by the VF driver. + * + * The general form of the variable bits of VFDI events is: + * + * 0 16 24 31 + * | DATA | TYPE | SEQ | + * + * SEQ is a sequence number which should be incremented by 1 (modulo + * 256) for each event. The sequence numbers used in each direction + * are independent. + * + * The VF submits requests of type &struct vfdi_req by sending the + * address of the request (ADDR) in a series of 4 events: + * + * 0 16 24 31 + * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ | + * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 | + * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 | + * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 | + * + * The address must be page-aligned. After receiving such a valid + * series of events, the PF driver will attempt to read the request + * and write a response to the same address. In case of an invalid + * sequence of events or a DMA error, there will be no response. + * + * The VF driver may request that the PF driver writes status + * information into its domain asynchronously. After writing the + * status, the PF driver will send an event of the form: + * + * 0 16 24 31 + * | reserved | VFDI_EV_TYPE_STATUS | SEQ | + * + * In case the VF must be reset for any reason, the PF driver will + * send an event of the form: + * + * 0 16 24 31 + * | reserved | VFDI_EV_TYPE_RESET | SEQ | + * + * It is then the responsibility of the VF driver to request + * reinitialisation of its queues. + */ +#define VFDI_EV_SEQ_LBN 24 +#define VFDI_EV_SEQ_WIDTH 8 +#define VFDI_EV_TYPE_LBN 16 +#define VFDI_EV_TYPE_WIDTH 8 +#define VFDI_EV_TYPE_REQ_WORD0 0 +#define VFDI_EV_TYPE_REQ_WORD1 1 +#define VFDI_EV_TYPE_REQ_WORD2 2 +#define VFDI_EV_TYPE_REQ_WORD3 3 +#define VFDI_EV_TYPE_STATUS 4 +#define VFDI_EV_TYPE_RESET 5 +#define VFDI_EV_DATA_LBN 0 +#define VFDI_EV_DATA_WIDTH 16 + +struct vfdi_endpoint { + u8 mac_addr[ETH_ALEN]; + __be16 tci; +}; + +/** + * enum vfdi_op - VFDI operation enumeration + * @VFDI_OP_RESPONSE: Indicates a response to the request. + * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ. + * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ. + * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ. + * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then + * finalize the SRAM entries. + * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ. + * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters. + * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates + * from PF and write the initial status. + * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status + * updates from PF. 
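+ * A request is carried in a &struct vfdi_req whose @op field holds one of
+ * these values; the PF is expected to write its reply back into the same
+ * structure, with @op set to %VFDI_OP_RESPONSE and @rc set to a VFDI_RC_*
+ * code.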
+ */ +enum vfdi_op { + VFDI_OP_RESPONSE = 0, + VFDI_OP_INIT_EVQ = 1, + VFDI_OP_INIT_RXQ = 2, + VFDI_OP_INIT_TXQ = 3, + VFDI_OP_FINI_ALL_QUEUES = 4, + VFDI_OP_INSERT_FILTER = 5, + VFDI_OP_REMOVE_ALL_FILTERS = 6, + VFDI_OP_SET_STATUS_PAGE = 7, + VFDI_OP_CLEAR_STATUS_PAGE = 8, + VFDI_OP_LIMIT, +}; + +/* Response codes for VFDI operations. Other values may be used in future. */ +#define VFDI_RC_SUCCESS 0 +#define VFDI_RC_ENOMEM (-12) +#define VFDI_RC_EINVAL (-22) +#define VFDI_RC_EOPNOTSUPP (-95) +#define VFDI_RC_ETIMEDOUT (-110) + +/** + * struct vfdi_req - Request from VF driver to PF driver + * @op: Operation code or response indicator, taken from &enum vfdi_op. + * @rc: Response code. Set to 0 on success or a negative error code on failure. + * @u.init_evq.index: Index of event queue to create. + * @u.init_evq.buf_count: Number of 4k buffers backing event queue. + * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA + * address of each page backing the event queue. + * @u.init_rxq.index: Index of receive queue to create. + * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue. + * @u.init_rxq.evq: Instance of event queue to target receive events at. + * @u.init_rxq.label: Label used in receive events. + * @u.init_rxq.flags: Unused. + * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA + * address of each page backing the receive queue. + * @u.init_txq.index: Index of transmit queue to create. + * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue. + * @u.init_txq.evq: Instance of event queue to target transmit completion + * events at. + * @u.init_txq.label: Label used in transmit completion events. + * @u.init_txq.flags: Checksum offload flags. + * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA + * address of each page backing the transmit queue. + * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting + * all traffic at this receive queue. + * @u.mac_filter.flags: MAC filter flags. + * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status. + * This address must be page-aligned and the PF may write up to a + * whole page (allowing for extension of the structure). + * @u.set_status_page.peer_page_count: Number of additional pages the VF + * has provided into which peer addresses may be DMAd. + * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages. + * If the number of peers exceeds 256, then the VF must provide + * additional pages in this array. The PF will then DMA up to + * 512 vfdi_endpoint structures into each page. These addresses + * must be page-aligned. 
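+ * For example (illustrative), a %VFDI_OP_INIT_EVQ request for an event
+ * queue backed by two 4k buffers would set @u.init_evq.buf_count to 2 and
+ * @u.init_evq.addr[0] and [1] to the DMA addresses of those pages.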
+ */ +struct vfdi_req { + u32 op; + u32 reserved1; + s32 rc; + u32 reserved2; + union { + struct { + u32 index; + u32 buf_count; + u64 addr[]; + } init_evq; + struct { + u32 index; + u32 buf_count; + u32 evq; + u32 label; + u32 flags; +#define VFDI_RXQ_FLAG_SCATTER_EN 1 + u32 reserved; + u64 addr[]; + } init_rxq; + struct { + u32 index; + u32 buf_count; + u32 evq; + u32 label; + u32 flags; +#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1 +#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2 + u32 reserved; + u64 addr[]; + } init_txq; + struct { + u32 rxq; + u32 flags; +#define VFDI_MAC_FILTER_FLAG_RSS 1 +#define VFDI_MAC_FILTER_FLAG_SCATTER 2 + } mac_filter; + struct { + u64 dma_addr; + u64 peer_page_count; + u64 peer_page_addr[]; + } set_status_page; + } u; +}; + +/** + * struct vfdi_status - Status provided by PF driver to VF driver + * @generation_start: A generation count DMA'd to VF *before* the + * rest of the structure. + * @generation_end: A generation count DMA'd to VF *after* the + * rest of the structure. + * @version: Version of this structure; currently set to 1. Later + * versions must either be layout-compatible or only be sent to VFs + * that specifically request them. + * @length: Total length of this structure including embedded tables + * @vi_scale: log2 the number of VIs available on this VF. This quantity + * is used by the hardware for register decoding. + * @max_tx_channels: The maximum number of transmit queues the VF can use. + * @rss_rxq_count: The number of receive queues present in the shared RSS + * indirection table. + * @peer_count: Total number of peers in the complete peer list. If larger + * than ARRAY_SIZE(%peers), then the VF must provide sufficient + * additional pages each of which is filled with vfdi_endpoint structures. + * @local: The MAC address and outer VLAN tag of *this* VF + * @peers: Table of peer addresses. The @tci fields in these structures + * are currently unused and must be ignored. Additional peers are + * written into any additional pages provided by the VF. + * @timer_quantum_ns: Timer quantum (nominal period between timer ticks) + * for interrupt moderation timers, in nanoseconds. This member is only + * present if @length is sufficiently large. + */ +struct vfdi_status { + u32 generation_start; + u32 generation_end; + u32 version; + u32 length; + u8 vi_scale; + u8 max_tx_channels; + u8 rss_rxq_count; + u8 reserved1; + u16 peer_count; + u16 reserved2; + struct vfdi_endpoint local; + struct vfdi_endpoint peers[256]; + + /* Members below here extend version 1 of this structure */ + u32 timer_quantum_ns; +}; + +#endif diff --git a/drivers/net/ethernet/sfc/siena/workarounds.h b/drivers/net/ethernet/sfc/siena/workarounds.h new file mode 100644 index 000000000000..815be2d20c4b --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/workarounds.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2006-2013 Solarflare Communications Inc. + */ + +#ifndef EFX_WORKAROUNDS_H +#define EFX_WORKAROUNDS_H + +/* + * Hardware workarounds. + * Bug numbers are from Solarflare's Bugzilla. 
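+ * Each EFX_WORKAROUND_* macro below evaluates to true when the workaround
+ * applies to the given NIC; for example EFX_WORKAROUND_17213(efx) is only
+ * true on Siena A0 parts.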
+ */ + +#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) +#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) +#define EFX_WORKAROUND_10G(efx) 1 + +/* Bit-bashed I2C reads cause performance drop */ +#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G +/* Legacy interrupt storm when interrupt fifo fills */ +#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA + +/* Lockup when writing event block registers at gen2/gen3 */ +#define EFX_EF10_WORKAROUND_35388(efx) \ + (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388) +#define EFX_WORKAROUND_35388(efx) \ + (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx)) + +/* Moderation timer access must go through MCDI */ +#define EFX_EF10_WORKAROUND_61265(efx) \ + (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265) + +#endif /* EFX_WORKAROUNDS_H */ -- cgit v1.2.3 From 956f2d86cb37dc6dae8174001a668cbc8b9bbd1f Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:31:55 +0100 Subject: sfc/siena: Remove build references to missing functionality Functionality not supported or needed on Siena includes: - Anything for EF100 - EF10 specifics such as register access, PIO and TSO offload. Also only bind to Siena NICs. Remove EF10 specifics from nic.h. The functions that start with efx_farch_ will be removed from sfc.ko with a subsequent patch. Add the efx_ prefix to siena_prepare_flush() to make it consistent with the other APIs. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx.c | 28 +--- drivers/net/ethernet/sfc/siena/efx.h | 15 +- drivers/net/ethernet/sfc/siena/nic.c | 7 - drivers/net/ethernet/sfc/siena/nic.h | 190 +----------------------- drivers/net/ethernet/sfc/siena/nic_common.h | 3 - drivers/net/ethernet/sfc/siena/ptp.c | 11 -- drivers/net/ethernet/sfc/siena/siena.c | 4 +- drivers/net/ethernet/sfc/siena/siena_sriov.c | 2 +- drivers/net/ethernet/sfc/siena/tx.c | 209 +-------------------------- drivers/net/ethernet/sfc/siena/workarounds.h | 6 - 10 files changed, 17 insertions(+), 458 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index 5a772354da83..08b3e36c34b4 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -26,7 +26,6 @@ #include "efx.h" #include "efx_common.h" #include "efx_channels.h" -#include "ef100.h" #include "rx_common.h" #include "tx_common.h" #include "nic.h" @@ -795,22 +794,10 @@ static void efx_unregister_netdev(struct efx_nic *efx) /* PCI device ID table */ static const struct pci_device_id efx_pci_table[] = { - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ - .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ - .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ - .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */ - .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */ - .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */ - .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, - {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */ - .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, - 
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */ - .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ + .driver_data = (unsigned long)&siena_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ + .driver_data = (unsigned long)&siena_a0_nic_type}, {0} /* end of list */ }; @@ -1298,14 +1285,8 @@ static int __init efx_init_module(void) if (rc < 0) goto err_pci; - rc = pci_register_driver(&ef100_pci_driver); - if (rc < 0) - goto err_pci_ef100; - return 0; - err_pci_ef100: - pci_unregister_driver(&efx_pci_driver); err_pci: efx_destroy_reset_workqueue(); err_reset: @@ -1318,7 +1299,6 @@ static void __exit efx_exit_module(void) { printk(KERN_INFO "Solarflare NET driver unloading\n"); - pci_unregister_driver(&ef100_pci_driver); pci_unregister_driver(&efx_pci_driver); efx_destroy_reset_workqueue(); unregister_netdevice_notifier(&efx_netdev_notifier); diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h index c05a83da9e44..962c6b66eea7 100644 --- a/drivers/net/ethernet/sfc/siena/efx.h +++ b/drivers/net/ethernet/sfc/siena/efx.h @@ -10,8 +10,6 @@ #include #include "net_driver.h" -#include "ef100_rx.h" -#include "ef100_tx.h" #include "filter.h" int efx_net_open(struct net_device *net_dev); @@ -24,9 +22,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { - return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue, - ef100_enqueue_skb, __efx_enqueue_skb, - tx_queue, skb); + return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue, + __efx_enqueue_skb, tx_queue, skb); } void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, @@ -40,16 +37,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, static inline void efx_rx_flush_packet(struct efx_channel *channel) { if (channel->rx_pkt_n_frags) - INDIRECT_CALL_2(channel->efx->type->rx_packet, - __ef100_rx_packet, __efx_rx_packet, - channel); + __efx_rx_packet(channel); } static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix) { - if (efx->type->rx_buf_hash_valid) - return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid, - ef100_rx_buf_hash_valid, - prefix); return true; } diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c index 22fbb0ae77fb..c59357178657 100644 --- a/drivers/net/ethernet/sfc/siena/nic.c +++ b/drivers/net/ethernet/sfc/siena/nic.c @@ -16,7 +16,6 @@ #include "bitfield.h" #include "efx.h" #include "nic.h" -#include "ef10_regs.h" #include "farch_regs.h" #include "io.h" #include "workarounds.h" @@ -195,7 +194,6 @@ struct efx_nic_reg { #define REGISTER_BB(name) REGISTER(name, F, B, B) #define REGISTER_BZ(name) REGISTER(name, F, B, Z) #define REGISTER_CZ(name) REGISTER(name, F, C, Z) -#define REGISTER_DZ(name) REGISTER(name, E, D, Z) static const struct efx_nic_reg efx_nic_regs[] = { REGISTER_AZ(ADR_REGION), @@ -302,9 +300,6 @@ static const struct efx_nic_reg efx_nic_regs[] = { REGISTER_AB(XX_TXDRV_CTL), /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ /* XX_CORE_STAT is partly RC */ - REGISTER_DZ(BIU_HW_REV_ID), - REGISTER_DZ(MC_DB_LWRD), - REGISTER_DZ(MC_DB_HWRD), }; struct efx_nic_reg_table { @@ -337,7 +332,6 @@ struct efx_nic_reg_table { FR_BZ_ ## name ## _STEP, \ 
FR_CZ_ ## name ## _ROWS) #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z) -#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z) static const struct efx_nic_reg_table efx_nic_reg_tables[] = { /* DRIVER is not used */ @@ -368,7 +362,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = { /* MSIX_PBA_TABLE is not mapped */ /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ REGISTER_TABLE_BZ(RX_FILTER_TBL0), - REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS), }; size_t efx_nic_get_regs_len(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/siena/nic.h b/drivers/net/ethernet/sfc/siena/nic.h index 251868235ae4..935cb0ab5ec0 100644 --- a/drivers/net/ethernet/sfc/siena/nic.h +++ b/drivers/net/ethernet/sfc/siena/nic.h @@ -116,193 +116,7 @@ struct siena_nic_data { #endif }; -enum { - EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT, - EF10_STAT_port_tx_packets, - EF10_STAT_port_tx_pause, - EF10_STAT_port_tx_control, - EF10_STAT_port_tx_unicast, - EF10_STAT_port_tx_multicast, - EF10_STAT_port_tx_broadcast, - EF10_STAT_port_tx_lt64, - EF10_STAT_port_tx_64, - EF10_STAT_port_tx_65_to_127, - EF10_STAT_port_tx_128_to_255, - EF10_STAT_port_tx_256_to_511, - EF10_STAT_port_tx_512_to_1023, - EF10_STAT_port_tx_1024_to_15xx, - EF10_STAT_port_tx_15xx_to_jumbo, - EF10_STAT_port_rx_bytes, - EF10_STAT_port_rx_bytes_minus_good_bytes, - EF10_STAT_port_rx_good_bytes, - EF10_STAT_port_rx_bad_bytes, - EF10_STAT_port_rx_packets, - EF10_STAT_port_rx_good, - EF10_STAT_port_rx_bad, - EF10_STAT_port_rx_pause, - EF10_STAT_port_rx_control, - EF10_STAT_port_rx_unicast, - EF10_STAT_port_rx_multicast, - EF10_STAT_port_rx_broadcast, - EF10_STAT_port_rx_lt64, - EF10_STAT_port_rx_64, - EF10_STAT_port_rx_65_to_127, - EF10_STAT_port_rx_128_to_255, - EF10_STAT_port_rx_256_to_511, - EF10_STAT_port_rx_512_to_1023, - EF10_STAT_port_rx_1024_to_15xx, - EF10_STAT_port_rx_15xx_to_jumbo, - EF10_STAT_port_rx_gtjumbo, - EF10_STAT_port_rx_bad_gtjumbo, - EF10_STAT_port_rx_overflow, - EF10_STAT_port_rx_align_error, - EF10_STAT_port_rx_length_error, - EF10_STAT_port_rx_nodesc_drops, - EF10_STAT_port_rx_pm_trunc_bb_overflow, - EF10_STAT_port_rx_pm_discard_bb_overflow, - EF10_STAT_port_rx_pm_trunc_vfifo_full, - EF10_STAT_port_rx_pm_discard_vfifo_full, - EF10_STAT_port_rx_pm_trunc_qbb, - EF10_STAT_port_rx_pm_discard_qbb, - EF10_STAT_port_rx_pm_discard_mapping, - EF10_STAT_port_rx_dp_q_disabled_packets, - EF10_STAT_port_rx_dp_di_dropped_packets, - EF10_STAT_port_rx_dp_streaming_packets, - EF10_STAT_port_rx_dp_hlb_fetch, - EF10_STAT_port_rx_dp_hlb_wait, - EF10_STAT_rx_unicast, - EF10_STAT_rx_unicast_bytes, - EF10_STAT_rx_multicast, - EF10_STAT_rx_multicast_bytes, - EF10_STAT_rx_broadcast, - EF10_STAT_rx_broadcast_bytes, - EF10_STAT_rx_bad, - EF10_STAT_rx_bad_bytes, - EF10_STAT_rx_overflow, - EF10_STAT_tx_unicast, - EF10_STAT_tx_unicast_bytes, - EF10_STAT_tx_multicast, - EF10_STAT_tx_multicast_bytes, - EF10_STAT_tx_broadcast, - EF10_STAT_tx_broadcast_bytes, - EF10_STAT_tx_bad, - EF10_STAT_tx_bad_bytes, - EF10_STAT_tx_overflow, - EF10_STAT_V1_COUNT, - EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT, - EF10_STAT_fec_corrected_errors, - EF10_STAT_fec_corrected_symbols_lane0, - EF10_STAT_fec_corrected_symbols_lane1, - EF10_STAT_fec_corrected_symbols_lane2, - EF10_STAT_fec_corrected_symbols_lane3, - EF10_STAT_ctpio_vi_busy_fallback, - EF10_STAT_ctpio_long_write_success, - EF10_STAT_ctpio_missing_dbell_fail, - EF10_STAT_ctpio_overflow_fail, - EF10_STAT_ctpio_underflow_fail, - EF10_STAT_ctpio_timeout_fail, - 
EF10_STAT_ctpio_noncontig_wr_fail, - EF10_STAT_ctpio_frm_clobber_fail, - EF10_STAT_ctpio_invalid_wr_fail, - EF10_STAT_ctpio_vi_clobber_fallback, - EF10_STAT_ctpio_unqualified_fallback, - EF10_STAT_ctpio_runt_fallback, - EF10_STAT_ctpio_success, - EF10_STAT_ctpio_fallback, - EF10_STAT_ctpio_poison, - EF10_STAT_ctpio_erase, - EF10_STAT_COUNT -}; - -/* Maximum number of TX PIO buffers we may allocate to a function. - * This matches the total number of buffers on each SFC9100-family - * controller. - */ -#define EF10_TX_PIOBUF_COUNT 16 - -/** - * struct efx_ef10_nic_data - EF10 architecture NIC state - * @mcdi_buf: DMA buffer for MCDI - * @warm_boot_count: Last seen MC warm boot count - * @vi_base: Absolute index of first VI in this function - * @n_allocated_vis: Number of VIs allocated to this function - * @n_piobufs: Number of PIO buffers allocated to this function - * @wc_membase: Base address of write-combining mapping of the memory BAR - * @pio_write_base: Base address for writing PIO buffers - * @pio_write_vi_base: Relative VI number for @pio_write_base - * @piobuf_handle: Handle of each PIO buffer allocated - * @piobuf_size: size of a single PIO buffer - * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC - * reboot - * @mc_stats: Scratch buffer for converting statistics to the kernel's format - * @stats: Hardware statistics - * @workaround_35388: Flag: firmware supports workaround for bug 35388 - * @workaround_26807: Flag: firmware supports workaround for bug 26807 - * @workaround_61265: Flag: firmware supports workaround for bug 61265 - * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated - * after MC reboot - * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of - * %MC_CMD_GET_CAPABILITIES response) - * @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of - * %MC_CMD_GET_CAPABILITIES response) - * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU - * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU - * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot - * @pf_index: The number for this PF, or the parent PF if this is a VF -#ifdef CONFIG_SFC_SRIOV - * @vf: Pointer to VF data structure -#endif - * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero - * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock. - * @vlan_lock: Lock to serialize access to vlan_list. - * @udp_tunnels: UDP tunnel port numbers and types. - * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing - * @udp_tunnels to hardware and thus the push must be re-done. - * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty. 
- */ -struct efx_ef10_nic_data { - struct efx_buffer mcdi_buf; - u16 warm_boot_count; - unsigned int vi_base; - unsigned int n_allocated_vis; - unsigned int n_piobufs; - void __iomem *wc_membase, *pio_write_base; - unsigned int pio_write_vi_base; - unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; - u16 piobuf_size; - bool must_restore_piobufs; - __le64 *mc_stats; - u64 stats[EF10_STAT_COUNT]; - bool workaround_35388; - bool workaround_26807; - bool workaround_61265; - bool must_check_datapath_caps; - u32 datapath_caps; - u32 datapath_caps2; - unsigned int rx_dpcpu_fw_id; - unsigned int tx_dpcpu_fw_id; - bool must_probe_vswitching; - unsigned int pf_index; - u8 port_id[ETH_ALEN]; -#ifdef CONFIG_SFC_SRIOV - unsigned int vf_index; - struct ef10_vf *vf; -#endif - u8 vport_mac[ETH_ALEN]; - struct list_head vlan_list; - struct mutex vlan_lock; - struct efx_udp_tunnel udp_tunnels[16]; - bool udp_tunnels_dirty; - struct mutex udp_tunnels_lock; - u64 licensed_features; -}; - -/* TSOv2 */ -int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb, - bool *data_mapped); - -extern const struct efx_nic_type efx_hunt_a0_nic_type; -extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; +extern const struct efx_nic_type siena_a0_nic_type; int falcon_probe_board(struct efx_nic *efx, u16 revision_info); @@ -364,7 +178,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); /* Global Resources */ -void siena_prepare_flush(struct efx_nic *efx); +void efx_siena_prepare_flush(struct efx_nic *efx); int efx_farch_fini_dmaq(struct efx_nic *efx); void efx_farch_finish_flr(struct efx_nic *efx); void siena_finish_flush(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h index 0cef35c0c559..47deeae0a034 100644 --- a/drivers/net/ethernet/sfc/siena/nic_common.h +++ b/drivers/net/ethernet/sfc/siena/nic_common.h @@ -75,9 +75,6 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned i return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; } -int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, - bool *data_mapped); - /* Decide whether to push a TX descriptor to the NIC vs merely writing * the doorbell. This can reduce latency when we are adding a single * descriptor to an empty queue, but is otherwise pointless. Further, diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index f0ef515e2ade..daf23070d353 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -1790,17 +1790,6 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE); - /* Check licensed features. If we don't have the license for TX - * timestamps, the NIC will not support them. 
- */ - if (efx_ptp_use_mac_tx_timestamps(efx)) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - - if (!(nic_data->licensed_features & - (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) - ts_info->so_timestamping &= - ~SOF_TIMESTAMPING_TX_HARDWARE; - } if (primary && primary->ptp_data && primary->ptp_data->phc_clock) ts_info->phc_index = ptp_clock_index(primary->ptp_data->phc_clock); diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index ce3060e15b54..7cc6a2583d6c 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -56,7 +56,7 @@ static void siena_push_irq_moderation(struct efx_channel *channel) channel->channel); } -void siena_prepare_flush(struct efx_nic *efx) +void efx_siena_prepare_flush(struct efx_nic *efx) { if (efx->fc_disable++ == 0) efx_mcdi_set_mac(efx); @@ -992,7 +992,7 @@ const struct efx_nic_type siena_a0_nic_type = { .probe_port = efx_mcdi_port_probe, .remove_port = efx_mcdi_port_remove, .fini_dmaq = efx_farch_fini_dmaq, - .prepare_flush = siena_prepare_flush, + .prepare_flush = efx_siena_prepare_flush, .finish_flush = siena_finish_flush, .prepare_flr = efx_port_dummy_op_void, .finish_flr = efx_farch_finish_flr, diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.c b/drivers/net/ethernet/sfc/siena/siena_sriov.c index f12851a527d9..8b0fdeebc1a5 100644 --- a/drivers/net/ethernet/sfc/siena/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.c @@ -689,7 +689,7 @@ static int efx_vfdi_fini_all_queues(struct siena_vf *vf) MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); rtnl_lock(); - siena_prepare_flush(efx); + efx_siena_prepare_flush(efx); rtnl_unlock(); /* Flush all the initialized queues */ diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c index 138bca611341..9e68dc434832 100644 --- a/drivers/net/ethernet/sfc/siena/tx.c +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -22,14 +22,6 @@ #include "tx.h" #include "tx_common.h" #include "workarounds.h" -#include "ef10_regs.h" - -#ifdef EFX_USE_PIO - -#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) -unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; - -#endif /* EFX_USE_PIO */ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer) @@ -123,173 +115,6 @@ static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, return rc; } -#ifdef EFX_USE_PIO - -struct efx_short_copy_buffer { - int used; - u8 buf[L1_CACHE_BYTES]; -}; - -/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. - * Advances piobuf pointer. Leaves additional data in the copy buffer. - */ -static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, - u8 *data, int len, - struct efx_short_copy_buffer *copy_buf) -{ - int block_len = len & ~(sizeof(copy_buf->buf) - 1); - - __iowrite64_copy(*piobuf, data, block_len >> 3); - *piobuf += block_len; - len -= block_len; - - if (len) { - data += block_len; - BUG_ON(copy_buf->used); - BUG_ON(len > sizeof(copy_buf->buf)); - memcpy(copy_buf->buf, data, len); - copy_buf->used = len; - } -} - -/* Copy to PIO, respecting dword alignment, popping data from copy buffer first. - * Advances piobuf pointer. Leaves additional data in the copy buffer. 
- */ -static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, - u8 *data, int len, - struct efx_short_copy_buffer *copy_buf) -{ - if (copy_buf->used) { - /* if the copy buffer is partially full, fill it up and write */ - int copy_to_buf = - min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); - - memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); - copy_buf->used += copy_to_buf; - - /* if we didn't fill it up then we're done for now */ - if (copy_buf->used < sizeof(copy_buf->buf)) - return; - - __iowrite64_copy(*piobuf, copy_buf->buf, - sizeof(copy_buf->buf) >> 3); - *piobuf += sizeof(copy_buf->buf); - data += copy_to_buf; - len -= copy_to_buf; - copy_buf->used = 0; - } - - efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); -} - -static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, - struct efx_short_copy_buffer *copy_buf) -{ - /* if there's anything in it, write the whole buffer, including junk */ - if (copy_buf->used) - __iowrite64_copy(piobuf, copy_buf->buf, - sizeof(copy_buf->buf) >> 3); -} - -/* Traverse skb structure and copy fragments in to PIO buffer. - * Advances piobuf pointer. - */ -static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, - u8 __iomem **piobuf, - struct efx_short_copy_buffer *copy_buf) -{ - int i; - - efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), - copy_buf); - - for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - u8 *vaddr; - - vaddr = kmap_atomic(skb_frag_page(f)); - - efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f), - skb_frag_size(f), copy_buf); - kunmap_atomic(vaddr); - } - - EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list); -} - -static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, - struct sk_buff *skb) -{ - struct efx_tx_buffer *buffer = - efx_tx_queue_get_insert_buffer(tx_queue); - u8 __iomem *piobuf = tx_queue->piobuf; - - /* Copy to PIO buffer. Ensure the writes are padded to the end - * of a cache line, as this is required for write-combining to be - * effective on at least x86. - */ - - if (skb_shinfo(skb)->nr_frags) { - /* The size of the copy buffer will ensure all writes - * are the size of a cache line. - */ - struct efx_short_copy_buffer copy_buf; - - copy_buf.used = 0; - - efx_skb_copy_bits_to_pio(tx_queue->efx, skb, - &piobuf, ©_buf); - efx_flush_copy_buffer(tx_queue->efx, piobuf, ©_buf); - } else { - /* Pad the write to the size of a cache line. - * We can do this because we know the skb_shared_info struct is - * after the source, and the destination buffer is big enough. - */ - BUILD_BUG_ON(L1_CACHE_BYTES > - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - __iowrite64_copy(tx_queue->piobuf, skb->data, - ALIGN(skb->len, L1_CACHE_BYTES) >> 3); - } - - buffer->skb = skb; - buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION; - - EFX_POPULATE_QWORD_5(buffer->option, - ESF_DZ_TX_DESC_IS_OPT, 1, - ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO, - ESF_DZ_TX_PIO_CONT, 0, - ESF_DZ_TX_PIO_BYTE_CNT, skb->len, - ESF_DZ_TX_PIO_BUF_ADDR, - tx_queue->piobuf_offset); - ++tx_queue->insert_count; - return 0; -} - -/* Decide whether we can use TX PIO, ie. write packet data directly into - * a buffer on the device. This can reduce latency at the expense of - * throughput, so we only do this if both hardware and software TX rings - * are empty, including all queues for the channel. This also ensures that - * only one packet at a time can be using the PIO buffer. 
If the xmit_more - * flag is set then we don't use this - there'll be another packet along - * shortly and we want to hold off the doorbell. - */ -static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue) -{ - struct efx_channel *channel = tx_queue->channel; - - if (!tx_queue->piobuf) - return false; - - EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors); - - efx_for_each_channel_tx_queue(tx_queue, channel) - if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count)) - return false; - - return true; -} -#endif /* EFX_USE_PIO */ - /* Send any pending traffic for a channel. xmit_more is shared across all * queues for a channel, so we must check all of them. */ @@ -338,35 +163,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb * size limit. */ if (segments) { - switch (tx_queue->tso_version) { - case 1: - rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped); - break; - case 2: - rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped); - break; - case 0: /* No TSO on this queue, SW fallback needed */ - default: - rc = -EINVAL; - break; - } - if (rc == -EINVAL) { - rc = efx_tx_tso_fallback(tx_queue, skb); - tx_queue->tso_fallbacks++; - if (rc == 0) - return 0; - } - if (rc) - goto err; -#ifdef EFX_USE_PIO - } else if (skb_len <= efx_piobuf_size && !xmit_more && - efx_tx_may_pio(tx_queue)) { - /* Use PIO for short packets with an empty queue. */ - if (efx_enqueue_skb_pio(tx_queue, skb)) - goto err; - tx_queue->pio_packets++; - data_mapped = true; -#endif + rc = efx_tx_tso_fallback(tx_queue, skb); + tx_queue->tso_fallbacks++; + if (rc == 0) + return 0; + goto err; } else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) { /* Pad short packets or coalesce short fragmented packets. */ if (efx_enqueue_skb_copy(tx_queue, skb)) diff --git a/drivers/net/ethernet/sfc/siena/workarounds.h b/drivers/net/ethernet/sfc/siena/workarounds.h index 815be2d20c4b..42fb143a94ab 100644 --- a/drivers/net/ethernet/sfc/siena/workarounds.h +++ b/drivers/net/ethernet/sfc/siena/workarounds.h @@ -21,12 +21,6 @@ /* Legacy interrupt storm when interrupt fifo fills */ #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA -/* Lockup when writing event block registers at gen2/gen3 */ -#define EFX_EF10_WORKAROUND_35388(efx) \ - (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388) -#define EFX_WORKAROUND_35388(efx) \ - (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx)) - /* Moderation timer access must go through MCDI */ #define EFX_EF10_WORKAROUND_61265(efx) \ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265) -- cgit v1.2.3 From 71ad88f661253f5f2500f6e20c34927722401a13 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:32:08 +0100 Subject: sfc/siena: Rename functions in efx headers to avoid conflicts with sfc When building with allyesconfig there are many identical symbol names. For siena use efx_siena_ as the function and variable prefix to avoid build errors. efx_mtd_remove_partition can become static as it is no longer called from other files. efx_ticks_to_usecs and efx_xmit_done_single are not used in Siena, so they are removed. Several functions are only used inside efx_channels.c for Siena so they can become static. 
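
As a side note on this renaming scheme: for module parameters the user-visible name can stay stable even though the C symbol gains the driver prefix, because module_param_named() separates the two. Below is a minimal, self-contained sketch of that pattern (a hypothetical "demo" module, not part of this series; the real change is the rss_cpus -> efx_siena_rss_cpus hunk in efx.c further down):

  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/moduleparam.h>

  /* Prefixed C symbol avoids duplicate-symbol clashes when two drivers
   * would otherwise define a variable with the same name.
   */
  static unsigned int demo_siena_rss_cpus;

  /* Userspace still sees "rss_cpus", e.g. "modprobe demo rss_cpus=4". */
  module_param_named(rss_cpus, demo_siena_rss_cpus, uint, 0444);
  MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

  static int __init demo_init(void)
  {
          pr_info("demo: rss_cpus=%u\n", demo_siena_rss_cpus);
          return 0;
  }

  static void __exit demo_exit(void)
  {
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");
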
Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx.c | 153 ++++++++++----------- drivers/net/ethernet/sfc/siena/efx.h | 65 ++++----- drivers/net/ethernet/sfc/siena/efx_channels.c | 113 ++++++++-------- drivers/net/ethernet/sfc/siena/efx_channels.h | 67 ++++----- drivers/net/ethernet/sfc/siena/efx_common.c | 158 +++++++++++----------- drivers/net/ethernet/sfc/siena/efx_common.h | 90 ++++++------ drivers/net/ethernet/sfc/siena/enum.h | 2 +- drivers/net/ethernet/sfc/siena/ethtool.c | 12 +- drivers/net/ethernet/sfc/siena/ethtool_common.c | 6 +- drivers/net/ethernet/sfc/siena/farch.c | 41 +++--- drivers/net/ethernet/sfc/siena/mcdi.c | 12 +- drivers/net/ethernet/sfc/siena/mcdi_port_common.c | 6 +- drivers/net/ethernet/sfc/siena/mtd.c | 16 +-- drivers/net/ethernet/sfc/siena/net_driver.h | 17 ++- drivers/net/ethernet/sfc/siena/rx.c | 11 +- drivers/net/ethernet/sfc/siena/rx_common.c | 8 +- drivers/net/ethernet/sfc/siena/rx_common.h | 5 +- drivers/net/ethernet/sfc/siena/selftest.c | 18 +-- drivers/net/ethernet/sfc/siena/siena.c | 24 ++-- drivers/net/ethernet/sfc/siena/siena_sriov.c | 2 +- drivers/net/ethernet/sfc/siena/tx.c | 61 ++------- drivers/net/ethernet/sfc/siena/tx_common.c | 8 +- drivers/net/ethernet/sfc/siena/tx_common.h | 4 +- 23 files changed, 427 insertions(+), 472 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index 08b3e36c34b4..ca41c038f3ab 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -43,11 +43,11 @@ * *************************************************************************/ -module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444); +module_param_named(interrupt_mode, efx_siena_interrupt_mode, uint, 0444); MODULE_PARM_DESC(interrupt_mode, "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); -module_param(rss_cpus, uint, 0444); +module_param_named(rss_cpus, efx_siena_rss_cpus, uint, 0444); MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); /* @@ -174,7 +174,7 @@ static void efx_fini_port(struct efx_nic *efx) efx->port_initialized = false; efx->link_state.up = false; - efx_link_status_changed(efx); + efx_siena_link_status_changed(efx); } static void efx_remove_port(struct efx_nic *efx) @@ -284,11 +284,11 @@ static int efx_probe_nic(struct efx_nic *efx) /* Determine the number of channels and queues by trying * to hook in MSI-X interrupts. 
*/ - rc = efx_probe_interrupts(efx); + rc = efx_siena_probe_interrupts(efx); if (rc) goto fail1; - rc = efx_set_channels(efx); + rc = efx_siena_set_channels(efx); if (rc) goto fail1; @@ -299,7 +299,7 @@ static int efx_probe_nic(struct efx_nic *efx) if (rc == -EAGAIN) /* try again with new max_channels */ - efx_remove_interrupts(efx); + efx_siena_remove_interrupts(efx); } while (rc == -EAGAIN); @@ -310,13 +310,13 @@ static int efx_probe_nic(struct efx_nic *efx) /* Initialise the interrupt moderation settings */ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); - efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, - true); + efx_siena_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, + true, true); return 0; fail2: - efx_remove_interrupts(efx); + efx_siena_remove_interrupts(efx); fail1: efx->type->remove(efx); return rc; @@ -326,7 +326,7 @@ static void efx_remove_nic(struct efx_nic *efx) { netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); - efx_remove_interrupts(efx); + efx_siena_remove_interrupts(efx); efx->type->remove(efx); } @@ -373,7 +373,7 @@ static int efx_probe_all(struct efx_nic *efx) goto fail4; } - rc = efx_probe_channels(efx); + rc = efx_siena_probe_channels(efx); if (rc) goto fail5; @@ -399,7 +399,7 @@ static void efx_remove_all(struct efx_nic *efx) efx_xdp_setup_prog(efx, NULL); rtnl_unlock(); - efx_remove_channels(efx); + efx_siena_remove_channels(efx); efx_remove_filters(efx); #ifdef CONFIG_SFC_SRIOV efx->type->vswitching_remove(efx); @@ -413,7 +413,7 @@ static void efx_remove_all(struct efx_nic *efx) * Interrupt moderation * **************************************************************************/ -unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) +unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) { if (usecs == 0) return 0; @@ -422,18 +422,10 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) return usecs * 1000 / efx->timer_quantum_ns; } -unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) -{ - /* We must round up when converting ticks to microseconds - * because we round down when converting the other way. - */ - return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); -} - /* Set interrupt moderation parameters */ -int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, - unsigned int rx_usecs, bool rx_adaptive, - bool rx_may_override_tx) +int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx) { struct efx_channel *channel; unsigned int timer_max_us; @@ -466,8 +458,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, return 0; } -void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, - unsigned int *rx_usecs, bool *rx_adaptive) +void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive) { *rx_adaptive = efx->irq_rx_adaptive; *rx_usecs = efx->irq_rx_moderation_us; @@ -520,7 +512,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) *************************************************************************/ /* Context: process, rtnl_lock() held. 
*/ -int efx_net_open(struct net_device *net_dev) +static int efx_net_open(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -533,14 +525,14 @@ int efx_net_open(struct net_device *net_dev) return rc; if (efx->phy_mode & PHY_MODE_SPECIAL) return -EBUSY; - if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) + if (efx_mcdi_poll_reboot(efx) && efx_siena_reset(efx, RESET_TYPE_ALL)) return -EIO; /* Notify the kernel of the link state polled during driver load, * before the monitor starts running */ - efx_link_status_changed(efx); + efx_siena_link_status_changed(efx); - efx_start_all(efx); + efx_siena_start_all(efx); if (efx->state == STATE_DISABLED || efx->reset_pending) netif_device_detach(efx->net_dev); efx_selftest_async_start(efx); @@ -551,7 +543,7 @@ int efx_net_open(struct net_device *net_dev) * Note that the kernel will ignore our return code; this method * should really be a void. */ -int efx_net_stop(struct net_device *net_dev) +static int efx_net_stop(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); @@ -559,7 +551,7 @@ int efx_net_stop(struct net_device *net_dev) raw_smp_processor_id()); /* Stop the device and flush all the channels */ - efx_stop_all(efx); + efx_siena_stop_all(efx); return 0; } @@ -587,16 +579,16 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, - .ndo_get_stats64 = efx_net_stats, - .ndo_tx_timeout = efx_watchdog, - .ndo_start_xmit = efx_hard_start_xmit, + .ndo_get_stats64 = efx_siena_net_stats, + .ndo_tx_timeout = efx_siena_watchdog, + .ndo_start_xmit = efx_siena_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = efx_ioctl, - .ndo_change_mtu = efx_change_mtu, - .ndo_set_mac_address = efx_set_mac_address, - .ndo_set_rx_mode = efx_set_rx_mode, - .ndo_set_features = efx_set_features, - .ndo_features_check = efx_features_check, + .ndo_change_mtu = efx_siena_change_mtu, + .ndo_set_mac_address = efx_siena_set_mac_address, + .ndo_set_rx_mode = efx_siena_set_rx_mode, + .ndo_set_features = efx_siena_set_features, + .ndo_features_check = efx_siena_features_check, .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, #ifdef CONFIG_SFC_SRIOV @@ -606,9 +598,9 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_get_vf_config = efx_sriov_get_vf_config, .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, #endif - .ndo_get_phys_port_id = efx_get_phys_port_id, - .ndo_get_phys_port_name = efx_get_phys_port_name, - .ndo_setup_tc = efx_setup_tc, + .ndo_get_phys_port_id = efx_siena_get_phys_port_id, + .ndo_get_phys_port_name = efx_siena_get_phys_port_name, + .ndo_setup_tc = efx_siena_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif @@ -626,10 +618,10 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) return -EINVAL; } - if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) { + if (prog && efx->net_dev->mtu > efx_siena_xdp_max_mtu(efx)) { netif_err(efx, drv, efx->net_dev, "Unable to configure XDP with MTU of %d (max: %d)\n", - efx->net_dev->mtu, efx_xdp_max_mtu(efx)); + efx->net_dev->mtu, efx_siena_xdp_max_mtu(efx)); return -EINVAL; } @@ -663,14 +655,14 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, if (!netif_running(dev)) return -EINVAL; - return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); + return 
efx_siena_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); } static void efx_update_name(struct efx_nic *efx) { strcpy(efx->name, efx->net_dev->name); - efx_mtd_rename(efx); - efx_set_channel_names(efx); + efx_siena_mtd_rename(efx); + efx_siena_set_channel_names(efx); } static int efx_netdev_event(struct notifier_block *this, @@ -708,7 +700,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->netdev_ops = &efx_netdev_ops; if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) net_dev->priv_flags |= IFF_UNICAST_FLT; - net_dev->ethtool_ops = &efx_ethtool_ops; + net_dev->ethtool_ops = &efx_siena_ethtool_ops; netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS); net_dev->min_mtu = EFX_MIN_MTU; net_dev->max_mtu = EFX_MAX_MTU; @@ -742,7 +734,7 @@ static int efx_register_netdev(struct efx_nic *efx) efx_for_each_channel(channel, efx) { struct efx_tx_queue *tx_queue; efx_for_each_channel_tx_queue(tx_queue, channel) - efx_init_tx_queue_core_txq(tx_queue); + efx_siena_init_tx_queue_core_txq(tx_queue); } efx_associate(efx); @@ -756,7 +748,7 @@ static int efx_register_netdev(struct efx_nic *efx) goto fail_registered; } - efx_init_mcdi_logging(efx); + efx_siena_init_mcdi_logging(efx); return 0; @@ -780,7 +772,7 @@ static void efx_unregister_netdev(struct efx_nic *efx) if (efx_dev_registered(efx)) { strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); - efx_fini_mcdi_logging(efx); + efx_siena_fini_mcdi_logging(efx); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); unregister_netdev(efx->net_dev); } @@ -807,7 +799,7 @@ static const struct pci_device_id efx_pci_table[] = { * **************************************************************************/ -void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) +void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats) { u64 n_rx_nodesc_trunc = 0; struct efx_channel *channel; @@ -833,14 +825,14 @@ static void efx_pci_remove_main(struct efx_nic *efx) * are not READY. 
*/ BUG_ON(efx->state == STATE_READY); - efx_flush_reset_workqueue(efx); + efx_siena_flush_reset_workqueue(efx); - efx_disable_interrupts(efx); - efx_clear_interrupt_affinity(efx); + efx_siena_disable_interrupts(efx); + efx_siena_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); - efx_fini_napi(efx); + efx_siena_fini_napi(efx); efx_remove_all(efx); } @@ -860,7 +852,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) rtnl_lock(); efx_dissociate(efx); dev_close(efx->net_dev); - efx_disable_interrupts(efx); + efx_siena_disable_interrupts(efx); efx->state = STATE_UNINIT; rtnl_unlock(); @@ -869,14 +861,14 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_unregister_netdev(efx); - efx_mtd_remove(efx); + efx_siena_mtd_remove(efx); efx_pci_remove_main(efx); - efx_fini_io(efx); + efx_siena_fini_io(efx); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); - efx_fini_struct(efx); + efx_siena_fini_struct(efx); free_netdev(efx->net_dev); pci_disable_pcie_error_reporting(pci_dev); @@ -929,7 +921,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) if (rc) goto fail1; - efx_init_napi(efx); + efx_siena_init_napi(efx); down_write(&efx->filter_sem); rc = efx->type->init(efx); @@ -950,22 +942,22 @@ static int efx_pci_probe_main(struct efx_nic *efx) if (rc) goto fail5; - efx_set_interrupt_affinity(efx); - rc = efx_enable_interrupts(efx); + efx_siena_set_interrupt_affinity(efx); + rc = efx_siena_enable_interrupts(efx); if (rc) goto fail6; return 0; fail6: - efx_clear_interrupt_affinity(efx); + efx_siena_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); fail5: efx_fini_port(efx); fail4: efx->type->fini(efx); fail3: - efx_fini_napi(efx); + efx_siena_fini_napi(efx); efx_remove_all(efx); fail1: return rc; @@ -1046,7 +1038,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); - rc = efx_init_struct(efx, pci_dev, net_dev); + rc = efx_siena_init_struct(efx, pci_dev, net_dev); if (rc) goto fail1; @@ -1056,8 +1048,9 @@ static int efx_pci_probe(struct pci_dev *pci_dev, efx_probe_vpd_strings(efx); /* Set up basic I/O (BAR mappings etc) */ - rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask, - efx->type->mem_map_size(efx)); + rc = efx_siena_init_io(efx, efx->type->mem_bar(efx), + efx->type->max_dma_mask, + efx->type->mem_map_size(efx)); if (rc) goto fail2; @@ -1101,9 +1094,9 @@ static int efx_pci_probe(struct pci_dev *pci_dev, return 0; fail3: - efx_fini_io(efx); + efx_siena_fini_io(efx); fail2: - efx_fini_struct(efx); + efx_siena_fini_struct(efx); fail1: WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); @@ -1142,8 +1135,8 @@ static int efx_pm_freeze(struct device *dev) efx_device_detach_sync(efx); - efx_stop_all(efx); - efx_disable_interrupts(efx); + efx_siena_stop_all(efx); + efx_siena_disable_interrupts(efx); } rtnl_unlock(); @@ -1159,7 +1152,7 @@ static int efx_pm_thaw(struct device *dev) rtnl_lock(); if (efx->state != STATE_DISABLED) { - rc = efx_enable_interrupts(efx); + rc = efx_siena_enable_interrupts(efx); if (rc) goto fail; @@ -1167,7 +1160,7 @@ static int efx_pm_thaw(struct device *dev) efx_mcdi_port_reconfigure(efx); mutex_unlock(&efx->mac_lock); - efx_start_all(efx); + efx_siena_start_all(efx); efx_device_attach_if_not_resetting(efx); @@ -1179,7 +1172,7 @@ static int efx_pm_thaw(struct device *dev) rtnl_unlock(); /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ - efx_queue_reset_work(efx); + efx_siena_queue_reset_work(efx); return 0; @@ -1255,7 +1248,7 @@ static struct pci_driver efx_pci_driver = { .probe = efx_pci_probe, .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, - .err_handler = &efx_err_handlers, + .err_handler = &efx_siena_err_handlers, #ifdef CONFIG_SFC_SRIOV .sriov_configure = efx_pci_sriov_configure, #endif @@ -1277,7 +1270,7 @@ static int __init efx_init_module(void) if (rc) goto err_notifier; - rc = efx_create_reset_workqueue(); + rc = efx_siena_create_reset_workqueue(); if (rc) goto err_reset; @@ -1288,7 +1281,7 @@ static int __init efx_init_module(void) return 0; err_pci: - efx_destroy_reset_workqueue(); + efx_siena_destroy_reset_workqueue(); err_reset: unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: @@ -1300,7 +1293,7 @@ static void __exit efx_exit_module(void) printk(KERN_INFO "Solarflare NET driver unloading\n"); pci_unregister_driver(&efx_pci_driver); - efx_destroy_reset_workqueue(); + efx_siena_destroy_reset_workqueue(); unregister_netdevice_notifier(&efx_netdev_notifier); } diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h index 962c6b66eea7..a4f9e6e962b0 100644 --- a/drivers/net/ethernet/sfc/siena/efx.h +++ b/drivers/net/ethernet/sfc/siena/efx.h @@ -12,36 +12,28 @@ #include "net_driver.h" #include "filter.h" -int efx_net_open(struct net_device *net_dev); -int efx_net_stop(struct net_device *net_dev); - /* TX */ -void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); -netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, - struct net_device *net_dev); -netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); +netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev); +netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, + struct sk_buff *skb); static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue, - __efx_enqueue_skb, tx_queue, skb); + __efx_siena_enqueue_skb, tx_queue, skb); } -void efx_xmit_done_single(struct efx_tx_queue *tx_queue); -int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - void *type_data); -extern unsigned int efx_piobuf_size; +int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data); /* RX */ -void __efx_rx_packet(struct efx_channel *channel); -void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, - unsigned int n_frags, unsigned int len, u16 flags); +void __efx_siena_rx_packet(struct efx_channel *channel); +void 
efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags); static inline void efx_rx_flush_packet(struct efx_channel *channel) { if (channel->rx_pkt_n_frags) - __efx_rx_packet(channel); -} -static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix) -{ - return true; + __efx_siena_rx_packet(channel); } /* Maximum number of TCP segments we support for soft-TSO */ @@ -156,34 +148,33 @@ static inline bool efx_rss_active(struct efx_rss_context *ctx) } /* Ethtool support */ -extern const struct ethtool_ops efx_ethtool_ops; +extern const struct ethtool_ops efx_siena_ethtool_ops; /* Global */ -unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); -unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks); -int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, - unsigned int rx_usecs, bool rx_adaptive, - bool rx_may_override_tx); -void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, - unsigned int *rx_usecs, bool *rx_adaptive); +unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); +int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx); +void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive); /* Update the generic software stats in the passed stats array */ -void efx_update_sw_stats(struct efx_nic *efx, u64 *stats); +void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats); /* MTD */ #ifdef CONFIG_SFC_MTD -int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, - size_t n_parts, size_t sizeof_part); +int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part); static inline int efx_mtd_probe(struct efx_nic *efx) { return efx->type->mtd_probe(efx); } -void efx_mtd_rename(struct efx_nic *efx); -void efx_mtd_remove(struct efx_nic *efx); +void efx_siena_mtd_rename(struct efx_nic *efx); +void efx_siena_mtd_remove(struct efx_nic *efx); #else static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } -static inline void efx_mtd_rename(struct efx_nic *efx) {} -static inline void efx_mtd_remove(struct efx_nic *efx) {} +static inline void efx_siena_mtd_rename(struct efx_nic *efx) {} +static inline void efx_siena_mtd_remove(struct efx_nic *efx) {} #endif #ifdef CONFIG_SFC_SRIOV @@ -221,7 +212,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) return true; } -int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, - bool flush); +int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, + struct xdp_frame **xdpfs, bool flush); #endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 3f28f9861dfa..b04affb23f72 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -25,7 +25,7 @@ * 1 => MSI * 2 => legacy */ -unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX; +unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX; /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), * i.e. 
the number of CPUs among which we may distribute simultaneous @@ -34,7 +34,7 @@ unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX; * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. * The default (0) means to assign an interrupt to each core. */ -unsigned int rss_cpus; +unsigned int efx_siena_rss_cpus; static unsigned int irq_adapt_low_thresh = 8000; module_param(irq_adapt_low_thresh, uint, 0644); @@ -89,8 +89,8 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) { unsigned int count; - if (rss_cpus) { - count = rss_cpus; + if (efx_siena_rss_cpus) { + count = efx_siena_rss_cpus; } else { count = count_online_cores(efx, true); @@ -100,7 +100,8 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) } if (count > EFX_MAX_RX_QUEUES) { - netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn, + netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus, + warn, "Reducing number of rx queues from %u to %u.\n", count, EFX_MAX_RX_QUEUES); count = EFX_MAX_RX_QUEUES; @@ -249,7 +250,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, /* Probe the number and type of interrupts we are able to obtain, and * the resulting numbers of channels and RX queues. */ -int efx_probe_interrupts(struct efx_nic *efx) +int efx_siena_probe_interrupts(struct efx_nic *efx) { unsigned int extra_channels = 0; unsigned int rss_spread; @@ -361,7 +362,7 @@ int efx_probe_interrupts(struct efx_nic *efx) } #if defined(CONFIG_SMP) -void efx_set_interrupt_affinity(struct efx_nic *efx) +void efx_siena_set_interrupt_affinity(struct efx_nic *efx) { const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus); struct efx_channel *channel; @@ -380,7 +381,7 @@ void efx_set_interrupt_affinity(struct efx_nic *efx) } } -void efx_clear_interrupt_affinity(struct efx_nic *efx) +void efx_siena_clear_interrupt_affinity(struct efx_nic *efx) { struct efx_channel *channel; @@ -389,17 +390,17 @@ void efx_clear_interrupt_affinity(struct efx_nic *efx) } #else void -efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused) { } void -efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused) { } #endif /* CONFIG_SMP */ -void efx_remove_interrupts(struct efx_nic *efx) +void efx_siena_remove_interrupts(struct efx_nic *efx) { struct efx_channel *channel; @@ -422,7 +423,7 @@ void efx_remove_interrupts(struct efx_nic *efx) * is reset, the memory buffer will be reused; this guards against * errors during channel reset and also simplifies interrupt handling. 
*/ -int efx_probe_eventq(struct efx_channel *channel) +static int efx_probe_eventq(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; unsigned long entries; @@ -441,7 +442,7 @@ int efx_probe_eventq(struct efx_channel *channel) } /* Prepare channel's event queue */ -int efx_init_eventq(struct efx_channel *channel) +static int efx_init_eventq(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; int rc; @@ -461,7 +462,7 @@ int efx_init_eventq(struct efx_channel *channel) } /* Enable event queue processing and NAPI */ -void efx_start_eventq(struct efx_channel *channel) +void efx_siena_start_eventq(struct efx_channel *channel) { netif_dbg(channel->efx, ifup, channel->efx->net_dev, "chan %d start event queue\n", channel->channel); @@ -475,7 +476,7 @@ void efx_start_eventq(struct efx_channel *channel) } /* Disable event queue processing and NAPI */ -void efx_stop_eventq(struct efx_channel *channel) +void efx_siena_stop_eventq(struct efx_channel *channel) { if (!channel->enabled) return; @@ -484,7 +485,7 @@ void efx_stop_eventq(struct efx_channel *channel) channel->enabled = false; } -void efx_fini_eventq(struct efx_channel *channel) +static void efx_fini_eventq(struct efx_channel *channel) { if (!channel->eventq_init) return; @@ -496,7 +497,7 @@ void efx_fini_eventq(struct efx_channel *channel) channel->eventq_init = false; } -void efx_remove_eventq(struct efx_channel *channel) +static void efx_remove_eventq(struct efx_channel *channel) { netif_dbg(channel->efx, drv, channel->efx->net_dev, "chan %d remove event queue\n", channel->channel); @@ -562,7 +563,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) return channel; } -int efx_init_channels(struct efx_nic *efx) +int efx_siena_init_channels(struct efx_nic *efx) { unsigned int i; @@ -576,7 +577,7 @@ int efx_init_channels(struct efx_nic *efx) /* Higher numbered interrupt modes are less capable! 
*/ efx->interrupt_mode = min(efx->type->min_interrupt_mode, - efx_interrupt_mode); + efx_siena_interrupt_mode); efx->max_channels = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS; @@ -584,7 +585,7 @@ int efx_init_channels(struct efx_nic *efx) return 0; } -void efx_fini_channels(struct efx_nic *efx) +void efx_siena_fini_channels(struct efx_nic *efx) { unsigned int i; @@ -672,7 +673,7 @@ static int efx_probe_channel(struct efx_channel *channel) return 0; fail: - efx_remove_channel(channel); + efx_siena_remove_channel(channel); return rc; } @@ -700,7 +701,7 @@ static void efx_get_channel_name(struct efx_channel *channel, char *buf, snprintf(buf, len, "%s%s-%d", efx->name, type, number); } -void efx_set_channel_names(struct efx_nic *efx) +void efx_siena_set_channel_names(struct efx_nic *efx) { struct efx_channel *channel; @@ -710,7 +711,7 @@ void efx_set_channel_names(struct efx_nic *efx) sizeof(efx->msi_context[0].name)); } -int efx_probe_channels(struct efx_nic *efx) +int efx_siena_probe_channels(struct efx_nic *efx) { struct efx_channel *channel; int rc; @@ -732,16 +733,16 @@ int efx_probe_channels(struct efx_nic *efx) goto fail; } } - efx_set_channel_names(efx); + efx_siena_set_channel_names(efx); return 0; fail: - efx_remove_channels(efx); + efx_siena_remove_channels(efx); return rc; } -void efx_remove_channel(struct efx_channel *channel) +void efx_siena_remove_channel(struct efx_channel *channel) { struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; @@ -757,12 +758,12 @@ void efx_remove_channel(struct efx_channel *channel) channel->type->post_remove(channel); } -void efx_remove_channels(struct efx_nic *efx) +void efx_siena_remove_channels(struct efx_nic *efx) { struct efx_channel *channel; efx_for_each_channel(channel, efx) - efx_remove_channel(channel); + efx_siena_remove_channel(channel); kfree(efx->xdp_tx_queues); } @@ -846,7 +847,13 @@ static void efx_set_xdp_channels(struct efx_nic *efx) } } -int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) +static int efx_soft_enable_interrupts(struct efx_nic *efx); +static void efx_soft_disable_interrupts(struct efx_nic *efx); +static void efx_init_napi_channel(struct efx_channel *channel); +static void efx_fini_napi_channel(struct efx_channel *channel); + +int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries, + u32 txq_entries) { struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; unsigned int i, next_buffer_table = 0; @@ -880,7 +887,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) } efx_device_detach_sync(efx); - efx_stop_all(efx); + efx_siena_stop_all(efx); efx_soft_disable_interrupts(efx); /* Clone channels (where possible) */ @@ -924,7 +931,7 @@ out: channel = other_channel[i]; if (channel && channel->type->copy) { efx_fini_napi_channel(channel); - efx_remove_channel(channel); + efx_siena_remove_channel(channel); kfree(channel); } } @@ -934,9 +941,9 @@ out: rc = rc ? 
rc : rc2; netif_err(efx, drv, efx->net_dev, "unable to restart interrupts on channel reallocation\n"); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); } else { - efx_start_all(efx); + efx_siena_start_all(efx); efx_device_attach_if_not_resetting(efx); } return rc; @@ -950,7 +957,7 @@ rollback: goto out; } -int efx_set_channels(struct efx_nic *efx) +int efx_siena_set_channels(struct efx_nic *efx) { struct efx_channel *channel; int rc; @@ -995,7 +1002,7 @@ static bool efx_default_channel_want_txqs(struct efx_channel *channel) * START/STOP *************/ -int efx_soft_enable_interrupts(struct efx_nic *efx) +static int efx_soft_enable_interrupts(struct efx_nic *efx) { struct efx_channel *channel, *end_channel; int rc; @@ -1011,7 +1018,7 @@ int efx_soft_enable_interrupts(struct efx_nic *efx) if (rc) goto fail; } - efx_start_eventq(channel); + efx_siena_start_eventq(channel); } efx_mcdi_mode_event(efx); @@ -1022,7 +1029,7 @@ fail: efx_for_each_channel(channel, efx) { if (channel == end_channel) break; - efx_stop_eventq(channel); + efx_siena_stop_eventq(channel); if (!channel->type->keep_eventq) efx_fini_eventq(channel); } @@ -1030,7 +1037,7 @@ fail: return rc; } -void efx_soft_disable_interrupts(struct efx_nic *efx) +static void efx_soft_disable_interrupts(struct efx_nic *efx) { struct efx_channel *channel; @@ -1049,7 +1056,7 @@ void efx_soft_disable_interrupts(struct efx_nic *efx) if (channel->irq) synchronize_irq(channel->irq); - efx_stop_eventq(channel); + efx_siena_stop_eventq(channel); if (!channel->type->keep_eventq) efx_fini_eventq(channel); } @@ -1058,7 +1065,7 @@ void efx_soft_disable_interrupts(struct efx_nic *efx) efx_mcdi_flush_async(efx); } -int efx_enable_interrupts(struct efx_nic *efx) +int efx_siena_enable_interrupts(struct efx_nic *efx) { struct efx_channel *channel, *end_channel; int rc; @@ -1101,7 +1108,7 @@ fail: return rc; } -void efx_disable_interrupts(struct efx_nic *efx) +void efx_siena_disable_interrupts(struct efx_nic *efx) { struct efx_channel *channel; @@ -1115,7 +1122,7 @@ void efx_disable_interrupts(struct efx_nic *efx) efx->type->irq_disable_non_ev(efx); } -void efx_start_channels(struct efx_nic *efx) +void efx_siena_start_channels(struct efx_nic *efx) { struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; @@ -1130,16 +1137,16 @@ void efx_start_channels(struct efx_nic *efx) efx_for_each_channel_rx_queue(rx_queue, channel) { efx_init_rx_queue(rx_queue); atomic_inc(&efx->active_queues); - efx_stop_eventq(channel); + efx_siena_stop_eventq(channel); efx_fast_push_rx_descriptors(rx_queue, false); - efx_start_eventq(channel); + efx_siena_start_eventq(channel); } WARN_ON(channel->rx_pkt_n_frags); } } -void efx_stop_channels(struct efx_nic *efx) +void efx_siena_stop_channels(struct efx_nic *efx) { struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; @@ -1160,8 +1167,8 @@ void efx_stop_channels(struct efx_nic *efx) * temporarily. 
*/ if (efx_channel_has_rx_queue(channel)) { - efx_stop_eventq(channel); - efx_start_eventq(channel); + efx_siena_stop_eventq(channel); + efx_siena_start_eventq(channel); } } @@ -1311,7 +1318,7 @@ static int efx_poll(struct napi_struct *napi, int budget) return spent; } -void efx_init_napi_channel(struct efx_channel *channel) +static void efx_init_napi_channel(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; @@ -1320,7 +1327,7 @@ void efx_init_napi_channel(struct efx_channel *channel) napi_weight); } -void efx_init_napi(struct efx_nic *efx) +void efx_siena_init_napi(struct efx_nic *efx) { struct efx_channel *channel; @@ -1328,7 +1335,7 @@ void efx_init_napi(struct efx_nic *efx) efx_init_napi_channel(channel); } -void efx_fini_napi_channel(struct efx_channel *channel) +static void efx_fini_napi_channel(struct efx_channel *channel) { if (channel->napi_dev) netif_napi_del(&channel->napi_str); @@ -1336,7 +1343,7 @@ void efx_fini_napi_channel(struct efx_channel *channel) channel->napi_dev = NULL; } -void efx_fini_napi(struct efx_nic *efx) +void efx_siena_fini_napi(struct efx_nic *efx) { struct efx_channel *channel; @@ -1353,13 +1360,13 @@ static int efx_channel_dummy_op_int(struct efx_channel *channel) return 0; } -void efx_channel_dummy_op_void(struct efx_channel *channel) +void efx_siena_channel_dummy_op_void(struct efx_channel *channel) { } static const struct efx_channel_type efx_default_channel_type = { .pre_probe = efx_channel_dummy_op_int, - .post_remove = efx_channel_dummy_op_void, + .post_remove = efx_siena_channel_dummy_op_void, .get_name = efx_get_channel_name, .copy = efx_copy_channel, .want_txqs = efx_default_channel_want_txqs, diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.h b/drivers/net/ethernet/sfc/siena/efx_channels.h index 64abb99a56b8..10d78049b885 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.h +++ b/drivers/net/ethernet/sfc/siena/efx_channels.h @@ -11,42 +11,35 @@ #ifndef EFX_CHANNELS_H #define EFX_CHANNELS_H -extern unsigned int efx_interrupt_mode; -extern unsigned int rss_cpus; - -int efx_probe_interrupts(struct efx_nic *efx); -void efx_remove_interrupts(struct efx_nic *efx); -int efx_soft_enable_interrupts(struct efx_nic *efx); -void efx_soft_disable_interrupts(struct efx_nic *efx); -int efx_enable_interrupts(struct efx_nic *efx); -void efx_disable_interrupts(struct efx_nic *efx); - -void efx_set_interrupt_affinity(struct efx_nic *efx); -void efx_clear_interrupt_affinity(struct efx_nic *efx); - -int efx_probe_eventq(struct efx_channel *channel); -int efx_init_eventq(struct efx_channel *channel); -void efx_start_eventq(struct efx_channel *channel); -void efx_stop_eventq(struct efx_channel *channel); -void efx_fini_eventq(struct efx_channel *channel); -void efx_remove_eventq(struct efx_channel *channel); - -int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); -void efx_set_channel_names(struct efx_nic *efx); -int efx_init_channels(struct efx_nic *efx); -int efx_probe_channels(struct efx_nic *efx); -int efx_set_channels(struct efx_nic *efx); -void efx_remove_channel(struct efx_channel *channel); -void efx_remove_channels(struct efx_nic *efx); -void efx_fini_channels(struct efx_nic *efx); -void efx_start_channels(struct efx_nic *efx); -void efx_stop_channels(struct efx_nic *efx); - -void efx_init_napi_channel(struct efx_channel *channel); -void efx_init_napi(struct efx_nic *efx); -void efx_fini_napi_channel(struct efx_channel *channel); -void efx_fini_napi(struct efx_nic *efx); - -void 
efx_channel_dummy_op_void(struct efx_channel *channel); +extern unsigned int efx_siena_interrupt_mode; +extern unsigned int efx_siena_rss_cpus; + +int efx_siena_probe_interrupts(struct efx_nic *efx); +void efx_siena_remove_interrupts(struct efx_nic *efx); +int efx_siena_enable_interrupts(struct efx_nic *efx); +void efx_siena_disable_interrupts(struct efx_nic *efx); + +void efx_siena_set_interrupt_affinity(struct efx_nic *efx); +void efx_siena_clear_interrupt_affinity(struct efx_nic *efx); + +void efx_siena_start_eventq(struct efx_channel *channel); +void efx_siena_stop_eventq(struct efx_channel *channel); + +int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries, + u32 txq_entries); +void efx_siena_set_channel_names(struct efx_nic *efx); +int efx_siena_init_channels(struct efx_nic *efx); +int efx_siena_probe_channels(struct efx_nic *efx); +int efx_siena_set_channels(struct efx_nic *efx); +void efx_siena_remove_channel(struct efx_channel *channel); +void efx_siena_remove_channels(struct efx_nic *efx); +void efx_siena_fini_channels(struct efx_nic *efx); +void efx_siena_start_channels(struct efx_nic *efx); +void efx_siena_stop_channels(struct efx_nic *efx); + +void efx_siena_init_napi(struct efx_nic *efx); +void efx_siena_fini_napi(struct efx_nic *efx); + +void efx_siena_channel_dummy_op_void(struct efx_channel *channel); #endif diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index f6577e74d6e6..fb6fb345cc56 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -110,7 +110,7 @@ const char *const efx_loopback_mode_names[] = { */ static struct workqueue_struct *reset_workqueue; -int efx_create_reset_workqueue(void) +int efx_siena_create_reset_workqueue(void) { reset_workqueue = create_singlethread_workqueue("sfc_reset"); if (!reset_workqueue) { @@ -121,17 +121,17 @@ int efx_create_reset_workqueue(void) return 0; } -void efx_queue_reset_work(struct efx_nic *efx) +void efx_siena_queue_reset_work(struct efx_nic *efx) { queue_work(reset_workqueue, &efx->reset_work); } -void efx_flush_reset_workqueue(struct efx_nic *efx) +void efx_siena_flush_reset_workqueue(struct efx_nic *efx) { cancel_work_sync(&efx->reset_work); } -void efx_destroy_reset_workqueue(void) +void efx_siena_destroy_reset_workqueue(void) { if (reset_workqueue) { destroy_workqueue(reset_workqueue); @@ -142,7 +142,7 @@ void efx_destroy_reset_workqueue(void) /* We assume that efx->type->reconfigure_mac will always try to sync RX * filters and therefore needs to read-lock the filter table against freeing */ -void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only) +void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only) { if (efx->type->reconfigure_mac) { down_read(&efx->filter_sem); @@ -161,11 +161,11 @@ static void efx_mac_work(struct work_struct *data) mutex_lock(&efx->mac_lock); if (efx->port_enabled) - efx_mac_reconfigure(efx, false); + efx_siena_mac_reconfigure(efx, false); mutex_unlock(&efx->mac_lock); } -int efx_set_mac_address(struct net_device *net_dev, void *data) +int efx_siena_set_mac_address(struct net_device *net_dev, void *data) { struct efx_nic *efx = netdev_priv(net_dev); struct sockaddr *addr = data; @@ -193,14 +193,14 @@ int efx_set_mac_address(struct net_device *net_dev, void *data) /* Reconfigure the MAC */ mutex_lock(&efx->mac_lock); - efx_mac_reconfigure(efx, false); + efx_siena_mac_reconfigure(efx, false); mutex_unlock(&efx->mac_lock); return 0; } /* Context: netif_addr_lock 
held, BHs disabled. */ -void efx_set_rx_mode(struct net_device *net_dev) +void efx_siena_set_rx_mode(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); @@ -209,7 +209,7 @@ void efx_set_rx_mode(struct net_device *net_dev) /* Otherwise efx_start_port() will do this */ } -int efx_set_features(struct net_device *net_dev, netdev_features_t data) +int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -226,10 +226,10 @@ int efx_set_features(struct net_device *net_dev, netdev_features_t data) */ if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS)) { - /* efx_set_rx_mode() will schedule MAC work to update filters + /* efx_siena_set_rx_mode() will schedule MAC work to update filters * when a new features are finally set in net_dev. */ - efx_set_rx_mode(net_dev); + efx_siena_set_rx_mode(net_dev); } return 0; @@ -239,7 +239,7 @@ int efx_set_features(struct net_device *net_dev, netdev_features_t data) * netif_carrier_on/off) of the link status, and also maintains the * link status's stop on the port's TX queue. */ -void efx_link_status_changed(struct efx_nic *efx) +void efx_siena_link_status_changed(struct efx_nic *efx) { struct efx_link_state *link_state = &efx->link_state; @@ -270,7 +270,7 @@ void efx_link_status_changed(struct efx_nic *efx) netif_info(efx, link, efx->net_dev, "link down\n"); } -unsigned int efx_xdp_max_mtu(struct efx_nic *efx) +unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx) { /* The maximum MTU that we can fit in a single page, allowing for * framing, overhead and XDP headroom + tailroom. @@ -283,7 +283,7 @@ unsigned int efx_xdp_max_mtu(struct efx_nic *efx) } /* Context: process, rtnl_lock() held. */ -int efx_change_mtu(struct net_device *net_dev, int new_mtu) +int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -293,24 +293,24 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu) return rc; if (rtnl_dereference(efx->xdp_prog) && - new_mtu > efx_xdp_max_mtu(efx)) { + new_mtu > efx_siena_xdp_max_mtu(efx)) { netif_err(efx, drv, efx->net_dev, "Requested MTU of %d too big for XDP (max: %d)\n", - new_mtu, efx_xdp_max_mtu(efx)); + new_mtu, efx_siena_xdp_max_mtu(efx)); return -EINVAL; } netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); efx_device_detach_sync(efx); - efx_stop_all(efx); + efx_siena_stop_all(efx); mutex_lock(&efx->mac_lock); net_dev->mtu = new_mtu; - efx_mac_reconfigure(efx, true); + efx_siena_mac_reconfigure(efx, true); mutex_unlock(&efx->mac_lock); - efx_start_all(efx); + efx_siena_start_all(efx); efx_device_attach_if_not_resetting(efx); return 0; } @@ -342,10 +342,10 @@ static void efx_monitor(struct work_struct *data) mutex_unlock(&efx->mac_lock); } - efx_start_monitor(efx); + efx_siena_start_monitor(efx); } -void efx_start_monitor(struct efx_nic *efx) +void efx_siena_start_monitor(struct efx_nic *efx) { if (efx->type->monitor) queue_delayed_work(efx->workqueue, &efx->monitor_work, @@ -432,7 +432,7 @@ static void efx_start_datapath(struct efx_nic *efx) efx->txq_wake_thresh = efx->txq_stop_thresh / 2; /* Initialise the channels */ - efx_start_channels(efx); + efx_siena_start_channels(efx); efx_ptp_start_datapath(efx); @@ -447,7 +447,7 @@ static void efx_stop_datapath(struct efx_nic *efx) efx_ptp_stop_datapath(efx); - efx_stop_channels(efx); + efx_siena_stop_channels(efx); } 
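[Editor's note, not part of the patch] The rename above is purely mechanical: callers keep the same detach/stop ... reconfigure ... start/attach bracket that efx_siena_change_mtu() and efx_siena_reset() use, only under the new efx_siena_ names. A minimal illustrative sketch of that bracket, assuming the caller holds the RTNL lock as the real helpers require (the function name example_reconfigure is hypothetical):

	static int example_reconfigure(struct efx_nic *efx)
	{
		/* Quiesce the netdev queues before touching the datapath. */
		efx_device_detach_sync(efx);
		efx_siena_stop_all(efx);		/* formerly efx_stop_all() */

		/* MAC settings are protected by mac_lock. */
		mutex_lock(&efx->mac_lock);
		efx_siena_mac_reconfigure(efx, false);	/* formerly efx_mac_reconfigure() */
		mutex_unlock(&efx->mac_lock);

		efx_siena_start_all(efx);		/* formerly efx_start_all() */
		efx_device_attach_if_not_resetting(efx);
		return 0;
	}
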
/************************************************************************** @@ -459,13 +459,13 @@ static void efx_stop_datapath(struct efx_nic *efx) /* Equivalent to efx_link_set_advertising with all-zeroes, except does not * force the Autoneg bit on. */ -void efx_link_clear_advertising(struct efx_nic *efx) +void efx_siena_link_clear_advertising(struct efx_nic *efx) { bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); } -void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) +void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) { efx->wanted_fc = wanted_fc; if (efx->link_advertising[0]) { @@ -489,7 +489,7 @@ static void efx_start_port(struct efx_nic *efx) efx->port_enabled = true; /* Ensure MAC ingress/egress is enabled */ - efx_mac_reconfigure(efx, false); + efx_siena_mac_reconfigure(efx, false); mutex_unlock(&efx->mac_lock); } @@ -525,7 +525,7 @@ static void efx_stop_port(struct efx_nic *efx) * is safe to call multiple times, so long as the NIC is not disabled. * Requires the RTNL lock. */ -void efx_start_all(struct efx_nic *efx) +void efx_siena_start_all(struct efx_nic *efx) { EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->state == STATE_DISABLED); @@ -541,14 +541,14 @@ void efx_start_all(struct efx_nic *efx) efx_start_datapath(efx); /* Start the hardware monitor if there is one */ - efx_start_monitor(efx); + efx_siena_start_monitor(efx); /* Link state detection is normally event-driven; we have * to poll now because we could have missed a change */ mutex_lock(&efx->mac_lock); if (efx_mcdi_phy_poll(efx)) - efx_link_status_changed(efx); + efx_siena_link_status_changed(efx); mutex_unlock(&efx->mac_lock); if (efx->type->start_stats) { @@ -565,7 +565,7 @@ void efx_start_all(struct efx_nic *efx) * times with the NIC in almost any state, but interrupts should be * enabled. Requires the RTNL lock. */ -void efx_stop_all(struct efx_nic *efx) +void efx_siena_stop_all(struct efx_nic *efx) { EFX_ASSERT_RESET_SERIALISED(efx); @@ -598,7 +598,8 @@ void efx_stop_all(struct efx_nic *efx) } /* Context: process, dev_base_lock or RTNL held, non-blocking. */ -void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) +void efx_siena_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) { struct efx_nic *efx = netdev_priv(net_dev); @@ -614,7 +615,7 @@ void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) * * Callers must hold the mac_lock */ -int __efx_reconfigure_port(struct efx_nic *efx) +int __efx_siena_reconfigure_port(struct efx_nic *efx) { enum efx_phy_mode phy_mode; int rc = 0; @@ -640,14 +641,14 @@ int __efx_reconfigure_port(struct efx_nic *efx) /* Reinitialise the MAC to pick up new PHY settings, even if the port is * disabled. */ -int efx_reconfigure_port(struct efx_nic *efx) +int efx_siena_reconfigure_port(struct efx_nic *efx) { int rc; EFX_ASSERT_RESET_SERIALISED(efx); mutex_lock(&efx->mac_lock); - rc = __efx_reconfigure_port(efx); + rc = __efx_siena_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); return rc; @@ -682,7 +683,7 @@ out: * Returns 0 if the recovery mechanisms are unsuccessful. * Returns a non-zero value otherwise. 
*/ -int efx_try_recovery(struct efx_nic *efx) +int efx_siena_try_recovery(struct efx_nic *efx) { #ifdef CONFIG_EEH /* A PCI error can occur and not be seen by EEH because nothing @@ -704,15 +705,15 @@ int efx_try_recovery(struct efx_nic *efx) /* Tears down the entire software state and most of the hardware state * before reset. */ -void efx_reset_down(struct efx_nic *efx, enum reset_type method) +void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method) { EFX_ASSERT_RESET_SERIALISED(efx); if (method == RESET_TYPE_MCDI_TIMEOUT) efx->type->prepare_flr(efx); - efx_stop_all(efx); - efx_disable_interrupts(efx); + efx_siena_stop_all(efx); + efx_siena_disable_interrupts(efx); mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); @@ -721,7 +722,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) } /* Context: netif_tx_lock held, BHs disabled. */ -void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) +void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue) { struct efx_nic *efx = netdev_priv(net_dev); @@ -729,16 +730,16 @@ void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) "TX stuck with port_enabled=%d: resetting channels\n", efx->port_enabled); - efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); + efx_siena_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); } /* This function will always ensure that the locks acquired in - * efx_reset_down() are released. A failure return code indicates + * efx_siena_reset_down() are released. A failure return code indicates * that we were unable to reinitialise the hardware, and the * driver should be disabled. If ok is false, then the rx and tx * engines are not restarted, pending a RESET_DISABLE. */ -int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) +int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) { int rc; @@ -765,7 +766,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) "could not restore PHY settings\n"); } - rc = efx_enable_interrupts(efx); + rc = efx_siena_enable_interrupts(efx); if (rc) goto fail; @@ -787,7 +788,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) mutex_unlock(&efx->mac_lock); - efx_start_all(efx); + efx_siena_start_all(efx); if (efx->type->udp_tnl_push_ports) efx->type->udp_tnl_push_ports(efx); @@ -809,7 +810,7 @@ fail: * * Caller must hold the rtnl_lock. */ -int efx_reset(struct efx_nic *efx, enum reset_type method) +int efx_siena_reset(struct efx_nic *efx, enum reset_type method) { int rc, rc2 = 0; bool disabled; @@ -818,11 +819,11 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) RESET_TYPE(method)); efx_device_detach_sync(efx); - /* efx_reset_down() grabs locks that prevent recovery on EF100. + /* efx_siena_reset_down() grabs locks that prevent recovery on EF100. * EF100 reset is handled in the efx_nic_type callback below. 
*/ if (efx_nic_rev(efx) != EFX_REV_EF100) - efx_reset_down(efx, method); + efx_siena_reset_down(efx, method); rc = efx->type->reset(efx, method); if (rc) { @@ -851,7 +852,7 @@ out: method == RESET_TYPE_DISABLE || method == RESET_TYPE_RECOVER_OR_DISABLE; if (efx_nic_rev(efx) != EFX_REV_EF100) - rc2 = efx_reset_up(efx, method, !disabled); + rc2 = efx_siena_reset_up(efx, method, !disabled); if (rc2) { disabled = true; if (!rc) @@ -886,7 +887,7 @@ static void efx_reset_work(struct work_struct *data) if ((method == RESET_TYPE_RECOVER_OR_DISABLE || method == RESET_TYPE_RECOVER_OR_ALL) && - efx_try_recovery(efx)) + efx_siena_try_recovery(efx)) return; if (!pending) @@ -894,17 +895,17 @@ static void efx_reset_work(struct work_struct *data) rtnl_lock(); - /* We checked the state in efx_schedule_reset() but it may + /* We checked the state in efx_siena_schedule_reset() but it may * have changed by now. Now that we have the RTNL lock, * it cannot change again. */ if (efx->state == STATE_READY) - (void)efx_reset(efx, method); + (void)efx_siena_reset(efx, method); rtnl_unlock(); } -void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) +void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type) { enum reset_type method; @@ -951,7 +952,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) */ efx_mcdi_mode_poll(efx); - efx_queue_reset_work(efx); + efx_siena_queue_reset_work(efx); } /************************************************************************** @@ -963,11 +964,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) * before use * **************************************************************************/ -int efx_port_dummy_op_int(struct efx_nic *efx) +int efx_siena_port_dummy_op_int(struct efx_nic *efx) { return 0; } -void efx_port_dummy_op_void(struct efx_nic *efx) {} + +void efx_siena_port_dummy_op_void(struct efx_nic *efx) {} /************************************************************************** * @@ -978,8 +980,8 @@ void efx_port_dummy_op_void(struct efx_nic *efx) {} /* This zeroes out and then fills in the invariants in a struct * efx_nic (including all sub-structures). */ -int efx_init_struct(struct efx_nic *efx, - struct pci_dev *pci_dev, struct net_device *net_dev) +int efx_siena_init_struct(struct efx_nic *efx, + struct pci_dev *pci_dev, struct net_device *net_dev) { int rc = -ENOMEM; @@ -1033,7 +1035,7 @@ int efx_init_struct(struct efx_nic *efx, efx->mem_bar = UINT_MAX; - rc = efx_init_channels(efx); + rc = efx_siena_init_channels(efx); if (rc) goto fail; @@ -1049,17 +1051,17 @@ int efx_init_struct(struct efx_nic *efx, return 0; fail: - efx_fini_struct(efx); + efx_siena_fini_struct(efx); return rc; } -void efx_fini_struct(struct efx_nic *efx) +void efx_siena_fini_struct(struct efx_nic *efx) { #ifdef CONFIG_RFS_ACCEL kfree(efx->rps_hash_table); #endif - efx_fini_channels(efx); + efx_siena_fini_channels(efx); kfree(efx->vpd_sn); @@ -1070,8 +1072,8 @@ void efx_fini_struct(struct efx_nic *efx) } /* This configures the PCI device to enable I/O and DMA. 
*/ -int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, - unsigned int mem_map_size) +int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, + unsigned int mem_map_size) { struct pci_dev *pci_dev = efx->pci_dev; int rc; @@ -1140,7 +1142,7 @@ fail1: return rc; } -void efx_fini_io(struct efx_nic *efx) +void efx_siena_fini_io(struct efx_nic *efx) { netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); @@ -1185,7 +1187,7 @@ static ssize_t mcdi_logging_store(struct device *dev, static DEVICE_ATTR_RW(mcdi_logging); -void efx_init_mcdi_logging(struct efx_nic *efx) +void efx_siena_init_mcdi_logging(struct efx_nic *efx) { int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); @@ -1195,7 +1197,7 @@ void efx_init_mcdi_logging(struct efx_nic *efx) } } -void efx_fini_mcdi_logging(struct efx_nic *efx) +void efx_siena_fini_mcdi_logging(struct efx_nic *efx) { device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); } @@ -1222,8 +1224,8 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, efx_device_detach_sync(efx); - efx_stop_all(efx); - efx_disable_interrupts(efx); + efx_siena_stop_all(efx); + efx_siena_disable_interrupts(efx); status = PCI_ERS_RESULT_NEED_RESET; } else { @@ -1266,10 +1268,10 @@ static void efx_io_resume(struct pci_dev *pdev) if (efx->state == STATE_DISABLED) goto out; - rc = efx_reset(efx, RESET_TYPE_ALL); + rc = efx_siena_reset(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, hw, efx->net_dev, - "efx_reset failed after PCI error (%d)\n", rc); + "efx_siena_reset failed after PCI error (%d)\n", rc); } else { efx->state = STATE_READY; netif_dbg(efx, hw, efx->net_dev, @@ -1286,7 +1288,7 @@ out: * with our request for slot reset the mmio_enabled callback will never be * called, and the link_reset callback is not used by AER or EEH mechanisms. 
*/ -const struct pci_error_handlers efx_err_handlers = { +const struct pci_error_handlers efx_siena_err_handlers = { .error_detected = efx_io_error_detected, .slot_reset = efx_io_slot_reset, .resume = efx_io_resume, @@ -1354,8 +1356,9 @@ static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb) } } -netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev, - netdev_features_t features) +netdev_features_t efx_siena_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) { struct efx_nic *efx = netdev_priv(dev); @@ -1375,8 +1378,8 @@ netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev return features; } -int efx_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid) +int efx_siena_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) { struct efx_nic *efx = netdev_priv(net_dev); @@ -1386,7 +1389,8 @@ int efx_get_phys_port_id(struct net_device *net_dev, return -EOPNOTSUPP; } -int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len) +int efx_siena_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/siena/efx_common.h b/drivers/net/ethernet/sfc/siena/efx_common.h index 65513fd0cf6c..470033611436 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.h +++ b/drivers/net/ethernet/sfc/siena/efx_common.h @@ -11,12 +11,12 @@ #ifndef EFX_COMMON_H #define EFX_COMMON_H -int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, - unsigned int mem_map_size); -void efx_fini_io(struct efx_nic *efx); -int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, - struct net_device *net_dev); -void efx_fini_struct(struct efx_nic *efx); +int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, + unsigned int mem_map_size); +void efx_siena_fini_io(struct efx_nic *efx); +int efx_siena_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, + struct net_device *net_dev); +void efx_siena_fini_struct(struct efx_nic *efx); #define EFX_MAX_DMAQ_SIZE 4096UL #define EFX_DEFAULT_DMAQ_SIZE 1024UL @@ -25,23 +25,24 @@ void efx_fini_struct(struct efx_nic *efx); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -void efx_link_clear_advertising(struct efx_nic *efx); -void efx_link_set_wanted_fc(struct efx_nic *efx, u8); +void efx_siena_link_clear_advertising(struct efx_nic *efx); +void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc); -void efx_start_all(struct efx_nic *efx); -void efx_stop_all(struct efx_nic *efx); +void efx_siena_start_all(struct efx_nic *efx); +void efx_siena_stop_all(struct efx_nic *efx); -void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats); +void efx_siena_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats); -int efx_create_reset_workqueue(void); -void efx_queue_reset_work(struct efx_nic *efx); -void efx_flush_reset_workqueue(struct efx_nic *efx); -void efx_destroy_reset_workqueue(void); +int efx_siena_create_reset_workqueue(void); +void efx_siena_queue_reset_work(struct efx_nic *efx); +void efx_siena_flush_reset_workqueue(struct efx_nic *efx); +void efx_siena_destroy_reset_workqueue(void); -void efx_start_monitor(struct efx_nic *efx); +void efx_siena_start_monitor(struct efx_nic *efx); -int __efx_reconfigure_port(struct efx_nic *efx); -int efx_reconfigure_port(struct efx_nic *efx); +int 
__efx_siena_reconfigure_port(struct efx_nic *efx); +int efx_siena_reconfigure_port(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ @@ -51,16 +52,16 @@ int efx_reconfigure_port(struct efx_nic *efx); ASSERT_RTNL(); \ } while (0) -int efx_try_recovery(struct efx_nic *efx); -void efx_reset_down(struct efx_nic *efx, enum reset_type method); -void efx_watchdog(struct net_device *net_dev, unsigned int txqueue); -int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); -int efx_reset(struct efx_nic *efx, enum reset_type method); -void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); +int efx_siena_try_recovery(struct efx_nic *efx); +void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method); +void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue); +int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +int efx_siena_reset(struct efx_nic *efx, enum reset_type method); +void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type); /* Dummy PHY ops for PHY drivers */ -int efx_port_dummy_op_int(struct efx_nic *efx); -void efx_port_dummy_op_void(struct efx_nic *efx); +int efx_siena_port_dummy_op_int(struct efx_nic *efx); +void efx_siena_port_dummy_op_void(struct efx_nic *efx); static inline int efx_check_disabled(struct efx_nic *efx) { @@ -88,29 +89,30 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel) } #ifdef CONFIG_SFC_MCDI_LOGGING -void efx_init_mcdi_logging(struct efx_nic *efx); -void efx_fini_mcdi_logging(struct efx_nic *efx); +void efx_siena_init_mcdi_logging(struct efx_nic *efx); +void efx_siena_fini_mcdi_logging(struct efx_nic *efx); #else -static inline void efx_init_mcdi_logging(struct efx_nic *efx) {} -static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {} +static inline void efx_siena_init_mcdi_logging(struct efx_nic *efx) {} +static inline void efx_siena_fini_mcdi_logging(struct efx_nic *efx) {} #endif -void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only); -int efx_set_mac_address(struct net_device *net_dev, void *data); -void efx_set_rx_mode(struct net_device *net_dev); -int efx_set_features(struct net_device *net_dev, netdev_features_t data); -void efx_link_status_changed(struct efx_nic *efx); -unsigned int efx_xdp_max_mtu(struct efx_nic *efx); -int efx_change_mtu(struct net_device *net_dev, int new_mtu); +void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only); +int efx_siena_set_mac_address(struct net_device *net_dev, void *data); +void efx_siena_set_rx_mode(struct net_device *net_dev); +int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data); +void efx_siena_link_status_changed(struct efx_nic *efx); +unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx); +int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu); -extern const struct pci_error_handlers efx_err_handlers; +extern const struct pci_error_handlers efx_siena_err_handlers; -netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev, - netdev_features_t features); +netdev_features_t efx_siena_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); -int efx_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid); +int efx_siena_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); -int efx_get_phys_port_name(struct net_device *net_dev, - char *name, size_t len); +int efx_siena_get_phys_port_name(struct net_device 
*net_dev, + char *name, size_t len); #endif diff --git a/drivers/net/ethernet/sfc/siena/enum.h b/drivers/net/ethernet/sfc/siena/enum.h index cd590e0685e5..25b28b3969d7 100644 --- a/drivers/net/ethernet/sfc/siena/enum.h +++ b/drivers/net/ethernet/sfc/siena/enum.h @@ -127,7 +127,7 @@ enum efx_loopback_mode { * * %RESET_TYPE_INVSIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and * %RESET_TYPE_DISABLE specify the method/scope of the reset. The - * other valuesspecify reasons, which efx_schedule_reset() will choose + * other valuesspecify reasons, which efx_siena_schedule_reset() will choose * a method for. * * Reset methods are numbered in order of increasing scope. diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 48506373721a..7aa621e97212 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -105,7 +105,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev, unsigned int tx_usecs, rx_usecs; bool rx_adaptive; - efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); + efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); coalesce->tx_coalesce_usecs = tx_usecs; coalesce->tx_coalesce_usecs_irq = tx_usecs; @@ -127,7 +127,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, bool adaptive, rx_may_override_tx; int rc; - efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); + efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); if (coalesce->rx_coalesce_usecs != rx_usecs) rx_usecs = coalesce->rx_coalesce_usecs; @@ -146,8 +146,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, else tx_usecs = coalesce->tx_coalesce_usecs_irq; - rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, - rx_may_override_tx); + rc = efx_siena_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, + rx_may_override_tx); if (rc != 0) return rc; @@ -198,7 +198,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev, "increasing TX queue size to minimum of %u\n", txq_entries); - return efx_realloc_channels(efx, ring->rx_pending, txq_entries); + return efx_siena_realloc_channels(efx, ring->rx_pending, txq_entries); } static void efx_ethtool_get_wol(struct net_device *net_dev, @@ -239,7 +239,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, return 0; } -const struct ethtool_ops efx_ethtool_ops = { +const struct ethtool_ops efx_siena_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c index bd552c7dffcb..e177b58e0664 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool_common.c +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -218,7 +218,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev, old_adv = efx->link_advertising[0]; old_fc = efx->wanted_fc; - efx_link_set_wanted_fc(efx, wanted_fc); + efx_siena_link_set_wanted_fc(efx, wanted_fc); if (efx->link_advertising[0] != old_adv || (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { rc = efx_mcdi_port_reconfigure(efx); @@ -233,7 +233,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev, /* Reconfigure the MAC. 
The PHY *may* generate a link state change event * if the user just changed the advertised capabilities, but there's no * harm doing this twice */ - efx_mac_reconfigure(efx, false); + efx_siena_mac_reconfigure(efx, false); out: mutex_unlock(&efx->mac_lock); @@ -1307,7 +1307,7 @@ int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) if (rc < 0) return rc; - return efx_reset(efx, rc); + return efx_siena_reset(efx, rc); } int efx_ethtool_get_module_eeprom(struct net_device *net_dev, diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c index 9599123bc28d..6ee6ca192a44 100644 --- a/drivers/net/ethernet/sfc/siena/farch.c +++ b/drivers/net/ethernet/sfc/siena/farch.c @@ -747,12 +747,13 @@ int efx_farch_fini_dmaq(struct efx_nic *efx) * completion events. This means that efx->rxq_flush_outstanding remained at 4 * after the FLR; also, efx->active_queues was non-zero (as no flush completion * events were received, and we didn't go through efx_check_tx_flush_complete()) - * If we don't fix this up, on the next call to efx_realloc_channels() we won't - * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 - * for batched flush requests; and the efx->active_queues gets messed up because - * we keep incrementing for the newly initialised queues, but it never went to - * zero previously. Then we get a timeout every time we try to restart the - * queues, as it doesn't go back to zero when we should be flushing the queues. + * If we don't fix this up, on the next call to efx_siena_realloc_channels() we + * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit + * of 4 for batched flush requests; and the efx->active_queues gets messed up + * because we keep incrementing for the newly initialised queues, but it never + * went to zero previously. Then we get a timeout every time we try to restart + * the queues, as it doesn't go back to zero when we should be flushing the + * queues. 
*/ void efx_farch_finish_flr(struct efx_nic *efx) { @@ -838,7 +839,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); - efx_xmit_done(tx_queue, tx_ev_desc_ptr); + efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr); } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { /* Rewrite the FIFO write pointer */ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); @@ -849,7 +850,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) efx_farch_notify_tx_desc(tx_queue); netif_tx_unlock(efx->net_dev); } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { - efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } else { netif_err(efx, tx_err, efx->net_dev, "channel %d unexpected TX event " @@ -956,7 +957,7 @@ efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) "dropped %d events (index=%d expected=%d)\n", dropped, index, expected); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); return false; } @@ -1001,7 +1002,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) /* Discard all pending fragments */ if (rx_queue->scatter_n) { - efx_rx_packet( + efx_siena_rx_packet( rx_queue, rx_queue->removed_count & rx_queue->ptr_mask, rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); @@ -1015,7 +1016,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) /* Discard new fragment if not SOP */ if (!rx_ev_sop) { - efx_rx_packet( + efx_siena_rx_packet( rx_queue, rx_queue->removed_count & rx_queue->ptr_mask, 1, 0, EFX_RX_PKT_DISCARD); @@ -1067,9 +1068,9 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) channel->irq_mod_score += 2; /* Handle received packet */ - efx_rx_packet(rx_queue, - rx_queue->removed_count & rx_queue->ptr_mask, - rx_queue->scatter_n, rx_ev_byte_cnt, flags); + efx_siena_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_ev_byte_cnt, flags); rx_queue->removed_count += rx_queue->scatter_n; rx_queue->scatter_n = 0; } @@ -1222,7 +1223,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) "channel %d seen DRIVER RX_RESET event. " "Resetting.\n", channel->channel); atomic_inc(&efx->rx_reset); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); break; case FSE_BZ_RX_DSC_ERROR_EV: if (ev_sub_data < EFX_VI_BASE) { @@ -1230,7 +1231,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) "RX DMA Q %d reports descriptor fetch error." " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } #ifdef CONFIG_SFC_SRIOV else @@ -1243,7 +1244,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) "TX DMA Q %d reports descriptor fetch error." 
" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); - efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } #ifdef CONFIG_SFC_SRIOV else @@ -1496,12 +1497,12 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR - reset scheduled\n"); - efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); + efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR); } else { netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR - max number of errors seen." "NIC will be disabled\n"); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); } return IRQ_HANDLED; @@ -1529,7 +1530,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) * code. Disable them earlier. * If an EEH error occurred, the read will have returned all ones. */ - if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && + if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) && !efx->eeh_disabled_legacy_irq) { disable_irq_nosync(efx->legacy_irq); efx->eeh_disabled_legacy_irq = true; diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c index 50baf62b2cbc..7f8f0889bf8d 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.c +++ b/drivers/net/ethernet/sfc/siena/mcdi.c @@ -725,7 +725,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, cmd, -rc); if (efx->type->mcdi_reboot_detected) efx->type->mcdi_reboot_detected(efx); - efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE); } else if (proxy_handle && (rc == -EPROTO) && efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, proxy_handle)) { @@ -849,7 +849,7 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, cmd, rc); if (rc == -EINTR || rc == -EIO) - efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE); efx_mcdi_release(mcdi); } } @@ -1254,7 +1254,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) mcdi->new_epoch = true; /* Nobody was waiting for an MCDI request, so trigger a reset */ - efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE); } spin_unlock(&mcdi->iface_lock); @@ -1282,7 +1282,7 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx) } } mcdi->new_epoch = true; - efx_schedule_reset(efx, RESET_TYPE_MC_BIST); + efx_siena_schedule_reset(efx, RESET_TYPE_MC_BIST); spin_unlock(&mcdi->iface_lock); } @@ -1296,7 +1296,7 @@ static void efx_mcdi_abandon(struct efx_nic *efx) if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL) return; /* it had already been done */ netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n"); - efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT); + efx_siena_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT); } static void efx_handle_drain_event(struct efx_nic *efx) @@ -1387,7 +1387,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, "%s DMA error (event: "EFX_QWORD_FMT")\n", code == MCDI_EVENT_CODE_TX_ERR ? 
"TX" : "RX", EFX_QWORD_VAL(*event)); - efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); break; case MCDI_EVENT_CODE_PROXY_RESPONSE: efx_mcdi_ev_proxy_response(efx, diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c index 899cc1671004..57908045fb15 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c @@ -518,7 +518,7 @@ int efx_mcdi_phy_probe(struct efx_nic *efx) efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) efx->wanted_fc |= EFX_FC_AUTO; - efx_link_set_wanted_fc(efx, efx->wanted_fc); + efx_siena_link_set_wanted_fc(efx, efx->wanted_fc); return 0; @@ -605,7 +605,7 @@ int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_li efx_link_set_advertising(efx, cmd->link_modes.advertising); phy_cfg->forced_cap = 0; } else { - efx_link_clear_advertising(efx); + efx_siena_link_clear_advertising(efx); phy_cfg->forced_cap = caps; } return 0; @@ -1297,5 +1297,5 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) efx_mcdi_phy_check_fcntl(efx, lpa); - efx_link_status_changed(efx); + efx_siena_link_status_changed(efx); } diff --git a/drivers/net/ethernet/sfc/siena/mtd.c b/drivers/net/ethernet/sfc/siena/mtd.c index 273c08e5455f..12a624247f44 100644 --- a/drivers/net/ethernet/sfc/siena/mtd.c +++ b/drivers/net/ethernet/sfc/siena/mtd.c @@ -37,7 +37,7 @@ static void efx_mtd_sync(struct mtd_info *mtd) part->name, part->dev_type_name, rc); } -static void efx_mtd_remove_partition(struct efx_mtd_partition *part) +static void efx_siena_mtd_remove_partition(struct efx_mtd_partition *part) { int rc; @@ -51,8 +51,8 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part) list_del(&part->node); } -int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, - size_t n_parts, size_t sizeof_part) +int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part) { struct efx_mtd_partition *part; size_t i; @@ -79,7 +79,7 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, if (mtd_device_register(&part->mtd, NULL, 0)) goto fail; - /* Add to list in order - efx_mtd_remove() depends on this */ + /* Add to list in order - efx_siena_mtd_remove() depends on this */ list_add_tail(&part->node, &efx->mtd_list); } @@ -89,13 +89,13 @@ fail: while (i--) { part = (struct efx_mtd_partition *)((char *)parts + i * sizeof_part); - efx_mtd_remove_partition(part); + efx_siena_mtd_remove_partition(part); } /* Failure is unlikely here, but probably means we're out of memory */ return -ENOMEM; } -void efx_mtd_remove(struct efx_nic *efx) +void efx_siena_mtd_remove(struct efx_nic *efx) { struct efx_mtd_partition *parts, *part, *next; @@ -108,12 +108,12 @@ void efx_mtd_remove(struct efx_nic *efx) node); list_for_each_entry_safe(part, next, &efx->mtd_list, node) - efx_mtd_remove_partition(part); + efx_siena_mtd_remove_partition(part); kfree(parts); } -void efx_mtd_rename(struct efx_nic *efx) +void efx_siena_mtd_rename(struct efx_nic *efx) { struct efx_mtd_partition *part; diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h index 318db906a154..3fe93f25a569 100644 --- a/drivers/net/ethernet/sfc/siena/net_driver.h +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -207,7 +207,6 @@ struct efx_tx_buffer { * @txd: The hardware 
descriptor ring * @ptr_mask: The size of the ring minus 1. * @piobuf: PIO buffer region for this TX queue (shared with its partner). - * Size of the region is efx_piobuf_size. * @piobuf_offset: Buffer offset to be specified in PIO descriptors * @initialised: Has hardware queue been initialised? * @timestamping: Is timestamping enabled for this channel? @@ -478,9 +477,9 @@ enum efx_sync_events_state { * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by - * __efx_rx_packet(), or zero if there is none + * __efx_siena_rx_packet(), or zero if there is none * @rx_pkt_index: Ring index of first buffer for next packet to be delivered - * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 + * by __efx_siena_rx_packet(), if @rx_pkt_n_frags != 0 * @rx_list: list of SKBs from current RX, awaiting processing * @rx_queue: RX queue for this channel * @tx_queue: TX queues for this channel @@ -869,12 +868,12 @@ enum efx_xdp_tx_queues_mode { * @nic_data: Hardware dependent state * @mcdi: Management-Controller-to-Driver Interface state * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, - * efx_monitor() and efx_reconfigure_port() + * efx_monitor() and efx_siena_reconfigure_port() * @port_enabled: Port enabled indicator. - * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and - * efx_mac_work() with kernel interfaces. Safe to read under any - * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must - * be held to modify it. + * Serialises efx_siena_stop_all(), efx_siena_start_all(), + * efx_monitor() and efx_mac_work() with kernel interfaces. + * Safe to read under any one of the rtnl_lock, mac_lock, or netif_tx_lock, + * but all three must be held to modify it. * @port_initialized: Port initialized? * @net_dev: Operating system network device. Consider holding the rtnl lock * @fixed_features: Features which cannot be turned off @@ -1255,7 +1254,7 @@ struct efx_udp_tunnel { * This must check whether the specified table entry is used by RFS * and that rps_may_expire_flow() returns true for it. * @mtd_probe: Probe and add MTD partitions associated with this net device, - * using efx_mtd_add() + * using efx_siena_mtd_add() * @mtd_rename: Set an MTD partition name using the net device name * @mtd_read: Read from an MTD partition * @mtd_erase: Erase part of an MTD partition diff --git a/drivers/net/ethernet/sfc/siena/rx.c b/drivers/net/ethernet/sfc/siena/rx.c index 2375cef577e4..099cb23e3250 100644 --- a/drivers/net/ethernet/sfc/siena/rx.c +++ b/drivers/net/ethernet/sfc/siena/rx.c @@ -118,8 +118,8 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, return skb; } -void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, - unsigned int n_frags, unsigned int len, u16 flags) +void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags) { struct efx_nic *efx = rx_queue->efx; struct efx_channel *channel = efx_rx_queue_channel(rx_queue); @@ -310,7 +310,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, case XDP_TX: /* Buffer ownership passes to tx on success. 
*/ xdpf = xdp_convert_buff_to_frame(&xdp); - err = efx_xdp_tx_buffers(efx, 1, &xdpf, true); + err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true); if (unlikely(err != 1)) { efx_free_rx_buffers(rx_queue, rx_buf, 1); if (net_ratelimit()) @@ -357,7 +357,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, } /* Handle a received packet. Second half: Touches packet payload. */ -void __efx_rx_packet(struct efx_channel *channel) +void __efx_siena_rx_packet(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; struct efx_rx_buffer *rx_buf = @@ -391,7 +391,8 @@ void __efx_rx_packet(struct efx_channel *channel) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) - efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0); + efx_siena_rx_packet_gro(channel, rx_buf, + channel->rx_pkt_n_frags, eh, 0); else efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); out: diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c index fa8b9aacca11..9fb442da043c 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.c +++ b/drivers/net/ethernet/sfc/siena/rx_common.c @@ -504,8 +504,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) * regardless of checksum state and skbs with a good checksum. */ void -efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, - unsigned int n_frags, u8 *eh, __wsum csum) +efx_siena_rx_packet_gro(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh, __wsum csum) { struct napi_struct *napi = &channel->napi_str; struct efx_nic *efx = channel->efx; @@ -520,8 +521,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, return; } - if (efx->net_dev->features & NETIF_F_RXHASH && - efx_rx_buf_hash_valid(efx, eh)) + if (efx->net_dev->features & NETIF_F_RXHASH) skb_set_hash(skb, efx_rx_buf_hash(efx, eh), PKT_HASH_TYPE_L3); if (csum) { diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h index fbd2769307f9..909d06a4fdc9 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.h +++ b/drivers/net/ethernet/sfc/siena/rx_common.h @@ -81,8 +81,9 @@ void efx_rx_config_page_split(struct efx_nic *efx); void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); void -efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, - unsigned int n_frags, u8 *eh, __wsum csum); +efx_siena_rx_packet_gro(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh, __wsum csum); struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c index 3c5227afd497..7e24329bc005 100644 --- a/drivers/net/ethernet/sfc/siena/selftest.c +++ b/drivers/net/ethernet/sfc/siena/selftest.c @@ -58,14 +58,14 @@ static const char payload_msg[] = "Hello world! 
This is an Efx loopback test in progress!"; /* Interrupt mode names */ -static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; -static const char *const efx_interrupt_mode_names[] = { +static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX; +static const char *const efx_siena_interrupt_mode_names[] = { [EFX_INT_MODE_MSIX] = "MSI-X", [EFX_INT_MODE_MSI] = "MSI", [EFX_INT_MODE_LEGACY] = "legacy", }; #define INT_MODE(efx) \ - STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) + STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode) /** * struct efx_loopback_state - persistent state during a loopback selftest @@ -197,7 +197,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx, schedule_timeout_uninterruptible(wait); efx_for_each_channel(channel, efx) { - efx_stop_eventq(channel); + efx_siena_stop_eventq(channel); if (channel->eventq_read_ptr != read_ptr[channel->channel]) { set_bit(channel->channel, &napi_ran); @@ -209,7 +209,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx, if (efx_nic_event_test_irq_cpu(channel) >= 0) clear_bit(channel->channel, &int_pend); } - efx_start_eventq(channel); + efx_siena_start_eventq(channel); } wait *= 2; @@ -637,7 +637,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, state->flush = true; mutex_lock(&efx->mac_lock); efx->loopback_mode = mode; - rc = __efx_reconfigure_port(efx); + rc = __efx_siena_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); if (rc) { netif_err(efx, drv, efx->net_dev, @@ -731,7 +731,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, if (rc_reset) { netif_err(efx, hw, efx->net_dev, "Unable to recover from chip test\n"); - efx_schedule_reset(efx, RESET_TYPE_DISABLE); + efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE); return rc_reset; } @@ -744,7 +744,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, mutex_lock(&efx->mac_lock); efx->phy_mode &= ~PHY_MODE_LOW_POWER; efx->loopback_mode = LOOPBACK_NONE; - __efx_reconfigure_port(efx); + __efx_siena_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); rc = efx_test_phy(efx, tests, flags); @@ -759,7 +759,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, mutex_lock(&efx->mac_lock); efx->phy_mode = phy_mode; efx->loopback_mode = loopback_mode; - __efx_reconfigure_port(efx); + __efx_siena_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); efx_device_attach_if_not_resetting(efx); diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index 7cc6a2583d6c..726dd4b72779 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -40,7 +40,7 @@ static void siena_push_irq_moderation(struct efx_channel *channel) if (channel->irq_moderation_us) { unsigned int ticks; - ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); + ticks = efx_siena_usecs_to_ticks(efx, channel->irq_moderation_us); EFX_POPULATE_DWORD_2(timer_cmd, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, @@ -102,7 +102,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) enum reset_type reset_method = RESET_TYPE_ALL; int rc, rc2; - efx_reset_down(efx, reset_method); + efx_siena_reset_down(efx, reset_method); /* Reset the chip immediately so that it is completely * quiescent regardless of what any VF driver does. 
@@ -118,7 +118,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) rc = efx_mcdi_reset(efx, reset_method); out: - rc2 = efx_reset_up(efx, reset_method, rc == 0); + rc2 = efx_siena_reset_up(efx, reset_method, rc == 0); return rc ? rc : rc2; } @@ -583,7 +583,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes], stats[SIENA_STAT_rx_bytes] - stats[SIENA_STAT_rx_bad_bytes]); - efx_update_sw_stats(efx, stats); + efx_siena_update_sw_stats(efx, stats); return 0; } @@ -943,7 +943,7 @@ static int siena_mtd_probe(struct efx_nic *efx) if (rc) goto fail; - rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); + rc = efx_siena_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); fail: if (rc) kfree(parts); @@ -980,7 +980,7 @@ const struct efx_nic_type siena_a0_nic_type = { .remove = siena_remove_nic, .init = siena_init_nic, .dimension_resources = siena_dimension_resources, - .fini = efx_port_dummy_op_void, + .fini = efx_siena_port_dummy_op_void, #ifdef CONFIG_EEH .monitor = siena_monitor, #else @@ -994,7 +994,7 @@ const struct efx_nic_type siena_a0_nic_type = { .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = efx_siena_prepare_flush, .finish_flush = siena_finish_flush, - .prepare_flr = efx_port_dummy_op_void, + .prepare_flr = efx_siena_port_dummy_op_void, .finish_flr = efx_farch_finish_flr, .describe_stats = siena_describe_nic_stats, .update_stats = siena_update_nic_stats, @@ -1024,7 +1024,7 @@ const struct efx_nic_type siena_a0_nic_type = { .tx_remove = efx_farch_tx_remove, .tx_write = efx_farch_tx_write, .tx_limit_len = efx_farch_tx_limit_len, - .tx_enqueue = __efx_enqueue_skb, + .tx_enqueue = __efx_siena_enqueue_skb, .rx_push_rss_config = siena_rx_push_rss_config, .rx_pull_rss_config = siena_rx_pull_rss_config, .rx_probe = efx_farch_rx_probe, @@ -1032,7 +1032,7 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_remove = efx_farch_rx_remove, .rx_write = efx_farch_rx_write, .rx_defer_refill = efx_farch_rx_defer_refill, - .rx_packet = __efx_rx_packet, + .rx_packet = __efx_siena_rx_packet, .ev_probe = efx_farch_ev_probe, .ev_init = efx_farch_ev_init, .ev_fini = efx_farch_ev_fini, @@ -1075,9 +1075,9 @@ const struct efx_nic_type siena_a0_nic_type = { .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan, .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, .sriov_get_vf_config = efx_siena_sriov_get_vf_config, - .vswitching_probe = efx_port_dummy_op_int, - .vswitching_restore = efx_port_dummy_op_int, - .vswitching_remove = efx_port_dummy_op_void, + .vswitching_probe = efx_siena_port_dummy_op_int, + .vswitching_restore = efx_siena_port_dummy_op_int, + .vswitching_remove = efx_siena_port_dummy_op_void, .set_mac_address = efx_siena_sriov_mac_address_changed, #endif diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.c b/drivers/net/ethernet/sfc/siena/siena_sriov.c index 8b0fdeebc1a5..f8e14f0d2f34 100644 --- a/drivers/net/ethernet/sfc/siena/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.c @@ -1043,7 +1043,7 @@ efx_siena_sriov_get_channel_name(struct efx_channel *channel, static const struct efx_channel_type efx_siena_sriov_channel_type = { .handle_no_channel = efx_siena_sriov_handle_no_channel, .pre_probe = efx_siena_sriov_probe_channel, - .post_remove = efx_channel_dummy_op_void, + .post_remove = efx_siena_channel_dummy_op_void, .get_name = efx_siena_sriov_get_channel_name, /* no copy operation; channel must not be reallocated */ .keep_eventq = true, diff --git 
a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c index 9e68dc434832..118ec6f5f097 100644 --- a/drivers/net/ethernet/sfc/siena/tx.c +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -138,13 +138,14 @@ static void efx_tx_send_pending(struct efx_channel *channel) * If any DMA mapping fails, any mapped fragments will be unmapped, * the queue's insert pointer will be restored to its original value. * - * This function is split out from efx_hard_start_xmit to allow the + * This function is split out from efx_siena_hard_start_xmit to allow the * loopback test to direct packets via specific TX queues. * * Returns NETDEV_TX_OK. * You must hold netif_tx_lock() to call this function. */ -netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) { unsigned int old_insert_count = tx_queue->insert_count; bool xmit_more = netdev_xmit_more(); @@ -219,8 +220,8 @@ err: * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC * (for XDP redirect). */ -int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, - bool flush) +int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, + bool flush) { struct efx_tx_buffer *tx_buffer; struct efx_tx_queue *tx_queue; @@ -310,8 +311,8 @@ unlock: * Context: non-blocking. * Should always return NETDEV_TX_OK and consume the skb. */ -netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, - struct net_device *net_dev) +netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_tx_queue *tx_queue; @@ -354,52 +355,14 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - return __efx_enqueue_skb(tx_queue, skb); + return __efx_siena_enqueue_skb(tx_queue, skb); } -void efx_xmit_done_single(struct efx_tx_queue *tx_queue) -{ - unsigned int pkts_compl = 0, bytes_compl = 0; - unsigned int read_ptr; - bool finished = false; - - read_ptr = tx_queue->read_count & tx_queue->ptr_mask; - - while (!finished) { - struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; - - if (!efx_tx_buffer_in_use(buffer)) { - struct efx_nic *efx = tx_queue->efx; - - netif_err(efx, hw, efx->net_dev, - "TX queue %d spurious single TX completion\n", - tx_queue->queue); - efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); - return; - } - - /* Need to check the flag before dequeueing. 
*/ - if (buffer->flags & EFX_TX_BUF_SKB) - finished = true; - efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); - - ++tx_queue->read_count; - read_ptr = tx_queue->read_count & tx_queue->ptr_mask; - } - - tx_queue->pkts_compl += pkts_compl; - tx_queue->bytes_compl += bytes_compl; - - EFX_WARN_ON_PARANOID(pkts_compl != 1); - - efx_xmit_done_check_empty(tx_queue); -} - -void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) +void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; - /* Must be inverse of queue lookup in efx_hard_start_xmit() */ + /* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */ tx_queue->core_txq = netdev_get_tx_queue(efx->net_dev, tx_queue->channel->channel + @@ -407,8 +370,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - void *type_data) +int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + void *type_data) { struct efx_nic *efx = netdev_priv(net_dev); struct tc_mqprio_qopt *mqprio = type_data; diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c index 9bc8281b7f5b..7945fe681e29 100644 --- a/drivers/net/ethernet/sfc/siena/tx_common.c +++ b/drivers/net/ethernet/sfc/siena/tx_common.c @@ -214,7 +214,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, netif_err(efx, tx_err, efx->net_dev, "TX queue %d spurious TX completion id %d\n", tx_queue->queue, read_ptr); - efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP); return; } @@ -225,7 +225,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, } } -void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) +void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue) { if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); @@ -238,7 +238,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) } } -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) +void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; struct efx_nic *efx = tx_queue->efx; @@ -265,7 +265,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) netif_tx_wake_queue(tx_queue->core_txq); } - efx_xmit_done_check_empty(tx_queue); + efx_siena_xmit_done_check_empty(tx_queue); } /* Remove buffers put into a tx_queue for the current packet. 
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.h b/drivers/net/ethernet/sfc/siena/tx_common.h index bbab7f248250..602f5a052918 100644 --- a/drivers/net/ethernet/sfc/siena/tx_common.h +++ b/drivers/net/ethernet/sfc/siena/tx_common.h @@ -26,8 +26,8 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION); } -void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue); -void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue); +void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, unsigned int insert_count); -- cgit v1.2.3 From 7f9e4b2a61ba1ffda9ebcf79dc0374e01051e86a Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:32:20 +0100 Subject: sfc/siena: Rename RX/TX functions to avoid conflicts with sfc For siena use efx_siena_ as the function prefix. Several functions are not used in Siena, so they are removed. Use a Siena specific variable name for module parameter efx_separate_tx_channels. Move efx_fini_tx_queue() to avoid a forward declaration of efx_dequeue_buffer(). Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx.c | 15 +-- drivers/net/ethernet/sfc/siena/efx.h | 4 +- drivers/net/ethernet/sfc/siena/efx_channels.c | 35 +++---- drivers/net/ethernet/sfc/siena/efx_common.c | 4 +- drivers/net/ethernet/sfc/siena/ethtool_common.c | 15 +-- drivers/net/ethernet/sfc/siena/farch.c | 9 +- drivers/net/ethernet/sfc/siena/rx.c | 22 ++--- drivers/net/ethernet/sfc/siena/rx_common.c | 116 +++++++++++++----------- drivers/net/ethernet/sfc/siena/rx_common.h | 89 +++++++++--------- drivers/net/ethernet/sfc/siena/tx.c | 16 +--- drivers/net/ethernet/sfc/siena/tx.h | 7 -- drivers/net/ethernet/sfc/siena/tx_common.c | 85 +++++++++-------- drivers/net/ethernet/sfc/siena/tx_common.h | 32 +++---- 13 files changed, 216 insertions(+), 233 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index ca41c038f3ab..d94e2438ae3a 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -58,8 +58,9 @@ MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); * * This is only used in MSI-X interrupt mode */ -bool efx_separate_tx_channels; -module_param(efx_separate_tx_channels, bool, 0444); +bool efx_siena_separate_tx_channels; +module_param_named(efx_separate_tx_channels, efx_siena_separate_tx_channels, + bool, 0444); MODULE_PARM_DESC(efx_separate_tx_channels, "Use separate channels for TX and RX"); @@ -306,7 +307,7 @@ static int efx_probe_nic(struct efx_nic *efx) if (efx->n_channels > 1) netdev_rss_key_fill(efx->rss_context.rx_hash_key, sizeof(efx->rss_context.rx_hash_key)); - efx_set_default_rx_indir_table(efx, &efx->rss_context); + efx_siena_set_default_rx_indir_table(efx, &efx->rss_context); /* Initialise the interrupt moderation settings */ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); @@ -366,7 +367,7 @@ static int efx_probe_all(struct efx_nic *efx) " VFs may not function\n", rc); #endif - rc = efx_probe_filters(efx); + rc = efx_siena_probe_filters(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to create filter tables\n"); @@ -380,7 +381,7 @@ static int efx_probe_all(struct efx_nic *efx) return 0; fail5: - efx_remove_filters(efx); + 
efx_siena_remove_filters(efx); fail4: #ifdef CONFIG_SFC_SRIOV efx->type->vswitching_remove(efx); @@ -400,7 +401,7 @@ static void efx_remove_all(struct efx_nic *efx) rtnl_unlock(); efx_siena_remove_channels(efx); - efx_remove_filters(efx); + efx_siena_remove_filters(efx); #ifdef CONFIG_SFC_SRIOV efx->type->vswitching_remove(efx); #endif @@ -602,7 +603,7 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_get_phys_port_name = efx_siena_get_phys_port_name, .ndo_setup_tc = efx_siena_setup_tc, #ifdef CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = efx_filter_rfs, + .ndo_rx_flow_steer = efx_siena_filter_rfs, #endif .ndo_xdp_xmit = efx_xdp_xmit, .ndo_bpf = efx_xdp diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h index a4f9e6e962b0..f91f3c94a275 100644 --- a/drivers/net/ethernet/sfc/siena/efx.h +++ b/drivers/net/ethernet/sfc/siena/efx.h @@ -44,7 +44,7 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel) * TSO skbs. */ #define EFX_RXQ_MIN_ENT 128U -#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_siena_tx_max_skb_descs(efx)) /* All EF10 architecture NICs steal one bit of the DMAQ size for various * other purposes when counting TxQ entries, so we halve the queue size. @@ -78,7 +78,7 @@ static inline bool efx_rss_enabled(struct efx_nic *efx) * * 2. If the existing filters have higher priority, return -%EPERM. * - * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not + * 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not * support delivery to multiple recipients, return -%EEXIST. * * This implies that filters for multiple multicast recipients must diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index b04affb23f72..799c0a90358c 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -138,7 +138,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, int n_xdp_tx; int n_xdp_ev; - if (efx_separate_tx_channels) + if (efx_siena_separate_tx_channels) n_channels *= 2; n_channels += extra_channels; @@ -220,7 +220,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, /* Ignore XDP tx channels when creating rx channels. */ n_channels -= efx->n_xdp_channels; - if (efx_separate_tx_channels) { + if (efx_siena_separate_tx_channels) { efx->n_tx_channels = min(max(n_channels / 2, 1U), efx->max_tx_channels); @@ -321,7 +321,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) /* Assume legacy interrupts */ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { - efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); + efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 
1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; efx->n_xdp_channels = 0; @@ -521,7 +521,8 @@ static void efx_filter_rfs_expire(struct work_struct *data) channel = container_of(dwork, struct efx_channel, filter_work); time = jiffies - channel->rfs_last_expiry; quota = channel->rfs_filter_count * time / (30 * HZ); - if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) + if (quota >= 20 && __efx_siena_filter_rfs_expire(channel, + min(channel->rfs_filter_count, quota))) channel->rfs_last_expiry += time; /* Ensure we do more work eventually even if NAPI poll is not happening */ schedule_delayed_work(dwork, 30 * HZ); @@ -558,7 +559,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) rx_queue = &channel->rx_queue; rx_queue->efx = efx; - timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); + timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0); return channel; } @@ -631,7 +632,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) rx_queue = &channel->rx_queue; rx_queue->buffer = NULL; memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); - timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); + timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0); #ifdef CONFIG_RFS_ACCEL INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); #endif @@ -657,13 +658,13 @@ static int efx_probe_channel(struct efx_channel *channel) goto fail; efx_for_each_channel_tx_queue(tx_queue, channel) { - rc = efx_probe_tx_queue(tx_queue); + rc = efx_siena_probe_tx_queue(tx_queue); if (rc) goto fail; } efx_for_each_channel_rx_queue(rx_queue, channel) { - rc = efx_probe_rx_queue(rx_queue); + rc = efx_siena_probe_rx_queue(rx_queue); if (rc) goto fail; } @@ -751,9 +752,9 @@ void efx_siena_remove_channel(struct efx_channel *channel) "destroy chan %d\n", channel->channel); efx_for_each_channel_rx_queue(rx_queue, channel) - efx_remove_rx_queue(rx_queue); + efx_siena_remove_rx_queue(rx_queue); efx_for_each_channel_tx_queue(tx_queue, channel) - efx_remove_tx_queue(tx_queue); + efx_siena_remove_tx_queue(tx_queue); efx_remove_eventq(channel); channel->type->post_remove(channel); } @@ -963,7 +964,7 @@ int efx_siena_set_channels(struct efx_nic *efx) int rc; efx->tx_channel_offset = - efx_separate_tx_channels ? + efx_siena_separate_tx_channels ? 
efx->n_channels - efx->n_tx_channels : 0; if (efx->xdp_tx_queue_count) { @@ -1130,15 +1131,15 @@ void efx_siena_start_channels(struct efx_nic *efx) efx_for_each_channel_rev(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) { - efx_init_tx_queue(tx_queue); + efx_siena_init_tx_queue(tx_queue); atomic_inc(&efx->active_queues); } efx_for_each_channel_rx_queue(rx_queue, channel) { - efx_init_rx_queue(rx_queue); + efx_siena_init_rx_queue(rx_queue); atomic_inc(&efx->active_queues); efx_siena_stop_eventq(channel); - efx_fast_push_rx_descriptors(rx_queue, false); + efx_siena_fast_push_rx_descriptors(rx_queue, false); efx_siena_start_eventq(channel); } @@ -1184,9 +1185,9 @@ void efx_siena_stop_channels(struct efx_nic *efx) efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) - efx_fini_rx_queue(rx_queue); + efx_siena_fini_rx_queue(rx_queue); efx_for_each_channel_tx_queue(tx_queue, channel) - efx_fini_tx_queue(tx_queue); + efx_siena_fini_tx_queue(tx_queue); } } @@ -1228,7 +1229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget) efx_channel_get_rx_queue(channel); efx_rx_flush_packet(channel); - efx_fast_push_rx_descriptors(rx_queue, true); + efx_siena_fast_push_rx_descriptors(rx_queue, true); } /* Update BQL */ diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index fb6fb345cc56..f245d03c4caa 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -395,7 +395,7 @@ static void efx_start_datapath(struct efx_nic *efx) efx->rx_buffer_order = get_order(rx_buf_len); } - efx_rx_config_page_split(efx); + efx_siena_rx_config_page_split(efx); if (efx->rx_buffer_order) netif_dbg(efx, drv, efx->net_dev, "RX buf len=%u; page order=%u batch=%u\n", @@ -428,7 +428,7 @@ static void efx_start_datapath(struct efx_nic *efx) * the ring completely. We wake it when half way back to * empty. 
*/ - efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); + efx->txq_stop_thresh = efx->txq_entries - efx_siena_tx_max_skb_descs(efx); efx->txq_wake_thresh = efx->txq_stop_thresh / 2; /* Initialise the channels */ diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c index e177b58e0664..f54510cf4e72 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool_common.c +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -824,7 +824,8 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev, mutex_lock(&efx->rss_lock); if (info->flow_type & FLOW_RSS && info->rss_context) { - ctx = efx_find_rss_context_entry(efx, info->rss_context); + ctx = efx_siena_find_rss_context_entry(efx, + info->rss_context); if (!ctx) { rc = -ENOENT; goto out_unlock; @@ -1213,7 +1214,7 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, return -EOPNOTSUPP; mutex_lock(&efx->rss_lock); - ctx = efx_find_rss_context_entry(efx, rss_context); + ctx = efx_siena_find_rss_context_entry(efx, rss_context); if (!ctx) { rc = -ENOENT; goto out_unlock; @@ -1257,18 +1258,18 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, rc = -EINVAL; goto out_unlock; } - ctx = efx_alloc_rss_context_entry(efx); + ctx = efx_siena_alloc_rss_context_entry(efx); if (!ctx) { rc = -ENOMEM; goto out_unlock; } ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; /* Initialise indir table and key to defaults */ - efx_set_default_rx_indir_table(efx, ctx); + efx_siena_set_default_rx_indir_table(efx, ctx); netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); allocated = true; } else { - ctx = efx_find_rss_context_entry(efx, *rss_context); + ctx = efx_siena_find_rss_context_entry(efx, *rss_context); if (!ctx) { rc = -ENOENT; goto out_unlock; @@ -1279,7 +1280,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, /* delete this context */ rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); if (!rc) - efx_free_rss_context_entry(ctx); + efx_siena_free_rss_context_entry(ctx); goto out_unlock; } @@ -1290,7 +1291,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); if (rc && allocated) - efx_free_rss_context_entry(ctx); + efx_siena_free_rss_context_entry(ctx); else *rss_context = ctx->user_id; out_unlock: diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c index 6ee6ca192a44..4de36c6c28e1 100644 --- a/drivers/net/ethernet/sfc/siena/farch.c +++ b/drivers/net/ethernet/sfc/siena/farch.c @@ -1160,7 +1160,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel, /* The queue must be empty, so we won't receive any rx * events, so efx_process_channel() won't refill the * queue. 
Refill it here */ - efx_fast_push_rx_descriptors(rx_queue, true); + efx_siena_fast_push_rx_descriptors(rx_queue, true); } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { efx_farch_handle_drain_event(channel); } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { @@ -2925,13 +2925,14 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, */ arfs_id = 0; } else { - rule = efx_rps_hash_find(efx, &spec); + rule = efx_siena_rps_hash_find(efx, &spec); if (!rule) { /* ARFS table doesn't know of this filter, remove it */ force = true; } else { arfs_id = rule->arfs_id; - if (!efx_rps_check_rule(rule, index, &force)) + if (!efx_siena_rps_check_rule(rule, index, + &force)) goto out_unlock; } } @@ -2939,7 +2940,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, flow_id, arfs_id)) { if (rule) rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; - efx_rps_hash_del(efx, &spec); + efx_siena_rps_hash_del(efx, &spec); efx_farch_filter_table_clear_entry(efx, table, index); ret = true; } diff --git a/drivers/net/ethernet/sfc/siena/rx.c b/drivers/net/ethernet/sfc/siena/rx.c index 099cb23e3250..47c09b93f7c4 100644 --- a/drivers/net/ethernet/sfc/siena/rx.c +++ b/drivers/net/ethernet/sfc/siena/rx.c @@ -157,7 +157,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, */ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { efx_rx_flush_packet(channel); - efx_discard_rx_packet(channel, rx_buf, n_frags); + efx_siena_discard_rx_packet(channel, rx_buf, n_frags); return; } @@ -195,7 +195,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, /* All fragments have been DMA-synced, so recycle pages. */ rx_buf = efx_rx_buffer(rx_queue, index); - efx_recycle_rx_pages(channel, rx_buf, n_frags); + efx_siena_recycle_rx_pages(channel, rx_buf, n_frags); /* Pipeline receives so that we give time for packet headers to be * prefetched into cache. @@ -217,7 +217,7 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, struct efx_rx_queue *rx_queue; rx_queue = efx_channel_get_rx_queue(channel); - efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); return; } skb_record_rx_queue(skb, channel->rx_queue.core_index); @@ -268,8 +268,8 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, if (unlikely(channel->rx_pkt_n_frags > 1)) { /* We can't do XDP on fragmented packets - drop. 
*/ - efx_free_rx_buffers(rx_queue, rx_buf, - channel->rx_pkt_n_frags); + efx_siena_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); if (net_ratelimit()) netif_err(efx, rx_err, efx->net_dev, "XDP is not possible with multiple receive fragments (%d)\n", @@ -312,7 +312,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, xdpf = xdp_convert_buff_to_frame(&xdp); err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true); if (unlikely(err != 1)) { - efx_free_rx_buffers(rx_queue, rx_buf, 1); + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); if (net_ratelimit()) netif_err(efx, rx_err, efx->net_dev, "XDP TX failed (%d)\n", err); @@ -326,7 +326,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, case XDP_REDIRECT: err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog); if (unlikely(err)) { - efx_free_rx_buffers(rx_queue, rx_buf, 1); + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); if (net_ratelimit()) netif_err(efx, rx_err, efx->net_dev, "XDP redirect failed (%d)\n", err); @@ -339,7 +339,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, default: bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act); - efx_free_rx_buffers(rx_queue, rx_buf, 1); + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); channel->n_rx_xdp_bad_drops++; trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); break; @@ -348,7 +348,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); fallthrough; case XDP_DROP: - efx_free_rx_buffers(rx_queue, rx_buf, 1); + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); channel->n_rx_xdp_drops++; break; } @@ -379,8 +379,8 @@ void __efx_siena_rx_packet(struct efx_channel *channel) efx_loopback_rx_packet(efx, eh, rx_buf->len); rx_queue = efx_channel_get_rx_queue(channel); - efx_free_rx_buffers(rx_queue, rx_buf, - channel->rx_pkt_n_frags); + efx_siena_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); goto out; } diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c index 9fb442da043c..4579f43484c3 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.c +++ b/drivers/net/ethernet/sfc/siena/rx_common.c @@ -30,6 +30,9 @@ MODULE_PARM_DESC(rx_refill_threshold, */ #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS) +static void efx_unmap_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf); + /* Check the RX page recycle ring for a page that can be reused. */ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) { @@ -103,9 +106,9 @@ static void efx_recycle_rx_page(struct efx_channel *channel, } /* Recycle the pages that are used by buffers that have just been received. 
*/ -void efx_recycle_rx_pages(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - unsigned int n_frags) +void efx_siena_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) { struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); @@ -118,15 +121,15 @@ void efx_recycle_rx_pages(struct efx_channel *channel, } while (--n_frags); } -void efx_discard_rx_packet(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - unsigned int n_frags) +void efx_siena_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) { struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); - efx_recycle_rx_pages(channel, rx_buf, n_frags); + efx_siena_recycle_rx_pages(channel, rx_buf, n_frags); - efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); } static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue) @@ -181,12 +184,12 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, /* If this is the last buffer in a page, unmap and free it. */ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { efx_unmap_rx_buffer(rx_queue->efx, rx_buf); - efx_free_rx_buffers(rx_queue, rx_buf, 1); + efx_siena_free_rx_buffers(rx_queue, rx_buf, 1); } rx_buf->page = NULL; } -int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) +int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; unsigned int entries; @@ -217,7 +220,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) return rc; } -void efx_init_rx_queue(struct efx_rx_queue *rx_queue) +void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue) { unsigned int max_fill, trigger, max_trigger; struct efx_nic *efx = rx_queue->efx; @@ -272,7 +275,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) efx_nic_init_rx(rx_queue); } -void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) +void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue) { struct efx_rx_buffer *rx_buf; int i; @@ -301,7 +304,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) rx_queue->xdp_rxq_info_valid = false; } -void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) +void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue) { netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); @@ -315,8 +318,8 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) /* Unmap a DMA-mapped page. This function is only called for the final RX * buffer in a page. 
*/ -void efx_unmap_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf) +static void efx_unmap_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf) { struct page *page = rx_buf->page; @@ -330,9 +333,9 @@ void efx_unmap_rx_buffer(struct efx_nic *efx, } } -void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf, - unsigned int num_bufs) +void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs) { do { if (rx_buf->page) { @@ -343,7 +346,7 @@ void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, } while (--num_bufs); } -void efx_rx_slow_fill(struct timer_list *t) +void efx_siena_rx_slow_fill(struct timer_list *t) { struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); @@ -352,7 +355,7 @@ void efx_rx_slow_fill(struct timer_list *t) ++rx_queue->slow_fill_count; } -void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) +static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) { mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); } @@ -425,7 +428,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) return 0; } -void efx_rx_config_page_split(struct efx_nic *efx) +void efx_siena_rx_config_page_split(struct efx_nic *efx) { efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM, @@ -439,7 +442,7 @@ void efx_rx_config_page_split(struct efx_nic *efx) efx->rx_bufs_per_page); } -/* efx_fast_push_rx_descriptors - push new RX descriptors quickly +/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly * @rx_queue: RX descriptor queue * * This will aim to fill the RX descriptor queue up to @@ -450,7 +453,8 @@ void efx_rx_config_page_split(struct efx_nic *efx) * this means this function must run from the NAPI handler, or be called * when NAPI is disabled. */ -void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) +void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, + bool atomic) { struct efx_nic *efx = rx_queue->efx; unsigned int fill_level, batch_size; @@ -517,7 +521,7 @@ efx_siena_rx_packet_gro(struct efx_channel *channel, struct efx_rx_queue *rx_queue; rx_queue = efx_channel_get_rx_queue(channel); - efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags); return; } @@ -556,7 +560,7 @@ efx_siena_rx_packet_gro(struct efx_channel *channel, /* RSS contexts. 
We're using linked lists and crappy O(n) algorithms, because * (a) this is an infrequent control-plane operation and (b) n is small (max 64) */ -struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) +struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx) { struct list_head *head = &efx->rss_context.list; struct efx_rss_context *ctx, *new; @@ -589,7 +593,8 @@ struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) return new; } -struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) +struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx, + u32 id) { struct list_head *head = &efx->rss_context.list; struct efx_rss_context *ctx; @@ -602,14 +607,14 @@ struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) return NULL; } -void efx_free_rss_context_entry(struct efx_rss_context *ctx) +void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx) { list_del(&ctx->list); kfree(ctx); } -void efx_set_default_rx_indir_table(struct efx_nic *efx, - struct efx_rss_context *ctx) +void efx_siena_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx) { size_t i; @@ -619,7 +624,7 @@ void efx_set_default_rx_indir_table(struct efx_nic *efx, } /** - * efx_filter_is_mc_recipient - test whether spec is a multicast recipient + * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient * @spec: Specification to test * * Return: %true if the specification is a non-drop RX filter that @@ -627,7 +632,7 @@ void efx_set_default_rx_indir_table(struct efx_nic *efx, * IPv4 or IPv6 address value in the respective multicast address * range. Otherwise %false. */ -bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) +bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec) { if (!(spec->flags & EFX_FILTER_FLAG_RX) || spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) @@ -652,8 +657,8 @@ bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) return false; } -bool efx_filter_spec_equal(const struct efx_filter_spec *left, - const struct efx_filter_spec *right) +bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) { if ((left->match_flags ^ right->match_flags) | ((left->flags ^ right->flags) & @@ -665,7 +670,7 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left, offsetof(struct efx_filter_spec, outer_vid)) == 0; } -u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) +u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec) { BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); return jhash2((const u32 *)&spec->outer_vid, @@ -675,8 +680,8 @@ u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) } #ifdef CONFIG_RFS_ACCEL -bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, - bool *force) +bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule, + unsigned int filter_idx, bool *force) { if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { /* ARFS is currently updating this entry, leave it */ @@ -692,7 +697,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ /* ARFS has moved on, so old filter is not needed. Since we did * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will - * not be removed by efx_rps_hash_del() subsequently. 
+ * not be removed by efx_siena_rps_hash_del() subsequently. */ *force = true; return true; @@ -705,7 +710,7 @@ static struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, const struct efx_filter_spec *spec) { - u32 hash = efx_filter_spec_hash(spec); + u32 hash = efx_siena_filter_spec_hash(spec); lockdep_assert_held(&efx->rps_hash_lock); if (!efx->rps_hash_table) @@ -713,7 +718,7 @@ struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; } -struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, +struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx, const struct efx_filter_spec *spec) { struct efx_arfs_rule *rule; @@ -725,15 +730,15 @@ struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, return NULL; hlist_for_each(node, head) { rule = container_of(node, struct efx_arfs_rule, node); - if (efx_filter_spec_equal(spec, &rule->spec)) + if (efx_siena_filter_spec_equal(spec, &rule->spec)) return rule; } return NULL; } -struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, - const struct efx_filter_spec *spec, - bool *new) +static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) { struct efx_arfs_rule *rule; struct hlist_head *head; @@ -744,7 +749,7 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, return NULL; hlist_for_each(node, head) { rule = container_of(node, struct efx_arfs_rule, node); - if (efx_filter_spec_equal(spec, &rule->spec)) { + if (efx_siena_filter_spec_equal(spec, &rule->spec)) { *new = false; return rule; } @@ -758,7 +763,8 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, return rule; } -void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) +void efx_siena_rps_hash_del(struct efx_nic *efx, + const struct efx_filter_spec *spec) { struct efx_arfs_rule *rule; struct hlist_head *head; @@ -769,7 +775,7 @@ void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) return; hlist_for_each(node, head) { rule = container_of(node, struct efx_arfs_rule, node); - if (efx_filter_spec_equal(spec, &rule->spec)) { + if (efx_siena_filter_spec_equal(spec, &rule->spec)) { /* Someone already reused the entry. We know that if * this check doesn't fire (i.e. filter_id == REMOVING) * then the REMOVING mark was put there by our caller, @@ -788,7 +794,7 @@ void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) } #endif -int efx_probe_filters(struct efx_nic *efx) +int efx_siena_probe_filters(struct efx_nic *efx) { int rc; @@ -835,7 +841,7 @@ out_unlock: return rc; } -void efx_remove_filters(struct efx_nic *efx) +void efx_siena_remove_filters(struct efx_nic *efx) { #ifdef CONFIG_RFS_ACCEL struct efx_channel *channel; @@ -870,7 +876,7 @@ static void efx_filter_rfs_work(struct work_struct *data) rc %= efx->type->max_rx_ip_filters; if (efx->rps_hash_table) { spin_lock_bh(&efx->rps_hash_lock); - rule = efx_rps_hash_find(efx, &req->spec); + rule = efx_siena_rps_hash_find(efx, &req->spec); /* The rule might have already gone, if someone else's request * for the same spec was already worked and then expired before * we got around to our work. In that case we have nothing @@ -930,8 +936,9 @@ static void efx_filter_rfs_work(struct work_struct *data) /* We're overloading the NIC's filter tables, so let's do a * chunk of extra expiry work. 
*/ - __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, - 100u)); + __efx_siena_filter_rfs_expire(channel, + min(channel->rfs_filter_count, + 100u)); } /* Release references */ @@ -939,8 +946,8 @@ static void efx_filter_rfs_work(struct work_struct *data) dev_put(req->net_dev); } -int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, - u16 rxq_index, u32 flow_id) +int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_async_filter_insertion *req; @@ -1041,7 +1048,8 @@ out_clear: return rc; } -bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) +bool __efx_siena_filter_rfs_expire(struct efx_channel *channel, + unsigned int quota) { bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); struct efx_nic *efx = channel->efx; diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h index 909d06a4fdc9..6b37f83ecb30 100644 --- a/drivers/net/ethernet/sfc/siena/rx_common.h +++ b/drivers/net/ethernet/sfc/siena/rx_common.h @@ -43,26 +43,19 @@ static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) #endif } -void efx_rx_slow_fill(struct timer_list *t); - -void efx_recycle_rx_pages(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - unsigned int n_frags); -void efx_discard_rx_packet(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf, - unsigned int n_frags); - -int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); -void efx_init_rx_queue(struct efx_rx_queue *rx_queue); -void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); -void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); -void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue); - -void efx_init_rx_buffer(struct efx_rx_queue *rx_queue, - struct page *page, - unsigned int page_offset, - u16 flags); -void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf); +void efx_siena_rx_slow_fill(struct timer_list *t); + +void efx_siena_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); +void efx_siena_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags); + +int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue); +void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue); +void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue); +void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue); static inline void efx_sync_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf, @@ -72,46 +65,46 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, DMA_FROM_DEVICE); } -void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, - struct efx_rx_buffer *rx_buf, - unsigned int num_bufs); +void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs); -void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); -void efx_rx_config_page_split(struct efx_nic *efx); -void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); +void efx_siena_rx_config_page_split(struct efx_nic *efx); +void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, + bool atomic); void efx_siena_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, unsigned int n_frags, u8 *eh, __wsum csum); -struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); -struct 
efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); -void efx_free_rss_context_entry(struct efx_rss_context *ctx); -void efx_set_default_rx_indir_table(struct efx_nic *efx, - struct efx_rss_context *ctx); +struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx); +struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx, + u32 id); +void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx); +void efx_siena_set_default_rx_indir_table(struct efx_nic *efx, + struct efx_rss_context *ctx); -bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); -bool efx_filter_spec_equal(const struct efx_filter_spec *left, - const struct efx_filter_spec *right); -u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); +bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right); +u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec); #ifdef CONFIG_RFS_ACCEL -bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, - bool *force); -struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, +bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule, + unsigned int filter_idx, bool *force); +struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx, const struct efx_filter_spec *spec); -struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, - const struct efx_filter_spec *spec, - bool *new); -void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); - -int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, - u16 rxq_index, u32 flow_id); -bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota); +void efx_siena_rps_hash_del(struct efx_nic *efx, + const struct efx_filter_spec *spec); + +int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +bool __efx_siena_filter_rfs_expire(struct efx_channel *channel, + unsigned int quota); #endif -int efx_probe_filters(struct efx_nic *efx); -void efx_remove_filters(struct efx_nic *efx); +int efx_siena_probe_filters(struct efx_nic *efx); +void efx_siena_remove_filters(struct efx_nic *efx); #endif diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c index 118ec6f5f097..0677a0254d85 100644 --- a/drivers/net/ethernet/sfc/siena/tx.c +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -41,14 +41,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, return (u8 *)page_buf->addr + offset; } -u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer, size_t len) -{ - if (len > EFX_TX_CB_SIZE) - return NULL; - return efx_tx_get_copy_buffer(tx_queue, buffer); -} - static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) { /* We need to consider all queues that the net core sees as one */ @@ -164,7 +156,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, * size limit. */ if (segments) { - rc = efx_tx_tso_fallback(tx_queue, skb); + rc = efx_siena_tx_tso_fallback(tx_queue, skb); tx_queue->tso_fallbacks++; if (rc == 0) return 0; @@ -178,7 +170,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, } /* Map for DMA and create descriptors if we haven't done so already. 
*/ - if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) + if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments))) goto err; efx_tx_maybe_stop_queue(tx_queue); @@ -201,7 +193,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, err: - efx_enqueue_unwind(tx_queue, old_insert_count); + efx_siena_enqueue_unwind(tx_queue, old_insert_count); dev_kfree_skb_any(skb); /* If we're not expecting another transmit and we had something to push @@ -285,7 +277,7 @@ int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpf break; /* Create descriptor and set up for unmapping DMA. */ - tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); + tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len); tx_buffer->xdpf = xdpf; tx_buffer->flags = EFX_TX_BUF_XDP | EFX_TX_BUF_MAP_SINGLE; diff --git a/drivers/net/ethernet/sfc/siena/tx.h b/drivers/net/ethernet/sfc/siena/tx.h index f2c4d2f89919..ee801950c909 100644 --- a/drivers/net/ethernet/sfc/siena/tx.h +++ b/drivers/net/ethernet/sfc/siena/tx.h @@ -11,13 +11,6 @@ #include /* Driver internal tx-path related declarations. */ - -unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue, - dma_addr_t dma_addr, unsigned int len); - -u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer, size_t len); - /* What TXQ type will satisfy the checksum offloads required for this skb? */ static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb) { diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c index 7945fe681e29..66adc8525a3a 100644 --- a/drivers/net/ethernet/sfc/siena/tx_common.c +++ b/drivers/net/ethernet/sfc/siena/tx_common.c @@ -19,7 +19,7 @@ static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) PAGE_SIZE >> EFX_TX_CB_ORDER); } -int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) +int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; unsigned int entries; @@ -64,7 +64,7 @@ fail1: return rc; } -void efx_init_tx_queue(struct efx_tx_queue *tx_queue) +void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; @@ -94,32 +94,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->initialised = true; } -void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) -{ - struct efx_tx_buffer *buffer; - - netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, - "shutting down TX queue %d\n", tx_queue->queue); - - tx_queue->initialised = false; - - if (!tx_queue->buffer) - return; - - /* Free any buffers left in the ring */ - while (tx_queue->read_count != tx_queue->write_count) { - unsigned int pkts_compl = 0, bytes_compl = 0; - - buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; - efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); - - ++tx_queue->read_count; - } - tx_queue->xmit_pending = false; - netdev_tx_reset_queue(tx_queue->core_txq); -} - -void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) +void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue) { int i; @@ -143,10 +118,10 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL; } -void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer, - unsigned int *pkts_compl, - unsigned int *bytes_compl) +static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, + unsigned int 
*pkts_compl, + unsigned int *bytes_compl) { if (buffer->unmap_len) { struct device *dma_dev = &tx_queue->efx->pci_dev->dev; @@ -191,6 +166,29 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, buffer->flags = 0; } +void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "shutting down TX queue %d\n", tx_queue->queue); + + if (!tx_queue->buffer) + return; + + /* Free any buffers left in the ring */ + while (tx_queue->read_count != tx_queue->write_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; + + buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + } + tx_queue->xmit_pending = false; + netdev_tx_reset_queue(tx_queue->core_txq); +} + /* Remove packets from the TX queue * * This removes packets from the TX queue, up to and including the @@ -271,8 +269,8 @@ void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) /* Remove buffers put into a tx_queue for the current packet. * None of the buffers must have an skb attached. */ -void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, - unsigned int insert_count) +void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count) { struct efx_tx_buffer *buffer; unsigned int bytes_compl = 0; @@ -286,8 +284,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, } } -struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, - dma_addr_t dma_addr, size_t len) +struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len) { const struct efx_nic_type *nic_type = tx_queue->efx->type; struct efx_tx_buffer *buffer; @@ -313,7 +311,7 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, return buffer; } -int efx_tx_tso_header_length(struct sk_buff *skb) +static int efx_tx_tso_header_length(struct sk_buff *skb) { size_t header_len; @@ -328,8 +326,8 @@ int efx_tx_tso_header_length(struct sk_buff *skb) } /* Map all data from an SKB for DMA and create descriptors on the queue. */ -int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, - unsigned int segment_count) +int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count) { struct efx_nic *efx = tx_queue->efx; struct device *dma_dev = &efx->pci_dev->dev; @@ -359,7 +357,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, if (header_len != len) { tx_queue->tso_long_headers++; - efx_tx_map_chunk(tx_queue, dma_addr, header_len); + efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len); len -= header_len; dma_addr += header_len; } @@ -370,7 +368,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, struct efx_tx_buffer *buffer; skb_frag_t *fragment; - buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); + buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len); /* The final descriptor for a fragment is responsible for * unmapping the whole fragment. 
@@ -402,7 +400,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, } while (1); } -unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx) { /* Header and payload descriptor for each output segment, plus * one for every input fragment boundary within a segment @@ -430,7 +428,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) * * Returns 0 on success, error code otherwise. */ -int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) { struct sk_buff *segments, *next; diff --git a/drivers/net/ethernet/sfc/siena/tx_common.h b/drivers/net/ethernet/sfc/siena/tx_common.h index 602f5a052918..31ca52a25015 100644 --- a/drivers/net/ethernet/sfc/siena/tx_common.h +++ b/drivers/net/ethernet/sfc/siena/tx_common.h @@ -11,15 +11,10 @@ #ifndef EFX_TX_COMMON_H #define EFX_TX_COMMON_H -int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); -void efx_init_tx_queue(struct efx_tx_queue *tx_queue); -void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); -void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); - -void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer, - unsigned int *pkts_compl, - unsigned int *bytes_compl); +int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue); +void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue); +void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue); +void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue); static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) { @@ -29,17 +24,16 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue); void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, - unsigned int insert_count); +void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count); -struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, - dma_addr_t dma_addr, size_t len); -int efx_tx_tso_header_length(struct sk_buff *skb); -int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, - unsigned int segment_count); +struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, size_t len); +int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + unsigned int segment_count); -unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); -int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx); +int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); -extern bool efx_separate_tx_channels; +extern bool efx_siena_separate_tx_channels; #endif -- cgit v1.2.3 From 95e96f7788d0ccea7e70322ce75b873d43124dc9 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:32:33 +0100 Subject: sfc/siena: Rename peripheral functions to avoid conflicts with sfc For siena use efx_siena_ as the function prefix. This patch covers selftest.h, ptp.h, net_driver.h and ethtool_common.h. efx_ethtool_fill_self_tests() can become static. Some functions in ptp.c can also become static. Rename loopback_mode in net_driver.h. 
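Both renaming patches in this series follow the same mechanical pattern, so a short sketch may help readers unfamiliar with it. The identifiers below (example_siena_separate_tx_channels, example_fill_self_tests, example_init) are hypothetical stand-ins rather than names from the driver: module_param_named() lets the C variable carry a driver-specific prefix while the user-visible parameter name stays "efx_separate_tx_channels", so existing modprobe options keep working; and a helper that is now called from only one translation unit is marked static and its prototype dropped from the shared header, so the sfc and sfc-siena builds can no longer export clashing symbols.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Renamed C variable, unchanged user-visible module parameter name. */
bool example_siena_separate_tx_channels;
module_param_named(efx_separate_tx_channels,
		   example_siena_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* Helper now used only in this file: static, no shared-header prototype. */
static int example_fill_self_tests(void)
{
	/* A real implementation would fill in the ethtool self-test table. */
	return 0;
}

static int __init example_init(void)
{
	return example_fill_self_tests();
}
module_init(example_init);

MODULE_LICENSE("GPL");

This is only an illustration of the rename pattern under the assumptions above, not code taken from, or intended for, the sfc or sfc-siena drivers.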
Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx.c | 6 +- drivers/net/ethernet/sfc/siena/efx_common.c | 12 +- drivers/net/ethernet/sfc/siena/ethtool.c | 50 +++--- drivers/net/ethernet/sfc/siena/ethtool_common.c | 199 ++++++++++++------------ drivers/net/ethernet/sfc/siena/ethtool_common.h | 95 ++++++----- drivers/net/ethernet/sfc/siena/mcdi.c | 4 +- drivers/net/ethernet/sfc/siena/net_driver.h | 6 +- drivers/net/ethernet/sfc/siena/ptp.c | 53 +++---- drivers/net/ethernet/sfc/siena/ptp.h | 46 +++--- drivers/net/ethernet/sfc/siena/rx.c | 2 +- drivers/net/ethernet/sfc/siena/selftest.c | 24 +-- drivers/net/ethernet/sfc/siena/selftest.h | 14 +- drivers/net/ethernet/sfc/siena/siena.c | 17 +- drivers/net/ethernet/sfc/siena/tx.c | 6 +- drivers/net/ethernet/sfc/siena/tx_common.c | 6 +- 15 files changed, 270 insertions(+), 270 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index d94e2438ae3a..3b79c39300a4 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -494,9 +494,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) struct mii_ioctl_data *data = if_mii(ifr); if (cmd == SIOCSHWTSTAMP) - return efx_ptp_set_ts_config(efx, ifr); + return efx_siena_ptp_set_ts_config(efx, ifr); if (cmd == SIOCGHWTSTAMP) - return efx_ptp_get_ts_config(efx, ifr); + return efx_siena_ptp_get_ts_config(efx, ifr); /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && @@ -536,7 +536,7 @@ static int efx_net_open(struct net_device *net_dev) efx_siena_start_all(efx); if (efx->state == STATE_DISABLED || efx->reset_pending) netif_device_detach(efx->net_dev); - efx_selftest_async_start(efx); + efx_siena_selftest_async_start(efx); return 0; } diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index f245d03c4caa..6b524775c929 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -73,8 +73,8 @@ static const char *const efx_reset_type_names[] = { STRING_TABLE_LOOKUP(type, efx_reset_type) /* Loopback mode names (see LOOPBACK_MODE()) */ -const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; -const char *const efx_loopback_mode_names[] = { +const unsigned int efx_siena_loopback_mode_max = LOOPBACK_MAX; +const char *const efx_siena_loopback_mode_names[] = { [LOOPBACK_NONE] = "NONE", [LOOPBACK_DATA] = "DATAPATH", [LOOPBACK_GMAC] = "GMAC", @@ -434,7 +434,7 @@ static void efx_start_datapath(struct efx_nic *efx) /* Initialise the channels */ efx_siena_start_channels(efx); - efx_ptp_start_datapath(efx); + efx_siena_ptp_start_datapath(efx); if (netif_device_present(efx->net_dev)) netif_tx_wake_all_queues(efx->net_dev); @@ -445,7 +445,7 @@ static void efx_stop_datapath(struct efx_nic *efx) EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); - efx_ptp_stop_datapath(efx); + efx_siena_ptp_stop_datapath(efx); efx_siena_stop_channels(efx); } @@ -514,7 +514,7 @@ static void efx_stop_port(struct efx_nic *efx) netif_addr_unlock_bh(efx->net_dev); cancel_delayed_work_sync(&efx->monitor_work); - efx_selftest_async_cancel(efx); + efx_siena_selftest_async_cancel(efx); cancel_work_sync(&efx->mac_work); } @@ -994,7 +994,7 @@ int efx_siena_init_struct(struct efx_nic *efx, #endif INIT_WORK(&efx->reset_work, efx_reset_work); INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); - 
efx_selftest_async_init(efx); + efx_siena_selftest_async_init(efx); efx->pci_dev = pci_dev; efx->msg_enable = debug; efx->state = STATE_UNINIT; diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 7aa621e97212..63388bec421d 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -235,7 +235,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, SOF_TIMESTAMPING_SOFTWARE); ts_info->phc_index = -1; - efx_ptp_get_ts_info(efx, ts_info); + efx_siena_ptp_get_ts_info(efx, ts_info); return 0; } @@ -243,40 +243,40 @@ const struct ethtool_ops efx_siena_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, - .get_drvinfo = efx_ethtool_get_drvinfo, + .get_drvinfo = efx_siena_ethtool_get_drvinfo, .get_regs_len = efx_ethtool_get_regs_len, .get_regs = efx_ethtool_get_regs, - .get_msglevel = efx_ethtool_get_msglevel, - .set_msglevel = efx_ethtool_set_msglevel, + .get_msglevel = efx_siena_ethtool_get_msglevel, + .set_msglevel = efx_siena_ethtool_set_msglevel, .get_link = ethtool_op_get_link, .get_coalesce = efx_ethtool_get_coalesce, .set_coalesce = efx_ethtool_set_coalesce, .get_ringparam = efx_ethtool_get_ringparam, .set_ringparam = efx_ethtool_set_ringparam, - .get_pauseparam = efx_ethtool_get_pauseparam, - .set_pauseparam = efx_ethtool_set_pauseparam, - .get_sset_count = efx_ethtool_get_sset_count, - .self_test = efx_ethtool_self_test, - .get_strings = efx_ethtool_get_strings, + .get_pauseparam = efx_siena_ethtool_get_pauseparam, + .set_pauseparam = efx_siena_ethtool_set_pauseparam, + .get_sset_count = efx_siena_ethtool_get_sset_count, + .self_test = efx_siena_ethtool_self_test, + .get_strings = efx_siena_ethtool_get_strings, .set_phys_id = efx_ethtool_phys_id, - .get_ethtool_stats = efx_ethtool_get_stats, + .get_ethtool_stats = efx_siena_ethtool_get_stats, .get_wol = efx_ethtool_get_wol, .set_wol = efx_ethtool_set_wol, - .reset = efx_ethtool_reset, - .get_rxnfc = efx_ethtool_get_rxnfc, - .set_rxnfc = efx_ethtool_set_rxnfc, - .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, - .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, - .get_rxfh = efx_ethtool_get_rxfh, - .set_rxfh = efx_ethtool_set_rxfh, - .get_rxfh_context = efx_ethtool_get_rxfh_context, - .set_rxfh_context = efx_ethtool_set_rxfh_context, + .reset = efx_siena_ethtool_reset, + .get_rxnfc = efx_siena_ethtool_get_rxnfc, + .set_rxnfc = efx_siena_ethtool_set_rxnfc, + .get_rxfh_indir_size = efx_siena_ethtool_get_rxfh_indir_size, + .get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size, + .get_rxfh = efx_siena_ethtool_get_rxfh, + .set_rxfh = efx_siena_ethtool_set_rxfh, + .get_rxfh_context = efx_siena_ethtool_get_rxfh_context, + .set_rxfh_context = efx_siena_ethtool_set_rxfh_context, .get_ts_info = efx_ethtool_get_ts_info, - .get_module_info = efx_ethtool_get_module_info, - .get_module_eeprom = efx_ethtool_get_module_eeprom, - .get_link_ksettings = efx_ethtool_get_link_ksettings, - .set_link_ksettings = efx_ethtool_set_link_ksettings, + .get_module_info = efx_siena_ethtool_get_module_info, + .get_module_eeprom = efx_siena_ethtool_get_module_eeprom, + .get_link_ksettings = efx_siena_ethtool_get_link_ksettings, + .set_link_ksettings = efx_siena_ethtool_set_link_ksettings, .get_fec_stats = efx_ethtool_get_fec_stats, - .get_fecparam = efx_ethtool_get_fecparam, - .set_fecparam = efx_ethtool_set_fecparam, + .get_fecparam = efx_siena_ethtool_get_fecparam, + 
.set_fecparam = efx_siena_ethtool_set_fecparam, }; diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c index f54510cf4e72..91f750e4ede8 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool_common.c +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -100,8 +100,8 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { #define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) -void efx_ethtool_get_drvinfo(struct net_device *net_dev, - struct ethtool_drvinfo *info) +void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) { struct efx_nic *efx = netdev_priv(net_dev); @@ -111,70 +111,22 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev, strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } -u32 efx_ethtool_get_msglevel(struct net_device *net_dev) +u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return efx->msg_enable; } -void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) +void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) { struct efx_nic *efx = netdev_priv(net_dev); efx->msg_enable = msg_enable; } -void efx_ethtool_self_test(struct net_device *net_dev, - struct ethtool_test *test, u64 *data) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_self_tests *efx_tests; - bool already_up; - int rc = -ENOMEM; - - efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); - if (!efx_tests) - goto fail; - - if (efx->state != STATE_READY) { - rc = -EBUSY; - goto out; - } - - netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", - (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); - - /* We need rx buffers and interrupts. */ - already_up = (efx->net_dev->flags & IFF_UP); - if (!already_up) { - rc = dev_open(efx->net_dev, NULL); - if (rc) { - netif_err(efx, drv, efx->net_dev, - "failed opening device.\n"); - goto out; - } - } - - rc = efx_selftest(efx, efx_tests, test->flags); - - if (!already_up) - dev_close(efx->net_dev); - - netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", - rc == 0 ? "passed" : "failed", - (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); - -out: - efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); - kfree(efx_tests); -fail: - if (rc) - test->flags |= ETH_TEST_FL_FAILED; -} - -void efx_ethtool_get_pauseparam(struct net_device *net_dev, - struct ethtool_pauseparam *pause) +void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); @@ -183,8 +135,8 @@ void efx_ethtool_get_pauseparam(struct net_device *net_dev, pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); } -int efx_ethtool_set_pauseparam(struct net_device *net_dev, - struct ethtool_pauseparam *pause) +int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) { struct efx_nic *efx = netdev_priv(net_dev); u8 wanted_fc, old_fc; @@ -281,7 +233,7 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label #define EFX_LOOPBACK_NAME(_mode, _counter) \ - "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) + "loopback.%s." 
_counter, STRING_TABLE_LOOKUP(_mode, efx_siena_loopback_mode) /** * efx_fill_loopback_test - fill in a block of loopback self-test entries @@ -340,9 +292,9 @@ static int efx_fill_loopback_test(struct efx_nic *efx, * The reason for merging these three functions is to make sure that * they can never be inconsistent. */ -int efx_ethtool_fill_self_tests(struct efx_nic *efx, - struct efx_self_tests *tests, - u8 *strings, u64 *data) +static int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + u8 *strings, u64 *data) { struct efx_channel *channel; unsigned int n = 0, i; @@ -395,6 +347,54 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx, return n; } +void efx_siena_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_self_tests *efx_tests; + bool already_up; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + + if (efx->state != STATE_READY) { + rc = -EBUSY; + goto out; + } + + netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up) { + rc = dev_open(efx->net_dev, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed opening device.\n"); + goto out; + } + } + + rc = efx_siena_selftest(efx, efx_tests, test->flags); + + if (!already_up) + dev_close(efx->net_dev); + + netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", + rc == 0 ? "passed" : "failed", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + +out: + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) { size_t n_stats = 0; @@ -439,7 +439,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) return n_stats; } -int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) +int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set) { struct efx_nic *efx = netdev_priv(net_dev); @@ -448,7 +448,7 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) return efx->type->describe_stats(efx, NULL) + EFX_ETHTOOL_SW_STAT_COUNT + efx_describe_per_queue_stats(efx, NULL) + - efx_ptp_describe_stats(efx, NULL); + efx_siena_ptp_describe_stats(efx, NULL); case ETH_SS_TEST: return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); default: @@ -456,8 +456,8 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set) } } -void efx_ethtool_get_strings(struct net_device *net_dev, - u32 string_set, u8 *strings) +void efx_siena_ethtool_get_strings(struct net_device *net_dev, + u32 string_set, u8 *strings) { struct efx_nic *efx = netdev_priv(net_dev); int i; @@ -472,7 +472,7 @@ void efx_ethtool_get_strings(struct net_device *net_dev, strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; strings += (efx_describe_per_queue_stats(efx, strings) * ETH_GSTRING_LEN); - efx_ptp_describe_stats(efx, strings); + efx_siena_ptp_describe_stats(efx, strings); break; case ETH_SS_TEST: efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); @@ -483,9 +483,9 @@ void efx_ethtool_get_strings(struct net_device *net_dev, } } -void efx_ethtool_get_stats(struct net_device *net_dev, - struct ethtool_stats *stats, - u64 *data) +void 
efx_siena_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats, + u64 *data) { struct efx_nic *efx = netdev_priv(net_dev); const struct efx_sw_stat_desc *stat; @@ -554,12 +554,12 @@ void efx_ethtool_get_stats(struct net_device *net_dev, } } - efx_ptp_update_stats(efx, data); + efx_siena_ptp_update_stats(efx, data); } /* This must be called with rtnl_lock held. */ -int efx_ethtool_get_link_ksettings(struct net_device *net_dev, - struct ethtool_link_ksettings *cmd) +int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_link_state *link_state = &efx->link_state; @@ -581,8 +581,9 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev, } /* This must be called with rtnl_lock held. */ -int efx_ethtool_set_link_ksettings(struct net_device *net_dev, - const struct ethtool_link_ksettings *cmd) +int +efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -601,8 +602,8 @@ int efx_ethtool_set_link_ksettings(struct net_device *net_dev, return rc; } -int efx_ethtool_get_fecparam(struct net_device *net_dev, - struct ethtool_fecparam *fecparam) +int efx_siena_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -614,8 +615,8 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev, return rc; } -int efx_ethtool_set_fecparam(struct net_device *net_dev, - struct ethtool_fecparam *fecparam) +int efx_siena_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -806,8 +807,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, return rc; } -int efx_ethtool_get_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info, u32 *rule_locs) +int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) { struct efx_nic *efx = netdev_priv(net_dev); u32 rss_context = 0; @@ -1125,8 +1126,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, return 0; } -int efx_ethtool_set_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info) +int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info) { struct efx_nic *efx = netdev_priv(net_dev); @@ -1147,7 +1148,7 @@ int efx_ethtool_set_rxnfc(struct net_device *net_dev, } } -u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) +u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); @@ -1156,15 +1157,15 @@ u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) return ARRAY_SIZE(efx->rss_context.rx_indir_table); } -u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); return efx->type->rx_hash_key_size; } -int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, - u8 *hfunc) +int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -1184,8 +1185,8 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, return 0; } -int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, - const u8 *key, const u8 hfunc) +int 
efx_siena_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, + const u8 *key, const u8 hfunc) { struct efx_nic *efx = netdev_priv(net_dev); @@ -1203,8 +1204,8 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, return efx->type->rx_push_rss_config(efx, true, indir, key); } -int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, - u8 *key, u8 *hfunc, u32 rss_context) +int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_rss_context *ctx; @@ -1234,10 +1235,10 @@ out_unlock: return rc; } -int efx_ethtool_set_rxfh_context(struct net_device *net_dev, - const u32 *indir, const u8 *key, - const u8 hfunc, u32 *rss_context, - bool delete) +int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_rss_context *ctx; @@ -1299,7 +1300,7 @@ out_unlock: return rc; } -int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) +int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -1311,9 +1312,9 @@ int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) return efx_siena_reset(efx, rc); } -int efx_ethtool_get_module_eeprom(struct net_device *net_dev, - struct ethtool_eeprom *ee, - u8 *data) +int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data) { struct efx_nic *efx = netdev_priv(net_dev); int ret; @@ -1325,8 +1326,8 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev, return ret; } -int efx_ethtool_get_module_info(struct net_device *net_dev, - struct ethtool_modinfo *modinfo) +int efx_siena_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo) { struct efx_nic *efx = netdev_priv(net_dev); int ret; diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h index 659491932101..04b375dc6800 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool_common.h +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h @@ -11,53 +11,50 @@ #ifndef EFX_ETHTOOL_COMMON_H #define EFX_ETHTOOL_COMMON_H -void efx_ethtool_get_drvinfo(struct net_device *net_dev, - struct ethtool_drvinfo *info); -u32 efx_ethtool_get_msglevel(struct net_device *net_dev); -void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); -void efx_ethtool_self_test(struct net_device *net_dev, - struct ethtool_test *test, u64 *data); -void efx_ethtool_get_pauseparam(struct net_device *net_dev, - struct ethtool_pauseparam *pause); -int efx_ethtool_set_pauseparam(struct net_device *net_dev, - struct ethtool_pauseparam *pause); -int efx_ethtool_fill_self_tests(struct efx_nic *efx, - struct efx_self_tests *tests, - u8 *strings, u64 *data); -int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set); -void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, - u8 *strings); -void efx_ethtool_get_stats(struct net_device *net_dev, - struct ethtool_stats *stats __attribute__ ((unused)), - u64 *data); -int efx_ethtool_get_link_ksettings(struct net_device *net_dev, - struct ethtool_link_ksettings *out); -int efx_ethtool_set_link_ksettings(struct net_device *net_dev, - const struct ethtool_link_ksettings *settings); -int efx_ethtool_get_fecparam(struct net_device *net_dev, - 
struct ethtool_fecparam *fecparam); -int efx_ethtool_set_fecparam(struct net_device *net_dev, - struct ethtool_fecparam *fecparam); -int efx_ethtool_get_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info, u32 *rule_locs); -int efx_ethtool_set_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info); -u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev); -u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev); -int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, - u8 *hfunc); -int efx_ethtool_set_rxfh(struct net_device *net_dev, - const u32 *indir, const u8 *key, const u8 hfunc); -int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, - u8 *key, u8 *hfunc, u32 rss_context); -int efx_ethtool_set_rxfh_context(struct net_device *net_dev, - const u32 *indir, const u8 *key, - const u8 hfunc, u32 *rss_context, - bool delete); -int efx_ethtool_reset(struct net_device *net_dev, u32 *flags); -int efx_ethtool_get_module_eeprom(struct net_device *net_dev, - struct ethtool_eeprom *ee, - u8 *data); -int efx_ethtool_get_module_info(struct net_device *net_dev, - struct ethtool_modinfo *modinfo); +void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info); +u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev); +void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); +void efx_siena_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data); +void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); +int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set); +void efx_siena_ethtool_get_strings(struct net_device *net_dev, u32 string_set, + u8 *strings); +void efx_siena_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats __always_unused, + u64 *data); +int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *out); +int efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *settings); +int efx_siena_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_siena_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); +int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info); +u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev); +u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev); +int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc); +int efx_siena_ethtool_set_rxfh(struct net_device *net_dev, + const u32 *indir, const u8 *key, const u8 hfunc); +int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context); +int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete); +int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags); +int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data); +int efx_siena_ethtool_get_module_info(struct net_device *net_dev, + struct 
ethtool_modinfo *modinfo); #endif diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c index 7f8f0889bf8d..ff426b228cb2 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.c +++ b/drivers/net/ethernet/sfc/siena/mcdi.c @@ -1363,10 +1363,10 @@ void efx_mcdi_process_event(struct efx_channel *channel, case MCDI_EVENT_CODE_PTP_RX: case MCDI_EVENT_CODE_PTP_FAULT: case MCDI_EVENT_CODE_PTP_PPS: - efx_ptp_event(efx, event); + efx_siena_ptp_event(efx, event); break; case MCDI_EVENT_CODE_PTP_TIME: - efx_time_sync_event(channel, event); + efx_siena_time_sync_event(channel, event); break; case MCDI_EVENT_CODE_TX_FLUSH: case MCDI_EVENT_CODE_RX_FLUSH: diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h index 3fe93f25a569..7e0659be4348 100644 --- a/drivers/net/ethernet/sfc/siena/net_driver.h +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -606,10 +606,10 @@ enum efx_led_mode { #define STRING_TABLE_LOOKUP(val, member) \ ((val) < member ## _max) ? member ## _names[val] : "(invalid)" -extern const char *const efx_loopback_mode_names[]; -extern const unsigned int efx_loopback_mode_max; +extern const char *const efx_siena_loopback_mode_names[]; +extern const unsigned int efx_siena_loopback_mode_max; #define LOOPBACK_MODE(efx) \ - STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) + STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_siena_loopback_mode) enum efx_int_mode { /* Be careful if altering to correct macro below */ diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index daf23070d353..b67417063a80 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -355,7 +355,7 @@ static int efx_phc_settime(struct ptp_clock_info *ptp, static int efx_phc_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on); -bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx) +bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx) { return efx_has_cap(efx, TX_MAC_TIMESTAMPING); } @@ -365,7 +365,7 @@ bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx) */ static bool efx_ptp_want_txqs(struct efx_channel *channel) { - return efx_ptp_use_mac_tx_timestamps(channel->efx); + return efx_siena_ptp_use_mac_tx_timestamps(channel->efx); } #define PTP_SW_STAT(ext_name, field_name) \ @@ -393,7 +393,7 @@ static const unsigned long efx_ptp_stat_mask[] = { [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, }; -size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings) { if (!efx->ptp_data) return 0; @@ -402,7 +402,7 @@ size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) efx_ptp_stat_mask, strings); } -size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) +size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats) { MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN); @@ -536,14 +536,14 @@ static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor, return kt; } -struct efx_channel *efx_ptp_channel(struct efx_nic *efx) +struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx) { return efx->ptp_data ? 
efx->ptp_data->channel : NULL; } static u32 last_sync_timestamp_major(struct efx_nic *efx) { - struct efx_channel *channel = efx_ptp_channel(efx); + struct efx_channel *channel = efx_siena_ptp_channel(efx); u32 major = 0; if (channel) @@ -606,13 +606,13 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx, return kt; } -ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue) +ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; struct efx_ptp_data *ptp = efx->ptp_data; ktime_t kt; - if (efx_ptp_use_mac_tx_timestamps(efx)) + if (efx_siena_ptp_use_mac_tx_timestamps(efx)) kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp, tx_queue->completed_timestamp_major, tx_queue->completed_timestamp_minor, @@ -1437,7 +1437,7 @@ static const struct ptp_clock_info efx_phc_clock_info = { }; /* Initialise PTP state. */ -int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) +static int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) { struct efx_ptp_data *ptp; int rc = 0; @@ -1464,7 +1464,7 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) goto fail2; } - if (efx_ptp_use_mac_tx_timestamps(efx)) { + if (efx_siena_ptp_use_mac_tx_timestamps(efx)) { ptp->xmit_skb = efx_ptp_xmit_skb_queue; /* Request sync events on this channel. */ channel->sync_events_state = SYNC_EVENTS_QUIESCENT; @@ -1553,7 +1553,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) return 0; } -void efx_ptp_remove(struct efx_nic *efx) +static void efx_ptp_remove(struct efx_nic *efx) { if (!efx->ptp_data) return; @@ -1593,7 +1593,7 @@ static void efx_ptp_get_channel_name(struct efx_channel *channel, /* Determine whether this packet should be processed by the PTP module * or transmitted conventionally. */ -bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) { return efx->ptp_data && efx->ptp_data->enabled && @@ -1699,7 +1699,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) * itself, through an MCDI call. MCDI calls aren't permitted * in the transmit path so defer the actual transmission to a suitable worker. 
*/ -int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) { struct efx_ptp_data *ptp = efx->ptp_data; @@ -1713,13 +1713,13 @@ int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) return NETDEV_TX_OK; } -int efx_ptp_get_mode(struct efx_nic *efx) +int efx_siena_ptp_get_mode(struct efx_nic *efx) { return efx->ptp_data->mode; } -int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, - unsigned int new_mode) +int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode) { if ((enable_wanted != efx->ptp_data->enabled) || (enable_wanted && (efx->ptp_data->mode != new_mode))) { @@ -1777,7 +1777,8 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) return 0; } -void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) +void efx_siena_ptp_get_ts_info(struct efx_nic *efx, + struct ethtool_ts_info *ts_info) { struct efx_ptp_data *ptp = efx->ptp_data; struct efx_nic *primary = efx->primary; @@ -1797,7 +1798,7 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) ts_info->rx_filters = ptp->efx->type->hwtstamp_filters; } -int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) +int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) { struct hwtstamp_config config; int rc; @@ -1817,7 +1818,7 @@ int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) ? -EFAULT : 0; } -int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr) +int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr) { if (!efx->ptp_data) return -EOPNOTSUPP; @@ -1898,7 +1899,7 @@ static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp) queue_work(ptp->pps_workwq, &ptp->pps_work); } -void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) +void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev) { struct efx_ptp_data *ptp = efx->ptp_data; int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); @@ -1949,7 +1950,7 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) } } -void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) +void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) { struct efx_nic *efx = channel->efx; struct efx_ptp_data *ptp = efx->ptp_data; @@ -1985,8 +1986,8 @@ static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh) #endif } -void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, - struct sk_buff *skb) +void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) { struct efx_nic *efx = channel->efx; struct efx_ptp_data *ptp = efx->ptp_data; @@ -2171,7 +2172,7 @@ static const struct efx_channel_type efx_ptp_channel_type = { .keep_eventq = false, }; -void efx_ptp_defer_probe_with_channel(struct efx_nic *efx) +void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx) { /* Check whether PTP is implemented on this NIC. The DISABLE * operation will succeed if and only if it is implemented. 
@@ -2181,7 +2182,7 @@ void efx_ptp_defer_probe_with_channel(struct efx_nic *efx) &efx_ptp_channel_type; } -void efx_ptp_start_datapath(struct efx_nic *efx) +void efx_siena_ptp_start_datapath(struct efx_nic *efx) { if (efx_ptp_restart(efx)) netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); @@ -2190,7 +2191,7 @@ void efx_ptp_start_datapath(struct efx_nic *efx) efx->type->ptp_set_ts_sync_events(efx, true, true); } -void efx_ptp_stop_datapath(struct efx_nic *efx) +void efx_siena_ptp_stop_datapath(struct efx_nic *efx) { /* temporarily disable timestamping */ if (efx->type->ptp_set_ts_sync_events) diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h index 9855e8c9e544..4172f90e9f6f 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.h +++ b/drivers/net/ethernet/sfc/siena/ptp.h @@ -13,33 +13,33 @@ #include "net_driver.h" struct ethtool_ts_info; -int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); -void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); -struct efx_channel *efx_ptp_channel(struct efx_nic *efx); -void efx_ptp_remove(struct efx_nic *efx); -int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); -int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); -void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); -bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); -int efx_ptp_get_mode(struct efx_nic *efx); -int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, - unsigned int new_mode); -int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); -void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); -size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); -size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); -void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); -void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, - struct sk_buff *skb); +void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx); +struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx); +int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); +int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); +void efx_siena_ptp_get_ts_info(struct efx_nic *efx, + struct ethtool_ts_info *ts_info); +bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +int efx_siena_ptp_get_mode(struct efx_nic *efx); +int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode); +int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings); +size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats); +void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); +void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb); static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, struct sk_buff *skb) { if (channel->sync_events_state == SYNC_EVENTS_VALID) - __efx_rx_skb_attach_timestamp(channel, skb); + __efx_siena_rx_skb_attach_timestamp(channel, skb); } -void efx_ptp_start_datapath(struct efx_nic *efx); -void efx_ptp_stop_datapath(struct efx_nic *efx); -bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx); -ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue); + +void efx_siena_ptp_start_datapath(struct efx_nic *efx); +void 
efx_siena_ptp_stop_datapath(struct efx_nic *efx); +bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx); +ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue); #endif /* EFX_PTP_H */ diff --git a/drivers/net/ethernet/sfc/siena/rx.c b/drivers/net/ethernet/sfc/siena/rx.c index 47c09b93f7c4..98d3c0743c0f 100644 --- a/drivers/net/ethernet/sfc/siena/rx.c +++ b/drivers/net/ethernet/sfc/siena/rx.c @@ -377,7 +377,7 @@ void __efx_siena_rx_packet(struct efx_channel *channel) if (unlikely(efx->loopback_selftest)) { struct efx_rx_queue *rx_queue; - efx_loopback_rx_packet(efx, eh, rx_buf->len); + efx_siena_loopback_rx_packet(efx, eh, rx_buf->len); rx_queue = efx_channel_get_rx_queue(channel); efx_siena_free_rx_buffers(rx_queue, rx_buf, channel->rx_pkt_n_frags); diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c index 7e24329bc005..83bd27df30d4 100644 --- a/drivers/net/ethernet/sfc/siena/selftest.c +++ b/drivers/net/ethernet/sfc/siena/selftest.c @@ -69,7 +69,7 @@ static const char *const efx_siena_interrupt_mode_names[] = { /** * struct efx_loopback_state - persistent state during a loopback selftest - * @flush: Drop all packets in efx_loopback_rx_packet + * @flush: Drop all packets in efx_siena_loopback_rx_packet * @packet_count: Number of packets being used in this test * @skbs: An array of skbs transmitted * @offload_csum: Checksums are being offloaded @@ -278,8 +278,8 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, /* Loopback test RX callback * This is called for each received packet during loopback testing. */ -void efx_loopback_rx_packet(struct efx_nic *efx, - const char *buf_ptr, int pkt_len) +void efx_siena_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len) { struct efx_loopback_state *state = efx->loopback_selftest; struct efx_loopback_payload *received; @@ -369,7 +369,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx, atomic_inc(&state->rx_bad); } -/* Initialise an efx_selftest_state for a new iteration */ +/* Initialise an efx_siena_selftest_state for a new iteration */ static void efx_iterate_state(struct efx_nic *efx) { struct efx_loopback_state *state = efx->loopback_selftest; @@ -684,14 +684,14 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, * *************************************************************************/ -int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, - unsigned flags) +int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int flags) { enum efx_loopback_mode loopback_mode = efx->loopback_mode; int phy_mode = efx->phy_mode; int rc_test = 0, rc_reset, rc; - efx_selftest_async_cancel(efx); + efx_siena_selftest_async_cancel(efx); /* Online (i.e. non-disruptive) testing * This checks interrupt generation, event delivery and PHY presence. 
*/ @@ -767,7 +767,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, return rc_test; } -void efx_selftest_async_start(struct efx_nic *efx) +void efx_siena_selftest_async_start(struct efx_nic *efx) { struct efx_channel *channel; @@ -776,12 +776,12 @@ void efx_selftest_async_start(struct efx_nic *efx) schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT); } -void efx_selftest_async_cancel(struct efx_nic *efx) +void efx_siena_selftest_async_cancel(struct efx_nic *efx) { cancel_delayed_work_sync(&efx->selftest_work); } -static void efx_selftest_async_work(struct work_struct *data) +static void efx_siena_selftest_async_work(struct work_struct *data) { struct efx_nic *efx = container_of(data, struct efx_nic, selftest_work.work); @@ -801,7 +801,7 @@ static void efx_selftest_async_work(struct work_struct *data) } } -void efx_selftest_async_init(struct efx_nic *efx) +void efx_siena_selftest_async_init(struct efx_nic *efx) { - INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); + INIT_DELAYED_WORK(&efx->selftest_work, efx_siena_selftest_async_work); } diff --git a/drivers/net/ethernet/sfc/siena/selftest.h b/drivers/net/ethernet/sfc/siena/selftest.h index a23f085bf298..6af6e7fbfcee 100644 --- a/drivers/net/ethernet/sfc/siena/selftest.h +++ b/drivers/net/ethernet/sfc/siena/selftest.h @@ -41,12 +41,12 @@ struct efx_self_tests { struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; }; -void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr, - int pkt_len); -int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, - unsigned flags); -void efx_selftest_async_init(struct efx_nic *efx); -void efx_selftest_async_start(struct efx_nic *efx); -void efx_selftest_async_cancel(struct efx_nic *efx); +void efx_siena_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr, + int pkt_len); +int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int flags); +void efx_siena_selftest_async_init(struct efx_nic *efx); +void efx_siena_selftest_async_start(struct efx_nic *efx); +void efx_siena_selftest_async_cancel(struct efx_nic *efx); #endif /* EFX_SELFTEST_H */ diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index 726dd4b72779..d70e481d0c73 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -143,27 +143,28 @@ static int siena_ptp_set_ts_config(struct efx_nic *efx, switch (init->rx_filter) { case HWTSTAMP_FILTER_NONE: /* if TX timestamping is still requested then leave PTP on */ - return efx_ptp_change_mode(efx, - init->tx_type != HWTSTAMP_TX_OFF, - efx_ptp_get_mode(efx)); + return efx_siena_ptp_change_mode(efx, + init->tx_type != HWTSTAMP_TX_OFF, + efx_siena_ptp_get_mode(efx)); case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1); + return efx_siena_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1); case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; - rc = efx_ptp_change_mode(efx, true, - MC_CMD_PTP_MODE_V2_ENHANCED); + rc = efx_siena_ptp_change_mode(efx, true, + MC_CMD_PTP_MODE_V2_ENHANCED); /* bug 33070 - old versions of the firmware do not support the * improved UUID filtering option. 
Similarly old versions of the * application do not expect it to be enabled. If the firmware * does not accept the enhanced mode, fall back to the standard * PTP v2 UUID filtering. */ if (rc != 0) - rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2); + rc = efx_siena_ptp_change_mode(efx, true, + MC_CMD_PTP_MODE_V2); return rc; default: return -ERANGE; @@ -329,7 +330,7 @@ static int siena_probe_nic(struct efx_nic *efx) #ifdef CONFIG_SFC_SRIOV efx_siena_sriov_probe(efx); #endif - efx_ptp_defer_probe_with_channel(efx); + efx_siena_ptp_defer_probe_with_channel(efx); return 0; diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c index 0677a0254d85..ef238e9efa94 100644 --- a/drivers/net/ethernet/sfc/siena/tx.c +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -321,14 +321,14 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb, /* PTP "event" packet */ if (unlikely(efx_xmit_with_hwtstamp(skb)) && - ((efx_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || - unlikely(efx_ptp_is_ptp_tx(efx, skb)))) { + ((efx_siena_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) || + unlikely(efx_siena_ptp_is_ptp_tx(efx, skb)))) { /* There may be existing transmits on the channel that are * waiting for this packet to trigger the doorbell write. * We need to send the packets at this point. */ efx_tx_send_pending(efx_get_tx_channel(efx, index)); - return efx_ptp_tx(efx, skb); + return efx_siena_ptp_tx(efx, skb); } tx_queue = efx_get_tx_queue(efx, index, type); diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c index 66adc8525a3a..31e9888e71df 100644 --- a/drivers/net/ethernet/sfc/siena/tx_common.c +++ b/drivers/net/ethernet/sfc/siena/tx_common.c @@ -80,8 +80,8 @@ void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->old_read_count = 0; tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; tx_queue->xmit_pending = false; - tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) && - tx_queue->channel == efx_ptp_channel(efx)); + tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) && + tx_queue->channel == efx_siena_ptp_channel(efx)); tx_queue->completed_timestamp_major = 0; tx_queue->completed_timestamp_minor = 0; @@ -148,7 +148,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct skb_shared_hwtstamps hwtstamp; hwtstamp.hwtstamp = - efx_ptp_nic_to_kernel_time(tx_queue); + efx_siena_ptp_nic_to_kernel_time(tx_queue); skb_tstamp_tx(skb, &hwtstamp); tx_queue->completed_timestamp_major = 0; -- cgit v1.2.3 From 4d49e5cd4b095cd1fcf6b1abee0c1bac1b5fc722 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:32:45 +0100 Subject: sfc/siena: Rename functions in mcdi headers to avoid conflicts with sfc For siena use efx_siena_ as the function prefix. Several functions are not used in Siena, so they are removed. 
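(Illustrative sketch, not part of the original patch.) The rename is purely mechanical: each shared symbol gains the efx_siena_ prefix so the siena driver and the sfc driver can be built together without exported-symbol clashes. A typical caller therefore changes along these lines (arguments unchanged, mirroring the MC_CMD_GET_VERSION hunk in this patch):

	/* before: shared name, conflicts with the sfc driver */
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	/* after: Siena-private name, same semantics */
	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
				outbuf, sizeof(outbuf), &outlength);
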
Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx.c | 6 +- drivers/net/ethernet/sfc/siena/efx_channels.c | 6 +- drivers/net/ethernet/sfc/siena/efx_common.c | 10 +- drivers/net/ethernet/sfc/siena/ethtool.c | 2 +- drivers/net/ethernet/sfc/siena/ethtool_common.c | 20 +- drivers/net/ethernet/sfc/siena/farch.c | 4 +- drivers/net/ethernet/sfc/siena/mcdi.c | 410 ++++++++-------------- drivers/net/ethernet/sfc/siena/mcdi.h | 134 ++++--- drivers/net/ethernet/sfc/siena/mcdi_mon.c | 25 +- drivers/net/ethernet/sfc/siena/mcdi_port.c | 33 +- drivers/net/ethernet/sfc/siena/mcdi_port.h | 7 +- drivers/net/ethernet/sfc/siena/mcdi_port_common.c | 174 ++++----- drivers/net/ethernet/sfc/siena/mcdi_port_common.h | 61 ++-- drivers/net/ethernet/sfc/siena/ptp.c | 70 ++-- drivers/net/ethernet/sfc/siena/selftest.c | 4 +- drivers/net/ethernet/sfc/siena/siena.c | 93 ++--- drivers/net/ethernet/sfc/siena/siena_sriov.c | 9 +- 17 files changed, 459 insertions(+), 609 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index 3b79c39300a4..1bc5ee6220f0 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -153,7 +153,7 @@ static int efx_init_port(struct efx_nic *efx) efx->port_initialized = true; /* Ensure the PHY advertises the correct flow control settings */ - rc = efx_mcdi_port_reconfigure(efx); + rc = efx_siena_mcdi_port_reconfigure(efx); if (rc && rc != -EPERM) goto fail; @@ -526,7 +526,7 @@ static int efx_net_open(struct net_device *net_dev) return rc; if (efx->phy_mode & PHY_MODE_SPECIAL) return -EBUSY; - if (efx_mcdi_poll_reboot(efx) && efx_siena_reset(efx, RESET_TYPE_ALL)) + if (efx_siena_mcdi_poll_reboot(efx) && efx_siena_reset(efx, RESET_TYPE_ALL)) return -EIO; /* Notify the kernel of the link state polled during driver load, @@ -1158,7 +1158,7 @@ static int efx_pm_thaw(struct device *dev) goto fail; mutex_lock(&efx->mac_lock); - efx_mcdi_port_reconfigure(efx); + efx_siena_mcdi_port_reconfigure(efx); mutex_unlock(&efx->mac_lock); efx_siena_start_all(efx); diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 799c0a90358c..276cd7d88732 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -1022,7 +1022,7 @@ static int efx_soft_enable_interrupts(struct efx_nic *efx) efx_siena_start_eventq(channel); } - efx_mcdi_mode_event(efx); + efx_siena_mcdi_mode_event(efx); return 0; fail: @@ -1045,7 +1045,7 @@ static void efx_soft_disable_interrupts(struct efx_nic *efx) if (efx->state == STATE_DISABLED) return; - efx_mcdi_mode_poll(efx); + efx_siena_mcdi_mode_poll(efx); efx->irq_soft_enabled = false; smp_wmb(); @@ -1063,7 +1063,7 @@ static void efx_soft_disable_interrupts(struct efx_nic *efx) } /* Flush the asynchronous MCDI request queue */ - efx_mcdi_flush_async(efx); + efx_siena_mcdi_flush_async(efx); } int efx_siena_enable_interrupts(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index 6b524775c929..3293221b9e9e 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -456,7 +456,7 @@ static void efx_stop_datapath(struct efx_nic *efx) * **************************************************************************/ -/* Equivalent to efx_link_set_advertising with all-zeroes, except does not +/* Equivalent to 
efx_siena_link_set_advertising with all-zeroes, except does not * force the Autoneg bit on. */ void efx_siena_link_clear_advertising(struct efx_nic *efx) @@ -547,7 +547,7 @@ void efx_siena_start_all(struct efx_nic *efx) * to poll now because we could have missed a change */ mutex_lock(&efx->mac_lock); - if (efx_mcdi_phy_poll(efx)) + if (efx_siena_mcdi_phy_poll(efx)) efx_siena_link_status_changed(efx); mutex_unlock(&efx->mac_lock); @@ -665,7 +665,7 @@ static void efx_wait_for_bist_end(struct efx_nic *efx) int i; for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { - if (efx_mcdi_poll_reboot(efx)) + if (efx_siena_mcdi_poll_reboot(efx)) goto out; msleep(BIST_WAIT_DELAY_MS); } @@ -760,7 +760,7 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && method != RESET_TYPE_DATAPATH) { - rc = efx_mcdi_port_reconfigure(efx); + rc = efx_siena_mcdi_port_reconfigure(efx); if (rc && rc != -EPERM) netif_err(efx, drv, efx->net_dev, "could not restore PHY settings\n"); @@ -950,7 +950,7 @@ void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type) /* efx_process_channel() will no longer read events once a * reset is scheduled. So switch back to poll'd MCDI completions. */ - efx_mcdi_mode_poll(efx); + efx_siena_mcdi_mode_poll(efx); efx_siena_queue_reset_work(efx); } diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 63388bec421d..5ee626ba4eb1 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -50,7 +50,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, return 1; /* cycle on/off once per second */ } - return efx_mcdi_set_id_led(efx, mode); + return efx_siena_mcdi_set_id_led(efx, mode); } static int efx_ethtool_get_regs_len(struct net_device *net_dev) diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c index 91f750e4ede8..0207d07f54e3 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool_common.c +++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c @@ -106,8 +106,8 @@ void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - efx_mcdi_print_fwver(efx, info->fw_version, - sizeof(info->fw_version)); + efx_siena_mcdi_print_fwver(efx, info->fw_version, + sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } @@ -173,7 +173,7 @@ int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev, efx_siena_link_set_wanted_fc(efx, wanted_fc); if (efx->link_advertising[0] != old_adv || (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { - rc = efx_mcdi_port_reconfigure(efx); + rc = efx_siena_mcdi_port_reconfigure(efx); if (rc) { netif_err(efx, drv, efx->net_dev, "Unable to advertise requested flow " @@ -328,7 +328,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx, const char *name; EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); - name = efx_mcdi_phy_test_name(efx, i); + name = efx_siena_mcdi_phy_test_name(efx, i); if (name == NULL) break; @@ -565,7 +565,7 @@ int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev, struct efx_link_state *link_state = &efx->link_state; mutex_lock(&efx->mac_lock); - efx_mcdi_phy_get_link_ksettings(efx, cmd); + efx_siena_mcdi_phy_get_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); /* Both MACs support pause frames (bidirectional and respond-only) 
*/ @@ -597,7 +597,7 @@ efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev, } mutex_lock(&efx->mac_lock); - rc = efx_mcdi_phy_set_link_ksettings(efx, cmd); + rc = efx_siena_mcdi_phy_set_link_ksettings(efx, cmd); mutex_unlock(&efx->mac_lock); return rc; } @@ -609,7 +609,7 @@ int efx_siena_ethtool_get_fecparam(struct net_device *net_dev, int rc; mutex_lock(&efx->mac_lock); - rc = efx_mcdi_phy_get_fecparam(efx, fecparam); + rc = efx_siena_mcdi_phy_get_fecparam(efx, fecparam); mutex_unlock(&efx->mac_lock); return rc; @@ -622,7 +622,7 @@ int efx_siena_ethtool_set_fecparam(struct net_device *net_dev, int rc; mutex_lock(&efx->mac_lock); - rc = efx_mcdi_phy_set_fecparam(efx, fecparam); + rc = efx_siena_mcdi_phy_set_fecparam(efx, fecparam); mutex_unlock(&efx->mac_lock); return rc; @@ -1320,7 +1320,7 @@ int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev, int ret; mutex_lock(&efx->mac_lock); - ret = efx_mcdi_phy_get_module_eeprom(efx, ee, data); + ret = efx_siena_mcdi_phy_get_module_eeprom(efx, ee, data); mutex_unlock(&efx->mac_lock); return ret; @@ -1333,7 +1333,7 @@ int efx_siena_ethtool_get_module_info(struct net_device *net_dev, int ret; mutex_lock(&efx->mac_lock); - ret = efx_mcdi_phy_get_module_info(efx, modinfo); + ret = efx_siena_mcdi_phy_get_module_info(efx, modinfo); mutex_unlock(&efx->mac_lock); return ret; diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c index 4de36c6c28e1..ebd6fa408126 100644 --- a/drivers/net/ethernet/sfc/siena/farch.c +++ b/drivers/net/ethernet/sfc/siena/farch.c @@ -667,7 +667,7 @@ static int efx_farch_do_flush(struct efx_nic *efx) * completion). If that fails, fall back to the old scheme. */ if (efx_siena_sriov_enabled(efx)) { - rc = efx_mcdi_flush_rxqs(efx); + rc = efx_siena_mcdi_flush_rxqs(efx); if (!rc) goto wait; } @@ -1313,7 +1313,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) break; #endif case FSE_CZ_EV_CODE_MCDI_EV: - efx_mcdi_process_event(channel, &event); + efx_siena_mcdi_process_event(channel, &event); break; case FSE_AZ_EV_CODE_GLOBAL_EV: if (efx->type->handle_global_event && diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c index ff426b228cb2..eb13aa59fe50 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.c +++ b/drivers/net/ethernet/sfc/siena/mcdi.c @@ -58,7 +58,7 @@ MODULE_PARM_DESC(mcdi_logging_default, "Enable MCDI logging on newly-probed functions"); #endif -int efx_mcdi_init(struct efx_nic *efx) +int efx_siena_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; bool already_attached; @@ -86,11 +86,11 @@ int efx_mcdi_init(struct efx_nic *efx) INIT_LIST_HEAD(&mcdi->async_list); timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0); - (void) efx_mcdi_poll_reboot(efx); + (void)efx_siena_mcdi_poll_reboot(efx); mcdi->new_epoch = true; /* Recover from a failed assertion before probing */ - rc = efx_mcdi_handle_assertion(efx); + rc = efx_siena_mcdi_handle_assertion(efx); if (rc) goto fail2; @@ -124,7 +124,7 @@ fail: return rc; } -void efx_mcdi_detach(struct efx_nic *efx) +void efx_siena_mcdi_detach(struct efx_nic *efx) { if (!efx->mcdi) return; @@ -135,7 +135,7 @@ void efx_mcdi_detach(struct efx_nic *efx) efx_mcdi_drv_attach(efx, false, NULL); } -void efx_mcdi_fini(struct efx_nic *efx) +void efx_siena_mcdi_fini(struct efx_nic *efx) { if (!efx->mcdi) return; @@ -360,7 +360,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) int rc; /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ - rc = 
efx_mcdi_poll_reboot(efx); + rc = efx_siena_mcdi_poll_reboot(efx); if (rc) { spin_lock_bh(&mcdi->iface_lock); mcdi->resprc = rc; @@ -401,7 +401,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) /* Test and clear MC-rebooted flag for this port/function; reset * software state as necessary. */ -int efx_mcdi_poll_reboot(struct efx_nic *efx) +int efx_siena_mcdi_poll_reboot(struct efx_nic *efx) { if (!efx->mcdi) return 0; @@ -440,7 +440,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx) * completed the request first, then we'll just end up completing the * request again, which is safe. * - * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which + * We need an smp_rmb() to synchronise with efx_siena_mcdi_mode_poll(), which * wait_event_timeout() implicitly provides. */ if (mcdi->mode == MCDI_MODE_POLL) @@ -548,8 +548,8 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) err_len = min(sizeof(errbuf), data_len); efx->type->mcdi_read_response(efx, errbuf, hdr_len, sizeof(errbuf)); - efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf, - err_len, rc); + efx_siena_mcdi_display_error(efx, async->cmd, async->inlen, + errbuf, err_len, rc); } if (async->complete) @@ -733,13 +733,13 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, mcdi->proxy_rx_handle = 0; mcdi->state = MCDI_STATE_PROXY_WAIT; } else if (rc && !quiet) { - efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len, - rc); + efx_siena_mcdi_display_error(efx, cmd, inlen, errbuf, + err_len, rc); } if (rc == -EIO || rc == -EINTR) { msleep(MCDI_STATUS_SLEEP_MS); - efx_mcdi_poll_reboot(efx); + efx_siena_mcdi_poll_reboot(efx); mcdi->new_epoch = true; } } @@ -813,7 +813,7 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, return -EINVAL; } - rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); + rc = efx_siena_mcdi_rpc_start(efx, cmd, inbuf, inlen); if (rc) return rc; @@ -894,14 +894,14 @@ static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd, } if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO)) - efx_mcdi_display_error(efx, cmd, inlen, - outbuf, outlen, rc); + efx_siena_mcdi_display_error(efx, cmd, inlen, + outbuf, outlen, rc); return rc; } /** - * efx_mcdi_rpc - Issue an MCDI command and wait for completion + * efx_siena_mcdi_rpc - Issue an MCDI command and wait for completion * @efx: NIC through which to issue the command * @cmd: Command type number * @inbuf: Command parameters @@ -924,34 +924,34 @@ static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd, * set accordingly. In the latter case, *@outlen_actual will be * set to zero. */ -int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen, - efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual) +int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) { return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, outlen_actual, false); } /* Normally, on receiving an error code in the MCDI response, - * efx_mcdi_rpc will log an error message containing (among other - * things) the raw error code, by means of efx_mcdi_display_error. + * efx_siena_mcdi_rpc will log an error message containing (among other + * things) the raw error code, by means of efx_siena_mcdi_display_error. 
* This _quiet version suppresses that; if the caller wishes to log * the error conditionally on the return code, it should call this - * function and is then responsible for calling efx_mcdi_display_error + * function and is then responsible for calling efx_siena_mcdi_display_error * as needed. */ -int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen, - efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual) +int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) { return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen, outlen_actual, true); } -int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen) +int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); int rc; @@ -1026,7 +1026,7 @@ static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, } /** - * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously + * efx_siena_mcdi_rpc_async - Schedule an MCDI command to run asynchronously * @efx: NIC through which to issue the command * @cmd: Command type number * @inbuf: Command parameters @@ -1046,42 +1046,44 @@ static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, * (c) the request times-out (in timer context) */ int -efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, - const efx_dword_t *inbuf, size_t inlen, size_t outlen, - efx_mcdi_async_completer *complete, unsigned long cookie) +efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie) { return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, cookie, false); } -int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, - const efx_dword_t *inbuf, size_t inlen, - size_t outlen, efx_mcdi_async_completer *complete, - unsigned long cookie) +int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie) { return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, cookie, true); } -int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, - efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual) +int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) { return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, outlen_actual, false, NULL, NULL); } -int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, - efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual) +int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual) { return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, outlen_actual, true, NULL, NULL); } -void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, - size_t inlen, efx_dword_t *outbuf, - size_t outlen, int rc) +void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc) { int code = 0, err_arg = 0; @@ -1098,7 +1100,7 @@ void efx_mcdi_display_error(struct 
efx_nic *efx, unsigned cmd, * error conditions with various locks held, so it must be lockless. * Caller is responsible for flushing asynchronous requests later. */ -void efx_mcdi_mode_poll(struct efx_nic *efx) +void efx_siena_mcdi_mode_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -1129,7 +1131,7 @@ void efx_mcdi_mode_poll(struct efx_nic *efx) /* Flush any running or queued asynchronous requests, after event processing * is stopped */ -void efx_mcdi_flush_async(struct efx_nic *efx) +void efx_siena_mcdi_flush_async(struct efx_nic *efx) { struct efx_mcdi_async_param *async, *next; struct efx_mcdi_iface *mcdi; @@ -1166,7 +1168,7 @@ void efx_mcdi_flush_async(struct efx_nic *efx) } } -void efx_mcdi_mode_event(struct efx_nic *efx) +void efx_siena_mcdi_mode_event(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -1185,7 +1187,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx) * request, because the completion method is specified in the request. * So acquire the interface to serialise the requestors. We don't need * to acquire the iface_lock to change the mode here, but we do need a - * write memory barrier ensure that efx_mcdi_rpc() sees it, which + * write memory barrier ensure that efx_siena_mcdi_rpc() sees it, which * efx_mcdi_acquire() provides. */ efx_mcdi_acquire_sync(mcdi); @@ -1234,18 +1236,18 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) } else { int count; - /* Consume the status word since efx_mcdi_rpc_finish() won't */ + /* Consume the status word since efx_siena_mcdi_rpc_finish() won't */ for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { - rc = efx_mcdi_poll_reboot(efx); + rc = efx_siena_mcdi_poll_reboot(efx); if (rc) break; udelay(MCDI_STATUS_DELAY_US); } /* On EF10, a CODE_MC_REBOOT event can be received without the - * reboot detection in efx_mcdi_poll_reboot() being triggered. + * reboot detection in efx_siena_mcdi_poll_reboot() being triggered. * If zero was returned from the final call to - * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the + * efx_siena_mcdi_poll_reboot(), the MC reboot wasn't noticed but the * MC has definitely rebooted so prepare for the reset. 
*/ if (!rc && efx->type->mcdi_reboot_detected) @@ -1308,8 +1310,8 @@ static void efx_handle_drain_event(struct efx_nic *efx) } /* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */ -void efx_mcdi_process_event(struct efx_channel *channel, - efx_qword_t *event) +void efx_siena_mcdi_process_event(struct efx_channel *channel, + efx_qword_t *event) { struct efx_nic *efx = channel->efx; int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); @@ -1334,7 +1336,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, break; case MCDI_EVENT_CODE_LINKCHANGE: - efx_mcdi_process_link_change(efx, event); + efx_siena_mcdi_process_link_change(efx, event); break; case MCDI_EVENT_CODE_SENSOREVT: efx_sensor_event(efx, event); @@ -1408,7 +1410,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, ************************************************************************** */ -void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) +void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); size_t outlength; @@ -1417,8 +1419,8 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) int rc; BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, - outbuf, sizeof(outbuf), &outlength); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, + outbuf, sizeof(outbuf), &outlength); if (rc) goto fail; if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { @@ -1464,8 +1466,9 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID * specified will fail with EPERM, and we have to tell the MC we don't * care what firmware we get. 
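As an illustration (outside the patch itself), the comment at the top of this file's hunks describes the calling convention for the _quiet RPC variants: the caller issues the command with efx_siena_mcdi_rpc_quiet() and is then responsible for calling efx_siena_mcdi_display_error() only when it decides the failure is worth reporting, which is what the surrounding efx_mcdi_drv_attach() hunks do. A minimal sketch of that pattern, assuming the driver's mcdi.h declarations; the caller function itself is hypothetical:

/* Hypothetical caller: issue an MCDI command with the quiet variant and
 * decide afterwards whether the failure is worth logging.
 */
static int example_get_version_quiet(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_VERSION, NULL, 0,
				      outbuf, sizeof(outbuf), &outlen);
	if (rc == -EPERM)
		return 0;	/* example of an error the caller expects */
	if (rc)
		efx_siena_mcdi_display_error(efx, MC_CMD_GET_VERSION, 0,
					     outbuf, outlen, rc);
	return rc;
}
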
@@ -1475,13 +1478,13 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n"); MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, - sizeof(inbuf), outbuf, sizeof(outbuf), - &outlen); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, + sizeof(inbuf), outbuf, + sizeof(outbuf), &outlen); } if (rc) { - efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf), - outbuf, outlen, rc); + efx_siena_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, + sizeof(inbuf), outbuf, outlen, rc); goto fail; } if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { @@ -1518,8 +1521,8 @@ fail: return rc; } -int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, - u16 *fw_subtype_list, u32 *capabilities) +int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); size_t outlen, i; @@ -1531,8 +1534,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1); BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; @@ -1574,7 +1577,8 @@ fail: return rc; } -int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) +int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, + u32 dest_evq) { MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); u32 dest = 0; @@ -1590,12 +1594,12 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), + NULL, 0, NULL); return rc; } -int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) +int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) { MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); size_t outlen; @@ -1603,8 +1607,8 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { @@ -1621,38 +1625,9 @@ fail: return rc; } -/* This function finds types using the new NVRAM_PARTITIONS mcdi. 
*/ -static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number, - u32 *nvram_types) -{ - efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, - GFP_KERNEL); - size_t outlen; - int rc; - - if (!outbuf) - return -ENOMEM; - - BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); - - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, - outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen); - if (rc) - goto fail; - - *number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); - - memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID), - *number * sizeof(u32)); - -fail: - kfree(outbuf); - return rc; -} - -int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, - size_t *size_out, size_t *erase_size_out, - bool *protected_out) +int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); @@ -1661,8 +1636,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { @@ -1689,8 +1664,8 @@ static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); if (rc) return rc; @@ -1703,46 +1678,13 @@ static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) } } -/* This function tests nvram partitions using the new mcdi partition lookup scheme */ -int efx_new_mcdi_nvram_test_all(struct efx_nic *efx) -{ - u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, - GFP_KERNEL); - unsigned int number; - int rc, i; - - if (!nvram_types) - return -ENOMEM; - - rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types); - if (rc) - goto fail; - - /* Require at least one check */ - rc = -EAGAIN; - - for (i = 0; i < number; i++) { - if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP || - nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG) - continue; - - rc = efx_mcdi_nvram_test(efx, nvram_types[i]); - if (rc) - goto fail; - } - -fail: - kfree(nvram_types); - return rc; -} - -int efx_mcdi_nvram_test_all(struct efx_nic *efx) +int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx) { u32 nvram_types; unsigned int type; int rc; - rc = efx_mcdi_nvram_types(efx, &nvram_types); + rc = efx_siena_mcdi_nvram_types(efx, &nvram_types); if (rc) goto fail1; @@ -1788,17 +1730,17 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) retry = 2; do { MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, - inbuf, MC_CMD_GET_ASSERTS_IN_LEN, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, + inbuf, MC_CMD_GET_ASSERTS_IN_LEN, + outbuf, sizeof(outbuf), &outlen); if (rc == -EPERM) return 0; } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); if (rc) { - efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS, - MC_CMD_GET_ASSERTS_IN_LEN, outbuf, - outlen, rc); + efx_siena_mcdi_display_error(efx, 
MC_CMD_GET_ASSERTS, + MC_CMD_GET_ASSERTS_IN_LEN, outbuf, + outlen, rc); return rc; } if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) @@ -1847,17 +1789,17 @@ static int efx_mcdi_exit_assertion(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, + MC_CMD_REBOOT_IN_LEN, NULL, 0, NULL); if (rc == -EIO) rc = 0; if (rc) - efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN, - NULL, 0, rc); + efx_siena_mcdi_display_error(efx, MC_CMD_REBOOT, + MC_CMD_REBOOT_IN_LEN, NULL, 0, rc); return rc; } -int efx_mcdi_handle_assertion(struct efx_nic *efx) +int efx_siena_mcdi_handle_assertion(struct efx_nic *efx) { int rc; @@ -1868,7 +1810,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx) return efx_mcdi_exit_assertion(efx); } -int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); @@ -1880,7 +1822,8 @@ int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); - return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL); + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), + NULL, 0, NULL); } static int efx_mcdi_reset_func(struct efx_nic *efx) @@ -1891,8 +1834,8 @@ static int efx_mcdi_reset_func(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0); MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG, ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); - rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), + NULL, 0, NULL); return rc; } @@ -1903,8 +1846,8 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); - rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), + NULL, 0, NULL); /* White is black, and up is down */ if (rc == -EIO) return 0; @@ -1913,12 +1856,12 @@ static int efx_mcdi_reset_mc(struct efx_nic *efx) return rc; } -enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason) +enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason) { return RESET_TYPE_RECOVER_OR_ALL; } -int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) +int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method) { int rc; @@ -1936,7 +1879,7 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) } /* Recover from a failed assertion pre-reset */ - rc = efx_mcdi_handle_assertion(efx); + rc = efx_siena_mcdi_handle_assertion(efx); if (rc) return rc; @@ -1961,8 +1904,8 @@ static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, MC_CMD_FILTER_MODE_SIMPLE); ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac); - rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; @@ -1983,21 +1926,21 @@ fail: } -int -efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) +int 
efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out) { return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); } -int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) +int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) { MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); size_t outlen; int rc; - rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; @@ -2017,19 +1960,19 @@ fail: } -int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) +int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); - rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, + sizeof(inbuf), NULL, 0, NULL); return rc; } -int efx_mcdi_flush_rxqs(struct efx_nic *efx) +int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx) { struct efx_channel *channel; struct efx_rx_queue *rx_queue; @@ -2054,79 +1997,20 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx) } } - rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, - MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), + NULL, 0, NULL); WARN_ON(rc < 0); return rc; } -int efx_mcdi_wol_filter_reset(struct efx_nic *efx) -{ - int rc; - - rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); - return rc; -} - -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, - unsigned int *flags) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN); - size_t outlen; - int rc; - - BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); - MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); - MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); - rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - return rc; - - if (!flags) - return 0; - - if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) - *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS); - else - *flags = 0; - - return 0; -} - -int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, - unsigned int *enabled_out) +int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx) { - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN); - size_t outlen; int rc; - rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0, - outbuf, sizeof(outbuf), &outlen); - if (rc) - goto fail; - - if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) { - rc = -EIO; - goto fail; - } - - if (impl_out) - *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED); - - if (enabled_out) - *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED); - - return 0; - -fail: - /* Older firmware lacks GET_WORKAROUNDS and this isn't especially - * terrifying. The call site will have to deal with it though. 
- */ - netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err, - "%s: failed rc=%d\n", __func__, rc); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, + NULL, 0, NULL); return rc; } @@ -2146,8 +2030,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, + sizeof(inbuf), NULL, 0, NULL); return rc; } @@ -2167,8 +2051,8 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE, MC_CMD_NVRAM_READ_IN_V2_DEFAULT); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); if (rc) return rc; @@ -2190,9 +2074,9 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, - ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, + ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), + NULL, 0, NULL); return rc; } @@ -2208,8 +2092,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), + NULL, 0, NULL); return rc; } @@ -2226,8 +2110,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) { rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE); if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) @@ -2263,8 +2147,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) return rc; } -int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, u8 *buffer) +int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; @@ -2287,7 +2171,7 @@ out: return rc; } -int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; @@ -2317,8 +2201,8 @@ out: return rc; } -int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, - size_t len, size_t *retlen, const u8 *buffer) +int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, const u8 *buffer) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; @@ -2348,7 +2232,7 @@ out: return rc; } -int efx_mcdi_mtd_sync(struct mtd_info *mtd) +int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic 
*efx = mtd->priv; @@ -2362,7 +2246,7 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd) return rc; } -void efx_mcdi_mtd_rename(struct efx_mtd_partition *part) +void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part) { struct efx_mcdi_mtd_partition *mcdi_part = container_of(part, struct efx_mcdi_mtd_partition, common); diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h index 69c2924a147c..dcebdbf956ce 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.h +++ b/drivers/net/ethernet/sfc/siena/mcdi.h @@ -138,52 +138,54 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) } #endif -int efx_mcdi_init(struct efx_nic *efx); -void efx_mcdi_detach(struct efx_nic *efx); -void efx_mcdi_fini(struct efx_nic *efx); +int efx_siena_mcdi_init(struct efx_nic *efx); +void efx_siena_mcdi_detach(struct efx_nic *efx); +void efx_siena_mcdi_fini(struct efx_nic *efx); -int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, - size_t inlen, efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual); -int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, +int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, const efx_dword_t *inbuf, size_t inlen, efx_dword_t *outbuf, size_t outlen, size_t *outlen_actual); +int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); -int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, - const efx_dword_t *inbuf, size_t inlen); -int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, - efx_dword_t *outbuf, size_t outlen, - size_t *outlen_actual); -int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, - size_t inlen, efx_dword_t *outbuf, - size_t outlen, size_t *outlen_actual); +int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen); +int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); +int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual); typedef void efx_mcdi_async_completer(struct efx_nic *efx, unsigned long cookie, int rc, efx_dword_t *outbuf, size_t outlen_actual); -int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, - const efx_dword_t *inbuf, size_t inlen, size_t outlen, - efx_mcdi_async_completer *complete, - unsigned long cookie); -int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, +int efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, const efx_dword_t *inbuf, size_t inlen, size_t outlen, efx_mcdi_async_completer *complete, unsigned long cookie); +int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); -void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, - size_t inlen, efx_dword_t *outbuf, - size_t outlen, int rc); +void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc); -int efx_mcdi_poll_reboot(struct efx_nic *efx); -void efx_mcdi_mode_poll(struct efx_nic *efx); -void efx_mcdi_mode_event(struct efx_nic *efx); -void efx_mcdi_flush_async(struct efx_nic *efx); +int efx_siena_mcdi_poll_reboot(struct efx_nic *efx); +void 
efx_siena_mcdi_mode_poll(struct efx_nic *efx); +void efx_siena_mcdi_mode_event(struct efx_nic *efx); +void efx_siena_mcdi_flush_async(struct efx_nic *efx); -void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); -void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); +void efx_siena_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); /* We expect that 16- and 32-bit fields in MCDI requests and responses * are appropriately aligned, but 64-bit fields are only @@ -338,51 +340,47 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); MCDI_CAPABILITY(field), \ MCDI_CAPABILITY_OFST(field)) -void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); -int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, - u16 *fw_subtype_list, u32 *capabilities); -int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq); -int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); -int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, - size_t *size_out, size_t *erase_size_out, - bool *protected_out); -int efx_new_mcdi_nvram_test_all(struct efx_nic *efx); -int efx_mcdi_nvram_test_all(struct efx_nic *efx); -int efx_mcdi_handle_assertion(struct efx_nic *efx); -int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); -int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, - int *id_out); -int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); -int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); -int efx_mcdi_wol_filter_reset(struct efx_nic *efx); -int efx_mcdi_flush_rxqs(struct efx_nic *efx); -void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); -void efx_mcdi_mac_start_stats(struct efx_nic *efx); -void efx_mcdi_mac_stop_stats(struct efx_nic *efx); -void efx_mcdi_mac_pull_stats(struct efx_nic *efx); -enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); -int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, - unsigned int *flags); -int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, - unsigned int *enabled_out); +void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); +int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities); +int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, + u32 dest_evq); +int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); +int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out); +int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx); +int efx_siena_mcdi_handle_assertion(struct efx_nic *efx); +int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); +int efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out); +int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); +int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id); +int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx); +int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx); +void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); +void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx); +void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx); +void 
efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx); +enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason); +int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method); #ifdef CONFIG_SFC_MCDI_MON -int efx_mcdi_mon_probe(struct efx_nic *efx); -void efx_mcdi_mon_remove(struct efx_nic *efx); +int efx_siena_mcdi_mon_probe(struct efx_nic *efx); +void efx_siena_mcdi_mon_remove(struct efx_nic *efx); #else -static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } -static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} +static inline int efx_siena_mcdi_mon_probe(struct efx_nic *efx) { return 0; } +static inline void efx_siena_mcdi_mon_remove(struct efx_nic *efx) {} #endif #ifdef CONFIG_SFC_MTD -int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, - size_t *retlen, u8 *buffer); -int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); -int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, - size_t *retlen, const u8 *buffer); -int efx_mcdi_mtd_sync(struct mtd_info *mtd); -void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); +int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); +int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); +int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); +int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd); +void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part); #endif #endif /* EFX_MCDI_H */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c index 5954fcfee2b1..eb44d4140925 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c @@ -100,7 +100,7 @@ static const char *const sensor_status_names[] = { [MC_CMD_SENSOR_STATE_NO_READING] = "No reading", }; -void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) { unsigned int type, state, value; enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN; @@ -151,8 +151,8 @@ static int efx_mcdi_mon_update(struct efx_nic *efx) hwmon->dma_buf.dma_addr); MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len); - rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, - inbuf, sizeof(inbuf), NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_READ_SENSORS, + inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc == 0) hwmon->last_update = jiffies; return rc; @@ -300,7 +300,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; } -int efx_mcdi_mon_probe(struct efx_nic *efx) +int efx_siena_mcdi_mon_probe(struct efx_nic *efx) { unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0; struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); @@ -318,8 +318,9 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) do { MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); - rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); if (rc) return rc; if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) @@ -380,10 +381,10 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); - rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, - inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), - 
&outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); if (rc) goto fail; if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) { @@ -513,11 +514,11 @@ hwmon_register: return 0; fail: - efx_mcdi_mon_remove(efx); + efx_siena_mcdi_mon_remove(efx); return rc; } -void efx_mcdi_mon_remove(struct efx_nic *efx) +void efx_siena_mcdi_mon_remove(struct efx_nic *efx) { struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.c b/drivers/net/ethernet/sfc/siena/mcdi_port.c index 94c6a345c0b1..93b8b2338f11 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_port.c +++ b/drivers/net/ethernet/sfc/siena/mcdi_port.c @@ -31,8 +31,8 @@ static int efx_mcdi_mdio_read(struct net_device *net_dev, MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); if (rc) return rc; @@ -58,8 +58,8 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev, MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); if (rc) return rc; @@ -70,14 +70,7 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev, return 0; } -u32 efx_mcdi_phy_get_caps(struct efx_nic *efx) -{ - struct efx_mcdi_phy_data *phy_data = efx->phy_data; - - return phy_data->supported_cap; -} - -bool efx_mcdi_mac_check_fault(struct efx_nic *efx) +bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); size_t outlength; @@ -85,15 +78,15 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), &outlength); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlength); if (rc) return true; return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0; } -int efx_mcdi_port_probe(struct efx_nic *efx) +int efx_siena_mcdi_port_probe(struct efx_nic *efx) { int rc; @@ -103,15 +96,15 @@ int efx_mcdi_port_probe(struct efx_nic *efx) efx->mdio.mdio_write = efx_mcdi_mdio_write; /* Fill out MDIO structure, loopback modes, and initial link state */ - rc = efx_mcdi_phy_probe(efx); + rc = efx_siena_mcdi_phy_probe(efx); if (rc != 0) return rc; - return efx_mcdi_mac_init_stats(efx); + return efx_siena_mcdi_mac_init_stats(efx); } -void efx_mcdi_port_remove(struct efx_nic *efx) +void efx_siena_mcdi_port_remove(struct efx_nic *efx) { - efx_mcdi_phy_remove(efx); - efx_mcdi_mac_fini_stats(efx); + efx_siena_mcdi_phy_remove(efx); + efx_siena_mcdi_mac_fini_stats(efx); } diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.h b/drivers/net/ethernet/sfc/siena/mcdi_port.h index 07863ddbe740..7b4ae250b51f 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_port.h +++ b/drivers/net/ethernet/sfc/siena/mcdi_port.h @@ -10,9 +10,8 @@ #include "net_driver.h" -u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); -bool efx_mcdi_mac_check_fault(struct efx_nic *efx); -int efx_mcdi_port_probe(struct efx_nic *efx); -void efx_mcdi_port_remove(struct efx_nic *efx); +bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx); 
+int efx_siena_mcdi_port_probe(struct efx_nic *efx); +void efx_siena_mcdi_port_remove(struct efx_nic *efx); #endif /* EFX_MCDI_PORT_H */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c index 57908045fb15..a842c139d76f 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c @@ -12,7 +12,8 @@ #include "efx_common.h" #include "nic.h" -int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) +static int efx_mcdi_get_phy_cfg(struct efx_nic *efx, + struct efx_mcdi_phy_data *cfg) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); size_t outlen; @@ -21,8 +22,8 @@ int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; @@ -52,8 +53,8 @@ fail: return rc; } -void efx_link_set_advertising(struct efx_nic *efx, - const unsigned long *advertising) +void efx_siena_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising) { memcpy(efx->link_advertising, advertising, sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); @@ -67,8 +68,8 @@ void efx_link_set_advertising(struct efx_nic *efx, efx->wanted_fc ^= EFX_FC_TX; } -int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, - u32 flags, u32 loopback_mode, u32 loopback_speed) +static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, u32 loopback_speed) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); @@ -79,18 +80,18 @@ int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); - return efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), - NULL, 0, NULL); + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), + NULL, 0, NULL); } -int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) +static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); size_t outlen; int rc; - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; @@ -109,7 +110,7 @@ fail: return rc; } -void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset) +static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset) { #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ linkset) @@ -184,7 +185,7 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset) #undef SET_BIT } -u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) +static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) { u32 result = 0; @@ -229,7 +230,7 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) return result; } -u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) +static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; enum efx_phy_mode mode, supported; @@ -257,7 +258,7 @@ u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) return 
flags; } -u8 mcdi_to_ethtool_media(u32 media) +static u8 mcdi_to_ethtool_media(u32 media) { switch (media) { case MC_CMD_MEDIA_XAUI: @@ -278,9 +279,9 @@ u8 mcdi_to_ethtool_media(u32 media) } } -void efx_mcdi_phy_decode_link(struct efx_nic *efx, - struct efx_link_state *link_state, - u32 speed, u32 flags, u32 fcntl) +static void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl) { switch (fcntl) { case MC_CMD_FCNTL_AUTO: @@ -321,7 +322,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx, * Both RS and BASER (whether AUTO or not) means use FEC if cable and link * partner support it, preferring RS to BASER. */ -u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap) +static u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap) { u32 ret = 0; @@ -352,7 +353,7 @@ u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap) * maps both of those to AUTO. This should never matter, and it's not clear * what a better mapping would be anyway. */ -u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g) +static u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g) { bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN), rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN), @@ -371,7 +372,7 @@ u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g) /* Verify that the forced flow control settings (!EFX_FC_AUTO) are * supported by the link partner. Warn the user if this isn't the case */ -void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) +static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 rmtadv; @@ -397,7 +398,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) "warning: link partner doesn't support pause frames"); } -bool efx_mcdi_phy_poll(struct efx_nic *efx) +bool efx_siena_mcdi_phy_poll(struct efx_nic *efx) { struct efx_link_state old_state = efx->link_state; MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); @@ -407,8 +408,8 @@ bool efx_mcdi_phy_poll(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); if (rc) efx->link_state.up = false; else @@ -421,7 +422,7 @@ bool efx_mcdi_phy_poll(struct efx_nic *efx) return !efx_link_state_equal(&efx->link_state, &old_state); } -int efx_mcdi_phy_probe(struct efx_nic *efx) +int efx_siena_mcdi_phy_probe(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_data; MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); @@ -439,8 +440,8 @@ int efx_mcdi_phy_probe(struct efx_nic *efx) /* Read initial link advertisement */ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); if (rc) goto fail; @@ -527,7 +528,7 @@ fail: return rc; } -void efx_mcdi_phy_remove(struct efx_nic *efx) +void efx_siena_mcdi_phy_remove(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_data = efx->phy_data; @@ -535,7 +536,8 @@ void efx_mcdi_phy_remove(struct efx_nic *efx) kfree(phy_data); } -void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd) +void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx, + struct ethtool_link_ksettings *cmd) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; MCDI_DECLARE_BUF(outbuf, 
MC_CMD_GET_LINK_OUT_LEN); @@ -555,8 +557,8 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); if (rc) return; mcdi_to_ethtool_linkset(phy_cfg->media, @@ -564,7 +566,9 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks cmd->link_modes.lp_advertising); } -int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd) +int +efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx, + const struct ethtool_link_ksettings *cmd) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 caps; @@ -602,7 +606,7 @@ int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_li return rc; if (cmd->base.autoneg) { - efx_link_set_advertising(efx, cmd->link_modes.advertising); + efx_siena_link_set_advertising(efx, cmd->link_modes.advertising); phy_cfg->forced_cap = 0; } else { efx_siena_link_clear_advertising(efx); @@ -611,7 +615,8 @@ int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_li return 0; } -int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec) +int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx, + struct ethtool_fecparam *fec) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN); u32 caps, active, speed; /* MCDI format */ @@ -620,8 +625,8 @@ int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec) int rc; BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) return rc; if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN) @@ -676,7 +681,8 @@ static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap) return 0; } -int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec) +int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx, + const struct ethtool_fecparam *fec) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 caps; @@ -686,7 +692,7 @@ int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam if (rc) return rc; - /* Work out what efx_mcdi_phy_set_link_ksettings() would produce from + /* Work out what efx_siena_mcdi_phy_set_link_ksettings() would produce from * saved advertising bits */ if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising)) @@ -706,7 +712,7 @@ int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam return 0; } -int efx_mcdi_phy_test_alive(struct efx_nic *efx) +int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); size_t outlen; @@ -714,8 +720,8 @@ int efx_mcdi_phy_test_alive(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) return rc; @@ -727,7 +733,7 @@ int efx_mcdi_phy_test_alive(struct efx_nic *efx) return 0; } -int efx_mcdi_port_reconfigure(struct efx_nic *efx) +int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 caps 
= (efx->link_advertising[0] ? @@ -764,16 +770,16 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); - rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, - inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, + MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); if (rc) goto out; /* Wait up to 10s for BIST to finish */ for (retry = 0; retry < 100; ++retry) { BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, - outbuf, sizeof(outbuf), &outlen); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); if (rc) goto out; @@ -811,7 +817,8 @@ out: return rc; } -int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags) +int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results, + unsigned int flags) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 mode; @@ -850,7 +857,8 @@ int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags return 0; } -const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) +const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; @@ -913,10 +921,10 @@ static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx, to_copy = min(space, SFP_PAGE_SIZE - offset); MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO, - inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), - &outlen); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); if (rc) return rc; @@ -984,7 +992,8 @@ static u32 efx_mcdi_phy_module_type(struct efx_nic *efx) } } -int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data) +int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) { int rc; ssize_t space_remaining = ee->len; @@ -1045,7 +1054,7 @@ int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *e return 0; } -int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo) +int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo) { int sff_8472_level; int diag_type; @@ -1090,7 +1099,7 @@ static unsigned int efx_calc_mac_mtu(struct efx_nic *efx) return EFX_MAX_FRAME_LEN(efx->net_dev->mtu); } -int efx_mcdi_set_mac(struct efx_nic *efx) +int efx_siena_mcdi_set_mac(struct efx_nic *efx) { u32 fcntl; MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); @@ -1130,23 +1139,8 @@ int efx_mcdi_set_mac(struct efx_nic *efx) MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); - return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), - NULL, 0, NULL); -} - -int efx_mcdi_set_mtu(struct efx_nic *efx) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN); - - BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); - - MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, efx_calc_mac_mtu(efx)); - - MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL, - SET_MAC_EXT_IN_CFG_MTU, 1); - - return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf), - NULL, 0, NULL); + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, + sizeof(cmdbytes), NULL, 0, NULL); } enum efx_stats_action { @@ -1183,16 +1177,16 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, 
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, + sizeof(inbuf), NULL, 0, NULL); /* Expect ENOENT if DMA queues have not been set up */ if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) - efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), - NULL, 0, rc); + efx_siena_mcdi_display_error(efx, MC_CMD_MAC_STATS, + sizeof(inbuf), NULL, 0, rc); return rc; } -void efx_mcdi_mac_start_stats(struct efx_nic *efx) +void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx) { __le64 *dma_stats = efx->stats_buffer.addr; @@ -1201,7 +1195,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx) efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); } -void efx_mcdi_mac_stop_stats(struct efx_nic *efx) +void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx) { efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); } @@ -1209,7 +1203,7 @@ void efx_mcdi_mac_stop_stats(struct efx_nic *efx) #define EFX_MAC_STATS_WAIT_US 100 #define EFX_MAC_STATS_WAIT_ATTEMPTS 10 -void efx_mcdi_mac_pull_stats(struct efx_nic *efx) +void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx) { __le64 *dma_stats = efx->stats_buffer.addr; int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; @@ -1223,7 +1217,7 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx) udelay(EFX_MAC_STATS_WAIT_US); } -int efx_mcdi_mac_init_stats(struct efx_nic *efx) +int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx) { int rc; @@ -1248,25 +1242,11 @@ int efx_mcdi_mac_init_stats(struct efx_nic *efx) return 0; } -void efx_mcdi_mac_fini_stats(struct efx_nic *efx) +void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx) { efx_nic_free_buffer(efx, &efx->stats_buffer); } -/* Get physical port number (EF10 only; on Siena it is same as PF number) */ -int efx_mcdi_port_get_number(struct efx_nic *efx) -{ - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); - int rc; - - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0, - outbuf, sizeof(outbuf), NULL); - if (rc) - return rc; - - return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT); -} - static unsigned int efx_mcdi_event_link_speed[] = { [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, @@ -1277,7 +1257,7 @@ static unsigned int efx_mcdi_event_link_speed[] = { [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000, }; -void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) +void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) { u32 flags, fcntl, speed, lpa; diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.h b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h index ed31690e591c..7a6de13d9ce6 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_port_common.h +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h @@ -28,40 +28,31 @@ struct efx_mcdi_phy_data { u32 forced_cap; }; -int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg); -void efx_link_set_advertising(struct efx_nic *efx, - const unsigned long *advertising); -int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, - u32 flags, u32 loopback_mode, u32 loopback_speed); -int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes); -void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset); -u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset); -u32 efx_get_mcdi_phy_flags(struct efx_nic *efx); -u8 
mcdi_to_ethtool_media(u32 media); -void efx_mcdi_phy_decode_link(struct efx_nic *efx, - struct efx_link_state *link_state, - u32 speed, u32 flags, u32 fcntl); -u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap); -u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g); -void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa); -bool efx_mcdi_phy_poll(struct efx_nic *efx); -int efx_mcdi_phy_probe(struct efx_nic *efx); -void efx_mcdi_phy_remove(struct efx_nic *efx); -void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd); -int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd); -int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec); -int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec); -int efx_mcdi_phy_test_alive(struct efx_nic *efx); -int efx_mcdi_port_reconfigure(struct efx_nic *efx); -int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags); -const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index); -int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data); -int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo); -int efx_mcdi_set_mac(struct efx_nic *efx); -int efx_mcdi_set_mtu(struct efx_nic *efx); -int efx_mcdi_mac_init_stats(struct efx_nic *efx); -void efx_mcdi_mac_fini_stats(struct efx_nic *efx); -int efx_mcdi_port_get_number(struct efx_nic *efx); -void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); +void efx_siena_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising); +bool efx_siena_mcdi_phy_poll(struct efx_nic *efx); +int efx_siena_mcdi_phy_probe(struct efx_nic *efx); +void efx_siena_mcdi_phy_remove(struct efx_nic *efx); +void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx, + struct ethtool_link_ksettings *cmd); +int efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx, + const struct ethtool_link_ksettings *cmd); +int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx, + struct ethtool_fecparam *fec); +int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx, + const struct ethtool_fecparam *fec); +int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx); +int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx); +int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results, + unsigned int flags); +const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index); +int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data); +int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo); +int efx_siena_mcdi_set_mac(struct efx_nic *efx); +int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx); +void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx); #endif diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index b67417063a80..5b4717520c3e 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -426,8 +426,8 @@ size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats) */ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); if (rc) 
memset(outbuf, 0, sizeof(outbuf)); efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, @@ -641,8 +641,8 @@ static int efx_ptp_get_attributes(struct efx_nic *efx) */ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &out_len); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); if (rc == 0) { fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT); } else if (rc == -EINVAL) { @@ -651,8 +651,8 @@ static int efx_ptp_get_attributes(struct efx_nic *efx) pci_info(efx->pci_dev, "no PTP support\n"); return rc; } else { - efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), - outbuf, sizeof(outbuf), rc); + efx_siena_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), + outbuf, sizeof(outbuf), rc); return rc; } @@ -739,8 +739,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &out_len); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); if (rc == 0) { efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf, PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); @@ -772,8 +772,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) efx->ptp_data->ts_corrections.general_tx = 0; efx->ptp_data->ts_corrections.general_rx = 0; } else { - efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf, - sizeof(outbuf), rc); + efx_siena_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), + outbuf, sizeof(outbuf), rc); return rc; } @@ -794,13 +794,13 @@ static int efx_ptp_enable(struct efx_nic *efx) efx->ptp_data->channel->channel : 0); MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); rc = (rc == -EALREADY) ? 0 : rc; if (rc) - efx_mcdi_display_error(efx, MC_CMD_PTP, - MC_CMD_PTP_IN_ENABLE_LEN, - outbuf, sizeof(outbuf), rc); + efx_siena_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_ENABLE_LEN, + outbuf, sizeof(outbuf), rc); return rc; } @@ -817,8 +817,8 @@ static int efx_ptp_disable(struct efx_nic *efx) MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); rc = (rc == -EALREADY) ? 0 : rc; /* If we get ENOSYS, the NIC doesn't support PTP, and thus this function * should only have been called during probe. 
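As a side illustration (outside the patch itself), the efx_ptp_enable()/efx_ptp_disable() hunks nearby rely on a small error-mapping convention for MC_CMD_PTP: -EALREADY means the device is already in the requested state and is treated as success, while other failures are reported through the _display_error helper (the probe path additionally treats -ENOSYS/-EPERM as "no PTP support"). A minimal sketch of that convention, assuming the renamed helpers from this patch; the wrapper function is hypothetical:

/* Hypothetical wrapper showing the MC_CMD_PTP error convention used by the
 * surrounding hunks: map -EALREADY to success, log anything else.
 */
static int example_ptp_op(struct efx_nic *efx, const efx_dword_t *inbuf,
			  size_t inlen)
{
	int rc;

	rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, inlen,
				      NULL, 0, NULL);
	rc = (rc == -EALREADY) ? 0 : rc;
	if (rc)
		efx_siena_mcdi_display_error(efx, MC_CMD_PTP, inlen,
					     NULL, 0, rc);
	return rc;
}
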
@@ -826,9 +826,9 @@ static int efx_ptp_disable(struct efx_nic *efx) if (rc == -ENOSYS || rc == -EPERM) pci_info(efx->pci_dev, "no PTP support\n"); else if (rc) - efx_mcdi_display_error(efx, MC_CMD_PTP, - MC_CMD_PTP_IN_DISABLE_LEN, - outbuf, sizeof(outbuf), rc); + efx_siena_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_DISABLE_LEN, + outbuf, sizeof(outbuf), rc); return rc; } @@ -1042,8 +1042,8 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) /* Clear flag that signals MC ready */ WRITE_ONCE(*start, 0); - rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, - MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + rc = efx_siena_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN); EFX_WARN_ON_ONCE_PARANOID(rc); /* Wait for start from MCDI (or timeout) */ @@ -1062,10 +1062,10 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) efx_ptp_send_times(efx, &last_time); /* Collect results */ - rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP, - MC_CMD_PTP_IN_SYNCHRONIZE_LEN, - synch_buf, sizeof(synch_buf), - &response_length); + rc = efx_siena_mcdi_rpc_finish(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN, + synch_buf, sizeof(synch_buf), + &response_length); if (rc == 0) { rc = efx_ptp_process_times(efx, synch_buf, response_length, &last_time); @@ -1127,9 +1127,9 @@ static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb) MCDI_PTR(ptp_data->txbuf, PTP_IN_TRANSMIT_PACKET), skb->len); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, - ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), - txtime, sizeof(txtime), &len); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, ptp_data->txbuf, + MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), txtime, + sizeof(txtime), &len); if (rc != 0) goto fail; @@ -2068,8 +2068,8 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), - NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), + NULL, 0, NULL); if (rc != 0) return rc; @@ -2093,8 +2093,8 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major); MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor); - return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - NULL, 0, NULL); + return efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); } static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) @@ -2111,8 +2111,8 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c index 83bd27df30d4..2d70b3356ddf 100644 --- a/drivers/net/ethernet/sfc/siena/selftest.c +++ b/drivers/net/ethernet/sfc/siena/selftest.c @@ -100,7 +100,7 @@ static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) { int rc = 0; - rc = efx_mcdi_phy_test_alive(efx); + rc = 
efx_siena_mcdi_phy_test_alive(efx); tests->phy_alive = rc ? -1 : 1; return rc; @@ -257,7 +257,7 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, int rc; mutex_lock(&efx->mac_lock); - rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags); + rc = efx_siena_mcdi_phy_run_tests(efx, tests->phy_ext, flags); mutex_unlock(&efx->mac_lock); if (rc == -EPERM) rc = 0; diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index d70e481d0c73..74ed8e972c93 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -59,13 +59,13 @@ static void siena_push_irq_moderation(struct efx_channel *channel) void efx_siena_prepare_flush(struct efx_nic *efx) { if (efx->fc_disable++ == 0) - efx_mcdi_set_mac(efx); + efx_siena_mcdi_set_mac(efx); } void siena_finish_flush(struct efx_nic *efx) { if (--efx->fc_disable == 0) - efx_mcdi_set_mac(efx); + efx_siena_mcdi_set_mac(efx); } static const struct efx_farch_register_test siena_register_tests[] = { @@ -107,7 +107,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) /* Reset the chip immediately so that it is completely * quiescent regardless of what any VF driver does. */ - rc = efx_mcdi_reset(efx, reset_method); + rc = efx_siena_mcdi_reset(efx, reset_method); if (rc) goto out; @@ -116,7 +116,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) ARRAY_SIZE(siena_register_tests)) ? -1 : 1; - rc = efx_mcdi_reset(efx, reset_method); + rc = efx_siena_mcdi_reset(efx, reset_method); out: rc2 = efx_siena_reset_up(efx, reset_method, rc == 0); return rc ? rc : rc2; @@ -223,7 +223,8 @@ static int siena_probe_nvconfig(struct efx_nic *efx) u32 caps = 0; int rc; - rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps); + rc = efx_siena_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, + &caps); efx->timer_quantum_ns = (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? 
@@ -286,12 +287,12 @@ static int siena_probe_nic(struct efx_nic *efx) efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; - rc = efx_mcdi_init(efx); + rc = efx_siena_mcdi_init(efx); if (rc) goto fail1; /* Now we can reset the NIC */ - rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); + rc = efx_siena_mcdi_reset(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; @@ -323,7 +324,7 @@ static int siena_probe_nic(struct efx_nic *efx) goto fail5; } - rc = efx_mcdi_mon_probe(efx); + rc = efx_siena_mcdi_mon_probe(efx); if (rc) goto fail5; @@ -338,8 +339,8 @@ fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: - efx_mcdi_detach(efx); - efx_mcdi_fini(efx); + efx_siena_mcdi_detach(efx); + efx_siena_mcdi_fini(efx); fail1: kfree(efx->nic_data); return rc; @@ -406,7 +407,7 @@ static int siena_init_nic(struct efx_nic *efx) int rc; /* Recover from a failed assertion post-reset */ - rc = efx_mcdi_handle_assertion(efx); + rc = efx_siena_mcdi_handle_assertion(efx); if (rc) return rc; @@ -440,7 +441,7 @@ static int siena_init_nic(struct efx_nic *efx) efx->rss_context.context_id = 0; /* indicates RSS is active */ /* Enable event logging */ - rc = efx_mcdi_log_ctrl(efx, true, false, 0); + rc = efx_siena_mcdi_log_ctrl(efx, true, false, 0); if (rc) return rc; @@ -457,14 +458,14 @@ static int siena_init_nic(struct efx_nic *efx) static void siena_remove_nic(struct efx_nic *efx) { - efx_mcdi_mon_remove(efx); + efx_siena_mcdi_mon_remove(efx); efx_nic_free_buffer(efx, &efx->irq_status); - efx_mcdi_reset(efx, RESET_TYPE_ALL); + efx_siena_mcdi_reset(efx, RESET_TYPE_ALL); - efx_mcdi_detach(efx); - efx_mcdi_fini(efx); + efx_siena_mcdi_detach(efx); + efx_siena_mcdi_fini(efx); /* Tear down the private nic state */ kfree(efx->nic_data); @@ -649,14 +650,14 @@ static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unu WARN_ON(!mutex_is_locked(&efx->mac_lock)); - rc = efx_mcdi_set_mac(efx); + rc = efx_siena_mcdi_set_mac(efx); if (rc != 0) return rc; memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), efx->multicast_hash.byte, sizeof(efx->multicast_hash)); - return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, - inbuf, sizeof(inbuf), NULL, 0, NULL); + return efx_siena_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, + inbuf, sizeof(inbuf), NULL, 0, NULL); } /************************************************************************** @@ -689,16 +690,17 @@ static int siena_set_wol(struct efx_nic *efx, u32 type) if (type & WAKE_MAGIC) { if (nic_data->wol_filter_id != -1) - efx_mcdi_wol_filter_remove(efx, - nic_data->wol_filter_id); - rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, - &nic_data->wol_filter_id); + efx_siena_mcdi_wol_filter_remove(efx, + nic_data->wol_filter_id); + rc = efx_siena_mcdi_wol_filter_set_magic(efx, + efx->net_dev->dev_addr, + &nic_data->wol_filter_id); if (rc) goto fail; pci_wake_from_d3(efx->pci_dev, true); } else { - rc = efx_mcdi_wol_filter_reset(efx); + rc = efx_siena_mcdi_wol_filter_reset(efx); nic_data->wol_filter_id = -1; pci_wake_from_d3(efx->pci_dev, false); if (rc) @@ -718,12 +720,12 @@ static void siena_init_wol(struct efx_nic *efx) struct siena_nic_data *nic_data = efx->nic_data; int rc; - rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); + rc = efx_siena_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); if (rc != 0) { /* If it failed, attempt to get into a synchronised * state with MC by resetting any set WoL filters */ - 
efx_mcdi_wol_filter_reset(efx); + efx_siena_mcdi_wol_filter_reset(efx); nic_data->wol_filter_id = -1; } else if (nic_data->wol_filter_id != -1) { pci_wake_from_d3(efx->pci_dev, true); @@ -869,7 +871,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx, if (info->port != efx_port_num(efx)) return -ENODEV; - rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); + rc = efx_siena_mcdi_nvram_info(efx, type, &size, &erase_size, + &protected); if (rc) return rc; if (protected) @@ -896,7 +899,7 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, size_t i; int rc; - rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); + rc = efx_siena_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); if (rc) return rc; @@ -916,7 +919,7 @@ static int siena_mtd_probe(struct efx_nic *efx) ASSERT_RTNL(); - rc = efx_mcdi_nvram_types(efx, &nvram_types); + rc = efx_siena_mcdi_nvram_types(efx, &nvram_types); if (rc) return rc; @@ -987,11 +990,11 @@ const struct efx_nic_type siena_a0_nic_type = { #else .monitor = NULL, #endif - .map_reset_reason = efx_mcdi_map_reset_reason, + .map_reset_reason = efx_siena_mcdi_map_reset_reason, .map_reset_flags = siena_map_reset_flags, - .reset = efx_mcdi_reset, - .probe_port = efx_mcdi_port_probe, - .remove_port = efx_mcdi_port_remove, + .reset = efx_siena_mcdi_reset, + .probe_port = efx_siena_mcdi_port_probe, + .remove_port = efx_siena_mcdi_port_remove, .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = efx_siena_prepare_flush, .finish_flush = siena_finish_flush, @@ -999,18 +1002,18 @@ const struct efx_nic_type siena_a0_nic_type = { .finish_flr = efx_farch_finish_flr, .describe_stats = siena_describe_nic_stats, .update_stats = siena_update_nic_stats, - .start_stats = efx_mcdi_mac_start_stats, - .pull_stats = efx_mcdi_mac_pull_stats, - .stop_stats = efx_mcdi_mac_stop_stats, + .start_stats = efx_siena_mcdi_mac_start_stats, + .pull_stats = efx_siena_mcdi_mac_pull_stats, + .stop_stats = efx_siena_mcdi_mac_stop_stats, .push_irq_moderation = siena_push_irq_moderation, .reconfigure_mac = siena_mac_reconfigure, - .check_mac_fault = efx_mcdi_mac_check_fault, - .reconfigure_port = efx_mcdi_port_reconfigure, + .check_mac_fault = efx_siena_mcdi_mac_check_fault, + .reconfigure_port = efx_siena_mcdi_port_reconfigure, .get_wol = siena_get_wol, .set_wol = siena_set_wol, .resume_wol = siena_init_wol, .test_chip = siena_test_chip, - .test_nvram = efx_mcdi_nvram_test_all, + .test_nvram = efx_siena_mcdi_nvram_test_all, .mcdi_request = siena_mcdi_request, .mcdi_poll_response = siena_mcdi_poll_response, .mcdi_read_response = siena_mcdi_read_response, @@ -1057,11 +1060,11 @@ const struct efx_nic_type siena_a0_nic_type = { #endif #ifdef CONFIG_SFC_MTD .mtd_probe = siena_mtd_probe, - .mtd_rename = efx_mcdi_mtd_rename, - .mtd_read = efx_mcdi_mtd_read, - .mtd_erase = efx_mcdi_mtd_erase, - .mtd_write = efx_mcdi_mtd_write, - .mtd_sync = efx_mcdi_mtd_sync, + .mtd_rename = efx_siena_mcdi_mtd_rename, + .mtd_read = efx_siena_mcdi_mtd_read, + .mtd_erase = efx_siena_mcdi_mtd_erase, + .mtd_write = efx_siena_mcdi_mtd_write, + .mtd_sync = efx_siena_mcdi_mtd_sync, #endif .ptp_write_host_time = siena_ptp_write_host_time, .ptp_set_ts_config = siena_ptp_set_ts_config, @@ -1105,6 +1108,6 @@ const struct efx_nic_type siena_a0_nic_type = { 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), .rx_hash_key_size = 16, .check_caps = siena_check_caps, - .sensor_event = efx_mcdi_sensor_event, + .sensor_event = efx_siena_mcdi_sensor_event, .rx_recycle_ring_size = efx_siena_recycle_ring_size, }; diff --git 
a/drivers/net/ethernet/sfc/siena/siena_sriov.c b/drivers/net/ethernet/sfc/siena/siena_sriov.c index f8e14f0d2f34..fdfcf480fd47 100644 --- a/drivers/net/ethernet/sfc/siena/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.c @@ -206,8 +206,9 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE); MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count); - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN, - outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen); + rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, + MC_CMD_SRIOV_IN_LEN, outbuf, + MC_CMD_SRIOV_OUT_LEN, &outlen); if (rc) return rc; if (outlen < MC_CMD_SRIOV_OUT_LEN) @@ -288,7 +289,7 @@ static int efx_siena_sriov_memcpy(struct efx_nic *efx, ++req; } - rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); + rc = efx_siena_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); out: mb(); /* Don't write source/read dest before DMA is complete */ @@ -712,7 +713,7 @@ static int efx_vfdi_fini_all_queues(struct siena_vf *vf) atomic_set(&vf->rxq_retry_count, 0); while (timeout && (vf->rxq_count || vf->txq_count)) { - rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count), NULL, 0, NULL); WARN_ON(rc < 0); -- cgit v1.2.3 From c8443b698238fd0cd525c48578e74f093e9f80aa Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:32:58 +0100 Subject: sfc/siena: Rename functions in nic_common.h to avoid conflicts with sfc For siena use efx_siena_ as the function prefix. efx_nic_update_stats_atomic is only used in efx_common.c, so move it there. efx_nic_copy_stats is not used in Siena, so it is removed. 
Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx.c | 6 +- drivers/net/ethernet/sfc/siena/efx_common.c | 10 ++- drivers/net/ethernet/sfc/siena/ethtool.c | 4 +- drivers/net/ethernet/sfc/siena/farch.c | 4 +- drivers/net/ethernet/sfc/siena/mcdi_mon.c | 9 ++- drivers/net/ethernet/sfc/siena/mcdi_port_common.c | 7 +- drivers/net/ethernet/sfc/siena/nic.c | 79 ++++++----------------- drivers/net/ethernet/sfc/siena/nic_common.h | 40 +++++------- drivers/net/ethernet/sfc/siena/ptp.c | 16 ++--- drivers/net/ethernet/sfc/siena/selftest.c | 8 +-- drivers/net/ethernet/sfc/siena/siena.c | 20 +++--- drivers/net/ethernet/sfc/siena/siena_sriov.c | 22 +++---- drivers/net/ethernet/sfc/siena/tx.c | 4 +- drivers/net/ethernet/sfc/siena/tx_common.c | 4 +- 14 files changed, 95 insertions(+), 138 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index 1bc5ee6220f0..d937704e416b 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -830,7 +830,7 @@ static void efx_pci_remove_main(struct efx_nic *efx) efx_siena_disable_interrupts(efx); efx_siena_clear_interrupt_affinity(efx); - efx_nic_fini_interrupt(efx); + efx_siena_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); efx_siena_fini_napi(efx); @@ -939,7 +939,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) goto fail4; } - rc = efx_nic_init_interrupt(efx); + rc = efx_siena_init_interrupt(efx); if (rc) goto fail5; @@ -952,7 +952,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) fail6: efx_siena_clear_interrupt_affinity(efx); - efx_nic_fini_interrupt(efx); + efx_siena_fini_interrupt(efx); fail5: efx_fini_port(efx); fail4: diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index 3293221b9e9e..b44a7114e319 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -597,6 +597,14 @@ void efx_siena_stop_all(struct efx_nic *efx) efx_stop_datapath(efx); } +static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + if (efx->type->update_stats_atomic) + return efx->type->update_stats_atomic(efx, full_stats, core_stats); + return efx->type->update_stats(efx, full_stats, core_stats); +} + /* Context: process, dev_base_lock or RTNL held, non-blocking. 
*/ void efx_siena_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) @@ -604,7 +612,7 @@ void efx_siena_net_stats(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); spin_lock_bh(&efx->stats_lock); - efx_nic_update_stats_atomic(efx, NULL, stats); + efx_siena_update_stats_atomic(efx, NULL, stats); spin_unlock_bh(&efx->stats_lock); } diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 5ee626ba4eb1..e4ec589216c1 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -55,7 +55,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, static int efx_ethtool_get_regs_len(struct net_device *net_dev) { - return efx_nic_get_regs_len(netdev_priv(net_dev)); + return efx_siena_get_regs_len(netdev_priv(net_dev)); } static void efx_ethtool_get_regs(struct net_device *net_dev, @@ -64,7 +64,7 @@ static void efx_ethtool_get_regs(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); regs->version = efx->type->revision; - efx_nic_get_regs(efx, buf); + efx_siena_get_regs(efx, buf); } /* diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c index ebd6fa408126..a24ba23fd19f 100644 --- a/drivers/net/ethernet/sfc/siena/farch.c +++ b/drivers/net/ethernet/sfc/siena/farch.c @@ -233,7 +233,7 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, #endif len = ALIGN(len, EFX_BUF_SIZE); - if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) + if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) return -ENOMEM; buffer->entries = len / EFX_BUF_SIZE; BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); @@ -269,7 +269,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) (u64)buffer->buf.dma_addr, buffer->buf.len, buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); - efx_nic_free_buffer(efx, &buffer->buf); + efx_siena_free_buffer(efx, &buffer->buf); buffer->entries = 0; } diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c index eb44d4140925..d0c25dfda0d7 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c @@ -336,10 +336,9 @@ int efx_siena_mcdi_mon_probe(struct efx_nic *efx) if (n_sensors == 0) return 0; - rc = efx_nic_alloc_buffer( - efx, &hwmon->dma_buf, - n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN, - GFP_KERNEL); + rc = efx_siena_alloc_buffer(efx, &hwmon->dma_buf, + n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN, + GFP_KERNEL); if (rc) return rc; @@ -526,7 +525,7 @@ void efx_siena_mcdi_mon_remove(struct efx_nic *efx) hwmon_device_unregister(hwmon->device); kfree(hwmon->attrs); kfree(hwmon->group.attrs); - efx_nic_free_buffer(efx, &hwmon->dma_buf); + efx_siena_free_buffer(efx, &hwmon->dma_buf); } #endif /* CONFIG_SFC_MCDI_MON */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c index a842c139d76f..067fe0f4393a 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c @@ -1225,8 +1225,9 @@ int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx) return 0; /* Allocate buffer for stats */ - rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, - efx->num_mac_stats * sizeof(u64), GFP_KERNEL); + rc = efx_siena_alloc_buffer(efx, &efx->stats_buffer, + efx->num_mac_stats * sizeof(u64), + GFP_KERNEL); if (rc) { netif_warn(efx, probe, efx->net_dev, 
"failed to allocate DMA buffer: %d\n", rc); @@ -1244,7 +1245,7 @@ int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx) void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx) { - efx_nic_free_buffer(efx, &efx->stats_buffer); + efx_siena_free_buffer(efx, &efx->stats_buffer); } static unsigned int efx_mcdi_event_link_speed[] = { diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c index c59357178657..abf9a4adf139 100644 --- a/drivers/net/ethernet/sfc/siena/nic.c +++ b/drivers/net/ethernet/sfc/siena/nic.c @@ -28,8 +28,8 @@ * **************************************************************************/ -int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, - unsigned int len, gfp_t gfp_flags) +int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags) { buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, &buffer->dma_addr, gfp_flags); @@ -39,7 +39,7 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, return 0; } -void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) +void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) { if (buffer->addr) { dma_free_coherent(&efx->pci_dev->dev, buffer->len, @@ -51,19 +51,19 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) /* Check whether an event is present in the eventq at the current * read pointer. Only useful for self-test. */ -bool efx_nic_event_present(struct efx_channel *channel) +bool efx_siena_event_present(struct efx_channel *channel) { return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); } -void efx_nic_event_test_start(struct efx_channel *channel) +void efx_siena_event_test_start(struct efx_channel *channel) { channel->event_test_cpu = -1; smp_wmb(); channel->efx->type->ev_test_generate(channel); } -int efx_nic_irq_test_start(struct efx_nic *efx) +int efx_siena_irq_test_start(struct efx_nic *efx) { efx->last_irq_cpu = -1; smp_wmb(); @@ -73,7 +73,7 @@ int efx_nic_irq_test_start(struct efx_nic *efx) /* Hook interrupt handler(s) * Try MSI and then legacy interrupts. 
*/ -int efx_nic_init_interrupt(struct efx_nic *efx) +int efx_siena_init_interrupt(struct efx_nic *efx) { struct efx_channel *channel; unsigned int n_irqs; @@ -146,7 +146,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx) return rc; } -void efx_nic_fini_interrupt(struct efx_nic *efx) +void efx_siena_fini_interrupt(struct efx_nic *efx) { struct efx_channel *channel; @@ -364,7 +364,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = { REGISTER_TABLE_BZ(RX_FILTER_TBL0), }; -size_t efx_nic_get_regs_len(struct efx_nic *efx) +size_t efx_siena_get_regs_len(struct efx_nic *efx) { const struct efx_nic_reg *reg; const struct efx_nic_reg_table *table; @@ -387,7 +387,7 @@ size_t efx_nic_get_regs_len(struct efx_nic *efx) return len; } -void efx_nic_get_regs(struct efx_nic *efx, void *buf) +void efx_siena_get_regs(struct efx_nic *efx, void *buf) { const struct efx_nic_reg *reg; const struct efx_nic_reg_table *table; @@ -439,7 +439,7 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) } /** - * efx_nic_describe_stats - Describe supported statistics for ethtool + * efx_siena_describe_stats - Describe supported statistics for ethtool * @desc: Array of &struct efx_hw_stat_desc describing the statistics * @count: Length of the @desc array * @mask: Bitmask of which elements of @desc are enabled @@ -449,8 +449,8 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) * Returns the number of visible statistics, i.e. the number of set * bits in the first @count bits of @mask for which a name is defined. */ -size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names) +size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names) { size_t visible = 0; size_t index; @@ -470,50 +470,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, } /** - * efx_nic_copy_stats - Copy stats from the DMA buffer in to an - * intermediate buffer. This is used to get a consistent - * set of stats while the DMA buffer can be written at any time - * by the NIC. - * @efx: The associated NIC. - * @dest: Destination buffer. Must be the same size as the DMA buffer. - */ -int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest) -{ - __le64 *dma_stats = efx->stats_buffer.addr; - __le64 generation_start, generation_end; - int rc = 0, retry; - - if (!dest) - return 0; - - if (!dma_stats) - goto return_zeroes; - - /* If we're unlucky enough to read statistics during the DMA, wait - * up to 10ms for it to finish (typically takes <500us) - */ - for (retry = 0; retry < 100; ++retry) { - generation_end = dma_stats[efx->num_mac_stats - 1]; - if (generation_end == EFX_MC_STATS_GENERATION_INVALID) - goto return_zeroes; - rmb(); - memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64)); - rmb(); - generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; - if (generation_end == generation_start) - return 0; /* return good data */ - udelay(100); - } - - rc = -EIO; - -return_zeroes: - memset(dest, 0, efx->num_mac_stats * sizeof(u64)); - return rc; -} - -/** - * efx_nic_update_stats - Convert statistics DMA buffer to array of u64 + * efx_siena_update_stats - Convert statistics DMA buffer to array of u64 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer * layout. 
DMA widths of 0, 16, 32 and 64 are supported; where * the width is specified as 0 the corresponding element of @@ -526,9 +483,9 @@ return_zeroes: * @accumulate: If set, the converted values will be added rather than * directly stored to the corresponding elements of @stats */ -void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, - u64 *stats, const void *dma_buf, bool accumulate) +void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, + u64 *stats, const void *dma_buf, bool accumulate) { size_t index; @@ -561,7 +518,7 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, } } -void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) +void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) { /* if down, or this is the first update after coming up */ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state) diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h index 47deeae0a034..3af0405eeaa4 100644 --- a/drivers/net/ethernet/sfc/siena/nic_common.h +++ b/drivers/net/ethernet/sfc/siena/nic_common.h @@ -182,9 +182,9 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) channel->efx->type->ev_read_ack(channel); } -void efx_nic_event_test_start(struct efx_channel *channel); +void efx_siena_event_test_start(struct efx_channel *channel); -bool efx_nic_event_present(struct efx_channel *channel); +bool efx_siena_event_present(struct efx_channel *channel); static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev) { @@ -216,9 +216,9 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff) } /* Interrupts */ -int efx_nic_init_interrupt(struct efx_nic *efx); -int efx_nic_irq_test_start(struct efx_nic *efx); -void efx_nic_fini_interrupt(struct efx_nic *efx); +int efx_siena_init_interrupt(struct efx_nic *efx); +int efx_siena_irq_test_start(struct efx_nic *efx); +void efx_siena_fini_interrupt(struct efx_nic *efx); static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) { @@ -230,29 +230,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) } /* Global Resources */ -int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, - unsigned int len, gfp_t gfp_flags); -void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); +int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags); +void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); -size_t efx_nic_get_regs_len(struct efx_nic *efx); -void efx_nic_get_regs(struct efx_nic *efx, void *buf); +size_t efx_siena_get_regs_len(struct efx_nic *efx); +void efx_siena_get_regs(struct efx_nic *efx, void *buf); #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) -size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names); -int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest); -void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u64 *stats, - const void *dma_buf, bool accumulate); -void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); -static inline size_t efx_nic_update_stats_atomic(struct efx_nic *efx, u64 *full_stats, - struct rtnl_link_stats64 *core_stats) -{ - if (efx->type->update_stats_atomic) - return 
efx->type->update_stats_atomic(efx, full_stats, core_stats); - return efx->type->update_stats(efx, full_stats, core_stats); -} +size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names); +void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u64 *stats, + const void *dma_buf, bool accumulate); +void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); #define EFX_MAX_FLUSH_TIME 5000 diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index 5b4717520c3e..8e18da096595 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -398,8 +398,8 @@ size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings) if (!efx->ptp_data) return 0; - return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, - efx_ptp_stat_mask, strings); + return efx_siena_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, strings); } size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats) @@ -430,9 +430,9 @@ size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats) outbuf, sizeof(outbuf), NULL); if (rc) memset(outbuf, 0, sizeof(outbuf)); - efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, - efx_ptp_stat_mask, - stats, _MCDI_PTR(outbuf, 0), false); + efx_siena_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, + stats, _MCDI_PTR(outbuf, 0), false); return PTP_STAT_COUNT; } @@ -1452,7 +1452,7 @@ static int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) ptp->channel = channel; ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; - rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); + rc = efx_siena_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); if (rc != 0) goto fail1; @@ -1519,7 +1519,7 @@ fail3: destroy_workqueue(efx->ptp_data->workwq); fail2: - efx_nic_free_buffer(efx, &ptp->start); + efx_siena_free_buffer(efx, &ptp->start); fail1: kfree(efx->ptp_data); @@ -1574,7 +1574,7 @@ static void efx_ptp_remove(struct efx_nic *efx) destroy_workqueue(efx->ptp_data->workwq); - efx_nic_free_buffer(efx, &efx->ptp_data->start); + efx_siena_free_buffer(efx, &efx->ptp_data->start); kfree(efx->ptp_data); efx->ptp_data = NULL; } diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c index 2d70b3356ddf..07715a3d6bea 100644 --- a/drivers/net/ethernet/sfc/siena/selftest.c +++ b/drivers/net/ethernet/sfc/siena/selftest.c @@ -138,7 +138,7 @@ static int efx_test_interrupts(struct efx_nic *efx, netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); tests->interrupt = -1; - rc = efx_nic_irq_test_start(efx); + rc = efx_siena_irq_test_start(efx); if (rc == -ENOTSUPP) { netif_dbg(efx, drv, efx->net_dev, "direct interrupt testing not supported\n"); @@ -184,7 +184,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx, read_ptr[channel->channel] = channel->eventq_read_ptr; set_bit(channel->channel, &dma_pend); set_bit(channel->channel, &int_pend); - efx_nic_event_test_start(channel); + efx_siena_event_test_start(channel); } timeout = jiffies + IRQ_TIMEOUT; @@ -204,7 +204,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx, clear_bit(channel->channel, &dma_pend); clear_bit(channel->channel, &int_pend); } else { - if (efx_nic_event_present(channel)) + if (efx_siena_event_present(channel)) clear_bit(channel->channel, &dma_pend); if (efx_nic_event_test_irq_cpu(channel) >= 0) 
clear_bit(channel->channel, &int_pend); @@ -772,7 +772,7 @@ void efx_siena_selftest_async_start(struct efx_nic *efx) struct efx_channel *channel; efx_for_each_channel(channel, efx) - efx_nic_event_test_start(channel); + efx_siena_event_test_start(channel); schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT); } diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index 74ed8e972c93..741313aff1d1 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -301,8 +301,8 @@ static int siena_probe_nic(struct efx_nic *efx) siena_init_wol(efx); /* Allocate memory for INT_KER */ - rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), - GFP_KERNEL); + rc = efx_siena_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), + GFP_KERNEL); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); @@ -336,7 +336,7 @@ static int siena_probe_nic(struct efx_nic *efx) return 0; fail5: - efx_nic_free_buffer(efx, &efx->irq_status); + efx_siena_free_buffer(efx, &efx->irq_status); fail4: fail3: efx_siena_mcdi_detach(efx); @@ -460,7 +460,7 @@ static void siena_remove_nic(struct efx_nic *efx) { efx_siena_mcdi_mon_remove(efx); - efx_nic_free_buffer(efx, &efx->irq_status); + efx_siena_free_buffer(efx, &efx->irq_status); efx_siena_mcdi_reset(efx, RESET_TYPE_ALL); @@ -547,8 +547,8 @@ static const unsigned long siena_stat_mask[] = { static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names) { - return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT, - siena_stat_mask, names); + return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT, + siena_stat_mask, names); } static int siena_try_update_nic_stats(struct efx_nic *efx) @@ -564,16 +564,16 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) if (generation_end == EFX_MC_STATS_GENERATION_INVALID) return 0; rmb(); - efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask, - stats, efx->stats_buffer.addr, false); + efx_siena_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask, + stats, efx->stats_buffer.addr, false); rmb(); generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; if (generation_end != generation_start) return -EAGAIN; /* Update derived statistics */ - efx_nic_fix_nodesc_drop_stat(efx, - &stats[SIENA_STAT_rx_nodesc_drop_cnt]); + efx_siena_fix_nodesc_drop_stat(efx, + &stats[SIENA_STAT_rx_nodesc_drop_cnt]); efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes], stats[SIENA_STAT_tx_bytes] - stats[SIENA_STAT_tx_bad_bytes]); diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.c b/drivers/net/ethernet/sfc/siena/siena_sriov.c index fdfcf480fd47..8353c15dc233 100644 --- a/drivers/net/ethernet/sfc/siena/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.c @@ -1012,9 +1012,9 @@ static void efx_siena_sriov_reset_vf_work(struct work_struct *work) struct efx_nic *efx = vf->efx; struct efx_buffer buf; - if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { + if (!efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { efx_siena_sriov_reset_vf(vf, &buf); - efx_nic_free_buffer(efx, &buf); + efx_siena_free_buffer(efx, &buf); } } @@ -1229,7 +1229,7 @@ static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) for (pos = 0; pos < efx->vf_count; ++pos) { vf = nic_data->vf + pos; - efx_nic_free_buffer(efx, &vf->buf); + efx_siena_free_buffer(efx, &vf->buf); kfree(vf->peer_page_addrs); vf->peer_page_addrs = NULL; vf->peer_page_count = 0; @@ -1269,8 +1269,8 @@ static int 
efx_siena_sriov_vfs_init(struct efx_nic *efx) pci_domain_nr(pci_dev->bus), pci_dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); - rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, - GFP_KERNEL); + rc = efx_siena_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, + GFP_KERNEL); if (rc) goto fail; @@ -1303,8 +1303,8 @@ int efx_siena_sriov_init(struct efx_nic *efx) if (rc) goto fail_cmd; - rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status, - sizeof(*vfdi_status), GFP_KERNEL); + rc = efx_siena_alloc_buffer(efx, &nic_data->vfdi_status, + sizeof(*vfdi_status), GFP_KERNEL); if (rc) goto fail_status; vfdi_status = nic_data->vfdi_status.addr; @@ -1359,7 +1359,7 @@ fail_vfs: efx_siena_sriov_free_local(efx); kfree(nic_data->vf); fail_alloc: - efx_nic_free_buffer(efx, &nic_data->vfdi_status); + efx_siena_free_buffer(efx, &nic_data->vfdi_status); fail_status: efx_siena_sriov_cmd(efx, false, NULL, NULL); fail_cmd: @@ -1396,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx) efx_siena_sriov_vfs_fini(efx); efx_siena_sriov_free_local(efx); kfree(nic_data->vf); - efx_nic_free_buffer(efx, &nic_data->vfdi_status); + efx_siena_free_buffer(efx, &nic_data->vfdi_status); efx_siena_sriov_cmd(efx, false, NULL, NULL); } @@ -1564,7 +1564,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx) efx_siena_sriov_usrev(efx, true); (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); - if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) + if (efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) return; for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { @@ -1572,7 +1572,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx) efx_siena_sriov_reset_vf(vf, &buf); } - efx_nic_free_buffer(efx, &buf); + efx_siena_free_buffer(efx, &buf); } int efx_init_sriov(void) diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c index ef238e9efa94..b84b9e348c13 100644 --- a/drivers/net/ethernet/sfc/siena/tx.c +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -33,8 +33,8 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, ((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1); if (unlikely(!page_buf->addr) && - efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, - GFP_ATOMIC)) + efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, + GFP_ATOMIC)) return NULL; buffer->dma_addr = page_buf->dma_addr + offset; buffer->unmap_len = 0; diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c index 31e9888e71df..93a32d61944f 100644 --- a/drivers/net/ethernet/sfc/siena/tx_common.c +++ b/drivers/net/ethernet/sfc/siena/tx_common.c @@ -107,8 +107,8 @@ void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue) if (tx_queue->cb_page) { for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++) - efx_nic_free_buffer(tx_queue->efx, - &tx_queue->cb_page[i]); + efx_siena_free_buffer(tx_queue->efx, + &tx_queue->cb_page[i]); kfree(tx_queue->cb_page); tx_queue->cb_page = NULL; } -- cgit v1.2.3 From 782f7130849f58825fc0ab8dcfe297054cb58f2c Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:33:10 +0100 Subject: sfc/siena: Inline functions in sriov.h to avoid conflicts with sfc The implementation of each is quite short. This means sriov.c is not needed any more. 
Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/sriov.c | 72 ---------------------------------- drivers/net/ethernet/sfc/siena/sriov.h | 68 +++++++++++++++++++++++++++++--- 2 files changed, 63 insertions(+), 77 deletions(-) delete mode 100644 drivers/net/ethernet/sfc/siena/sriov.c (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/sriov.c b/drivers/net/ethernet/sfc/siena/sriov.c deleted file mode 100644 index 3f241e6c881a..000000000000 --- a/drivers/net/ethernet/sfc/siena/sriov.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/**************************************************************************** - * Driver for Solarflare network controllers and boards - * Copyright 2014-2015 Solarflare Communications Inc. - */ -#include -#include "net_driver.h" -#include "nic.h" -#include "sriov.h" - -int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_set_vf_mac) - return efx->type->sriov_set_vf_mac(efx, vf_i, mac); - else - return -EOPNOTSUPP; -} - -int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, - u8 qos, __be16 vlan_proto) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_set_vf_vlan) { - if ((vlan & ~VLAN_VID_MASK) || - (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))) - return -EINVAL; - - if (vlan_proto != htons(ETH_P_8021Q)) - return -EPROTONOSUPPORT; - - return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos); - } else { - return -EOPNOTSUPP; - } -} - -int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, - bool spoofchk) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_set_vf_spoofchk) - return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk); - else - return -EOPNOTSUPP; -} - -int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, - struct ifla_vf_info *ivi) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_get_vf_config) - return efx->type->sriov_get_vf_config(efx, vf_i, ivi); - else - return -EOPNOTSUPP; -} - -int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, - int link_state) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->sriov_set_vf_link_state) - return efx->type->sriov_set_vf_link_state(efx, vf_i, - link_state); - else - return -EOPNOTSUPP; -} diff --git a/drivers/net/ethernet/sfc/siena/sriov.h b/drivers/net/ethernet/sfc/siena/sriov.h index 747707bee483..fbde67319d87 100644 --- a/drivers/net/ethernet/sfc/siena/sriov.h +++ b/drivers/net/ethernet/sfc/siena/sriov.h @@ -11,15 +11,73 @@ #ifdef CONFIG_SFC_SRIOV -int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac); +static inline +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_mac) + return efx->type->sriov_set_vf_mac(efx, vf_i, mac); + else + return -EOPNOTSUPP; +} + +static inline int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, - u8 qos, __be16 vlan_proto); + u8 qos, __be16 vlan_proto) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_vlan) { + if ((vlan & ~VLAN_VID_MASK) || + (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos); + } else { + return -EOPNOTSUPP; + } +} + 
+static inline int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, - bool spoofchk); + bool spoofchk) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_spoofchk) + return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk); + else + return -EOPNOTSUPP; +} + +static inline int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, - struct ifla_vf_info *ivi); + struct ifla_vf_info *ivi) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_get_vf_config) + return efx->type->sriov_get_vf_config(efx, vf_i, ivi); + else + return -EOPNOTSUPP; +} + +static inline int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, - int link_state); + int link_state) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_link_state) + return efx->type->sriov_set_vf_link_state(efx, vf_i, + link_state); + else + return -EOPNOTSUPP; +} #endif /* CONFIG_SFC_SRIOV */ #endif /* EFX_SRIOV_H */ -- cgit v1.2.3 From c5a13c319e10e795850b61bc7e3447b08024be2e Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Mon, 9 May 2022 16:33:23 +0100 Subject: sfc: Add a basic Siena module Make the (un)load message more specific to differentiate it from the sfc.ko messages. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/Kconfig | 1 + drivers/net/ethernet/sfc/Makefile | 1 + drivers/net/ethernet/sfc/siena/Kconfig | 12 ++++++++++++ drivers/net/ethernet/sfc/siena/Makefile | 11 +++++++++++ drivers/net/ethernet/sfc/siena/efx.c | 6 +++--- 5 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/sfc/siena/Kconfig create mode 100644 drivers/net/ethernet/sfc/siena/Makefile (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 846fff16fa48..98db551ba2b7 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -65,5 +65,6 @@ config SFC_MCDI_LOGGING a sysfs file 'mcdi_logging' under the PCI device. source "drivers/net/ethernet/sfc/falcon/Kconfig" +source "drivers/net/ethernet/sfc/siena/Kconfig" endif # NET_VENDOR_SOLARFLARE diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 9b3374cf7937..b9298031ea51 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -13,3 +13,4 @@ sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o obj-$(CONFIG_SFC) += sfc.o obj-$(CONFIG_SFC_FALCON) += falcon/ +obj-$(CONFIG_SFC_SIENA) += siena/ diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig new file mode 100644 index 000000000000..3d52aee50d5a --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SFC_SIENA + tristate "Solarflare SFC9000 support" + depends on PCI + select MDIO + select CRC32 + help + This driver supports 10-gigabit Ethernet cards based on + the Solarflare SFC9000 controller. + + To compile this driver as a module, choose M here. The module + will be called sfc-siena. 
diff --git a/drivers/net/ethernet/sfc/siena/Makefile b/drivers/net/ethernet/sfc/siena/Makefile new file mode 100644 index 000000000000..74cb8b7d281e --- /dev/null +++ b/drivers/net/ethernet/sfc/siena/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +sfc-siena-y += farch.o siena.o \ + efx.o efx_common.o efx_channels.o nic.o \ + tx.o tx_common.o rx.o rx_common.o \ + selftest.o ethtool.o ethtool_common.o ptp.o \ + mcdi.o mcdi_port.o mcdi_port_common.o \ + mcdi_mon.o +sfc-siena-$(CONFIG_SFC_MTD) += mtd.o +sfc-siena-$(CONFIG_SFC_SRIOV) += siena_sriov.o + +obj-$(CONFIG_SFC_SIENA) += sfc-siena.o diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index d937704e416b..3f6e732f5fdc 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -1265,7 +1265,7 @@ static int __init efx_init_module(void) { int rc; - printk(KERN_INFO "Solarflare NET driver\n"); + pr_info("Solarflare Siena driver\n"); rc = register_netdevice_notifier(&efx_netdev_notifier); if (rc) @@ -1291,7 +1291,7 @@ static int __init efx_init_module(void) static void __exit efx_exit_module(void) { - printk(KERN_INFO "Solarflare NET driver unloading\n"); + pr_info("Solarflare Siena driver unloading\n"); pci_unregister_driver(&efx_pci_driver); efx_siena_destroy_reset_workqueue(); @@ -1304,6 +1304,6 @@ module_exit(efx_exit_module); MODULE_AUTHOR("Solarflare Communications and " "Michael Brown "); -MODULE_DESCRIPTION("Solarflare network driver"); +MODULE_DESCRIPTION("Solarflare Siena network driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, efx_pci_table); -- cgit v1.2.3 From 70a40ecfcb7d16262cf3ffe5eef4763315a68645 Mon Sep 17 00:00:00 2001 From: Yuiko Oshino Date: Mon, 9 May 2022 11:58:03 -0700 Subject: net: phy: microchip: add comments for the modified LAN88xx phy ID mask. add comments for the updated LAN88xx phy ID mask in the previous patch. Signed-off-by: Yuiko Oshino Signed-off-by: Jakub Kicinski --- drivers/net/phy/microchip.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 131caf659ed2..ccecee2524ce 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -345,6 +345,10 @@ static int lan88xx_config_aneg(struct phy_device *phydev) static struct phy_driver microchip_phy_driver[] = { { .phy_id = 0x0007c132, + /* This mask (0xfffffff2) is to differentiate from + * LAN8742 (phy_id 0x0007c130 and 0x0007c131) + * and allows future phy_id revisions. + */ .phy_id_mask = 0xfffffff2, .name = "Microchip LAN88xx", -- cgit v1.2.3 From b2be075139fa4dad1649edef30963de6315ba237 Mon Sep 17 00:00:00 2001 From: Yuiko Oshino Date: Mon, 9 May 2022 11:58:04 -0700 Subject: net: phy: smsc: add comments for the LAN8742 phy ID mask. add comments for the LAN8742 phy ID mask in the previous patch. add one missing tab in the LAN8742 phy ID line. 
Signed-off-by: Yuiko Oshino Signed-off-by: Jakub Kicinski --- drivers/net/phy/smsc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 44fa9e00cc50..92225d0fc246 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -484,7 +484,11 @@ static struct phy_driver smsc_phy_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = 0x0007c130, /* 0x0007c130 and 0x0007c131 */ + .phy_id = 0x0007c130, /* 0x0007c130 and 0x0007c131 */ + /* This mask (0xfffffff2) is to differentiate from + * LAN88xx (phy_id 0x0007c132) + * and allows future phy_id revisions. + */ .phy_id_mask = 0xfffffff2, .name = "Microchip LAN8742", -- cgit v1.2.3 From 4ee8a915730f3219a1737c8d73815c97ec8f3c27 Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Wed, 4 May 2022 16:19:25 +0000 Subject: wilc1000: increase firmware version array size Increase the firmware version array size to hold the complete version information. The firmware commit id (Build:) information is also part of the firmware version string. Firmware version format: WILC_WIFI_FW_REL_XX_XX Build: XXXXX e.g. WILC_WIFI_FW_REL_15_6 Build: 12804 Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504161924.2146601-1-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/netdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 643bddaae32a..3c292e3464c2 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -14,6 +14,7 @@ #include "wlan_cfg.h" #define WILC_MULTICAST_TABLE_SIZE 8 +#define WILC_MAX_FW_VERSION_STR_SIZE 50 /* latest API version supported */ #define WILC1000_API_VER 1 @@ -522,7 +523,7 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif) if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) { int size; - char firmware_ver[20]; + char firmware_ver[WILC_MAX_FW_VERSION_STR_SIZE]; size = wilc_wlan_cfg_get_val(wl, WID_FIRMWARE_VERSION, firmware_ver, -- cgit v1.2.3 From 72ebd6751f9eb3f9b023a81f10b02e9a2c8c0acb Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Wed, 4 May 2022 16:19:25 +0000 Subject: wilc1000: use fixed function base register value to access SDIO_FBR_ENABLE_CSA The function number was not correct (reset to 0) when the host resumes from the suspend state. Use a hardcoded value in the function base information register (FBR base address) to re-initialize correctly on host resume. 
Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504161924.2146601-2-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index ec595dbd8959..7962c11cfe84 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -598,7 +598,7 @@ static int wilc_sdio_init(struct wilc *wilc, bool resume) cmd.read_write = 1; cmd.function = 0; cmd.raw = 1; - cmd.address = SDIO_FBR_BASE(func->num); + cmd.address = SDIO_FBR_BASE(1); cmd.data = SDIO_FBR_ENABLE_CSA; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { -- cgit v1.2.3 From 868f0e28290c7a33e8cb79bfe97ebdcbb756e048 Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Wed, 4 May 2022 16:19:26 +0000 Subject: wilc1000: fix crash observed in AP mode with cfg80211_register_netdevice() The monitor (mon.) interface is used for handling AP mode, and its 'ieee80211_ptr' reference is not set. As in the earlier implementation, use register_netdevice() instead of cfg80211_register_netdevice(), which expects a valid 'ieee80211_ptr' reference, to avoid the possible crash. Fixes: 2fe8ef106238 ("cfg80211: change netdev registration/unregistration semantics") Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504161924.2146601-3-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/mon.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c index 6bd63934c2d8..b5a1b65c087c 100644 --- a/drivers/net/wireless/microchip/wilc1000/mon.c +++ b/drivers/net/wireless/microchip/wilc1000/mon.c @@ -233,7 +233,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops; wl->monitor_dev->needs_free_netdev = true; - if (cfg80211_register_netdevice(wl->monitor_dev)) { + if (register_netdevice(wl->monitor_dev)) { netdev_err(real_dev, "register_netdevice failed\n"); free_netdev(wl->monitor_dev); return NULL; @@ -251,7 +251,7 @@ void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked) return; if (rtnl_locked) - cfg80211_unregister_netdevice(wl->monitor_dev); + unregister_netdevice(wl->monitor_dev); else unregister_netdev(wl->monitor_dev); wl->monitor_dev = NULL; -- cgit v1.2.3 From 819b161b9487870d1d1a76171b6820c6b977d99a Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Wed, 4 May 2022 16:19:26 +0000 Subject: wilc1000: use 'u64' datatype for cookie variable Use 'u64' instead of 'u32' for the cookie variable, as expected by the cfg80211 callback function argument.
Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504161924.2146601-4-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/hif.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h index cccd54ed0518..77616fc77575 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.h +++ b/drivers/net/wireless/microchip/wilc1000/hif.h @@ -123,7 +123,7 @@ struct wilc_remain_ch { u32 duration; void (*expired)(void *priv, u64 cookie); void *arg; - u32 cookie; + u64 cookie; }; struct wilc; -- cgit v1.2.3 From 62296b3e19dd252694d24a60d6f10b487551d70b Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Wed, 4 May 2022 16:19:27 +0000 Subject: wilc1000: add valid vmm_entry check before fetching from TX queue Each 'vmm_table' entry contains the size of a data buffer, including the host header length. In the 'vmm_table' array, a zero value marks the end of the vmm_entries that need to be transferred to the firmware, which is calculated based on the VMM free size in the firmware. Check that the 'vmm_table' entry is valid before fetching the entry from the TX queue, so that only the valid number of entries is copied; this avoids a possible NULL pointer exception observed sometimes during large file transfers. Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220504161924.2146601-5-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/wlan.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index fb5633a05fd5..48441f0389ca 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -875,14 +875,15 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) char *bssid; u8 mgmt_ptk = 0; + if (vmm_table[i] == 0 || vmm_entries_ac[i] >= NQUEUES) + break; + tqe = wilc_wlan_txq_remove_from_head(wilc, vmm_entries_ac[i]); - ac_pkt_num_to_chip[vmm_entries_ac[i]]++; if (!tqe) break; + ac_pkt_num_to_chip[vmm_entries_ac[i]]++; vif = tqe->vif; - if (vmm_table[i] == 0) - break; le32_to_cpus(&vmm_table[i]); vmm_sz = FIELD_GET(WILC_VMM_BUFFER_SIZE, vmm_table[i]); -- cgit v1.2.3 From 716c220b4d990a4fe7800d0685ca69dee99e4e8f Mon Sep 17 00:00:00 2001 From: Pavel Löbl Date: Fri, 6 May 2022 06:42:46 +0200 Subject: brcmfmac: allow setting wlan MAC address using device tree MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows firmware to provide the MAC address via the device tree, for example when there is no MAC address burned into the wlan NVRAM. 
Signed-off-by: Pavel Löbl Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506044246.67146-1-pavel@loebl.cz --- .../wireless/broadcom/brcm80211/brcmfmac/common.c | 23 ++++++++++++++++------ .../wireless/broadcom/brcm80211/brcmfmac/common.h | 1 + .../wireless/broadcom/brcm80211/brcmfmac/core.c | 4 +++- .../net/wireless/broadcom/brcm80211/brcmfmac/of.c | 3 +++ 4 files changed, 24 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index e3758bd86acf..fe01da9e620d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -202,13 +202,24 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) char *ptr; s32 err; - /* retreive mac address */ - err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, - sizeof(ifp->mac_addr)); - if (err < 0) { - bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err); - goto done; + if (is_valid_ether_addr(ifp->mac_addr)) { + /* set mac address */ + err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr, + ETH_ALEN); + if (err < 0) { + bphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err); + goto done; + } + } else { + /* retrieve mac address */ + err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, + sizeof(ifp->mac_addr)); + if (err < 0) { + bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err); + goto done; + } } + memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac)); memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index 8b5f49997c8b..15accc88d5c0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -50,6 +50,7 @@ struct brcmf_mp_device { bool ignore_probe_fail; struct brcmfmac_pd_cc *country_codes; const char *board_type; + unsigned char mac[ETH_ALEN]; union { struct brcmfmac_sdio_pd sdio; } bus; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 26fab4bee22c..87aef211b35f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -1197,7 +1198,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops) brcmf_dbg(TRACE, "\n"); /* add primary networking interface */ - ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL); + ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", + is_valid_ether_addr(drvr->settings->mac) ? 
drvr->settings->mac : NULL); if (IS_ERR(ifp)) return PTR_ERR(ifp); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 8623bde5eb70..083ac58f466d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include "debug.h" @@ -99,6 +100,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, if (err) brcmf_err("failed to get OF country code map (err=%d)\n", err); + of_get_mac_address(np, settings->mac); + if (bus_type != BRCMF_BUSTYPE_SDIO) return; -- cgit v1.2.3 From 84dc992e23df2633c7bd4f662e3f0073c126aac7 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 6 May 2022 15:58:14 +0800 Subject: ssb: remove unreachable code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up the following smatch warning: drivers/ssb/pci.c:917 ssb_pci_sprom_get() warn: ignoring unreachable code. Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Acked-by: Michael Büsch Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506075814.115059-1-jiapeng.chong@linux.alibaba.com --- drivers/ssb/pci.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 148bcb99c212..493bebbba521 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -914,7 +914,6 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, err = 0; goto out_free; } - pr_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n"); } } err = sprom_extract(bus, sprom, buf, bus->sprom_size); -- cgit v1.2.3 From 0cd75e4f1c9dce097665bee757da280bd5276864 Mon Sep 17 00:00:00 2001 From: Hsuan Hung Date: Fri, 6 May 2022 20:02:12 +0800 Subject: rtw89: 8852c: add settings to decrease the effect of DC Modify NBI and PD boost settings according to different primary channels. This setting can decrease the false alarm induced by DC. 
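For orientation, a stand-alone C sketch of the register layout used by the notch registers in the patch below: a 12-bit tone-index field plus an enable bit packed into one word. The mask shapes mirror B_PATH0_NOTCH_VAL/B_PATH0_NOTCH_EN, but the value and the register itself are illustrative; no hardware access is modelled.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
#define NOTCH_VAL      GENMASK(11, 0)   /* 12-bit tone index */
#define NOTCH_EN       (1u << 12)       /* enable bit */

int main(void)
{
	uint32_t reg = 0;
	uint32_t val = 0xe7f;               /* tone index, as in the patch */

	/* read-modify-write: clear the field, then program value + enable */
	reg = (reg & ~(NOTCH_VAL | NOTCH_EN)) | (val & NOTCH_VAL) | NOTCH_EN;
	printf("reg = 0x%04x\n", reg);      /* prints 0x1e7f */
	return 0;
}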
Signed-off-by: Hsuan Hung Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506120216.58567-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/reg.h | 14 ++++++ drivers/net/wireless/realtek/rtw89/rtw8852c.c | 63 ++++++++++++++++++++++++--- 2 files changed, 72 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 5c4de043845b..ebf28719d935 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -3822,6 +3822,8 @@ #define B_CHBW_MOD_SBW GENMASK(13, 12) #define B_CHBW_MOD_PRICH GENMASK(11, 8) #define B_ANT_RX_SEG0 GENMASK(3, 0) +#define R_PD_BOOST_EN 0x49E8 +#define B_PD_BOOST_EN BIT(7) #define R_P1_BACKOFF_IBADC_V1 0x49F0 #define B_P1_BACKOFF_IBADC_V1 GENMASK(31, 26) #define R_BK_FC0_INV_V1 0x4A1C @@ -3840,6 +3842,12 @@ #define B_PATH1_BT_BACKOFF_V1 GENMASK(23, 0) #define R_PATH0_FRC_FIR_TYPE_V1 0x4C00 #define B_PATH0_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0) +#define R_PATH0_NOTCH 0x4C14 +#define B_PATH0_NOTCH_EN BIT(12) +#define B_PATH0_NOTCH_VAL GENMASK(11, 0) +#define R_PATH0_NOTCH2 0x4C20 +#define B_PATH0_NOTCH2_EN BIT(12) +#define B_PATH0_NOTCH2_VAL GENMASK(11, 0) #define R_PATH0_5MDET 0x4C4C #define B_PATH0_5MDET_EN BIT(12) #define B_PATH0_5MDET_SB2 BIT(8) @@ -3847,6 +3855,12 @@ #define B_PATH0_5MDET_TH GENMASK(5, 0) #define R_PATH1_FRC_FIR_TYPE_V1 0x4CC4 #define B_PATH1_FRC_FIR_TYPE_MSK_V1 GENMASK(1, 0) +#define R_PATH1_NOTCH 0x4CD8 +#define B_PATH1_NOTCH_EN BIT(12) +#define B_PATH1_NOTCH_VAL GENMASK(11, 0) +#define R_PATH1_NOTCH2 0x4CE4 +#define B_PATH1_NOTCH2_EN BIT(12) +#define B_PATH1_NOTCH2_VAL GENMASK(11, 0) #define R_PATH1_5MDET 0x4D10 #define B_PATH1_5MDET_EN BIT(12) #define B_PATH1_5MDET_SB2 BIT(8) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 295b824dbecc..77dcdbd86c63 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -1381,19 +1381,72 @@ static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev, } } +static void rtw8852c_spur_notch(struct rtw89_dev *rtwdev, u32 val, + enum rtw89_phy_idx phy_idx) +{ + u32 notch; + u32 notch2; + + if (phy_idx == RTW89_PHY_0) { + notch = R_PATH0_NOTCH; + notch2 = R_PATH0_NOTCH2; + } else { + notch = R_PATH1_NOTCH; + notch2 = R_PATH1_NOTCH2; + } + + rtw89_phy_write32_mask(rtwdev, notch, + B_PATH0_NOTCH_VAL | B_PATH0_NOTCH_EN, val); + rtw89_phy_write32_set(rtwdev, notch, B_PATH0_NOTCH_EN); + rtw89_phy_write32_mask(rtwdev, notch2, + B_PATH0_NOTCH2_VAL | B_PATH0_NOTCH2_EN, val); + rtw89_phy_write32_set(rtwdev, notch2, B_PATH0_NOTCH2_EN); +} + static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev, struct rtw89_channel_params *param, + u8 pri_ch_idx, enum rtw89_phy_idx phy_idx) { rtw8852c_set_csi_tone_idx(rtwdev, param, phy_idx); if (phy_idx == RTW89_PHY_0) { - rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A); - if (!rtwdev->dbcc_en) - rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B); + if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && + (pri_ch_idx == RTW89_SC_20_LOWER || + pri_ch_idx == RTW89_SC_20_UP3X)) { + rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_0); + if (!rtwdev->dbcc_en) + rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1); + } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && + (pri_ch_idx == RTW89_SC_20_UPPER || + pri_ch_idx == RTW89_SC_20_LOW3X)) { + rtw8852c_spur_notch(rtwdev, 0x280, 
RTW89_PHY_0); + if (!rtwdev->dbcc_en) + rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1); + } else { + rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A); + if (!rtwdev->dbcc_en) + rtw8852c_set_nbi_tone_idx(rtwdev, param, + RF_PATH_B); + } } else { - rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B); + if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && + (pri_ch_idx == RTW89_SC_20_LOWER || + pri_ch_idx == RTW89_SC_20_UP3X)) { + rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1); + } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && + (pri_ch_idx == RTW89_SC_20_UPPER || + pri_ch_idx == RTW89_SC_20_LOW3X)) { + rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1); + } else { + rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B); + } } + + if (pri_ch_idx == RTW89_SC_20_UP3X || pri_ch_idx == RTW89_SC_20_LOW3X) + rtw89_phy_write32_idx(rtwdev, R_PD_BOOST_EN, B_PD_BOOST_EN, 0, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PD_BOOST_EN, B_PD_BOOST_EN, 1, phy_idx); } static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev, @@ -1664,7 +1717,7 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev, B_PD_ARBITER_OFF, 0x1, phy_idx); } - rtw8852c_spur_elimination(rtwdev, param, phy_idx); + rtw8852c_spur_elimination(rtwdev, param, pri_ch_idx, phy_idx); rtw8852c_ctrl_btg(rtwdev, param->band_type == RTW89_BAND_2G); rtw8852c_5m_mask(rtwdev, param, phy_idx); -- cgit v1.2.3 From 4b0d341b2e0401b1d44489af5bc2fa0e30ea5d2b Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 6 May 2022 20:02:13 +0800 Subject: rtw89: correct setting of RX MPDU length Set proper setting according to RX quota, and then it doesn't break buffer due to size of received packet exceeding buffer size. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506120216.58567-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index a06ca65b339f..e1a1699a1a9c 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -2005,6 +2005,7 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) #define TRXCFG_RMAC_DATA_TO 15 #define RX_MAX_LEN_UNIT 512 #define PLD_RLS_MAX_PG 127 +#define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT) int ret; u32 reg, rx_max_len, rx_qta; u16 val; @@ -2035,11 +2036,10 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) rx_qta = rtwdev->mac.dle_info.c0_rx_qta; else rx_qta = rtwdev->mac.dle_info.c1_rx_qta; - rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta; - rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size / - RX_MAX_LEN_UNIT; - rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ? - B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len; + rx_qta = min_t(u32, rx_qta, PLD_RLS_MAX_PG); + rx_max_len = rx_qta * rtwdev->mac.dle_info.ple_pg_size; + rx_max_len = min_t(u32, rx_max_len, RX_SPEC_MAX_LEN); + rx_max_len /= RX_MAX_LEN_UNIT; rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len); if (rtwdev->chip->chip_id == RTL8852A && -- cgit v1.2.3 From 98ed6159a50524f3b3302bdc237e2d3dd89fdfe7 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 6 May 2022 20:02:14 +0800 Subject: rtw89: correct CCA control EDCCA signal can block transmitting in certain situation, so ignore this signal and use others to decide transmitting time. 
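Returning to the RX MPDU length patch above, a stand-alone sketch of its rx_max_len arithmetic: clamp the RX quota, convert pages to bytes, clamp to the spec maximum, then express the result in 512-byte units. The quota and page size below are assumed example values, not taken from the hardware.

#include <stdint.h>
#include <stdio.h>

#define PLD_RLS_MAX_PG   127
#define RX_MAX_LEN_UNIT  512
#define RX_SPEC_MAX_LEN  (11454 + RX_MAX_LEN_UNIT)

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t rx_qta = 200;          /* example quota in pages */
	uint32_t ple_pg_size = 128;     /* assumed page size for the sketch */
	uint32_t rx_max_len;

	rx_qta = min_u32(rx_qta, PLD_RLS_MAX_PG);
	rx_max_len = min_u32(rx_qta * ple_pg_size, RX_SPEC_MAX_LEN);
	rx_max_len /= RX_MAX_LEN_UNIT;
	printf("rx_max_len = %u units of %u bytes\n", rx_max_len, RX_MAX_LEN_UNIT);
	return 0;
}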
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506120216.58567-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index e1a1699a1a9c..3b61ff02fe84 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1890,11 +1890,12 @@ static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA | B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 | B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 | - B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA); + B_AX_CTN_CHK_CCA_P20); val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 | B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 | B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 | - B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV); + B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV | + B_AX_SIFS_CHK_EDCCA); rtw89_write32(rtwdev, reg, val); -- cgit v1.2.3 From 0b75b35c3867ba9189628f1a3113bd2435f35380 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 6 May 2022 20:02:15 +0800 Subject: rtw89: add debug select to dump MAC pages 0x30 to 0x33 Dump new region 0x3000 to 0x33ff to help debug. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506120216.58567-5-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/debug.c | 5 +++++ drivers/net/wireless/realtek/rtw89/debug.h | 1 + 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index f93f3fee1505..7820bc3ab3b4 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -635,6 +635,11 @@ static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v) start = 0x000; end = 0x014; break; + case RTW89_DBG_SEL_MAC_30: + seq_puts(m, "Debug selected MAC page 0x30\n"); + start = 0x030; + end = 0x033; + break; case RTW89_DBG_SEL_MAC_40: seq_puts(m, "Debug selected MAC page 0x40\n"); start = 0x040; diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h index 1745815f5e00..de72155ad1fe 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.h +++ b/drivers/net/wireless/realtek/rtw89/debug.h @@ -28,6 +28,7 @@ enum rtw89_debug_mask { enum rtw89_debug_mac_reg_sel { RTW89_DBG_SEL_MAC_00, + RTW89_DBG_SEL_MAC_30, RTW89_DBG_SEL_MAC_40, RTW89_DBG_SEL_MAC_80, RTW89_DBG_SEL_MAC_C0, -- cgit v1.2.3 From dadb20864d89d43dd5386089bd82e5c66f0060c0 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 6 May 2022 20:02:16 +0800 Subject: rtw89: add debug entry to dump BSSID CAM BSSID CAM is a kind of CAM that is used to determine if we receive a packet or not. Add an entry to assist in debugging. 
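A minimal sketch of the enum-plus-table idiom the BSSID CAM patch below extends: the new selector is added just before the "keep last" sentinel, and the table indexed by it gains a matching base address. All names and values here are made up for illustration.

#include <stdio.h>

enum mem_sel {
	MEM_TXDATA_FIFO_1,
	MEM_CPU_LOCAL,
	MEM_BSSID_CAM,      /* new entry, added before the sentinel */
	MEM_NUM,            /* keep last */
};

static const unsigned int mem_base[MEM_NUM] = {
	[MEM_TXDATA_FIFO_1] = 0x0001,
	[MEM_CPU_LOCAL]     = 0x0002,
	[MEM_BSSID_CAM]     = 0x0003,   /* hypothetical base address */
};

int main(void)
{
	printf("BSSID CAM base: 0x%04x\n", mem_base[MEM_BSSID_CAM]);
	return 0;
}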
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506120216.58567-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac.c | 1 + drivers/net/wireless/realtek/rtw89/mac.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 3b61ff02fe84..3cf892912c1d 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -29,6 +29,7 @@ const u32 rtw89_mac_mem_base_addrs[RTW89_MAC_MEM_NUM] = { [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR, [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR, [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR, + [RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR, }; static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset, diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 9eb4afe348b3..9f511c8d8a37 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -268,6 +268,7 @@ enum rtw89_mac_mem_sel { RTW89_MAC_MEM_TXDATA_FIFO_0, RTW89_MAC_MEM_TXDATA_FIFO_1, RTW89_MAC_MEM_CPU_LOCAL, + RTW89_MAC_MEM_BSSID_CAM, /* keep last */ RTW89_MAC_MEM_NUM, -- cgit v1.2.3 From 2c33360bce6af0948fa162cdbd373d49be5a7491 Mon Sep 17 00:00:00 2001 From: Jaehee Park Date: Fri, 6 May 2022 13:00:46 -0400 Subject: wfx: use container_of() to get vif Currently, upon virtual interface creation, wfx_add_interface() stores a reference to the corresponding struct ieee80211_vif in private data, for later usage. This is not needed when using the container_of construct. This construct already has all the info it needs to retrieve the reference to the corresponding struct from the offset that is already available, inherent in container_of(), between its type and member inputs (struct ieee80211_vif and drv_priv, respectively). Remove vif (which was previously storing the reference to the struct ieee80211_vif) from the struct wfx_vif, define a function wvif_to_vif(wvif) for container_of(), and replace all wvif->vif with the newly defined container_of construct. 
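A stand-alone illustration of the container_of() pattern described above: given a pointer to the driver-private area embedded in a larger struct, the enclosing struct is recovered purely from the member's offset, so no back-pointer needs to be stored. The struct names below are simplified stand-ins for ieee80211_vif and wfx_vif, not the real definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vif {                 /* stands in for struct ieee80211_vif */
	int type;
	char drv_priv[32];   /* driver-private area, like drv_priv[] */
};

struct drv_vif {             /* stands in for struct wfx_vif */
	int id;
};

int main(void)
{
	struct vif v = { .type = 3 };
	struct drv_vif *wvif = (struct drv_vif *)v.drv_priv;
	struct vif *back;

	wvif->id = 7;
	/* recover the enclosing vif, as wvif_to_vif() does in the patch */
	back = container_of((void *)wvif, struct vif, drv_priv);
	printf("type=%d id=%d same=%d\n", back->type, wvif->id, back == &v);
	return 0;
}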
Signed-off-by: Jaehee Park Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220506170046.GA1297231@jaehee-ThinkPad-X1-Extreme --- drivers/net/wireless/silabs/wfx/data_rx.c | 5 +- drivers/net/wireless/silabs/wfx/data_tx.c | 3 +- drivers/net/wireless/silabs/wfx/key.c | 4 +- drivers/net/wireless/silabs/wfx/queue.c | 3 +- drivers/net/wireless/silabs/wfx/scan.c | 11 +++-- drivers/net/wireless/silabs/wfx/sta.c | 76 +++++++++++++++++++------------ drivers/net/wireless/silabs/wfx/wfx.h | 6 ++- 7 files changed, 68 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/silabs/wfx/data_rx.c b/drivers/net/wireless/silabs/wfx/data_rx.c index a4b5ffe158e4..e099a9e65bae 100644 --- a/drivers/net/wireless/silabs/wfx/data_rx.c +++ b/drivers/net/wireless/silabs/wfx/data_rx.c @@ -15,6 +15,7 @@ static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt) { + struct ieee80211_vif *vif = wvif_to_vif(wvif); int params, tid; if (wfx_api_older_than(wvif->wdev, 3, 6)) @@ -24,12 +25,12 @@ static void wfx_rx_handle_ba(struct wfx_vif *wvif, struct ieee80211_mgmt *mgmt) case WLAN_ACTION_ADDBA_REQ: params = le16_to_cpu(mgmt->u.action.u.addba_req.capab); tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; - ieee80211_start_rx_ba_session_offl(wvif->vif, mgmt->sa, tid); + ieee80211_start_rx_ba_session_offl(vif, mgmt->sa, tid); break; case WLAN_ACTION_DELBA: params = le16_to_cpu(mgmt->u.action.u.delba.params); tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12; - ieee80211_stop_rx_ba_session_offl(wvif->vif, mgmt->sa, tid); + ieee80211_stop_rx_ba_session_offl(vif, mgmt->sa, tid); break; } } diff --git a/drivers/net/wireless/silabs/wfx/data_tx.c b/drivers/net/wireless/silabs/wfx/data_tx.c index e07381b2ff4d..6a5e52a96d18 100644 --- a/drivers/net/wireless/silabs/wfx/data_tx.c +++ b/drivers/net/wireless/silabs/wfx/data_tx.c @@ -212,11 +212,12 @@ static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr) { struct wfx_sta_priv *sta_priv = sta ? 
(struct wfx_sta_priv *)&sta->drv_priv : NULL; + struct ieee80211_vif *vif = wvif_to_vif(wvif); const u8 *da = ieee80211_get_DA(hdr); if (sta_priv && sta_priv->link_id) return sta_priv->link_id; - if (wvif->vif->type != NL80211_IFTYPE_AP) + if (vif->type != NL80211_IFTYPE_AP) return 0; if (is_multicast_ether_addr(da)) return 0; diff --git a/drivers/net/wireless/silabs/wfx/key.c b/drivers/net/wireless/silabs/wfx/key.c index 8f23e8d42bd4..196d64ef68f3 100644 --- a/drivers/net/wireless/silabs/wfx/key.c +++ b/drivers/net/wireless/silabs/wfx/key.c @@ -156,6 +156,7 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta, struct wfx_dev *wdev = wvif->wdev; int idx = wfx_alloc_key(wvif->wdev); bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE; + struct ieee80211_vif *vif = wvif_to_vif(wvif); WARN(key->flags & IEEE80211_KEY_FLAG_PAIRWISE && !sta, "inconsistent data"); ieee80211_get_key_rx_seq(key, 0, &seq); @@ -174,7 +175,7 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta, k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key, sta->addr); else k.type = fill_tkip_group(&k.key.tkip_group_key, key, &seq, - wvif->vif->type); + vif->type); } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) { if (pairwise) k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key, sta->addr); @@ -224,4 +225,3 @@ int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_ mutex_unlock(&wvif->wdev->conf_mutex); return ret; } - diff --git a/drivers/net/wireless/silabs/wfx/queue.c b/drivers/net/wireless/silabs/wfx/queue.c index 729825230db2..37f492e5d3be 100644 --- a/drivers/net/wireless/silabs/wfx/queue.c +++ b/drivers/net/wireless/silabs/wfx/queue.c @@ -205,9 +205,10 @@ unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev, struct sk_buff * bool wfx_tx_queues_has_cab(struct wfx_vif *wvif) { + struct ieee80211_vif *vif = wvif_to_vif(wvif); int i; - if (wvif->vif->type != NL80211_IFTYPE_AP) + if (vif->type != NL80211_IFTYPE_AP) return false; for (i = 0; i < IEEE80211_NUM_ACS; ++i) /* Note: since only AP can have mcast frames in queue and only one vif can be AP, diff --git a/drivers/net/wireless/silabs/wfx/scan.c b/drivers/net/wireless/silabs/wfx/scan.c index 7f34f0d322f9..16f619ed22e0 100644 --- a/drivers/net/wireless/silabs/wfx/scan.c +++ b/drivers/net/wireless/silabs/wfx/scan.c @@ -23,9 +23,11 @@ static void wfx_ieee80211_scan_completed_compat(struct ieee80211_hw *hw, bool ab static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request *req) { + struct ieee80211_vif *vif = wvif_to_vif(wvif); struct sk_buff *skb; - skb = ieee80211_probereq_get(wvif->wdev->hw, wvif->vif->addr, NULL, 0, req->ie_len); + skb = ieee80211_probereq_get(wvif->wdev->hw, vif->addr, NULL, 0, + req->ie_len); if (!skb) return -ENOMEM; @@ -37,8 +39,9 @@ static int update_probe_tmpl(struct wfx_vif *wvif, struct cfg80211_scan_request static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req, int start_idx) { - int i, ret; + struct ieee80211_vif *vif = wvif_to_vif(wvif); struct ieee80211_channel *ch_start, *ch_cur; + int i, ret; for (i = start_idx; i < req->n_channels; i++) { ch_start = req->channels[start_idx]; @@ -75,8 +78,8 @@ static int send_scan_req(struct wfx_vif *wvif, struct cfg80211_scan_request *req } else { ret = wvif->scan_nb_chan_done; } - if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower) - wfx_hif_set_output_power(wvif, wvif->vif->bss_conf.txpower); + if (req->channels[start_idx]->max_power != 
vif->bss_conf.txpower) + wfx_hif_set_output_power(wvif, vif->bss_conf.txpower); wfx_tx_unlock(wvif->wdev); return ret; } diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c index 3297d73c327a..e551fa284a43 100644 --- a/drivers/net/wireless/silabs/wfx/sta.c +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -98,9 +98,10 @@ static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon) void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 unused) { - struct wfx_vif *wvif = NULL; - struct wfx_dev *wdev = hw->priv; bool filter_bssid, filter_prbreq, filter_beacon; + struct ieee80211_vif *vif = NULL; + struct wfx_dev *wdev = hw->priv; + struct wfx_vif *wvif = NULL; /* Notes: * - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered @@ -131,8 +132,9 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, else filter_bssid = true; + vif = wvif_to_vif(wvif); /* In AP mode, chip can reply to probe request itself */ - if (*total_flags & FIF_PROBE_REQ && wvif->vif->type == NL80211_IFTYPE_AP) { + if (*total_flags & FIF_PROBE_REQ && vif->type == NL80211_IFTYPE_AP) { dev_dbg(wdev->dev, "do not forward probe request in AP mode\n"); *total_flags &= ~FIF_PROBE_REQ; } @@ -152,19 +154,28 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps) { struct ieee80211_channel *chan0 = NULL, *chan1 = NULL; struct ieee80211_conf *conf = &wvif->wdev->hw->conf; + struct ieee80211_vif *vif = wvif_to_vif(wvif); - WARN(!wvif->vif->bss_conf.assoc && enable_ps, + WARN(!vif->bss_conf.assoc && enable_ps, "enable_ps is reliable only if associated"); - if (wdev_to_wvif(wvif->wdev, 0)) - chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan; - if (wdev_to_wvif(wvif->wdev, 1)) - chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan; - if (chan0 && chan1 && wvif->vif->type != NL80211_IFTYPE_AP) { + if (wdev_to_wvif(wvif->wdev, 0)) { + struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0); + struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0); + + chan0 = vif_ch0->bss_conf.chandef.chan; + } + if (wdev_to_wvif(wvif->wdev, 1)) { + struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1); + struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1); + + chan1 = vif_ch1->bss_conf.chandef.chan; + } + if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) { if (chan0->hw_value == chan1->hw_value) { /* It is useless to enable PS if channels are the same. 
*/ if (enable_ps) *enable_ps = false; - if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps) + if (vif->bss_conf.assoc && vif->bss_conf.ps) dev_info(wvif->wdev->dev, "ignoring requested PS mode"); return -1; } @@ -177,8 +188,8 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps) return 30; } if (enable_ps) - *enable_ps = wvif->vif->bss_conf.ps; - if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps) + *enable_ps = vif->bss_conf.ps; + if (vif->bss_conf.assoc && vif->bss_conf.ps) return conf->dynamic_ps_timeout; else return -1; @@ -186,10 +197,11 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps) int wfx_update_pm(struct wfx_vif *wvif) { + struct ieee80211_vif *vif = wvif_to_vif(wvif); int ps_timeout; bool ps; - if (!wvif->vif->bss_conf.assoc) + if (!vif->bss_conf.assoc) return 0; ps_timeout = wfx_get_ps_timeout(wvif, &ps); if (!ps) @@ -215,7 +227,8 @@ int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mutex_lock(&wdev->conf_mutex); assign_bit(queue, &wvif->uapsd_mask, params->uapsd); wfx_hif_set_edca_queue_params(wvif, queue, params); - if (wvif->vif->type == NL80211_IFTYPE_STATION && old_uapsd != wvif->uapsd_mask) { + if (vif->type == NL80211_IFTYPE_STATION && + old_uapsd != wvif->uapsd_mask) { wfx_hif_set_uapsd_info(wvif, wvif->uapsd_mask); wfx_update_pm(wvif); } @@ -238,24 +251,26 @@ void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi) /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 * RSSI = RCPI / 2 - 110 */ + struct ieee80211_vif *vif = wvif_to_vif(wvif); int rcpi_rssi; int cqm_evt; rcpi_rssi = raw_rcpi_rssi / 2 - 110; - if (rcpi_rssi <= wvif->vif->bss_conf.cqm_rssi_thold) + if (rcpi_rssi <= vif->bss_conf.cqm_rssi_thold) cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; else cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; - ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL); + ieee80211_cqm_rssi_notify(vif, cqm_evt, rcpi_rssi, GFP_KERNEL); } static void wfx_beacon_loss_work(struct work_struct *work) { struct wfx_vif *wvif = container_of(to_delayed_work(work), struct wfx_vif, beacon_loss_work); - struct ieee80211_bss_conf *bss_conf = &wvif->vif->bss_conf; + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - ieee80211_beacon_loss(wvif->vif); + ieee80211_beacon_loss(vif); schedule_delayed_work(to_delayed_work(work), msecs_to_jiffies(bss_conf->beacon_int)); } @@ -321,15 +336,16 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ie static int wfx_upload_ap_templates(struct wfx_vif *wvif) { + struct ieee80211_vif *vif = wvif_to_vif(wvif); struct sk_buff *skb; - skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif); + skb = ieee80211_beacon_get(wvif->wdev->hw, vif); if (!skb) return -ENOMEM; wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_BCN, API_RATE_INDEX_B_1MBPS); dev_kfree_skb(skb); - skb = ieee80211_proberesp_get(wvif->wdev->hw, wvif->vif); + skb = ieee80211_proberesp_get(wvif->wdev->hw, vif); if (!skb) return -ENOMEM; wfx_hif_set_template_frame(wvif, skb, HIF_TMPLT_PRBRES, API_RATE_INDEX_B_1MBPS); @@ -339,7 +355,8 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif) static void wfx_set_mfp_ap(struct wfx_vif *wvif) { - struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif); + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct sk_buff *skb = ieee80211_beacon_get(wvif->wdev->hw, vif); const int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); const u16 *ptr = (u16 
*)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset, skb->len - ieoffset); @@ -388,12 +405,13 @@ void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) static void wfx_join(struct wfx_vif *wvif) { - int ret; - struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf; + struct ieee80211_vif *vif = wvif_to_vif(wvif); + struct ieee80211_bss_conf *conf = &vif->bss_conf; struct cfg80211_bss *bss = NULL; u8 ssid[IEEE80211_MAX_SSID_LEN]; const u8 *ssidie = NULL; int ssidlen = 0; + int ret; wfx_tx_lock_flush(wvif->wdev); @@ -420,7 +438,7 @@ static void wfx_join(struct wfx_vif *wvif) wvif->join_in_progress = true; ret = wfx_hif_join(wvif, conf, wvif->channel, ssid, ssidlen); if (ret) { - ieee80211_connection_loss(wvif->vif); + ieee80211_connection_loss(vif); wfx_reset(wvif); } else { /* Due to beacon filtering it is possible that the AP's beacon is not known for the @@ -434,13 +452,14 @@ static void wfx_join(struct wfx_vif *wvif) static void wfx_join_finalize(struct wfx_vif *wvif, struct ieee80211_bss_conf *info) { + struct ieee80211_vif *vif = wvif_to_vif(wvif); struct ieee80211_sta *sta = NULL; int ampdu_density = 0; bool greenfield = false; rcu_read_lock(); /* protect sta */ if (info->bssid && !info->ibss_joined) - sta = ieee80211_find_sta(wvif->vif, info->bssid); + sta = ieee80211_find_sta(vif, info->bssid); if (sta && sta->deflink.ht_cap.ht_supported) ampdu_density = sta->deflink.ht_cap.ampdu_density; if (sta && sta->deflink.ht_cap.ht_supported && @@ -561,11 +580,13 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static int wfx_update_tim(struct wfx_vif *wvif) { + struct ieee80211_vif *vif = wvif_to_vif(wvif); struct sk_buff *skb; u16 tim_offset, tim_length; u8 *tim_ptr; - skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif, &tim_offset, &tim_length); + skb = ieee80211_beacon_get_tim(wvif->wdev->hw, vif, &tim_offset, + &tim_length); if (!skb) return -ENOENT; tim_ptr = skb->data + tim_offset; @@ -707,8 +728,6 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) return -EOPNOTSUPP; } - /* FIXME: prefer use of container_of() to get vif */ - wvif->vif = vif; wvif->wdev = wdev; wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */ @@ -767,7 +786,6 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) cancel_delayed_work_sync(&wvif->beacon_loss_work); wdev->vif[wvif->id] = NULL; - wvif->vif = NULL; mutex_unlock(&wdev->conf_mutex); diff --git a/drivers/net/wireless/silabs/wfx/wfx.h b/drivers/net/wireless/silabs/wfx/wfx.h index 6f5e95dae21f..13ba84b3b2c3 100644 --- a/drivers/net/wireless/silabs/wfx/wfx.h +++ b/drivers/net/wireless/silabs/wfx/wfx.h @@ -62,7 +62,6 @@ struct wfx_dev { struct wfx_vif { struct wfx_dev *wdev; - struct ieee80211_vif *vif; struct ieee80211_channel *channel; int id; @@ -92,6 +91,11 @@ struct wfx_vif { struct completion set_pm_mode_complete; }; +static inline struct ieee80211_vif *wvif_to_vif(struct wfx_vif *wvif) +{ + return container_of((void *)wvif, struct ieee80211_vif, drv_priv); +} + static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id) { if (vif_id >= ARRAY_SIZE(wdev->vif)) { -- cgit v1.2.3 From 03dcb90dbf62fd0bb6152bb59b1d70c8e4ad1c87 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 9 May 2022 08:01:30 -0700 Subject: net: appletalk: remove Apple/Farallon LocalTalk PC support Looks like all the changes to this driver had been tree-wide refactoring since git era begun. 
The driver is using virt_to_bus() we should make it use more modern DMA APIs but since it's unlikely to be getting any use these days delete it instead. We can always revert to bring it back. Signed-off-by: Jakub Kicinski Acked-by: Arnd Bergmann Signed-off-by: David S. Miller --- .../networking/device_drivers/appletalk/index.rst | 1 - .../networking/device_drivers/appletalk/ltpc.rst | 144 --- drivers/net/appletalk/Kconfig | 11 - drivers/net/appletalk/Makefile | 1 - drivers/net/appletalk/ltpc.c | 1277 -------------------- drivers/net/appletalk/ltpc.h | 74 -- 6 files changed, 1508 deletions(-) delete mode 100644 Documentation/networking/device_drivers/appletalk/ltpc.rst delete mode 100644 drivers/net/appletalk/ltpc.c delete mode 100644 drivers/net/appletalk/ltpc.h (limited to 'drivers') diff --git a/Documentation/networking/device_drivers/appletalk/index.rst b/Documentation/networking/device_drivers/appletalk/index.rst index de7507f02037..c196baeb0856 100644 --- a/Documentation/networking/device_drivers/appletalk/index.rst +++ b/Documentation/networking/device_drivers/appletalk/index.rst @@ -9,7 +9,6 @@ Contents: :maxdepth: 2 cops - ltpc .. only:: subproject and html diff --git a/Documentation/networking/device_drivers/appletalk/ltpc.rst b/Documentation/networking/device_drivers/appletalk/ltpc.rst deleted file mode 100644 index 0ad197fd17ce..000000000000 --- a/Documentation/networking/device_drivers/appletalk/ltpc.rst +++ /dev/null @@ -1,144 +0,0 @@ -.. SPDX-License-Identifier: GPL-2.0 - -=========== -LTPC Driver -=========== - -This is the ALPHA version of the ltpc driver. - -In order to use it, you will need at least version 1.3.3 of the -netatalk package, and the Apple or Farallon LocalTalk PC card. -There are a number of different LocalTalk cards for the PC; this -driver applies only to the one with the 65c02 processor chip on it. - -To include it in the kernel, select the CONFIG_LTPC switch in the -configuration dialog. You can also compile it as a module. - -While the driver will attempt to autoprobe the I/O port address, IRQ -line, and DMA channel of the card, this does not always work. For -this reason, you should be prepared to supply these parameters -yourself. (see "Card Configuration" below for how to determine or -change the settings on your card) - -When the driver is compiled into the kernel, you can add a line such -as the following to your /etc/lilo.conf:: - - append="ltpc=0x240,9,1" - -where the parameters (in order) are the port address, IRQ, and DMA -channel. The second and third values can be omitted, in which case -the driver will try to determine them itself. - -If you load the driver as a module, you can pass the parameters "io=", -"irq=", and "dma=" on the command line with insmod or modprobe, or add -them as options in a configuration file in /etc/modprobe.d/ directory:: - - alias lt0 ltpc # autoload the module when the interface is configured - options ltpc io=0x240 irq=9 dma=1 - -Before starting up the netatalk demons (perhaps in rc.local), you -need to add a line such as:: - - /sbin/ifconfig lt0 127.0.0.42 - -The address is unimportant - however, the card needs to be configured -with ifconfig so that Netatalk can find it. - -The appropriate netatalk configuration depends on whether you are -attached to a network that includes AppleTalk routers or not. If, -like me, you are simply connecting to your home Macintoshes and -printers, you need to set up netatalk to "seed". 
The way I do this -is to have the lines:: - - dummy -seed -phase 2 -net 2000 -addr 2000.26 -zone "1033" - lt0 -seed -phase 1 -net 1033 -addr 1033.27 -zone "1033" - -in my atalkd.conf. What is going on here is that I need to fool -netatalk into thinking that there are two AppleTalk interfaces -present; otherwise, it refuses to seed. This is a hack, and a more -permanent solution would be to alter the netatalk code. Also, make -sure you have the correct name for the dummy interface - If it's -compiled as a module, you will need to refer to it as "dummy0" or some -such. - -If you are attached to an extended AppleTalk network, with routers on -it, then you don't need to fool around with this -- the appropriate -line in atalkd.conf is:: - - lt0 -phase 1 - - -Card Configuration -================== - -The interrupts and so forth are configured via the dipswitch on the -board. Set the switches so as not to conflict with other hardware. - - Interrupts -- set at most one. If none are set, the driver uses - polled mode. Because the card was developed in the XT era, the - original documentation refers to IRQ2. Since you'll be running - this on an AT (or later) class machine, that really means IRQ9. - - === =========================================================== - SW1 IRQ 4 - SW2 IRQ 3 - SW3 IRQ 9 (2 in original card documentation only applies to XT) - === =========================================================== - - - DMA -- choose DMA 1 or 3, and set both corresponding switches. - - === ===== - SW4 DMA 3 - SW5 DMA 1 - SW6 DMA 3 - SW7 DMA 1 - === ===== - - - I/O address -- choose one. - - === ========= - SW8 220 / 240 - === ========= - - -IP -== - -Yes, it is possible to do IP over LocalTalk. However, you can't just -treat the LocalTalk device like an ordinary Ethernet device, even if -that's what it looks like to Netatalk. - -Instead, you follow the same procedure as for doing IP in EtherTalk. -See Documentation/networking/ipddp.rst for more information about the -kernel driver and userspace tools needed. - - -Bugs -==== - -IRQ autoprobing often doesn't work on a cold boot. To get around -this, either compile the driver as a module, or pass the parameters -for the card to the kernel as described above. - -Also, as usual, autoprobing is not recommended when you use the driver -as a module. (though it usually works at boot time, at least) - -Polled mode is *really* slow sometimes, but this seems to depend on -the configuration of the network. - -It may theoretically be possible to use two LTPC cards in the same -machine, but this is unsupported, so if you really want to do this, -you'll probably have to hack the initialization code a bit. - - -Thanks -====== - -Thanks to Alan Cox for helpful discussions early on in this -work, and to Denis Hainsworth for doing the bleeding-edge testing. - -Bradford Johnson - -Updated 11/09/1998 by David Huggins-Daines diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 90b9f1d6eda9..b38ed52b82bc 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -39,17 +39,6 @@ config DEV_APPLETALK connect to the AppleTalk network, say Y. -config LTPC - tristate "Apple/Farallon LocalTalk PC support" - depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS - help - This allows you to use the AppleTalk PC card to connect to LocalTalk - networks. The card is also known as the Farallon PhoneNet PC card. - If you are in doubt, this card is the one with the 65C02 chip on it. 
- You also need version 1.3.3 or later of the netatalk package. - This driver is experimental, which means that it may not work. - See the file . - config COPS tristate "COPS LocalTalk PC support" depends on DEV_APPLETALK && ISA diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile index 903da3303f41..6db2943ce5d6 100644 --- a/drivers/net/appletalk/Makefile +++ b/drivers/net/appletalk/Makefile @@ -5,4 +5,3 @@ obj-$(CONFIG_IPDDP) += ipddp.o obj-$(CONFIG_COPS) += cops.o -obj-$(CONFIG_LTPC) += ltpc.o diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c deleted file mode 100644 index 388d7b3bd4c2..000000000000 --- a/drivers/net/appletalk/ltpc.c +++ /dev/null @@ -1,1277 +0,0 @@ -/*** ltpc.c -- a driver for the LocalTalk PC card. - * - * Copyright (c) 1995,1996 Bradford W. Johnson - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * This is ALPHA code at best. It may not work for you. It may - * damage your equipment. It may damage your relations with other - * users of your network. Use it at your own risk! - * - * Based in part on: - * skeleton.c by Donald Becker - * dummy.c by Nick Holloway and Alan Cox - * loopback.c by Ross Biro, Fred van Kampen, Donald Becker - * the netatalk source code (UMICH) - * lots of work on the card... - * - * I do not have access to the (proprietary) SDK that goes with the card. - * If you do, I don't want to know about it, and you can probably write - * a better driver yourself anyway. This does mean that the pieces that - * talk to the card are guesswork on my part, so use at your own risk! - * - * This is my first try at writing Linux networking code, and is also - * guesswork. Again, use at your own risk! (Although on this part, I'd - * welcome suggestions) - * - * This is a loadable kernel module which seems to work at my site - * consisting of a 1.2.13 linux box running netatalk 1.3.3, and with - * the kernel support from 1.3.3b2 including patches routing.patch - * and ddp.disappears.from.chooser. In order to run it, you will need - * to patch ddp.c and aarp.c in the kernel, but only a little... - * - * I'm fairly confident that while this is arguably badly written, the - * problems that people experience will be "higher level", that is, with - * complications in the netatalk code. The driver itself doesn't do - * anything terribly complicated -- it pretends to be an ether device - * as far as netatalk is concerned, strips the DDP data out of the ether - * frame and builds a LLAP packet to send out the card. In the other - * direction, it receives LLAP frames from the card and builds a fake - * ether packet that it then tosses up to the networking code. You can - * argue (correctly) that this is an ugly way to do things, but it - * requires a minimal amount of fooling with the code in ddp.c and aarp.c. - * - * The card will do a lot more than is used here -- I *think* it has the - * layers up through ATP. Even if you knew how that part works (which I - * don't) it would be a big job to carve up the kernel ddp code to insert - * things at a higher level, and probably a bad idea... - * - * There are a number of other cards that do LocalTalk on the PC. 
If - * nobody finds any insurmountable (at the netatalk level) problems - * here, this driver should encourage people to put some work into the - * other cards (some of which I gather are still commercially available) - * and also to put hooks for LocalTalk into the official ddp code. - * - * I welcome comments and suggestions. This is my first try at Linux - * networking stuff, and there are probably lots of things that I did - * suboptimally. - * - ***/ - -/*** - * - * $Log: ltpc.c,v $ - * Revision 1.1.2.1 2000/03/01 05:35:07 jgarzik - * at and tr cleanup - * - * Revision 1.8 1997/01/28 05:44:54 bradford - * Clean up for non-module a little. - * Hacked about a bit to clean things up - Alan Cox - * Probably broken it from the origina 1.8 - * - - * 1998/11/09: David Huggins-Daines - * Cleaned up the initialization code to use the standard autoirq methods, - and to probe for things in the standard order of i/o, irq, dma. This - removes the "reset the reset" hack, because I couldn't figure out an - easy way to get the card to trigger an interrupt after it. - * Added support for passing configuration parameters on the kernel command - line and through insmod - * Changed the device name from "ltalk0" to "lt0", both to conform with the - other localtalk driver, and to clear up the inconsistency between the - module and the non-module versions of the driver :-) - * Added a bunch of comments (I was going to make some enums for the state - codes and the register offsets, but I'm still not sure exactly what their - semantics are) - * Don't poll anymore in interrupt-driven mode - * It seems to work as a module now (as of 2.1.127), but I don't think - I'm responsible for that... - - * - * Revision 1.7 1996/12/12 03:42:33 bradford - * DMA alloc cribbed from 3c505.c. - * - * Revision 1.6 1996/12/12 03:18:58 bradford - * Added virt_to_bus; works in 2.1.13. - * - * Revision 1.5 1996/12/12 03:13:22 root - * xmitQel initialization -- think through better though. - * - * Revision 1.4 1996/06/18 14:55:55 root - * Change names to ltpc. Tabs. Took a shot at dma alloc, - * although more needs to be done eventually. - * - * Revision 1.3 1996/05/22 14:59:39 root - * Change dev->open, dev->close to track dummy.c in 1.99.(around 7) - * - * Revision 1.2 1996/05/22 14:58:24 root - * Change tabs mostly. - * - * Revision 1.1 1996/04/23 04:45:09 root - * Initial revision - * - * Revision 0.16 1996/03/05 15:59:56 root - * Change ARPHRD_LOCALTLK definition to the "real" one. - * - * Revision 0.15 1996/03/05 06:28:30 root - * Changes for kernel 1.3.70. Still need a few patches to kernel, but - * it's getting closer. - * - * Revision 0.14 1996/02/25 17:38:32 root - * More cleanups. Removed query to card on get_stats. - * - * Revision 0.13 1996/02/21 16:27:40 root - * Refix debug_print_skb. Fix mac.raw gotcha that appeared in 1.3.65. - * Clean up receive code a little. - * - * Revision 0.12 1996/02/19 16:34:53 root - * Fix debug_print_skb. Kludge outgoing snet to 0 when using startup - * range. Change debug to mask: 1 for verbose, 2 for higher level stuff - * including packet printing, 4 for lower level (card i/o) stuff. - * - * Revision 0.11 1996/02/12 15:53:38 root - * Added router sends (requires new aarp.c patch) - * - * Revision 0.10 1996/02/11 00:19:35 root - * Change source LTALK_LOGGING debug switch to insmod ... debug=2. - * - * Revision 0.9 1996/02/10 23:59:35 root - * Fixed those fixes for 1.2 -- DANGER! 
The at.h that comes with netatalk - * has a *different* definition of struct sockaddr_at than the Linux kernel - * does. This is an "insidious and invidious" bug... - * (Actually the preceding comment is false -- it's the atalk.h in the - * ancient atalk-0.06 that's the problem) - * - * Revision 0.8 1996/02/10 19:09:00 root - * Merge 1.3 changes. Tested OK under 1.3.60. - * - * Revision 0.7 1996/02/10 17:56:56 root - * Added debug=1 parameter on insmod for debugging prints. Tried - * to fix timer unload on rmmod, but I don't think that's the problem. - * - * Revision 0.6 1995/12/31 19:01:09 root - * Clean up rmmod, irq comments per feedback from Corin Anderson (Thanks Corey!) - * Clean up initial probing -- sometimes the card wakes up latched in reset. - * - * Revision 0.5 1995/12/22 06:03:44 root - * Added comments in front and cleaned up a bit. - * This version sent out to people. - * - * Revision 0.4 1995/12/18 03:46:44 root - * Return shortDDP to longDDP fake to 0/0. Added command structs. - * - ***/ - -/* ltpc jumpers are: -* -* Interrupts -- set at most one. If none are set, the driver uses -* polled mode. Because the card was developed in the XT era, the -* original documentation refers to IRQ2. Since you'll be running -* this on an AT (or later) class machine, that really means IRQ9. -* -* SW1 IRQ 4 -* SW2 IRQ 3 -* SW3 IRQ 9 (2 in original card documentation only applies to XT) -* -* -* DMA -- choose DMA 1 or 3, and set both corresponding switches. -* -* SW4 DMA 3 -* SW5 DMA 1 -* SW6 DMA 3 -* SW7 DMA 1 -* -* -* I/O address -- choose one. -* -* SW8 220 / 240 -*/ - -/* To have some stuff logged, do -* insmod ltpc.o debug=1 -* -* For a whole bunch of stuff, use higher numbers. -* -* The default is 0, i.e. no messages except for the probe results. 
-*/ - -/* insmod-tweakable variables */ -static int debug; -#define DEBUG_VERBOSE 1 -#define DEBUG_UPPER 2 -#define DEBUG_LOWER 4 - -static int io; -static int irq; -static int dma; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -/* our stuff */ -#include "ltpc.h" - -static DEFINE_SPINLOCK(txqueue_lock); -static DEFINE_SPINLOCK(mbox_lock); - -/* function prototypes */ -static int do_read(struct net_device *dev, void *cbuf, int cbuflen, - void *dbuf, int dbuflen); -static int sendup_buffer (struct net_device *dev); - -/* Dma Memory related stuff, cribbed directly from 3c505.c */ - -static unsigned long dma_mem_alloc(int size) -{ - int order = get_order(size); - - return __get_dma_pages(GFP_KERNEL, order); -} - -/* DMA data buffer, DMA command buffer */ -static unsigned char *ltdmabuf; -static unsigned char *ltdmacbuf; - -/* private struct, holds our appletalk address */ - -struct ltpc_private -{ - struct atalk_addr my_addr; -}; - -/* transmit queue element struct */ - -struct xmitQel { - struct xmitQel *next; - /* command buffer */ - unsigned char *cbuf; - short cbuflen; - /* data buffer */ - unsigned char *dbuf; - short dbuflen; - unsigned char QWrite; /* read or write data */ - unsigned char mailbox; -}; - -/* the transmit queue itself */ - -static struct xmitQel *xmQhd, *xmQtl; - -static void enQ(struct xmitQel *qel) -{ - unsigned long flags; - qel->next = NULL; - - spin_lock_irqsave(&txqueue_lock, flags); - if (xmQtl) { - xmQtl->next = qel; - } else { - xmQhd = qel; - } - xmQtl = qel; - spin_unlock_irqrestore(&txqueue_lock, flags); - - if (debug & DEBUG_LOWER) - printk("enqueued a 0x%02x command\n",qel->cbuf[0]); -} - -static struct xmitQel *deQ(void) -{ - unsigned long flags; - int i; - struct xmitQel *qel=NULL; - - spin_lock_irqsave(&txqueue_lock, flags); - if (xmQhd) { - qel = xmQhd; - xmQhd = qel->next; - if(!xmQhd) xmQtl = NULL; - } - spin_unlock_irqrestore(&txqueue_lock, flags); - - if ((debug & DEBUG_LOWER) && qel) { - int n; - printk(KERN_DEBUG "ltpc: dequeued command "); - n = qel->cbuflen; - if (n>100) n=100; - for(i=0;icbuf[i]); - printk("\n"); - } - - return qel; -} - -/* and... 
the queue elements we'll be using */ -static struct xmitQel qels[16]; - -/* and their corresponding mailboxes */ -static unsigned char mailbox[16]; -static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; - -static int wait_timeout(struct net_device *dev, int c) -{ - /* returns true if it stayed c */ - /* this uses base+6, but it's ok */ - int i; - - /* twenty second or so total */ - - for(i=0;i<200000;i++) { - if ( c != inb_p(dev->base_addr+6) ) return 0; - udelay(100); - } - return 1; /* timed out */ -} - -/* get the first free mailbox */ - -static int getmbox(void) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&mbox_lock, flags); - for(i=1;i<16;i++) if(!mboxinuse[i]) { - mboxinuse[i]=1; - spin_unlock_irqrestore(&mbox_lock, flags); - return i; - } - spin_unlock_irqrestore(&mbox_lock, flags); - return 0; -} - -/* read a command from the card */ -static void handlefc(struct net_device *dev) -{ - /* called *only* from idle, non-reentrant */ - int dma = dev->dma; - int base = dev->base_addr; - unsigned long flags; - - - flags=claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - set_dma_mode(dma,DMA_MODE_READ); - set_dma_addr(dma,virt_to_bus(ltdmacbuf)); - set_dma_count(dma,50); - enable_dma(dma); - release_dma_lock(flags); - - inb_p(base+3); - inb_p(base+2); - - if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n"); -} - -/* read data from the card */ -static void handlefd(struct net_device *dev) -{ - int dma = dev->dma; - int base = dev->base_addr; - unsigned long flags; - - flags=claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - set_dma_mode(dma,DMA_MODE_READ); - set_dma_addr(dma,virt_to_bus(ltdmabuf)); - set_dma_count(dma,800); - enable_dma(dma); - release_dma_lock(flags); - - inb_p(base+3); - inb_p(base+2); - - if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n"); - sendup_buffer(dev); -} - -static void handlewrite(struct net_device *dev) -{ - /* called *only* from idle, non-reentrant */ - /* on entry, 0xfb and ltdmabuf holds data */ - int dma = dev->dma; - int base = dev->base_addr; - unsigned long flags; - - flags=claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - set_dma_mode(dma,DMA_MODE_WRITE); - set_dma_addr(dma,virt_to_bus(ltdmabuf)); - set_dma_count(dma,800); - enable_dma(dma); - release_dma_lock(flags); - - inb_p(base+3); - inb_p(base+2); - - if ( wait_timeout(dev,0xfb) ) { - flags=claim_dma_lock(); - printk("timed out in handlewrite, dma res %d\n", - get_dma_residue(dev->dma) ); - release_dma_lock(flags); - } -} - -static void handleread(struct net_device *dev) -{ - /* on entry, 0xfb */ - /* on exit, ltdmabuf holds data */ - int dma = dev->dma; - int base = dev->base_addr; - unsigned long flags; - - - flags=claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - set_dma_mode(dma,DMA_MODE_READ); - set_dma_addr(dma,virt_to_bus(ltdmabuf)); - set_dma_count(dma,800); - enable_dma(dma); - release_dma_lock(flags); - - inb_p(base+3); - inb_p(base+2); - if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n"); -} - -static void handlecommand(struct net_device *dev) -{ - /* on entry, 0xfa and ltdmacbuf holds command */ - int dma = dev->dma; - int base = dev->base_addr; - unsigned long flags; - - flags=claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - set_dma_mode(dma,DMA_MODE_WRITE); - set_dma_addr(dma,virt_to_bus(ltdmacbuf)); - set_dma_count(dma,50); - enable_dma(dma); - release_dma_lock(flags); - inb_p(base+3); - inb_p(base+2); - if ( wait_timeout(dev,0xfa) ) printk("timed out in 
handlecommand\n"); -} - -/* ready made command for getting the result from the card */ -static unsigned char rescbuf[2] = {LT_GETRESULT,0}; -static unsigned char resdbuf[2]; - -static int QInIdle; - -/* idle expects to be called with the IRQ line high -- either because of - * an interrupt, or because the line is tri-stated - */ - -static void idle(struct net_device *dev) -{ - unsigned long flags; - int state; - /* FIXME This is initialized to shut the warning up, but I need to - * think this through again. - */ - struct xmitQel *q = NULL; - int oops; - int i; - int base = dev->base_addr; - - spin_lock_irqsave(&txqueue_lock, flags); - if(QInIdle) { - spin_unlock_irqrestore(&txqueue_lock, flags); - return; - } - QInIdle = 1; - spin_unlock_irqrestore(&txqueue_lock, flags); - - /* this tri-states the IRQ line */ - (void) inb_p(base+6); - - oops = 100; - -loop: - if (0>oops--) { - printk("idle: looped too many times\n"); - goto done; - } - - state = inb_p(base+6); - if (state != inb_p(base+6)) goto loop; - - switch(state) { - case 0xfc: - /* incoming command */ - if (debug & DEBUG_LOWER) printk("idle: fc\n"); - handlefc(dev); - break; - case 0xfd: - /* incoming data */ - if(debug & DEBUG_LOWER) printk("idle: fd\n"); - handlefd(dev); - break; - case 0xf9: - /* result ready */ - if (debug & DEBUG_LOWER) printk("idle: f9\n"); - if(!mboxinuse[0]) { - mboxinuse[0] = 1; - qels[0].cbuf = rescbuf; - qels[0].cbuflen = 2; - qels[0].dbuf = resdbuf; - qels[0].dbuflen = 2; - qels[0].QWrite = 0; - qels[0].mailbox = 0; - enQ(&qels[0]); - } - inb_p(dev->base_addr+1); - inb_p(dev->base_addr+0); - if( wait_timeout(dev,0xf9) ) - printk("timed out idle f9\n"); - break; - case 0xf8: - /* ?? */ - if (xmQhd) { - inb_p(dev->base_addr+1); - inb_p(dev->base_addr+0); - if(wait_timeout(dev,0xf8) ) - printk("timed out idle f8\n"); - } else { - goto done; - } - break; - case 0xfa: - /* waiting for command */ - if(debug & DEBUG_LOWER) printk("idle: fa\n"); - if (xmQhd) { - q=deQ(); - memcpy(ltdmacbuf,q->cbuf,q->cbuflen); - ltdmacbuf[1] = q->mailbox; - if (debug>1) { - int n; - printk("ltpc: sent command "); - n = q->cbuflen; - if (n>100) n=100; - for(i=0;iQWrite) { - memcpy(ltdmabuf,q->dbuf,q->dbuflen); - handlewrite(dev); - } else { - handleread(dev); - /* non-zero mailbox numbers are for - commmands, 0 is for GETRESULT - requests */ - if(q->mailbox) { - memcpy(q->dbuf,ltdmabuf,q->dbuflen); - } else { - /* this was a result */ - mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1]; - mboxinuse[0]=0; - } - } - break; - } - goto loop; - -done: - QInIdle=0; - - /* now set the interrupts back as appropriate */ - /* the first read takes it out of tri-state (but still high) */ - /* the second resets it */ - /* note that after this point, any read of base+6 will - trigger an interrupt */ - - if (dev->irq) { - inb_p(base+7); - inb_p(base+7); - } -} - - -static int do_write(struct net_device *dev, void *cbuf, int cbuflen, - void *dbuf, int dbuflen) -{ - - int i = getmbox(); - int ret; - - if(i) { - qels[i].cbuf = cbuf; - qels[i].cbuflen = cbuflen; - qels[i].dbuf = dbuf; - qels[i].dbuflen = dbuflen; - qels[i].QWrite = 1; - qels[i].mailbox = i; /* this should be initted rather */ - enQ(&qels[i]); - idle(dev); - ret = mailbox[i]; - mboxinuse[i]=0; - return ret; - } - printk("ltpc: could not allocate mbox\n"); - return -1; -} - -static int do_read(struct net_device *dev, void *cbuf, int cbuflen, - void *dbuf, int dbuflen) -{ - - int i = getmbox(); - int ret; - - if(i) { - qels[i].cbuf = cbuf; - qels[i].cbuflen = cbuflen; - qels[i].dbuf = dbuf; - 
qels[i].dbuflen = dbuflen; - qels[i].QWrite = 0; - qels[i].mailbox = i; /* this should be initted rather */ - enQ(&qels[i]); - idle(dev); - ret = mailbox[i]; - mboxinuse[i]=0; - return ret; - } - printk("ltpc: could not allocate mbox\n"); - return -1; -} - -/* end of idle handlers -- what should be seen is do_read, do_write */ - -static struct timer_list ltpc_timer; -static struct net_device *ltpc_timer_dev; - -static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev); - -static int read_30 ( struct net_device *dev) -{ - lt_command c; - c.getflags.command = LT_GETFLAGS; - return do_read(dev, &c, sizeof(c.getflags),&c,0); -} - -static int set_30 (struct net_device *dev,int x) -{ - lt_command c; - c.setflags.command = LT_SETFLAGS; - c.setflags.flags = x; - return do_write(dev, &c, sizeof(c.setflags),&c,0); -} - -/* LLAP to DDP translation */ - -static int sendup_buffer (struct net_device *dev) -{ - /* on entry, command is in ltdmacbuf, data in ltdmabuf */ - /* called from idle, non-reentrant */ - - int dnode, snode, llaptype, len; - int sklen; - struct sk_buff *skb; - struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf; - - if (ltc->command != LT_RCVLAP) { - printk("unknown command 0x%02x from ltpc card\n",ltc->command); - return -1; - } - dnode = ltc->dnode; - snode = ltc->snode; - llaptype = ltc->laptype; - len = ltc->length; - - sklen = len; - if (llaptype == 1) - sklen += 8; /* correct for short ddp */ - if(sklen > 800) { - printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n", - dev->name,sklen); - return -1; - } - - if ( (llaptype==0) || (llaptype>2) ) { - printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype); - return -1; - } - - - skb = dev_alloc_skb(3+sklen); - if (skb == NULL) - { - printk("%s: dropping packet due to memory squeeze.\n", - dev->name); - return -1; - } - skb->dev = dev; - - if (sklen > len) - skb_reserve(skb,8); - skb_put(skb,len+3); - skb->protocol = htons(ETH_P_LOCALTALK); - /* add LLAP header */ - skb->data[0] = dnode; - skb->data[1] = snode; - skb->data[2] = llaptype; - skb_reset_mac_header(skb); /* save pointer to llap header */ - skb_pull(skb,3); - - /* copy ddp(s,e)hdr + contents */ - skb_copy_to_linear_data(skb, ltdmabuf, len); - - skb_reset_transport_header(skb); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - - /* toss it onwards */ - netif_rx(skb); - return 0; -} - -/* the handler for the board interrupt */ - -static irqreturn_t -ltpc_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - - if (dev==NULL) { - printk("ltpc_interrupt: unknown device.\n"); - return IRQ_NONE; - } - - inb_p(dev->base_addr+6); /* disable further interrupts from board */ - - idle(dev); /* handle whatever is coming in */ - - /* idle re-enables interrupts from board */ - - return IRQ_HANDLED; -} - -/*** - * - * The ioctls that the driver responds to are: - * - * SIOCSIFADDR -- do probe using the passed node hint. - * SIOCGIFADDR -- return net, node. - * - * some of this stuff should be done elsewhere. 
- * - ***/ - -static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr; - /* we'll keep the localtalk node address in dev->pa_addr */ - struct ltpc_private *ltpc_priv = netdev_priv(dev); - struct atalk_addr *aa = <pc_priv->my_addr; - struct lt_init c; - int ltflags; - - if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n"); - - switch(cmd) { - case SIOCSIFADDR: - - aa->s_net = sa->sat_addr.s_net; - - /* this does the probe and returns the node addr */ - c.command = LT_INIT; - c.hint = sa->sat_addr.s_node; - - aa->s_node = do_read(dev,&c,sizeof(c),&c,0); - - /* get all llap frames raw */ - ltflags = read_30(dev); - ltflags |= LT_FLAG_ALLLAP; - set_30 (dev,ltflags); - - dev->broadcast[0] = 0xFF; - dev->addr_len=1; - dev_addr_set(dev, &aa->s_node); - - return 0; - - case SIOCGIFADDR: - - sa->sat_addr.s_net = aa->s_net; - sa->sat_addr.s_node = aa->s_node; - - return 0; - - default: - return -EINVAL; - } -} - -static void set_multicast_list(struct net_device *dev) -{ - /* This needs to be present to keep netatalk happy. */ - /* Actually netatalk needs fixing! */ -} - -static int ltpc_poll_counter; - -static void ltpc_poll(struct timer_list *unused) -{ - del_timer(<pc_timer); - - if(debug & DEBUG_VERBOSE) { - if (!ltpc_poll_counter) { - ltpc_poll_counter = 50; - printk("ltpc poll is alive\n"); - } - ltpc_poll_counter--; - } - - /* poll 20 times per second */ - idle(ltpc_timer_dev); - ltpc_timer.expires = jiffies + HZ/20; - add_timer(<pc_timer); -} - -/* DDP to LLAP translation */ - -static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev) -{ - /* in kernel 1.3.xx, on entry skb->data points to ddp header, - * and skb->len is the length of the ddp data + ddp header - */ - int i; - struct lt_sendlap cbuf; - unsigned char *hdr; - - cbuf.command = LT_SENDLAP; - cbuf.dnode = skb->data[0]; - cbuf.laptype = skb->data[2]; - skb_pull(skb,3); /* skip past LLAP header */ - cbuf.length = skb->len; /* this is host order */ - skb_reset_transport_header(skb); - - if(debug & DEBUG_UPPER) { - printk("command "); - for(i=0;i<6;i++) - printk("%02x ",((unsigned char *)&cbuf)[i]); - printk("\n"); - } - - hdr = skb_transport_header(skb); - do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len); - - if(debug & DEBUG_UPPER) { - printk("sent %d ddp bytes\n",skb->len); - for (i = 0; i < skb->len; i++) - printk("%02x ", hdr[i]); - printk("\n"); - } - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* initialization stuff */ - -static int __init ltpc_probe_dma(int base, int dma) -{ - int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3; - unsigned long timeout; - unsigned long f; - - if (want & 1) { - if (request_dma(1,"ltpc")) { - want &= ~1; - } else { - f=claim_dma_lock(); - disable_dma(1); - clear_dma_ff(1); - set_dma_mode(1,DMA_MODE_WRITE); - set_dma_addr(1,virt_to_bus(ltdmabuf)); - set_dma_count(1,sizeof(struct lt_mem)); - enable_dma(1); - release_dma_lock(f); - } - } - if (want & 2) { - if (request_dma(3,"ltpc")) { - want &= ~2; - } else { - f=claim_dma_lock(); - disable_dma(3); - clear_dma_ff(3); - set_dma_mode(3,DMA_MODE_WRITE); - set_dma_addr(3,virt_to_bus(ltdmabuf)); - set_dma_count(3,sizeof(struct lt_mem)); - enable_dma(3); - release_dma_lock(f); - } - } - /* set up request */ - - /* FIXME -- do timings better! 
*/ - - ltdmabuf[0] = LT_READMEM; - ltdmabuf[1] = 1; /* mailbox */ - ltdmabuf[2] = 0; ltdmabuf[3] = 0; /* address */ - ltdmabuf[4] = 0; ltdmabuf[5] = 1; /* read 0x0100 bytes */ - ltdmabuf[6] = 0; /* dunno if this is necessary */ - - inb_p(io+1); - inb_p(io+0); - timeout = jiffies+100*HZ/100; - while(time_before(jiffies, timeout)) { - if ( 0xfa == inb_p(io+6) ) break; - } - - inb_p(io+3); - inb_p(io+2); - while(time_before(jiffies, timeout)) { - if ( 0xfb == inb_p(io+6) ) break; - } - - /* release the other dma channel (if we opened both of them) */ - - if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) { - want &= ~2; - free_dma(3); - } - - if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) { - want &= ~1; - free_dma(1); - } - - if (!want) - return 0; - - return (want & 2) ? 3 : 1; -} - -static const struct net_device_ops ltpc_netdev = { - .ndo_start_xmit = ltpc_xmit, - .ndo_do_ioctl = ltpc_ioctl, - .ndo_set_rx_mode = set_multicast_list, -}; - -static struct net_device * __init ltpc_probe(void) -{ - struct net_device *dev; - int err = -ENOMEM; - int x=0,y=0; - int autoirq; - unsigned long f; - unsigned long timeout; - - dev = alloc_ltalkdev(sizeof(struct ltpc_private)); - if (!dev) - goto out; - - /* probe for the I/O port address */ - - if (io != 0x240 && request_region(0x220,8,"ltpc")) { - x = inb_p(0x220+6); - if ( (x!=0xff) && (x>=0xf0) ) { - io = 0x220; - goto got_port; - } - release_region(0x220,8); - } - if (io != 0x220 && request_region(0x240,8,"ltpc")) { - y = inb_p(0x240+6); - if ( (y!=0xff) && (y>=0xf0) ){ - io = 0x240; - goto got_port; - } - release_region(0x240,8); - } - - /* give up in despair */ - printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y); - err = -ENODEV; - goto out1; - - got_port: - /* probe for the IRQ line */ - if (irq < 2) { - unsigned long irq_mask; - - irq_mask = probe_irq_on(); - /* reset the interrupt line */ - inb_p(io+7); - inb_p(io+7); - /* trigger an interrupt (I hope) */ - inb_p(io+6); - mdelay(2); - autoirq = probe_irq_off(irq_mask); - - if (autoirq == 0) { - printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io); - } else { - irq = autoirq; - } - } - - /* allocate a DMA buffer */ - ltdmabuf = (unsigned char *) dma_mem_alloc(1000); - if (!ltdmabuf) { - printk(KERN_ERR "ltpc: mem alloc failed\n"); - err = -ENOMEM; - goto out2; - } - - ltdmacbuf = <dmabuf[800]; - - if(debug & DEBUG_VERBOSE) { - printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf); - } - - /* reset the card */ - - inb_p(io+1); - inb_p(io+3); - - msleep(20); - - inb_p(io+0); - inb_p(io+2); - inb_p(io+7); /* clear reset */ - inb_p(io+4); - inb_p(io+5); - inb_p(io+5); /* enable dma */ - inb_p(io+6); /* tri-state interrupt line */ - - ssleep(1); - - /* now, figure out which dma channel we're using, unless it's - already been specified */ - /* well, 0 is a legal DMA channel, but the LTPC card doesn't - use it... */ - dma = ltpc_probe_dma(io, dma); - if (!dma) { /* no dma channel */ - printk(KERN_ERR "No DMA channel found on ltpc card.\n"); - err = -ENODEV; - goto out3; - } - - /* print out friendly message */ - if(irq) - printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma); - else - printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma); - - dev->netdev_ops = <pc_netdev; - dev->base_addr = io; - dev->irq = irq; - dev->dma = dma; - - /* the card will want to send a result at this point */ - /* (I think... 
leaving out this part makes the kernel crash, - so I put it back in...) */ - - f=claim_dma_lock(); - disable_dma(dma); - clear_dma_ff(dma); - set_dma_mode(dma,DMA_MODE_READ); - set_dma_addr(dma,virt_to_bus(ltdmabuf)); - set_dma_count(dma,0x100); - enable_dma(dma); - release_dma_lock(f); - - (void) inb_p(io+3); - (void) inb_p(io+2); - timeout = jiffies+100*HZ/100; - - while(time_before(jiffies, timeout)) { - if( 0xf9 == inb_p(io+6)) - break; - schedule(); - } - - if(debug & DEBUG_VERBOSE) { - printk("setting up timer and irq\n"); - } - - /* grab it and don't let go :-) */ - if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0) - { - (void) inb_p(io+7); /* enable interrupts from board */ - (void) inb_p(io+7); /* and reset irq line */ - } else { - if( irq ) - printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n"); - dev->irq = 0; - /* polled mode -- 20 times per second */ - /* this is really, really slow... should it poll more often? */ - ltpc_timer_dev = dev; - timer_setup(<pc_timer, ltpc_poll, 0); - - ltpc_timer.expires = jiffies + HZ/20; - add_timer(<pc_timer); - } - err = register_netdev(dev); - if (err) - goto out4; - - return NULL; -out4: - del_timer_sync(<pc_timer); - if (dev->irq) - free_irq(dev->irq, dev); -out3: - free_pages((unsigned long)ltdmabuf, get_order(1000)); -out2: - release_region(io, 8); -out1: - free_netdev(dev); -out: - return ERR_PTR(err); -} - -#ifndef MODULE -/* handles "ltpc=io,irq,dma" kernel command lines */ -static int __init ltpc_setup(char *str) -{ - int ints[5]; - - str = get_options(str, ARRAY_SIZE(ints), ints); - - if (ints[0] == 0) { - if (str && !strncmp(str, "auto", 4)) { - /* do nothing :-) */ - } - else { - /* usage message */ - printk (KERN_ERR - "ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n"); - return 0; - } - } else { - io = ints[1]; - if (ints[0] > 1) { - irq = ints[2]; - } - if (ints[0] > 2) { - dma = ints[3]; - } - /* ignore any other parameters */ - } - return 1; -} - -__setup("ltpc=", ltpc_setup); -#endif - -static struct net_device *dev_ltpc; - -MODULE_LICENSE("GPL"); -module_param(debug, int, 0); -module_param_hw(io, int, ioport, 0); -module_param_hw(irq, int, irq, 0); -module_param_hw(dma, int, dma, 0); - - -static int __init ltpc_module_init(void) -{ - if(io == 0) - printk(KERN_NOTICE - "ltpc: Autoprobing is not recommended for modules\n"); - - dev_ltpc = ltpc_probe(); - return PTR_ERR_OR_ZERO(dev_ltpc); -} -module_init(ltpc_module_init); - -static void __exit ltpc_cleanup(void) -{ - - if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n"); - unregister_netdev(dev_ltpc); - - del_timer_sync(<pc_timer); - - if(debug & DEBUG_VERBOSE) printk("freeing irq\n"); - - if (dev_ltpc->irq) - free_irq(dev_ltpc->irq, dev_ltpc); - - if(debug & DEBUG_VERBOSE) printk("freeing dma\n"); - - if (dev_ltpc->dma) - free_dma(dev_ltpc->dma); - - if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n"); - - if (dev_ltpc->base_addr) - release_region(dev_ltpc->base_addr,8); - - free_netdev(dev_ltpc); - - if(debug & DEBUG_VERBOSE) printk("free_pages\n"); - - free_pages( (unsigned long) ltdmabuf, get_order(1000)); - - if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n"); -} - -module_exit(ltpc_cleanup); diff --git a/drivers/net/appletalk/ltpc.h b/drivers/net/appletalk/ltpc.h deleted file mode 100644 index 58cf945732a4..000000000000 --- a/drivers/net/appletalk/ltpc.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/*** ltpc.h - * - * - ***/ - -#define LT_GETRESULT 0x00 -#define LT_WRITEMEM 0x01 -#define 
LT_READMEM 0x02 -#define LT_GETFLAGS 0x04 -#define LT_SETFLAGS 0x05 -#define LT_INIT 0x10 -#define LT_SENDLAP 0x13 -#define LT_RCVLAP 0x14 - -/* the flag that we care about */ -#define LT_FLAG_ALLLAP 0x04 - -struct lt_getresult { - unsigned char command; - unsigned char mailbox; -}; - -struct lt_mem { - unsigned char command; - unsigned char mailbox; - unsigned short addr; /* host order */ - unsigned short length; /* host order */ -}; - -struct lt_setflags { - unsigned char command; - unsigned char mailbox; - unsigned char flags; -}; - -struct lt_getflags { - unsigned char command; - unsigned char mailbox; -}; - -struct lt_init { - unsigned char command; - unsigned char mailbox; - unsigned char hint; -}; - -struct lt_sendlap { - unsigned char command; - unsigned char mailbox; - unsigned char dnode; - unsigned char laptype; - unsigned short length; /* host order */ -}; - -struct lt_rcvlap { - unsigned char command; - unsigned char dnode; - unsigned char snode; - unsigned char laptype; - unsigned short length; /* host order */ -}; - -union lt_command { - struct lt_getresult getresult; - struct lt_mem mem; - struct lt_setflags setflags; - struct lt_getflags getflags; - struct lt_init init; - struct lt_sendlap sendlap; - struct lt_rcvlap rcvlap; -}; -typedef union lt_command lt_command; - -- cgit v1.2.3 From 01f4685797a5723b0046da03c30185ac9ff42b30 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 9 May 2022 08:05:32 -0700 Subject: eth: amd: remove NI6510 support (ni65) Looks like all the changes to this driver had been tree-wide refactoring since git era begun. The driver is using virt_to_bus() we should make it use more modern DMA APIs but since it's unlikely to be getting any use these days delete it instead. We can always revert to bring it back. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/Space.c | 3 - drivers/net/ethernet/amd/Kconfig | 10 - drivers/net/ethernet/amd/Makefile | 1 - drivers/net/ethernet/amd/ni65.c | 1251 ------------------------------------- drivers/net/ethernet/amd/ni65.h | 121 ---- 5 files changed, 1386 deletions(-) delete mode 100644 drivers/net/ethernet/amd/ni65.c delete mode 100644 drivers/net/ethernet/amd/ni65.h (limited to 'drivers') diff --git a/drivers/net/Space.c b/drivers/net/Space.c index 49e67c9fb5a4..f475eef14390 100644 --- a/drivers/net/Space.c +++ b/drivers/net/Space.c @@ -221,9 +221,6 @@ static struct devprobe2 isa_probes[] __initdata = { #endif #ifdef CONFIG_CS89x0_ISA {cs89x0_probe, 0}, -#endif -#ifdef CONFIG_NI65 - {ni65_probe, 0}, #endif {NULL, 0}, }; diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index 899c8a2a34b6..ab42f75b9413 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -130,16 +130,6 @@ config PCMCIA_NMCLAN To compile this driver as a module, choose M here: the module will be called nmclan_cs. If unsure, say N. -config NI65 - tristate "NI6510 support" - depends on ISA && ISA_DMA_API && !ARM && !PPC32 - select NETDEV_LEGACY_INIT - help - If you have a network (Ethernet) card of this type, say Y here. - - To compile this driver as a module, choose M here. The module - will be called ni65. 
- config SUN3LANCE tristate "Sun3/Sun3x on-board LANCE support" depends on (SUN3 || SUN3X) diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile index 0d2f478b49a5..42742afe9115 100644 --- a/drivers/net/ethernet/amd/Makefile +++ b/drivers/net/ethernet/amd/Makefile @@ -13,7 +13,6 @@ obj-$(CONFIG_LANCE) += lance.o obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o -obj-$(CONFIG_NI65) += ni65.o obj-$(CONFIG_PCNET32) += pcnet32.o obj-$(CONFIG_SUN3LANCE) += sun3lance.o obj-$(CONFIG_SUNLANCE) += sunlance.o diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c deleted file mode 100644 index 8ba579b89b75..000000000000 --- a/drivers/net/ethernet/amd/ni65.c +++ /dev/null @@ -1,1251 +0,0 @@ -/* - * ni6510 (am7990 'lance' chip) driver for Linux-net-3 - * BETAcode v0.71 (96/09/29) for 2.0.0 (or later) - * copyrights (c) 1994,1995,1996 by M.Hipp - * - * This driver can handle the old ni6510 board and the newer ni6510 - * EtherBlaster. (probably it also works with every full NE2100 - * compatible card) - * - * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7 - * - * This is an extension to the Linux operating system, and is covered by the - * same GNU General Public License that covers the Linux-kernel. - * - * comments/bugs/suggestions can be sent to: - * Michael Hipp - * email: hippm@informatik.uni-tuebingen.de - * - * sources: - * some things are from the 'ni6510-packet-driver for dos by Russ Nelson' - * and from the original drivers by D.Becker - * - * known problems: - * - on some PCI boards (including my own) the card/board/ISA-bridge has - * problems with bus master DMA. This results in lotsa overruns. - * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef - * the XMT and RCV_VIA_SKB option .. this reduces driver performance. - * Or just play with your BIOS options to optimize ISA-DMA access. - * Maybe you also wanna play with the LOW_PERFORAMCE and MID_PERFORMANCE - * defines -> please report me your experience then - * - Harald reported for ASUS SP3G mainboards, that you should use - * the 'optimal settings' from the user's manual on page 3-12! - * - * credits: - * thanx to Jason Sullivan for sending me a ni6510 card! - * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig - * - * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB) - * average: FTP -> 8384421 bytes received in 8.5 seconds - * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS) - * peak: FTP -> 8384421 bytes received in 7.5 seconds - * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS) - */ - -/* - * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK) - * 96.Sept.29: virt_to_bus stuff added for new memory modell - * 96.April.29: Added Harald Koenig's Patches (MH) - * 96.April.13: enhanced error handling .. more tests (MH) - * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH) - * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH) - * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH) - * hopefully no more 16MB limit - * - * 95.Nov.18: multicast tweaked (AC). 
- * - * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH) - * - * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "ni65.h" - -/* - * the current setting allows an acceptable performance - * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in - * the header of this file - * 'invert' the defines for max. performance. This may cause DMA problems - * on some boards (e.g on my ASUS SP3G) - */ -#undef XMT_VIA_SKB -#undef RCV_VIA_SKB -#define RCV_PARANOIA_CHECK - -#define MID_PERFORMANCE - -#if defined( LOW_PERFORMANCE ) - static int isa0=7,isa1=7,csr80=0x0c10; -#elif defined( MID_PERFORMANCE ) - static int isa0=5,isa1=5,csr80=0x2810; -#else /* high performance */ - static int isa0=4,isa1=4,csr80=0x0017; -#endif - -/* - * a few card/vendor specific defines - */ -#define NI65_ID0 0x00 -#define NI65_ID1 0x55 -#define NI65_EB_ID0 0x52 -#define NI65_EB_ID1 0x44 -#define NE2100_ID0 0x57 -#define NE2100_ID1 0x57 - -#define PORT p->cmdr_addr - -/* - * buffer configuration - */ -#if 1 -#define RMDNUM 16 -#define RMDNUMMASK 0x80000000 -#else -#define RMDNUM 8 -#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */ -#endif - -#if 0 -#define TMDNUM 1 -#define TMDNUMMASK 0x00000000 -#else -#define TMDNUM 4 -#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */ -#endif - -/* slightly oversized */ -#define R_BUF_SIZE 1544 -#define T_BUF_SIZE 1544 - -/* - * lance register defines - */ -#define L_DATAREG 0x00 -#define L_ADDRREG 0x02 -#define L_RESET 0x04 -#define L_CONFIG 0x05 -#define L_BUSIF 0x06 - -/* - * to access the lance/am7990-regs, you have to write - * reg-number into L_ADDRREG, then you can access it using L_DATAREG - */ -#define CSR0 0x00 -#define CSR1 0x01 -#define CSR2 0x02 -#define CSR3 0x03 - -#define INIT_RING_BEFORE_START 0x1 -#define FULL_RESET_ON_ERROR 0x2 - -#if 0 -#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \ - outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} -#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\ - inw(PORT+L_DATAREG)) -#if 0 -#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} -#else -#define writedatareg(val) { writereg(val,CSR0); } -#endif -#else -#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);} -#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG)) -#define writedatareg(val) { writereg(val,CSR0); } -#endif - -static unsigned char ni_vendor[] = { 0x02,0x07,0x01 }; - -static struct card { - unsigned char id0,id1; - short id_offset; - short total_size; - short cmd_offset; - short addr_offset; - unsigned char *vendor_id; - char *cardname; - unsigned long config; -} cards[] = { - { - .id0 = NI65_ID0, - .id1 = NI65_ID1, - .id_offset = 0x0e, - .total_size = 0x10, - .cmd_offset = 0x0, - .addr_offset = 0x8, - .vendor_id = ni_vendor, - .cardname = "ni6510", - .config = 0x1, - }, - { - .id0 = NI65_EB_ID0, - .id1 = NI65_EB_ID1, - .id_offset = 0x0e, - .total_size = 0x18, - .cmd_offset = 0x10, - .addr_offset = 0x0, - .vendor_id = ni_vendor, - .cardname = "ni6510 EtherBlaster", - .config = 0x2, - }, - { - .id0 = NE2100_ID0, - .id1 = NE2100_ID1, - .id_offset = 0x0e, - .total_size = 0x18, - .cmd_offset = 0x10, - .addr_offset = 0x0, - .vendor_id = NULL, - .cardname = "generic NE2100", - .config = 0x0, - }, -}; -#define NUM_CARDS 3 - -struct priv -{ 
- struct rmd rmdhead[RMDNUM]; - struct tmd tmdhead[TMDNUM]; - struct init_block ib; - int rmdnum; - int tmdnum,tmdlast; -#ifdef RCV_VIA_SKB - struct sk_buff *recv_skb[RMDNUM]; -#else - void *recvbounce[RMDNUM]; -#endif -#ifdef XMT_VIA_SKB - struct sk_buff *tmd_skb[TMDNUM]; -#endif - void *tmdbounce[TMDNUM]; - int tmdbouncenum; - int lock,xmit_queued; - - void *self; - int cmdr_addr; - int cardno; - int features; - spinlock_t ring_lock; -}; - -static int ni65_probe1(struct net_device *dev,int); -static irqreturn_t ni65_interrupt(int irq, void * dev_id); -static void ni65_recv_intr(struct net_device *dev,int); -static void ni65_xmit_intr(struct net_device *dev,int); -static int ni65_open(struct net_device *dev); -static int ni65_lance_reinit(struct net_device *dev); -static void ni65_init_lance(struct priv *p,const unsigned char*,int,int); -static netdev_tx_t ni65_send_packet(struct sk_buff *skb, - struct net_device *dev); -static void ni65_timeout(struct net_device *dev, unsigned int txqueue); -static int ni65_close(struct net_device *dev); -static int ni65_alloc_buffer(struct net_device *dev); -static void ni65_free_buffer(struct priv *p); -static void set_multicast_list(struct net_device *dev); - -static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */ -static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */ - -static int debuglevel = 1; - -/* - * set 'performance' registers .. we must STOP lance for that - */ -static void ni65_set_performance(struct priv *p) -{ - writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */ - - if( !(cards[p->cardno].config & 0x02) ) - return; - - outw(80,PORT+L_ADDRREG); - if(inw(PORT+L_ADDRREG) != 80) - return; - - writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */ - outw(0,PORT+L_ADDRREG); - outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */ - outw(1,PORT+L_ADDRREG); - outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */ - - outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */ -} - -/* - * open interface (up) - */ -static int ni65_open(struct net_device *dev) -{ - struct priv *p = dev->ml_priv; - int irqval = request_irq(dev->irq, ni65_interrupt,0, - cards[p->cardno].cardname,dev); - if (irqval) { - printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n", - dev->name,dev->irq, irqval); - return -EAGAIN; - } - - if(ni65_lance_reinit(dev)) - { - netif_start_queue(dev); - return 0; - } - else - { - free_irq(dev->irq,dev); - return -EAGAIN; - } -} - -/* - * close interface (down) - */ -static int ni65_close(struct net_device *dev) -{ - struct priv *p = dev->ml_priv; - - netif_stop_queue(dev); - - outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */ - -#ifdef XMT_VIA_SKB - { - int i; - for(i=0;itmd_skb[i]) { - dev_kfree_skb(p->tmd_skb[i]); - p->tmd_skb[i] = NULL; - } - } - } -#endif - free_irq(dev->irq,dev); - return 0; -} - -static void cleanup_card(struct net_device *dev) -{ - struct priv *p = dev->ml_priv; - disable_dma(dev->dma); - free_dma(dev->dma); - release_region(dev->base_addr, cards[p->cardno].total_size); - ni65_free_buffer(p); -} - -/* set: io,irq,dma or set it when calling insmod */ -static int irq; -static int io; -static int dma; - -/* - * Probe The Card (not the lance-chip) - */ -struct net_device * __init ni65_probe(int unit) -{ - struct net_device *dev = alloc_etherdev(0); - static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 }; - const int *port; - int err = 0; - - if (!dev) - return ERR_PTR(-ENOMEM); - - if (unit >= 0) { - 
sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - irq = dev->irq; - dma = dev->dma; - } else { - dev->base_addr = io; - } - - if (dev->base_addr > 0x1ff) { /* Check a single specified location. */ - err = ni65_probe1(dev, dev->base_addr); - } else if (dev->base_addr > 0) { /* Don't probe at all. */ - err = -ENXIO; - } else { - for (port = ports; *port && ni65_probe1(dev, *port); port++) - ; - if (!*port) - err = -ENODEV; - } - if (err) - goto out; - - err = register_netdev(dev); - if (err) - goto out1; - return dev; -out1: - cleanup_card(dev); -out: - free_netdev(dev); - return ERR_PTR(err); -} - -static const struct net_device_ops ni65_netdev_ops = { - .ndo_open = ni65_open, - .ndo_stop = ni65_close, - .ndo_start_xmit = ni65_send_packet, - .ndo_tx_timeout = ni65_timeout, - .ndo_set_rx_mode = set_multicast_list, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -/* - * this is the real card probe .. - */ -static int __init ni65_probe1(struct net_device *dev,int ioaddr) -{ - int i,j; - struct priv *p; - u8 addr[ETH_ALEN]; - unsigned long flags; - - dev->irq = irq; - dev->dma = dma; - - for(i=0;i= 0) { - if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 || - inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) { - release_region(ioaddr, cards[i].total_size); - continue; - } - } - if(cards[i].vendor_id) { - for(j=0;j<3;j++) - if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) - release_region(ioaddr, cards[i].total_size); - } - break; - } - if(i == NUM_CARDS) - return -ENODEV; - - for(j=0;j<6;j++) - addr[j] = inb(ioaddr+cards[i].addr_offset+j); - eth_hw_addr_set(dev, addr); - - if( (j=ni65_alloc_buffer(dev)) < 0) { - release_region(ioaddr, cards[i].total_size); - return j; - } - p = dev->ml_priv; - p->cmdr_addr = ioaddr + cards[i].cmd_offset; - p->cardno = i; - spin_lock_init(&p->ring_lock); - - printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr); - - outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ - if( (j=readreg(CSR0)) != 0x4) { - printk("failed.\n"); - printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j); - ni65_free_buffer(p); - release_region(ioaddr, cards[p->cardno].total_size); - return -EAGAIN; - } - - outw(88,PORT+L_ADDRREG); - if(inw(PORT+L_ADDRREG) == 88) { - unsigned long v; - v = inw(PORT+L_DATAREG); - v <<= 16; - outw(89,PORT+L_ADDRREG); - v |= inw(PORT+L_DATAREG); - printk("Version %#08lx, ",v); - p->features = INIT_RING_BEFORE_START; - } - else { - printk("ancient LANCE, "); - p->features = 0x0; - } - - if(test_bit(0,&cards[i].config)) { - dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3]; - dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3]; - printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma); - } - else { - if(dev->dma == 0) { - /* 'stuck test' from lance.c */ - unsigned long dma_channels = - ((inb(DMA1_STAT_REG) >> 4) & 0x0f) - | (inb(DMA2_STAT_REG) & 0xf0); - for(i=1;i<5;i++) { - int dma = dmatab[i]; - if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510")) - continue; - - flags=claim_dma_lock(); - disable_dma(dma); - set_dma_mode(dma,DMA_MODE_CASCADE); - enable_dma(dma); - release_dma_lock(flags); - - ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */ - - flags=claim_dma_lock(); - disable_dma(dma); - free_dma(dma); - release_dma_lock(flags); - - if(readreg(CSR0) & CSR0_IDON) - break; - } - if(i == 5) { - printk("failed.\n"); - printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name); - ni65_free_buffer(p); - 
release_region(ioaddr, cards[p->cardno].total_size); - return -EAGAIN; - } - dev->dma = dmatab[i]; - printk("DMA %d (autodetected), ",dev->dma); - } - else - printk("DMA %d (assigned), ",dev->dma); - - if(dev->irq < 2) - { - unsigned long irq_mask; - - ni65_init_lance(p,dev->dev_addr,0,0); - irq_mask = probe_irq_on(); - writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */ - msleep(20); - dev->irq = probe_irq_off(irq_mask); - if(!dev->irq) - { - printk("Failed to detect IRQ line!\n"); - ni65_free_buffer(p); - release_region(ioaddr, cards[p->cardno].total_size); - return -EAGAIN; - } - printk("IRQ %d (autodetected).\n",dev->irq); - } - else - printk("IRQ %d (assigned).\n",dev->irq); - } - - if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0) - { - printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma); - ni65_free_buffer(p); - release_region(ioaddr, cards[p->cardno].total_size); - return -EAGAIN; - } - - dev->base_addr = ioaddr; - dev->netdev_ops = &ni65_netdev_ops; - dev->watchdog_timeo = HZ/2; - - return 0; /* everything is OK */ -} - -/* - * set lance register and trigger init - */ -static void ni65_init_lance(struct priv *p,const unsigned char *daddr,int filter,int mode) -{ - int i; - u32 pib; - - writereg(CSR0_CLRALL|CSR0_STOP,CSR0); - - for(i=0;i<6;i++) - p->ib.eaddr[i] = daddr[i]; - - for(i=0;i<8;i++) - p->ib.filter[i] = filter; - p->ib.mode = mode; - - p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK; - p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK; - writereg(0,CSR3); /* busmaster/no word-swap */ - pib = (u32) isa_virt_to_bus(&p->ib); - writereg(pib & 0xffff,CSR1); - writereg(pib >> 16,CSR2); - - writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */ - - for(i=0;i<32;i++) - { - mdelay(4); - if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) ) - break; /* init ok ? */ - } -} - -/* - * allocate memory area and check the 16MB border - */ -static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type) -{ - struct sk_buff *skb=NULL; - unsigned char *ptr; - void *ret; - - if(type) { - ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA); - if(!skb) { - printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); - return NULL; - } - skb_reserve(skb,2+16); - skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ - ptr = skb->data; - } - else { - ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA); - if(!ret) - return NULL; - } - if( (u32) virt_to_phys(ptr+size) > 0x1000000) { - printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what); - if(type) - kfree_skb(skb); - else - kfree(ptr); - return NULL; - } - return ret; -} - -/* - * allocate all memory structures .. send/recv buffers etc ... - */ -static int ni65_alloc_buffer(struct net_device *dev) -{ - unsigned char *ptr; - struct priv *p; - int i; - - /* - * we need 8-aligned memory .. 
- */ - ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0); - if(!ptr) - return -ENOMEM; - - p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7); - memset((char *)p, 0, sizeof(struct priv)); - p->self = ptr; - - for(i=0;itmd_skb[i] = NULL; -#endif - p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0); - if(!p->tmdbounce[i]) { - ni65_free_buffer(p); - return -ENOMEM; - } - } - - for(i=0;irecv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1); - if(!p->recv_skb[i]) { - ni65_free_buffer(p); - return -ENOMEM; - } -#else - p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0); - if(!p->recvbounce[i]) { - ni65_free_buffer(p); - return -ENOMEM; - } -#endif - } - - return 0; /* everything is OK */ -} - -/* - * free buffers and private struct - */ -static void ni65_free_buffer(struct priv *p) -{ - int i; - - if(!p) - return; - - for(i=0;itmdbounce[i]); -#ifdef XMT_VIA_SKB - dev_kfree_skb(p->tmd_skb[i]); -#endif - } - - for(i=0;irecv_skb[i]); -#else - kfree(p->recvbounce[i]); -#endif - } - kfree(p->self); -} - - -/* - * stop and (re)start lance .. e.g after an error - */ -static void ni65_stop_start(struct net_device *dev,struct priv *p) -{ - int csr0 = CSR0_INEA; - - writedatareg(CSR0_STOP); - - if(debuglevel > 1) - printk(KERN_DEBUG "ni65_stop_start\n"); - - if(p->features & INIT_RING_BEFORE_START) { - int i; -#ifdef XMT_VIA_SKB - struct sk_buff *skb_save[TMDNUM]; -#endif - unsigned long buffer[TMDNUM]; - short blen[TMDNUM]; - - if(p->xmit_queued) { - while(1) { - if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN)) - break; - p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); - if(p->tmdlast == p->tmdnum) - break; - } - } - - for(i=0;itmdhead + i; -#ifdef XMT_VIA_SKB - skb_save[i] = p->tmd_skb[i]; -#endif - buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer); - blen[i] = tmdp->blen; - tmdp->u.s.status = 0x0; - } - - for(i=0;irmdhead + i; - rmdp->u.s.status = RCV_OWN; - } - p->tmdnum = p->xmit_queued = 0; - writedatareg(CSR0_STRT | csr0); - - for(i=0;itmdlast) & (TMDNUM-1); - p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */ - p->tmdhead[i].blen = blen[num]; - if(p->tmdhead[i].u.s.status & XMIT_OWN) { - p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); - p->xmit_queued = 1; - writedatareg(CSR0_TDMD | CSR0_INEA | csr0); - } -#ifdef XMT_VIA_SKB - p->tmd_skb[i] = skb_save[num]; -#endif - } - p->rmdnum = p->tmdlast = 0; - if(!p->lock) - if (p->tmdnum || !p->xmit_queued) - netif_wake_queue(dev); - netif_trans_update(dev); /* prevent tx timeout */ - } - else - writedatareg(CSR0_STRT | csr0); -} - -/* - * init lance (write init-values .. 
init-buffers) (open-helper) - */ -static int ni65_lance_reinit(struct net_device *dev) -{ - int i; - struct priv *p = dev->ml_priv; - unsigned long flags; - - p->lock = 0; - p->xmit_queued = 0; - - flags=claim_dma_lock(); - disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */ - set_dma_mode(dev->dma,DMA_MODE_CASCADE); - enable_dma(dev->dma); - release_dma_lock(flags); - - outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ - if( (i=readreg(CSR0) ) != 0x4) - { - printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name, - cards[p->cardno].cardname,(int) i); - flags=claim_dma_lock(); - disable_dma(dev->dma); - release_dma_lock(flags); - return 0; - } - - p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0; - for(i=0;itmdhead + i; -#ifdef XMT_VIA_SKB - if(p->tmd_skb[i]) { - dev_kfree_skb(p->tmd_skb[i]); - p->tmd_skb[i] = NULL; - } -#endif - tmdp->u.buffer = 0x0; - tmdp->u.s.status = XMIT_START | XMIT_END; - tmdp->blen = tmdp->status2 = 0; - } - - for(i=0;irmdhead + i; -#ifdef RCV_VIA_SKB - rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data); -#else - rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]); -#endif - rmdp->blen = -(R_BUF_SIZE-8); - rmdp->mlen = 0; - rmdp->u.s.status = RCV_OWN; - } - - if(dev->flags & IFF_PROMISC) - ni65_init_lance(p,dev->dev_addr,0x00,M_PROM); - else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI) - ni65_init_lance(p,dev->dev_addr,0xff,0x0); - else - ni65_init_lance(p,dev->dev_addr,0x00,0x00); - - /* - * ni65_set_lance_mem() sets L_ADDRREG to CSR0 - * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED - */ - - if(inw(PORT+L_DATAREG) & CSR0_IDON) { - ni65_set_performance(p); - /* init OK: start lance , enable interrupts */ - writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT); - return 1; /* ->OK */ - } - printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG)); - flags=claim_dma_lock(); - disable_dma(dev->dma); - release_dma_lock(flags); - return 0; /* ->Error */ -} - -/* - * interrupt handler - */ -static irqreturn_t ni65_interrupt(int irq, void * dev_id) -{ - int csr0 = 0; - struct net_device *dev = dev_id; - struct priv *p; - int bcnt = 32; - - p = dev->ml_priv; - - spin_lock(&p->ring_lock); - - while(--bcnt) { - csr0 = inw(PORT+L_DATAREG); - -#if 0 - writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */ -#else - writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */ -#endif - - if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT))) - break; - - if(csr0 & CSR0_RINT) /* RECV-int? */ - ni65_recv_intr(dev,csr0); - if(csr0 & CSR0_TINT) /* XMIT-int? */ - ni65_xmit_intr(dev,csr0); - - if(csr0 & CSR0_ERR) - { - if(debuglevel > 1) - printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); - if(csr0 & CSR0_BABL) - dev->stats.tx_errors++; - if(csr0 & CSR0_MISS) { - int i; - for(i=0;irmdhead[i].u.s.status); - printk("\n"); - dev->stats.rx_errors++; - } - if(csr0 & CSR0_MERR) { - if(debuglevel > 1) - printk(KERN_ERR "%s: Ooops .. 
memory error: %04x.\n",dev->name,csr0); - ni65_stop_start(dev,p); - } - } - } - -#ifdef RCV_PARANOIA_CHECK -{ - int j; - for(j=0;j0;i--) { - num2 = (p->rmdnum + i) & (RMDNUM-1); - if(!(p->rmdhead[num2].u.s.status & RCV_OWN)) - break; - } - - if(i) { - int k, num1; - for(k=0;krmdnum + k) & (RMDNUM-1); - if(!(p->rmdhead[num1].u.s.status & RCV_OWN)) - break; - } - if(!k) - break; - - if(debuglevel > 0) - { - char buf[256],*buf1; - buf1 = buf; - for(k=0;krmdhead[k].u.s.status)); /* & RCV_OWN) ); */ - buf1 += 3; - } - *buf1 = 0; - printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf); - } - - p->rmdnum = num1; - ni65_recv_intr(dev,csr0); - if((p->rmdhead[num2].u.s.status & RCV_OWN)) - break; /* ok, we are 'in sync' again */ - } - else - break; - } -} -#endif - - if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) { - printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name); - ni65_stop_start(dev,p); - } - else - writedatareg(CSR0_INEA); - - spin_unlock(&p->ring_lock); - return IRQ_HANDLED; -} - -/* - * We have received an Xmit-Interrupt .. - * send a new packet if necessary - */ -static void ni65_xmit_intr(struct net_device *dev,int csr0) -{ - struct priv *p = dev->ml_priv; - - while(p->xmit_queued) - { - struct tmd *tmdp = p->tmdhead + p->tmdlast; - int tmdstat = tmdp->u.s.status; - - if(tmdstat & XMIT_OWN) - break; - - if(tmdstat & XMIT_ERR) - { -#if 0 - if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3) - printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name); -#endif - /* checking some errors */ - if(tmdp->status2 & XMIT_RTRY) - dev->stats.tx_aborted_errors++; - if(tmdp->status2 & XMIT_LCAR) - dev->stats.tx_carrier_errors++; - if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) { - /* this stops the xmitter */ - dev->stats.tx_fifo_errors++; - if(debuglevel > 0) - printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name); - if(p->features & INIT_RING_BEFORE_START) { - tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */ - ni65_stop_start(dev,p); - break; /* no more Xmit processing .. */ - } - else - ni65_stop_start(dev,p); - } - if(debuglevel > 2) - printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2); - if(!(csr0 & CSR0_BABL)) /* don't count errors twice */ - dev->stats.tx_errors++; - tmdp->status2 = 0; - } - else { - dev->stats.tx_bytes -= (short)(tmdp->blen); - dev->stats.tx_packets++; - } - -#ifdef XMT_VIA_SKB - if(p->tmd_skb[p->tmdlast]) { - dev_consume_skb_irq(p->tmd_skb[p->tmdlast]); - p->tmd_skb[p->tmdlast] = NULL; - } -#endif - - p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); - if(p->tmdlast == p->tmdnum) - p->xmit_queued = 0; - } - netif_wake_queue(dev); -} - -/* - * We have received a packet - */ -static void ni65_recv_intr(struct net_device *dev,int csr0) -{ - struct rmd *rmdp; - int rmdstat,len; - int cnt=0; - struct priv *p = dev->ml_priv; - - rmdp = p->rmdhead + p->rmdnum; - while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN)) - { - cnt++; - if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? 
*/ - { - if(!(rmdstat & RCV_ERR)) { - if(rmdstat & RCV_START) - { - dev->stats.rx_length_errors++; - printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff); - } - } - else { - if(debuglevel > 2) - printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n", - dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) ); - if(rmdstat & RCV_FRAM) - dev->stats.rx_frame_errors++; - if(rmdstat & RCV_OFLO) - dev->stats.rx_over_errors++; - if(rmdstat & RCV_CRC) - dev->stats.rx_crc_errors++; - if(rmdstat & RCV_BUF_ERR) - dev->stats.rx_fifo_errors++; - } - if(!(csr0 & CSR0_MISS)) /* don't count errors twice */ - dev->stats.rx_errors++; - } - else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) - { -#ifdef RCV_VIA_SKB - struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC); - if (skb) - skb_reserve(skb,16); -#else - struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); -#endif - if(skb) - { - skb_reserve(skb,2); -#ifdef RCV_VIA_SKB - if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { - skb_put(skb,len); - skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len); - } - else { - struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; - skb_put(skb,R_BUF_SIZE); - p->recv_skb[p->rmdnum] = skb; - rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); - skb = skb1; - skb_trim(skb,len); - } -#else - skb_put(skb,len); - skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); -#endif - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - skb->protocol=eth_type_trans(skb,dev); - netif_rx(skb); - } - else - { - printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name); - dev->stats.rx_dropped++; - } - } - else { - printk(KERN_INFO "%s: received runt packet\n",dev->name); - dev->stats.rx_errors++; - } - rmdp->blen = -(R_BUF_SIZE-8); - rmdp->mlen = 0; - rmdp->u.s.status = RCV_OWN; /* change owner */ - p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1); - rmdp = p->rmdhead + p->rmdnum; - } -} - -/* - * kick xmitter .. - */ - -static void ni65_timeout(struct net_device *dev, unsigned int txqueue) -{ - int i; - struct priv *p = dev->ml_priv; - - printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name); - for(i=0;itmdhead[i].u.s.status); - printk("\n"); - ni65_lance_reinit(dev); - netif_trans_update(dev); /* prevent tx timeout */ - netif_wake_queue(dev); -} - -/* - * Send a packet - */ - -static netdev_tx_t ni65_send_packet(struct sk_buff *skb, - struct net_device *dev) -{ - struct priv *p = dev->ml_priv; - - netif_stop_queue(dev); - - if (test_and_set_bit(0, (void*)&p->lock)) { - printk(KERN_ERR "%s: Queue was locked.\n", dev->name); - return NETDEV_TX_BUSY; - } - - { - short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; - struct tmd *tmdp; - unsigned long flags; - -#ifdef XMT_VIA_SKB - if( (unsigned long) (skb->data + skb->len) > 0x1000000) { -#endif - - skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], - skb->len > T_BUF_SIZE ? 
T_BUF_SIZE : - skb->len); - if (len > skb->len) - memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); - dev_kfree_skb (skb); - - spin_lock_irqsave(&p->ring_lock, flags); - tmdp = p->tmdhead + p->tmdnum; - tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]); - p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1); - -#ifdef XMT_VIA_SKB - } - else { - spin_lock_irqsave(&p->ring_lock, flags); - - tmdp = p->tmdhead + p->tmdnum; - tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); - p->tmd_skb[p->tmdnum] = skb; - } -#endif - tmdp->blen = -len; - - tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; - writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */ - - p->xmit_queued = 1; - p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); - - if(p->tmdnum != p->tmdlast) - netif_wake_queue(dev); - - p->lock = 0; - - spin_unlock_irqrestore(&p->ring_lock, flags); - } - - return NETDEV_TX_OK; -} - -static void set_multicast_list(struct net_device *dev) -{ - if(!ni65_lance_reinit(dev)) - printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name); - netif_wake_queue(dev); -} - -#ifdef MODULE -static struct net_device *dev_ni65; - -module_param_hw(irq, int, irq, 0); -module_param_hw(io, int, ioport, 0); -module_param_hw(dma, int, dma, 0); -MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); -MODULE_PARM_DESC(io, "ni6510 I/O base address"); -MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); - -static int __init ni65_init_module(void) -{ - dev_ni65 = ni65_probe(-1); - return PTR_ERR_OR_ZERO(dev_ni65); -} -module_init(ni65_init_module); - -static void __exit ni65_cleanup_module(void) -{ - unregister_netdev(dev_ni65); - cleanup_card(dev_ni65); - free_netdev(dev_ni65); -} -module_exit(ni65_cleanup_module); -#endif /* MODULE */ - -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h deleted file mode 100644 index e6217e35edf0..000000000000 --- a/drivers/net/ethernet/amd/ni65.h +++ /dev/null @@ -1,121 +0,0 @@ -/* am7990 (lance) definitions - * - * This is an extension to the Linux operating system, and is covered by - * same GNU General Public License that covers that work. - * - * Michael Hipp - * email: mhipp@student.uni-tuebingen.de - * - * sources: (mail me or ask archie if you need them) - * crynwr-packet-driver - */ - -/* - * Control and Status Register 0 (CSR0) bit definitions - * (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write) - * - */ - -#define CSR0_ERR 0x8000 /* Error summary (R) */ -#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */ -#define CSR0_CERR 0x2000 /* Collision Error (RC) */ -#define CSR0_MISS 0x1000 /* Missed packet (RC) */ -#define CSR0_MERR 0x0800 /* Memory Error (RC) */ -#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */ -#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */ -#define CSR0_IDON 0x0100 /* Initialization Done (RC) */ -#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */ -#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */ -#define CSR0_RXON 0x0020 /* Receiver on (R) */ -#define CSR0_TXON 0x0010 /* Transmitter on (R) */ -#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */ -#define CSR0_STOP 0x0004 /* Stop (RS) */ -#define CSR0_STRT 0x0002 /* Start (RS) */ -#define CSR0_INIT 0x0001 /* Initialize (RS) */ - -#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */ -/* - * Initialization Block Mode operation Bit Definitions. 
- */ - -#define M_PROM 0x8000 /* Promiscuous Mode */ -#define M_INTL 0x0040 /* Internal Loopback */ -#define M_DRTY 0x0020 /* Disable Retry */ -#define M_COLL 0x0010 /* Force Collision */ -#define M_DTCR 0x0008 /* Disable Transmit CRC) */ -#define M_LOOP 0x0004 /* Loopback */ -#define M_DTX 0x0002 /* Disable the Transmitter */ -#define M_DRX 0x0001 /* Disable the Receiver */ - - -/* - * Receive message descriptor bit definitions. - */ - -#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */ -#define RCV_ERR 0x40 /* Error Summary */ -#define RCV_FRAM 0x20 /* Framing Error */ -#define RCV_OFLO 0x10 /* Overflow Error */ -#define RCV_CRC 0x08 /* CRC Error */ -#define RCV_BUF_ERR 0x04 /* Buffer Error */ -#define RCV_START 0x02 /* Start of Packet */ -#define RCV_END 0x01 /* End of Packet */ - - -/* - * Transmit message descriptor bit definitions. - */ - -#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */ -#define XMIT_ERR 0x40 /* Error Summary */ -#define XMIT_RETRY 0x10 /* more the 1 retry needed to Xmit */ -#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */ -#define XMIT_DEF 0x04 /* Deferred */ -#define XMIT_START 0x02 /* Start of Packet */ -#define XMIT_END 0x01 /* End of Packet */ - -/* - * transmit status (2) (valid if XMIT_ERR == 1) - */ - -#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */ -#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */ -#define XMIT_LCAR 0x0800 /* Loss of Carrier */ -#define XMIT_LCOL 0x1000 /* Late collision */ -#define XMIT_RESERV 0x2000 /* Reserved */ -#define XMIT_UFLO 0x4000 /* Underflow (late memory) */ -#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */ - -struct init_block { - unsigned short mode; - unsigned char eaddr[6]; - unsigned char filter[8]; - /* bit 29-31: number of rmd's (power of 2) */ - u32 rrp; /* receive ring pointer (align 8) */ - /* bit 29-31: number of tmd's (power of 2) */ - u32 trp; /* transmit ring pointer (align 8) */ -}; - -struct rmd { /* Receive Message Descriptor */ - union { - volatile u32 buffer; - struct { - volatile unsigned char dummy[3]; - volatile unsigned char status; - } s; - } u; - volatile short blen; - volatile unsigned short mlen; -}; - -struct tmd { - union { - volatile u32 buffer; - struct { - volatile unsigned char dummy[3]; - volatile unsigned char status; - } s; - } u; - volatile unsigned short blen; - volatile unsigned short status2; -}; -- cgit v1.2.3 From 11ecf3412bdc583defd9c79584dd64ff82aa796d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 10 May 2022 19:43:20 +0300 Subject: net: dsa: ocelot: accept 1000base-X for VSC9959 and VSC9953 Switches using the Lynx PCS driver support 1000base-X optical SFP modules. Accept this interface type on a port. 
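For illustration only (this is not code from the patch): the per-port mode words such as vsc9953_port_modes[] are checked against felix_phy_match_table[] when a phy-mode is validated, so accepting 1000base-X amounts to setting OCELOT_PORT_MODE_1000BASEX in the relevant masks. A minimal sketch of that check, where example_port_mode_supported() is a hypothetical helper name:

static bool example_port_mode_supported(u32 port_modes, phy_interface_t phy_mode)
{
	if (phy_mode >= PHY_INTERFACE_MODE_MAX)
		return false;

	/* The port accepts the mode only if the matching capability bit,
	 * e.g. OCELOT_PORT_MODE_1000BASEX, is set in its port_modes word.
	 */
	return port_modes & felix_phy_match_table[phy_mode];
}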
Signed-off-by: Vladimir Oltean Link: https://lore.kernel.org/r/20220510164320.10313-1-vladimir.oltean@nxp.com Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 1 + drivers/net/dsa/ocelot/felix.h | 1 + drivers/net/dsa/ocelot/felix_vsc9959.c | 2 ++ drivers/net/dsa/ocelot/seville_vsc9953.c | 4 +++- 4 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index a4882e493cf4..f5c9d695408a 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1008,6 +1008,7 @@ static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = { [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, + [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX, [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, }; diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 39faf1027965..a5e570826773 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -12,6 +12,7 @@ #define OCELOT_PORT_MODE_QSGMII BIT(2) #define OCELOT_PORT_MODE_2500BASEX BIT(3) #define OCELOT_PORT_MODE_USXGMII BIT(4) +#define OCELOT_PORT_MODE_1000BASEX BIT(5) /* Platform-specific information */ struct felix_info { diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 081871824eaf..98caca4317d7 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -28,6 +28,7 @@ #define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \ OCELOT_PORT_MODE_QSGMII | \ + OCELOT_PORT_MODE_1000BASEX | \ OCELOT_PORT_MODE_2500BASEX | \ OCELOT_PORT_MODE_USXGMII) @@ -973,6 +974,7 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, phylink_set(mask, 100baseT_Full); phylink_set(mask, 1000baseT_Half); phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); if (state->interface == PHY_INTERFACE_MODE_INTERNAL || state->interface == PHY_INTERFACE_MODE_2500BASEX || diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 48fd43a93364..ea0649211356 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -21,7 +21,8 @@ #define VSC9953_VCAP_POLICER_BASE2 120 #define VSC9953_VCAP_POLICER_MAX2 161 -#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \ +#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \ + OCELOT_PORT_MODE_SGMII | \ OCELOT_PORT_MODE_QSGMII) static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = { @@ -947,6 +948,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, phylink_set(mask, 100baseT_Full); phylink_set(mask, 100baseT_Half); phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); if (state->interface == PHY_INTERFACE_MODE_INTERNAL) { phylink_set(mask, 2500baseT_Full); -- cgit v1.2.3 From 1900e30d0ef74755213811cc23079b1be51298fe Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Mon, 9 May 2022 13:46:34 -0600 Subject: net: macb: simplify/cleanup NAPI reschedule checking Previously the macb_poll method was checking the RSR register after completing its RX receive work to see if additional packets had been received since IRQs were disabled, since this controller does not maintain the pending IRQ status across IRQ disable. 
It also had to double-check the register after re-enabling IRQs to detect if packets were received after the first check but before IRQs were enabled. Using the RSR register for this purpose is problematic since it reflects the global device state rather than the per-queue state, so if packets are being received on multiple queues it may end up retriggering receive on a queue where the packets did not actually arrive and not on the one where they did arrive. This will also cause problems with an upcoming change to use NAPI for the TX path where use of multiple queues is more likely. Add a macb_rx_pending function to check the RX ring to see if more packets have arrived in the queue, and use that to check if NAPI should be rescheduled rather than the RSR register. By doing this, we can just ignore the global RSR register entirely, and thus save some extra device register accesses at the same time. This also makes the previous first check for pending packets rather redundant, since it would be checking the RX ring state which was just checked in the receive work function. Therefore we can get rid of it and just check after enabling interrupts whether packets are already pending. Signed-off-by: Robert Hancock Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cadence/macb_main.c | 65 +++++++++++++++----------------- 1 file changed, 31 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 6434e74c04f1..160dc5ad84ae 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1554,54 +1554,51 @@ static int macb_rx(struct macb_queue *queue, struct napi_struct *napi, return received; } +static bool macb_rx_pending(struct macb_queue *queue) +{ + struct macb *bp = queue->bp; + unsigned int entry; + struct macb_dma_desc *desc; + + entry = macb_rx_ring_wrap(bp, queue->rx_tail); + desc = macb_rx_desc(queue, entry); + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + return (desc->addr & MACB_BIT(RX_USED)) != 0; +} + static int macb_poll(struct napi_struct *napi, int budget) { struct macb_queue *queue = container_of(napi, struct macb_queue, napi); struct macb *bp = queue->bp; int work_done; - u32 status; - status = macb_readl(bp, RSR); - macb_writel(bp, RSR, status); + work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); - netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", - (unsigned long)status, budget); + netdev_vdbg(bp->dev, "poll: queue = %u, work_done = %d, budget = %d\n", + (unsigned int)(queue - bp->queues), work_done, budget); - work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); - if (work_done < budget) { - napi_complete_done(napi, work_done); + if (work_done < budget && napi_complete_done(napi, work_done)) { + queue_writel(queue, IER, bp->rx_intr_mask); - /* RSR bits only seem to propagate to raise interrupts when - * interrupts are enabled at the time, so if bits are already - * set due to packets received while interrupts were disabled, + /* Packet completions only seem to propagate to raise + * interrupts when interrupts are enabled at the time, so if + * packets were received while interrupts were disabled, * they will not cause another interrupt to be generated when * interrupts are re-enabled. - * Check for this case here. This has been seen to happen - * around 30% of the time under heavy network load. + * Check for this case here to avoid losing a wakeup. 
This can + * potentially race with the interrupt handler doing the same + * actions if an interrupt is raised just after enabling them, + * but this should be harmless. */ - status = macb_readl(bp, RSR); - if (status) { + if (macb_rx_pending(queue)) { + queue_writel(queue, IDR, bp->rx_intr_mask); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(RCOMP)); - napi_reschedule(napi); - } else { - queue_writel(queue, IER, bp->rx_intr_mask); - - /* In rare cases, packets could have been received in - * the window between the check above and re-enabling - * interrupts. Therefore, a double-check is required - * to avoid losing a wakeup. This can potentially race - * with the interrupt handler doing the same actions - * if an interrupt is raised just after enabling them, - * but this should be harmless. - */ - status = macb_readl(bp, RSR); - if (unlikely(status)) { - queue_writel(queue, IDR, bp->rx_intr_mask); - if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) - queue_writel(queue, ISR, MACB_BIT(RCOMP)); - napi_schedule(napi); - } + netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n"); + napi_schedule(napi); } } -- cgit v1.2.3 From 138badbc21a0113ce783270c426210d6451da5d1 Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Mon, 9 May 2022 13:46:35 -0600 Subject: net: macb: use NAPI for TX completion path This driver was using the TX IRQ handler to perform all TX completion tasks. Under heavy TX network load, this can cause significant irqs-off latencies (found to be in the hundreds of microseconds using ftrace). This can cause other issues, such as overrunning serial UART FIFOs when using high baud rates with limited UART FIFO sizes. Switch to using a NAPI poll handler to perform the TX completion work to get this out of hard IRQ context and avoid the IRQ latency impact. A separate NAPI instance is used for TX and RX to avoid checking the other ring's state unnecessarily when doing the poll, and so that the NAPI budget handling can work for both TX and RX packets. A new per-queue tx_ptr_lock spinlock has been added to avoid using the main device lock (with IRQs needing to be disabled) across the entire TX mapping operation, and also to protect the TX queue pointers from concurrent access between the TX start and TX poll operations. The TX Used Bit Read interrupt (TXUBR) handling also needs to be moved into the TX NAPI poll handler to maintain the proper order of operations. A flag is used to notify the poll handler that a UBR condition needs to be handled. The macb_tx_restart handler has had some locking added for global register access, since this could now potentially happen concurrently on different queues. 
Signed-off-by: Robert Hancock Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cadence/macb.h | 6 +- drivers/net/ethernet/cadence/macb_main.c | 228 +++++++++++++++++++++---------- 2 files changed, 161 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index f0a7d8396a4a..7ca077b65eaa 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -1204,11 +1204,15 @@ struct macb_queue { unsigned int RBQP; unsigned int RBQPH; + /* Lock to protect tx_head and tx_tail */ + spinlock_t tx_ptr_lock; unsigned int tx_head, tx_tail; struct macb_dma_desc *tx_ring; struct macb_tx_skb *tx_skb; dma_addr_t tx_ring_dma; struct work_struct tx_error_task; + bool txubr_pending; + struct napi_struct napi_tx; dma_addr_t rx_ring_dma; dma_addr_t rx_buffers_dma; @@ -1217,7 +1221,7 @@ struct macb_queue { struct macb_dma_desc *rx_ring; struct sk_buff **rx_skbuff; void *rx_buffers; - struct napi_struct napi; + struct napi_struct napi_rx; struct queue_stats stats; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 160dc5ad84ae..e993616308f8 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -959,7 +959,7 @@ static int macb_halt_tx(struct macb *bp) return -ETIMEDOUT; } -static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) +static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget) { if (tx_skb->mapping) { if (tx_skb->mapped_as_page) @@ -972,7 +972,7 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) } if (tx_skb->skb) { - dev_kfree_skb_any(tx_skb->skb); + napi_consume_skb(tx_skb->skb, budget); tx_skb->skb = NULL; } } @@ -1025,12 +1025,13 @@ static void macb_tx_error_task(struct work_struct *work) (unsigned int)(queue - bp->queues), queue->tx_tail, queue->tx_head); - /* Prevent the queue IRQ handlers from running: each of them may call - * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). + /* Prevent the queue NAPI TX poll from running, as it calls + * macb_tx_complete(), which in turn may call netif_wake_subqueue(). * As explained below, we have to halt the transmission before updating * TBQP registers so we call netif_tx_stop_all_queues() to notify the * network engine about the macb/gem being halted. 
*/ + napi_disable(&queue->napi_tx); spin_lock_irqsave(&bp->lock, flags); /* Make sure nobody is trying to queue up new packets */ @@ -1058,7 +1059,7 @@ static void macb_tx_error_task(struct work_struct *work) if (ctrl & MACB_BIT(TX_USED)) { /* skb is set for the last buffer of the frame */ while (!skb) { - macb_tx_unmap(bp, tx_skb); + macb_tx_unmap(bp, tx_skb, 0); tail++; tx_skb = macb_tx_skb(queue, tail); skb = tx_skb->skb; @@ -1088,7 +1089,7 @@ static void macb_tx_error_task(struct work_struct *work) desc->ctrl = ctrl | MACB_BIT(TX_USED); } - macb_tx_unmap(bp, tx_skb); + macb_tx_unmap(bp, tx_skb, 0); } /* Set end of TX queue */ @@ -1118,27 +1119,20 @@ static void macb_tx_error_task(struct work_struct *work) macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); spin_unlock_irqrestore(&bp->lock, flags); + napi_enable(&queue->napi_tx); } -static void macb_tx_interrupt(struct macb_queue *queue) +static int macb_tx_complete(struct macb_queue *queue, int budget) { - unsigned int tail; - unsigned int head; - u32 status; struct macb *bp = queue->bp; u16 queue_index = queue - bp->queues; + unsigned int tail; + unsigned int head; + int packets = 0; - status = macb_readl(bp, TSR); - macb_writel(bp, TSR, status); - - if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) - queue_writel(queue, ISR, MACB_BIT(TCOMP)); - - netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", - (unsigned long)status); - + spin_lock(&queue->tx_ptr_lock); head = queue->tx_head; - for (tail = queue->tx_tail; tail != head; tail++) { + for (tail = queue->tx_tail; tail != head && packets < budget; tail++) { struct macb_tx_skb *tx_skb; struct sk_buff *skb; struct macb_dma_desc *desc; @@ -1179,10 +1173,11 @@ static void macb_tx_interrupt(struct macb_queue *queue) queue->stats.tx_packets++; bp->dev->stats.tx_bytes += skb->len; queue->stats.tx_bytes += skb->len; + packets++; } /* Now we can safely release resources */ - macb_tx_unmap(bp, tx_skb); + macb_tx_unmap(bp, tx_skb, budget); /* skb is set only for the last buffer of the frame. 
* WARNING: at this point skb has been freed by @@ -1198,6 +1193,9 @@ static void macb_tx_interrupt(struct macb_queue *queue) CIRC_CNT(queue->tx_head, queue->tx_tail, bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) netif_wake_subqueue(bp->dev, queue_index); + spin_unlock(&queue->tx_ptr_lock); + + return packets; } static void gem_rx_refill(struct macb_queue *queue) @@ -1569,15 +1567,15 @@ static bool macb_rx_pending(struct macb_queue *queue) return (desc->addr & MACB_BIT(RX_USED)) != 0; } -static int macb_poll(struct napi_struct *napi, int budget) +static int macb_rx_poll(struct napi_struct *napi, int budget) { - struct macb_queue *queue = container_of(napi, struct macb_queue, napi); + struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx); struct macb *bp = queue->bp; int work_done; work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); - netdev_vdbg(bp->dev, "poll: queue = %u, work_done = %d, budget = %d\n", + netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n", (unsigned int)(queue - bp->queues), work_done, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -1607,6 +1605,90 @@ static int macb_poll(struct napi_struct *napi, int budget) return work_done; } +static void macb_tx_restart(struct macb_queue *queue) +{ + struct macb *bp = queue->bp; + unsigned int head_idx, tbqp; + + spin_lock(&queue->tx_ptr_lock); + + if (queue->tx_head == queue->tx_tail) + goto out_tx_ptr_unlock; + + tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); + tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); + head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head)); + + if (tbqp == head_idx) + goto out_tx_ptr_unlock; + + spin_lock_irq(&bp->lock); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); + spin_unlock_irq(&bp->lock); + +out_tx_ptr_unlock: + spin_unlock(&queue->tx_ptr_lock); +} + +static bool macb_tx_complete_pending(struct macb_queue *queue) +{ + bool retval = false; + + spin_lock(&queue->tx_ptr_lock); + if (queue->tx_head != queue->tx_tail) { + /* Make hw descriptor updates visible to CPU */ + rmb(); + + if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED)) + retval = true; + } + spin_unlock(&queue->tx_ptr_lock); + return retval; +} + +static int macb_tx_poll(struct napi_struct *napi, int budget) +{ + struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx); + struct macb *bp = queue->bp; + int work_done; + + work_done = macb_tx_complete(queue, budget); + + rmb(); // ensure txubr_pending is up to date + if (queue->txubr_pending) { + queue->txubr_pending = false; + netdev_vdbg(bp->dev, "poll: tx restart\n"); + macb_tx_restart(queue); + } + + netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n", + (unsigned int)(queue - bp->queues), work_done, budget); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + queue_writel(queue, IER, MACB_BIT(TCOMP)); + + /* Packet completions only seem to propagate to raise + * interrupts when interrupts are enabled at the time, so if + * packets were sent while interrupts were disabled, + * they will not cause another interrupt to be generated when + * interrupts are re-enabled. + * Check for this case here to avoid losing a wakeup. This can + * potentially race with the interrupt handler doing the same + * actions if an interrupt is raised just after enabling them, + * but this should be harmless. 
+ */ + if (macb_tx_complete_pending(queue)) { + queue_writel(queue, IDR, MACB_BIT(TCOMP)); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(TCOMP)); + netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n"); + napi_schedule(napi); + } + } + + return work_done; +} + static void macb_hresp_error_task(struct tasklet_struct *t) { struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); @@ -1646,29 +1728,6 @@ static void macb_hresp_error_task(struct tasklet_struct *t) netif_tx_start_all_queues(dev); } -static void macb_tx_restart(struct macb_queue *queue) -{ - unsigned int head = queue->tx_head; - unsigned int tail = queue->tx_tail; - struct macb *bp = queue->bp; - unsigned int head_idx, tbqp; - - if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) - queue_writel(queue, ISR, MACB_BIT(TXUBR)); - - if (head == tail) - return; - - tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); - tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); - head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); - - if (tbqp == head_idx) - return; - - macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); -} - static irqreturn_t macb_wol_interrupt(int irq, void *dev_id) { struct macb_queue *queue = dev_id; @@ -1765,9 +1824,27 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(RCOMP)); - if (napi_schedule_prep(&queue->napi)) { + if (napi_schedule_prep(&queue->napi_rx)) { netdev_vdbg(bp->dev, "scheduling RX softirq\n"); - __napi_schedule(&queue->napi); + __napi_schedule(&queue->napi_rx); + } + } + + if (status & (MACB_BIT(TCOMP) | + MACB_BIT(TXUBR))) { + queue_writel(queue, IDR, MACB_BIT(TCOMP)); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(TCOMP) | + MACB_BIT(TXUBR)); + + if (status & MACB_BIT(TXUBR)) { + queue->txubr_pending = true; + wmb(); // ensure softirq can see update + } + + if (napi_schedule_prep(&queue->napi_tx)) { + netdev_vdbg(bp->dev, "scheduling TX softirq\n"); + __napi_schedule(&queue->napi_tx); } } @@ -1781,12 +1858,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) break; } - if (status & MACB_BIT(TCOMP)) - macb_tx_interrupt(queue); - - if (status & MACB_BIT(TXUBR)) - macb_tx_restart(queue); - /* Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY. */ @@ -2019,7 +2090,7 @@ dma_error: for (i = queue->tx_head; i != tx_head; i++) { tx_skb = macb_tx_skb(queue, i); - macb_tx_unmap(bp, tx_skb); + macb_tx_unmap(bp, tx_skb, 0); } return 0; @@ -2141,7 +2212,6 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) u16 queue_index = skb_get_queue_mapping(skb); struct macb *bp = netdev_priv(dev); struct macb_queue *queue = &bp->queues[queue_index]; - unsigned long flags; unsigned int desc_cnt, nr_frags, frag_size, f; unsigned int hdrlen; bool is_lso; @@ -2198,16 +2268,16 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); } - spin_lock_irqsave(&bp->lock, flags); + spin_lock_bh(&queue->tx_ptr_lock); /* This is a hard error, log it. 
*/ if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < desc_cnt) { netif_stop_subqueue(dev, queue_index); - spin_unlock_irqrestore(&bp->lock, flags); netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", queue->tx_head, queue->tx_tail); - return NETDEV_TX_BUSY; + ret = NETDEV_TX_BUSY; + goto unlock; } /* Map socket buffer for DMA transfer */ @@ -2220,13 +2290,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); skb_tx_timestamp(skb); + spin_lock_irq(&bp->lock); macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); + spin_unlock_irq(&bp->lock); if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) netif_stop_subqueue(dev, queue_index); unlock: - spin_unlock_irqrestore(&bp->lock, flags); + spin_unlock_bh(&queue->tx_ptr_lock); return ret; } @@ -2760,8 +2832,10 @@ static int macb_open(struct net_device *dev) goto pm_exit; } - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) - napi_enable(&queue->napi); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + napi_enable(&queue->napi_rx); + napi_enable(&queue->napi_tx); + } macb_init_hw(bp); @@ -2785,8 +2859,10 @@ phy_off: reset_hw: macb_reset_hw(bp); - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) - napi_disable(&queue->napi); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + napi_disable(&queue->napi_rx); + napi_disable(&queue->napi_tx); + } macb_free_consistent(bp); pm_exit: pm_runtime_put_sync(&bp->pdev->dev); @@ -2802,8 +2878,10 @@ static int macb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); - for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) - napi_disable(&queue->napi); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + napi_disable(&queue->napi_rx); + napi_disable(&queue->napi_tx); + } phylink_stop(bp->phylink); phylink_disconnect_phy(bp->phylink); @@ -3865,7 +3943,9 @@ static int macb_init(struct platform_device *pdev) queue = &bp->queues[q]; queue->bp = bp; - netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT); + spin_lock_init(&queue->tx_ptr_lock); + netif_napi_add(dev, &queue->napi_rx, macb_rx_poll, NAPI_POLL_WEIGHT); + netif_napi_add(dev, &queue->napi_tx, macb_tx_poll, NAPI_POLL_WEIGHT); if (hw_q) { queue->ISR = GEM_ISR(hw_q - 1); queue->IER = GEM_IER(hw_q - 1); @@ -4972,8 +5052,10 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_device_detach(netdev); for (q = 0, queue = bp->queues; q < bp->num_queues; - ++q, ++queue) - napi_disable(&queue->napi); + ++q, ++queue) { + napi_disable(&queue->napi_rx); + napi_disable(&queue->napi_tx); + } if (!(bp->wol & MACB_WOL_ENABLED)) { rtnl_lock(); @@ -5051,8 +5133,10 @@ static int __maybe_unused macb_resume(struct device *dev) } for (q = 0, queue = bp->queues; q < bp->num_queues; - ++q, ++queue) - napi_enable(&queue->napi); + ++q, ++queue) { + napi_enable(&queue->napi_rx); + napi_enable(&queue->napi_tx); + } if (netdev->hw_features & NETIF_F_NTUPLE) gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); -- cgit v1.2.3 From 32bf8e1f6fb9f6dc334b2b98dffc2e5dcd51e513 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 10 May 2022 19:36:14 +0300 Subject: net: enetc: manage ENETC_F_QBV in priv->active_offloads only when enabled Future work in this driver would like to look at priv->active_offloads & ENETC_F_QBV to determine whether a tc-taprio qdisc offload was installed, but this does not produce the intended effect. 
All the other flags in priv->active_offloads are managed dynamically, except ENETC_F_QBV which is set statically based on the probed SI capability. This change makes priv->active_offloads & ENETC_F_QBV really track the presence of a tc-taprio schedule on the port. Some existing users, like the enetc_sched_speed_set() call from phylink_mac_link_up(), are best kept using the old logic: the tc-taprio offload does not re-trigger another link mode resolve, so the scheduler needs to be functional from the get go, as long as Qbv is supported at all on the port. So to preserve functionality there, look at the static station interface capability from pf->si->hw_features instead. Signed-off-by: Vladimir Oltean Reviewed-by: Claudiu Manoil Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 6 ++---- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 6 ++++++ 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index a0c75c717073..7cccdf54359f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -777,9 +777,6 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->priv_flags |= IFF_UNICAST_FLT; - if (si->hw_features & ENETC_SI_F_QBV) - priv->active_offloads |= ENETC_F_QBV; - if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) { priv->active_offloads |= ENETC_F_QCI; ndev->features |= NETIF_F_HW_TC; @@ -993,7 +990,8 @@ static void enetc_pl_mac_link_up(struct phylink_config *config, int idx; priv = netdev_priv(pf->si->ndev); - if (priv->active_offloads & ENETC_F_QBV) + + if (pf->si->hw_features & ENETC_SI_F_QBV) enetc_sched_speed_set(priv, speed); if (!phylink_autoneg_inband(mode) && diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 9182631856d5..582a663ed0ba 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -70,6 +70,9 @@ static int enetc_setup_taprio(struct net_device *ndev, enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET, tge & (~ENETC_QBV_TGE)); + + priv->active_offloads &= ~ENETC_F_QBV; + return 0; } @@ -125,6 +128,9 @@ static int enetc_setup_taprio(struct net_device *ndev, enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma); + if (!err) + priv->active_offloads |= ENETC_F_QBV; + return err; } -- cgit v1.2.3 From 285e8dedb4bd62bb608a27eff8adabff7b2c82e3 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Tue, 10 May 2022 19:36:15 +0300 Subject: net: enetc: count the tc-taprio window drops The enetc scheduler for IEEE 802.1Qbv has 2 options (depending on PTGCR[TG_DROP_DISABLE]) when we attempt to send an oversized packet which will never fit in its allotted time slot for its traffic class: either block the entire port due to head-of-line blocking, or drop the packet and set a bit in the writeback format of the transmit buffer descriptor, allowing other packets to be sent. We obviously choose the second option in the driver, but we do not detect the drop condition, so from the perspective of the network stack, the packet is sent and no error counter is incremented. This change checks the writeback of the TX BD when tc-taprio is enabled, and increments a specific ethtool statistics counter and a generic "tx_dropped" counter in ndo_get_stats64. 
Signed-off-by: Po Liu Signed-off-by: Vladimir Oltean Reviewed-by: Claudiu Manoil Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/freescale/enetc/enetc.c | 13 +++++++++++-- drivers/net/ethernet/freescale/enetc/enetc.h | 2 ++ drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 2 ++ drivers/net/ethernet/freescale/enetc/enetc_hw.h | 1 + 4 files changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index d6930a797c6c..4470a4a3e4c3 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -172,7 +172,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) } tx_swbd->do_twostep_tstamp = do_twostep_tstamp; - tx_swbd->check_wb = tx_swbd->do_twostep_tstamp; + tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV); + tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en; if (do_vlan || do_onestep_tstamp || do_twostep_tstamp) flags |= ENETC_TXBD_FLAGS_EX; @@ -792,9 +793,9 @@ static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) { + int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0; struct net_device *ndev = tx_ring->ndev; struct enetc_ndev_priv *priv = netdev_priv(ndev); - int tx_frm_cnt = 0, tx_byte_cnt = 0; struct enetc_tx_swbd *tx_swbd; int i, bds_to_clean; bool do_twostep_tstamp; @@ -821,6 +822,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) &tstamp); do_twostep_tstamp = true; } + + if (tx_swbd->qbv_en && + txbd->wb.status & ENETC_TXBD_STATS_WIN) + tx_win_drop++; } if (tx_swbd->is_xdp_tx) @@ -873,6 +878,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) tx_ring->next_to_clean = i; tx_ring->stats.packets += tx_frm_cnt; tx_ring->stats.bytes += tx_byte_cnt; + tx_ring->stats.win_drop += tx_win_drop; if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && __netif_subqueue_stopped(ndev, tx_ring->index) && @@ -2552,6 +2558,7 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev) struct enetc_ndev_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; unsigned long packets = 0, bytes = 0; + unsigned long tx_dropped = 0; int i; for (i = 0; i < priv->num_rx_rings; i++) { @@ -2567,10 +2574,12 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev) for (i = 0; i < priv->num_tx_rings; i++) { packets += priv->tx_ring[i]->stats.packets; bytes += priv->tx_ring[i]->stats.bytes; + tx_dropped += priv->tx_ring[i]->stats.win_drop; } stats->tx_packets = packets; stats->tx_bytes = bytes; + stats->tx_dropped = tx_dropped; return stats; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 68d806dc3701..29922c20531f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -36,6 +36,7 @@ struct enetc_tx_swbd { u8 is_eof:1; u8 is_xdp_tx:1; u8 is_xdp_redirect:1; + u8 qbv_en:1; }; #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE @@ -72,6 +73,7 @@ struct enetc_ring_stats { unsigned int xdp_redirect_sg; unsigned int recycles; unsigned int recycle_failures; + unsigned int win_drop; }; struct enetc_xdp_data { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 60ec64bfb3f0..ff872e40ce85 100644 --- 
a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -204,6 +204,7 @@ static const char tx_ring_stats[][ETH_GSTRING_LEN] = { "Tx ring %2d frames", "Tx ring %2d XDP frames", "Tx ring %2d XDP drops", + "Tx window drop %2d frames", }; static int enetc_get_sset_count(struct net_device *ndev, int sset) @@ -279,6 +280,7 @@ static void enetc_get_ethtool_stats(struct net_device *ndev, data[o++] = priv->tx_ring[i]->stats.packets; data[o++] = priv->tx_ring[i]->stats.xdp_tx; data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops; + data[o++] = priv->tx_ring[i]->stats.win_drop; } for (i = 0; i < priv->num_rx_rings; i++) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index ce5b677e8c2f..647c87f73bf7 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -543,6 +543,7 @@ enum enetc_txbd_flags { ENETC_TXBD_FLAGS_EX = BIT(6), ENETC_TXBD_FLAGS_F = BIT(7) }; +#define ENETC_TXBD_STATS_WIN BIT(7) #define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0) #define ENETC_TXBD_FLAGS_OFFSET 24 -- cgit v1.2.3 From 43213daed6d6cb60e8cf69058a6db8648a556d9d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 10 May 2022 19:53:01 -0700 Subject: fortify: Provide a memcpy trap door for sharp corners As we continue to narrow the scope of what the FORTIFY memcpy() will accept and build alternative APIs that give the compiler appropriate visibility into more complex memcpy scenarios, there is a need for "unfortified" memcpy use in rare cases where combinations of compiler behaviors, source code layout, etc, result in cases where the stricter memcpy checks need to be bypassed until appropriate solutions can be developed (i.e. fix compiler bugs, code refactoring, new API, etc). The intention is for this to be used only if there's no other reasonable solution, for its use to include a justification that can be used to assess future solutions, and for it to be temporary. Example usage included, based on analysis and discussion from: https://lore.kernel.org/netdev/CANn89iLS_2cshtuXPyNUGDPaic=sJiYfvTb_wNLgWrZRyBxZ_g@mail.gmail.com Cc: Jakub Kicinski Cc: Eric Dumazet Cc: "David S. Miller" Cc: Paolo Abeni Cc: Coco Li Cc: Tariq Toukan Cc: Saeed Mahameed Cc: Leon Romanovsky Cc: netdev@vger.kernel.org Cc: linux-hardening@vger.kernel.org Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20220511025301.3636666-1-keescook@chromium.org Signed-off-by: Paolo Abeni --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 8 +++++++- include/linux/fortify-string.h | 16 ++++++++++++++++ include/linux/string.h | 4 ++++ 3 files changed, 27 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2dc48406cd08..5855d8f9c509 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -386,7 +386,13 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, stats->added_vlan_packets++; } else { eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs); - memcpy(eseg->inline_hdr.start, skb->data, attr->ihs); + unsafe_memcpy(eseg->inline_hdr.start, skb->data, attr->ihs, + /* This copy has been bounds-checked earlier in + * mlx5i_sq_calc_wqe_attr() and intentionally + * crosses a flex array boundary. Since it is + * performance sensitive, splitting the copy is + * undesirable. 
+ */); } dseg += wqe_attr->ds_cnt_inl; } else if (skb_vlan_tag_present(skb)) { diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 295637a66c46..3b401fa0f374 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -52,6 +52,22 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) #define __underlying_strncpy __builtin_strncpy #endif +/** + * unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking + * + * @dst: Destination memory address to write to + * @src: Source memory address to read from + * @bytes: How many bytes to write to @dst from @src + * @justification: Free-form text or comment describing why the use is needed + * + * This should be used for corner cases where the compiler cannot do the + * right thing, or during transitions between APIs, etc. It should be used + * very rarely, and includes a place for justification detailing where bounds + * checking has happened, and why existing solutions cannot be employed. + */ +#define unsafe_memcpy(dst, src, bytes, justification) \ + __underlying_memcpy(dst, src, bytes) + /* * Clang's use of __builtin_object_size() within inlines needs hinting via * __pass_object_size(). The preference is to only ever use type 1 (member diff --git a/include/linux/string.h b/include/linux/string.h index b6572aeca2f5..61ec7e4f6311 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -252,6 +252,10 @@ static inline const char *kbasename(const char *path) #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) #include #endif +#ifndef unsafe_memcpy +#define unsafe_memcpy(dst, src, bytes, justification) \ + memcpy(dst, src, bytes) +#endif void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, int pad); -- cgit v1.2.3 From 0f84d403b8e52c9c9bca52ac4f767c8f955e784d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:42:00 +0300 Subject: net: enetc: kill PHY-less mode for PFs Right now, a PHY-less port (no phy-mode, no fixed-link, no phy-handle) doesn't register with phylink, but calls netif_carrier_on() from enetc_start(). This makes sense for a VF, but for a PF, this is braindead, because we never call enetc_mac_enable() so the MAC is left inoperational. Furthermore, commit 71b77a7a27a3 ("enetc: Migrate to PHYLINK and PCS_LYNX") put the nail in the coffin because it removed the initial netif_carrier_off() call done right after register_netdev(). Without that call, netif_carrier_on() does not call linkwatch_fire_event(), so the operstate remains IF_OPER_UNKNOWN. Just deny the broken configuration by requiring that a phy-mode is present, and always register a PF with phylink. 
Signed-off-by: Vladimir Oltean Reviewed-by: Claudiu Manoil Link: https://lore.kernel.org/r/20220511094200.558502-1-vladimir.oltean@nxp.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 7cccdf54359f..c4a0e836d4f0 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1105,8 +1105,7 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv, static void enetc_phylink_destroy(struct enetc_ndev_priv *priv) { - if (priv->phylink) - phylink_destroy(priv->phylink); + phylink_destroy(priv->phylink); } /* Initialize the entire shared memory for the flow steering entries @@ -1273,16 +1272,20 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_alloc_msix; } - if (!of_get_phy_mode(node, &pf->if_mode)) { - err = enetc_mdiobus_create(pf, node); - if (err) - goto err_mdiobus_create; - - err = enetc_phylink_create(priv, node); - if (err) - goto err_phylink_create; + err = of_get_phy_mode(node, &pf->if_mode); + if (err) { + dev_err(&pdev->dev, "Failed to read PHY mode\n"); + goto err_phy_mode; } + err = enetc_mdiobus_create(pf, node); + if (err) + goto err_mdiobus_create; + + err = enetc_phylink_create(priv, node); + if (err) + goto err_phylink_create; + err = register_netdev(ndev); if (err) goto err_reg_netdev; @@ -1294,6 +1297,7 @@ err_reg_netdev: err_phylink_create: enetc_mdiobus_destroy(pf); err_mdiobus_create: +err_phy_mode: enetc_free_msix(priv); err_config_si: err_alloc_msix: -- cgit v1.2.3 From 982c97eede13f3cb98c471c1b8fc5a12686ef85c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 11 May 2022 11:44:48 +0100 Subject: net: ethernet: SP7021: Fix spelling mistake "Interrput" -> "Interrupt" There is a spelling mistake in a dev_dbg message. Fix it. Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20220511104448.150800-1-colin.i.king@gmail.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/sunplus/spl2sw_int.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.c b/drivers/net/ethernet/sunplus/spl2sw_int.c index 95ce096d1543..69b1e2e0271e 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_int.c +++ b/drivers/net/ethernet/sunplus/spl2sw_int.c @@ -215,7 +215,7 @@ irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id) status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); if (unlikely(!status)) { - dev_dbg(&comm->pdev->dev, "Interrput status is null!\n"); + dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n"); goto spl2sw_ethernet_int_out; } writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); -- cgit v1.2.3 From e0d0e1fdf1ed9dcbca60409af0856fa17f0021cb Mon Sep 17 00:00:00 2001 From: Bin Chen Date: Wed, 11 May 2022 13:39:32 +0200 Subject: nfp: VF rate limit support Add VF rate limit feature This patch enhances the NFP driver to supports assignment of both max_tx_rate and min_tx_rate to VFs The template of configurations below is all supported. e.g. 
# ip link set $DEV vf $VF_NUM max_tx_rate $RATE_VALUE # ip link set $DEV vf $VF_NUM min_tx_rate $RATE_VALUE # ip link set $DEV vf $VF_NUM max_tx_rate $RATE_VALUE \ min_tx_rate $RATE_VALUE # ip link set $DEV vf $VF_NUM min_tx_rate $RATE_VALUE \ max_tx_rate $RATE_VALUE The max RATE_VALUE is limited to 0xFFFF which is about 63Gbps (using 1024 for 1G) Signed-off-by: Bin Chen Signed-off-by: Louis Peens Signed-off-by: Baowen Zheng Signed-off-by: Simon Horman Signed-off-by: Paolo Abeni --- .../net/ethernet/netronome/nfp/nfp_net_common.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 48 +++++++++++++++++++++- drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h | 9 ++++ 3 files changed, 56 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e3594a5c2a85..4e56a99087fa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1903,6 +1903,7 @@ const struct net_device_ops nfp_nfd3_netdev_ops = { .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, + .ndo_set_vf_rate = nfp_app_set_vf_rate, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, .ndo_set_vf_trust = nfp_app_set_vf_trust, .ndo_get_vf_config = nfp_app_get_vf_config, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 4627715a5e32..54af30961351 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -142,6 +142,37 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, return nfp_net_sriov_update(app, vf, update, "vlan"); } +int nfp_app_set_vf_rate(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct nfp_app *app = nfp_app_from_netdev(netdev); + u32 vf_offset, ratevalue; + int err; + + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + if (err) + return err; + + if (max_tx_rate >= NFP_NET_VF_RATE_MAX || + min_tx_rate >= NFP_NET_VF_RATE_MAX) { + nfp_warn(app->cpp, "tx-rate exceeds %d.\n", + NFP_NET_VF_RATE_MAX); + return -EINVAL; + } + + vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + ratevalue = FIELD_PREP(NFP_NET_VF_CFG_MAX_RATE, + max_tx_rate ? 
max_tx_rate : + NFP_NET_VF_RATE_MAX) | + FIELD_PREP(NFP_NET_VF_CFG_MIN_RATE, min_tx_rate); + + writel(ratevalue, + app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_RATE); + + return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_RATE, + "rate"); +} + int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) { struct nfp_app *app = nfp_app_from_netdev(netdev); @@ -228,9 +259,8 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct nfp_app *app = nfp_app_from_netdev(netdev); - unsigned int vf_offset; + u32 vf_offset, mac_hi, rate; u32 vlan_tag; - u32 mac_hi; u16 mac_lo; u8 flags; int err; @@ -261,5 +291,19 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + if (!err) { + rate = readl(app->pf->vfcfg_tbl2 + vf_offset + + NFP_NET_VF_CFG_RATE); + + ivi->max_tx_rate = FIELD_GET(NFP_NET_VF_CFG_MAX_RATE, rate); + ivi->min_tx_rate = FIELD_GET(NFP_NET_VF_CFG_MIN_RATE, rate); + + if (ivi->max_tx_rate == NFP_NET_VF_RATE_MAX) + ivi->max_tx_rate = 0; + if (ivi->min_tx_rate == NFP_NET_VF_RATE_MAX) + ivi->min_tx_rate = 0; + } + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index 7b72cc083476..2d445fa199dc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -20,6 +20,7 @@ #define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3) #define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5) +#define NFP_NET_VF_CFG_MB_CAP_RATE (0x1 << 6) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) @@ -28,6 +29,7 @@ #define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3) #define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5) +#define NFP_NET_VF_CFG_MB_UPD_RATE (0x1 << 6) #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -48,10 +50,17 @@ #define NFP_NET_VF_CFG_VLAN_PROT 0xffff0000 #define NFP_NET_VF_CFG_VLAN_QOS 0xe000 #define NFP_NET_VF_CFG_VLAN_VID 0x0fff +#define NFP_NET_VF_CFG_RATE 0xc +#define NFP_NET_VF_CFG_MIN_RATE 0x0000ffff +#define NFP_NET_VF_CFG_MAX_RATE 0xffff0000 + +#define NFP_NET_VF_RATE_MAX 0xffff int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto); +int nfp_app_set_vf_rate(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool setting); int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, -- cgit v1.2.3 From f4826443f4d69d2c97c184952c085caf0936a7b8 Mon Sep 17 00:00:00 2001 From: David Thompson Date: Wed, 11 May 2022 09:52:51 -0400 Subject: mlxbf_gige: remove driver-managed interrupt counts The driver currently has three interrupt counters, which are incremented every time each interrupt handler executes. These driver-managed counters are not necessary as the kernel already has logic that manages interrupt counts and exposes them via /proc/interrupts. This patch removes the driver-managed counters. 
Signed-off-by: David Thompson Signed-off-by: Asmaa Mnebhi Link: https://lore.kernel.org/r/20220511135251.2989-1-davthompson@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h | 3 --- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c | 8 +++----- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c | 9 --------- 3 files changed, 3 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h index 86826a70f9dd..5fdf9b7179f5 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h @@ -90,9 +90,6 @@ struct mlxbf_gige { dma_addr_t rx_cqe_base_dma; u16 tx_pi; u16 prev_tx_ci; - u64 error_intr_count; - u64 rx_intr_count; - u64 llu_plu_intr_count; struct sk_buff *rx_skb[MLXBF_GIGE_MAX_RXQ_SZ]; struct sk_buff *tx_skb[MLXBF_GIGE_MAX_TXQ_SZ]; int error_irq; diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c index ceeb7f4c3f6c..41ebef25a930 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c @@ -24,11 +24,9 @@ static void mlxbf_gige_get_regs(struct net_device *netdev, regs->version = MLXBF_GIGE_REGS_VERSION; /* Read entire MMIO register space and store results - * into the provided buffer. Each 64-bit word is converted - * to big-endian to make the output more readable. - * - * NOTE: by design, a read to an offset without an existing - * register will be acknowledged and return zero. + * into the provided buffer. By design, a read to an + * offset without an existing register will be + * acknowledged and return zero. */ memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ); } diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c index c38795be04a2..5b3519f0cc46 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c @@ -17,8 +17,6 @@ static irqreturn_t mlxbf_gige_error_intr(int irq, void *dev_id) priv = dev_id; - priv->error_intr_count++; - int_status = readq(priv->base + MLXBF_GIGE_INT_STATUS); if (int_status & MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR) @@ -75,8 +73,6 @@ static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id) priv = dev_id; - priv->rx_intr_count++; - /* NOTE: GigE silicon automatically disables "packet rx" interrupt by * setting MLXBF_GIGE_INT_MASK bit0 upon triggering the interrupt * to the ARM cores. Software needs to re-enable "packet rx" @@ -90,11 +86,6 @@ static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id) static irqreturn_t mlxbf_gige_llu_plu_intr(int irq, void *dev_id) { - struct mlxbf_gige *priv; - - priv = dev_id; - priv->llu_plu_intr_count++; - return IRQ_HANDLED; } -- cgit v1.2.3 From f0a65f815f640499990d446d4f5d9090634fdf27 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Wed, 11 May 2022 22:40:59 +0200 Subject: net: lan966x: Fix use of pointer after being freed The smatch found the following warning: drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c:736 lan966x_fdma_reload() warn: 'rx_dcbs' was already freed. This issue can happen when changing the MTU on one of the ports and once the RX buffers are allocated and then the TX buffer allocation fails. 
In that case the RX buffers should not be restored. This fixes the issue such that the RX buffers will not be restored if the TX buffers failed to be allocated. Fixes: 2ea1cbac267e2a ("net: lan966x: Update FDMA to change MTU.") Reported-by: Dan Carpenter Signed-off-by: Horatiu Vultur Link: https://lore.kernel.org/r/20220511204059.2689199-1-horatiu.vultur@microchip.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index 9e2a7323eaf0..6dea7f8c1481 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -729,11 +729,11 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) return err; restore: lan966x->rx.dma = rx_dma; - lan966x->tx.dma = tx_dma; + lan966x->rx.dcbs = rx_dcbs; lan966x_fdma_rx_start(&lan966x->rx); restore_tx: - lan966x->rx.dcbs = rx_dcbs; + lan966x->tx.dma = tx_dma; lan966x->tx.dcbs = tx_dcbs; lan966x->tx.dcbs_buf = tx_dcbs_buf; -- cgit v1.2.3 From e9b3ba439dcb975474accbae1a37b99f9df59bed Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:50:13 +0300 Subject: net: dsa: felix: program host FDB entries towards PGID_CPU for tag_8021q too I remembered why we had the host FDB migration procedure in place. It is true that host FDB entry migration can be done by changing the value of PGID_CPU, but the problem is that only host FDB entries learned while operating in NPI mode go to PGID_CPU. When the CPU port operates in tag_8021q mode, the FDB entries are learned towards the unicast PGID equal to the physical port number of this CPU port, bypassing the PGID_CPU indirection. So host FDB entries learned in tag_8021q mode are not migrated any longer towards the NPI port. Fix this by extracting the NPI port -> PGID_CPU redirection from the ocelot switch lib, moving it to the Felix DSA driver, and applying it for any CPU port regardless of its kind (NPI or tag_8021q).
Fixes: a51c1c3f3218 ("net: dsa: felix: stop migrating FDBs back and forth on tag proto change") Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 12 ++++++++++-- drivers/net/ethernet/mscc/ocelot.c | 7 +------ 2 files changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index a23781d9a15c..5af4f9b3ee32 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -668,15 +668,19 @@ static int felix_fdb_add(struct dsa_switch *ds, int port, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); - if (dsa_is_cpu_port(ds, port) && !bridge_dev && + if (dsa_port_is_cpu(dp) && !bridge_dev && dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) return 0; + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); } @@ -685,15 +689,19 @@ static int felix_fdb_del(struct dsa_switch *ds, int port, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); + struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); - if (dsa_is_cpu_port(ds, port) && !bridge_dev && + if (dsa_port_is_cpu(dp) && !bridge_dev && dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) return 0; + if (dsa_port_is_cpu(dp)) + port = PGID_CPU; + return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 5f81938c58a9..7a9ee91c8427 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1349,15 +1349,10 @@ EXPORT_SYMBOL(ocelot_drain_cpu_queue); int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid, const struct net_device *bridge) { - int pgid = port; - - if (port == ocelot->npi) - pgid = PGID_CPU; - if (!vid) vid = ocelot_vlan_unaware_pvid(ocelot, bridge); - return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED); + return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED); } EXPORT_SYMBOL(ocelot_fdb_add); -- cgit v1.2.3 From 0ddf83cda5a6e1a7148ddef46b1c2e21d5be7515 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:50:14 +0300 Subject: net: dsa: felix: bring the NPI port indirection for host MDBs to surface For symmetry with host FDBs where the indirection is now handled outside the ocelot switch lib, do the same for host MDB entries. The only caller of the ocelot switch lib which uses the NPI port is the Felix DSA driver. 
Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 6 ++++++ drivers/net/ethernet/mscc/ocelot.c | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 5af4f9b3ee32..f8a587ae9c6b 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -745,6 +745,9 @@ static int felix_mdb_add(struct dsa_switch *ds, int port, dsa_mdb_present_in_other_db(ds, port, mdb, db)) return 0; + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev); } @@ -762,6 +765,9 @@ static int felix_mdb_del(struct dsa_switch *ds, int port, dsa_mdb_present_in_other_db(ds, port, mdb, db)) return 0; + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev); } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 7a9ee91c8427..29e8011e4a91 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2339,9 +2339,6 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port, struct ocelot_pgid *pgid; u16 vid = mdb->vid; - if (port == ocelot->npi) - port = ocelot->num_phys_ports; - if (!vid) vid = ocelot_vlan_unaware_pvid(ocelot, bridge); @@ -2399,9 +2396,6 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, struct ocelot_pgid *pgid; u16 vid = mdb->vid; - if (port == ocelot->npi) - port = ocelot->num_phys_ports; - if (!vid) vid = ocelot_vlan_unaware_pvid(ocelot, bridge); -- cgit v1.2.3 From 910ee6cce92fc0d0c459967ac95902d7bf3e41bf Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:50:15 +0300 Subject: net: dsa: felix: bring the NPI port indirection for host flooding to surface For symmetry with host FDBs and MDBs where the indirection is now handled outside the ocelot switch lib, do the same for bridge port flags (unicast/multicast/broadcast flooding). The only caller of the ocelot switch lib which uses the NPI port is the Felix DSA driver. 
Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 3 +++ drivers/net/ethernet/mscc/ocelot.c | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index f8a587ae9c6b..59221d838a45 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -794,6 +794,9 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + ocelot_port_bridge_flags(ocelot, port, val); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 29e8011e4a91..e0d1d5b59981 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2943,9 +2943,6 @@ EXPORT_SYMBOL(ocelot_port_pre_bridge_flags); void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags flags) { - if (port == ocelot->npi) - port = ocelot->num_phys_ports; - if (flags.mask & BR_LEARNING) ocelot_port_set_learning(ocelot, port, !!(flags.val & BR_LEARNING)); -- cgit v1.2.3 From 72c3b0c7359a6f91dd03e08b839adccd9d4268a8 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:50:17 +0300 Subject: net: dsa: felix: manage host flooding using a specific driver callback At the time - commit 7569459a52c9 ("net: dsa: manage flooding on the CPU ports") - not introducing a dedicated switch callback for host flooding made sense, because for the only user, the felix driver, there was nothing different to do for the CPU port than set the flood flags on the CPU port just like on any other bridge port. There are 2 reasons why this approach is not good enough, however. (1) Other drivers, like sja1105, support configuring flooding as a function of {ingress port, egress port}, whereas the DSA ->port_bridge_flags() function only operates on an egress port. So with that driver we'd have useless host flooding from user ports which don't need it. (2) Even with the felix driver, support for multiple CPU ports makes it difficult to piggyback on ->port_bridge_flags(). The way in which the felix driver is going to support host-filtered addresses with multiple CPU ports is that it will direct these addresses towards both CPU ports (in a sort of multicast fashion), then restrict the forwarding to only one of the two using the forwarding masks. Consequently, flooding will also be enabled towards both CPU ports. However, ->port_bridge_flags() gets passed the index of a single CPU port, and that leaves the flood settings out of sync between the 2 CPU ports. This is to say, it's better to have a specific driver method for host flooding, which takes the user port as argument. This solves problem (1) by allowing the driver to do different things for different user ports, and problem (2) by abstracting the operation and letting the driver do whatever, rather than explicitly making the DSA core point to the CPU port it thinks needs to be touched. This new method also creates a problem, which is that cross-chip setups are not handled. However I don't have hardware right now where I can test what is the proper thing to do, and there isn't hardware compatible with multi-switch trees that supports host flooding. So it remains a problem to be tackled in the future. 
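Condensing the plumbing that the hunks below spread across slave.c, port.c and felix.c, the new call path is roughly the following (simplified, not the literal code):

	/* net/dsa/slave.c: derive the request from the user port's flags */
	static void dsa_slave_manage_host_flood(struct net_device *dev)
	{
		bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
		bool uc = dev->flags & IFF_PROMISC;

		dsa_port_set_host_flood(dsa_slave_to_port(dev), uc, mc);
	}

	/* net/dsa/port.c: hand it to the switch driver, if implemented */
	void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc)
	{
		struct dsa_switch *ds = dp->ds;

		if (ds->ops->port_set_host_flood)
			ds->ops->port_set_host_flood(ds, dp->index, uc, mc);
	}

felix_port_set_host_flood() then accumulates the per-user-port requests into host_flood_uc_mask/host_flood_mc_mask and rewrites only the CPU bits of PGID_UC, PGID_MC, PGID_MCIPV4 and PGID_MCIPV6, so the decision is made per user port rather than per CPU port.
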
Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 32 ++++++++++++++++++++++++++++++++ drivers/net/dsa/ocelot/felix.h | 2 ++ include/net/dsa.h | 2 ++ net/dsa/dsa_priv.h | 1 + net/dsa/port.c | 8 ++++++++ net/dsa/slave.c | 36 ++++++------------------------------ 6 files changed, 51 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 59221d838a45..6b67ab4e05ab 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -634,6 +634,37 @@ static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, return felix->tag_proto; } +static void felix_port_set_host_flood(struct dsa_switch *ds, int port, + bool uc, bool mc) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask, val; + + if (uc) + felix->host_flood_uc_mask |= BIT(port); + else + felix->host_flood_uc_mask &= ~BIT(port); + + if (mc) + felix->host_flood_mc_mask |= BIT(port); + else + felix->host_flood_mc_mask &= ~BIT(port); + + if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) + mask = dsa_cpu_ports(ds); + else + mask = BIT(ocelot->num_phys_ports); + + val = (felix->host_flood_uc_mask) ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); + + val = (felix->host_flood_mc_mask) ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); +} + static int felix_set_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) { @@ -1876,6 +1907,7 @@ const struct dsa_switch_ops felix_switch_ops = { .port_get_dscp_prio = felix_port_get_dscp_prio, .port_add_dscp_prio = felix_port_add_dscp_prio, .port_del_dscp_prio = felix_port_del_dscp_prio, + .port_set_host_flood = felix_port_set_host_flood, }; struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index a5e570826773..b34bde43f11b 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -72,6 +72,8 @@ struct felix { resource_size_t imdio_base; enum dsa_tag_protocol tag_proto; struct kthread_worker *xmit_worker; + unsigned long host_flood_uc_mask; + unsigned long host_flood_mc_mask; }; struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port); diff --git a/include/net/dsa.h b/include/net/dsa.h index 76257a9f0e1b..cfb287b0d311 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -978,6 +978,8 @@ struct dsa_switch_ops { int (*port_bridge_flags)(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); + void (*port_set_host_flood)(struct dsa_switch *ds, int port, + bool uc, bool mc); /* * VLAN support diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 7c9abd5a0ab9..d9722e49864b 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -291,6 +291,7 @@ int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr); void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast); +void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc); /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; diff --git a/net/dsa/port.c b/net/dsa/port.c index 
ecf0395cbddd..3738f2d40a0b 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -920,6 +920,14 @@ int dsa_port_bridge_flags(struct dsa_port *dp, return 0; } +void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc) +{ + struct dsa_switch *ds = dp->ds; + + if (ds->ops->port_set_host_flood) + ds->ops->port_set_host_flood(ds, dp->index, uc, mc); +} + int dsa_port_vlan_msti(struct dsa_port *dp, const struct switchdev_vlan_msti *msti) { diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 5ee0aced9410..801a5d445833 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -262,37 +262,13 @@ static int dsa_slave_close(struct net_device *dev) return 0; } -/* Keep flooding enabled towards this port's CPU port as long as it serves at - * least one port in the tree that requires it. - */ -static void dsa_port_manage_cpu_flood(struct dsa_port *dp) +static void dsa_slave_manage_host_flood(struct net_device *dev) { - struct switchdev_brport_flags flags = { - .mask = BR_FLOOD | BR_MCAST_FLOOD, - }; - struct dsa_switch_tree *dst = dp->ds->dst; - struct dsa_port *cpu_dp = dp->cpu_dp; - struct dsa_port *other_dp; - int err; - - list_for_each_entry(other_dp, &dst->ports, list) { - if (!dsa_port_is_user(other_dp)) - continue; - - if (other_dp->cpu_dp != cpu_dp) - continue; - - if (other_dp->slave->flags & IFF_ALLMULTI) - flags.val |= BR_MCAST_FLOOD; - if (other_dp->slave->flags & IFF_PROMISC) - flags.val |= BR_FLOOD | BR_MCAST_FLOOD; - } - - err = dsa_port_pre_bridge_flags(dp, flags, NULL); - if (err) - return; + bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI); + struct dsa_port *dp = dsa_slave_to_port(dev); + bool uc = dev->flags & IFF_PROMISC; - dsa_port_bridge_flags(cpu_dp, flags, NULL); + dsa_port_set_host_flood(dp, uc, mc); } static void dsa_slave_change_rx_flags(struct net_device *dev, int change) @@ -310,7 +286,7 @@ static void dsa_slave_change_rx_flags(struct net_device *dev, int change) if (dsa_switch_supports_uc_filtering(ds) && dsa_switch_supports_mc_filtering(ds)) - dsa_port_manage_cpu_flood(dp); + dsa_slave_manage_host_flood(dev); } static void dsa_slave_set_rx_mode(struct net_device *dev) -- cgit v1.2.3 From bacf93b0561937695e9c6c1dc1d8ed10ca80eb81 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:50:18 +0300 Subject: net: dsa: remove port argument from ->change_tag_protocol() DSA has not supported (and probably will not support in the future either) independent tagging protocols per CPU port. Different switch drivers have different requirements, some may need to replicate some settings for each CPU port, some may need to apply some settings on a single CPU port, while some may have to configure some global settings and then some per-CPU-port settings. In any case, the current model where DSA calls ->change_tag_protocol for each CPU port turns out to be impractical for drivers where there are global things to be done. For example, felix calls dsa_tag_8021q_register(), which makes no sense per CPU port, so it suppresses the second call. Let drivers deal with replication towards all CPU ports, and remove the CPU port argument from the function prototype. 
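As a rough illustration (not part of this patch; the "foo" names are hypothetical), a driver that still needs the same setting applied on every CPU port now performs the replication itself inside its ->change_tag_protocol() implementation, using the existing dsa_switch_for_each_cpu_port() iterator:

static int foo_change_tag_protocol(struct dsa_switch *ds,
				   enum dsa_tag_protocol proto)
{
	struct foo_priv *priv = ds->priv;
	struct dsa_port *cpu_dp;
	int err;

	/* DSA no longer iterates over CPU ports on our behalf, so walk
	 * them here and program each one with the new protocol.
	 */
	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		err = foo_setup_port_mode(priv, cpu_dp->index, proto);
		if (err)
			return err;
	}

	return 0;
}

Drivers with purely global settings (like felix or rtl8365mb) simply drop the per-port loop altogether.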
Signed-off-by: Vladimir Oltean Acked-by: Luiz Angelo Daros de Luca Signed-off-by: Jakub Kicinski --- drivers/net/dsa/mv88e6xxx/chip.c | 22 +++++++++++++++++---- drivers/net/dsa/ocelot/felix.c | 39 +++++++++++-------------------------- drivers/net/dsa/realtek/rtl8365mb.c | 2 +- include/net/dsa.h | 6 +++++- net/dsa/dsa2.c | 18 +++++++---------- net/dsa/switch.c | 10 ++++------ 6 files changed, 46 insertions(+), 51 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 53fd12e7a21c..5d2c57a7c708 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -6329,11 +6329,12 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, return chip->tag_protocol; } -static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port, +static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { struct mv88e6xxx_chip *chip = ds->priv; enum dsa_tag_protocol old_protocol; + struct dsa_port *cpu_dp; int err; switch (proto) { @@ -6358,11 +6359,24 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port, chip->tag_protocol = proto; mv88e6xxx_reg_lock(chip); - err = mv88e6xxx_setup_port_mode(chip, port); + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index); + if (err) { + mv88e6xxx_reg_unlock(chip); + goto unwind; + } + } mv88e6xxx_reg_unlock(chip); - if (err) - chip->tag_protocol = old_protocol; + return 0; + +unwind: + chip->tag_protocol = old_protocol; + + mv88e6xxx_reg_lock(chip); + dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds) + mv88e6xxx_setup_port_mode(chip, cpu_dp->index); + mv88e6xxx_reg_unlock(chip); return err; } diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 6b67ab4e05ab..0edec7c2b847 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -575,14 +575,13 @@ static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu, * tag_8021q setup can fail, the NPI setup can't. So either the change is made, * or the restoration is guaranteed to work. */ -static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, +static int felix_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); enum dsa_tag_protocol old_proto = felix->tag_proto; - bool cpu_port_active = false; - struct dsa_port *dp; + struct dsa_port *cpu_dp; int err; if (proto != DSA_TAG_PROTO_SEVILLE && @@ -590,33 +589,17 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu, proto != DSA_TAG_PROTO_OCELOT_8021Q) return -EPROTONOSUPPORT; - /* We don't support multiple CPU ports, yet the DT blob may have - * multiple CPU ports defined. The first CPU port is the active one, - * the others are inactive. In this case, DSA will call - * ->change_tag_protocol() multiple times, once per CPU port. - * Since we implement the tagging protocol change towards "ocelot" or - * "seville" as effectively initializing the NPI port, what we are - * doing is effectively changing who the NPI port is to the last @cpu - * argument passed, which is an unused DSA CPU port and not the one - * that should actively pass traffic. - * Suppress DSA's calls on CPU ports that are inactive. 
- */ - dsa_switch_for_each_user_port(dp, ds) { - if (dp->cpu_dp->index == cpu) { - cpu_port_active = true; - break; - } - } - - if (!cpu_port_active) - return 0; + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + felix_del_tag_protocol(ds, cpu_dp->index, old_proto); - felix_del_tag_protocol(ds, cpu, old_proto); + err = felix_set_tag_protocol(ds, cpu_dp->index, proto); + if (err) { + felix_set_tag_protocol(ds, cpu_dp->index, old_proto); + return err; + } - err = felix_set_tag_protocol(ds, cpu, proto); - if (err) { - felix_set_tag_protocol(ds, cpu, old_proto); - return err; + /* Stop at first CPU port */ + break; } felix->tag_proto = proto; diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 3d70e8a77ecf..3bb42a9f236d 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -1778,7 +1778,7 @@ static int rtl8365mb_cpu_config(struct realtek_priv *priv) return 0; } -static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds, int cpu_index, +static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { struct realtek_priv *priv = ds->priv; diff --git a/include/net/dsa.h b/include/net/dsa.h index cfb287b0d311..14f07275852b 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -579,6 +579,10 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p) dsa_switch_for_each_port((_dp), (_ds)) \ if (dsa_port_is_cpu((_dp))) +#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \ + dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \ + if (dsa_port_is_cpu((_dp))) + static inline u32 dsa_user_ports(struct dsa_switch *ds) { struct dsa_port *dp; @@ -803,7 +807,7 @@ struct dsa_switch_ops { enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds, int port, enum dsa_tag_protocol mprot); - int (*change_tag_protocol)(struct dsa_switch *ds, int port, + int (*change_tag_protocol)(struct dsa_switch *ds, enum dsa_tag_protocol proto); /* * Method for switch drivers to connect to the tagging protocol driver diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index cf933225df32..d0a2452a1e24 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -809,22 +809,18 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds) { const struct dsa_device_ops *tag_ops = ds->dst->tag_ops; struct dsa_switch_tree *dst = ds->dst; - struct dsa_port *cpu_dp; int err; if (tag_ops->proto == dst->default_proto) goto connect; - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - rtnl_lock(); - err = ds->ops->change_tag_protocol(ds, cpu_dp->index, - tag_ops->proto); - rtnl_unlock(); - if (err) { - dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n", - tag_ops->name, ERR_PTR(err)); - return err; - } + rtnl_lock(); + err = ds->ops->change_tag_protocol(ds, tag_ops->proto); + rtnl_unlock(); + if (err) { + dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n", + tag_ops->name, ERR_PTR(err)); + return err; } connect: diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 704975e5c1c2..2b56218fc57c 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -809,14 +809,12 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds, ASSERT_RTNL(); - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - err = ds->ops->change_tag_protocol(ds, cpu_dp->index, - tag_ops->proto); - if (err) - return err; + err = ds->ops->change_tag_protocol(ds, tag_ops->proto); + if (err) + return err; + dsa_switch_for_each_cpu_port(cpu_dp, ds) dsa_port_set_tag_protocol(cpu_dp, tag_ops); - } /* Now that changing the tag protocol 
can no longer fail, let's update * the remaining bits which are "duplicated for faster access", and the -- cgit v1.2.3 From c352e5e8e8f2888c99ee164632af365157da2a41 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:50:19 +0300 Subject: net: dsa: felix: dynamically determine tag_8021q CPU port for traps Ocelot switches support a single active CPU port at a time (at least as a trapping destination, i.e. for control traffic). This is true regardless of whether we are using the native copy-to-CPU-port-module functionality, or a redirect action towards the software-defined tag_8021q CPU port. Currently we assume that the trapping destination in tag_8021q mode is the first CPU port, yet in the future we may want to migrate the user ports to the second CPU port. For that to work, we need to make sure that the tag_8021q trapping destination is a CPU port that is active, i.e. is used by at least some user port on which the trap was added. Otherwise, we may end up redirecting the traffic to a CPU port which isn't even up. Note that due to the current design where we simply choose the CPU port of the first port from the trap's ingress port mask, it may be that a CPU port absorbes control traffic from user ports which aren't affine to it as per user space's request. This isn't ideal, but is the lesser of two evils. Following the user-configured affinity for traps would mean that we can no longer reuse a single TCAM entry for multiple traps, which is what we actually do for e.g. PTP. Either we duplicate and deduplicate TCAM entries on the fly when user-to-CPU-port mappings change (which is unnecessarily complicated), or we redirect trapped traffic to all tag_8021q CPU ports if multiple such ports are in use. The latter would have actually been nice, if it actually worked, but it doesn't, since a OCELOT_MASK_MODE_REDIRECT action towards multiple ports would not take PGID_SRC into consideration, and it would just duplicate the packet towards each (CPU) port, leading to duplicates in software. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 0edec7c2b847..e76a5d434626 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -313,6 +313,21 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) mutex_unlock(&ocelot->fwd_domain_lock); } +static int felix_trap_get_cpu_port(struct dsa_switch *ds, + const struct ocelot_vcap_filter *trap) +{ + struct dsa_port *dp; + int first_port; + + if (WARN_ON(!trap->ingress_port_mask)) + return -1; + + first_port = __ffs(trap->ingress_port_mask); + dp = dsa_to_port(ds, first_port); + + return dp->cpu_dp->index; +} + /* On switches with no extraction IRQ wired, trapped packets need to be * replicated over Ethernet as well, otherwise we'd get no notification of * their arrival when using the ocelot-8021q tagging protocol. 
@@ -326,19 +341,12 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds, struct ocelot_vcap_filter *trap; enum ocelot_mask_mode mask_mode; unsigned long port_mask; - struct dsa_port *dp; bool cpu_copy_ena; - int cpu = -1, err; + int err; if (!felix->info->quirk_no_xtr_irq) return 0; - /* Figure out the current CPU port */ - dsa_switch_for_each_cpu_port(dp, ds) { - cpu = dp->index; - break; - } - /* We are sure that "cpu" was found, otherwise * dsa_tree_setup_default_cpu() would have failed earlier. */ @@ -356,7 +364,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds, * port module. */ mask_mode = OCELOT_MASK_MODE_REDIRECT; - port_mask = BIT(cpu); + port_mask = BIT(felix_trap_get_cpu_port(ds, trap)); cpu_copy_ena = !!trap->take_ts; } else { /* Trap packets only to the CPU port module, which is -- cgit v1.2.3 From 7a29d220f4c0745a6d435dbd53c659fbde4998b6 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 12:50:20 +0300 Subject: net: dsa: felix: reimplement tagging protocol change with function pointers The error handling for the current tagging protocol change procedure is a bit brittle (we dismantle the previous tagging protocol entirely before setting up the new one). By identifying which parts of a tagging protocol are unique to itself and which parts are shared with the other, we can implement a protocol change procedure where error handling is a bit more robust, because we start setting up the new protocol first, and tear down the old one only after the setup of the specific and shared parts succeeded. The protocol change is a bit too open-coded too, in the area of migrating host flood settings and MDBs. By identifying what differs between tagging protocols (the forwarding masks for host flooding) we can implement a more straightforward migration procedure which is handled in the shared portion of the protocol change, rather than individually by each protocol. Therefore, a more structured approach calls for the introduction of a structure of function pointers per tagging protocol. This covers setup, teardown and the host forwarding mask. In the future it will also cover how to prepare for a new DSA master. The initial tagging protocol setup (at driver probe time) and the final teardown (at driver removal time) are also adapted to call into the structured methods of the specific protocol in current use. This is especially relevant for teardown, where we previously called felix_del_tag_protocol() only for the first CPU port. But by not specifying which CPU port this is for, we gain more flexibility to support multiple CPU ports in the future. 
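For orientation, the per-protocol structure introduced by this patch has three methods, and the change procedure condenses to "set up the new protocol, migrate the shared state, then tear down the old one". The function below is only a trimmed sketch of felix_change_tag_protocol(), not the verbatim implementation (protocol selection and bookkeeping of felix->tag_proto are omitted):

struct felix_tag_proto_ops {
	int (*setup)(struct dsa_switch *ds);
	void (*teardown)(struct dsa_switch *ds);
	unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
};

static int felix_change_proto_sketch(struct dsa_switch *ds,
				     const struct felix_tag_proto_ops *new_ops,
				     const struct felix_tag_proto_ops *old_ops)
{
	int err;

	/* Protocol-specific resources first (NPI port, or tag_8021q VLANs) */
	err = new_ops->setup(ds);
	if (err)
		return err;

	/* Shared resources: MDBs, trapping destinations, host flooding */
	err = felix_tag_proto_setup_shared(ds, new_ops, old_ops);
	if (err) {
		new_ops->teardown(ds);
		return err;
	}

	/* Only once the new protocol is fully operational is it safe to
	 * dismantle the previous one.
	 */
	if (old_ops)
		old_ops->teardown(ds);

	return 0;
}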
Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 399 +++++++++++++++++++++-------------------- drivers/net/dsa/ocelot/felix.h | 14 ++ 2 files changed, 216 insertions(+), 197 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index e76a5d434626..beac90bc642c 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -42,43 +42,6 @@ static struct net_device *felix_classify_db(struct dsa_db db) } } -static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to, - int pgid) -{ - struct ocelot *ocelot = ds->priv; - bool on; - u32 val; - - val = ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid); - on = !!(val & BIT(from)); - val &= ~BIT(from); - if (on) - val |= BIT(to); - else - val &= ~BIT(to); - - ocelot_write_rix(ocelot, val, ANA_PGID_PGID, pgid); -} - -static void felix_migrate_flood_to_npi_port(struct dsa_switch *ds, int port) -{ - struct ocelot *ocelot = ds->priv; - - felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_UC); - felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_MC); - felix_migrate_pgid_bit(ds, port, ocelot->num_phys_ports, PGID_BC); -} - -static void -felix_migrate_flood_to_tag_8021q_port(struct dsa_switch *ds, int port) -{ - struct ocelot *ocelot = ds->priv; - - felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_UC); - felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_MC); - felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC); -} - /* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that * the tagger can perform RX source port identification. */ @@ -392,13 +355,107 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds, return 0; } -static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) +/* The CPU port module is connected to the Node Processor Interface (NPI). This + * is the mode through which frames can be injected from and extracted to an + * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU + * running Linux, and this forms a DSA setup together with the enetc or fman + * DSA master. 
+ */ +static void felix_npi_port_init(struct ocelot *ocelot, int port) +{ + ocelot->npi = port; + + ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | + QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port), + QSYS_EXT_CPU_CFG); + + /* NPI port Injection/Extraction configuration */ + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, + ocelot->npi_xtr_prefix); + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, + ocelot->npi_inj_prefix); + + /* Disable transmission of pause frames */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); +} + +static void felix_npi_port_deinit(struct ocelot *ocelot, int port) +{ + /* Restore hardware defaults */ + int unused_port = ocelot->num_phys_ports + 2; + + ocelot->npi = -1; + + ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port), + QSYS_EXT_CPU_CFG); + + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, + OCELOT_TAG_PREFIX_DISABLED); + ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, + OCELOT_TAG_PREFIX_DISABLED); + + /* Enable transmission of pause frames */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); +} + +static int felix_tag_npi_setup(struct dsa_switch *ds) +{ + struct dsa_port *dp, *first_cpu_dp = NULL; + struct ocelot *ocelot = ds->priv; + + dsa_switch_for_each_user_port(dp, ds) { + if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) { + dev_err(ds->dev, "Multiple NPI ports not supported\n"); + return -EINVAL; + } + + first_cpu_dp = dp->cpu_dp; + } + + if (!first_cpu_dp) + return -EINVAL; + + felix_npi_port_init(ocelot, first_cpu_dp->index); + + return 0; +} + +static void felix_tag_npi_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; - struct dsa_port *dp; + + felix_npi_port_deinit(ocelot, ocelot->npi); +} + +static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + + return BIT(ocelot->num_phys_ports); +} + +static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { + .setup = felix_tag_npi_setup, + .teardown = felix_tag_npi_teardown, + .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, +}; + +static int felix_tag_8021q_setup(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + struct dsa_port *dp, *cpu_dp; int err; - felix_8021q_cpu_port_init(ocelot, cpu); + err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); + if (err) + return err; + + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + felix_8021q_cpu_port_init(ocelot, cpu_dp->index); + + /* TODO we could support multiple CPU ports in tag_8021q mode */ + break; + } dsa_switch_for_each_available_port(dp, ds) { /* This overwrites ocelot_init(): @@ -416,21 +473,6 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); } - err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); - if (err) - return err; - - err = ocelot_migrate_mdbs(ocelot, BIT(ocelot->num_phys_ports), - BIT(cpu)); - if (err) - goto out_tag_8021q_unregister; - - felix_migrate_flood_to_tag_8021q_port(ds, cpu); - - err = felix_update_trapping_destinations(ds, true); - if (err) - goto out_migrate_flood; - /* The ownership of the CPU port module's queues might have just been * transferred to the tag_8021q tagger from the NPI-based tagger. * So there might still be all sorts of crap in the queues. 
On the @@ -441,27 +483,12 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu) ocelot_drain_cpu_queue(ocelot, 0); return 0; - -out_migrate_flood: - felix_migrate_flood_to_npi_port(ds, cpu); - ocelot_migrate_mdbs(ocelot, BIT(cpu), BIT(ocelot->num_phys_ports)); -out_tag_8021q_unregister: - dsa_tag_8021q_unregister(ds); - return err; } -static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu) +static void felix_tag_8021q_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; - struct dsa_port *dp; - int err; - - err = felix_update_trapping_destinations(ds, false); - if (err) - dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d", - err); - - dsa_tag_8021q_unregister(ds); + struct dsa_port *dp, *cpu_dp; dsa_switch_for_each_available_port(dp, ds) { /* Restore the logic from ocelot_init: @@ -473,110 +500,99 @@ static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu) dp->index); } - felix_8021q_cpu_port_deinit(ocelot, cpu); -} - -/* The CPU port module is connected to the Node Processor Interface (NPI). This - * is the mode through which frames can be injected from and extracted to an - * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU - * running Linux, and this forms a DSA setup together with the enetc or fman - * DSA master. - */ -static void felix_npi_port_init(struct ocelot *ocelot, int port) -{ - ocelot->npi = port; - - ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | - QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port), - QSYS_EXT_CPU_CFG); + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + felix_8021q_cpu_port_deinit(ocelot, cpu_dp->index); - /* NPI port Injection/Extraction configuration */ - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, - ocelot->npi_xtr_prefix); - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, - ocelot->npi_inj_prefix); + /* TODO we could support multiple CPU ports in tag_8021q mode */ + break; + } - /* Disable transmission of pause frames */ - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); + dsa_tag_8021q_unregister(ds); } -static void felix_npi_port_deinit(struct ocelot *ocelot, int port) +static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) { - /* Restore hardware defaults */ - int unused_port = ocelot->num_phys_ports + 2; + return dsa_cpu_ports(ds); +} - ocelot->npi = -1; +static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { + .setup = felix_tag_8021q_setup, + .teardown = felix_tag_8021q_teardown, + .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, +}; - ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port), - QSYS_EXT_CPU_CFG); +static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, + bool uc, bool mc, bool bc) +{ + struct ocelot *ocelot = ds->priv; + unsigned long val; - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, - OCELOT_TAG_PREFIX_DISABLED); - ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, - OCELOT_TAG_PREFIX_DISABLED); + val = uc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); - /* Enable transmission of pause frames */ - ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); + val = mc ? 
mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); } -static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu) +static void +felix_migrate_host_flood(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) { struct ocelot *ocelot = ds->priv; - int err; - - err = ocelot_migrate_mdbs(ocelot, BIT(cpu), - BIT(ocelot->num_phys_ports)); - if (err) - return err; - - felix_migrate_flood_to_npi_port(ds, cpu); + struct felix *felix = ocelot_to_felix(ocelot); + unsigned long mask; - felix_npi_port_init(ocelot, cpu); + if (old_proto_ops) { + mask = old_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, false, false, false); + } - return 0; + mask = proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); } -static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu) +static int felix_migrate_mdbs(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) { struct ocelot *ocelot = ds->priv; + unsigned long from, to; + + if (!old_proto_ops) + return 0; + + from = old_proto_ops->get_host_fwd_mask(ds); + to = proto_ops->get_host_fwd_mask(ds); - felix_npi_port_deinit(ocelot, cpu); + return ocelot_migrate_mdbs(ocelot, from, to); } -static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu, - enum dsa_tag_protocol proto) +/* Configure the shared hardware resources for a transition between + * @old_proto_ops and @proto_ops. + * Manual migration is needed because as far as DSA is concerned, no change of + * the CPU port is taking place here, just of the tagging protocol. 
+ */ +static int +felix_tag_proto_setup_shared(struct dsa_switch *ds, + const struct felix_tag_proto_ops *proto_ops, + const struct felix_tag_proto_ops *old_proto_ops) { + bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops); int err; - switch (proto) { - case DSA_TAG_PROTO_SEVILLE: - case DSA_TAG_PROTO_OCELOT: - err = felix_setup_tag_npi(ds, cpu); - break; - case DSA_TAG_PROTO_OCELOT_8021Q: - err = felix_setup_tag_8021q(ds, cpu); - break; - default: - err = -EPROTONOSUPPORT; - } + err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops); + if (err) + return err; - return err; -} + felix_update_trapping_destinations(ds, using_tag_8021q); -static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu, - enum dsa_tag_protocol proto) -{ - switch (proto) { - case DSA_TAG_PROTO_SEVILLE: - case DSA_TAG_PROTO_OCELOT: - felix_teardown_tag_npi(ds, cpu); - break; - case DSA_TAG_PROTO_OCELOT_8021Q: - felix_teardown_tag_8021q(ds, cpu); - break; - default: - break; - } + felix_migrate_host_flood(ds, proto_ops, old_proto_ops); + + return 0; } /* This always leaves the switch in a consistent state, because although the @@ -586,33 +602,45 @@ static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu, static int felix_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { + const struct felix_tag_proto_ops *old_proto_ops, *proto_ops; struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - enum dsa_tag_protocol old_proto = felix->tag_proto; - struct dsa_port *cpu_dp; int err; - if (proto != DSA_TAG_PROTO_SEVILLE && - proto != DSA_TAG_PROTO_OCELOT && - proto != DSA_TAG_PROTO_OCELOT_8021Q) + switch (proto) { + case DSA_TAG_PROTO_SEVILLE: + case DSA_TAG_PROTO_OCELOT: + proto_ops = &felix_tag_npi_proto_ops; + break; + case DSA_TAG_PROTO_OCELOT_8021Q: + proto_ops = &felix_tag_8021q_proto_ops; + break; + default: return -EPROTONOSUPPORT; + } - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - felix_del_tag_protocol(ds, cpu_dp->index, old_proto); + old_proto_ops = felix->tag_proto_ops; - err = felix_set_tag_protocol(ds, cpu_dp->index, proto); - if (err) { - felix_set_tag_protocol(ds, cpu_dp->index, old_proto); - return err; - } + err = proto_ops->setup(ds); + if (err) + goto setup_failed; - /* Stop at first CPU port */ - break; - } + err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops); + if (err) + goto setup_shared_failed; + if (old_proto_ops) + old_proto_ops->teardown(ds); + + felix->tag_proto_ops = proto_ops; felix->tag_proto = proto; return 0; + +setup_shared_failed: + proto_ops->teardown(ds); +setup_failed: + return err; } static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, @@ -630,7 +658,7 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - unsigned long mask, val; + unsigned long mask; if (uc) felix->host_flood_uc_mask |= BIT(port); @@ -642,18 +670,9 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port, else felix->host_flood_mc_mask &= ~BIT(port); - if (felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q) - mask = dsa_cpu_ports(ds); - else - mask = BIT(ocelot->num_phys_ports); - - val = (felix->host_flood_uc_mask) ? mask : 0; - ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); - - val = (felix->host_flood_mc_mask) ? 
mask : 0; - ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); - ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); - ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); + mask = felix->tag_proto_ops->get_host_fwd_mask(ds); + felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, + !!felix->host_flood_mc_mask, true); } static int felix_set_ageing_time(struct dsa_switch *ds, @@ -1332,7 +1351,6 @@ static int felix_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); - unsigned long cpu_flood; struct dsa_port *dp; int err; @@ -1366,21 +1384,10 @@ static int felix_setup(struct dsa_switch *ds) if (err) goto out_deinit_ports; - dsa_switch_for_each_cpu_port(dp, ds) { - /* The initial tag protocol is NPI which always returns 0, so - * there's no real point in checking for errors. - */ - felix_set_tag_protocol(ds, dp->index, felix->tag_proto); - - /* Start off with flooding disabled towards the NPI port - * (actually CPU port module). - */ - cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC); - ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC); - - break; - } + /* The initial tag protocol is NPI which won't fail during initial + * setup, there's no real point in checking for errors. + */ + felix_change_tag_protocol(ds, felix->tag_proto); ds->mtu_enforcement_ingress = true; ds->assisted_learning_on_cpu_port = true; @@ -1409,10 +1416,8 @@ static void felix_teardown(struct dsa_switch *ds) struct felix *felix = ocelot_to_felix(ocelot); struct dsa_port *dp; - dsa_switch_for_each_cpu_port(dp, ds) { - felix_del_tag_protocol(ds, dp->index, felix->tag_proto); - break; - } + if (felix->tag_proto_ops) + felix->tag_proto_ops->teardown(ds); dsa_switch_for_each_available_port(dp, ds) ocelot_deinit_port(ocelot, dp->index); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index b34bde43f11b..9e07eb7ee28d 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -59,6 +59,19 @@ struct felix_info { struct resource *res); }; +/* Methods for initializing the hardware resources specific to a tagging + * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs, + * for "ocelot-8021q"). + * It is important that the resources configured here do not have side effects + * for the other tagging protocols. If that is the case, their configuration + * needs to go to felix_tag_proto_setup_shared(). 
+ */ +struct felix_tag_proto_ops { + int (*setup)(struct dsa_switch *ds); + void (*teardown)(struct dsa_switch *ds); + unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds); +}; + extern const struct dsa_switch_ops felix_switch_ops; /* DSA glue / front-end for struct ocelot */ @@ -71,6 +84,7 @@ struct felix { resource_size_t switch_base; resource_size_t imdio_base; enum dsa_tag_protocol tag_proto; + const struct felix_tag_proto_ops *tag_proto_ops; struct kthread_worker *xmit_worker; unsigned long host_flood_uc_mask; unsigned long host_flood_mc_mask; -- cgit v1.2.3 From 7e708760fc114f049df9dccb994e23d20866b310 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 11 May 2022 13:06:37 +0300 Subject: net: mscc: ocelot: move ocelot_port_private :: chip_port to ocelot_port :: index Currently the ocelot switch lib is unaware of the index of a struct ocelot_port, since that is kept in the encapsulating structures of outer drivers (struct dsa_port :: index, struct ocelot_port_private :: chip_port). With the upcoming increase in complexity associated with assigning DSA tag_8021q CPU ports to certain user ports, it becomes necessary for the switch lib to be able to retrieve the index of a certain ocelot_port. Therefore, introduce a new u8 to ocelot_port (same size as the chip_port used by the ocelot switchdev driver) and rework the existing code to populate and use it. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/felix.c | 1 + drivers/net/ethernet/mscc/ocelot.h | 1 - drivers/net/ethernet/mscc/ocelot_net.c | 76 +++++++++++++++++----------------- include/soc/mscc/ocelot.h | 2 + 4 files changed, 41 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index beac90bc642c..d38258a39d07 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1249,6 +1249,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; ocelot_port->target = target; + ocelot_port->index = port; ocelot->ports[port] = ocelot_port; } diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index d0fa8ab6cc81..6d65cc87d757 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -48,7 +48,6 @@ struct ocelot_port_private { struct net_device *dev; struct phylink *phylink; struct phylink_config phylink_config; - u8 chip_port; struct ocelot_port_tc tc; }; diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 616d8127ef51..be168a372498 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -191,7 +191,7 @@ static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; return &ocelot->devlink_ports[port]; } @@ -201,7 +201,7 @@ int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, bool ingress) { struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; if (!ingress) return -EOPNOTSUPP; @@ -226,7 +226,7 @@ static int ocelot_setup_tc_cls_matchall_police(struct ocelot_port_private *priv, struct flow_action_entry *action = &f->rule->action.entries[0]; struct ocelot *ocelot = priv->port.ocelot; struct ocelot_policer pol = { 
0 }; - int port = priv->chip_port; + int port = priv->port.index; int err; if (!ingress) { @@ -288,8 +288,8 @@ static int ocelot_setup_tc_cls_matchall_mirred(struct ocelot_port_private *priv, other_priv = netdev_priv(a->dev); - err = ocelot_port_mirror_add(ocelot, priv->chip_port, - other_priv->chip_port, ingress, extack); + err = ocelot_port_mirror_add(ocelot, priv->port.index, + other_priv->port.index, ingress, extack); if (err) return err; @@ -306,7 +306,7 @@ static int ocelot_del_tc_cls_matchall_police(struct ocelot_port_private *priv, struct netlink_ext_ack *extack) { struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; int err; err = ocelot_port_policer_del(ocelot, port); @@ -327,7 +327,7 @@ static int ocelot_del_tc_cls_matchall_mirred(struct ocelot_port_private *priv, struct netlink_ext_ack *extack) { struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; ocelot_port_mirror_del(ocelot, port, ingress); @@ -497,7 +497,7 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; int ret; ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged); @@ -515,7 +515,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; int ret; /* 8021q removes VID 0 on module unload for all interfaces @@ -558,7 +558,7 @@ static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; u32 rew_op = 0; if (!static_branch_unlikely(&ocelot_fdma_enabled) && @@ -724,7 +724,7 @@ static void ocelot_get_stats64(struct net_device *dev, { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; /* Configure the port to read the stats from */ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), @@ -767,7 +767,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_fdb_add(ocelot, port, addr, vid, ocelot_port->bridge); } @@ -780,7 +780,7 @@ static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge); } @@ -798,7 +798,7 @@ static int ocelot_port_fdb_dump(struct sk_buff *skb, .cb = cb, .idx = *idx, }; - int port = priv->chip_port; + int port = priv->port.index; int ret; ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump); @@ -840,7 +840,7 @@ static int ocelot_set_features(struct net_device *dev, netdev_features_t changed = dev->features ^ features; struct ocelot_port_private *priv = 
netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && priv->tc.offload_cnt) { @@ -859,7 +859,7 @@ static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; /* If the attached PHY device isn't capable of timestamping operations, * use our own (when possible). @@ -882,7 +882,7 @@ static int ocelot_change_mtu(struct net_device *dev, int new_mtu) struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - ocelot_port_set_maxlen(ocelot, priv->chip_port, new_mtu); + ocelot_port_set_maxlen(ocelot, priv->port.index, new_mtu); WRITE_ONCE(dev->mtu, new_mtu); return 0; @@ -935,7 +935,7 @@ int ocelot_netdev_to_port(struct net_device *dev) priv = netdev_priv(dev); - return priv->chip_port; + return priv->port.index; } static void ocelot_port_get_strings(struct net_device *netdev, u32 sset, @@ -943,7 +943,7 @@ static void ocelot_port_get_strings(struct net_device *netdev, u32 sset, { struct ocelot_port_private *priv = netdev_priv(netdev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; ocelot_get_strings(ocelot, port, sset, data); } @@ -954,7 +954,7 @@ static void ocelot_port_get_ethtool_stats(struct net_device *dev, { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; ocelot_get_ethtool_stats(ocelot, port, data); } @@ -963,7 +963,7 @@ static int ocelot_port_get_sset_count(struct net_device *dev, int sset) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_get_sset_count(ocelot, port, sset); } @@ -973,7 +973,7 @@ static int ocelot_port_get_ts_info(struct net_device *dev, { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; if (!ocelot->ptp) return ethtool_op_get_ts_info(dev, info); @@ -1025,7 +1025,7 @@ static int ocelot_port_attr_set(struct net_device *dev, const void *ctx, { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; int err = 0; if (ctx && ctx != priv) @@ -1066,7 +1066,7 @@ static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged, extack); } @@ -1092,7 +1092,7 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_port_mdb_add(ocelot, port, mdb, ocelot_port->bridge); } @@ -1103,7 +1103,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot 
*ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_port_mdb_del(ocelot, port, mdb, ocelot_port->bridge); } @@ -1114,7 +1114,7 @@ static int ocelot_port_obj_mrp_add(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_mrp_add(ocelot, port, mrp); } @@ -1125,7 +1125,7 @@ static int ocelot_port_obj_mrp_del(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_mrp_del(ocelot, port, mrp); } @@ -1137,7 +1137,7 @@ ocelot_port_obj_mrp_add_ring_role(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_mrp_add_ring_role(ocelot, port, mrp); } @@ -1149,7 +1149,7 @@ ocelot_port_obj_mrp_del_ring_role(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; return ocelot_mrp_del_ring_role(ocelot, port, mrp); } @@ -1314,7 +1314,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; int bridge_num, err; bridge_num = ocelot_bridge_num_get(ocelot, bridge); @@ -1366,7 +1366,7 @@ static int ocelot_netdevice_bridge_leave(struct net_device *dev, struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; int bridge_num = ocelot_port->bridge_num; - int port = priv->chip_port; + int port = priv->port.index; int err; err = ocelot_switchdev_unsync(ocelot, port); @@ -1388,7 +1388,7 @@ static int ocelot_netdevice_lag_join(struct net_device *dev, struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; struct net_device *bridge_dev; - int port = priv->chip_port; + int port = priv->port.index; int err; err = ocelot_port_lag_join(ocelot, port, bond, info); @@ -1431,7 +1431,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev, struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; struct net_device *bridge_dev; - int port = priv->chip_port; + int port = priv->port.index; ocelot_port_lag_leave(ocelot, port, bond); @@ -1545,7 +1545,7 @@ ocelot_netdevice_changelowerstate(struct net_device *dev, bool is_active = info->link_up && info->tx_enabled; struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; + int port = priv->port.index; if (!ocelot_port->bond) return NOTIFY_DONE; @@ -1693,7 +1693,7 @@ static void vsc7514_phylink_mac_link_down(struct phylink_config *config, struct net_device *ndev = to_net_dev(config->dev); struct ocelot_port_private *priv = netdev_priv(ndev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, 
OCELOT_MAC_QUIRKS); @@ -1709,7 +1709,7 @@ static void vsc7514_phylink_mac_link_up(struct phylink_config *config, struct net_device *ndev = to_net_dev(config->dev); struct ocelot_port_private *priv = netdev_priv(ndev); struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; + int port = priv->port.index; ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, interface, speed, duplex, @@ -1823,9 +1823,9 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, SET_NETDEV_DEV(dev, ocelot->dev); priv = netdev_priv(dev); priv->dev = dev; - priv->chip_port = port; ocelot_port = &priv->port; ocelot_port->ocelot = ocelot; + ocelot_port->index = port; ocelot_port->target = target; ocelot->ports[port] = ocelot_port; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 904c15ca145e..3b8c5a54fb00 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -675,6 +675,8 @@ struct ocelot_port { u8 ptp_cmd; u8 ts_id; + u8 index; + u8 stp_state; bool vlan_aware; bool is_dsa_8021q_cpu; -- cgit v1.2.3 From 65d4b471b3cf635565d0f36294fc47685bf659b1 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 11 May 2022 17:19:24 +0100 Subject: siena: Make MTD support specific for Siena Add a Siena Kconfig option and use it in stead of the sfc one. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/Kconfig | 2 +- drivers/net/ethernet/sfc/siena/Kconfig | 8 ++++++++ drivers/net/ethernet/sfc/siena/Makefile | 4 ++-- drivers/net/ethernet/sfc/siena/efx.h | 2 +- drivers/net/ethernet/sfc/siena/efx_common.c | 2 +- drivers/net/ethernet/sfc/siena/mcdi.c | 4 ++-- drivers/net/ethernet/sfc/siena/mcdi.h | 2 +- drivers/net/ethernet/sfc/siena/net_driver.h | 4 ++-- drivers/net/ethernet/sfc/siena/siena.c | 6 +++--- 9 files changed, 21 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 98db551ba2b7..79b8ccaeee01 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -32,7 +32,7 @@ config SFC To compile this driver as a module, choose M here. The module will be called sfc. config SFC_MTD - bool "Solarflare SFC9000/SFC9100-family MTD support" + bool "Solarflare SFC9100-family MTD support" depends on SFC && MTD && !(SFC=y && MTD=m) default y help diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig index 3d52aee50d5a..805b902f903d 100644 --- a/drivers/net/ethernet/sfc/siena/Kconfig +++ b/drivers/net/ethernet/sfc/siena/Kconfig @@ -10,3 +10,11 @@ config SFC_SIENA To compile this driver as a module, choose M here. The module will be called sfc-siena. +config SFC_SIENA_MTD + bool "Solarflare SFC9000-family MTD support" + depends on SFC_SIENA && MTD && !(SFC_SIENA=y && MTD=m) + default y + help + This exposes the on-board flash and/or EEPROM as MTD devices + (e.g. /dev/mtd1). This is required to update the firmware or + the boot configuration under Linux. 
diff --git a/drivers/net/ethernet/sfc/siena/Makefile b/drivers/net/ethernet/sfc/siena/Makefile index 74cb8b7d281e..3729095a51d9 100644 --- a/drivers/net/ethernet/sfc/siena/Makefile +++ b/drivers/net/ethernet/sfc/siena/Makefile @@ -5,7 +5,7 @@ sfc-siena-y += farch.o siena.o \ selftest.o ethtool.o ethtool_common.o ptp.o \ mcdi.o mcdi_port.o mcdi_port_common.o \ mcdi_mon.o -sfc-siena-$(CONFIG_SFC_MTD) += mtd.o -sfc-siena-$(CONFIG_SFC_SRIOV) += siena_sriov.o +sfc-siena-$(CONFIG_SFC_SIENA_MTD) += mtd.o +sfc-siena-$(CONFIG_SFC_SRIOV) += siena_sriov.o obj-$(CONFIG_SFC_SIENA) += sfc-siena.o diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h index f91f3c94a275..1d9755e59d75 100644 --- a/drivers/net/ethernet/sfc/siena/efx.h +++ b/drivers/net/ethernet/sfc/siena/efx.h @@ -162,7 +162,7 @@ void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats); /* MTD */ -#ifdef CONFIG_SFC_MTD +#ifdef CONFIG_SFC_SIENA_MTD int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, size_t n_parts, size_t sizeof_part); static inline int efx_mtd_probe(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index b44a7114e319..7c400fd590f5 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -997,7 +997,7 @@ int efx_siena_init_struct(struct efx_nic *efx, INIT_LIST_HEAD(&efx->node); INIT_LIST_HEAD(&efx->secondary_list); spin_lock_init(&efx->biu_lock); -#ifdef CONFIG_SFC_MTD +#ifdef CONFIG_SFC_SIENA_MTD INIT_LIST_HEAD(&efx->mtd_list); #endif INIT_WORK(&efx->reset_work, efx_reset_work); diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c index eb13aa59fe50..b767e29cfe92 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.c +++ b/drivers/net/ethernet/sfc/siena/mcdi.c @@ -2014,7 +2014,7 @@ int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx) return rc; } -#ifdef CONFIG_SFC_MTD +#ifdef CONFIG_SFC_SIENA_MTD #define EFX_MCDI_NVRAM_LEN_MAX 128 @@ -2256,4 +2256,4 @@ void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part) efx->name, part->type_name, mcdi_part->fw_subtype); } -#endif /* CONFIG_SFC_MTD */ +#endif /* CONFIG_SFC_SIENA_MTD */ diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h index dcebdbf956ce..64990f398e67 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.h +++ b/drivers/net/ethernet/sfc/siena/mcdi.h @@ -373,7 +373,7 @@ static inline int efx_siena_mcdi_mon_probe(struct efx_nic *efx) { return 0; } static inline void efx_siena_mcdi_mon_remove(struct efx_nic *efx) {} #endif -#ifdef CONFIG_SFC_MTD +#ifdef CONFIG_SFC_SIENA_MTD int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer); int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h index 7e0659be4348..6af172fb0b10 100644 --- a/drivers/net/ethernet/sfc/siena/net_driver.h +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -1031,7 +1031,7 @@ struct efx_nic { unsigned irq_level; struct delayed_work selftest_work; -#ifdef CONFIG_SFC_MTD +#ifdef CONFIG_SFC_SIENA_MTD struct list_head mtd_list; #endif @@ -1411,7 +1411,7 @@ struct efx_nic_type { bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); #endif -#ifdef CONFIG_SFC_MTD 
+#ifdef CONFIG_SFC_SIENA_MTD int (*mtd_probe)(struct efx_nic *efx); void (*mtd_rename)(struct efx_mtd_partition *part); int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index 741313aff1d1..9fe8ffc3a8d3 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -830,7 +830,7 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx) ************************************************************************** */ -#ifdef CONFIG_SFC_MTD +#ifdef CONFIG_SFC_SIENA_MTD struct siena_nvram_type_info { int port; @@ -954,7 +954,7 @@ fail: return rc; } -#endif /* CONFIG_SFC_MTD */ +#endif /* CONFIG_SFC_SIENA_MTD */ static unsigned int siena_check_caps(const struct efx_nic *efx, u8 flag, u32 offset) @@ -1058,7 +1058,7 @@ const struct efx_nic_type siena_a0_nic_type = { #ifdef CONFIG_RFS_ACCEL .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, #endif -#ifdef CONFIG_SFC_MTD +#ifdef CONFIG_SFC_SIENA_MTD .mtd_probe = siena_mtd_probe, .mtd_rename = efx_siena_mcdi_mtd_rename, .mtd_read = efx_siena_mcdi_mtd_read, -- cgit v1.2.3 From dfb1cfbd497e758de43ee02fbeb1fe66ed1ed26b Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 11 May 2022 17:19:36 +0100 Subject: siena: Make SRIOV support specific for Siena Add a Siena Kconfig option and use it in stead of the sfc one. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/Kconfig | 2 +- drivers/net/ethernet/sfc/siena/Kconfig | 8 ++++++++ drivers/net/ethernet/sfc/siena/Makefile | 2 +- drivers/net/ethernet/sfc/siena/efx.c | 12 ++++++------ drivers/net/ethernet/sfc/siena/efx.h | 2 +- drivers/net/ethernet/sfc/siena/efx_channels.c | 4 ++-- drivers/net/ethernet/sfc/siena/efx_common.c | 2 +- drivers/net/ethernet/sfc/siena/farch.c | 18 +++++++++--------- drivers/net/ethernet/sfc/siena/net_driver.h | 2 +- drivers/net/ethernet/sfc/siena/nic.h | 2 +- drivers/net/ethernet/sfc/siena/siena.c | 4 ++-- drivers/net/ethernet/sfc/siena/siena_sriov.h | 6 +++--- drivers/net/ethernet/sfc/siena/sriov.h | 4 ++-- 13 files changed, 38 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 79b8ccaeee01..4c85b26279c5 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -47,7 +47,7 @@ config SFC_MCDI_MON This exposes the on-board firmware-managed sensors as a hardware monitor device. config SFC_SRIOV - bool "Solarflare SFC9000/SFC9100-family SR-IOV support" + bool "Solarflare SFC9100-family SR-IOV support" depends on SFC && PCI_IOV default y help diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig index 805b902f903d..26a8cb838d47 100644 --- a/drivers/net/ethernet/sfc/siena/Kconfig +++ b/drivers/net/ethernet/sfc/siena/Kconfig @@ -18,3 +18,11 @@ config SFC_SIENA_MTD This exposes the on-board flash and/or EEPROM as MTD devices (e.g. /dev/mtd1). This is required to update the firmware or the boot configuration under Linux. +config SFC_SIENA_SRIOV + bool "Solarflare SFC9000-family SR-IOV support" + depends on SFC_SIENA && PCI_IOV + default n + help + This enables support for the Single Root I/O Virtualization + features, allowing accelerated network performance in + virtualized environments. 
diff --git a/drivers/net/ethernet/sfc/siena/Makefile b/drivers/net/ethernet/sfc/siena/Makefile index 3729095a51d9..f7384299667c 100644 --- a/drivers/net/ethernet/sfc/siena/Makefile +++ b/drivers/net/ethernet/sfc/siena/Makefile @@ -6,6 +6,6 @@ sfc-siena-y += farch.o siena.o \ mcdi.o mcdi_port.o mcdi_port_common.o \ mcdi_mon.o sfc-siena-$(CONFIG_SFC_SIENA_MTD) += mtd.o -sfc-siena-$(CONFIG_SFC_SRIOV) += siena_sriov.o +sfc-siena-$(CONFIG_SFC_SIENA_SRIOV) += siena_sriov.o obj-$(CONFIG_SFC_SIENA) += sfc-siena.o diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index 3f6e732f5fdc..01809666a3d1 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -359,7 +359,7 @@ static int efx_probe_all(struct efx_nic *efx) goto fail3; } -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV rc = efx->type->vswitching_probe(efx); if (rc) /* not fatal; the PF will still work fine */ netif_warn(efx, probe, efx->net_dev, @@ -383,7 +383,7 @@ static int efx_probe_all(struct efx_nic *efx) fail5: efx_siena_remove_filters(efx); fail4: -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV efx->type->vswitching_remove(efx); #endif fail3: @@ -402,7 +402,7 @@ static void efx_remove_all(struct efx_nic *efx) efx_siena_remove_channels(efx); efx_siena_remove_filters(efx); -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV efx->type->vswitching_remove(efx); #endif efx_remove_port(efx); @@ -592,7 +592,7 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_features_check = efx_siena_features_check, .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV .ndo_set_vf_mac = efx_sriov_set_vf_mac, .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, @@ -1108,7 +1108,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, /* efx_pci_sriov_configure returns the actual number of Virtual Functions * enabled on success */ -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) { int rc; @@ -1250,7 +1250,7 @@ static struct pci_driver efx_pci_driver = { .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, .err_handler = &efx_siena_err_handlers, -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV .sriov_configure = efx_pci_sriov_configure, #endif }; diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h index 1d9755e59d75..27d1d3f19cae 100644 --- a/drivers/net/ethernet/sfc/siena/efx.h +++ b/drivers/net/ethernet/sfc/siena/efx.h @@ -177,7 +177,7 @@ static inline void efx_siena_mtd_rename(struct efx_nic *efx) {} static inline void efx_siena_mtd_remove(struct efx_nic *efx) {} #endif -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 1 << efx->vi_scale; diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 276cd7d88732..28391875de69 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -110,7 +110,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) /* If RSS is requested for the PF *and* VFs then we can't write RSS * table entries that are inaccessible to VFs */ -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV if (efx->type->sriov_wanted) { if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && count > 
efx_vf_size(efx)) { @@ -348,7 +348,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) rss_spread = efx->n_rx_channels; /* RSS might be usable on VFs even if it is disabled on the PF */ -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV if (efx->type->sriov_wanted) { efx->rss_spread = ((rss_spread > 1 || !efx->type->sriov_wanted(efx)) ? diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index 7c400fd590f5..3aef8d216f95 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -778,7 +778,7 @@ int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (rc) goto fail; -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV rc = efx->type->vswitching_restore(efx); if (rc) /* not fatal; the PF will still work fine */ netif_warn(efx, probe, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/siena/farch.c b/drivers/net/ethernet/sfc/siena/farch.c index a24ba23fd19f..cce23803c652 100644 --- a/drivers/net/ethernet/sfc/siena/farch.c +++ b/drivers/net/ethernet/sfc/siena/farch.c @@ -228,7 +228,7 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer, unsigned int len) { -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV struct siena_nic_data *nic_data = efx->nic_data; #endif len = ALIGN(len, EFX_BUF_SIZE); @@ -241,7 +241,7 @@ static int efx_alloc_special_buffer(struct efx_nic *efx, /* Select new buffer ID */ buffer->index = efx->next_buffer_table; efx->next_buffer_table += buffer->entries; -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV BUG_ON(efx_siena_sriov_enabled(efx) && nic_data->vf_buftbl_base < efx->next_buffer_table); #endif @@ -1187,7 +1187,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_tx_flush_done(efx, event); -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV efx_siena_sriov_tx_flush_done(efx, event); #endif break; @@ -1195,7 +1195,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_rx_flush_done(efx, event); -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV efx_siena_sriov_rx_flush_done(efx, event); #endif break; @@ -1233,7 +1233,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ev_sub_data); efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV else efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); #endif @@ -1246,7 +1246,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ev_sub_data); efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR); } -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV else efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); #endif @@ -1307,7 +1307,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) case FSE_AZ_EV_CODE_DRIVER_EV: efx_farch_handle_driver_event(channel, &event); break; -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV case FSE_CZ_EV_CODE_USER_EV: efx_siena_sriov_event(channel, &event); break; @@ -1671,7 +1671,7 @@ void efx_farch_rx_pull_indir_table(struct efx_nic *efx) void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) { unsigned vi_count, total_tx_channels; -#ifdef CONFIG_SFC_SRIOV 
+#ifdef CONFIG_SFC_SIENA_SRIOV struct siena_nic_data *nic_data; unsigned buftbl_min; #endif @@ -1679,7 +1679,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels; vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL); -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV nic_data = efx->nic_data; /* Account for the buffer table entries backing the datapath channels * and the descriptor caches for those channels. diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h index 6af172fb0b10..a8f6c3699c8b 100644 --- a/drivers/net/ethernet/sfc/siena/net_driver.h +++ b/drivers/net/ethernet/sfc/siena/net_driver.h @@ -1096,7 +1096,7 @@ struct efx_nic { atomic_t rxq_flush_outstanding; wait_queue_head_t flush_wq; -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV unsigned vf_count; unsigned vf_init_count; unsigned vi_scale; diff --git a/drivers/net/ethernet/sfc/siena/nic.h b/drivers/net/ethernet/sfc/siena/nic.h index 935cb0ab5ec0..6def31070edb 100644 --- a/drivers/net/ethernet/sfc/siena/nic.h +++ b/drivers/net/ethernet/sfc/siena/nic.h @@ -104,7 +104,7 @@ struct siena_nic_data { struct efx_nic *efx; int wol_filter_id; u64 stats[SIENA_STAT_COUNT]; -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV struct siena_vf *vf; struct efx_channel *vfdi_channel; unsigned vf_buftbl_base; diff --git a/drivers/net/ethernet/sfc/siena/siena.c b/drivers/net/ethernet/sfc/siena/siena.c index 9fe8ffc3a8d3..a44c8fa25748 100644 --- a/drivers/net/ethernet/sfc/siena/siena.c +++ b/drivers/net/ethernet/sfc/siena/siena.c @@ -328,7 +328,7 @@ static int siena_probe_nic(struct efx_nic *efx) if (rc) goto fail5; -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV efx_siena_sriov_probe(efx); #endif efx_siena_ptp_defer_probe_with_channel(efx); @@ -1068,7 +1068,7 @@ const struct efx_nic_type siena_a0_nic_type = { #endif .ptp_write_host_time = siena_ptp_write_host_time, .ptp_set_ts_config = siena_ptp_set_ts_config, -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV .sriov_configure = efx_siena_sriov_configure, .sriov_init = efx_siena_sriov_init, .sriov_fini = efx_siena_sriov_fini, diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.h b/drivers/net/ethernet/sfc/siena/siena_sriov.h index e548c4daf189..69a7a18e9ba0 100644 --- a/drivers/net/ethernet/sfc/siena/siena_sriov.h +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.h @@ -54,18 +54,18 @@ int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf, struct ifla_vf_info *ivf); -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return efx->vf_init_count != 0; } -#else /* !CONFIG_SFC_SRIOV */ +#else /* !CONFIG_SFC_SIENA_SRIOV */ static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; } -#endif /* CONFIG_SFC_SRIOV */ +#endif /* CONFIG_SFC_SIENA_SRIOV */ void efx_siena_sriov_probe(struct efx_nic *efx); void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); diff --git a/drivers/net/ethernet/sfc/siena/sriov.h b/drivers/net/ethernet/sfc/siena/sriov.h index fbde67319d87..a6981bad7621 100644 --- a/drivers/net/ethernet/sfc/siena/sriov.h +++ b/drivers/net/ethernet/sfc/siena/sriov.h @@ -9,7 +9,7 @@ #include "net_driver.h" -#ifdef CONFIG_SFC_SRIOV +#ifdef CONFIG_SFC_SIENA_SRIOV static inline int efx_sriov_set_vf_mac(struct net_device 
*net_dev, int vf_i, u8 *mac) @@ -78,6 +78,6 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, else return -EOPNOTSUPP; } -#endif /* CONFIG_SFC_SRIOV */ +#endif /* CONFIG_SFC_SIENA_SRIOV */ #endif /* EFX_SRIOV_H */ -- cgit v1.2.3 From f62a074525de47fe748ce74b81b95ea05f97b25c Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 11 May 2022 17:19:49 +0100 Subject: siena: Make HWMON support specific for Siena Add a Siena Kconfig option and use it in stead of the sfc one. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/Kconfig | 2 +- drivers/net/ethernet/sfc/siena/Kconfig | 7 +++++++ drivers/net/ethernet/sfc/siena/mcdi.h | 6 +++--- drivers/net/ethernet/sfc/siena/mcdi_mon.c | 4 ++-- 4 files changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 4c85b26279c5..dac2f09702aa 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -40,7 +40,7 @@ config SFC_MTD (e.g. /dev/mtd1). This is required to update the firmware or the boot configuration under Linux. config SFC_MCDI_MON - bool "Solarflare SFC9000/SFC9100-family hwmon support" + bool "Solarflare SFC9100-family hwmon support" depends on SFC && HWMON && !(SFC=y && HWMON=m) default y help diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig index 26a8cb838d47..4eb6801ff3c0 100644 --- a/drivers/net/ethernet/sfc/siena/Kconfig +++ b/drivers/net/ethernet/sfc/siena/Kconfig @@ -18,6 +18,13 @@ config SFC_SIENA_MTD This exposes the on-board flash and/or EEPROM as MTD devices (e.g. /dev/mtd1). This is required to update the firmware or the boot configuration under Linux. +config SFC_SIENA_MCDI_MON + bool "Solarflare SFC9000-family hwmon support" + depends on SFC_SIENA && HWMON && !(SFC_SIENA=y && HWMON=m) + default y + help + This exposes the on-board firmware-managed sensors as a + hardware monitor device. 
config SFC_SIENA_SRIOV bool "Solarflare SFC9000-family SR-IOV support" depends on SFC_SIENA && PCI_IOV diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h index 64990f398e67..03810c570a33 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.h +++ b/drivers/net/ethernet/sfc/siena/mcdi.h @@ -118,7 +118,7 @@ struct efx_mcdi_mtd_partition { */ struct efx_mcdi_data { struct efx_mcdi_iface iface; -#ifdef CONFIG_SFC_MCDI_MON +#ifdef CONFIG_SFC_SIENA_MCDI_MON struct efx_mcdi_mon hwmon; #endif u32 fn_flags; @@ -130,7 +130,7 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) return &efx->mcdi->iface; } -#ifdef CONFIG_SFC_MCDI_MON +#ifdef CONFIG_SFC_SIENA_MCDI_MON static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) { EFX_WARN_ON_PARANOID(!efx->mcdi); @@ -365,7 +365,7 @@ void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx); enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason); int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method); -#ifdef CONFIG_SFC_MCDI_MON +#ifdef CONFIG_SFC_SIENA_MCDI_MON int efx_siena_mcdi_mon_probe(struct efx_nic *efx); void efx_siena_mcdi_mon_remove(struct efx_nic *efx); #else diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c index d0c25dfda0d7..c7ea703c5d7a 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi_mon.c +++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c @@ -130,7 +130,7 @@ void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) type, name, state_txt, value, unit); } -#ifdef CONFIG_SFC_MCDI_MON +#ifdef CONFIG_SFC_SIENA_MCDI_MON struct efx_mcdi_mon_attribute { struct device_attribute dev_attr; @@ -528,4 +528,4 @@ void efx_siena_mcdi_mon_remove(struct efx_nic *efx) efx_siena_free_buffer(efx, &hwmon->dma_buf); } -#endif /* CONFIG_SFC_MCDI_MON */ +#endif /* CONFIG_SFC_SIENA_MCDI_MON */ -- cgit v1.2.3 From 58b6b3d5379de9198c091f08e14d82e67629f96e Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 11 May 2022 17:20:01 +0100 Subject: sfc/siena: Make MCDI logging support specific for Siena Add a Siena Kconfig option and use it in stead of the sfc one. Rename the internal variable for the 'mcdi_logging_default' module parameter to avoid a naming conflict with the one in sfc.ko. Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/Kconfig | 2 +- drivers/net/ethernet/sfc/siena/Kconfig | 10 ++++++++++ drivers/net/ethernet/sfc/siena/efx_common.c | 2 +- drivers/net/ethernet/sfc/siena/efx_common.h | 2 +- drivers/net/ethernet/sfc/siena/mcdi.c | 23 ++++++++++++----------- drivers/net/ethernet/sfc/siena/mcdi.h | 2 +- 6 files changed, 26 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index dac2f09702aa..0950e6b0508f 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -55,7 +55,7 @@ config SFC_SRIOV features, allowing accelerated network performance in virtualized environments. 
config SFC_MCDI_LOGGING - bool "Solarflare SFC9000/SFC9100-family MCDI logging support" + bool "Solarflare SFC9100-family MCDI logging support" depends on SFC default y help diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig index 4eb6801ff3c0..cb3c5cb42a53 100644 --- a/drivers/net/ethernet/sfc/siena/Kconfig +++ b/drivers/net/ethernet/sfc/siena/Kconfig @@ -33,3 +33,13 @@ config SFC_SIENA_SRIOV This enables support for the Single Root I/O Virtualization features, allowing accelerated network performance in virtualized environments. +config SFC_SIENA_MCDI_LOGGING + bool "Solarflare SFC9000-family MCDI logging support" + depends on SFC_SIENA + default y + help + This enables support for tracing of MCDI (Management-Controller-to- + Driver-Interface) commands and responses, allowing debugging of + driver/firmware interaction. The tracing is actually enabled by + a sysfs file 'mcdi_logging' under the PCI device, or via module + parameter mcdi_logging_default. diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index 3aef8d216f95..a615bffcbad4 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -1170,7 +1170,7 @@ void efx_siena_fini_io(struct efx_nic *efx) pci_disable_device(efx->pci_dev); } -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING static ssize_t mcdi_logging_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/net/ethernet/sfc/siena/efx_common.h b/drivers/net/ethernet/sfc/siena/efx_common.h index 470033611436..aeb92f4e34b7 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.h +++ b/drivers/net/ethernet/sfc/siena/efx_common.h @@ -88,7 +88,7 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel) efx_schedule_channel(channel); } -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING void efx_siena_init_mcdi_logging(struct efx_nic *efx); void efx_siena_fini_mcdi_logging(struct efx_nic *efx); #else diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c index b767e29cfe92..3df0f0eca3b7 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.c +++ b/drivers/net/ethernet/sfc/siena/mcdi.c @@ -51,9 +51,10 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, static bool efx_mcdi_poll_once(struct efx_nic *efx); static void efx_mcdi_abandon(struct efx_nic *efx); -#ifdef CONFIG_SFC_MCDI_LOGGING -static bool mcdi_logging_default; -module_param(mcdi_logging_default, bool, 0644); +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING +static bool efx_siena_mcdi_logging_default; +module_param_named(mcdi_logging_default, efx_siena_mcdi_logging_default, + bool, 0644); MODULE_PARM_DESC(mcdi_logging_default, "Enable MCDI logging on newly-probed functions"); #endif @@ -70,12 +71,12 @@ int efx_siena_mcdi_init(struct efx_nic *efx) mcdi = efx_mcdi(efx); mcdi->efx = efx; -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING /* consuming code assumes buffer is page-sized */ mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL); if (!mcdi->logging_buffer) goto fail1; - mcdi->logging_enabled = mcdi_logging_default; + mcdi->logging_enabled = efx_siena_mcdi_logging_default; #endif init_waitqueue_head(&mcdi->wq); init_waitqueue_head(&mcdi->proxy_rx_wq); @@ -114,7 +115,7 @@ int efx_siena_mcdi_init(struct efx_nic *efx) return 0; fail2: -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING free_page((unsigned 
long)mcdi->logging_buffer); fail1: #endif @@ -140,7 +141,7 @@ void efx_siena_mcdi_fini(struct efx_nic *efx) if (!efx->mcdi) return; -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING free_page((unsigned long)efx->mcdi->iface.logging_buffer); #endif @@ -151,7 +152,7 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING char *buf = mcdi->logging_buffer; /* page-sized */ #endif efx_dword_t hdr[2]; @@ -198,7 +199,7 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, hdr_len = 8; } -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { int bytes = 0; int i; @@ -266,7 +267,7 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int respseq, respcmd, error; -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING char *buf = mcdi->logging_buffer; /* page-sized */ #endif efx_dword_t hdr; @@ -286,7 +287,7 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { size_t hdr_len, data_len; int bytes = 0; diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h index 03810c570a33..06f38e5e6832 100644 --- a/drivers/net/ethernet/sfc/siena/mcdi.h +++ b/drivers/net/ethernet/sfc/siena/mcdi.h @@ -80,7 +80,7 @@ struct efx_mcdi_iface { spinlock_t async_lock; struct list_head async_list; struct timer_list async_timer; -#ifdef CONFIG_SFC_MCDI_LOGGING +#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING char *logging_buffer; bool logging_enabled; #endif -- cgit v1.2.3 From ef9b5770945ddc296a68080ab7a79aedbf0b0151 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 11 May 2022 17:20:13 +0100 Subject: sfc/siena: Make PTP and reset support specific for Siena Change the clock name and work queue names to differentiate them from the names used in sfc.ko. 
Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx_common.c | 2 +- drivers/net/ethernet/sfc/siena/ptp.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index a615bffcbad4..954daf464abb 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -112,7 +112,7 @@ static struct workqueue_struct *reset_workqueue; int efx_siena_create_reset_workqueue(void) { - reset_workqueue = create_singlethread_workqueue("sfc_reset"); + reset_workqueue = create_singlethread_workqueue("sfc_siena_reset"); if (!reset_workqueue) { printk(KERN_ERR "Failed to create reset workqueue\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index 8e18da096595..7c46752e6eae 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -1422,7 +1422,7 @@ static void efx_ptp_worker(struct work_struct *work) static const struct ptp_clock_info efx_phc_clock_info = { .owner = THIS_MODULE, - .name = "sfc", + .name = "sfc_siena", .max_adj = MAX_PPB, .n_alarm = 0, .n_ext_ts = 0, @@ -1458,7 +1458,7 @@ static int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) skb_queue_head_init(&ptp->rxq); skb_queue_head_init(&ptp->txq); - ptp->workwq = create_singlethread_workqueue("sfc_ptp"); + ptp->workwq = create_singlethread_workqueue("sfc_siena_ptp"); if (!ptp->workwq) { rc = -ENOMEM; goto fail2; @@ -1502,7 +1502,8 @@ static int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) goto fail3; } else if (ptp->phc_clock) { INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); - ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); + ptp->pps_workwq = + create_singlethread_workqueue("sfc_siena_pps"); if (!ptp->pps_workwq) { rc = -ENOMEM; goto fail4; -- cgit v1.2.3 From c374303969eac5639bd9230f1c7e7390cb92cc8e Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Wed, 11 May 2022 17:20:25 +0100 Subject: sfc/siena: Reinstate SRIOV init/fini function calls They were removed in the first series since they were not used for EF10. Put that code back for Siena, with the prototypes in siena_sriov.h since that file is a more applicable place for it. 
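The reinstated calls slot into the module init/exit error unwinding under the new CONFIG_SFC_SIENA_SRIOV guard; a condensed sketch of the resulting ordering (labels and call names as in the efx.c hunks below):

#ifdef CONFIG_SFC_SIENA_SRIOV
	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;
#endif
	rc = efx_siena_create_reset_workqueue();
	if (rc)
		goto err_reset;
	/* ... pci_register_driver() ... */
 err_reset:
#ifdef CONFIG_SFC_SIENA_SRIOV
	efx_fini_sriov();
 err_sriov:
#endif
	unregister_netdevice_notifier(&efx_netdev_notifier);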
Reported-by: kernel test robot Signed-off-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx.c | 16 ++++++++++++++++ drivers/net/ethernet/sfc/siena/siena_sriov.h | 3 +++ 2 files changed, 19 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c index 01809666a3d1..63d999e63960 100644 --- a/drivers/net/ethernet/sfc/siena/efx.c +++ b/drivers/net/ethernet/sfc/siena/efx.c @@ -32,6 +32,9 @@ #include "io.h" #include "selftest.h" #include "sriov.h" +#ifdef CONFIG_SFC_SIENA_SRIOV +#include "siena_sriov.h" +#endif #include "mcdi_port_common.h" #include "mcdi_pcol.h" @@ -1271,6 +1274,12 @@ static int __init efx_init_module(void) if (rc) goto err_notifier; +#ifdef CONFIG_SFC_SIENA_SRIOV + rc = efx_init_sriov(); + if (rc) + goto err_sriov; +#endif + rc = efx_siena_create_reset_workqueue(); if (rc) goto err_reset; @@ -1284,6 +1293,10 @@ static int __init efx_init_module(void) err_pci: efx_siena_destroy_reset_workqueue(); err_reset: +#ifdef CONFIG_SFC_SIENA_SRIOV + efx_fini_sriov(); + err_sriov: +#endif unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: return rc; @@ -1295,6 +1308,9 @@ static void __exit efx_exit_module(void) pci_unregister_driver(&efx_pci_driver); efx_siena_destroy_reset_workqueue(); +#ifdef CONFIG_SFC_SIENA_SRIOV + efx_fini_sriov(); +#endif unregister_netdevice_notifier(&efx_netdev_notifier); } diff --git a/drivers/net/ethernet/sfc/siena/siena_sriov.h b/drivers/net/ethernet/sfc/siena/siena_sriov.h index 69a7a18e9ba0..50f6e924495e 100644 --- a/drivers/net/ethernet/sfc/siena/siena_sriov.h +++ b/drivers/net/ethernet/sfc/siena/siena_sriov.h @@ -60,6 +60,9 @@ static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return efx->vf_init_count != 0; } + +int efx_init_sriov(void); +void efx_fini_sriov(void); #else /* !CONFIG_SFC_SIENA_SRIOV */ static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { -- cgit v1.2.3 From ad732da434a2936128769216eddaece3b1af4588 Mon Sep 17 00:00:00 2001 From: Dongliang Mu Date: Wed, 11 May 2022 09:44:52 +0800 Subject: rtlwifi: Use pr_warn instead of WARN_ONCE This memory allocation failure can be triggered by fault injection or high pressure testing, resulting a WARN. Fix this by replacing WARN with pr_warn. 
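An allocation failure at probe time is an expected runtime condition rather than a driver bug, and under panic_on_warn (as commonly enabled by syzkaller) the old WARN_ONCE would turn it into a crash. The fixed probe path simply logs and propagates the error, as in the hunk below:

	hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
				sizeof(struct rtl_usb_priv), &rtl_ops);
	if (!hw) {
		pr_warn("rtl_usb: ieee80211 alloc failed\n");
		return -ENOMEM;
	}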
Reported-by: syzkaller Signed-off-by: Dongliang Mu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220511014453.1621366-1-dzm91@hust.edu.cn --- drivers/net/wireless/realtek/rtlwifi/usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 86a236873254..a8eebafb9a7e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1014,7 +1014,7 @@ int rtl_usb_probe(struct usb_interface *intf, hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) + sizeof(struct rtl_usb_priv), &rtl_ops); if (!hw) { - WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n"); + pr_warn("rtl_usb: ieee80211 alloc failed\n"); return -ENOMEM; } rtlpriv = hw->priv; -- cgit v1.2.3 From 96c777708bcac53f73a1c079e416495647f69553 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 19 Mar 2022 08:08:26 +0100 Subject: mt76: mt7915: fix DBDC default band selection on MT7915D This code was accidentally dropped while adding 6 GHz support Fixes: b4d093e321bd ("mt76: mt7915: add 6 GHz support") Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index 5b133bcdab17..4b1a9811646f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -152,6 +152,8 @@ static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy) phy->mt76->cap.has_2ghz = true; return; } + } else if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support) { + val = phy->band_idx ? 
MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ; } switch (val) { -- cgit v1.2.3 From 7b8e1ae886e45aed9274048f2836a70b75ecfa40 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 25 Mar 2022 14:20:06 +0100 Subject: mt76: mt7915: rework hardware/phy initialization Clean up and fix error paths in mt7915_register_device Initialize second DBDC tx queue in mt7915_dma_init Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/dma.c | 14 ++- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 104 +++++++++++++-------- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 3 +- 3 files changed, 78 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index 75678aabff32..dad5225e223e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -5,7 +5,8 @@ #include "../dma.h" #include "mac.h" -int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base) +static int +mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base) { int i, err; @@ -323,7 +324,7 @@ static int mt7915_dma_enable(struct mt7915_dev *dev) return 0; } -int mt7915_dma_init(struct mt7915_dev *dev) +int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) { struct mt76_dev *mdev = &dev->mt76; u32 hif1_ofs = 0; @@ -346,6 +347,15 @@ int mt7915_dma_init(struct mt7915_dev *dev) if (ret) return ret; + if (phy2) { + ret = mt7915_init_tx_queues(phy2, + MT_TXQ_ID(phy2->band_idx), + MT7915_TX_RING_SIZE, + MT_TXQ_RING_BASE(1)); + if (ret) + return ret; + } + /* command to WM */ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_MCUQ_ID(MT_MCUQ_WM), diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 6d29366c5139..9686a835d5cb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -484,21 +484,18 @@ static int mt7915_txbf_init(struct mt7915_dev *dev) return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE); } -static int mt7915_register_ext_phy(struct mt7915_dev *dev) +static struct mt7915_phy * +mt7915_alloc_ext_phy(struct mt7915_dev *dev) { - struct mt7915_phy *phy = mt7915_ext_phy(dev); + struct mt7915_phy *phy; struct mt76_phy *mphy; - int ret; if (!dev->dbdc_support) - return 0; - - if (phy) - return 0; + return NULL; mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops); if (!mphy) - return -ENOMEM; + return ERR_PTR(-ENOMEM); phy = mphy->priv; phy->dev = dev; @@ -507,6 +504,15 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev) /* Bind main phy to band0 and ext_phy to band1 for dbdc case */ phy->band_idx = 1; + return phy; +} + +static int +mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy) +{ + struct mt76_phy *mphy = phy->mt76; + int ret; + INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work); mt7915_eeprom_parse_hw_cap(dev, phy); @@ -526,29 +532,22 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev) /* init wiphy according to mphy and phy */ mt7915_init_wiphy(mphy->hw); - ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(phy->band_idx), - MT7915_TX_RING_SIZE, - MT_TXQ_RING_BASE(1)); - if (ret) - goto error; ret = mt76_register_phy(mphy, true, mt76_rates, ARRAY_SIZE(mt76_rates)); if (ret) - goto error; + return ret; ret = mt7915_thermal_init(phy); if (ret) - goto error; + goto unreg; - ret = mt7915_init_debugfs(phy); - if (ret) - 
goto error; + mt7915_init_debugfs(phy); return 0; -error: - ieee80211_free_hw(mphy->hw); +unreg: + mt76_unregister_phy(mphy); return ret; } @@ -645,7 +644,8 @@ static bool mt7915_band_config(struct mt7915_dev *dev) return ret; } -static int mt7915_init_hardware(struct mt7915_dev *dev) +static int +mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2) { int ret, idx; @@ -653,14 +653,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) INIT_WORK(&dev->init_work, mt7915_init_work); - dev->dbdc_support = mt7915_band_config(dev); - /* If MCU was already running, it is likely in a bad state */ if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) > FW_STATE_FW_DOWNLOAD) mt7915_wfsys_reset(dev); - ret = mt7915_dma_init(dev); + ret = mt7915_dma_init(dev, phy2); if (ret) return ret; @@ -1048,9 +1046,22 @@ static void mt7915_unregister_ext_phy(struct mt7915_dev *dev) ieee80211_free_hw(mphy->hw); } +static void mt7915_stop_hardware(struct mt7915_dev *dev) +{ + mt7915_mcu_exit(dev); + mt7915_tx_token_put(dev); + mt7915_dma_cleanup(dev); + tasklet_disable(&dev->irq_tasklet); + + if (is_mt7986(&dev->mt76)) + mt7986_wmac_disable(dev); +} + + int mt7915_register_device(struct mt7915_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); + struct mt7915_phy *phy2; int ret; dev->phy.dev = dev; @@ -1066,9 +1077,15 @@ int mt7915_register_device(struct mt7915_dev *dev) init_waitqueue_head(&dev->reset_wait); INIT_WORK(&dev->reset_work, mt7915_mac_reset_work); - ret = mt7915_init_hardware(dev); + dev->dbdc_support = mt7915_band_config(dev); + + phy2 = mt7915_alloc_ext_phy(dev); + if (IS_ERR(phy2)) + return PTR_ERR(phy2); + + ret = mt7915_init_hardware(dev, phy2); if (ret) - return ret; + goto free_phy2; mt7915_init_wiphy(hw); @@ -1085,19 +1102,34 @@ int mt7915_register_device(struct mt7915_dev *dev) ret = mt76_register_device(&dev->mt76, true, mt76_rates, ARRAY_SIZE(mt76_rates)); if (ret) - return ret; + goto stop_hw; ret = mt7915_thermal_init(&dev->phy); if (ret) - return ret; + goto unreg_dev; ieee80211_queue_work(mt76_hw(dev), &dev->init_work); - ret = mt7915_register_ext_phy(dev); - if (ret) - return ret; + if (phy2) { + ret = mt7915_register_ext_phy(dev, phy2); + if (ret) + goto unreg_thermal; + } - return mt7915_init_debugfs(&dev->phy); + mt7915_init_debugfs(&dev->phy); + + return 0; + +unreg_thermal: + mt7915_unregister_thermal(&dev->phy); +unreg_dev: + mt76_unregister_device(&dev->mt76); +stop_hw: + mt7915_stop_hardware(dev); +free_phy2: + if (phy2) + ieee80211_free_hw(phy2->mt76->hw); + return ret; } void mt7915_unregister_device(struct mt7915_dev *dev) @@ -1105,13 +1137,7 @@ void mt7915_unregister_device(struct mt7915_dev *dev) mt7915_unregister_ext_phy(dev); mt7915_unregister_thermal(&dev->phy); mt76_unregister_device(&dev->mt76); - mt7915_mcu_exit(dev); - mt7915_tx_token_put(dev); - mt7915_dma_cleanup(dev); - tasklet_disable(&dev->irq_tasklet); - - if (is_mt7986(&dev->mt76)) - mt7986_wmac_disable(dev); + mt7915_stop_hardware(dev); mt76_free_device(&dev->mt76); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 6efa0a2e2345..fd05e07bf9b5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -440,7 +440,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, struct ieee80211_channel *chan, u8 chain_idx); s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band); -int mt7915_dma_init(struct mt7915_dev *dev); +int 
mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2); void mt7915_dma_prefetch(struct mt7915_dev *dev); void mt7915_dma_cleanup(struct mt7915_dev *dev); int mt7915_mcu_init(struct mt7915_dev *dev); @@ -572,7 +572,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct mt76_tx_info *tx_info); void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); void mt7915_tx_token_put(struct mt7915_dev *dev); -int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base); void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb); bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len); -- cgit v1.2.3 From 9912a4639d1a0b7f2a7df269bf07069590b98444 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 19 Mar 2022 21:40:14 +0100 Subject: mt76: reduce tx queue lock hold time - call txq dequeue without holding txq lock (locking handled by mac80211) - disable bh around tx queue schedule Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/tx.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 6b8c9dc80542..830963a65b34 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -463,7 +463,9 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, ieee80211_get_tx_rates(txq->vif, txq->sta, skb, info->control.rates, 1); + spin_lock(&q->lock); idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); + spin_unlock(&q->lock); if (idx < 0) return idx; @@ -483,14 +485,18 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, ieee80211_get_tx_rates(txq->vif, txq->sta, skb, info->control.rates, 1); + spin_lock(&q->lock); idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); + spin_unlock(&q->lock); if (idx < 0) break; n_frames++; } while (1); + spin_lock(&q->lock); dev->queue_ops->kick(dev, q); + spin_unlock(&q->lock); return n_frames; } @@ -525,8 +531,6 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags)) continue; - spin_lock_bh(&q->lock); - if (mtxq->send_bar && mtxq->aggr) { struct ieee80211_txq *txq = mtxq_to_txq(mtxq); struct ieee80211_sta *sta = txq->sta; @@ -535,16 +539,12 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) u8 tid = txq->tid; mtxq->send_bar = false; - spin_unlock_bh(&q->lock); ieee80211_send_bar(vif, sta->addr, tid, agg_ssn); - spin_lock_bh(&q->lock); } if (!mt76_txq_stopped(q)) n_frames = mt76_txq_send_burst(phy, q, mtxq); - spin_unlock_bh(&q->lock); - ieee80211_return_txq(phy->hw, txq, false); if (unlikely(n_frames < 0)) @@ -563,6 +563,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) if (qid >= 4) return; + local_bh_disable(); rcu_read_lock(); do { @@ -572,6 +573,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) } while (len > 0); rcu_read_unlock(); + local_bh_enable(); } EXPORT_SYMBOL_GPL(mt76_txq_schedule); -- cgit v1.2.3 From 402e01092e79583923579662f244bc538f466f36 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 19 Mar 2022 21:56:20 +0100 Subject: mt76: dma: use kzalloc instead of devm_kzalloc for txwi dma unmap is already needed for cleanup anyway, so we don't need the extra tracking and can save a bit of memory here Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/dma.c | 
6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 02daeefb0761..09dc37bbf112 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -16,7 +16,7 @@ mt76_alloc_txwi(struct mt76_dev *dev) int size; size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); - txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC); + txwi = kzalloc(size, GFP_ATOMIC); if (!txwi) return NULL; @@ -73,9 +73,11 @@ mt76_free_pending_txwi(struct mt76_dev *dev) struct mt76_txwi_cache *t; local_bh_disable(); - while ((t = __mt76_get_txwi(dev)) != NULL) + while ((t = __mt76_get_txwi(dev)) != NULL) { dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); + kfree(mt76_get_txwi_ptr(dev, t)); + } local_bh_enable(); } -- cgit v1.2.3 From 77045a3740fa3d2325293cf8623899532b39303e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 25 Mar 2022 21:14:26 +0100 Subject: mt76: mt7915: accept rx frames with non-standard VHT MCS10-11 The hardware receives them properly, they should not be dropped Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index bab70cf981bb..957895fa73c0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -521,7 +521,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev, status->encoding = RX_ENC_VHT; if (gi) status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (i > 9) + if (i > 11) return -EINVAL; break; case MT_PHY_TYPE_HE_MU: -- cgit v1.2.3 From 3128ea016965ce9f91ddf4e1dd944724462d1698 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 25 Mar 2022 21:15:15 +0100 Subject: mt76: mt7921: accept rx frames with non-standard VHT MCS10-11 The hardware receives them properly, they should not be dropped Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index b67615487910..42d2795c4714 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -696,7 +696,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) status->nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1; status->encoding = RX_ENC_VHT; - if (i > 9) + if (i > 11) return -EINVAL; break; case MT_PHY_TYPE_HE_MU: -- cgit v1.2.3 From 51fb1278aa57ae0fc54adaa786e1965362bed4fb Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 25 Mar 2022 22:01:43 +0100 Subject: mt76: fix use-after-free by removing a non-RCU wcid pointer Fixes an issue caught by KASAN about use-after-free in mt76_txq_schedule by protecting mtxq->wcid with rcu_lock between mt76_txq_schedule and sta_info_[alloc, free]. 
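Concretely, struct mt76_txq stops caching the struct mt76_wcid pointer and keeps only the station index, which the tx scheduler resolves under RCU; a condensed sketch of the pattern (types and calls as in the diff below):

	struct mt76_txq {
		u16 wcid;	/* index into dev->wcid[], not a raw pointer */
		/* ... */
	};

	/* mt76_txq_schedule() already holds rcu_read_lock() here */
	wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
	if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		continue;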
[18853.876689] ================================================================== [18853.876751] BUG: KASAN: use-after-free in mt76_txq_schedule+0x204/0xaf8 [mt76] [18853.876773] Read of size 8 at addr ffffffaf989a2138 by task mt76-tx phy0/883 [18853.876786] [18853.876810] CPU: 5 PID: 883 Comm: mt76-tx phy0 Not tainted 5.10.100-fix-510-56778d365941-kasan #5 0b01fbbcf41a530f52043508fec2e31a4215 [18853.876840] Call trace: [18853.876861] dump_backtrace+0x0/0x3ec [18853.876878] show_stack+0x20/0x2c [18853.876899] dump_stack+0x11c/0x1ac [18853.876918] print_address_description+0x74/0x514 [18853.876934] kasan_report+0x134/0x174 [18853.876948] __asan_report_load8_noabort+0x44/0x50 [18853.876976] mt76_txq_schedule+0x204/0xaf8 [mt76 074e03e4640e97fe7405ee1fab547b81c4fa45d2] [18853.877002] mt76_txq_schedule_all+0x2c/0x48 [mt76 074e03e4640e97fe7405ee1fab547b81c4fa45d2] [18853.877030] mt7921_tx_worker+0xa0/0x1cc [mt7921_common f0875ebac9d7b4754e1010549e7db50fbd90a047] [18853.877054] __mt76_worker_fn+0x190/0x22c [mt76 074e03e4640e97fe7405ee1fab547b81c4fa45d2] [18853.877071] kthread+0x2f8/0x3b8 [18853.877087] ret_from_fork+0x10/0x30 [18853.877098] [18853.877112] Allocated by task 941: [18853.877131] kasan_save_stack+0x38/0x68 [18853.877147] __kasan_kmalloc+0xd4/0xfc [18853.877163] kasan_kmalloc+0x10/0x1c [18853.877177] __kmalloc+0x264/0x3c4 [18853.877294] sta_info_alloc+0x460/0xf88 [mac80211] [18853.877410] ieee80211_prep_connection+0x204/0x1ee0 [mac80211] [18853.877523] ieee80211_mgd_auth+0x6c4/0xa4c [mac80211] [18853.877635] ieee80211_auth+0x20/0x2c [mac80211] [18853.877733] rdev_auth+0x7c/0x438 [cfg80211] [18853.877826] cfg80211_mlme_auth+0x26c/0x390 [cfg80211] [18853.877919] nl80211_authenticate+0x6d4/0x904 [cfg80211] [18853.877938] genl_rcv_msg+0x748/0x93c [18853.877954] netlink_rcv_skb+0x160/0x2a8 [18853.877969] genl_rcv+0x3c/0x54 [18853.877985] netlink_unicast_kernel+0x104/0x1ec [18853.877999] netlink_unicast+0x178/0x268 [18853.878015] netlink_sendmsg+0x3cc/0x5f0 [18853.878030] sock_sendmsg+0xb4/0xd8 [18853.878043] ____sys_sendmsg+0x2f8/0x53c [18853.878058] ___sys_sendmsg+0xe8/0x150 [18853.878071] __sys_sendmsg+0xc4/0x1f4 [18853.878087] __arm64_compat_sys_sendmsg+0x88/0x9c [18853.878101] el0_svc_common+0x1b4/0x390 [18853.878115] do_el0_svc_compat+0x8c/0xdc [18853.878131] el0_svc_compat+0x10/0x1c [18853.878146] el0_sync_compat_handler+0xa8/0xcc [18853.878161] el0_sync_compat+0x188/0x1c0 [18853.878171] [18853.878183] Freed by task 10927: [18853.878200] kasan_save_stack+0x38/0x68 [18853.878215] kasan_set_track+0x28/0x3c [18853.878228] kasan_set_free_info+0x24/0x48 [18853.878244] __kasan_slab_free+0x11c/0x154 [18853.878259] kasan_slab_free+0x14/0x24 [18853.878273] slab_free_freelist_hook+0xac/0x1b0 [18853.878287] kfree+0x104/0x390 [18853.878402] sta_info_free+0x198/0x210 [mac80211] [18853.878515] __sta_info_destroy_part2+0x230/0x2d4 [mac80211] [18853.878628] __sta_info_flush+0x300/0x37c [mac80211] [18853.878740] ieee80211_set_disassoc+0x2cc/0xa7c [mac80211] [18853.878851] ieee80211_mgd_deauth+0x4a4/0x10a0 [mac80211] [18853.878962] ieee80211_deauth+0x20/0x2c [mac80211] [18853.879057] rdev_deauth+0x7c/0x438 [cfg80211] [18853.879150] cfg80211_mlme_deauth+0x274/0x414 [cfg80211] [18853.879243] cfg80211_mlme_down+0xe4/0x118 [cfg80211] [18853.879335] cfg80211_disconnect+0x218/0x2d8 [cfg80211] [18853.879427] __cfg80211_leave+0x17c/0x240 [cfg80211] [18853.879519] cfg80211_leave+0x3c/0x58 [cfg80211] [18853.879611] wiphy_suspend+0xdc/0x200 [cfg80211] [18853.879628] dpm_run_callback+0x58/0x408 [18853.879642] 
__device_suspend+0x4cc/0x864 [18853.879658] async_suspend+0x34/0xf4 [18853.879673] async_run_entry_fn+0xe0/0x37c [18853.879689] process_one_work+0x508/0xb98 [18853.879702] worker_thread+0x7f4/0xcd4 [18853.879717] kthread+0x2f8/0x3b8 [18853.879731] ret_from_fork+0x10/0x30 [18853.879741] [18853.879757] The buggy address belongs to the object at ffffffaf989a2000 [18853.879757] which belongs to the cache kmalloc-8k of size 8192 [18853.879774] The buggy address is located 312 bytes inside of [18853.879774] 8192-byte region [ffffffaf989a2000, ffffffaf989a4000) [18853.879787] The buggy address belongs to the page: [18853.879807] page:000000004bda2a59 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x1d89a0 [18853.879823] head:000000004bda2a59 order:3 compound_mapcount:0 compound_pincount:0 [18853.879839] flags: 0x8000000000010200(slab|head) [18853.879857] raw: 8000000000010200 ffffffffbc89e208 ffffffffb7fb5208 ffffffaec000cc80 [18853.879873] raw: 0000000000000000 0000000000010001 00000001ffffffff 0000000000000000 [18853.879885] page dumped because: kasan: bad access detected [18853.879896] [18853.879907] Memory state around the buggy address: [18853.879922] ffffffaf989a2000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [18853.879935] ffffffaf989a2080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [18853.879948] >ffffffaf989a2100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [18853.879961] ^ [18853.879973] ffffffaf989a2180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [18853.879986] ffffffaf989a2200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [18853.879998] ================================================================== Cc: stable@vger.kernel.org Reported-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 2 +- drivers/net/wireless/mediatek/mt76/mt76.h | 2 +- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 2 +- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 4 +++- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 2 +- drivers/net/wireless/mediatek/mt76/tx.c | 9 ++++----- 8 files changed, 13 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 5b53d008eb66..026ab1e16d45 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1303,7 +1303,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif, continue; mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; - mtxq->wcid = wcid; + mtxq->wcid = wcid->idx; } ewma_signal_init(&wcid->rssi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 882fb5d2517f..522c523d5c41 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -275,7 +275,7 @@ struct mt76_wcid { }; struct mt76_txq { - struct mt76_wcid *wcid; + u16 wcid; u16 agg_ssn; bool send_bar; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 83c5eec5b163..1d098e9799dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -75,7 +75,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr); mtxq = (struct mt76_txq *)vif->txq->drv_priv; - 
mtxq->wcid = &mvif->sta.wcid; + mtxq->wcid = idx; rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); out: diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index d79cbdbd5a05..6b8e3e7ae4a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -234,7 +234,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; - mtxq->wcid = &mvif->sta.wcid; + mtxq->wcid = idx; } ret = mt7615_mcu_add_dev_info(phy, vif, true); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index dd30f537676d..be1d27de993a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -292,7 +292,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, mt76_packet_id_init(&mvif->group_wcid); mtxq = (struct mt76_txq *)vif->txq->drv_priv; - mtxq->wcid = &mvif->group_wcid; + rcu_assign_pointer(dev->mt76.wcid[MT_VIF_WCID(idx)], &mvif->group_wcid); + mtxq->wcid = MT_VIF_WCID(idx); } int @@ -345,6 +346,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; dev->mt76.vif_mask &= ~BIT(mvif->idx); + rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL); mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid); } EXPORT_SYMBOL_GPL(mt76x02_remove_interface); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index c3f44d801e7f..187cf4ccd36e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -246,7 +246,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; - mtxq->wcid = &mvif->sta.wcid; + mtxq->wcid = idx; } if (vif->type != NL80211_IFTYPE_AP && diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index fdaf2451bc1d..2173c3e9723f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -330,7 +330,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; - mtxq->wcid = &mvif->sta.wcid; + mtxq->wcid = idx; } out: diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 830963a65b34..c3be62f58b62 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -436,12 +436,11 @@ mt76_txq_stopped(struct mt76_queue *q) static int mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, - struct mt76_txq *mtxq) + struct mt76_txq *mtxq, struct mt76_wcid *wcid) { struct mt76_dev *dev = phy->dev; struct ieee80211_txq *txq = mtxq_to_txq(mtxq); enum mt76_txq_id qid = mt76_txq_get_qid(txq); - struct mt76_wcid *wcid = mtxq->wcid; struct ieee80211_tx_info *info; struct sk_buff *skb; int n_frames = 1; @@ -527,8 +526,8 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) break; mtxq = (struct mt76_txq *)txq->drv_priv; - wcid = mtxq->wcid; - if (wcid && test_bit(MT_WCID_FLAG_PS, 
&wcid->flags)) + wcid = rcu_dereference(dev->wcid[mtxq->wcid]); + if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags)) continue; if (mtxq->send_bar && mtxq->aggr) { @@ -543,7 +542,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) } if (!mt76_txq_stopped(q)) - n_frames = mt76_txq_send_burst(phy, q, mtxq); + n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid); ieee80211_return_txq(phy->hw, txq, false); -- cgit v1.2.3 From b619e01380eedf24e8d26a367e94e0ccaeb0c3dd Mon Sep 17 00:00:00 2001 From: Evelyn Tsai Date: Thu, 17 Mar 2022 16:21:50 +0800 Subject: mt76: fix MBSS index condition in DBDC mode MT7915_MAX_INTERFACES is per-band declaration in MT7915/MT7986/MT7916. Enlarge vif_mask to 64 bits wide, including the bit operation. Reviewed-by: Shayne Chen Signed-off-by: Evelyn Tsai Signed-off-by: Bo Jiao Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76.h | 2 +- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 8 ++++---- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 6 +++--- 6 files changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 522c523d5c41..62131010a0bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -727,7 +727,7 @@ struct mt76_dev { u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)]; u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)]; - u32 vif_mask; + u64 vif_mask; struct mt76_wcid global_wcid; struct mt76_wcid __rcu *wcid[MT76_N_WCIDS]; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 1d098e9799dd..91425b454cae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mutex_lock(&dev->mt76.mutex); - mvif->idx = ffs(~dev->mt76.vif_mask) - 1; + mvif->idx = __ffs64(~dev->mt76.vif_mask); if (mvif->idx >= MT7603_MAX_INTERFACES) { ret = -ENOSPC; goto out; @@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } idx = MT7603_WTBL_RESERVED - 1 - mvif->idx; - dev->mt76.vif_mask |= BIT(mvif->idx); + dev->mt76.vif_mask |= BIT_ULL(mvif->idx); INIT_LIST_HEAD(&mvif->sta.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.hw_key_idx = -1; @@ -106,7 +106,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) spin_unlock_bh(&dev->sta_poll_lock); mutex_lock(&dev->mt76.mutex); - dev->mt76.vif_mask &= ~BIT(mvif->idx); + dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); mutex_unlock(&dev->mt76.mutex); mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 6b8e3e7ae4a2..a9c9b97d173e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -194,7 +194,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, is_zero_ether_addr(vif->addr)) phy->monitor_vif = vif; - mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1; + mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); if (mvif->mt76.idx >= MT7615_MAX_INTERFACES) { ret = -ENOSPC; goto out; @@ -212,7 +212,7 @@ static int 
mt7615_add_interface(struct ieee80211_hw *hw, if (ext_phy) mvif->mt76.wmm_idx += 2; - dev->mt76.vif_mask |= BIT(mvif->mt76.idx); + dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); dev->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); @@ -268,7 +268,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, rcu_assign_pointer(dev->mt76.wcid[idx], NULL); - dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx); + dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx); dev->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index be1d27de993a..5bd0a0bae688 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -328,11 +328,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) idx += 8; /* vif is already set or idx is 8 for AP/Mesh/... */ - if (dev->mt76.vif_mask & BIT(idx) || + if (dev->mt76.vif_mask & BIT_ULL(idx) || (vif->type != NL80211_IFTYPE_STATION && idx > 7)) return -EBUSY; - dev->mt76.vif_mask |= BIT(idx); + dev->mt76.vif_mask |= BIT_ULL(idx); mt76x02_vif_init(dev, vif, idx); return 0; @@ -345,7 +345,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, struct mt76x02_dev *dev = hw->priv; struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; - dev->mt76.vif_mask &= ~BIT(mvif->idx); + dev->mt76.vif_mask &= ~BIT_ULL(mvif->idx); rcu_assign_pointer(dev->mt76.wcid[mvif->group_wcid.idx], NULL); mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 187cf4ccd36e..865694c755cf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -204,8 +204,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, is_zero_ether_addr(vif->addr)) phy->monitor_vif = vif; - mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1; - if (mvif->mt76.idx >= MT7915_MAX_INTERFACES) { + mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); + if (mvif->mt76.idx >= (MT7915_MAX_INTERFACES << dev->dbdc_support)) { ret = -ENOSPC; goto out; } @@ -227,7 +227,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, if (ret) goto out; - dev->mt76.vif_mask |= BIT(mvif->mt76.idx); + dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); idx = MT7915_WTBL_RESERVED - mvif->mt76.idx; @@ -290,7 +290,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, rcu_assign_pointer(dev->mt76.wcid[idx], NULL); mutex_lock(&dev->mt76.mutex); - dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx); + dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx); phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mutex_unlock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 2173c3e9723f..775524f36911 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -294,7 +294,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, mt7921_mutex_acquire(dev); - mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1; + mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) { ret = -ENOSPC; goto out; @@ -310,7 +310,7 @@ static int mt7921_add_interface(struct ieee80211_hw *hw, if (ret) goto 
out; - dev->mt76.vif_mask |= BIT(mvif->mt76.idx); + dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); idx = MT7921_WTBL_RESERVED - mvif->mt76.idx; @@ -354,7 +354,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw, rcu_assign_pointer(dev->mt76.wcid[idx], NULL); - dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx); + dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx); phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mt7921_mutex_release(dev); -- cgit v1.2.3 From df3e4143ba8ac159a0a55e84b616167219b7940b Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 17 Mar 2022 17:55:59 +0100 Subject: mt76: mt7921u: add suspend/resume support Introduce suspend/resume callbacks for mt7921u driver. Tested-by: Deren Wu Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h | 2 +- drivers/net/wireless/mediatek/mt76/mt7921/regs.h | 5 ++ drivers/net/wireless/mediatek/mt76/mt7921/usb.c | 62 +++++++++++++++++++++- .../net/wireless/mediatek/mt76/mt7921/usb_mac.c | 7 ++- 4 files changed, 72 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 7690364bc079..459226c5bb11 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -467,7 +467,7 @@ bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); int mt7921u_mcu_power_on(struct mt7921_dev *dev); int mt7921u_wfsys_reset(struct mt7921_dev *dev); -int mt7921u_dma_init(struct mt7921_dev *dev); +int mt7921u_dma_init(struct mt7921_dev *dev, bool resume); int mt7921u_init_reset(struct mt7921_dev *dev); int mt7921u_mac_reset(struct mt7921_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h index 6712ff60c722..ea643260ceb6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h @@ -516,4 +516,9 @@ #define MT_TOP_MISC2_FW_PWR_ON BIT(0) #define MT_TOP_MISC2_FW_N9_RDY GENMASK(1, 0) +#define MT_WF_SW_DEF_CR(ofs) (0x401a00 + (ofs)) +#define MT_WF_SW_DEF_CR_USB_MCU_EVENT MT_WF_SW_DEF_CR(0x028) +#define MT_WF_SW_SER_TRIGGER_SUSPEND BIT(6) +#define MT_WF_SW_SER_DONE_SUSPEND BIT(7) + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c index b7771e9f1fcd..dc38baef273a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c @@ -246,7 +246,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf, if (ret) goto error; - ret = mt7921u_dma_init(dev); + ret = mt7921u_dma_init(dev, false); if (ret) return ret; @@ -288,6 +288,61 @@ static void mt7921u_disconnect(struct usb_interface *usb_intf) mt76_free_device(&dev->mt76); } +#ifdef CONFIG_PM +static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state) +{ + struct mt7921_dev *dev = usb_get_intfdata(intf); + int err; + + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); + if (err) + return err; + + mt76u_stop_rx(&dev->mt76); + mt76u_stop_tx(&dev->mt76); + + set_bit(MT76_STATE_SUSPEND, &dev->mphy.state); + + return 0; +} + +static int mt7921u_resume(struct usb_interface *intf) +{ + struct mt7921_dev *dev = usb_get_intfdata(intf); + bool reinit = true; + int err, i; + + for (i = 0; i < 10; i++) { + u32 val = mt76_rr(dev, 
MT_WF_SW_DEF_CR_USB_MCU_EVENT); + + if (!(val & MT_WF_SW_SER_TRIGGER_SUSPEND)) { + reinit = false; + break; + } + if (val & MT_WF_SW_SER_DONE_SUSPEND) { + mt76_wr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT, 0); + break; + } + + msleep(20); + } + + if (reinit || mt7921_dma_need_reinit(dev)) { + err = mt7921u_dma_init(dev, true); + if (err) + return err; + } + + clear_bit(MT76_STATE_SUSPEND, &dev->mphy.state); + + err = mt76u_resume_rx(&dev->mt76); + if (err < 0) + return err; + + return mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); +} +#endif /* CONFIG_PM */ + MODULE_DEVICE_TABLE(usb, mt7921u_device_table); MODULE_FIRMWARE(MT7921_FIRMWARE_WM); MODULE_FIRMWARE(MT7921_ROM_PATCH); @@ -297,6 +352,11 @@ static struct usb_driver mt7921u_driver = { .id_table = mt7921u_device_table, .probe = mt7921u_probe, .disconnect = mt7921u_disconnect, +#ifdef CONFIG_PM + .suspend = mt7921u_suspend, + .resume = mt7921u_resume, + .reset_resume = mt7921u_resume, +#endif /* CONFIG_PM */ .soft_unbind = 1, .disable_hub_initiated_lpm = 1, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c index 99bcbd858b65..cd2f09743d2f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c @@ -121,7 +121,7 @@ static void mt7921u_epctl_rst_opt(struct mt7921_dev *dev, bool reset) mt7921u_uhw_wr(&dev->mt76, MT_SSUSB_EPCTL_CSR_EP_RST_OPT, val); } -int mt7921u_dma_init(struct mt7921_dev *dev) +int mt7921u_dma_init(struct mt7921_dev *dev, bool resume) { int err; @@ -136,6 +136,9 @@ int mt7921u_dma_init(struct mt7921_dev *dev) MT_WL_RX_AGG_TO | MT_WL_RX_AGG_LMT); mt76_clear(dev, MT_UDMA_WLCFG_1, MT_WL_RX_AGG_PKT_LMT); + if (resume) + return 0; + err = mt7921u_dma_rx_evt_ep4(dev); if (err) return err; @@ -221,7 +224,7 @@ int mt7921u_mac_reset(struct mt7921_dev *dev) if (err) goto out; - err = mt7921u_dma_init(dev); + err = mt7921u_dma_init(dev, false); if (err) goto out; -- cgit v1.2.3 From 5e0abf6f4903d71222f4c7498ca0e271833b3694 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 17 Mar 2022 18:02:22 +0100 Subject: mt76: mt7921: rely on mt76_dev rxfilter in mt7921_configure_filter mt7921 is currently using rxfilter defined in mt76_dev for rx filter configuration. Fix mt7921_configure_filter implementation. 
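As background for the filter rework in this patch, here is a minimal standalone sketch of the flag-mapping idea behind the driver's MT76_FILTER() macro. The FIF_*/HW_DROP_* constants and bit positions below are invented for illustration only; they are not the real mac80211 or MT76 values.

#include <stdio.h>

#define FIF_FCSFAIL	(1U << 0)
#define FIF_CONTROL	(1U << 1)

#define HW_DROP_FCSFAIL	(1U << 4)
#define HW_DROP_CTL	(1U << 9)

/* Mirror of the MT76_FILTER() idea: when mac80211 asks for a frame type
 * (flag set in total_flags), clear the matching hardware DROP bit;
 * otherwise set it so the hardware filters those frames out. */
#define APPLY_FILTER(_flag, _hw) do {				\
		flags |= total_flags & FIF_##_flag;		\
		rxfilter &= ~(_hw);				\
		rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
	} while (0)

int main(void)
{
	unsigned int total_flags = FIF_CONTROL;	/* mac80211 wants control frames */
	unsigned int flags = 0, rxfilter = 0;

	APPLY_FILTER(FCSFAIL, HW_DROP_FCSFAIL);
	APPLY_FILTER(CONTROL, HW_DROP_CTL);

	printf("rxfilter=0x%x accepted flags=0x%x\n", rxfilter, flags);
	return 0;
}

Each requested FIF_* flag clears the matching hardware drop bit; anything not requested keeps its drop bit set. The patch only changes where that rxfilter word is stored.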
Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 29 +++++++++++----------- drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h | 1 - 2 files changed, 14 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 775524f36911..1783866c3daf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -566,7 +566,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, u64 multicast) { struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_phy *phy = mt7921_hw_phy(hw); u32 ctl_flags = MT_WF_RFCR1_DROP_ACK | MT_WF_RFCR1_DROP_BF_POLL | MT_WF_RFCR1_DROP_BA | @@ -576,23 +575,23 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, #define MT76_FILTER(_flag, _hw) do { \ flags |= *total_flags & FIF_##_flag; \ - phy->rxfilter &= ~(_hw); \ - phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + dev->mt76.rxfilter &= ~(_hw); \ + dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \ } while (0) mt7921_mutex_acquire(dev); - phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS | - MT_WF_RFCR_DROP_OTHER_BEACON | - MT_WF_RFCR_DROP_FRAME_REPORT | - MT_WF_RFCR_DROP_PROBEREQ | - MT_WF_RFCR_DROP_MCAST_FILTERED | - MT_WF_RFCR_DROP_MCAST | - MT_WF_RFCR_DROP_BCAST | - MT_WF_RFCR_DROP_DUPLICATE | - MT_WF_RFCR_DROP_A2_BSSID | - MT_WF_RFCR_DROP_UNWANTED_CTL | - MT_WF_RFCR_DROP_STBC_MULTI); + dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS | + MT_WF_RFCR_DROP_OTHER_BEACON | + MT_WF_RFCR_DROP_FRAME_REPORT | + MT_WF_RFCR_DROP_PROBEREQ | + MT_WF_RFCR_DROP_MCAST_FILTERED | + MT_WF_RFCR_DROP_MCAST | + MT_WF_RFCR_DROP_BCAST | + MT_WF_RFCR_DROP_DUPLICATE | + MT_WF_RFCR_DROP_A2_BSSID | + MT_WF_RFCR_DROP_UNWANTED_CTL | + MT_WF_RFCR_DROP_STBC_MULTI); MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM | MT_WF_RFCR_DROP_A3_MAC | @@ -606,7 +605,7 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR_DROP_NDPA); *total_flags = flags; - mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter); + mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter); if (*total_flags & FIF_CONTROL) mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 459226c5bb11..eae223a31000 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -155,7 +155,6 @@ struct mt7921_phy { struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES]; - u32 rxfilter; u64 omac_mask; u16 noise; -- cgit v1.2.3 From 47eea8ad62a1203ce20b365f7feba23fef62a487 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 17 Mar 2022 18:08:35 +0100 Subject: mt76: mt7921: honor pm user configuration in mt7921_sniffer_interface_iter Honor runtime-pm user configuration in mt7921_sniffer_interface_iter routine if we do not have a monitor interface. 
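A minimal sketch of the gating this fix introduces, using stand-in names rather than the driver's real structures: the runtime-pm knobs are only asserted when the user has enabled them and no monitor interface is active, so leaving monitor mode no longer re-enables power save behind the user's back.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's power-management state. */
struct pm_state {
	bool enable_user;	/* what the user configured */
	bool ds_enable_user;	/* user deep-sleep setting */
	bool enable;		/* what actually gets programmed */
	bool ds_enable;
};

/* Apply the monitor-mode override without losing the user's choice. */
static void pm_apply(struct pm_state *pm, bool monitor)
{
	pm->enable = pm->enable_user && !monitor;
	pm->ds_enable = pm->ds_enable_user && !monitor;
}

int main(void)
{
	struct pm_state pm = { .enable_user = false, .ds_enable_user = false };

	pm_apply(&pm, true);	/* monitor on: pm forced off */
	pm_apply(&pm, false);	/* monitor off: stays off, user disabled it */
	printf("enable=%d ds_enable=%d\n", pm.enable, pm.ds_enable);
	return 0;
}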
Fixes: 1f12fa34e5dc5 ("mt76: mt7921: don't enable beacon filter when IEEE80211_CONF_CHANGE_MONITOR is set") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 1783866c3daf..ae86705faec2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -489,8 +489,8 @@ mt7921_sniffer_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); mt7921_mcu_set_sniffer(dev, vif, monitor); - pm->enable = !monitor; - pm->ds_enable = !monitor; + pm->enable = pm->enable_user && !monitor; + pm->ds_enable = pm->ds_enable_user && !monitor; mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable); -- cgit v1.2.3 From 5beadb27fa808172c26a6a6d3e8500cf6b547c48 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Sat, 19 Mar 2022 12:53:50 +0800 Subject: mt76: mt7915: always call mt7915_wfsys_reset() during init Soft reboot might not clear certain condition, so always call mt7915_wfsys_reset() during init. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 17 +++-------------- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 1 + drivers/net/wireless/mediatek/mt76/mt7915/pci.c | 1 + drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 7 ++++--- 4 files changed, 9 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 9686a835d5cb..aa93aeab5634 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -564,7 +564,7 @@ static void mt7915_init_work(struct work_struct *work) mt7915_txbf_init(dev); } -static void mt7915_wfsys_reset(struct mt7915_dev *dev) +void mt7915_wfsys_reset(struct mt7915_dev *dev) { #define MT_MCU_DUMMY_RANDOM GENMASK(15, 0) #define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16) @@ -653,11 +653,6 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2) INIT_WORK(&dev->init_work, mt7915_init_work); - /* If MCU was already running, it is likely in a bad state */ - if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) > - FW_STATE_FW_DOWNLOAD) - mt7915_wfsys_reset(dev); - ret = mt7915_dma_init(dev, phy2); if (ret) return ret; @@ -665,14 +660,8 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2) set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); ret = mt7915_mcu_init(dev); - if (ret) { - /* Reset and try again */ - mt7915_wfsys_reset(dev); - - ret = mt7915_mcu_init(dev); - if (ret) - return ret; - } + if (ret) + return ret; ret = mt7915_eeprom_init(dev); if (ret < 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index fd05e07bf9b5..ca129e5e380e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -429,6 +429,7 @@ static inline void mt7986_wmac_disable(struct mt7915_dev *dev) #endif struct mt7915_dev *mt7915_mmio_probe(struct device *pdev, void __iomem *mem_base, u32 device_id); +void mt7915_wfsys_reset(struct mt7915_dev *dev); irqreturn_t mt7915_irq_handler(int irq, void *dev_instance); u64 __mt7915_get_tsf(struct 
ieee80211_hw *hw, struct mt7915_vif *mvif); int mt7915_register_device(struct mt7915_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 6f819c41a4c4..1bab1cbb0d89 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -126,6 +126,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, return PTR_ERR(dev); mdev = &dev->mt76; + mt7915_wfsys_reset(dev); hif2 = mt7915_pci_init_hif2(pdev); ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index 3028c02cb840..52fc5d5bf20f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -1128,7 +1128,7 @@ static int mt7986_wmac_init(struct mt7915_dev *dev) if (IS_ERR(dev->rstc)) return PTR_ERR(dev->rstc); - return mt7986_wmac_enable(dev); + return 0; } static int mt7986_wmac_probe(struct platform_device *pdev) @@ -1161,12 +1161,13 @@ static int mt7986_wmac_probe(struct platform_device *pdev) if (ret) goto free_device; - mt76_wr(dev, MT_INT_MASK_CSR, 0); - ret = mt7986_wmac_init(dev); if (ret) goto free_irq; + mt7915_wfsys_reset(dev); + mt76_wr(dev, MT_INT_MASK_CSR, 0); + ret = mt7915_register_device(dev); if (ret) goto free_irq; -- cgit v1.2.3 From aa796f12091aa4758366f5171fd9cba2ff574ba3 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sat, 19 Mar 2022 14:28:01 +0100 Subject: mt76: mt7915: fix unbounded shift in mt7915_mcu_beacon_mbss Fix the following smatch static checker warning: drivers/net/wireless/mediatek/mt76/mt7915/mcu.c:1872 mt7915_mcu_beacon_mbss() error: undefined (user controlled) shift '(((1))) << (data[2])' Rely on mac80211 definitions for ieee80211_bssid_index subelement. 
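The essence of the bug, as a self-contained sketch: the BSSID index comes straight from received frame data, and using it unchecked as a shift count is undefined behaviour once it reaches the width of the bitmap. The 31 bound below matches a 32-bit bitmap; the names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define MAX_BSSID_IDX	31	/* the bitmap below is 32 bits wide */

/* Validate an index taken from untrusted frame data before shifting. */
static int set_mbss_bit(uint32_t *bitmap, uint8_t bssid_index)
{
	if (!bssid_index || bssid_index > MAX_BSSID_IDX)
		return -1;	/* reject instead of shifting out of range */

	*bitmap |= 1U << bssid_index;
	return 0;
}

int main(void)
{
	uint32_t bitmap = 0;

	set_mbss_bit(&bitmap, 5);	/* valid index */
	set_mbss_bit(&bitmap, 200);	/* rejected: would be an out-of-range shift */
	printf("bitmap=0x%08x\n", bitmap);
	return 0;
}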
Fixes: 6b7f9aff7c67 ("mt76: mt7915: introduce 802.11ax multi-bss support") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index df31084e860f..6bd8e7591ad3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1854,7 +1854,8 @@ mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb, continue; for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) { - const u8 *data; + const struct ieee80211_bssid_index *idx; + const u8 *idx_ie; if (sub_elem->id || sub_elem->datalen < 4) continue; /* not a valid BSS profile */ @@ -1862,14 +1863,19 @@ mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb, /* Find WLAN_EID_MULTI_BSSID_IDX * in the merged nontransmitted profile */ - data = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, - sub_elem->data, - sub_elem->datalen); - if (!data || data[1] < 1 || !data[2]) + idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, + sub_elem->data, + sub_elem->datalen); + if (!idx_ie || idx_ie[1] < sizeof(*idx)) continue; - mbss->offset[data[2]] = cpu_to_le16(data - skb->data); - mbss->bitmap |= cpu_to_le32(BIT(data[2])); + idx = (void *)(idx_ie + 2); + if (!idx->bssid_index || idx->bssid_index > 31) + continue; + + mbss->offset[idx->bssid_index] = + cpu_to_le16(idx_ie - skb->data); + mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index)); } } } -- cgit v1.2.3 From 4e90db5e21eb3bb272fe47386dc3506755e209e9 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 3 Apr 2022 17:40:33 +0200 Subject: mt76: mt7921: Fix the error handling path of mt7921_pci_probe() In case of error, some resources must be freed, as already done above and below the devm_kmemdup() and __mt7921e_mcu_drv_pmctrl() calls added in the commit in Fixes:. Fixes: 602cc0c9618a ("mt76: mt7921e: fix possible probe failure after reboot") Signed-off-by: Christophe JAILLET Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 1a01d025bbe5..062e2b422478 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -302,8 +302,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev, dev->bus_ops = dev->mt76.bus; bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), GFP_KERNEL); - if (!bus_ops) - return -ENOMEM; + if (!bus_ops) { + ret = -ENOMEM; + goto err_free_dev; + } bus_ops->rr = mt7921_rr; bus_ops->wr = mt7921_wr; @@ -312,7 +314,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev, ret = __mt7921e_mcu_drv_pmctrl(dev); if (ret) - return ret; + goto err_free_dev; mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) | (mt7921_l1_rr(dev, MT_HW_REV) & 0xff); -- cgit v1.2.3 From 9bd6823f5a64b6465708b244eecc9b7dd4b01bfc Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 4 Apr 2022 10:08:10 +0200 Subject: mt76: mt7915: fix possible uninitialized pointer dereference in mt7986_wmac_gpio_setup Add default case for type switch in mt7986_wmac_gpio_setup routine in order to avoid a possible uninitialized pointer dereference. 
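The general pattern, reduced to a standalone example: when a pointer is only assigned inside switch cases, an unhandled case leaves it uninitialized and the later use becomes a wild dereference; a default branch that returns an error keeps every path defined. Names and values below are placeholders, not the driver's.

#include <stdio.h>

enum adie_type { ADIE_SP = 0, ADIE_DBDC = 1 };

static const char sp_state[] = "pinctrl-sp";
static const char dbdc_state[] = "pinctrl-dbdc";

static int select_state(int type, const char **state)
{
	switch (type) {
	case ADIE_SP:
		*state = sp_state;
		break;
	case ADIE_DBDC:
		*state = dbdc_state;
		break;
	default:
		return -22;	/* -EINVAL: never leave *state unset */
	}
	return 0;
}

int main(void)
{
	const char *state;

	if (!select_state(ADIE_DBDC, &state))
		printf("selected %s\n", state);
	if (select_state(7, &state))
		printf("unknown type rejected\n");
	return 0;
}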
Fixes: 99ad32a4ca3a2 ("mt76: mt7915: add support for MT7986") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index 52fc5d5bf20f..04e62d569599 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -210,6 +210,8 @@ static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev) if (IS_ERR_OR_NULL(state)) return -EINVAL; break; + default: + return -EINVAL; } ret = pinctrl_select_state(pinctrl, state); -- cgit v1.2.3 From 62fdc974894eec80d678523458cf99bbdb887e22 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 4 Apr 2022 10:23:15 +0200 Subject: mt76: mt7915: fix possible NULL pointer dereference in mt7915_mac_fill_rx_vector Fix possible NULL pointer dereference in mt7915_mac_fill_rx_vector routine if the chip does not support dbdc and the hw reports band_idx set to 1. Fixes: 78fc30a21cf11 ("mt76: mt7915: move testmode data from dev to phy") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 957895fa73c0..4f538cae29cf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -864,8 +864,11 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb) int i; band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX); - if (band_idx && !phy->band_idx) + if (band_idx && !phy->band_idx) { phy = mt7915_ext_phy(dev); + if (!phy) + goto out; + } rcpi = le32_to_cpu(rxv[6]); ib_rssi = le32_to_cpu(rxv[7]); @@ -890,8 +893,8 @@ mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb) phy->test.last_freq_offset = foe; phy->test.last_snr = snr; +out: #endif - dev_kfree_skb(skb); } -- cgit v1.2.3 From badb6ffaa1439fce30fc6ef10571dcf45a622b44 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 4 Apr 2022 10:35:39 +0200 Subject: mt76: mt7915: do not pass data pointer to mt7915_mcu_muru_debug_set Fix typo in mt7915_muru_debug_set routine and pass muru_debug value to mt7915_mcu_muru_debug_set() instead of data pointer. 
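For context, a toy version of the debugfs attribute pattern involved: the setter receives the device as an opaque data cookie and the new value as val, and the fix is simply to forward the value rather than the cookie. All names here are stand-ins.

#include <stdint.h>
#include <stdio.h>

struct demo_dev {
	uint8_t muru_debug;
};

/* Stand-in for the firmware call, which expects a value, not a pointer. */
static int fw_set_muru_debug(struct demo_dev *dev, uint8_t enabled)
{
	printf("fw: muru debug -> %u\n", enabled);
	return 0;
}

/* debugfs-style setter: (void *data, u64 val) */
static int muru_debug_set(void *data, uint64_t val)
{
	struct demo_dev *dev = data;

	dev->muru_debug = val;
	/* pass the stored value, not the 'data' cookie */
	return fw_set_muru_debug(dev, dev->muru_debug);
}

int main(void)
{
	struct demo_dev dev = { 0 };

	return muru_debug_set(&dev, 1);
}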
Fixes: 1966a5078f2d ("mt76: mt7915: add mu-mimo and ofdma debugfs knobs") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index e9cab1165f38..e0b7f1efb113 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -95,7 +95,7 @@ mt7915_muru_debug_set(void *data, u64 val) struct mt7915_dev *dev = data; dev->muru_debug = val; - mt7915_mcu_muru_debug_set(dev, data); + mt7915_mcu_muru_debug_set(dev, dev->muru_debug); return 0; } -- cgit v1.2.3 From 05268cf1789d99eda491c4a32f23a4c5b9bddeba Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 4 Apr 2022 11:17:03 +0200 Subject: mt76: mt7915: report rx mode value in mt7915_mac_fill_rx_rate Report rx mode in mt7915_mac_fill_rx_rate routine in order to properly add he radiotap if mode is at least HE_SU. Fixes: 1c9db0aa23fd1 ("mt76: mt7915: update rx rate reporting for mt7916") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 4f538cae29cf..fbc66a8b9338 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -309,7 +309,7 @@ mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv) } static void -mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u32 mode) +mt7915_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; static const struct ieee80211_radiotap_he known = { @@ -474,10 +474,10 @@ static int mt7915_mac_fill_rx_rate(struct mt7915_dev *dev, struct mt76_rx_status *status, struct ieee80211_supported_band *sband, - __le32 *rxv) + __le32 *rxv, u8 *mode) { u32 v0, v2; - u8 stbc, gi, bw, dcm, mode, nss; + u8 stbc, gi, bw, dcm, nss; int i, idx; bool cck = false; @@ -490,18 +490,18 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev, if (!is_mt7915(&dev->mt76)) { stbc = FIELD_GET(MT_PRXV_HT_STBC, v0); gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0); - mode = FIELD_GET(MT_PRXV_TX_MODE, v0); + *mode = FIELD_GET(MT_PRXV_TX_MODE, v0); dcm = FIELD_GET(MT_PRXV_DCM, v0); bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0); } else { stbc = FIELD_GET(MT_CRXV_HT_STBC, v2); gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2); - mode = FIELD_GET(MT_CRXV_TX_MODE, v2); + *mode = FIELD_GET(MT_CRXV_TX_MODE, v2); dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM); bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2); } - switch (mode) { + switch (*mode) { case MT_PHY_TYPE_CCK: cck = true; fallthrough; @@ -546,7 +546,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev, case IEEE80211_STA_RX_BW_20: break; case IEEE80211_STA_RX_BW_40: - if (mode & MT_PHY_TYPE_HE_EXT_SU && + if (*mode & MT_PHY_TYPE_HE_EXT_SU && (idx & MT_PRXV_TX_ER_SU_106T)) { status->bw = RATE_INFO_BW_HE_RU; status->he_ru = @@ -566,7 +566,7 @@ mt7915_mac_fill_rx_rate(struct mt7915_dev *dev, } status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; - if (mode < MT_PHY_TYPE_HE_SU && gi) + if (*mode < MT_PHY_TYPE_HE_SU && gi) status->enc_flags |= RX_ENC_FLAG_SHORT_GI; return 0; @@ -581,7 +581,6 @@ 
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) struct ieee80211_supported_band *sband; __le32 *rxd = (__le32 *)skb->data; __le32 *rxv = NULL; - u32 mode = 0; u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); @@ -590,10 +589,10 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; bool unicast, insert_ccmp_hdr = false; u8 remove_pad, amsdu_info; + u8 mode = 0, qos_ctl = 0; bool hdr_trans; u16 hdr_gap; u16 seq_ctrl = 0; - u8 qos_ctl = 0; __le16 fc = 0; int idx; @@ -766,7 +765,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) } if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) { - ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv); + ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv, + &mode); if (ret < 0) return ret; } -- cgit v1.2.3 From 1e779f49ca0c413c80457b2db9c201c8db6188bb Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 4 Apr 2022 19:28:02 +0200 Subject: mt76: mt7915: use 0xff to initialize bitrate_mask in mt7915_init_bitrate_mask Use 0xff (GENMASK(7,0)) in mt7915_init_bitrate_mask routine in order to initialize bitrate_mask structure in order to avoid truncating value in memset(). Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 865694c755cf..9b8defb15b4e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -174,14 +174,14 @@ static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif) for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) { mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI; - mvif->bitrate_mask.control[i].he_gi = GENMASK(7, 0); - mvif->bitrate_mask.control[i].he_ltf = GENMASK(7, 0); + mvif->bitrate_mask.control[i].he_gi = 0xff; + mvif->bitrate_mask.control[i].he_ltf = 0xff; mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0); - memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0), + memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff, sizeof(mvif->bitrate_mask.control[i].ht_mcs)); - memset(mvif->bitrate_mask.control[i].vht_mcs, GENMASK(15, 0), + memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff, sizeof(mvif->bitrate_mask.control[i].vht_mcs)); - memset(mvif->bitrate_mask.control[i].he_mcs, GENMASK(15, 0), + memset(mvif->bitrate_mask.control[i].he_mcs, 0xff, sizeof(mvif->bitrate_mask.control[i].he_mcs)); } } -- cgit v1.2.3 From 116c69603b01f2d6e4499ca5d535f5b71c52052c Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Thu, 7 Apr 2022 02:29:14 +0800 Subject: mt76: mt7921: Add AP mode support add AP mode support to mt7921 that can work for mt7921[e,s,u] with the common code. 
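One piece of what enabling AP mode entails is advertising it to mac80211 through interface combinations; below is a reduced, self-contained sketch of that idea, with stand-in types and limits rather than the real cfg80211 structures.

#include <stdio.h>

/* Reduced stand-ins for the mac80211 interface-combination structures. */
struct iface_limit {
	int max;
	unsigned int types;	/* bitmask of interface types */
};

#define IFTYPE_STATION	(1U << 0)
#define IFTYPE_AP	(1U << 1)

/* One combination: several stations plus at most one AP interface. */
static const struct iface_limit if_limits[] = {
	{ .max = 4, .types = IFTYPE_STATION },
	{ .max = 1, .types = IFTYPE_AP },
};

int main(void)
{
	unsigned long i;

	for (i = 0; i < sizeof(if_limits) / sizeof(if_limits[0]); i++)
		printf("limit %lu: max=%d types=0x%x\n",
		       i, if_limits[i].max, if_limits[i].types);
	return 0;
}

In the real driver this is expressed with struct ieee80211_iface_limit and wiphy->iface_combinations, as the if_limits change in the diff below shows.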
Tested-by: Deren Wu Tested-by: Lorenzo Bianconi Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/init.c | 11 ++- drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 9 +++ drivers/net/wireless/mediatek/mt76/mt7921/main.c | 45 ++++++++++++ drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 79 +++++++++++++++++++++- drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h | 4 ++ 5 files changed, 146 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index 91fc41922d95..f9e1255bd9c7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -11,6 +11,10 @@ static const struct ieee80211_iface_limit if_limits[] = { { .max = MT7921_MAX_INTERFACES, .types = BIT(NL80211_IFTYPE_STATION) + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) } }; @@ -64,7 +68,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) wiphy->iface_combinations = if_comb; wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION); - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP); wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN; wiphy->max_scan_ssids = 4; @@ -80,6 +85,10 @@ mt7921_init_wiphy(struct ieee80211_hw *hw) wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, HAS_RATE_CONTROL); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 42d2795c4714..0c4ee3ffa0bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -1361,12 +1361,21 @@ mt7921_vif_connect_iter(void *priv, u8 *mac, { struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; struct mt7921_dev *dev = mvif->phy->dev; + struct ieee80211_hw *hw = mt76_hw(dev); if (vif->type == NL80211_IFTYPE_STATION) ieee80211_disconnect(vif, true); mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); mt7921_mcu_set_tx(dev, vif); + + if (vif->type == NL80211_IFTYPE_AP) { + mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid, + true); + mt7921_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_NONE); + mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true); + } } /* system error recovery */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index ae86705faec2..186d883c5eab 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -53,6 +53,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, switch (i) { case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: break; default: continue; @@ -86,6 +87,23 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; switch (i) { + case 
NL80211_IFTYPE_AP: + he_cap_elem->mac_cap_info[2] |= + IEEE80211_HE_MAC_CAP2_BSR; + he_cap_elem->mac_cap_info[4] |= + IEEE80211_HE_MAC_CAP4_BQR; + he_cap_elem->mac_cap_info[5] |= + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX; + he_cap_elem->phy_cap_info[3] |= + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; + he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[9] |= + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; + break; case NL80211_IFTYPE_STATION: he_cap_elem->mac_cap_info[1] |= IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; @@ -634,6 +652,20 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, } } + if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) { + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + + mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, + true); + mt7921_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_NONE); + } + + if (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)) + mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, + info->enable_beacon); + /* ensure that enable txcmd_mode after bss_info */ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) mt7921_mcu_set_tx(dev, vif); @@ -1394,6 +1426,18 @@ out: return err; } +static void +mt7921_channel_switch_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + + mt7921_mutex_acquire(dev); + mt7921_mcu_uni_add_beacon_offload(dev, hw, vif, true); + mt7921_mutex_release(dev); +} + const struct ieee80211_ops mt7921_ops = { .tx = mt7921_tx, .start = mt7921_start, @@ -1412,6 +1456,7 @@ const struct ieee80211_ops mt7921_ops = { .set_rts_threshold = mt7921_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, .release_buffered_frames = mt76_release_buffered_frames, + .channel_switch_beacon = mt7921_channel_switch_beacon, .get_txpower = mt76_get_txpower, .get_stats = mt7921_get_stats, .get_et_sset_count = mt7921_get_et_sset_count, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index da2be050ed7c..1ecbbe4fa498 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -248,7 +248,8 @@ mt7921_mcu_connection_loss_iter(void *priv, u8 *mac, if (mvif->idx != event->bss_idx) return; - if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) || + vif->type != NL80211_IFTYPE_STATION) return; ieee80211_connection_loss(vif); @@ -1166,3 +1167,79 @@ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif, return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req), true); } + +int +mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct ieee80211_mutable_offsets offs; + struct { + struct req_hdr { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcn_content_tlv { + __le16 tag; + __le16 len; + __le16 tim_ie_pos; + __le16 csa_ie_pos; + __le16 bcc_ie_pos; + /* 0: disable beacon offload + * 1: enable beacon offload + * 2: update probe respond 
offload + */ + u8 enable; + /* 0: legacy format (TXD + payload) + * 1: only cap field IE + */ + u8 type; + __le16 pkt_len; + u8 pkt[512]; + } __packed beacon_tlv; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .beacon_tlv = { + .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), + .len = cpu_to_le16(sizeof(struct bcn_content_tlv)), + .enable = enable, + }, + }; + struct sk_buff *skb; + + if (!enable) + goto out; + + skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs); + if (!skb) + return -EINVAL; + + if (skb->len > 512 - MT_TXD_SIZE) { + dev_err(dev->mt76.dev, "beacon size limit exceed\n"); + dev_kfree_skb(skb); + return -EINVAL; + } + + mt7921_mac_write_txwi(dev, (__le32 *)(req.beacon_tlv.pkt), skb, + wcid, NULL, 0, true); + memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len); + req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); + req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset); + + if (offs.cntdwn_counter_offs[0]) { + u16 csa_offs; + + csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4; + req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs); + } + dev_kfree_skb(skb); + +out: + return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), + &req, sizeof(req), true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index eae223a31000..adbdb2e22934 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -469,4 +469,8 @@ int mt7921u_wfsys_reset(struct mt7921_dev *dev); int mt7921u_dma_init(struct mt7921_dev *dev, bool resume); int mt7921u_init_reset(struct mt7921_dev *dev); int mt7921u_mac_reset(struct mt7921_dev *dev); +int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev, + struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + bool enable); #endif -- cgit v1.2.3 From f5874fc6f54e58da8afb01092286f5e18a54c55d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 20 Apr 2022 12:27:18 +0200 Subject: mt76: fix rx reordering with non explicit / psmp ack policy When the QoS ack policy was set to non explicit / psmp ack, frames are treated as not being part of a BA session, which causes extra latency on reordering. Fix this by only bypassing reordering for packets with no-ack policy Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 72622220051b..9b8e1dfbc814 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -169,8 +169,7 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) /* not part of a BA session */ ackp = status->qos_ctl & IEEE80211_QOS_CTL_ACK_POLICY_MASK; - if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && - ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) + if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK) return; tid = rcu_dereference(wcid->aggr[tidno]); -- cgit v1.2.3 From abba345311a740d9dca1b5eb293b3b1c296715dd Mon Sep 17 00:00:00 2001 From: Deren Wu Date: Sat, 9 Apr 2022 21:44:07 +0800 Subject: mt76: fix antenna config missing in 6G cap To make sure we have the proper antenna config in 6g cap, move IEEE80211_VHT_CAP_[T/R]X_ANTENNA_PATTERN to stream init. 
Fixes: edf9dab8ba27 ("mt76: add 6GHz support") Signed-off-by: Deren Wu Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 026ab1e16d45..2dd3ebd1863f 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -248,6 +248,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy, vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC; + vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | + IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN; for (i = 0; i < 8; i++) { if (i < nstream) @@ -323,8 +325,6 @@ mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband, vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_SHORT_GI_80 | - IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | - IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); return 0; -- cgit v1.2.3 From 74752f5367ab42786c06bce01b707d23215deabe Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Wed, 13 Apr 2022 06:27:24 +0800 Subject: mt76: mt7915: remove SCS feature SCS is obsoleted and no longer used, so remove it. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h | 1 - drivers/net/wireless/mediatek/mt76/mt7915/main.c | 8 -------- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 16 ---------------- drivers/net/wireless/mediatek/mt76/mt7915/mcu.h | 10 ---------- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 1 - 5 files changed, 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index c3c93338d56a..561fb0368708 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -974,7 +974,6 @@ enum { MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d, MCU_EXT_CMD_MWDS_SUPPORT = 0x80, MCU_EXT_CMD_SET_SER_TRIGGER = 0x81, - MCU_EXT_CMD_SCS_CTRL = 0x82, MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94, MCU_EXT_CMD_FW_DBG_CTRL = 0x95, MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 9b8defb15b4e..5177b19f9154 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -42,10 +42,6 @@ static int mt7915_start(struct ieee80211_hw *hw) if (ret) goto out; - ret = mt7915_mcu_set_scs(dev, 0, true); - if (ret) - goto out; - mt7915_mac_enable_nf(dev, 0); } @@ -58,10 +54,6 @@ static int mt7915_start(struct ieee80211_hw *hw) if (ret) goto out; - ret = mt7915_mcu_set_scs(dev, 1, true); - if (ret) - goto out; - mt7915_mac_enable_nf(dev, 1); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 6bd8e7591ad3..98a3b22ba549 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -2589,22 +2589,6 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, &req_mac, sizeof(req_mac), true); } -int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable) -{ - struct { - __le32 cmd; - u8 band; - u8 enable; - } __packed req = { - .cmd = cpu_to_le32(SCS_ENABLE), - .band = band, - .enable = enable + 
1, - }; - - return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SCS_CTRL), &req, - sizeof(req), false); -} - int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param) { struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 960072a44222..064d33e33738 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -304,16 +304,6 @@ enum mcu_mmps_mode { MCU_MMPS_DISABLE, }; -enum { - SCS_SEND_DATA, - SCS_SET_MANUAL_PD_TH, - SCS_CONFIG, - SCS_ENABLE, - SCS_SHOW_INFO, - SCS_GET_GLO_ADDR, - SCS_GET_GLO_ADDR_EVENT, -}; - struct bss_info_bmc_rate { __le16 tag; __le16 len; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index ca129e5e380e..419ff08176b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -486,7 +486,6 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, bool hdr_trans); int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode, u8 en); -int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable); int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band); int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable); int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy); -- cgit v1.2.3 From b57a5bb0170a54f6f3b2dbb9624b1093d9abb878 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 14 Apr 2022 10:50:07 +0100 Subject: mt76: mt7915: make read-only array ppet16_ppet8_ru3_ru0 static const Don't populate the read-only array ppet16_ppet8_ru3_ru0 on the stack but instead make it static const. Also makes the object code a little smaller. Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Colin Ian King Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index aa93aeab5634..1811a69338f4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -801,7 +801,7 @@ static void mt7915_gen_ppe_thresh(u8 *he_ppet, int nss) { u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */ - u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71}; + static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71}; he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) | FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, -- cgit v1.2.3 From deb0891bad7c05b66867bc68a26849eb83281f70 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 14 Apr 2022 10:54:38 +0100 Subject: mt76: mt7921: make read-only array ppet16_ppet8_ru3_ru0 static const Don't populate the read-only array ppet16_ppet8_ru3_ru0 on the stack but instead make it static const. Also makes the object code a little smaller. 
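A small standalone contrast of what the change buys, using the same three byte values: an automatic (stack) array is rebuilt on every call, while a static const array is emitted once into read-only data and only read at run time.

#include <stdint.h>
#include <stdio.h>

/* Re-built on the stack every time the function runs. */
static uint8_t sum_stack(void)
{
	uint8_t ppet[] = { 0x1c, 0xc7, 0x71 };

	return ppet[0] + ppet[1] + ppet[2];
}

/* Emitted once into .rodata; the function only reads it. */
static uint8_t sum_rodata(void)
{
	static const uint8_t ppet[] = { 0x1c, 0xc7, 0x71 };

	return ppet[0] + ppet[1] + ppet[2];
}

int main(void)
{
	printf("%d %d\n", sum_stack(), sum_rodata());
	return 0;
}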
Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Colin Ian King Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 186d883c5eab..e3db08e3a79f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -12,7 +12,7 @@ static void mt7921_gen_ppe_thresh(u8 *he_ppet, int nss) { u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */ - u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71}; + static const u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71}; he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) | FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, -- cgit v1.2.3 From ad483ed9dd5193a54293269c852a29051813b7bd Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Fri, 15 Apr 2022 06:56:05 +0800 Subject: mt76: mt7921: fix kernel crash at mt7921_pci_remove The crash log shown it is possible that mt7921_irq_handler is called while devm_free_irq is being handled so mt76_free_device need to be postponed until devm_free_irq is completed to solve the crash we free the mt76 device too early. [ 9299.339655] BUG: kernel NULL pointer dereference, address: 0000000000000008 [ 9299.339705] #PF: supervisor read access in kernel mode [ 9299.339735] #PF: error_code(0x0000) - not-present page [ 9299.339768] PGD 0 P4D 0 [ 9299.339786] Oops: 0000 [#1] SMP PTI [ 9299.339812] CPU: 1 PID: 1624 Comm: prepare-suspend Not tainted 5.15.14-1.fc32.qubes.x86_64 #1 [ 9299.339863] Hardware name: Xen HVM domU, BIOS 4.14.3 01/20/2022 [ 9299.339901] RIP: 0010:mt7921_irq_handler+0x1e/0x70 [mt7921e] [ 9299.340048] RSP: 0018:ffffa81b80c27cb0 EFLAGS: 00010082 [ 9299.340081] RAX: 0000000000000000 RBX: ffff98a4cb752020 RCX: ffffffffa96211c5 [ 9299.340123] RDX: 0000000000000000 RSI: 00000000000d4204 RDI: ffff98a4cb752020 [ 9299.340165] RBP: ffff98a4c28a62a4 R08: ffff98a4c37a96c0 R09: 0000000080150011 [ 9299.340207] R10: 0000000040000000 R11: 0000000000000000 R12: ffff98a4c4eaa080 [ 9299.340249] R13: ffff98a4c28a6360 R14: ffff98a4cb752020 R15: ffff98a4c28a6228 [ 9299.340297] FS: 00007260840d3740(0000) GS:ffff98a4ef700000(0000) knlGS:0000000000000000 [ 9299.340345] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 9299.340383] CR2: 0000000000000008 CR3: 0000000004c56001 CR4: 0000000000770ee0 [ 9299.340432] PKRU: 55555554 [ 9299.340449] Call Trace: [ 9299.340467] [ 9299.340485] __free_irq+0x221/0x350 [ 9299.340527] free_irq+0x30/0x70 [ 9299.340553] devm_free_irq+0x55/0x80 [ 9299.340579] mt7921_pci_remove+0x2f/0x40 [mt7921e] [ 9299.340616] pci_device_remove+0x3b/0xa0 [ 9299.340651] __device_release_driver+0x17a/0x240 [ 9299.340686] device_driver_detach+0x3c/0xa0 [ 9299.340714] unbind_store+0x113/0x130 [ 9299.340740] kernfs_fop_write_iter+0x124/0x1b0 [ 9299.340775] new_sync_write+0x15c/0x1f0 [ 9299.340806] vfs_write+0x1d2/0x270 [ 9299.340831] ksys_write+0x67/0xe0 [ 9299.340857] do_syscall_64+0x3b/0x90 [ 9299.340887] entry_SYSCALL_64_after_hwframe+0x44/0xae Fixes: 5c14a5f944b9 ("mt76: mt7921: introduce mt7921e support") Reported-by: ThinerLogoer Signed-off-by: Deren Wu Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c 
b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index 062e2b422478..b5fb22b8e086 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -119,7 +119,6 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev) mt7921_mcu_exit(dev); tasklet_disable(&dev->irq_tasklet); - mt76_free_device(&dev->mt76); } static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr) @@ -356,6 +355,7 @@ static void mt7921_pci_remove(struct pci_dev *pdev) mt7921e_unregister_device(dev); devm_free_irq(&pdev->dev, pdev->irq, dev); + mt76_free_device(&dev->mt76); pci_free_irq_vectors(pdev); } -- cgit v1.2.3 From 0a17329ae9c1fa0fde43b45e7d04a10708020b1b Mon Sep 17 00:00:00 2001 From: Shayne Chen Date: Mon, 18 Apr 2022 16:03:30 +0800 Subject: mt76: mt7915: add debugfs knob for RF registers read/write Add RF registers read/write support for debugging RF issues, which should be processed by mcu commands. The index of rf registers use the generic regidx, and are combined with two parts: WF selection [31:28] and offset [27:0]. Reviewed-by: Ryder Lee Signed-off-by: Peter Chiu Signed-off-by: Shayne Chen Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/debugfs.c | 32 ++++++++++++++++++++++ drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 29 ++++++++++++++++++++ drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 1 + 3 files changed, 62 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index e0b7f1efb113..1628840e8931 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -867,6 +867,36 @@ mt7915_twt_stats(struct seq_file *s, void *data) return 0; } +/* The index of RF registers use the generic regidx, combined with two parts: + * WF selection [31:28] and offset [27:0]. 
+ */ +static int +mt7915_rf_regval_get(void *data, u64 *val) +{ + struct mt7915_dev *dev = data; + u32 regval; + int ret; + + ret = mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, ®val, false); + if (ret) + return ret; + + *val = le32_to_cpu(regval); + + return 0; +} + +static int +mt7915_rf_regval_set(void *data, u64 val) +{ + struct mt7915_dev *dev = data; + + return mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true); +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7915_rf_regval_get, + mt7915_rf_regval_set, "0x%08llx\n"); + int mt7915_init_debugfs(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; @@ -898,6 +928,8 @@ int mt7915_init_debugfs(struct mt7915_phy *phy) debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir, mt7915_twt_stats); debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger); + debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval); + if (!dev->dbdc_support || phy->band_idx) { debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 98a3b22ba549..4faa7b8b2778 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -3661,3 +3661,32 @@ int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev, return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE), &req, sizeof(req), true); } + +int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set) +{ + struct { + __le32 idx; + __le32 ofs; + __le32 data; + } __packed req = { + .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 28))), + .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(27, 0))), + .data = set ? cpu_to_le32(*val) : 0, + }; + struct sk_buff *skb; + int ret; + + if (set) + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS), + &req, sizeof(req), false); + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + *val = le32_to_cpu(*(__le32 *)(skb->data + 8)); + dev_kfree_skb(skb); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 419ff08176b4..6c590eff14f1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -506,6 +506,7 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct rate_info *rate); int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy, struct cfg80211_chan_def *chandef); +int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set); int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3); int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl); int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level); -- cgit v1.2.3 From b61cc2a76b9d66f95e721167d74ab173b3902d97 Mon Sep 17 00:00:00 2001 From: Yunbo Yu Date: Mon, 18 Apr 2022 16:18:44 +0800 Subject: mt76: mt7603: move spin_lock_bh() to spin_lock() It is unnecessary to call spin_lock_bh() within a tasklet. 
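A compilable sketch of the locking rationale, with trivial stub lock functions standing in for the kernel primitives (they are not the real implementations): tasklets already run in softirq context with bottom halves disabled, so the plain spin_lock()/spin_unlock() pair is enough there, while process-context callers that share the lock still need the _bh variants.

#include <stdio.h>

/* Minimal stand-ins so the pattern compiles outside the kernel. */
static int bh_disable_count;

static void spin_lock(int *lock)      { (void)lock; }
static void spin_unlock(int *lock)    { (void)lock; }
static void spin_lock_bh(int *lock)   { bh_disable_count++; spin_lock(lock); }
static void spin_unlock_bh(int *lock) { spin_unlock(lock); bh_disable_count--; }

static int queue_lock;

/* Tasklet callbacks already run with bottom halves disabled, so the
 * plain lock/unlock pair is enough; the _bh variants would only add
 * a redundant disable/enable of softirqs. */
static void pre_tbtt_tasklet(void)
{
	spin_lock(&queue_lock);
	/* ... queue beacon frames ... */
	spin_unlock(&queue_lock);
}

/* Process-context callers still need the _bh variants to keep the
 * tasklet from preempting them while the lock is held. */
static void process_context_path(void)
{
	spin_lock_bh(&queue_lock);
	/* ... */
	spin_unlock_bh(&queue_lock);
}

int main(void)
{
	pre_tbtt_tasklet();
	process_context_path();
	printf("bh disable balance: %d\n", bh_disable_count);
	return 0;
}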
Signed-off-by: Yunbo Yu Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7603/beacon.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index 5d4522f440b7..b5e8308e0cc7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -82,12 +82,12 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) __skb_queue_head_init(&data.q); q = dev->mphy.q_tx[MT_TXQ_BEACON]; - spin_lock_bh(&q->lock); + spin_lock(&q->lock); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, mt7603_update_beacon_iter, dev); mt76_queue_kick(dev, q); - spin_unlock_bh(&q->lock); + spin_unlock(&q->lock); /* Flush all previous CAB queue packets */ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); @@ -117,7 +117,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) mt76_skb_set_moredata(data.tail[i], false); } - spin_lock_bh(&q->lock); + spin_lock(&q->lock); while ((skb = __skb_dequeue(&data.q)) != NULL) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; @@ -126,7 +126,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL); } mt76_queue_kick(dev, q); - spin_unlock_bh(&q->lock); + spin_unlock(&q->lock); for (i = 0; i < ARRAY_SIZE(data.count); i++) mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i), -- cgit v1.2.3 From 46f6adbfce18d868cd29f4c03a09c71f65c07507 Mon Sep 17 00:00:00 2001 From: Bo Jiao Date: Wed, 20 Apr 2022 15:11:44 +0800 Subject: mt76: mt7915: disable RX_HDR_TRANS_SHORT This patch disables RX_TRANS_SHORT to make the MDP do header translation when the payload is less than 8 bytes, hence (QoS) null data frames can be encapsulated in 802.3 format. However, WDS requires (QoS) null data in 802.11 format to create vlan AP interfaces.
Signed-off-by: Ryder Lee Signed-off-by: lian.chen Signed-off-by: Sujuan Chen Signed-off-by: Bo Jiao Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 3 +++ drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 4 ---- drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 3 +++ 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 1811a69338f4..70baad756dd0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -450,6 +450,9 @@ static void mt7915_mac_init(struct mt7915_dev *dev) mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len); + if (!is_mt7915(&dev->mt76)) + mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT); + /* enable hardware de-agg */ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index fbc66a8b9338..9c1421884435 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -837,10 +837,6 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; - /* drop no data frame */ - if (fc & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)) - return -EINVAL; - status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc); status->qos_ctl = qos_ctl; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index e5f93c40591c..bac76bc2770f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -158,6 +158,9 @@ enum offs_rev { #define MT_MDP_DCR1 MT_MDP(0x004) #define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3) +#define MT_MDP_DCR2 MT_MDP(0x0e8) +#define MT_MDP_DCR2_RX_TRANS_SHORT BIT(2) + #define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \ ((_band) << 8)) #define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4) -- cgit v1.2.3 From 3968a66475b40691c37b5e6c76975f699671e10e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 20 Apr 2022 13:20:23 +0200 Subject: mt76: do not attempt to reorder received 802.3 packets without agg session Fixes potential latency / packet drop issues in cases where a BA session has not (yet) been established. 
Fixes: e195dad14115 ("mt76: add support for 802.3 rx frames") Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 9b8e1dfbc814..10cbd9e560e7 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -162,8 +162,9 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) if (!sta) return; - if (!status->aggr && !(status->flag & RX_FLAG_8023)) { - mt76_rx_aggr_check_ctl(skb, frames); + if (!status->aggr) { + if (!(status->flag & RX_FLAG_8023)) + mt76_rx_aggr_check_ctl(skb, frames); return; } -- cgit v1.2.3 From bc98e7fdd80d215b4b55eea001023231eb8ce12e Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 20 Apr 2022 14:29:00 +0200 Subject: mt76: fix encap offload ethernet type check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The driver needs to check if the format is 802.2 vs 802.3 in order to set a tx descriptor flag. skb->protocol can't be used, since it may not be properly initialized for packets coming in from a packet socket. Fix misdetection by checking the ethertype from the skb data instead Reported-by: Thibaut VARÈNE Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 4 +++- drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 9c1421884435..94a1871fbf43 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -1016,6 +1016,7 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi, u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; u8 fc_type, fc_stype; + u16 ethertype; bool wmm = false; u32 val; @@ -1029,7 +1030,8 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi, val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | FIELD_PREP(MT_TXD1_TID, tid); - if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN) + ethertype = get_unaligned_be16(&skb->data[12]); + if (ethertype >= ETH_P_802_3_MIN) val |= MT_TXD1_ETH_802_3; txwi[1] |= cpu_to_le32(val); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 0c4ee3ffa0bb..e67c3d17d36c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -814,6 +814,7 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi, { u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; u8 fc_type, fc_stype; + u16 ethertype; bool wmm = false; u32 val; @@ -827,7 +828,8 @@ mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi, val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | FIELD_PREP(MT_TXD1_TID, tid); - if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN) + ethertype = get_unaligned_be16(&skb->data[12]); + if (ethertype >= ETH_P_802_3_MIN) val |= MT_TXD1_ETH_802_3; txwi[1] |= cpu_to_le32(val); -- cgit v1.2.3 From fcfe1b5e162bf473c1d47760962cec8523c00466 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 23 Apr 2022 07:01:18 +0200 Subject: mt76: fix tx status related use-after-free race on station removal There is a small race window where ongoing tx activity can lead to a 
skb getting added to the status tracking idr after that idr has already been cleaned up, which will keep the wcid linked in the status poll list. Fix this by only adding status skbs if the wcid pointer is still assigned in dev->wcid, which gets cleared early by mt76_sta_pre_rcu_remove Fixes: bd1e3e7b693c ("mt76: introduce packet_id idr") Tested-by: Ben Greear Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 2 ++ drivers/net/wireless/mediatek/mt76/tx.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 2dd3ebd1863f..8a2fedbb1451 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1381,7 +1381,9 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; mutex_lock(&dev->mutex); + spin_lock_bh(&dev->status_lock); rcu_assign_pointer(dev->wcid[wcid->idx], NULL); + spin_unlock_bh(&dev->status_lock); mutex_unlock(&dev->mutex); } EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index c3be62f58b62..d5a8456c108b 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -120,7 +120,7 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, memset(cb, 0, sizeof(*cb)); - if (!wcid) + if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx])) return MT_PACKET_ID_NO_ACK; if (info->flags & IEEE80211_TX_CTL_NO_ACK) -- cgit v1.2.3 From cd85efdfd0994df56ace1dd92dea6cc84b2184aa Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 29 Apr 2022 13:55:56 +0200 Subject: mt76: mt7915: configure soc clocks in mt7986_wmac_init Configure mt7986 wmac soc clocks in mt7986_wmac_init routine. Tested-by: Peter Chiu Co-developed-by: Peter Chiu Signed-off-by: Peter Chiu Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index 04e62d569599..a24bcdcd2f06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "mt7915.h" @@ -1117,6 +1118,19 @@ static int mt7986_wmac_init(struct mt7915_dev *dev) { struct device *pdev = dev->mt76.dev; struct platform_device *pfdev = to_platform_device(pdev); + struct clk *mcu_clk, *ap_conn_clk; + + mcu_clk = devm_clk_get(pdev, "mcu"); + if (IS_ERR(mcu_clk)) + dev_err(pdev, "mcu clock not found\n"); + else if (clk_prepare_enable(mcu_clk)) + dev_err(pdev, "mcu clock configuration failed\n"); + + ap_conn_clk = devm_clk_get(pdev, "ap2conn"); + if (IS_ERR(ap_conn_clk)) + dev_err(pdev, "ap2conn clock not found\n"); + else if (clk_prepare_enable(ap_conn_clk)) + dev_err(pdev, "ap2conn clock configuration failed\n"); dev->dcm = devm_platform_ioremap_resource(pfdev, 1); if (IS_ERR(dev->dcm)) -- cgit v1.2.3 From ed2d3d948e6d8aff9e74af6a759ee0a2aff28935 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Tue, 3 May 2022 07:14:38 +0800 Subject: mt76: connac: use skb_put_data instead of open coding use skb_put_data instead of open coding in mt76_connac_mcu_update_arp_filter. 
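As a side note, skb_put_data() simply combines the two open-coded steps (reserve tail room, then copy into it); a minimal sketch of the equivalence, where skb and val are placeholder names rather than identifiers from this patch:

    /* open-coded: grow the skb tail, then copy into the new area */
    u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
    memcpy(addr, &val, sizeof(__be32));

    /* helper form: one call does both steps */
    skb_put_data(skb, &val, sizeof(__be32));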
Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 51a9b5d60c7a..faa279bbbcb2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -2185,11 +2185,8 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, return -ENOMEM; skb_put_data(skb, &req_hdr, sizeof(req_hdr)); - for (i = 0; i < len; i++) { - u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); - - memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); - } + for (i = 0; i < len; i++) + skb_put_data(skb, &info->arp_addr_list[i], sizeof(__be32)); return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD(OFFLOAD), true); } -- cgit v1.2.3 From b5509983d72ebb610366d692cb40202ec06adcae Mon Sep 17 00:00:00 2001 From: Peter Chiu Date: Thu, 5 May 2022 16:25:51 +0800 Subject: mt76: mt7915: update mt7986 patch in mt7986_wmac_adie_patch_7976() Update mt7976 adie patch for different adie version. Reviewed-by: Ryder Lee Reviewed-by: Shayne Chen Signed-off-by: Peter Chiu Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 1 + drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index bac76bc2770f..338dfbee0f93 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -797,6 +797,7 @@ enum offs_rev { /* ADIE */ #define MT_ADIE_CHIP_ID 0x02c +#define MT_ADIE_VERSION_MASK GENMASK(15, 0) #define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16) #define MT_ADIE_IDX0 GENMASK(15, 0) #define MT_ADIE_IDX1 GENMASK(31, 16) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index a24bcdcd2f06..c1900646c54d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -471,17 +471,32 @@ static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie) static int mt7986_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie) { + u32 id, version, rg_xo_01, rg_xo_03; int ret; + ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_CHIP_ID, &id); + if (ret) + return ret; + + version = FIELD_GET(MT_ADIE_VERSION_MASK, id); + ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00); if (ret) return ret; - ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, 0x1d59080f); + if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) { + rg_xo_01 = 0x1d59080f; + rg_xo_03 = 0x34c00fe0; + } else { + rg_xo_01 = 0x1959f80f; + rg_xo_03 = 0x34d00fe0; + } + + ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, rg_xo_01); if (ret) return ret; - return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, 0x34c00fe0); + return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, rg_xo_03); } static int -- cgit v1.2.3 From 3620c8821ae15902eb995a32918e34b7a0c773a3 Mon Sep 17 00:00:00 2001 From: Peter Chiu Date: Thu, 5 May 2022 16:25:52 +0800 Subject: mt76: mt7915: fix twt table_mask to u16 in mt7915_dev mt7915 can support 16 twt stations so modify table_mask to u16. 
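To make the width problem concrete, a minimal sketch (illustrative values, not code from the driver): tracking 16 agreements needs bit 15, which does not fit in a u8.

    u8  old_mask = BIT(15);  /* truncated to 0: slot 15 can never be marked used */
    u16 new_mask = BIT(15);  /* 0x8000: all 16 agreement slots representable */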
Fixes: 3782b69d03e7 ("mt76: mt7915: introduce mt7915_mac_add_twt_setup routine") Signed-off-by: Peter Chiu Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 6c590eff14f1..0f51d7657caf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -319,7 +319,7 @@ struct mt7915_dev { void *cal; struct { - u8 table_mask; + u16 table_mask; u8 n_agrt; } twt; -- cgit v1.2.3 From c088eb38e642f43010fc53ab8e89f6b7c045ef96 Mon Sep 17 00:00:00 2001 From: Peter Chiu Date: Thu, 5 May 2022 16:25:53 +0800 Subject: mt76: mt7915: reject duplicated twt flows Reject twt flows with the same parameters to prevent potential issues caused by duplicated establishment. Reviewed-by: Ryder Lee Reviewed-by: Shayne Chen Signed-off-by: Peter Chiu Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 94a1871fbf43..bc6e3c880869 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -2601,6 +2601,34 @@ static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt) return 0; } +static bool +mt7915_mac_twt_param_equal(struct mt7915_sta *msta, + struct ieee80211_twt_params *twt_agrt) +{ + u16 type = le16_to_cpu(twt_agrt->req_type); + u8 exp; + int i; + + exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type); + for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) { + struct mt7915_twt_flow *f; + + if (!(msta->twt.flowid_mask & BIT(i))) + continue; + + f = &msta->twt.flow[i]; + if (f->duration == twt_agrt->min_twt_dur && + f->mantissa == twt_agrt->mantissa && + f->exp == exp && + f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) && + f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) && + f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER)) + return true; + } + + return false; +} + void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_twt_setup *twt) @@ -2634,6 +2662,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw, exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type); sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type); + if (mt7915_mac_twt_param_equal(msta, twt_agrt)) + goto unlock; + flow = &msta->twt.flow[flowid]; memset(flow, 0, sizeof(*flow)); INIT_LIST_HEAD(&flow->list); -- cgit v1.2.3 From 4ebcff04d3db076c054938ea2b6bc8057671a0d7 Mon Sep 17 00:00:00 2001 From: Peter Chiu Date: Thu, 5 May 2022 16:25:54 +0800 Subject: mt76: mt7915: limit minimum twt duration The minimum twt duration supported by mt7915 is 64 according to hardware design. Reply to the station with TWT_SETUP_CMD_DICTATE if min_twt_dur is smaller than 64.
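For context, TWT_SETUP_CMD_DICTATE is one of the responder commands in mac80211's enum ieee80211_twt_setup_cmd: instead of rejecting the request, the responder answers with the parameters it insists on and the station may retry with them. A rough sketch of the clamp (condensed from the hunk below, not a literal copy):

    if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
            /* dictate the hardware minimum back to the requesting station */
            setup_cmd = TWT_SETUP_CMD_DICTATE;
            twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
    }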
Signed-off-by: Peter Chiu Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 6 ++++++ drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 1 + 2 files changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index bc6e3c880869..7b04fd87e37b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -2654,6 +2654,12 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw, if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow)) goto unlock; + if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) { + setup_cmd = TWT_SETUP_CMD_DICTATE; + twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR; + goto unlock; + } + flowid = ffs(~msta->twt.flowid_mask) - 1; le16p_replace_bits(&twt_agrt->req_type, flowid, IEEE80211_TWT_REQTYPE_FLOWID); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 0f51d7657caf..0219f941521b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -66,6 +66,7 @@ #define MT7915_MAX_TWT_AGRT 16 #define MT7915_MAX_STA_TWT_AGRT 8 +#define MT7915_MIN_TWT_DUR 64 #define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2) struct mt7915_vif; -- cgit v1.2.3 From bdd2ca78faacc10cf1963c2616e7ff16f571f4e4 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Thu, 5 May 2022 15:08:34 +0800 Subject: mt76: mt7915: rework SER debugfs knob 1. get status of system recovery from firmware. 2. add more recovery points. 3. make knob per phy. Signed-off-by: Bo Jiao Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/debugfs.c | 106 ++++++++++++++++++--- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 5 +- drivers/net/wireless/mediatek/mt76/mt7915/mcu.h | 14 +++ drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 3 + drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 18 +++- 5 files changed, 126 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 1628840e8931..665a30e415c2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -44,35 +44,113 @@ mt7915_implicit_txbf_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get, mt7915_implicit_txbf_set, "%lld\n"); -/* test knob of system layer 1/2 error recovery */ -static int mt7915_ser_trigger_set(void *data, u64 val) +/* test knob of system error recovery */ +static ssize_t +mt7915_fw_ser_set(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) { - enum { - SER_SET_RECOVER_L1 = 1, - SER_SET_RECOVER_L2, - SER_ENABLE = 2, - SER_RECOVER - }; - struct mt7915_dev *dev = data; + struct mt7915_phy *phy = file->private_data; + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + char buf[16]; int ret = 0; + u16 val; + + if (count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (count && buf[count - 1] == '\n') + buf[count - 1] = '\0'; + else + buf[count] = '\0'; + + if (kstrtou16(buf, 0, &val)) + return -EINVAL; switch (val) { + case SER_QUERY: + /* grab firmware SER stats */ + ret = mt7915_mcu_set_ser(dev, 0, 0, ext_phy); + break; case SER_SET_RECOVER_L1: case 
SER_SET_RECOVER_L2: - ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0); + case SER_SET_RECOVER_L3_RX_ABORT: + case SER_SET_RECOVER_L3_TX_ABORT: + case SER_SET_RECOVER_L3_TX_DISABLE: + case SER_SET_RECOVER_L3_BF: + ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), ext_phy); if (ret) return ret; - return mt7915_mcu_set_ser(dev, SER_RECOVER, val, 0); + ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, ext_phy); + break; default: break; } + return ret ? ret : count; +} + +static ssize_t +mt7915_fw_ser_get(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct mt7915_phy *phy = file->private_data; + struct mt7915_dev *dev = phy->dev; + char *buff; + int desc = 0; + ssize_t ret; + static const size_t bufsz = 400; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_STATUS = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_SER_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_PLE_ERR = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_PLE_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_PLE_ERR_1 = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_PLE1_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_PLE_ERR_AMSDU = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_PLE_AMSDU_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_PSE_ERR = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_PSE_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_PSE_ERR_1 = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_PSE1_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_LMAC_WISR6_B0 = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN0_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_LMAC_WISR6_B1 = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN1_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_LMAC_WISR7_B0 = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN0_STATS)); + desc += scnprintf(buff + desc, bufsz - desc, + "::E R , SER_LMAC_WISR7_B1 = 0x%08x\n", + mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN1_STATS)); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); return ret; } -DEFINE_DEBUGFS_ATTRIBUTE(fops_ser_trigger, NULL, - mt7915_ser_trigger_set, "%lld\n"); +static const struct file_operations mt7915_fw_ser_ops = { + .write = mt7915_fw_ser_set, + .read = mt7915_fw_ser_get, + .open = simple_open, + .llseek = default_llseek, +}; static int mt7915_radar_trigger(void *data, u64 val) @@ -914,6 +992,7 @@ int mt7915_init_debugfs(struct mt7915_phy *phy) debugfs_create_file("xmit-queues", 0400, dir, phy, &mt7915_xmit_queues_fops); debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops); + debugfs_create_file("fw_ser", 0600, dir, phy, &mt7915_fw_ser_ops); debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm); debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa); debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin); @@ -927,7 +1006,6 @@ int mt7915_init_debugfs(struct mt7915_phy *phy) &mt7915_rate_txpower_fops); debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir, mt7915_twt_stats); - debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger); debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval); if (!dev->dbdc_support || phy->band_idx) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 4faa7b8b2778..7594c2b44fe1 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -2471,10 +2471,7 @@ int mt7915_mcu_init(struct mt7915_dev *dev) /* force firmware operation mode into normal state, * which should be set before firmware download stage. */ - if (is_mt7915(&dev->mt76)) - mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); - else - mt76_wr(dev, MT_SWDEF_MODE_MT7916, MT_SWDEF_NORMAL_MODE); + mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); ret = mt7915_driver_own(dev, 0); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 064d33e33738..0c40e4469937 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -463,6 +463,20 @@ enum { MURU_GET_TXC_TX_STATS = 151, }; +enum { + SER_QUERY, + /* recovery */ + SER_SET_RECOVER_L1, + SER_SET_RECOVER_L2, + SER_SET_RECOVER_L3_RX_ABORT, + SER_SET_RECOVER_L3_TX_ABORT, + SER_SET_RECOVER_L3_TX_DISABLE, + SER_SET_RECOVER_L3_BF, + /* action */ + SER_ENABLE = 2, + SER_RECOVER +}; + #define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct bss_info_omac) + \ sizeof(struct bss_info_basic) +\ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 5062e0d8cae4..229d40826c9b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -22,6 +22,7 @@ static const u32 mt7915_reg[] = { [WFDMA_EXT_CSR_ADDR] = 0xd7000, [CBTOP1_PHY_END] = 0x77ffffff, [INFRA_MCU_ADDR_END] = 0x7c3fffff, + [SWDEF_BASE_ADDR] = 0x41f200, }; static const u32 mt7916_reg[] = { @@ -36,6 +37,7 @@ static const u32 mt7916_reg[] = { [WFDMA_EXT_CSR_ADDR] = 0xd7000, [CBTOP1_PHY_END] = 0x7fffffff, [INFRA_MCU_ADDR_END] = 0x7c085fff, + [SWDEF_BASE_ADDR] = 0x411400, }; static const u32 mt7986_reg[] = { @@ -50,6 +52,7 @@ static const u32 mt7986_reg[] = { [WFDMA_EXT_CSR_ADDR] = 0x27000, [CBTOP1_PHY_END] = 0x7fffffff, [INFRA_MCU_ADDR_END] = 0x7c085fff, + [SWDEF_BASE_ADDR] = 0x411400, }; static const u32 mt7915_offs[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index 338dfbee0f93..e3eb62a4929b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -30,6 +30,7 @@ enum reg_rev { WFDMA_EXT_CSR_ADDR, CBTOP1_PHY_END, INFRA_MCU_ADDR_END, + SWDEF_BASE_ADDR, __MT_REG_MAX, }; @@ -917,12 +918,25 @@ enum offs_rev { #define MT_ADIE_TYPE_MASK BIT(1) /* FW MODE SYNC */ -#define MT_SWDEF_MODE 0x41f23c -#define MT_SWDEF_MODE_MT7916 0x41143c +#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR) + +#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) +#define MT_SWDEF_MODE MT_SWDEF(0x3c) #define MT_SWDEF_NORMAL_MODE 0 #define MT_SWDEF_ICAP_MODE 1 #define MT_SWDEF_SPECTRUM_MODE 2 +#define MT_SWDEF_SER_STATS MT_SWDEF(0x040) +#define MT_SWDEF_PLE_STATS MT_SWDEF(0x044) +#define MT_SWDEF_PLE1_STATS MT_SWDEF(0x048) +#define MT_SWDEF_PLE_AMSDU_STATS MT_SWDEF(0x04C) +#define MT_SWDEF_PSE_STATS MT_SWDEF(0x050) +#define MT_SWDEF_PSE1_STATS MT_SWDEF(0x054) +#define MT_SWDEF_LAMC_WISR6_BN0_STATS MT_SWDEF(0x058) +#define MT_SWDEF_LAMC_WISR6_BN1_STATS MT_SWDEF(0x05C) +#define MT_SWDEF_LAMC_WISR7_BN0_STATS MT_SWDEF(0x060) +#define MT_SWDEF_LAMC_WISR7_BN1_STATS MT_SWDEF(0x064) + #define MT_DIC_CMD_REG_BASE 0x41f000 #define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs)) #define MT_DIC_CMD_REG_CMD 
MT_DIC_CMD_REG(0x10) -- cgit v1.2.3 From b4c268ca4df8a86e80dbbd64589983bfb005467d Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Thu, 5 May 2022 15:08:35 +0800 Subject: mt76: mt7915: introduce mt7915_mac_severe_check() In rare cases, TRB pointers might get out of sync and lead to RMAC stopping Rx, which requires minimal recovery, so add this helper to periodically check the TRB status. Tested-by: Chad Monroe Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 27 ++++++++++++++++++++++ drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 2 ++ drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 8 +++++++ 3 files changed, 37 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 7b04fd87e37b..cc796693ced9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -2305,6 +2305,32 @@ void mt7915_mac_update_stats(struct mt7915_phy *phy) } } +static void mt7915_mac_severe_check(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 trb; + + if (!phy->omac_mask) + return; + + /* In rare cases, TRB pointers might be out of sync leads to RMAC + * stopping Rx, so check status periodically to see if TRB hardware + * requires minimal recovery. + */ + trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx)); + + if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) != + FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) && + (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) != + FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) && + trb == phy->trb_ts) + mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT, + ext_phy); + + phy->trb_ts = trb; +} + void mt7915_mac_sta_rc_work(struct work_struct *work) { struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work); @@ -2357,6 +2383,7 @@ void mt7915_mac_work(struct work_struct *work) mphy->mac_work_count = 0; mt7915_mac_update_stats(phy); + mt7915_mac_severe_check(phy); } mutex_unlock(&mphy->dev->mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 0219f941521b..fbb5dde588a8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -248,6 +248,8 @@ struct mt7915_phy { u8 rdd_state; + u32 trb_ts; + u32 rx_ampdu_ts; u32 ampdu_ref; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index e3eb62a4929b..66c60e60e921 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -176,6 +176,14 @@ enum offs_rev { #define MT_MDP_TO_HIF 0 #define MT_MDP_TO_WM 1 +/* TRB: band 0(0x820e1000), band 1(0x820f1000) */ +#define MT_WF_TRB_BASE(_band) ((_band) ? 0x820f1000 : 0x820e1000) +#define MT_WF_TRB(_band, ofs) (MT_WF_TRB_BASE(_band) + (ofs)) + +#define MT_TRB_RXPSR0(_band) MT_WF_TRB(_band, 0x03c) +#define MT_TRB_RXPSR0_RX_WTBL_PTR GENMASK(25, 16) +#define MT_TRB_RXPSR0_RX_RMAC_PTR GENMASK(9, 0) + /* TMAC: band 0(0x820e4000), band 1(0x820f4000) */ #define MT_WF_TMAC_BASE(_band) ((_band) ?
0x820f4000 : 0x820e4000) #define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) -- cgit v1.2.3 From 1dfe52adb00d260c5e53dc7c5dd2109d54d2b451 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Thu, 5 May 2022 15:08:36 +0800 Subject: mt76: mt7915: move MT_INT_MASK_CSR to init.c To avoid redundant MT_INT_MASK_CSR settings. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 1 + drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 2 -- drivers/net/wireless/mediatek/mt76/mt7915/pci.c | 2 -- drivers/net/wireless/mediatek/mt76/mt7915/soc.c | 1 - 4 files changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 70baad756dd0..1e777affc2d8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -652,6 +652,7 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2) { int ret, idx; + mt76_wr(dev, MT_INT_MASK_CSR, 0); mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); INIT_WORK(&dev->init_work, mt7915_init_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 229d40826c9b..3c55d4cebbf2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -668,8 +668,6 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev, tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet); - mt76_wr(dev, MT_INT_MASK_CSR, 0); - return dev; error: diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 1bab1cbb0d89..7cea49f23941 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -139,8 +139,6 @@ static int mt7915_pci_probe(struct pci_dev *pdev, if (ret) goto free_irq_vector; - mt76_wr(dev, MT_INT_MASK_CSR, 0); - /* master switch of PCIe tnterrupt enable */ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index c1900646c54d..c74afa746251 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -1197,7 +1197,6 @@ static int mt7986_wmac_probe(struct platform_device *pdev) goto free_irq; mt7915_wfsys_reset(dev); - mt76_wr(dev, MT_INT_MASK_CSR, 0); ret = mt7915_register_device(dev); if (ret) -- cgit v1.2.3 From cc9fd945db4fa1ea2317c3d716b92ba2e9a13147 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 25 Apr 2021 08:22:45 +0200 Subject: mt76: dma: add wrapper macro for accessing queue registers Preparation for adding indirection used for Wireless Ethernet Dispatch support Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/dma.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 09dc37bbf112..03d5beb1afdd 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -7,6 +7,10 @@ #include "mt76.h" #include "dma.h" +#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field) +#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field) + + static struct mt76_txwi_cache * mt76_alloc_txwi(struct mt76_dev *dev) { @@ -84,9 +88,9 @@ 
mt76_free_pending_txwi(struct mt76_dev *dev) static void mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) { - writel(q->desc_dma, &q->regs->desc_base); - writel(q->ndesc, &q->regs->ring_size); - q->head = readl(&q->regs->dma_idx); + Q_WRITE(dev, q, desc_base, q->desc_dma); + Q_WRITE(dev, q, ring_size, q->ndesc); + q->head = Q_READ(dev, q, dma_idx); q->tail = q->head; } @@ -102,8 +106,8 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) for (i = 0; i < q->ndesc; i++) q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); - writel(0, &q->regs->cpu_idx); - writel(0, &q->regs->dma_idx); + Q_WRITE(dev, q, cpu_idx, 0); + Q_WRITE(dev, q, dma_idx, 0); mt76_dma_sync_idx(dev, q); } @@ -226,7 +230,7 @@ static void mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) { wmb(); - writel(q->head, &q->regs->cpu_idx); + Q_WRITE(dev, q, cpu_idx, q->head); } static void @@ -242,7 +246,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) if (flush) last = -1; else - last = readl(&q->regs->dma_idx); + last = Q_READ(dev, q, dma_idx); while (q->queued > 0 && q->tail != last) { mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); @@ -254,8 +258,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) } if (!flush && q->tail == last) - last = readl(&q->regs->dma_idx); - + last = Q_READ(dev, q, dma_idx); } spin_unlock_bh(&q->cleanup_lock); -- cgit v1.2.3 From d1ddc536df93ae406ef671deb3218898d3515ea4 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 6 Jan 2022 13:22:28 +0100 Subject: mt76: add support for overriding the device used for DMA mapping WED support requires using non-coherent DMA, whereas the PCI device might be configured for coherent DMA. The WED driver will take care of changing the PCI HIF coherent IO setting on attach. 
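The mechanism is a second struct device pointer used for every DMA mapping call: mt76_alloc_device() initializes it to the functional device, and an attach path may repoint it before the DMA rings are allocated. A minimal sketch of the intent (wed_dma_dev is a hypothetical device, not a name from this patch):

    dev->dma_dev = dev->dev;        /* default, set in mt76_alloc_device() */

    /* a bus/WED attach hook may override it before queue allocation */
    dev->dma_dev = wed_dma_dev;     /* hypothetical non-coherent device */
    addr = dma_map_single(dev->dma_dev, buf, len, DMA_TO_DEVICE);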
Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/dma.c | 34 +++++++++++++-------------- drivers/net/wireless/mediatek/mt76/mac80211.c | 1 + drivers/net/wireless/mediatek/mt76/mt76.h | 1 + 3 files changed, 19 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 03d5beb1afdd..83787fc01e4e 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -24,7 +24,7 @@ mt76_alloc_txwi(struct mt76_dev *dev) if (!txwi) return NULL; - addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size, + addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size, DMA_TO_DEVICE); t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); t->dma_addr = addr; @@ -78,7 +78,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev) local_bh_disable(); while ((t = __mt76_get_txwi(dev)) != NULL) { - dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, + dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); kfree(mt76_get_txwi_ptr(dev, t)); } @@ -127,7 +127,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, q->hw_idx = idx; size = q->ndesc * sizeof(struct mt76_desc); - q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL); + q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); if (!q->desc) return -ENOMEM; @@ -209,11 +209,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, struct mt76_queue_entry *e = &q->entry[idx]; if (!e->skip_buf0) - dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0], + dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0], DMA_TO_DEVICE); if (!e->skip_buf1) - dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1], + dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1], DMA_TO_DEVICE); if (e->txwi == DMA_DUMMY_DATA) @@ -293,7 +293,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, if (info) *info = le32_to_cpu(desc->info); - dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE); + dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE); e->buf = NULL; return buf; @@ -330,9 +330,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, if (q->queued + 1 >= q->ndesc - 1) goto error; - addr = dma_map_single(dev->dev, skb->data, skb->len, + addr = dma_map_single(dev->dma_dev, skb->data, skb->len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev, addr))) + if (unlikely(dma_mapping_error(dev->dma_dev, addr))) goto error; buf.addr = addr; @@ -379,8 +379,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, mt76_insert_hdr_pad(skb); len = skb_headlen(skb); - addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev, addr))) + addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dma_dev, addr))) goto free; tx_info.buf[n].addr = t->dma_addr; @@ -392,9 +392,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, if (n == ARRAY_SIZE(tx_info.buf)) goto unmap; - addr = dma_map_single(dev->dev, iter->data, iter->len, + addr = dma_map_single(dev->dma_dev, iter->data, iter->len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev, addr))) + if (unlikely(dma_mapping_error(dev->dma_dev, addr))) goto unmap; tx_info.buf[n].addr = addr; @@ -407,10 +407,10 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct 
mt76_queue *q, goto unmap; } - dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size, + dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info); - dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size, + dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); if (ret < 0) goto unmap; @@ -420,7 +420,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, unmap: for (n--; n > 0; n--) - dma_unmap_single(dev->dev, tx_info.buf[n].addr, + dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr, tx_info.buf[n].len, DMA_TO_DEVICE); free: @@ -465,8 +465,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) if (!buf) break; - addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev->dev, addr))) { + addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev->dma_dev, addr))) { skb_free_frag(buf); break; } diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 8a2fedbb1451..41e59f34f02a 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -545,6 +545,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size, dev->hw = hw; dev->dev = pdev; dev->drv = drv_ops; + dev->dma_dev = pdev; phy = &dev->phy; phy->dev = dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 62131010a0bb..9a2fc77243bf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -698,6 +698,7 @@ struct mt76_dev { const struct mt76_driver_ops *drv; const struct mt76_mcu_ops *mcu_ops; struct device *dev; + struct device *dma_dev; struct mt76_mcu mcu; -- cgit v1.2.3 From 61b5156bf02dbd642de94eb61a3bc4ab6f8bfb71 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 26 Apr 2021 17:45:06 +0200 Subject: mt76: make number of tokens configurable dynamically Preparation for adding Wireless Ethernet Dispatch support Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 1 + drivers/net/wireless/mediatek/mt76/mt76.h | 6 +++--- drivers/net/wireless/mediatek/mt76/tx.c | 7 +++---- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 41e59f34f02a..9056d09d6c33 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -580,6 +580,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size, INIT_LIST_HEAD(&dev->wcid_list); INIT_LIST_HEAD(&dev->txwi_cache); + dev->token_size = dev->drv->token_size; for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) skb_queue_head_init(&dev->rx_skb[i]); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 9a2fc77243bf..dce86846e648 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -719,7 +719,8 @@ struct mt76_dev { spinlock_t token_lock; struct idr token; - int token_count; + u16 token_count; + u16 token_size; wait_queue_head_t tx_wait; /* spinclock used to protect wcid pktid linked list */ @@ -1381,8 +1382,7 @@ mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) int token; 
spin_lock_bh(&dev->token_lock); - token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size, - GFP_ATOMIC); + token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC); spin_unlock_bh(&dev->token_lock); return token; diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index d5a8456c108b..84fa922c977a 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -722,12 +722,11 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) spin_lock_bh(&dev->token_lock); - token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size, - GFP_ATOMIC); + token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC); if (token >= 0) dev->token_count++; - if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR) + if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR) __mt76_set_tx_blocked(dev, true); spin_unlock_bh(&dev->token_lock); @@ -747,7 +746,7 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake) if (txwi) dev->token_count--; - if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR && + if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR && dev->phy.q_tx[0]->blocked) *wake = true; -- cgit v1.2.3 From f68d67623dec9445960b52a0e9e8e60297596b4b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 6 Dec 2021 13:45:54 +0100 Subject: mt76: mt7915: add Wireless Ethernet Dispatch support This is used to support hardware flow offloading from Ethernet to WLAN Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/dma.c | 160 ++++++++++++++++----- drivers/net/wireless/mediatek/mt76/mac80211.c | 4 +- drivers/net/wireless/mediatek/mt76/mmio.c | 9 +- drivers/net/wireless/mediatek/mt76/mt76.h | 30 +++- drivers/net/wireless/mediatek/mt76/mt7603/dma.c | 8 +- drivers/net/wireless/mediatek/mt76/mt7615/dma.c | 6 +- drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 4 +- drivers/net/wireless/mediatek/mt76/mt7915/dma.c | 43 +++++- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 129 ++++++++++++++--- drivers/net/wireless/mediatek/mt76/mt7915/mac.h | 2 + drivers/net/wireless/mediatek/mt76/mt7915/main.c | 36 +++++ drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 3 + drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 29 ++-- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 2 + drivers/net/wireless/mediatek/mt76/mt7915/pci.c | 96 ++++++++++++- drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 17 ++- drivers/net/wireless/mediatek/mt76/mt7921/dma.c | 2 +- drivers/net/wireless/mediatek/mt76/tx.c | 16 ++- 18 files changed, 502 insertions(+), 94 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 83787fc01e4e..30de8be4aac1 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -7,9 +7,36 @@ #include "mt76.h" #include "dma.h" -#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field) -#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field) +#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) + +#define Q_READ(_dev, _q, _field) ({ \ + u32 _offset = offsetof(struct mt76_queue_regs, _field); \ + u32 _val; \ + if ((_q)->flags & MT_QFLAG_WED) \ + _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \ + ((_q)->wed_regs + \ + _offset)); \ + else \ + _val = readl(&(_q)->regs->_field); \ + _val; \ +}) + +#define Q_WRITE(_dev, _q, _field, _val) do { \ + u32 _offset = offsetof(struct 
mt76_queue_regs, _field); \ + if ((_q)->flags & MT_QFLAG_WED) \ + mtk_wed_device_reg_write(&(_dev)->mmio.wed, \ + ((_q)->wed_regs + _offset), \ + _val); \ + else \ + writel(_val, &(_q)->regs->_field); \ +} while (0) + +#else + +#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field) +#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field) +#endif static struct mt76_txwi_cache * mt76_alloc_txwi(struct mt76_dev *dev) @@ -111,36 +138,6 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) mt76_dma_sync_idx(dev, q); } -static int -mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, - int idx, int n_desc, int bufsize, - u32 ring_base) -{ - int size; - - spin_lock_init(&q->lock); - spin_lock_init(&q->cleanup_lock); - - q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; - q->ndesc = n_desc; - q->buf_size = bufsize; - q->hw_idx = idx; - - size = q->ndesc * sizeof(struct mt76_desc); - q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); - if (!q->desc) - return -ENOMEM; - - size = q->ndesc * sizeof(*q->entry); - q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); - if (!q->entry) - return -ENOMEM; - - mt76_dma_queue_reset(dev, q); - - return 0; -} - static int mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, struct mt76_queue_buf *buf, int nbufs, u32 info, @@ -486,6 +483,85 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) return frames; } +static int +mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q) +{ +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + struct mtk_wed_device *wed = &dev->mmio.wed; + int ret, type, ring; + u8 flags = q->flags; + + if (!mtk_wed_device_active(wed)) + q->flags &= ~MT_QFLAG_WED; + + if (!(q->flags & MT_QFLAG_WED)) + return 0; + + type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags); + ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags); + + switch (type) { + case MT76_WED_Q_TX: + ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs); + if (!ret) + q->wed_regs = wed->tx_ring[ring].reg_base; + break; + case MT76_WED_Q_TXFREE: + /* WED txfree queue needs ring to be initialized before setup */ + q->flags = 0; + mt76_dma_queue_reset(dev, q); + mt76_dma_rx_fill(dev, q); + q->flags = flags; + + ret = mtk_wed_device_txfree_ring_setup(wed, q->regs); + if (!ret) + q->wed_regs = wed->txfree_ring.reg_base; + break; + default: + ret = -EINVAL; + } + + return ret; +#else + return 0; +#endif +} + +static int +mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, + int idx, int n_desc, int bufsize, + u32 ring_base) +{ + int ret, size; + + spin_lock_init(&q->lock); + spin_lock_init(&q->cleanup_lock); + + q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; + q->ndesc = n_desc; + q->buf_size = bufsize; + q->hw_idx = idx; + + size = q->ndesc * sizeof(struct mt76_desc); + q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); + if (!q->desc) + return -ENOMEM; + + size = q->ndesc * sizeof(*q->entry); + q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + ret = mt76_dma_wed_setup(dev, q); + if (ret) + return ret; + + if (q->flags != MT_WED_Q_TXFREE) + mt76_dma_queue_reset(dev, q); + + return 0; +} + static void mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) { @@ -567,14 +643,29 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, static int mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) { - int len, data_len, done = 0; + int len, data_len, done = 0, 
dma_idx; struct sk_buff *skb; unsigned char *data; + bool check_ddone = false; bool more; + if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) && + q->flags == MT_WED_Q_TXFREE) { + dma_idx = Q_READ(dev, q, dma_idx); + check_ddone = true; + } + while (done < budget) { u32 info; + if (check_ddone) { + if (q->tail == dma_idx) + dma_idx = Q_READ(dev, q, dma_idx); + + if (q->tail == dma_idx) + break; + } + data = mt76_dma_dequeue(dev, q, false, &len, &info, &more); if (!data) break; @@ -715,5 +806,8 @@ void mt76_dma_cleanup(struct mt76_dev *dev) } mt76_free_pending_txwi(dev); + + if (mtk_wed_device_active(&dev->mmio.wed)) + mtk_wed_device_detach(&dev->mmio.wed); } EXPORT_SYMBOL_GPL(mt76_dma_cleanup); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 9056d09d6c33..18b5de55334c 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1582,7 +1582,7 @@ EXPORT_SYMBOL_GPL(mt76_get_antenna); struct mt76_queue * mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, - int ring_base) + int ring_base, u32 flags) { struct mt76_queue *hwq; int err; @@ -1591,6 +1591,8 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, if (!hwq) return ERR_PTR(-ENOMEM); + hwq->flags = flags; + err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base); if (err < 0) return ERR_PTR(err); diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c index 26353b6bce97..86e3d2ac4d0d 100644 --- a/drivers/net/wireless/mediatek/mt76/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mmio.c @@ -73,8 +73,13 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, spin_lock_irqsave(&dev->mmio.irq_lock, flags); dev->mmio.irqmask &= ~clear; dev->mmio.irqmask |= set; - if (addr) - mt76_mmio_wr(dev, addr, dev->mmio.irqmask); + if (addr) { + if (mtk_wed_device_active(&dev->mmio.wed)) + mtk_wed_device_irq_set_mask(&dev->mmio.wed, + dev->mmio.irqmask); + else + mt76_mmio_wr(dev, addr, dev->mmio.irqmask); + } spin_unlock_irqrestore(&dev->mmio.irq_lock, flags); } EXPORT_SYMBOL_GPL(mt76_set_irq_mask); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index dce86846e648..dc0f1b0aa34a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "util.h" #include "testmode.h" @@ -26,6 +27,16 @@ #define MT76_TOKEN_FREE_THR 64 +#define MT_QFLAG_WED_RING GENMASK(1, 0) +#define MT_QFLAG_WED_TYPE GENMASK(3, 2) +#define MT_QFLAG_WED BIT(4) + +#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \ + FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \ + FIELD_PREP(MT_QFLAG_WED_RING, _n)) +#define MT_WED_Q_TX(_n) __MT_WED_Q(MT76_WED_Q_TX, _n) +#define MT_WED_Q_TXFREE __MT_WED_Q(MT76_WED_Q_TXFREE, 0) + struct mt76_dev; struct mt76_phy; struct mt76_wcid; @@ -42,6 +53,11 @@ enum mt76_bus_type { MT76_BUS_SDIO, }; +enum mt76_wed_type { + MT76_WED_Q_TX, + MT76_WED_Q_TXFREE, +}; + struct mt76_bus_ops { u32 (*rr)(struct mt76_dev *dev, u32 offset); void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); @@ -170,6 +186,9 @@ struct mt76_queue { u8 buf_offset; u8 hw_idx; u8 qid; + u8 flags; + + u32 wed_regs; dma_addr_t desc_dma; struct sk_buff *rx_head; @@ -537,6 +556,8 @@ struct mt76_mmio { void __iomem *regs; spinlock_t irq_lock; u32 irqmask; + + struct mtk_wed_device wed; }; struct mt76_rx_status { @@ -719,6 +740,7 @@ struct mt76_dev { 
spinlock_t token_lock; struct idr token; + u16 wed_token_count; u16 token_count; u16 token_size; @@ -944,14 +966,14 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len); struct mt76_queue * mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, - int ring_base); + int ring_base, u32 flags); u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx); static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx, - int n_desc, int ring_base) + int n_desc, int ring_base, u32 flags) { struct mt76_queue *q; - q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base); + q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags); if (IS_ERR(q)) return PTR_ERR(q); @@ -966,7 +988,7 @@ static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx, { struct mt76_queue *q; - q = mt76_init_queue(dev, qid, idx, n_desc, ring_base); + q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0); if (IS_ERR(q)) return PTR_ERR(q); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 2c1c79dc7fda..f9e5857850e7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -173,13 +173,13 @@ int mt7603_dma_init(struct mt7603_dev *dev) for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i], - MT7603_TX_RING_SIZE, MT_TX_RING_BASE); + MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0); if (ret) return ret; } ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, - MT7603_PSD_RING_SIZE, MT_TX_RING_BASE); + MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0); if (ret) return ret; @@ -189,12 +189,12 @@ int mt7603_dma_init(struct mt7603_dev *dev) return ret; ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN, - MT_MCU_RING_SIZE, MT_TX_RING_BASE); + MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0); if (ret) return ret; ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC, - MT_MCU_RING_SIZE, MT_TX_RING_BASE); + MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 95dbb413678a..ce19f57de475 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -26,14 +26,14 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev) for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i], MT7615_TX_RING_SIZE / 2, - MT_TX_RING_BASE); + MT_TX_RING_BASE, 0); if (ret) return ret; } ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE, - MT_TX_RING_BASE); + MT_TX_RING_BASE, 0); if (ret) return ret; @@ -55,7 +55,7 @@ mt7615_init_tx_queues(struct mt7615_dev *dev) return mt7622_init_tx_queues_multi(dev); ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE, - MT_TX_RING_BASE); + MT_TX_RING_BASE, 0); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 23d2864d37fb..96ec96df6a3c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -191,13 +191,13 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) for (i = 0; i < IEEE80211_NUM_ACS; i++) { ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i), MT76x02_TX_RING_SIZE, - MT_TX_RING_BASE); + 
MT_TX_RING_BASE, 0); if (ret) return ret; } ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, - MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE); + MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE, 0); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index dad5225e223e..f3d608d2d3b2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -8,9 +8,16 @@ static int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base) { + struct mt7915_dev *dev = phy->dev; int i, err; - err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base); + if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) { + ring_base = MT_WED_TX_RING_BASE; + idx -= MT_TXQ_ID(0); + } + + err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base, + MT_WED_Q_TX(idx)); if (err < 0) return err; @@ -319,6 +326,14 @@ static int mt7915_dma_enable(struct mt7915_dev *dev) if (dev->dbdc_support || dev->phy.band_idx) irq_mask |= MT_INT_BAND1_RX_DONE; + if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { + u32 wed_irq_mask = irq_mask; + + wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1; + mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask); + mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask); + } + mt7915_irq_enable(dev, irq_mask); return 0; @@ -327,6 +342,7 @@ static int mt7915_dma_enable(struct mt7915_dev *dev) int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) { struct mt76_dev *mdev = &dev->mt76; + u32 wa_rx_base, wa_rx_idx; u32 hif1_ofs = 0; int ret; @@ -339,6 +355,17 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) mt7915_dma_disable(dev, true); + if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { + mt76_set(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED); + + mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL, + FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) | + FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) | + FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1, 1)); + } else { + mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED); + } + /* init tx queue */ ret = mt7915_init_tx_queues(&dev->phy, MT_TXQ_ID(dev->phy.band_idx), @@ -390,11 +417,17 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) return ret; /* event from WA */ + if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { + wa_rx_base = MT_WED_RX_RING_BASE; + wa_rx_idx = MT7915_RXQ_MCU_WA; + dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE; + } else { + wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA); + wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA); + } ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA], - MT_RXQ_ID(MT_RXQ_MCU_WA), - MT7915_RX_MCU_RING_SIZE, - MT_RX_BUF_SIZE, - MT_RXQ_RING_BASE(MT_RXQ_MCU_WA)); + wa_rx_idx, MT7915_RX_MCU_RING_SIZE, + MT_RX_BUF_SIZE, wa_rx_base); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index cc796693ced9..a493162b4e8c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -1348,6 +1348,29 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, return 0; } +u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id) +{ + struct mt7915_txp *txp = ptr + MT_TXD_SIZE; + __le32 *txwi = ptr; + u32 val; + + memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp)); + + val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) | + FIELD_PREP(MT_TXD0_PKT_FMT, 
MT_TX_TYPE_CT); + txwi[0] = cpu_to_le32(val); + + val = MT_TXD1_LONG_FORMAT | + FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3); + txwi[1] = cpu_to_le32(val); + + txp->token = cpu_to_le16(token_id); + txp->nbuf = 1; + txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp)); + + return MT_TXD_SIZE + sizeof(*txp); +} + static void mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) { @@ -1381,7 +1404,7 @@ mt7915_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t) txp = mt7915_txwi_to_txp(dev, t); for (i = 0; i < txp->nbuf; i++) - dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]), + dma_unmap_single(dev->dma_dev, le32_to_cpu(txp->buf[i]), le16_to_cpu(txp->len[i]), DMA_TO_DEVICE); } @@ -1390,6 +1413,7 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t, struct ieee80211_sta *sta, struct list_head *free_list) { struct mt76_dev *mdev = &dev->mt76; + struct mt7915_sta *msta; struct mt76_wcid *wcid; __le32 *txwi; u16 wcid_idx; @@ -1402,13 +1426,24 @@ mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t, if (sta) { wcid = (struct mt76_wcid *)sta->drv_priv; wcid_idx = wcid->idx; - - if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7915_tx_check_aggr(sta, txwi); } else { wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX); + wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]); + + if (wcid && wcid->sta) { + msta = container_of(wcid, struct mt7915_sta, wcid); + sta = container_of((void *)msta, struct ieee80211_sta, + drv_priv); + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + } } + if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) + mt7915_tx_check_aggr(sta, txwi); + __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list); out: @@ -1416,29 +1451,55 @@ out: mt76_put_txwi(mdev, t); } +static void +mt7915_mac_tx_free_prepare(struct mt7915_dev *dev) +{ + struct mt76_dev *mdev = &dev->mt76; + struct mt76_phy *mphy_ext = mdev->phy2; + + /* clean DMA queues and unmap buffers first */ + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); + if (mphy_ext) { + mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false); + mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false); + } +} + +static void +mt7915_mac_tx_free_done(struct mt7915_dev *dev, + struct list_head *free_list, bool wake) +{ + struct sk_buff *skb, *tmp; + + mt7915_mac_sta_poll(dev); + + if (wake) + mt76_set_tx_blocked(&dev->mt76, false); + + mt76_worker_schedule(&dev->mt76.tx_worker); + + list_for_each_entry_safe(skb, tmp, free_list, list) { + skb_list_del_init(skb); + napi_consume_skb(skb, 1); + } +} + static void mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) { struct mt7915_tx_free *free = (struct mt7915_tx_free *)data; struct mt76_dev *mdev = &dev->mt76; - struct mt76_phy *mphy_ext = mdev->phy2; struct mt76_txwi_cache *txwi; struct ieee80211_sta *sta = NULL; LIST_HEAD(free_list); - struct sk_buff *skb, *tmp; void *end = data + len; bool v3, wake = false; u16 total, count = 0; u32 txd = le32_to_cpu(free->txd); __le32 *cur_info; - /* clean DMA queues and unmap buffers first */ - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); - if (mphy_ext) { - mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false); - mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], 
false); - } + mt7915_mac_tx_free_prepare(dev); total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT); v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4); @@ -1492,17 +1553,38 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len) } } - mt7915_mac_sta_poll(dev); + mt7915_mac_tx_free_done(dev, &free_list, wake); +} - if (wake) - mt76_set_tx_blocked(&dev->mt76, false); +static void +mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len) +{ + struct mt7915_tx_free *free = (struct mt7915_tx_free *)data; + struct mt76_dev *mdev = &dev->mt76; + __le16 *info = (__le16 *)free->info; + void *end = data + len; + LIST_HEAD(free_list); + bool wake = false; + u8 i, count; - mt76_worker_schedule(&dev->mt76.tx_worker); + mt7915_mac_tx_free_prepare(dev); - list_for_each_entry_safe(skb, tmp, &free_list, list) { - skb_list_del_init(skb); - napi_consume_skb(skb, 1); + count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl)); + if (WARN_ON_ONCE((void *)&info[count] > end)) + return; + + for (i = 0; i < count; i++) { + struct mt76_txwi_cache *txwi; + u16 msdu = le16_to_cpu(info[i]); + + txwi = mt76_token_release(mdev, msdu, &wake); + if (!txwi) + continue; + + mt7915_txwi_free(dev, txwi, NULL, &free_list); } + + mt7915_mac_tx_free_done(dev, &free_list, wake); } static bool @@ -1682,6 +1764,9 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len) case PKT_TYPE_TXRX_NOTIFY: mt7915_mac_tx_free(dev, data, len); return false; + case PKT_TYPE_TXRX_NOTIFY_V0: + mt7915_mac_tx_free_v0(dev, data, len); + return false; case PKT_TYPE_TXS: for (rxd += 2; rxd + 8 <= end; rxd += 8) mt7915_mac_add_txs(dev, rxd); @@ -1709,6 +1794,10 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, mt7915_mac_tx_free(dev, skb->data, skb->len); napi_consume_skb(skb, 1); break; + case PKT_TYPE_TXRX_NOTIFY_V0: + mt7915_mac_tx_free_v0(dev, skb->data, skb->len); + napi_consume_skb(skb, 1); + break; case PKT_TYPE_RX_EVENT: mt7915_mcu_rx_event(dev, skb); break; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index 5add1dd36dbe..c5fd1a618ae7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -24,6 +24,7 @@ enum rx_pkt_type { PKT_TYPE_TXRX_NOTIFY, PKT_TYPE_RX_EVENT, PKT_TYPE_RX_FW_MONITOR = 0x0c, + PKT_TYPE_TXRX_NOTIFY_V0 = 0x18, }; /* RXD DW1 */ @@ -311,6 +312,7 @@ struct mt7915_tx_free { #define MT_TX_FREE_VER GENMASK(18, 16) #define MT_TX_FREE_MSDU_CNT GENMASK(9, 0) +#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0) #define MT_TX_FREE_WLAN_ID GENMASK(23, 14) #define MT_TX_FREE_LATENCY GENMASK(12, 0) /* 0: success, others: dropped */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 5177b19f9154..78bf5ffa7576 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -1373,6 +1373,39 @@ out: return ret; } +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +static int +mt7915_net_fill_forward_path(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; + + if (!mtk_wed_device_active(wed)) + 
return -ENODEV; + + if (msta->wcid.idx > 0xff) + return -EIO; + + path->type = DEV_PATH_MTK_WDMA; + path->dev = ctx->dev; + path->mtk_wdma.wdma_idx = wed->wdma_idx; + path->mtk_wdma.bss = mvif->mt76.idx; + path->mtk_wdma.wcid = msta->wcid.idx; + path->mtk_wdma.queue = phy != &dev->phy; + + ctx->dev = NULL; + + return 0; +} +#endif + const struct ieee80211_ops mt7915_ops = { .tx = mt7915_tx, .start = mt7915_start, @@ -1420,4 +1453,7 @@ const struct ieee80211_ops mt7915_ops = { .sta_add_debugfs = mt7915_sta_add_debugfs, #endif .set_radar_background = mt7915_set_radar_background, +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + .net_fill_forward_path = mt7915_net_fill_forward_path, +#endif }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 7594c2b44fe1..07c1a15e735d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -2496,6 +2496,9 @@ int mt7915_mcu_init(struct mt7915_dev *dev) if (ret) return ret; + if (mtk_wed_device_active(&dev->mt76.mmio.wed)) + mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0); + ret = mt7915_mcu_set_mwds(dev, 1); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 3c55d4cebbf2..c6446757ad4a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -550,15 +550,21 @@ static void mt7915_rx_poll_complete(struct mt76_dev *mdev, static void mt7915_irq_tasklet(struct tasklet_struct *t) { struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet); + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; u32 intr, intr1, mask; - mt76_wr(dev, MT_INT_MASK_CSR, 0); - if (dev->hif2) - mt76_wr(dev, MT_INT1_MASK_CSR, 0); + if (mtk_wed_device_active(wed)) { + mtk_wed_device_irq_set_mask(wed, 0); + intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask); + } else { + mt76_wr(dev, MT_INT_MASK_CSR, 0); + if (dev->hif2) + mt76_wr(dev, MT_INT1_MASK_CSR, 0); - intr = mt76_rr(dev, MT_INT_SOURCE_CSR); - intr &= dev->mt76.mmio.irqmask; - mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + intr &= dev->mt76.mmio.irqmask; + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + } if (dev->hif2) { intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR); @@ -613,10 +619,15 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t) irqreturn_t mt7915_irq_handler(int irq, void *dev_instance) { struct mt7915_dev *dev = dev_instance; + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; - mt76_wr(dev, MT_INT_MASK_CSR, 0); - if (dev->hif2) - mt76_wr(dev, MT_INT1_MASK_CSR, 0); + if (mtk_wed_device_active(wed)) { + mtk_wed_device_irq_set_mask(wed, 0); + } else { + mt76_wr(dev, MT_INT_MASK_CSR, 0); + if (dev->hif2) + mt76_wr(dev, MT_INT1_MASK_CSR, 0); + } if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) return IRQ_NONE; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index fbb5dde588a8..fdd989f0bb99 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -435,6 +435,8 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev, void mt7915_wfsys_reset(struct mt7915_dev *dev); irqreturn_t mt7915_irq_handler(int irq, void *dev_instance); u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif); +u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id); + int 
mt7915_register_device(struct mt7915_dev *dev); void mt7915_unregister_device(struct mt7915_dev *dev); int mt7915_eeprom_init(struct mt7915_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 7cea49f23941..d74f609775d3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -12,6 +12,9 @@ #include "mac.h" #include "../trace.h" +static bool wed_enable = false; +module_param(wed_enable, bool, 0644); + static LIST_HEAD(hif_list); static DEFINE_SPINLOCK(hif_lock); static u32 hif_idx; @@ -92,12 +95,79 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev) return 0; } +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +static int mt7915_wed_offload_enable(struct mtk_wed_device *wed) +{ + struct mt7915_dev *dev; + int ret; + + dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); + + spin_lock_bh(&dev->mt76.token_lock); + dev->mt76.token_size = wed->wlan.token_start; + spin_unlock_bh(&dev->mt76.token_lock); + + ret = wait_event_timeout(dev->mt76.tx_wait, + !dev->mt76.wed_token_count, HZ); + if (!ret) + return -EAGAIN; + + return 0; +} + +static void mt7915_wed_offload_disable(struct mtk_wed_device *wed) +{ + struct mt7915_dev *dev; + + dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed); + + spin_lock_bh(&dev->mt76.token_lock); + dev->mt76.token_size = MT7915_TOKEN_SIZE; + spin_unlock_bh(&dev->mt76.token_lock); +} +#endif + +static int +mt7915_pci_wed_init(struct mt7915_dev *dev, struct pci_dev *pdev, int *irq) +{ +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + struct mtk_wed_device *wed = &dev->mt76.mmio.wed; + int ret; + + if (!wed_enable) + return 0; + + wed->wlan.pci_dev = pdev; + wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) + + MT_WFDMA_EXT_CSR_BASE; + wed->wlan.nbuf = 4096; + wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf; + wed->wlan.init_buf = mt7915_wed_init_buf; + wed->wlan.offload_enable = mt7915_wed_offload_enable; + wed->wlan.offload_disable = mt7915_wed_offload_disable; + + if (mtk_wed_device_attach(wed) != 0) + return 0; + + *irq = wed->irq; + dev->mt76.dma_dev = wed->dev; + + ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + return 1; +#else + return 0; +#endif +} + static int mt7915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct mt7915_hif *hif2 = NULL; struct mt7915_dev *dev; struct mt76_dev *mdev; - struct mt7915_hif *hif2; int irq; int ret; @@ -129,15 +199,24 @@ static int mt7915_pci_probe(struct pci_dev *pdev, mt7915_wfsys_reset(dev); hif2 = mt7915_pci_init_hif2(pdev); - ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + ret = mt7915_pci_wed_init(dev, pdev, &irq); if (ret < 0) - goto free_device; + goto free_wed_or_irq_vector; + + if (!ret) { + hif2 = mt7915_pci_init_hif2(pdev); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + goto free_device; + + irq = pdev->irq; + } - irq = pdev->irq; ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) - goto free_irq_vector; + goto free_wed_or_irq_vector; /* master switch of PCIe tnterrupt enable */ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); @@ -172,8 +251,11 @@ free_hif2: if (dev->hif2) put_device(dev->hif2->dev); devm_free_irq(mdev->dev, irq, dev); -free_irq_vector: - pci_free_irq_vectors(pdev); +free_wed_or_irq_vector: + if (mtk_wed_device_active(&mdev->mmio.wed)) + mtk_wed_device_detach(&mdev->mmio.wed); + else + pci_free_irq_vectors(pdev); 
free_device: mt76_free_device(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index 66c60e60e921..31dffb9c949c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -577,18 +577,31 @@ enum offs_rev { /* WFDMA CSR */ #define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR) +#define MT_WFDMA_EXT_CSR_PHYS_BASE 0x18027000 #define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs)) +#define MT_WFDMA_EXT_CSR_PHYS(ofs) (MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs)) -#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30) +#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR_PHYS(0x30) #define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0) +#define MT_WFDMA_HOST_CONFIG_WED BIT(1) -#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44) +#define MT_WFDMA_WED_RING_CONTROL MT_WFDMA_EXT_CSR_PHYS(0x34) +#define MT_WFDMA_WED_RING_CONTROL_TX0 GENMASK(4, 0) +#define MT_WFDMA_WED_RING_CONTROL_TX1 GENMASK(12, 8) +#define MT_WFDMA_WED_RING_CONTROL_RX1 GENMASK(20, 16) + +#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR_PHYS(0x44) #define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0) #define MT_PCIE_RECOG_ID 0xd7090 #define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0) #define MT_PCIE_RECOG_ID_SEM BIT(31) +#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204) + +#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300) +#define MT_WED_RX_RING_BASE MT_WFDMA_EXT_CSR(0x400) + /* WFDMA0 PCIE1 */ #define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR) #define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c index 34b3effe14f3..3a6b158b779e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c @@ -9,7 +9,7 @@ static int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc) { int i, err; - err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE); + err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE, 0); if (err < 0) return err; diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 84fa922c977a..02067edca95e 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -726,6 +726,12 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) if (token >= 0) dev->token_count++; +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + if (mtk_wed_device_active(&dev->mmio.wed) && + token >= dev->mmio.wed.wlan.token_start) + dev->wed_token_count++; +#endif + if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR) __mt76_set_tx_blocked(dev, true); @@ -743,9 +749,17 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake) spin_lock_bh(&dev->token_lock); txwi = idr_remove(&dev->token, token); - if (txwi) + if (txwi) { dev->token_count--; +#ifdef CONFIG_NET_MEDIATEK_SOC_WED + if (mtk_wed_device_active(&dev->mmio.wed) && + token >= dev->mmio.wed.wlan.token_start && + --dev->wed_token_count == 0) + wake_up(&dev->tx_wait); +#endif + } + if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR && dev->phy.q_tx[0]->blocked) *wake = true; -- cgit v1.2.3 From 869f06468e77b06795bc5855bd5b6b03c6cb147c Mon Sep 17 00:00:00 2001 From: MeiChia Chiu Date: Tue, 26 Apr 2022 10:23:35 +0800 Subject: mt76: mt7915: add support for 6G in-band discovery Add offloading FILS discovery and unsolicited broadcast probe response support. 
Reviewed-by: Ryder Lee Signed-off-by: MeiChia Chiu Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 2 + drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 17 +++-- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 8 ++- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 75 +++++++++++++++++++++- drivers/net/wireless/mediatek/mt76/mt7915/mcu.h | 15 ++++- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 4 +- 6 files changed, 107 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 1e777affc2d8..01169853355e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -351,6 +351,8 @@ mt7915_init_wiphy(struct ieee80211_hw *hw) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); if (!mdev->dev->of_node || !of_property_read_bool(mdev->dev->of_node, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index a493162b4e8c..086244d9be76 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -1177,7 +1177,7 @@ out: void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, int pid, - struct ieee80211_key_conf *key, bool beacon) + struct ieee80211_key_conf *key, u32 changed) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; @@ -1188,6 +1188,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, bool mcast = false; u16 tx_count = 15; u32 val; + bool beacon = !!(changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)); + bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)); if (vif) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; @@ -1200,7 +1204,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, if (ext_phy && dev->mt76.phy2) mphy = dev->mt76.phy2; - if (beacon) { + if (inband_disc) { + p_fmt = MT_TX_TYPE_FW; + q_idx = MT_LMAC_ALTX0; + } else if (beacon) { p_fmt = MT_TX_TYPE_FW; q_idx = MT_LMAC_BCN0; } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { @@ -1308,8 +1315,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, return id; pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); - mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, - false); + mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key, 0); txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE); for (i = 0; i < nbuf; i++) { @@ -2008,7 +2014,8 @@ mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif) case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: - mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon); + mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon, + BSS_CHANGED_BEACON_ENABLED); break; default: break; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 78bf5ffa7576..710ca757fb52 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -622,8 +622,10 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw, mt7915_update_bss_color(hw, vif, &info->he_bss_color); if (changed & (BSS_CHANGED_BEACON | - BSS_CHANGED_BEACON_ENABLED)) - mt7915_mcu_add_beacon(hw, vif, info->enable_beacon); + BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | + BSS_CHANGED_FILS_DISCOVERY)) + mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed); mutex_unlock(&dev->mt76.mutex); } @@ -636,7 +638,7 @@ mt7915_channel_switch_beacon(struct ieee80211_hw *hw, struct mt7915_dev *dev = mt7915_hw_dev(hw); mutex_lock(&dev->mt76.mutex); - mt7915_mcu_add_beacon(hw, vif, true); + mt7915_mcu_add_beacon(hw, vif, true, BSS_CHANGED_BEACON); mutex_unlock(&dev->mt76.mutex); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 07c1a15e735d..b7e2b365356c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1892,6 +1892,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif, u8 *buf; int len = sizeof(*cont) + MT_TXD_SIZE + skb->len; + len = (len & 0x3) ? ((len | 0x3) + 1) : len; tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT, len, &bcn->sub_ntlv, &bcn->len); @@ -1910,7 +1911,7 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif, buf = (u8 *)tlv + sizeof(*cont); mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL, - true); + BSS_CHANGED_BEACON); memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); } @@ -1992,8 +1993,71 @@ mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif, } } -int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int en) +static void +mt7915_mcu_beacon_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct sk_buff *rskb, struct bss_info_bcn *bcn, + u32 changed) +{ +#define OFFLOAD_TX_MODE_SU BIT(0) +#define OFFLOAD_TX_MODE_MU BIT(1) + struct ieee80211_hw *hw = mt76_hw(dev); + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; + enum nl80211_band band = chandef->chan->band; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct bss_info_inband_discovery *discov; + struct ieee80211_tx_info *info; + struct sk_buff *skb = NULL; + struct tlv *tlv; + bool ext_phy = phy != &dev->phy; + u8 *buf, interval; + int len; + + if (changed & BSS_CHANGED_FILS_DISCOVERY && + vif->bss_conf.fils_discovery.max_interval) { + interval = vif->bss_conf.fils_discovery.max_interval; + skb = ieee80211_get_fils_discovery_tmpl(hw, vif); + } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP && + vif->bss_conf.unsol_bcast_probe_resp_interval) { + interval = vif->bss_conf.unsol_bcast_probe_resp_interval; + skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); + } + + if (!skb) + return; + + info = IEEE80211_SKB_CB(skb); + info->control.vif = vif; + info->band = band; + + if (ext_phy) + info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; + + len = sizeof(*discov) + MT_TXD_SIZE + skb->len; + len = (len & 0x3) ? 
((len | 0x3) + 1) : len; + + tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV, + len, &bcn->sub_ntlv, &bcn->len); + discov = (struct bss_info_inband_discovery *)tlv; + discov->tx_mode = OFFLOAD_TX_MODE_SU; + /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */ + discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY); + discov->tx_interval = interval; + discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len); + discov->enable = true; + + buf = (u8 *)tlv + sizeof(*discov); + + mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL, + changed); + memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); + + dev_kfree_skb(skb); +} + +int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int en, u32 changed) { #define MAX_BEACON_SIZE 512 struct mt7915_dev *dev = mt7915_hw_dev(hw); @@ -2044,6 +2108,11 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); dev_kfree_skb(skb); + if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP || + changed & BSS_CHANGED_FILS_DISCOVERY) + mt7915_mcu_beacon_inband_discov(dev, vif, rskb, + bcn, changed); + out: return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, MCU_EXT_CMD(BSS_INFO_UPDATE), true); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 0c40e4469937..5abde482a97f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -404,11 +404,23 @@ struct bss_info_bcn_cont { __le16 pkt_len; } __packed __aligned(4); +struct bss_info_inband_discovery { + __le16 tag; + __le16 len; + u8 tx_type; + u8 tx_mode; + u8 tx_interval; + u8 enable; + __le16 rsv; + __le16 prob_rsp_len; +} __packed __aligned(4); + enum { BSS_INFO_BCN_CSA, BSS_INFO_BCN_BCC, BSS_INFO_BCN_MBSSID, BSS_INFO_BCN_CONTENT, + BSS_INFO_BCN_DISCOV, BSS_INFO_BCN_MAX }; @@ -490,6 +502,7 @@ enum { #define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct bss_info_bcn_cntdwn) + \ sizeof(struct bss_info_bcn_mbss) + \ - sizeof(struct bss_info_bcn_cont)) + sizeof(struct bss_info_bcn_cont) + \ + sizeof(struct bss_info_inband_discovery)) #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index fdd989f0bb99..269e56f851b0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -469,7 +469,7 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct cfg80211_he_bss_color *he_bss_color); int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - int enable); + int enable, u32 changed); int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, bool enable); int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, @@ -556,7 +556,7 @@ void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy); void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy); void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, int pid, - struct ieee80211_key_conf *key, bool beacon); + struct ieee80211_key_conf *key, u32 changed); void mt7915_mac_set_timing(struct mt7915_phy *phy); int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -- cgit v1.2.3 From 5eb14a0cfcaa260c8037ec323c06403554a59345 Mon Sep 17 00:00:00 2001 From: Bo 
Jiao Date: Sat, 7 May 2022 09:21:21 +0800 Subject: mt76: mt7615/mt7915: do reset_work with mt76's work queue reset_work may be blocked when mcu message timeout occurs Signed-off-by: Bo Jiao Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mmio.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index ce45c3bfc443..a208035e197a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -145,7 +145,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t) return; dev->reset_state = mcu_int; - ieee80211_queue_work(mt76_hw(dev), &dev->reset_work); + queue_work(dev->mt76.wq, &dev->reset_work); wake_up(&dev->reset_wait); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index c6446757ad4a..846e71042dc3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -610,7 +610,7 @@ static void mt7915_irq_tasklet(struct tasklet_struct *t) mt76_wr(dev, MT_MCU_CMD, val); if (val & MT_MCU_CMD_ERROR_MASK) { dev->reset_state = val; - ieee80211_queue_work(mt76_hw(dev), &dev->reset_work); + queue_work(dev->mt76.wq, &dev->reset_work); wake_up(&dev->reset_wait); } } -- cgit v1.2.3 From 0d28ec72b0936a783f75bc5cf4719178bc48f39d Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Sun, 8 May 2022 13:24:53 +0800 Subject: mt76: mt7915: improve error handling for fw_debug knobs In case fw.debug_wm/wa might be unavailable. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/debugfs.c | 53 +++++++++++++--------- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 9 ++-- 2 files changed, 37 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 665a30e415c2..11d1b2b46bbc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -447,20 +447,20 @@ mt7915_fw_debug_wm_set(void *data, u64 val) bool tx, rx, en; int ret; - dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0; + dev->fw.debug_wm = val ? 
MCU_FW_LOG_TO_HOST : 0; - if (dev->fw_debug_bin) + if (dev->fw.debug_bin) val = 16; else - val = dev->fw_debug_wm; + val = dev->fw.debug_wm; - tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1)); - rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2)); - en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0)); + tx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(1)); + rx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(2)); + en = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(0)); ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val); if (ret) - return ret; + goto out; for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) { if (debug == DEBUG_RPT_RX) @@ -470,16 +470,20 @@ mt7915_fw_debug_wm_set(void *data, u64 val) ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val); if (ret) - return ret; + goto out; } /* WM CPU info record control */ mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0)); - mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw_debug_wm); + mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm); mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5)); mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5)); - return 0; +out: + if (ret) + dev->fw.debug_wm = 0; + + return ret; } static int @@ -487,7 +491,7 @@ mt7915_fw_debug_wm_get(void *data, u64 *val) { struct mt7915_dev *dev = data; - *val = dev->fw_debug_wm; + *val = dev->fw.debug_wm; return 0; } @@ -501,14 +505,19 @@ mt7915_fw_debug_wa_set(void *data, u64 val) struct mt7915_dev *dev = data; int ret; - dev->fw_debug_wa = val ? MCU_FW_LOG_TO_HOST : 0; + dev->fw.debug_wa = val ? MCU_FW_LOG_TO_HOST : 0; - ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw_debug_wa); + ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw.debug_wa); if (ret) - return ret; + goto out; - return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_PDMA_RX, - !!dev->fw_debug_wa, 0); + ret = mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), + MCU_WA_PARAM_PDMA_RX, !!dev->fw.debug_wa, 0); +out: + if (ret) + dev->fw.debug_wa = 0; + + return ret; } static int @@ -516,7 +525,7 @@ mt7915_fw_debug_wa_get(void *data, u64 *val) { struct mt7915_dev *dev = data; - *val = dev->fw_debug_wa; + *val = dev->fw.debug_wa; return 0; } @@ -563,11 +572,11 @@ mt7915_fw_debug_bin_set(void *data, u64 val) if (!dev->relay_fwlog) return -ENOMEM; - dev->fw_debug_bin = val; + dev->fw.debug_bin = val; relay_reset(dev->relay_fwlog); - return mt7915_fw_debug_wm_set(dev, dev->fw_debug_wm); + return mt7915_fw_debug_wm_set(dev, dev->fw.debug_wm); } static int @@ -575,7 +584,7 @@ mt7915_fw_debug_bin_get(void *data, u64 *val) { struct mt7915_dev *dev = data; - *val = dev->fw_debug_bin; + *val = dev->fw.debug_bin; return 0; } @@ -588,7 +597,7 @@ mt7915_fw_util_wm_show(struct seq_file *file, void *data) { struct mt7915_dev *dev = file->private; - if (dev->fw_debug_wm) { + if (dev->fw.debug_wm) { seq_printf(file, "Busy: %u%% Peak busy: %u%%\n", mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT), mt76_rr(dev, MT_CPU_UTIL_PEAK_BUSY_PCT)); @@ -607,7 +616,7 @@ mt7915_fw_util_wa_show(struct seq_file *file, void *data) { struct mt7915_dev *dev = file->private; - if (dev->fw_debug_wa) + if (dev->fw.debug_wa) return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY), MCU_WA_PARAM_CPU_UTIL, 0, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 269e56f851b0..4dcae6991669 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -312,15 +312,18 @@ struct 
mt7915_dev { bool flash_mode; bool muru_debug; bool ibf; - u8 fw_debug_wm; - u8 fw_debug_wa; - u8 fw_debug_bin; struct dentry *debugfs_dir; struct rchan *relay_fwlog; void *cal; + struct { + u8 debug_wm; + u8 debug_wa; + u8 debug_bin; + } fw; + struct { u16 table_mask; u8 n_agrt; -- cgit v1.2.3 From 64d607256a9e56944828794c8459e1cbdc0cea4b Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Sun, 8 May 2022 13:24:54 +0800 Subject: mt76: mt7915: add more statistics from fw_util debugfs knobs Print out exception state and program counters of WA/WM MCUs. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c | 8 ++++++++ drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 3 +++ drivers/net/wireless/mediatek/mt76/mt7915/regs.h | 14 ++++++++++---- 3 files changed, 21 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 11d1b2b46bbc..cab6e02e1f8c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -597,6 +597,12 @@ mt7915_fw_util_wm_show(struct seq_file *file, void *data) { struct mt7915_dev *dev = file->private; + seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WM_MCU_PC)); + seq_printf(file, "Exception state: 0x%x\n", + is_mt7915(&dev->mt76) ? + (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(15, 8)) : + (u32)mt76_get_field(dev, MT_FW_EXCEPTION, GENMASK(7, 0))); + if (dev->fw.debug_wm) { seq_printf(file, "Busy: %u%% Peak busy: %u%%\n", mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT), @@ -616,6 +622,8 @@ mt7915_fw_util_wa_show(struct seq_file *file, void *data) { struct mt7915_dev *dev = file->private; + seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WA_MCU_PC)); + if (dev->fw.debug_wa) return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY), MCU_WA_PARAM_CPU_UTIL, 0, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index 846e71042dc3..46ee8a7db7bc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -22,6 +22,7 @@ static const u32 mt7915_reg[] = { [WFDMA_EXT_CSR_ADDR] = 0xd7000, [CBTOP1_PHY_END] = 0x77ffffff, [INFRA_MCU_ADDR_END] = 0x7c3fffff, + [FW_EXCEPTION_ADDR] = 0x219848, [SWDEF_BASE_ADDR] = 0x41f200, }; @@ -37,6 +38,7 @@ static const u32 mt7916_reg[] = { [WFDMA_EXT_CSR_ADDR] = 0xd7000, [CBTOP1_PHY_END] = 0x7fffffff, [INFRA_MCU_ADDR_END] = 0x7c085fff, + [FW_EXCEPTION_ADDR] = 0x022050bc, [SWDEF_BASE_ADDR] = 0x411400, }; @@ -52,6 +54,7 @@ static const u32 mt7986_reg[] = { [WFDMA_EXT_CSR_ADDR] = 0x27000, [CBTOP1_PHY_END] = 0x7fffffff, [INFRA_MCU_ADDR_END] = 0x7c085fff, + [FW_EXCEPTION_ADDR] = 0x02204ffc, [SWDEF_BASE_ADDR] = 0x411400, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index 31dffb9c949c..4953be208c5e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -30,6 +30,7 @@ enum reg_rev { WFDMA_EXT_CSR_ADDR, CBTOP1_PHY_END, INFRA_MCU_ADDR_END, + FW_EXCEPTION_ADDR, SWDEF_BASE_ADDR, __MT_REG_MAX, }; @@ -939,6 +940,8 @@ enum offs_rev { #define MT_ADIE_TYPE_MASK BIT(1) /* FW MODE SYNC */ +#define MT_FW_EXCEPTION __REG(FW_EXCEPTION_ADDR) + #define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR) #define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) @@ -1004,10 +1007,6 @@ enum offs_rev { 
#define MT_TOP_MISC MT_TOP(0xf0) #define MT_TOP_MISC_FW_STATE GENMASK(2, 0) -#define MT_HW_BOUND 0x70010020 -#define MT_HW_REV 0x70010204 -#define MT_WF_SUBSYS_RST 0x70002600 - #define MT_TOP_WFSYS_WAKEUP MT_TOP(0x1a4) #define MT_TOP_WFSYS_WAKEUP_MASK BIT(0) @@ -1069,6 +1068,10 @@ enum offs_rev { #define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3) #define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2) +#define MT_HW_BOUND 0x70010020 +#define MT_HW_REV 0x70010204 +#define MT_WF_SUBSYS_RST 0x70002600 + /* PCIE MAC */ #define MT_PCIE_MAC_BASE 0x74030000 #define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs)) @@ -1077,6 +1080,9 @@ enum offs_rev { #define MT_PCIE1_MAC_INT_ENABLE 0x74020188 #define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188 +#define MT_WM_MCU_PC 0x7c060204 +#define MT_WA_MCU_PC 0x7c06020c + /* PP TOP */ #define MT_WF_PP_TOP_BASE 0x820cc000 #define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs)) -- cgit v1.2.3 From a0a2034e2da0013347cd4c796ea04b5aa14f3854 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 12 May 2022 07:06:35 +0800 Subject: mt76: add gfp to mt76_mcu_msg_alloc signature Introduce __mt76_mcu_msg_alloc utility routine in order to specify gfp flags for mcu message allocation. Acked-by: Sean Wang Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mcu.c | 8 ++++---- drivers/net/wireless/mediatek/mt76/mt76.h | 9 ++++++++- 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index 3f94c37251df..914ee278e6e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -6,14 +6,14 @@ #include "mt76.h" struct sk_buff * -mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, - int data_len) +__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, + int data_len, gfp_t gfp) { const struct mt76_mcu_ops *ops = dev->mcu_ops; int length = ops->headroom + data_len + ops->tailroom; struct sk_buff *skb; - skb = alloc_skb(length, GFP_KERNEL); + skb = alloc_skb(length, gfp); if (!skb) return NULL; @@ -25,7 +25,7 @@ mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, return skb; } -EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc); +EXPORT_SYMBOL_GPL(__mt76_mcu_msg_alloc); struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev, unsigned long expires) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index dc0f1b0aa34a..4e8997c45c1b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -1345,8 +1345,15 @@ int mt76s_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data, int len); struct sk_buff * +__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, + int data_len, gfp_t gfp); +static inline struct sk_buff * mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, - int data_len); + int data_len) +{ + return __mt76_mcu_msg_alloc(dev, data, data_len, GFP_KERNEL); +} + void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb); struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev, unsigned long expires); -- cgit v1.2.3 From 5fc201aa8cf39d8e313b22c97abea73849cf1edb Mon Sep 17 00:00:00 2001 From: Deren Wu Date: Thu, 12 May 2022 07:06:36 +0800 Subject: mt76: mt7921: add ipv6 NS offload support Add ipv6 NS offload for WoWLAN state. Tested in this way: 1. Put device-A into WoW state. 2. ping6 from device-B to device-A. 3. 
In sniffer, see Neighbour advertisement from device-A. Reviewed-by: Sean Wang Signed-off-by: Deren Wu Signed-off-by: Ming Yen Hsieh Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7921/init.c | 4 ++ drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 26 +++++++++ drivers/net/wireless/mediatek/mt76/mt7921/main.c | 67 +++++++++++++++++++++- drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 43 ++++++++++++++ drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h | 8 +++ 5 files changed, 147 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c index f9e1255bd9c7..4a8675634f80 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c @@ -264,6 +264,10 @@ int mt7921_register_device(struct mt7921_dev *dev) INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work); INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work); INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work); +#if IS_ENABLED(CONFIG_IPV6) + INIT_WORK(&dev->ipv6_ns_work, mt7921_set_ipv6_ns_work); + skb_queue_head_init(&dev->ipv6_ns_list); +#endif skb_queue_head_init(&dev->phy.scan_event_list); skb_queue_head_init(&dev->coredump.msg_list); INIT_LIST_HEAD(&dev->sta_poll_list); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index e67c3d17d36c..a630ddbf19e5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -1726,3 +1726,29 @@ bool mt7921_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) return false; } EXPORT_SYMBOL_GPL(mt7921_usb_sdio_tx_status_data); + +#if IS_ENABLED(CONFIG_IPV6) +void mt7921_set_ipv6_ns_work(struct work_struct *work) +{ + struct mt7921_dev *dev = container_of(work, struct mt7921_dev, + ipv6_ns_work); + struct sk_buff *skb; + int ret = 0; + + do { + skb = skb_dequeue(&dev->ipv6_ns_list); + + if (!skb) + break; + + mt7921_mutex_acquire(dev); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(OFFLOAD), true); + mt7921_mutex_release(dev); + + } while (!ret); + + if (ret) + skb_queue_purge(&dev->ipv6_ns_list); +} +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index e3db08e3a79f..80279f342109 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "mt7921.h" #include "mcu.h" @@ -1332,7 +1333,7 @@ static int mt7921_suspend(struct ieee80211_hw *hw, clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, - mt76_connac_mcu_set_suspend_iter, + mt7921_mcu_set_suspend_iter, &dev->mphy); mt7921_mutex_release(dev); @@ -1407,6 +1408,67 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, MCU_UNI_CMD(STA_REC_UPDATE)); } +#if IS_ENABLED(CONFIG_IPV6) +static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mvif->phy->dev; + struct inet6_ifaddr *ifa; + struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; + struct sk_buff *skb; + u8 i, idx = 0; + + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt76_connac_arpns_tlv arpns; + } req_hdr = { + 
.hdr = { + .bss_idx = mvif->mt76.idx, + }, + .arpns = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), + .mode = 2, /* update */ + .option = 1, /* update only */ + }, + }; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + if (ifa->flags & IFA_F_TENTATIVE) + continue; + ns_addrs[idx] = ifa->addr; + if (++idx >= IEEE80211_BSS_ARP_ADDR_LIST_LEN) + break; + } + read_unlock_bh(&idev->lock); + + if (!idx) + return; + + skb = __mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + + idx * sizeof(struct in6_addr), GFP_ATOMIC); + if (!skb) + return; + + req_hdr.arpns.ips_num = idx; + req_hdr.arpns.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv) + + idx * sizeof(struct in6_addr)); + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + + for (i = 0; i < idx; i++) + skb_put_data(skb, &ns_addrs[i].in6_u, sizeof(struct in6_addr)); + + skb_queue_tail(&dev->ipv6_ns_list, skb); + + ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work); +} +#endif + static int mt7921_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) { @@ -1452,6 +1514,9 @@ const struct ieee80211_ops mt7921_ops = { .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, .set_key = mt7921_set_key, .sta_set_decap_offload = mt7921_sta_set_decap_offload, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = mt7921_ipv6_addr_change, +#endif /* CONFIG_IPV6 */ .ampdu_action = mt7921_ampdu_action, .set_rts_threshold = mt7921_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index 1ecbbe4fa498..12bab18c4171 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -224,6 +224,49 @@ exit: } EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message); +#ifdef CONFIG_PM + +static int +mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev, + struct ieee80211_vif *vif, bool suspend) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt76_connac_arpns_tlv arpns; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .arpns = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), + .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)), + .mode = suspend, + }, + }; + + return mt76_mcu_send_msg(dev, MCU_UNI_CMD_OFFLOAD, &req, sizeof(req), + true); +} + +void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + if (IS_ENABLED(CONFIG_IPV6)) { + struct mt76_phy *phy = priv; + + mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif, + !test_bit(MT76_STATE_RUNNING, + &phy->state)); + } + + mt76_connac_mcu_set_suspend_iter(priv, mac, vif); +} + +#endif /* CONFIG_PM */ + static void mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index adbdb2e22934..5ca584bb2fc6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -211,6 +211,10 @@ struct mt7921_dev { struct mt76_connac_pm pm; struct mt76_connac_coredump coredump; const struct mt7921_hif_ops *hif_ops; + + struct work_struct ipv6_ns_work; + /* IPv6 addresses for WoWLAN */ + struct sk_buff_head ipv6_ns_list; }; enum { @@ -449,6 +453,10 @@ int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev); int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev); void mt7921_mac_add_txs(struct mt7921_dev *dev, void 
*data); void mt7921_set_runtime_pm(struct mt7921_dev *dev); +void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif); +void mt7921_set_ipv6_ns_work(struct work_struct *work); + int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif, bool enable); -- cgit v1.2.3 From a19cef450bb6b1365c3bd5c82952f95b76688143 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Thu, 12 May 2022 14:09:05 +0800 Subject: net: ethernet: Use swap() instead of open coding it Clean the following coccicheck warning: ./drivers/net/ethernet/sunplus/spl2sw_driver.c:217:27-28: WARNING opportunity for swap(). ./drivers/net/ethernet/sunplus/spl2sw_driver.c:222:27-28: WARNING opportunity for swap(). Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Signed-off-by: David S. Miller --- drivers/net/ethernet/sunplus/spl2sw_driver.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index 8320fa833d3e..1cb7076f946d 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -204,28 +204,16 @@ static const struct net_device_ops netdev_ops = { static void spl2sw_check_mac_vendor_id_and_convert(u8 *mac_addr) { - u8 tmp; - /* Byte order of MAC address of some samples are reversed. * Check vendor id and convert byte order if it is wrong. * OUI of Sunplus: fc:4b:bc */ if (mac_addr[5] == 0xfc && mac_addr[4] == 0x4b && mac_addr[3] == 0xbc && (mac_addr[0] != 0xfc || mac_addr[1] != 0x4b || mac_addr[2] != 0xbc)) { - /* Swap mac_addr[0] and mac_addr[5] */ - tmp = mac_addr[0]; - mac_addr[0] = mac_addr[5]; - mac_addr[5] = tmp; - - /* Swap mac_addr[1] and mac_addr[4] */ - tmp = mac_addr[1]; - mac_addr[1] = mac_addr[4]; - mac_addr[4] = tmp; - - /* Swap mac_addr[2] and mac_addr[3] */ - tmp = mac_addr[2]; - mac_addr[2] = mac_addr[3]; - mac_addr[3] = tmp; + + swap(mac_addr[0], mac_addr[5]); + swap(mac_addr[1], mac_addr[4]); + swap(mac_addr[2], mac_addr[3]); } } -- cgit v1.2.3 From 7b8b82224c26863d9b6c67f6cc6044bc24044e44 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Thu, 12 May 2022 16:03:57 +0800 Subject: net: ethernet: fix platform_no_drv_owner.cocci warning Remove .owner field if calls are used which set it automatically. ./drivers/net/ethernet/sunplus/spl2sw_driver.c:569:3-8: No need to set .owner here. The core will do it. Reported-by: Abaci Robot Signed-off-by: Yang Li Signed-off-by: David S. Miller --- drivers/net/ethernet/sunplus/spl2sw_driver.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index 1cb7076f946d..3773ce5e12cc 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -554,7 +554,6 @@ static struct platform_driver spl2sw_driver = { .remove = spl2sw_remove, .driver = { .name = "sp7021_emac", - .owner = THIS_MODULE, .of_match_table = spl2sw_of_match, }, }; -- cgit v1.2.3 From d1408f6b4dd78fb1b9e26bcf64477984e5f85409 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 12 May 2022 10:42:01 +0200 Subject: usbnet: Run unregister_netdev() before unbind() again Commit 2c9d6c2b871d ("usbnet: run unbind() before unregister_netdev()") sought to fix a use-after-free on disconnect of USB Ethernet adapters. 
It turns out that a different fix is necessary to address the issue: https://lore.kernel.org/netdev/18b3541e5372bc9b9fc733d422f4e698c089077c.1650177997.git.lukas@wunner.de/ So the commit was not necessary. The commit made binding and unbinding of USB Ethernet asymmetrical: Before, usbnet_probe() first invoked the ->bind() callback and then register_netdev(). usbnet_disconnect() mirrored that by first invoking unregister_netdev() and then ->unbind(). Since the commit, the order in usbnet_disconnect() is reversed and no longer mirrors usbnet_probe(). One consequence is that a PHY disconnected (and stopped) in ->unbind() is afterwards stopped once more by unregister_netdev() as it closes the netdev before unregistering. That necessitates a contortion in ->stop() because the PHY may only be stopped if it hasn't already been disconnected. Reverting the commit allows making the call to phy_stop() unconditional in ->stop(). Tested-by: Oleksij Rempel # LAN9514/9512/9500 Tested-by: Ferry Toth # LAN9514 Signed-off-by: Lukas Wunner Acked-by: Oliver Neukum Cc: Martyn Welch Cc: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/usb/asix_devices.c | 6 +----- drivers/net/usb/smsc95xx.c | 3 +-- drivers/net/usb/usbnet.c | 6 +++--- 3 files changed, 5 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 38e47a93fb83..5b5eb630c4b7 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -795,11 +795,7 @@ static int ax88772_stop(struct usbnet *dev) { struct asix_common_private *priv = dev->driver_priv; - /* On unplugged USB, we will get MDIO communication errors and the - * PHY will be set in to PHY_HALTED state. - */ - if (priv->phydev->state != PHY_HALTED) - phy_stop(priv->phydev); + phy_stop(priv->phydev); return 0; } diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 4ef61f6b85df..edf0492ad489 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1243,8 +1243,7 @@ static int smsc95xx_start_phy(struct usbnet *dev) static int smsc95xx_stop(struct usbnet *dev) { - if (dev->net->phydev) - phy_stop(dev->net->phydev); + phy_stop(dev->net->phydev); return 0; } diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 9a6450f796dc..36b24ec11650 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1616,9 +1616,6 @@ void usbnet_disconnect (struct usb_interface *intf) xdev->bus->bus_name, xdev->devpath, dev->driver_info->description); - if (dev->driver_info->unbind) - dev->driver_info->unbind(dev, intf); - net = dev->net; unregister_netdev (net); @@ -1626,6 +1623,9 @@ void usbnet_disconnect (struct usb_interface *intf) usb_scuttle_anchored_urbs(&dev->deferred); + if (dev->driver_info->unbind) + dev->driver_info->unbind(dev, intf); + usb_kill_urb(dev->interrupt); usb_free_urb(dev->interrupt); kfree(dev->padding_pkt); -- cgit v1.2.3 From 3108871f19221372b251f7da1ac38736928b5b3a Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 12 May 2022 10:42:02 +0200 Subject: usbnet: smsc95xx: Don't clear read-only PHY interrupt Upon receiving data from the Interrupt Endpoint, the SMSC LAN95xx driver attempts to clear the signaled interrupts by writing "all ones" to the Interrupt Status Register. However the driver only ever enables a single type of interrupt, namely the PHY Interrupt. And according to page 119 of the LAN950x datasheet, its bit in the Interrupt Status Register is read-only. 
There's no other way to clear it than in a separate PHY register: https://www.microchip.com/content/dam/mchp/documents/UNG/ProductDocuments/DataSheets/LAN950x-Data-Sheet-DS00001875D.pdf Consequently, writing "all ones" to the Interrupt Status Register is pointless and can be dropped. Tested-by: Oleksij Rempel # LAN9514/9512/9500 Tested-by: Ferry Toth # LAN9514 Signed-off-by: Lukas Wunner Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/usb/smsc95xx.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index edf0492ad489..2cb44d65bbc3 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -572,10 +572,6 @@ static int smsc95xx_link_reset(struct usbnet *dev) unsigned long flags; int ret; - ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); - if (ret < 0) - return ret; - spin_lock_irqsave(&pdata->mac_cr_lock, flags); if (pdata->phydev->duplex != DUPLEX_FULL) { pdata->mac_cr &= ~MAC_CR_FDPX_; -- cgit v1.2.3 From 14021da69811cc9bd680a83932614adf308ed0fe Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 12 May 2022 10:42:03 +0200 Subject: usbnet: smsc95xx: Don't reset PHY behind PHY driver's back smsc95xx_reset() resets the PHY behind the PHY driver's back, which seems like a bad idea generally. Remove that portion of the function. We're about to use PHY interrupts instead of polling to detect link changes on SMSC LAN95xx chips. Because smsc95xx_reset() is called from usbnet_open(), PHY interrupt settings are lost whenever the net_device is brought up. There are two other callers of smsc95xx_reset(), namely smsc95xx_bind() and smsc95xx_reset_resume(), and both may indeed benefit from a PHY reset. However they already perform one through their calls to phy_connect_direct() and phy_init_hw(). Tested-by: Oleksij Rempel # LAN9514/9512/9500 Tested-by: Ferry Toth # LAN9514 Signed-off-by: Lukas Wunner Cc: Martyn Welch Cc: Gabriel Hojda Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/usb/smsc95xx.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 2cb44d65bbc3..6c37c7adde1b 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -887,24 +887,6 @@ static int smsc95xx_reset(struct usbnet *dev) return ret; } - ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_); - if (ret < 0) - return ret; - - timeout = 0; - do { - msleep(10); - ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf); - if (ret < 0) - return ret; - timeout++; - } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100)); - - if (timeout >= 100) { - netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); - return ret; - } - ret = smsc95xx_set_mac_address(dev); if (ret < 0) return ret; -- cgit v1.2.3 From 8960f878e39fadc03d74292a6731f1e914cf2019 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 12 May 2022 10:42:04 +0200 Subject: usbnet: smsc95xx: Avoid link settings race on interrupt reception When a PHY interrupt is signaled, the SMSC LAN95xx driver updates the MAC full duplex mode and PHY flow control registers based on cached data in struct phy_device: smsc95xx_status() # raises EVENT_LINK_RESET usbnet_deferred_kevent() smsc95xx_link_reset() # uses cached data in phydev Simultaneously, phylib polls link status once per second and updates that cached data: phy_state_machine() phy_check_link_status() phy_read_status() lan87xx_read_status() genphy_read_status() # updates cached data in phydev If smsc95xx_link_reset() wins the race against genphy_read_status(), the registers may be updated based on stale data. E.g. if the link was previously down, phydev->duplex is set to DUPLEX_UNKNOWN and that's what smsc95xx_link_reset() will use, even though genphy_read_status() may update it to DUPLEX_FULL afterwards. PHY interrupts are currently only enabled on suspend to trigger wakeup, so the impact of the race is limited, but we're about to enable them perpetually. Avoid the race by delaying execution of smsc95xx_link_reset() until phy_state_machine() has done its job and calls back via smsc95xx_handle_link_change(). Signaling EVENT_LINK_RESET on wakeup is not necessary because phylib picks up link status changes through polling. So drop the declaration of a ->link_reset() callback. Note that the semicolon on a line by itself added in smsc95xx_status() is a placeholder for a function call which will be added in a subsequent commit. That function call will actually handle the INT_ENP_PHY_INT_ interrupt. Tested-by: Oleksij Rempel # LAN9514/9512/9500 Tested-by: Ferry Toth # LAN9514 Signed-off-by: Lukas Wunner Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/usb/smsc95xx.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 6c37c7adde1b..6309ff8e75de 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -566,7 +566,7 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev) return smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); } -static int smsc95xx_link_reset(struct usbnet *dev) +static void smsc95xx_mac_update_fullduplex(struct usbnet *dev) { struct smsc95xx_priv *pdata = dev->driver_priv; unsigned long flags; @@ -583,14 +583,16 @@ static int smsc95xx_link_reset(struct usbnet *dev) spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); - if (ret < 0) - return ret; + if (ret < 0) { + if (ret != -ENODEV) + netdev_warn(dev->net, + "Error updating MAC full duplex mode\n"); + return; + } ret = smsc95xx_phy_update_flowcontrol(dev); if (ret < 0) netdev_warn(dev->net, "Error updating PHY flow control\n"); - - return ret; } static void smsc95xx_status(struct usbnet *dev, struct urb *urb) @@ -607,7 +609,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); if (intdata & INT_ENP_PHY_INT_) - usbnet_defer_kevent(dev, EVENT_LINK_RESET); + ; else netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n", intdata); @@ -1070,6 +1072,7 @@ static void smsc95xx_handle_link_change(struct net_device *net) struct usbnet *dev = netdev_priv(net); phy_print_status(net->phydev); + smsc95xx_mac_update_fullduplex(dev); usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); } @@ -1975,7 +1978,6 @@ static const struct driver_info smsc95xx_info = { .description = "smsc95xx USB 2.0 Ethernet", .bind = smsc95xx_bind, .unbind = smsc95xx_unbind, - .link_reset = smsc95xx_link_reset, .reset = smsc95xx_reset, .check_connect = smsc95xx_start_phy, .stop = smsc95xx_stop, -- cgit v1.2.3 From 1ce8b37241ed291af56f7a49bbdbf20c08728e88 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 12 May 2022 10:42:05 +0200 Subject: usbnet: smsc95xx: Forward PHY interrupts to PHY driver to avoid polling Link status of SMSC LAN95xx chips is polled once per second, even though they're capable of signaling PHY interrupts through the MAC layer. Forward those interrupts to the PHY driver to avoid polling. Benefits are reduced bus traffic, reduced CPU overhead and quicker interface bringup. Polling was introduced in 2016 by commit d69d16949346 ("usbnet: smsc95xx: fix link detection for disabled autonegotiation"). Back then, the LAN95xx driver neglected to enable the ENERGYON interrupt, hence couldn't detect link-up events when auto-negotiation was disabled. The proper solution would have been to enable the ENERGYON interrupt instead of polling. Since then, PHY handling was moved from the LAN95xx driver to the SMSC PHY driver with commit 05b35e7eb9a1 ("smsc95xx: add phylib support"). That PHY driver is capable of link detection with auto-negotiation disabled because it enables the ENERGYON interrupt. Note that signaling interrupts through the MAC layer not only works with the integrated PHY, but also with an external PHY, provided its interrupt pin is attached to LAN95xx's nPHY_INT pin. In the unlikely event that the interrupt pin of an external PHY is attached to a GPIO of the SoC (or not connected at all), the driver can be amended to retrieve the irq from the PHY's of_node. 
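To make the resulting dispatch concrete before the details below: the chip
reports the PHY interrupt through its USB interrupt endpoint, so the driver
sees it in the status URB completion rather than on a CPU interrupt line, and
simply hands it to the PHY's mapped interrupt. A rough sketch (illustrative
only, mirroring the smsc95xx_status() hunk in this patch; forward_phy_irq() is
a hypothetical name, while INT_ENP_PHY_INT_, PHY_HWIRQ, pdata->irqdomain and
generic_handle_domain_irq() are what the patch actually uses):

    /* called from the status URB completion with the 4-byte interrupt word */
    static void forward_phy_irq(struct smsc95xx_priv *pdata, u32 intdata)
    {
            unsigned long flags;

            local_irq_save(flags);
            if (intdata & INT_ENP_PHY_INT_) {
                    /*
                     * Run the handler mapped for the PHY hwirq; the PHY
                     * driver's threaded handler then reads the PHY to clear
                     * the level-triggered interrupt source.
                     */
                    generic_handle_domain_irq(pdata->irqdomain, PHY_HWIRQ);
            }
            local_irq_restore(flags);
    }

The rest of the commit message explains why this indirection through an IRQ
domain is needed.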
To forward PHY interrupts to phylib, it is not sufficient to call phy_mac_interrupt(). Instead, the PHY's interrupt handler needs to run so that PHY interrupts are cleared. That's because according to page 119 of the LAN950x datasheet, "The source of this interrupt is a level. The interrupt persists until it is cleared in the PHY." https://www.microchip.com/content/dam/mchp/documents/UNG/ProductDocuments/DataSheets/LAN950x-Data-Sheet-DS00001875D.pdf Therefore, create an IRQ domain with a single IRQ for the PHY. In the future, the IRQ domain may be extended to support the 11 GPIOs on the LAN95xx. Normally the PHY interrupt should be masked until the PHY driver has cleared it. However masking requires a (sleeping) USB transaction and interrupts are received in (non-sleepable) softirq context. I decided not to mask the interrupt at all (by using the dummy_irq_chip's noop ->irq_mask() callback): The USB interrupt endpoint is polled in 1 msec intervals and normally that's sufficient to wake the PHY driver's IRQ thread and have it clear the interrupt. If it does take longer, worst thing that can happen is the IRQ thread is woken again. No big deal. Because PHY interrupts are now perpetually enabled, there's no need to selectively enable them on suspend. So remove all invocations of smsc95xx_enable_phy_wakeup_interrupts(). In smsc95xx_resume(), move the call of phy_init_hw() before usbnet_resume() (which restarts the status URB) to ensure that the PHY is fully initialized when an interrupt is handled. Tested-by: Oleksij Rempel # LAN9514/9512/9500 Tested-by: Ferry Toth # LAN9514 Signed-off-by: Lukas Wunner Reviewed-by: Andrew Lunn # from a PHY perspective Cc: Andre Edich Signed-off-by: David S. Miller --- drivers/net/usb/smsc95xx.c | 113 ++++++++++++++++++++++++--------------------- 1 file changed, 61 insertions(+), 52 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 6309ff8e75de..bd03e16f98a1 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -53,6 +55,9 @@ #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) +#define SMSC95XX_NR_IRQS (1) /* raise to 12 for GPIOs */ +#define PHY_HWIRQ (SMSC95XX_NR_IRQS - 1) + struct smsc95xx_priv { u32 mac_cr; u32 hash_hi; @@ -61,6 +66,9 @@ struct smsc95xx_priv { spinlock_t mac_cr_lock; u8 features; u8 suspend_flags; + struct irq_chip irqchip; + struct irq_domain *irqdomain; + struct fwnode_handle *irqfwnode; struct mii_bus *mdiobus; struct phy_device *phydev; }; @@ -597,6 +605,8 @@ static void smsc95xx_mac_update_fullduplex(struct usbnet *dev) static void smsc95xx_status(struct usbnet *dev, struct urb *urb) { + struct smsc95xx_priv *pdata = dev->driver_priv; + unsigned long flags; u32 intdata; if (urb->actual_length != 4) { @@ -608,11 +618,15 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) intdata = get_unaligned_le32(urb->transfer_buffer); netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); + local_irq_save(flags); + if (intdata & INT_ENP_PHY_INT_) - ; + generic_handle_domain_irq(pdata->irqdomain, PHY_HWIRQ); else netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n", intdata); + + local_irq_restore(flags); } /* Enable or disable Tx & Rx checksum offload engines */ @@ -1080,8 +1094,9 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; bool 
is_internal_phy; + char usb_path[64]; + int ret, phy_irq; u32 val; - int ret; printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); @@ -1121,10 +1136,38 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) if (ret) goto free_pdata; + /* create irq domain for use by PHY driver and GPIO consumers */ + usb_make_path(dev->udev, usb_path, sizeof(usb_path)); + pdata->irqfwnode = irq_domain_alloc_named_fwnode(usb_path); + if (!pdata->irqfwnode) { + ret = -ENOMEM; + goto free_pdata; + } + + pdata->irqdomain = irq_domain_create_linear(pdata->irqfwnode, + SMSC95XX_NR_IRQS, + &irq_domain_simple_ops, + pdata); + if (!pdata->irqdomain) { + ret = -ENOMEM; + goto free_irqfwnode; + } + + phy_irq = irq_create_mapping(pdata->irqdomain, PHY_HWIRQ); + if (!phy_irq) { + ret = -ENOENT; + goto remove_irqdomain; + } + + pdata->irqchip = dummy_irq_chip; + pdata->irqchip.name = SMSC_CHIPNAME; + irq_set_chip_and_handler_name(phy_irq, &pdata->irqchip, + handle_simple_irq, "phy"); + pdata->mdiobus = mdiobus_alloc(); if (!pdata->mdiobus) { ret = -ENOMEM; - goto free_pdata; + goto dispose_irq; } ret = smsc95xx_read_reg(dev, HW_CFG, &val); @@ -1157,6 +1200,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) goto unregister_mdio; } + pdata->phydev->irq = phy_irq; pdata->phydev->is_internal = is_internal_phy; /* detect device revision as different features may be available */ @@ -1199,6 +1243,15 @@ unregister_mdio: free_mdio: mdiobus_free(pdata->mdiobus); +dispose_irq: + irq_dispose_mapping(phy_irq); + +remove_irqdomain: + irq_domain_remove(pdata->irqdomain); + +free_irqfwnode: + irq_domain_free_fwnode(pdata->irqfwnode); + free_pdata: kfree(pdata); return ret; @@ -1211,6 +1264,9 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) phy_disconnect(dev->net->phydev); mdiobus_unregister(pdata->mdiobus); mdiobus_free(pdata->mdiobus); + irq_dispose_mapping(irq_find_mapping(pdata->irqdomain, PHY_HWIRQ)); + irq_domain_remove(pdata->irqdomain); + irq_domain_free_fwnode(pdata->irqfwnode); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); } @@ -1235,29 +1291,6 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter) return crc << ((filter % 2) * 16); } -static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) -{ - int ret; - - netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n"); - - /* read to clear */ - ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_SRC); - if (ret < 0) - return ret; - - /* enable interrupt source */ - ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_MASK); - if (ret < 0) - return ret; - - ret |= mask; - - smsc95xx_mdio_write_nopm(dev, PHY_INT_MASK, ret); - - return 0; -} - static int smsc95xx_link_ok_nopm(struct usbnet *dev) { int ret; @@ -1424,7 +1457,6 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev) static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up) { struct smsc95xx_priv *pdata = dev->driver_priv; - int ret; if (!netif_running(dev->net)) { /* interface is ifconfig down so fully power down hw */ @@ -1443,27 +1475,10 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up) } netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n"); - - /* enable PHY wakeup events for if cable is attached */ - ret = smsc95xx_enable_phy_wakeup_interrupts(dev, - PHY_INT_MASK_ANEG_COMP_); - if (ret < 0) { - netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); - return ret; - } - netdev_info(dev->net, "entering SUSPEND1 mode\n"); return smsc95xx_enter_suspend1(dev); } - /* 
enable PHY wakeup events so we remote wakeup if cable is pulled */ - ret = smsc95xx_enable_phy_wakeup_interrupts(dev, - PHY_INT_MASK_LINK_DOWN_); - if (ret < 0) { - netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); - return ret; - } - netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n"); return smsc95xx_enter_suspend3(dev); } @@ -1529,13 +1544,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) } if (pdata->wolopts & WAKE_PHY) { - ret = smsc95xx_enable_phy_wakeup_interrupts(dev, - (PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_LINK_DOWN_)); - if (ret < 0) { - netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); - goto done; - } - /* if link is down then configure EDPD and enter SUSPEND1, * otherwise enter SUSPEND0 below */ @@ -1769,11 +1777,12 @@ static int smsc95xx_resume(struct usb_interface *intf) return ret; } + phy_init_hw(pdata->phydev); + ret = usbnet_resume(intf); if (ret < 0) netdev_warn(dev->net, "usbnet_resume error\n"); - phy_init_hw(pdata->phydev); return ret; } -- cgit v1.2.3 From 7e8b617eb93f9fcaedac02cd19edcad31c767386 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 12 May 2022 10:42:06 +0200 Subject: net: phy: smsc: Cache interrupt mask Cache the interrupt mask to avoid re-reading it from the PHY upon every interrupt. This will simplify a subsequent commit which detects hot-removal in the interrupt handler and bails out. Analyzing and debugging PHY transactions also becomes simpler if such redundant reads are avoided. Last not least, interrupt overhead and latency is slightly improved. Tested-by: Oleksij Rempel # LAN9514/9512/9500 Tested-by: Ferry Toth # LAN9514 Signed-off-by: Lukas Wunner Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/smsc.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 92225d0fc246..e0eadea4b4b5 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -44,6 +44,7 @@ static struct smsc_hw_stat smsc_hw_stats[] = { }; struct smsc_phy_priv { + u16 intmask; bool energy_enable; struct clk *refclk; }; @@ -58,7 +59,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_intr(struct phy_device *phydev) { struct smsc_phy_priv *priv = phydev->priv; - u16 intmask = 0; int rc; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { @@ -66,12 +66,15 @@ static int smsc_phy_config_intr(struct phy_device *phydev) if (rc) return rc; - intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6; + priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6; if (priv->energy_enable) - intmask |= MII_LAN83C185_ISF_INT7; - rc = phy_write(phydev, MII_LAN83C185_IM, intmask); + priv->intmask |= MII_LAN83C185_ISF_INT7; + + rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask); } else { - rc = phy_write(phydev, MII_LAN83C185_IM, intmask); + priv->intmask = 0; + + rc = phy_write(phydev, MII_LAN83C185_IM, 0); if (rc) return rc; @@ -83,13 +86,8 @@ static int smsc_phy_config_intr(struct phy_device *phydev) static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev) { - int irq_status, irq_enabled; - - irq_enabled = phy_read(phydev, MII_LAN83C185_IM); - if (irq_enabled < 0) { - phy_error(phydev); - return IRQ_NONE; - } + struct smsc_phy_priv *priv = phydev->priv; + int irq_status; irq_status = phy_read(phydev, MII_LAN83C185_ISF); if (irq_status < 0) { @@ -97,7 +95,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct 
phy_device *phydev) return IRQ_NONE; } - if (!(irq_status & irq_enabled)) + if (!(irq_status & priv->intmask)) return IRQ_NONE; phy_trigger_machine(phydev); -- cgit v1.2.3 From 1e7b81edebc1466d4154c5e716eb87468f3eeee2 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 12 May 2022 10:42:07 +0200 Subject: net: phy: smsc: Cope with hot-removal in interrupt handler If reading the Interrupt Source Flag register fails with -ENODEV, then the PHY has been hot-removed and the correct response is to bail out instead of throwing a WARN splat and attempting to suspend the PHY. The PHY should be stopped in due course anyway as the kernel asynchronously tears down the device. Tested-by: Oleksij Rempel # LAN9514/9512/9500 Tested-by: Ferry Toth # LAN9514 Signed-off-by: Lukas Wunner Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/smsc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index e0eadea4b4b5..1b54684b68a0 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -91,7 +91,9 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev) irq_status = phy_read(phydev, MII_LAN83C185_ISF); if (irq_status < 0) { - phy_error(phydev); + if (irq_status != -ENODEV) + phy_error(phydev); + return IRQ_NONE; } -- cgit v1.2.3 From 63fac3343b99e6726830bb40028fbe23c864eafe Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 20 Mar 2022 23:27:49 +0100 Subject: Bluetooth: btbcm: Support per-board firmware variants There are provedly different firmware variants for the different phones using some of these chips. These were extracted from a few Samsung phones: 37446 BCM4334B0.samsung,codina-tmo.hcd 37366 BCM4334B0.samsung,golden.hcd 37403 BCM4334B0.samsung,kyle.hcd 37366 BCM4334B0.samsung,skomer.hcd This patch supports the above naming schedule with inserting [.board_name] between the firmware name and ".hcd". This scheme is the same as used by the companion BRCM wireless chips as can be seen in drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c or just by looking at the firmwares in linux-firmware/brcm. Currently we only support board variants using the device tree compatible string as board type, but other schemes are possible. This makes it possible to successfully load a few unique firmware variants for some Samsung phones. Cc: phone-devel@vger.kernel.org Cc: Markuss Broks Cc: Stephan Gerhold Signed-off-by: Linus Walleij Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btbcm.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index d9ceca7a7935..92a2b7e81757 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -29,7 +30,7 @@ #define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}}) #define BCM_FW_NAME_LEN 64 -#define BCM_FW_NAME_COUNT_MAX 2 +#define BCM_FW_NAME_COUNT_MAX 4 /* For kmalloc-ing the fw-name array instead of putting it on the stack */ typedef char bcm_fw_name[BCM_FW_NAME_LEN]; @@ -476,6 +477,42 @@ static const struct bcm_subver_table bcm_usb_subver_table[] = { { } }; +/* + * This currently only looks up the device tree board appendix, + * but can be expanded to other mechanisms. 
+ */ +static const char *btbcm_get_board_name(struct device *dev) +{ +#ifdef CONFIG_OF + struct device_node *root; + char *board_type; + const char *tmp; + int len; + int i; + + root = of_find_node_by_path("/"); + if (!root) + return NULL; + + if (of_property_read_string_index(root, "compatible", 0, &tmp)) + return NULL; + + /* get rid of any '/' in the compatible string */ + len = strlen(tmp) + 1; + board_type = devm_kzalloc(dev, len, GFP_KERNEL); + strscpy(board_type, tmp, len); + for (i = 0; i < board_type[i]; i++) { + if (board_type[i] == '/') + board_type[i] = '-'; + } + of_node_put(root); + + return board_type; +#else + return NULL; +#endif +} + int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done) { u16 subver, rev, pid, vid; @@ -483,12 +520,15 @@ int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done) struct hci_rp_read_local_version *ver; const struct bcm_subver_table *bcm_subver_table; const char *hw_name = NULL; + const char *board_name; char postfix[16] = ""; int fw_name_count = 0; bcm_fw_name *fw_name; const struct firmware *fw; int i, err; + board_name = btbcm_get_board_name(&hdev->dev); + /* Reset */ err = btbcm_reset(hdev); if (err) @@ -549,11 +589,21 @@ int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done) return -ENOMEM; if (hw_name) { + if (board_name) { + snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, + "brcm/%s%s.%s.hcd", hw_name, postfix, board_name); + fw_name_count++; + } snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, "brcm/%s%s.hcd", hw_name, postfix); fw_name_count++; } + if (board_name) { + snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, + "brcm/BCM%s.%s.hcd", postfix, board_name); + fw_name_count++; + } snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, "brcm/BCM%s.hcd", postfix); fw_name_count++; -- cgit v1.2.3 From 789f6b8ac3b15bca09b69d5699cad0bf6e2103aa Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 23 Mar 2022 07:30:40 +0800 Subject: Bluetooth: mt7921s: Fix the incorrect pointer check Fix the incorrect pointer check on ven_data. Fixes: f41b91fa1783 ("Bluetooth: mt7921s: Add .btmtk_get_codec_config_data") Co-developed-by: Yake Yang Signed-off-by: Yake Yang Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmtksdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index f3dc5881fff7..b6d77e04240c 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -961,7 +961,7 @@ static int btmtksdio_get_codec_config_data(struct hci_dev *hdev, } *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL); - if (!ven_data) { + if (!*ven_data) { err = -ENOMEM; goto error; } -- cgit v1.2.3 From bf7380e224f98a29ff6290beb2ea026f0a044a8c Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Thu, 31 Mar 2022 00:32:52 +0200 Subject: Bluetooth: btintel: Constify static struct regmap_bus The only usage of regmap_ibt is to (after the regmap_init() macro is expanded), pass its address to __regmap_init(), which takes a pointer to const struct regmap_bus as input. Make it const to allow the compiler to put it in read-only memory. 
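A generic sketch of the idea (placeholder callbacks, not the btintel code): a const-qualified ops table can be placed in .rodata, and the regmap core already expects a const pointer, so no cast is needed.

    /* Illustrative only; the callback bodies are stubs. Needs
     * <linux/regmap.h>.
     */
    static int example_bus_write(void *context, const void *data, size_t count)
    {
            return 0;
    }

    static int example_bus_read(void *context, const void *reg_buf,
                                size_t reg_size, void *val_buf, size_t val_size)
    {
            return 0;
    }

    static const struct regmap_bus example_bus = {
            .write = example_bus_write,
            .read  = example_bus_read,
    };

    /* devm_regmap_init(dev, &example_bus, ctx, &cfg) takes a
     * "const struct regmap_bus *", so the const table drops in directly. */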
Signed-off-by: Rikard Falkeborn Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btintel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 06514ed66022..818681c89db8 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -794,7 +794,7 @@ static void regmap_ibt_free_context(void *context) kfree(context); } -static struct regmap_bus regmap_ibt = { +static const struct regmap_bus regmap_ibt = { .read = regmap_ibt_read, .write = regmap_ibt_write, .gather_write = regmap_ibt_gather_write, -- cgit v1.2.3 From 72ef98445aca568a81c2da050532500a8345ad3a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 5 Apr 2022 10:02:00 -0400 Subject: Bluetooth: hci_qca: Use del_timer_sync() before freeing While looking at a crash report on a timer list being corrupted, which usually happens when a timer is freed while still active. This is commonly triggered by code calling del_timer() instead of del_timer_sync() just before freeing. One possible culprit is the hci_qca driver, which does exactly that. Eric mentioned that wake_retrans_timer could be rearmed via the work queue, so also move the destruction of the work queue before del_timer_sync(). Cc: Eric Dumazet Cc: stable@vger.kernel.org Fixes: 0ff252c1976da ("Bluetooth: hciuart: Add support QCA chipset for UART") Signed-off-by: Steven Rostedt (Google) Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index f6e91fb432a3..eab34e24d944 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -696,9 +696,9 @@ static int qca_close(struct hci_uart *hu) skb_queue_purge(&qca->tx_wait_q); skb_queue_purge(&qca->txq); skb_queue_purge(&qca->rx_memdump_q); - del_timer(&qca->tx_idle_timer); - del_timer(&qca->wake_retrans_timer); destroy_workqueue(qca->workqueue); + del_timer_sync(&qca->tx_idle_timer); + del_timer_sync(&qca->wake_retrans_timer); qca->hu = NULL; kfree_skb(qca->rx_skb); -- cgit v1.2.3 From d44e1dbda36fff5d7c2586683c4adc0963aef908 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Fri, 1 Apr 2022 16:38:25 -0700 Subject: Bluetooth: btusb: Set HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN for QCA This sets HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN for QCA controllers since SCO appear to not work when using HCI_OP_ENHANCED_SETUP_SYNC_CONN. Link: https://bugzilla.kernel.org/show_bug.cgi?id=215576 Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 50df417207af..06a854a2507e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3335,6 +3335,11 @@ static int btusb_setup_qca(struct hci_dev *hdev) msleep(QCA_BT_RESET_WAIT_MS); } + /* Mark HCI_OP_ENHANCED_SETUP_SYNC_CONN as broken as it doesn't seem to + * work with the likes of HSP/HFP mSBC. + */ + set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); + return 0; } -- cgit v1.2.3 From 8b1d66b50437b65ef109f32270bd936ca5437a83 Mon Sep 17 00:00:00 2001 From: Max Chou Date: Mon, 11 Apr 2022 17:19:57 +0800 Subject: Bluetooth: btrtl: Add support for RTL8852C Add the support for RTL8852C BT controller on USB interface. The necessary firmware file will be submitted to linux-firmware. 
Signed-off-by: Max Chou Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btrtl.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 481d488bca0f..47c28fd8f006 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -50,6 +50,7 @@ enum btrtl_chip_id { CHIP_ID_8761B, CHIP_ID_8852A = 18, CHIP_ID_8852B = 20, + CHIP_ID_8852C = 25, }; struct id_table { @@ -196,6 +197,14 @@ static const struct id_table ic_id_table[] = { .has_msft_ext = true, .fw_name = "rtl_bt/rtl8852bu_fw.bin", .cfg_name = "rtl_bt/rtl8852bu_config" }, + + /* 8852C */ + { IC_INFO(RTL_ROM_LMP_8852A, 0xc, 0xc, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8852cu_fw.bin", + .cfg_name = "rtl_bt/rtl8852cu_config" }, }; static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, @@ -305,6 +314,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, { RTL_ROM_LMP_8761A, 14 }, /* 8761B */ { RTL_ROM_LMP_8852A, 18 }, /* 8852A */ { RTL_ROM_LMP_8852A, 20 }, /* 8852B */ + { RTL_ROM_LMP_8852A, 25 }, /* 8852C */ }; min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; @@ -768,6 +778,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) case CHIP_ID_8822C: case CHIP_ID_8852A: case CHIP_ID_8852B: + case CHIP_ID_8852C: set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); hci_set_aosp_capable(hdev); @@ -947,3 +958,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin"); -- cgit v1.2.3 From 4622594766d0bc96d04079a96e13eb58f3f799f2 Mon Sep 17 00:00:00 2001 From: Zijun Hu Date: Mon, 25 Apr 2022 23:01:29 +0800 Subject: Bluetooth: btusb: add support for Qualcomm WCN785x Qualcomm WCN785x has PID/VID 0cf3/e700 as shown by /sys/kernel/debug/usb/devices: T: Bus=02 Lev=02 Prnt=02 Port=01 Cnt=02 Dev#= 8 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0cf3 ProdID=e700 Rev= 0.01 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) 
MxPS= 16 Ivl=1ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms I: If#= 1 Alt= 7 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 65 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 65 Ivl=1ms Signed-off-by: Zijun Hu Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 06a854a2507e..e6e28d3d1683 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -317,6 +317,11 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + /* QCA WCN785x chipset */ + { USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, + /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, @@ -3037,6 +3042,7 @@ static const struct qca_device_info qca_devices_table[] = { { 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */ { 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */ { 0x00130201, 40, 4, 16 }, /* WCN6855 2.1 */ + { 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */ }; static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, @@ -3327,11 +3333,11 @@ static int btusb_setup_qca(struct hci_dev *hdev) if (err < 0) return err; - /* WCN6855 2.1 will reset to apply firmware downloaded here, so + /* WCN6855 2.1 and later will reset to apply firmware downloaded here, so * wait ~100ms for reset Done then go ahead, otherwise, it maybe * cause potential enable failure. 
*/ - if (info->rom_version == 0x00130201) + if (info->rom_version >= 0x00130201) msleep(QCA_BT_RESET_WAIT_MS); } -- cgit v1.2.3 From 247f226adadfb7be09dd537f177429f4415aef8e Mon Sep 17 00:00:00 2001 From: Zijun Hu Date: Wed, 27 Apr 2022 10:16:51 +0800 Subject: Bluetooth: btusb: Set HCI_QUIRK_BROKEN_ERR_DATA_REPORTING for QCA Set HCI_QUIRK_BROKEN_ERR_DATA_REPORTING for QCA controllers since they answer HCI_OP_READ_DEF_ERR_DATA_REPORTING with error code "UNKNOWN HCI COMMAND" as shown below: [ 580.517552] Bluetooth: hci0: unexpected cc 0x0c5a length: 1 < 2 [ 580.517660] Bluetooth: hci0: Opcode 0x c5a failed: -38 hcitool -i hci0 cmd 0x03 0x5a < HCI Command: ogf 0x03, ocf 0x005a, plen 0 > HCI Event: 0x0e plen 4 01 5A 0C 01 btmon log: < HCI Command: Read Default Erroneous Data Reporting (0x03|0x005a) plen 0 > HCI Event: Command Complete (0x0e) plen 4 Read Default Erroneous Data Reporting (0x03|0x005a) ncmd 1 Status: Unknown HCI Command (0x01) Signed-off-by: Zijun Hu Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e6e28d3d1683..1ec7a731c89e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3345,6 +3345,7 @@ static int btusb_setup_qca(struct hci_dev *hdev) * work with the likes of HSP/HFP mSBC. */ set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); return 0; } -- cgit v1.2.3 From c77a592befddf10dc527a438a81d7166d724c32d Mon Sep 17 00:00:00 2001 From: Ismael Luceno Date: Wed, 27 Apr 2022 14:59:48 +0200 Subject: Bluetooth: btusb: Add 0x0bda:0x8771 Realtek 8761BUV devices Identifies as just "Realtek Semiconductor Corp. Bluetooth Radio"; it's used in many adapters, e.g.: - UGREEN CM390 - C-TECH BTD-01 - Orico BTA-508 - KS-is KS-457 Device description at /sys/kernel/debug/usb/devices: T: Bus=03 Lev=01 Prnt=01 Port=05 Cnt=02 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0bda ProdID=8771 Rev= 2.00 S: Manufacturer=Realtek S: Product=Bluetooth Radio S: SerialNumber=00E04C239987 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) 
MxPS= 16 Ivl=1ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms Signed-off-by: Ismael Luceno Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1ec7a731c89e..3b0af9f05ea8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -505,6 +505,10 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + /* Additional Realtek 8761BUV Bluetooth devices */ + { USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, -- cgit v1.2.3 From 23fcb27b33c8ca09a371de5bc95a18c20f72709c Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Thu, 28 Apr 2022 02:38:39 +0800 Subject: Bluetooth: btusb: Add a new PID/VID 0489/e0c8 for MT7921 Add VID 0489 & PID e0c8 for MediaTek MT7921 USB Bluetooth chip. The information in /sys/kernel/debug/usb/devices about the Bluetooth device is listed as the below. T: Bus=01 Lev=01 Prnt=01 Port=13 Cnt=03 Dev#= 4 Spd=480 MxCh= 0 D: Ver= 2.10 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0489 ProdID=e0c8 Rev= 1.00 S: Manufacturer=MediaTek Inc. S: Product=Wireless_Device S: SerialNumber=000000000 C:* #Ifs= 3 Cfg#= 1 Atr=e0 MxPwr=100mA A: FirstIf#= 0 IfCount= 3 Cls=e0(wlcon) Sub=01 Prot=01 I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) 
MxPS= 16 Ivl=125us E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms I: If#= 1 Alt= 6 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 63 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 63 Ivl=1ms I:* If#= 2 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=125us E: Ad=0a(O) Atr=03(Int.) MxPS= 64 Ivl=125us I: If#= 2 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=(none) E: Ad=8a(I) Atr=03(Int.) MxPS= 512 Ivl=125us E: Ad=0a(O) Atr=03(Int.) MxPS= 512 Ivl=125us Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3b0af9f05ea8..e25fcd49db70 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -451,6 +451,9 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_VALID_LE_STATES }, /* Additional MediaTek MT7921 Bluetooth devices */ + { USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, -- cgit v1.2.3 From 0d37ddfc50d9a81f46d3b2ffa156d4fea8a410f7 Mon Sep 17 00:00:00 2001 From: Tim Harvey Date: Thu, 5 May 2022 12:50:10 -0700 Subject: Bluetooth: btbcm: Add entry for BCM4373A0 UART Bluetooth This patch adds the device ID for the BCM4373A0 module, found e.g. in the Infineon (Cypress) CYW4373E chip. The required firmware file is named 'BCM4373A0.hcd'. Signed-off-by: Tim Harvey Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btbcm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 92a2b7e81757..76fbb046bdbe 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -458,6 +458,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x6106, "BCM4359C0" }, /* 003.001.006 */ { 0x4106, "BCM4335A0" }, /* 002.001.006 */ { 0x410c, "BCM43430B0" }, /* 002.001.012 */ + { 0x2119, "BCM4373A0" }, /* 001.001.025 */ { } }; -- cgit v1.2.3 From 0fab6361c4ba17d1b43a991bef4238a3c1754d35 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Thu, 12 May 2022 06:22:15 +0800 Subject: Bluetooth: btmtksdio: fix use-after-free at btmtksdio_recv_event We should not access skb buffer data anymore after hci_recv_frame was called. 
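In generic terms the rule looks like this (a sketch of the pattern, not the driver's exact code): anything needed from the frame has to be cached before ownership is handed over.

    /* Sketch: hci_recv_frame() consumes the skb, so it may already be
     * freed when it returns; copy what is needed first.
     */
    static int example_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
    {
            struct hci_event_hdr *hdr = (void *)skb->data;
            u8 evt = hdr->evt;      /* cache before handing the skb off */
            int err = hci_recv_frame(hdev, skb);

            if (err < 0)
                    return err;

            if (evt == HCI_EV_WMT) {
                    /* vendor event handling would go here; never
                     * dereference hdr or skb at this point
                     */
            }

            return 0;
    }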
[ 39.634809] BUG: KASAN: use-after-free in btmtksdio_recv_event+0x1b0 [ 39.634855] Read of size 1 at addr ffffff80cf28a60d by task kworker [ 39.634962] Call trace: [ 39.634974] dump_backtrace+0x0/0x3b8 [ 39.634999] show_stack+0x20/0x2c [ 39.635016] dump_stack_lvl+0x60/0x78 [ 39.635040] print_address_description+0x70/0x2f0 [ 39.635062] kasan_report+0x154/0x194 [ 39.635079] __asan_report_load1_noabort+0x44/0x50 [ 39.635099] btmtksdio_recv_event+0x1b0/0x1c4 [ 39.635129] btmtksdio_txrx_work+0x6cc/0xac4 [ 39.635157] process_one_work+0x560/0xc5c [ 39.635177] worker_thread+0x7ec/0xcc0 [ 39.635195] kthread+0x2d0/0x3d0 [ 39.635215] ret_from_fork+0x10/0x20 [ 39.635247] Allocated by task 0: [ 39.635260] (stack is not available) [ 39.635281] Freed by task 2392: [ 39.635295] kasan_save_stack+0x38/0x68 [ 39.635319] kasan_set_track+0x28/0x3c [ 39.635338] kasan_set_free_info+0x28/0x4c [ 39.635357] ____kasan_slab_free+0x104/0x150 [ 39.635374] __kasan_slab_free+0x18/0x28 [ 39.635391] slab_free_freelist_hook+0x114/0x248 [ 39.635410] kfree+0xf8/0x2b4 [ 39.635427] skb_free_head+0x58/0x98 [ 39.635447] skb_release_data+0x2f4/0x410 [ 39.635464] skb_release_all+0x50/0x60 [ 39.635481] kfree_skb+0xc8/0x25c [ 39.635498] hci_event_packet+0x894/0xca4 [bluetooth] [ 39.635721] hci_rx_work+0x1c8/0x68c [bluetooth] [ 39.635925] process_one_work+0x560/0xc5c [ 39.635951] worker_thread+0x7ec/0xcc0 [ 39.635970] kthread+0x2d0/0x3d0 [ 39.635990] ret_from_fork+0x10/0x20 [ 39.636021] The buggy address belongs to the object at ffffff80cf28a600 which belongs to the cache kmalloc-512 of size 512 [ 39.636039] The buggy address is located 13 bytes inside of 512-byte region [ffffff80cf28a600, ffffff80cf28a800) Fixes: 9aebfd4a2200 ("Bluetooth: mediatek: add support for MediaTek MT7663S and MT7668S SDIO devices") Co-developed-by: Yake Yang Signed-off-by: Yake Yang Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmtksdio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index b6d77e04240c..4ae6631a7c29 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -379,6 +379,7 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); struct hci_event_hdr *hdr = (void *)skb->data; + u8 evt = hdr->evt; int err; /* When someone waits for the WMT event, the skb is being cloned @@ -396,7 +397,7 @@ static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) if (err < 0) goto err_free_skb; - if (hdr->evt == HCI_EV_WMT) { + if (evt == HCI_EV_WMT) { if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) { /* Barrier to sync with other CPUs */ -- cgit v1.2.3 From 7469720563e01f479ec5afe06bd6f440f965d523 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Fri, 13 May 2022 05:38:12 +0800 Subject: Bluetooth: btmtksdio: fix possible FW initialization failure According to FW advised sequence, mt7921s need to re-acquire privilege immediately after the firmware download is complete before normal running. Otherwise, it is still possible the bus may be stuck in an abnormal status that causes FW initialization failure in the current driver. 
Fixes: 752aea58489f ("Bluetooth: mt7921s: fix bus hang with wrong privilege") Co-developed-by: Yake Yang Signed-off-by: Yake Yang Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmtksdio.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 4ae6631a7c29..5d13c1f61bd3 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -864,6 +864,14 @@ static int mt79xx_setup(struct hci_dev *hdev, const char *fwname) return err; } + err = btmtksdio_fw_pmctrl(bdev); + if (err < 0) + return err; + + err = btmtksdio_drv_pmctrl(bdev); + if (err < 0) + return err; + /* Enable Bluetooth protocol */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; @@ -1109,14 +1117,6 @@ static int btmtksdio_setup(struct hci_dev *hdev) if (err < 0) return err; - err = btmtksdio_fw_pmctrl(bdev); - if (err < 0) - return err; - - err = btmtksdio_drv_pmctrl(bdev); - if (err < 0) - return err; - /* Enable SCO over I2S/PCM */ err = btmtksdio_sco_setting(hdev); if (err < 0) { -- cgit v1.2.3 From baabb7f530e8a3f0085d12f4ea0bada4115515d3 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Fri, 13 May 2022 05:38:11 +0800 Subject: Bluetooth: btmtksdio: fix the reset takes too long Sending WMT command during the reset in progress is invalid and would get no response from firmware until the reset is complete, so we ignore the WMT command here to resolve the issue which causes the whole reset process taking too long. Fixes: 8fafe702253d ("Bluetooth: mt7921s: support bluetooth reset mechanism") Co-developed-by: Yake Yang Signed-off-by: Yake Yang Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmtksdio.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 5d13c1f61bd3..d6700efcfe8c 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -1189,6 +1189,10 @@ static int btmtksdio_shutdown(struct hci_dev *hdev) */ pm_runtime_get_sync(bdev->dev); + /* wmt command only works until the reset is complete */ + if (test_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) + goto ignore_wmt_cmd; + /* Disable the device */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; @@ -1202,6 +1206,7 @@ static int btmtksdio_shutdown(struct hci_dev *hdev) return err; } +ignore_wmt_cmd: pm_runtime_put_noidle(bdev->dev); pm_runtime_disable(bdev->dev); -- cgit v1.2.3 From f0cf4000f5867ec4325d19d32bd83cf583065667 Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Thu, 12 May 2022 11:18:52 -0600 Subject: net: axienet: Be more careful about updating tx_bd_tail The axienet_start_xmit function was updating the tx_bd_tail variable multiple times, with potential rollbacks on error or invalid intermediate positions, even though this variable is also used in the TX completion path. Use READ_ONCE where this variable is read and WRITE_ONCE where it is written to make this update more atomic, and move the write before the MMIO write to start the transfer, so it is protected by that implicit write barrier. Signed-off-by: Robert Hancock Signed-off-by: David S. 
Miller --- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 26 +++++++++++++---------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index d6fc3f7acdf0..1e0f70e53991 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -746,7 +746,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, /* Ensure we see all descriptor updates from device or TX IRQ path */ rmb(); - cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; + cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % + lp->tx_bd_num]; if (cur_p->cntrl) return NETDEV_TX_BUSY; return 0; @@ -807,12 +808,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) u32 csum_index_off; skb_frag_t *frag; dma_addr_t tail_p, phys; + u32 orig_tail_ptr, new_tail_ptr; struct axienet_local *lp = netdev_priv(ndev); struct axidma_bd *cur_p; - u32 orig_tail_ptr = lp->tx_bd_tail; + + orig_tail_ptr = lp->tx_bd_tail; + new_tail_ptr = orig_tail_ptr; num_frag = skb_shinfo(skb)->nr_frags; - cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + cur_p = &lp->tx_bd_v[orig_tail_ptr]; if (axienet_check_tx_bd_space(lp, num_frag + 1)) { /* Should not happen as last start_xmit call should have @@ -852,9 +856,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; for (ii = 0; ii < num_frag; ii++) { - if (++lp->tx_bd_tail >= lp->tx_bd_num) - lp->tx_bd_tail = 0; - cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + if (++new_tail_ptr >= lp->tx_bd_num) + new_tail_ptr = 0; + cur_p = &lp->tx_bd_v[new_tail_ptr]; frag = &skb_shinfo(skb)->frags[ii]; phys = dma_map_single(lp->dev, skb_frag_address(frag), @@ -866,8 +870,6 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) ndev->stats.tx_dropped++; axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1, NULL); - lp->tx_bd_tail = orig_tail_ptr; - return NETDEV_TX_OK; } desc_set_phys_addr(lp, phys, cur_p); @@ -877,11 +879,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; cur_p->skb = skb; - tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; + tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; + if (++new_tail_ptr >= lp->tx_bd_num) + new_tail_ptr = 0; + WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); + /* Start the transfer */ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); - if (++lp->tx_bd_tail >= lp->tx_bd_num) - lp->tx_bd_tail = 0; /* Stop queue if next transmit may not have space */ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { -- cgit v1.2.3 From 9e2bc267e78068b512d4409b884662f425adb1ec Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Thu, 12 May 2022 11:18:53 -0600 Subject: net: axienet: Use NAPI for TX completion path This driver was using the TX IRQ handler to perform all TX completion tasks. Under heavy TX network load, this can cause significant irqs-off latencies (found to be in the hundreds of microseconds using ftrace). This can cause other issues, such as overrunning serial UART FIFOs when using high baud rates with limited UART FIFO sizes. Switch to using a NAPI poll handler to perform the TX completion work to get this out of hard IRQ context and avoid the IRQ latency impact. 
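The pattern, reduced to a generic sketch (all foo_* names are placeholders, not axienet symbols): the hard IRQ handler only masks the TX completion interrupt and schedules NAPI, while the poll callback cleans up the descriptors in softirq context and re-arms the interrupt once it runs out of work.

    /* Generic TX-completion NAPI sketch. Needs <linux/netdevice.h> and
     * <linux/interrupt.h>; the foo_* helpers stand in for device-specific
     * register accesses and ring cleanup.
     */
    struct foo_priv {
            struct napi_struct napi_tx;
            /* ... device state ... */
    };

    static void foo_mask_tx_irq(struct foo_priv *priv) { /* device-specific */ }
    static void foo_unmask_tx_irq(struct foo_priv *priv) { /* device-specific */ }
    static int foo_clean_tx_ring(struct foo_priv *priv, int budget) { return 0; }

    static irqreturn_t foo_tx_irq(int irq, void *data)
    {
            struct foo_priv *priv = data;

            foo_mask_tx_irq(priv);          /* stop further TX completion IRQs */
            napi_schedule(&priv->napi_tx);  /* defer the cleanup to softirq */
            return IRQ_HANDLED;
    }

    static int foo_tx_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi_tx);
            int done = foo_clean_tx_ring(priv, budget);

            if (done < budget && napi_complete_done(napi, done))
                    foo_unmask_tx_irq(priv);  /* re-enable completion IRQs */

            return done;
    }

Re-arming the interrupt only when napi_complete_done() succeeds avoids re-enabling it while the NAPI instance is still scheduled.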
A separate poll handler is used for TX and RX since they have separate IRQs on this controller, so that the completion work for each of them stays on the same CPU as the interrupt. Testing on a Xilinx MPSoC ZU9EG platform using iperf3 from a Linux PC through a switch at 1G link speed showed no significant change in TX or RX throughput, with approximately 941 Mbps before and after. Hard IRQ time in the TX throughput test was significantly reduced from 12% to below 1% on the CPU handling TX interrupts, with total hard+soft IRQ CPU usage dropping from about 56% down to 48%. Signed-off-by: Robert Hancock Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/xilinx_axienet.h | 54 ++++---- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 142 +++++++++++++--------- 2 files changed, 111 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index d5c1e5c4a508..4225efbeda3d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -385,7 +385,6 @@ struct axidma_bd { * @phy_node: Pointer to device node structure * @phylink: Pointer to phylink instance * @phylink_config: phylink configuration settings - * @napi: NAPI control structure * @pcs_phy: Reference to PCS/PMA PHY if used * @pcs: phylink pcs structure for PCS PHY * @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core @@ -396,7 +395,22 @@ struct axidma_bd { * @regs_start: Resource start for axienet device addresses * @regs: Base address for the axienet_local device address space * @dma_regs: Base address for the axidma device address space + * @napi_rx: NAPI RX control structure * @rx_dma_cr: Nominal content of RX DMA control register + * @rx_bd_v: Virtual address of the RX buffer descriptor ring + * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring + * @rx_bd_num: Size of RX buffer descriptor ring + * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being + * accessed currently. + * @napi_tx: NAPI TX control structure + * @tx_dma_cr: Nominal content of TX DMA control register + * @tx_bd_v: Virtual address of the TX buffer descriptor ring + * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring + * @tx_bd_num: Size of TX buffer descriptor ring + * @tx_bd_ci: Stores the next Tx buffer descriptor in the ring that may be + * complete. Only updated at runtime by TX NAPI poll. + * @tx_bd_tail: Stores the index of the next Tx buffer descriptor in the ring + * to be populated. * @dma_err_task: Work structure to process Axi DMA errors * @tx_irq: Axidma TX IRQ number * @rx_irq: Axidma RX IRQ number @@ -404,19 +418,6 @@ struct axidma_bd { * @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X * @options: AxiEthernet option word * @features: Stores the extended features supported by the axienet hw - * @tx_bd_v: Virtual address of the TX buffer descriptor ring - * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring - * @tx_bd_num: Size of TX buffer descriptor ring - * @rx_bd_v: Virtual address of the RX buffer descriptor ring - * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring - * @rx_bd_num: Size of RX buffer descriptor ring - * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being - * accessed currently. Used while alloc. 
BDs before a TX starts - * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being - * accessed currently. Used while processing BDs after the TX - * completed. - * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being - * accessed currently. * @max_frm_size: Stores the maximum size of the frame that can be that * Txed/Rxed in the existing hardware. If jumbo option is * supported, the maximum frame size would be 9k. Else it is @@ -436,8 +437,6 @@ struct axienet_local { struct phylink *phylink; struct phylink_config phylink_config; - struct napi_struct napi; - struct mdio_device *pcs_phy; struct phylink_pcs pcs; @@ -453,7 +452,20 @@ struct axienet_local { void __iomem *regs; void __iomem *dma_regs; + struct napi_struct napi_rx; u32 rx_dma_cr; + struct axidma_bd *rx_bd_v; + dma_addr_t rx_bd_p; + u32 rx_bd_num; + u32 rx_bd_ci; + + struct napi_struct napi_tx; + u32 tx_dma_cr; + struct axidma_bd *tx_bd_v; + dma_addr_t tx_bd_p; + u32 tx_bd_num; + u32 tx_bd_ci; + u32 tx_bd_tail; struct work_struct dma_err_task; @@ -465,16 +477,6 @@ struct axienet_local { u32 options; u32 features; - struct axidma_bd *tx_bd_v; - dma_addr_t tx_bd_p; - u32 tx_bd_num; - struct axidma_bd *rx_bd_v; - dma_addr_t rx_bd_p; - u32 rx_bd_num; - u32 tx_bd_ci; - u32 tx_bd_tail; - u32 rx_bd_ci; - u32 max_frm_size; u32 rxmem; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 1e0f70e53991..93c9f305bba4 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -254,8 +254,6 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) */ static void axienet_dma_start(struct axienet_local *lp) { - u32 tx_cr; - /* Start updating the Rx channel control register */ lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) | XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; @@ -269,16 +267,16 @@ static void axienet_dma_start(struct axienet_local *lp) axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); /* Start updating the Tx channel control register */ - tx_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | - XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; + lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | + XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; /* Only set interrupt delay timer if not generating an interrupt on * the first TX packet. Otherwise leave at 0 to disable delay interrupt. */ if (lp->coalesce_count_tx > 1) - tx_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx) - << XAXIDMA_DELAY_SHIFT) | - XAXIDMA_IRQ_DELAY_MASK; - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr); + lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx) + << XAXIDMA_DELAY_SHIFT) | + XAXIDMA_IRQ_DELAY_MASK; + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. @@ -294,8 +292,8 @@ static void axienet_dma_start(struct axienet_local *lp) * tail pointer register that the Tx channel will start transmitting. 
*/ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); - tx_cr |= XAXIDMA_CR_RUNSTOP_MASK; - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr); + lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); } /** @@ -666,37 +664,34 @@ static int axienet_device_reset(struct net_device *ndev) /** * axienet_free_tx_chain - Clean up a series of linked TX descriptors. - * @ndev: Pointer to the net_device structure + * @lp: Pointer to the axienet_local structure * @first_bd: Index of first descriptor to clean up - * @nr_bds: Number of descriptors to clean up, can be -1 if unknown. + * @nr_bds: Max number of descriptors to clean up + * @force: Whether to clean descriptors even if not complete * @sizep: Pointer to a u32 filled with the total sum of all bytes * in all cleaned-up descriptors. Ignored if NULL. + * @budget: NAPI budget (use 0 when not called from NAPI poll) * * Would either be called after a successful transmit operation, or after * there was an error when setting up the chain. * Returns the number of descriptors handled. */ -static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, - int nr_bds, u32 *sizep) +static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, + int nr_bds, bool force, u32 *sizep, int budget) { - struct axienet_local *lp = netdev_priv(ndev); struct axidma_bd *cur_p; - int max_bds = nr_bds; unsigned int status; dma_addr_t phys; int i; - if (max_bds == -1) - max_bds = lp->tx_bd_num; - - for (i = 0; i < max_bds; i++) { + for (i = 0; i < nr_bds; i++) { cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; status = cur_p->status; - /* If no number is given, clean up *all* descriptors that have - * been completed by the MAC. + /* If force is not specified, clean up only descriptors + * that have been completed by the MAC. */ - if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) + if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) break; /* Ensure we see complete descriptor update */ @@ -707,7 +702,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, DMA_TO_DEVICE); if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) - dev_consume_skb_irq(cur_p->skb); + napi_consume_skb(cur_p->skb, budget); cur_p->app0 = 0; cur_p->app1 = 0; @@ -737,14 +732,14 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, * This function is invoked before BDs are allocated and transmission starts. * This function returns 0 if a BD or group of BDs can be allocated for * transmission. If the BD or any of the BDs are not free the function - * returns a busy status. This is invoked from axienet_start_xmit. + * returns a busy status. */ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, int num_frag) { struct axidma_bd *cur_p; - /* Ensure we see all descriptor updates from device or TX IRQ path */ + /* Ensure we see all descriptor updates from device or TX polling */ rmb(); cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % lp->tx_bd_num]; @@ -754,36 +749,51 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, } /** - * axienet_start_xmit_done - Invoked once a transmit is completed by the + * axienet_tx_poll - Invoked once a transmit is completed by the * Axi DMA Tx channel. - * @ndev: Pointer to the net_device structure + * @napi: Pointer to NAPI structure. + * @budget: Max number of TX packets to process. + * + * Return: Number of TX packets processed. 
* - * This function is invoked from the Axi DMA Tx isr to notify the completion + * This function is invoked from the NAPI processing to notify the completion * of transmit operation. It clears fields in the corresponding Tx BDs and * unmaps the corresponding buffer so that CPU can regain ownership of the * buffer. It finally invokes "netif_wake_queue" to restart transmission if * required. */ -static void axienet_start_xmit_done(struct net_device *ndev) +static int axienet_tx_poll(struct napi_struct *napi, int budget) { - struct axienet_local *lp = netdev_priv(ndev); - u32 packets = 0; + struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); + struct net_device *ndev = lp->ndev; u32 size = 0; + int packets; - packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size); + packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget); - lp->tx_bd_ci += packets; - if (lp->tx_bd_ci >= lp->tx_bd_num) - lp->tx_bd_ci -= lp->tx_bd_num; + if (packets) { + lp->tx_bd_ci += packets; + if (lp->tx_bd_ci >= lp->tx_bd_num) + lp->tx_bd_ci %= lp->tx_bd_num; - ndev->stats.tx_packets += packets; - ndev->stats.tx_bytes += size; + ndev->stats.tx_packets += packets; + ndev->stats.tx_bytes += size; - /* Matches barrier in axienet_start_xmit */ - smp_mb(); + /* Matches barrier in axienet_start_xmit */ + smp_mb(); - if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) - netif_wake_queue(ndev); + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) + netif_wake_queue(ndev); + } + + if (packets < budget && napi_complete_done(napi, packets)) { + /* Re-enable TX completion interrupts. This should + * cause an immediate interrupt if any TX packets are + * already pending. + */ + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); + } + return packets; } /** @@ -868,8 +878,8 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (net_ratelimit()) netdev_err(ndev, "TX DMA mapping error\n"); ndev->stats.tx_dropped++; - axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1, - NULL); + axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, + true, NULL, 0); return NETDEV_TX_OK; } desc_set_phys_addr(lp, phys, cur_p); @@ -891,7 +901,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { netif_stop_queue(ndev); - /* Matches barrier in axienet_start_xmit_done */ + /* Matches barrier in axienet_tx_poll */ smp_mb(); /* Space might have just been freed - check again */ @@ -903,13 +913,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } /** - * axienet_poll - Triggered by RX ISR to complete the received BD processing. + * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. * @napi: Pointer to NAPI structure. - * @budget: Max number of packets to process. + * @budget: Max number of RX packets to process. * * Return: Number of RX packets processed. 
*/ -static int axienet_poll(struct napi_struct *napi, int budget) +static int axienet_rx_poll(struct napi_struct *napi, int budget) { u32 length; u32 csumstatus; @@ -918,7 +928,7 @@ static int axienet_poll(struct napi_struct *napi, int budget) dma_addr_t tail_p = 0; struct axidma_bd *cur_p; struct sk_buff *skb, *new_skb; - struct axienet_local *lp = container_of(napi, struct axienet_local, napi); + struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; @@ -1021,8 +1031,8 @@ static int axienet_poll(struct napi_struct *napi, int budget) * * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. * - * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done" - * to complete the BD processing. + * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the + * TX BD processing. */ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) { @@ -1044,7 +1054,15 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) (lp->tx_bd_v[lp->tx_bd_ci]).phys); schedule_work(&lp->dma_err_task); } else { - axienet_start_xmit_done(lp->ndev); + /* Disable further TX completion interrupts and schedule + * NAPI to handle the completions. + */ + u32 cr = lp->tx_dma_cr; + + cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + + napi_schedule(&lp->napi_tx); } return IRQ_HANDLED; @@ -1088,7 +1106,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); - napi_schedule(&lp->napi); + napi_schedule(&lp->napi_rx); } return IRQ_HANDLED; @@ -1164,7 +1182,8 @@ static int axienet_open(struct net_device *ndev) /* Enable worker thread for Axi DMA error handling */ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); - napi_enable(&lp->napi); + napi_enable(&lp->napi_rx); + napi_enable(&lp->napi_tx); /* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, @@ -1191,7 +1210,8 @@ err_eth_irq: err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: - napi_disable(&lp->napi); + napi_disable(&lp->napi_tx); + napi_disable(&lp->napi_rx); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); cancel_work_sync(&lp->dma_err_task); @@ -1215,7 +1235,8 @@ static int axienet_stop(struct net_device *ndev) dev_dbg(&ndev->dev, "axienet_close()\n"); - napi_disable(&lp->napi); + napi_disable(&lp->napi_tx); + napi_disable(&lp->napi_rx); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); @@ -1736,7 +1757,8 @@ static void axienet_dma_err_handler(struct work_struct *work) dma_err_task); struct net_device *ndev = lp->ndev; - napi_disable(&lp->napi); + napi_disable(&lp->napi_tx); + napi_disable(&lp->napi_rx); axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); @@ -1802,7 +1824,8 @@ static void axienet_dma_err_handler(struct work_struct *work) axienet_set_mac_address(ndev, NULL); axienet_set_multicast_list(ndev); axienet_setoptions(ndev, lp->options); - napi_enable(&lp->napi); + napi_enable(&lp->napi_rx); + napi_enable(&lp->napi_tx); } /** @@ -1851,7 +1874,8 @@ static int axienet_probe(struct platform_device *pdev) lp->rx_bd_num = RX_BD_NUM_DEFAULT; lp->tx_bd_num = TX_BD_NUM_DEFAULT; - netif_napi_add(ndev, &lp->napi, axienet_poll, NAPI_POLL_WEIGHT); + netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll, NAPI_POLL_WEIGHT); + netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll, NAPI_POLL_WEIGHT); 
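Note (illustration, not part of any patch here): the axienet conversion above follows the standard split-NAPI recipe for the TX path: the ISR masks its own completion interrupts and schedules the TX NAPI instance, and the poll routine re-arms the interrupts only once it has reclaimed fewer descriptors than the budget allows. A minimal sketch of that handoff is below; my_priv, MY_TX_CR, MY_IRQ_MASK and my_reclaim_tx() are invented names for illustration, not axienet symbols.

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi_tx;
	void __iomem *regs;
	u32 tx_cr;			/* cached TX control register value */
};

#define MY_TX_CR	0x00			/* hypothetical register offset */
#define MY_IRQ_MASK	(BIT(0) | BIT(1))	/* hypothetical completion/delay IRQ bits */

/* Reclaims up to @budget completed TX descriptors; body not shown in this sketch. */
static int my_reclaim_tx(struct my_priv *priv, int budget);

static irqreturn_t my_tx_irq(int irq, void *data)
{
	struct my_priv *priv = data;

	/* Mask further TX completion interrupts; the poll routine re-arms them. */
	writel(priv->tx_cr & ~MY_IRQ_MASK, priv->regs + MY_TX_CR);
	napi_schedule(&priv->napi_tx);
	return IRQ_HANDLED;
}

static int my_tx_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi_tx);
	int done = my_reclaim_tx(priv, budget);

	/* Only re-enable interrupts once the ring is drained within budget. */
	if (done < budget && napi_complete_done(napi, done))
		writel(priv->tx_cr, priv->regs + MY_TX_CR);

	return done;
}

Masking in the ISR rather than in the poll routine avoids an interrupt storm in the window between napi_schedule() and the first poll invocation.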
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); if (!lp->axi_clk) { -- cgit v1.2.3 From ad04cc058d644acc6c903d8da4b8d59aa2b6335e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 12 May 2022 22:40:21 -0400 Subject: bnxt_en: Update firmware interface to 1.10.2.95 The main changes are timestamp support for all RX packets and new PCIe statistics. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 415 +++++++++++++++++--------- 1 file changed, 280 insertions(+), 135 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index b7100edbd6dd..b753032a1047 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2,7 +2,7 @@ * * Copyright (c) 2014-2016 Broadcom Corporation * Copyright (c) 2014-2018 Broadcom Limited - * Copyright (c) 2018-2021 Broadcom Inc. + * Copyright (c) 2018-2022 Broadcom Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -311,6 +311,8 @@ struct cmd_nums { #define HWRM_CFA_TFLIB 0x125UL #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL + #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL + #define HWRM_CFA_TLS_FILTER_FREE 0x129UL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL @@ -375,6 +377,8 @@ struct cmd_nums { #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL + #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL + #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -399,6 +403,7 @@ struct cmd_nums { #define HWRM_MFG_PSOC_QSTATUS 0x215UL #define HWRM_MFG_SELFTEST_QLIST 0x216UL #define HWRM_MFG_SELFTEST_EXEC 0x217UL + #define HWRM_STAT_GENERIC_QSTATS 0x218UL #define HWRM_TF 0x2bcUL #define HWRM_TF_VERSION_GET 0x2bdUL #define HWRM_TF_SESSION_OPEN 0x2c6UL @@ -541,8 +546,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_RSVD 73 -#define HWRM_VERSION_STR "1.10.2.73" +#define HWRM_VERSION_RSVD 95 +#define HWRM_VERSION_STR "1.10.2.95" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -770,7 +775,9 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL - #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x47UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1259,7 +1266,8 @@ struct hwrm_async_event_cmpl_error_report_base { #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 
0x4UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD }; /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */ @@ -1365,6 +1373,8 @@ struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold { #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0 #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8 }; /* hwrm_func_reset_input (size:192b/24B) */ @@ -1600,36 +1610,38 @@ struct hwrm_func_qcaps_output { __le16 max_sp_tx_rings; __le16 max_msix_vfs; __le32 flags_ext; - #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL + #define 
FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL u8 max_schqs; u8 mpc_chnls_cap; #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL @@ -1638,7 +1650,23 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL __le16 max_key_ctxs_alloc; - u8 unused_1[7]; + __le32 flags_ext2; + #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL + __le16 tunnel_disable_flag; + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL + u8 unused_1; u8 valid; }; @@ -1802,11 +1830,17 @@ struct hwrm_func_qcfg_output { __le16 host_mtu; __le16 alloc_tx_key_ctxs; __le16 alloc_rx_key_ctxs; - u8 unused_3[5]; + u8 port_kdnet_mode; + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL + #define 
FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED + u8 kdnet_pcie_function; + __le16 port_kdnet_fid; + u8 unused_3; u8 valid; }; -/* hwrm_func_cfg_input (size:896b/112B) */ +/* hwrm_func_cfg_input (size:960b/120B) */ struct hwrm_func_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1986,7 +2020,13 @@ struct hwrm_func_cfg_input { __le16 host_mtu; __le16 num_tx_key_ctxs; __le16 num_rx_key_ctxs; - u8 unused_0[4]; + __le32 enables2; + #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL + u8 port_kdnet_mode; + #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL + #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL + #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED + u8 unused_0[7]; }; /* hwrm_func_cfg_output (size:128b/16B) */ @@ -3355,20 +3395,26 @@ struct hwrm_func_backing_store_cfg_v2_input { __le16 target_id; __le64 resp_addr; __le16 type; - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID __le16 instance; __le32 flags; #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL @@ -3416,20 +3462,26 @@ struct hwrm_func_backing_store_qcfg_v2_input { __le16 target_id; __le64 resp_addr; __le16 type; - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL - #define 
FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID __le16 instance; u8 rsvd[4]; }; @@ -3453,6 +3505,8 @@ struct hwrm_func_backing_store_qcfg_v2_output { #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID __le16 instance; @@ -3528,20 +3582,26 @@ struct hwrm_func_backing_store_qcaps_v2_input { __le16 target_id; __le64 resp_addr; __le16 type; - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL + 
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID u8 rsvd[6]; }; @@ -3552,24 +3612,31 @@ struct hwrm_func_backing_store_qcaps_v2_output { __le16 seq_id; __le16 resp_len; __le16 type; - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID __le16 entry_size; __le32 flags; - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL + #define 
FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL __le32 instance_bit_map; u8 ctx_init_value; u8 ctx_init_offset; @@ -4108,6 +4175,8 @@ struct hwrm_port_mac_cfg_input { #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL + #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL + #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL __le32 enables; #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL @@ -6390,6 +6459,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL + #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL __le16 vnic_id; __le16 dflt_ring_grp; __le16 rss_rule; @@ -6404,7 +6474,12 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX - u8 unused0[5]; + u8 l2_cqe_mode; + #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL + #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL + #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL + #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED + u8 unused0[4]; }; /* hwrm_vnic_cfg_output (size:128b/16B) */ @@ -6437,25 +6512,31 @@ struct hwrm_vnic_qcaps_output { __le16 mru; u8 unused_0[2]; __le32 flags; - #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL - #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL - #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL - #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL - #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL - #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL - #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL - #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL - #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL - #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL - #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL - #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL - #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL + #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL + #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL + #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL + #define 
VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL + #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; @@ -6576,6 +6657,10 @@ struct hwrm_vnic_rss_cfg_input { #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL __le16 vnic_id; u8 ring_table_pair_index; u8 hash_mode_flags; @@ -6590,11 +6675,11 @@ struct hwrm_vnic_rss_cfg_input { u8 flags; #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL - u8 rss_hash_function; - #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL - #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL - #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL - #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM + u8 ring_select_mode; + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM u8 unused_1[4]; }; @@ -6739,7 +6824,9 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX __le16 flags; - #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL + #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL + #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL + #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL __le64 page_tbl_addr; __le32 fbo; u8 page_size; @@ -7923,12 +8010,17 @@ struct hwrm_cfa_flow_info_input { __le16 target_id; __le64 resp_addr; __le16 flow_handle; - #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL - #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0 - #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL - #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL - #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL - #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL + #define 
CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX u8 unused_0[6]; __le64 ext_flow_handle; }; @@ -8017,7 +8109,8 @@ struct hwrm_cfa_flow_stats_output { __le64 byte_7; __le64 byte_8; __le64 byte_9; - u8 unused_0[7]; + __le16 flow_hits; + u8 unused_0[5]; u8 valid; }; @@ -8243,6 +8336,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output { #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL u8 unused_0[3]; u8 valid; }; @@ -8583,6 +8677,56 @@ struct pcie_ctx_hw_stats { __le64 pcie_recovery_histogram; }; +/* hwrm_stat_generic_qstats_input (size:256b/32B) */ +struct hwrm_stat_generic_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 generic_stat_size; + u8 flags; + #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER 0x0UL + #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + #define STAT_GENERIC_QSTATS_REQ_FLAGS_LAST STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK + u8 unused_0[5]; + __le64 generic_stat_host_addr; +}; + +/* hwrm_stat_generic_qstats_output (size:128b/16B) */ +struct hwrm_stat_generic_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 generic_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* generic_sw_hw_stats (size:1216b/152B) */ +struct generic_sw_hw_stats { + __le64 pcie_statistics_tx_tlp; + __le64 pcie_statistics_rx_tlp; + __le64 pcie_credit_fc_hdr_posted; + __le64 pcie_credit_fc_hdr_nonposted; + __le64 pcie_credit_fc_hdr_cmpl; + __le64 pcie_credit_fc_data_posted; + __le64 pcie_credit_fc_data_nonposted; + __le64 pcie_credit_fc_data_cmpl; + __le64 pcie_credit_fc_tgt_nonposted; + __le64 pcie_credit_fc_tgt_data_posted; + __le64 pcie_credit_fc_tgt_hdr_posted; + __le64 pcie_credit_fc_cmpl_hdr_posted; + __le64 pcie_credit_fc_cmpl_data_posted; + __le64 pcie_cmpl_longest; + __le64 pcie_cmpl_shortest; + __le64 cache_miss_count_cfcq; + __le64 cache_miss_count_cfcs; + __le64 cache_miss_count_cfcc; + __le64 cache_miss_count_cfcm; +}; + /* hwrm_fw_reset_input (size:192b/24B) */ struct hwrm_fw_reset_input { __le16 req_type; @@ -9811,11 +9955,12 @@ struct hwrm_nvm_install_update_output { /* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ struct hwrm_nvm_install_update_cmd_err { u8 code; - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL - #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT u8 unused_0[7]; }; -- cgit v1.2.3 From 11862689e8f117e4702f55000790d7bce6859e84 Mon Sep 17 00:00:00 2001 From: 
Pavan Chebbi Date: Thu, 12 May 2022 22:40:22 -0400 Subject: bnxt_en: Configure ptp filters during bnxt open For correctness, we need to configure the packet filters for timestamping during bnxt_open. This way they are always configured after firmware reset or chip reset. We should not assume that the filters will always be retained across resets. This patch modifies the ioctl handler and always configures the PTP filters in the bnxt_open() path. Cc: Richard Cochran Signed-off-by: Pavan Chebbi Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 56 ++++++++++++++++++++------- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 2 + 3 files changed, 46 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 082518e68579..bcb3c16bf915 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -10508,6 +10508,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (BNXT_PF(bp)) bnxt_vf_reps_open(bp); bnxt_ptp_init_rtc(bp, true); + bnxt_ptp_cfg_tstamp_filters(bp); return 0; open_err_irq: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 00f2f80c0073..f9c94e5fe718 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -295,6 +295,27 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event) return hwrm_req_send(bp, req); } +void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct hwrm_port_mac_cfg_input *req; + + if (!ptp || !ptp->tstamp_filters) + return; + + if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG)) + goto out; + req->flags = cpu_to_le32(ptp->tstamp_filters); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); + + if (!hwrm_req_send(bp, req)) + return; + ptp->tstamp_filters = 0; +out: + netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n"); +} + void bnxt_ptp_reapply_pps(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; @@ -435,27 +456,36 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - struct hwrm_port_mac_cfg_input *req; u32 flags = 0; - int rc; + int rc = 0; - rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); - if (rc) - return rc; + switch (ptp->rx_filter) { + case HWTSTAMP_FILTER_NONE: + flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + break; + } - if (ptp->rx_filter) - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; - else - flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; if (ptp->tx_tstamp_en) flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; else flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; - req->flags = cpu_to_le32(flags); - req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); - req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); - return hwrm_req_send(bp, req); + ptp->tstamp_filters = flags; + + if (netif_running(bp->dev)) { + rc = 
bnxt_close_nic(bp, false, false); + if (!rc) + rc = bnxt_open_nic(bp, false, false); + if (!rc && !ptp->tstamp_filters) + rc = -EIO; + } + + return rc; } int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 530b9922608c..4ce0a14c1e23 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -113,6 +113,7 @@ struct bnxt_ptp_cfg { BNXT_PTP_MSG_PDELAY_RESP) u8 tx_tstamp_en:1; int rx_filter; + u32 tstamp_filters; u32 refclk_regs[2]; u32 refclk_mapped_regs[2]; @@ -133,6 +134,7 @@ do { \ int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off); void bnxt_ptp_update_current_time(struct bnxt *bp); void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2); +void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp); void bnxt_ptp_reapply_pps(struct bnxt *bp); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); -- cgit v1.2.3 From 66ed81dcedc665bf8c7dfc3867d425f50eba219e Mon Sep 17 00:00:00 2001 From: Pavan Chebbi Date: Thu, 12 May 2022 22:40:23 -0400 Subject: bnxt_en: Enable packet timestamping for all RX packets Add driver support to enable timestamping on all RX packets that are received by the NIC. This capability can be requested by the applications using SIOCSHWTSTAMP ioctl with filter type HWTSTAMP_FILTER_ALL. Cc: Richard Cochran Signed-off-by: Pavan Chebbi Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 +++++-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3 +++ drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 26 ++++++++++++++++++++++- 4 files changed, 36 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index bcb3c16bf915..56b46b8206a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2040,7 +2040,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == - RX_CMP_FLAGS_ITYPE_PTP_W_TS)) { + RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { if (bp->flags & BNXT_FLAG_CHIP_P5) { u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); u64 ns, ts; @@ -7659,7 +7659,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) struct hwrm_func_qcaps_output *resp; struct hwrm_func_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - u32 flags, flags_ext; + u32 flags, flags_ext, flags_ext2; int rc; rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); @@ -7704,6 +7704,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; + flags_ext2 = le32_to_cpu(resp->flags_ext2); + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; + bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && BNXT_FW_MAJ(bp) > 217) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a498ee297946..a1dca8c58f54 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1968,6 +1968,7 @@ struct bnxt { #define 
BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 #define BNXT_FW_CAP_HOT_RESET 0x00200000 #define BNXT_FW_CAP_PTP_RTC 0x00400000 + #define BNXT_FW_CAP_RX_ALL_PKT_TS 0x00800000 #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 @@ -2131,6 +2132,7 @@ struct bnxt { struct bpf_prog *xdp_prog; struct bnxt_ptp_cfg *ptp_cfg; + u8 ptp_all_rx_tstamp; /* devlink interface and vf-rep structs */ struct devlink *dl; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index b3a48d6675fe..8a7f3f02ed90 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -3759,6 +3759,9 @@ static int bnxt_get_ts_info(struct net_device *dev, info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) + info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f9c94e5fe718..562f8f68a47d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -305,14 +305,27 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp) if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG)) goto out; + + if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters & + (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | + PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) { + ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | + PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE); + netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n"); + } + req->flags = cpu_to_le32(ptp->tstamp_filters); req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); - if (!hwrm_req_send(bp, req)) + if (!hwrm_req_send(bp, req)) { + bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters & + PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE); return; + } ptp->tstamp_filters = 0; out: + bp->ptp_all_rx_tstamp = 0; netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n"); } @@ -460,8 +473,13 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) int rc = 0; switch (ptp->rx_filter) { + case HWTSTAMP_FILTER_ALL: + flags = PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE; + break; case HWTSTAMP_FILTER_NONE: flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) + flags |= PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -516,6 +534,12 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) ptp->rxctl = 0; ptp->rx_filter = HWTSTAMP_FILTER_NONE; break; + case HWTSTAMP_FILTER_ALL: + if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) { + ptp->rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + return -EOPNOTSUPP; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: -- cgit v1.2.3 From ab0bed4bf6fae8a42cf3b08b38e1fffb1a79193a Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Thu, 12 May 2022 22:40:24 -0400 Subject: bnxt_en: parse and report result field when NVRAM package install fails Instead of always returning -ENOPKG, decode the firmware error code further when the 
HWRM_NVM_INSTALL_UPDATE firmware call fails. Return a more suitable error code to userspace and log an error in dmesg. This is version 2 of the earlier patch that was reverted: 02acd399533e ("bnxt_en: parse result field when NVRAM package install fails") In this new version, if the call is made through devlink instead of ethtool, we'll also set the error message in extack. Link: https://lore.kernel.org/netdev/20220307141358.4d52462e@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com/ Reviewed-by: Somnath Kotur Reviewed-by: Pavan Chebbi Signed-off-by: Kalesh AP Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 81 ++++++++++++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 2 +- 3 files changed, 72 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 0c17f90d44a2..3528ce9849e6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -45,7 +45,7 @@ bnxt_dl_flash_update(struct devlink *dl, } devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); - rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0); + rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack); if (!rc) devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0); else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8a7f3f02ed90..7191e5d74208 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_hwrm.h" @@ -34,6 +35,13 @@ #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ #include "bnxt_coredump.h" +#define BNXT_NVM_ERR_MSG(dev, extack, msg) \ + do { \ + if (extack) \ + NL_SET_ERR_MSG_MOD(extack, msg); \ + netdev_err(dev, "%s\n", msg); \ + } while (0) + static u32 bnxt_get_msglevel(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -2499,12 +2507,65 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, return rc; } +#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM" +#define MSG_INVALID_PKG "PKG install error : Invalid package" +#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error" +#define MSG_INVALID_DEV "PKG install error : Invalid device" +#define MSG_INTERNAL_ERR "PKG install error : Internal error" +#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram" +#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram" +#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected" +#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure" + +static int nvm_update_err_to_stderr(struct net_device *dev, u8 result, + struct netlink_ext_ack *extack) +{ + switch (result) { + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR: + case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR: + case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND: + case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR); + return -EINVAL; + 
case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE: + case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM: + case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG); + return -ENOPKG; + case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR: + BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR); + return -EPERM; + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV: + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID: + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR: + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID: + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV); + return -EOPNOTSUPP; + default: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR); + return -EIO; + } +} + #define BNXT_PKG_DMA_SIZE 0x40000 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, - u32 install_type) + u32 install_type, struct netlink_ext_ack *extack) { struct hwrm_nvm_install_update_input *install; struct hwrm_nvm_install_update_output *resp; @@ -2567,12 +2628,11 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware BNX_DIR_EXT_NONE, &index, &item_len, NULL); if (rc) { - netdev_err(dev, "PKG update area not created in nvram\n"); + BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR); break; } if (fw->size > item_len) { - netdev_err(dev, "PKG insufficient update area in nvram: %lu\n", - (unsigned long)fw->size); + BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR); rc = -EFBIG; break; } @@ -2613,7 +2673,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware switch (cmd_err) { case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK: - netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure Anti-rollback detected\n"); + BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR); rc = -EALREADY; break; case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR: @@ -2641,8 +2701,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware } fallthrough; default: - netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x cmd_err :%x\n", - rc, cmd_err); + BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR); } } while (defrag_attempted && !rc); @@ -2653,7 +2712,7 @@ pkg_abort: if (resp->result) { netdev_err(dev, "PKG install error = %d, problem_item = %d\n", (s8)resp->result, (int)resp->problem_item); - rc = -ENOPKG; + rc = nvm_update_err_to_stderr(dev, resp->result, extack); } if (rc == -EACCES) bnxt_print_admin_err(bp); @@ -2661,7 +2720,7 @@ pkg_abort: } static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, - u32 install_type) + u32 install_type, struct netlink_ext_ack *extack) { const struct firmware *fw; int rc; @@ -2673,7 
+2732,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev, const char *file return rc; } - rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type); + rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack); release_firmware(fw); @@ -2691,7 +2750,7 @@ static int bnxt_flash_device(struct net_device *dev, if (flash->region == ETHTOOL_FLASH_ALL_REGIONS || flash->region > 0xffff) return bnxt_flash_package_from_file(dev, flash->data, - flash->region); + flash->region, NULL); return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 6aa44840f13a..a59284215e78 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -54,7 +54,7 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, u8 self_reset, u8 flags); int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, - u32 install_type); + u32 install_type, struct netlink_ext_ack *extack); int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size); void bnxt_ethtool_init(struct bnxt *bp); void bnxt_ethtool_free(struct bnxt *bp); -- cgit v1.2.3 From f9a210c72d70c9a59cf989fb23fb01ff10d18136 Mon Sep 17 00:00:00 2001 From: Ren Zhijie Date: Fri, 13 May 2022 09:27:21 +0800 Subject: sfc: siena: Fix Kconfig dependencies If CONFIG_PTP_1588_CLOCK=m and CONFIG_SFC_SIENA=y, the siena driver will fail to link: drivers/net/ethernet/sfc/siena/ptp.o: In function `efx_ptp_remove_channel': ptp.c:(.text+0xa28): undefined reference to `ptp_clock_unregister' drivers/net/ethernet/sfc/siena/ptp.o: In function `efx_ptp_probe_channel': ptp.c:(.text+0x13a0): undefined reference to `ptp_clock_register' ptp.c:(.text+0x1470): undefined reference to `ptp_clock_unregister' drivers/net/ethernet/sfc/siena/ptp.o: In function `efx_ptp_pps_worker': ptp.c:(.text+0x1d29): undefined reference to `ptp_clock_event' drivers/net/ethernet/sfc/siena/ptp.o: In function `efx_siena_ptp_get_ts_info': ptp.c:(.text+0x301b): undefined reference to `ptp_clock_index' To fix this build error, make SFC_SIENA depends on PTP_1588_CLOCK. Reported-by: Hulk Robot Fixes: d48523cb88e0 ("sfc: Copy shared files needed for Siena (part 2)") Signed-off-by: Ren Zhijie Acked-by: Martin Habets Link: https://lore.kernel.org/r/20220513012721.140871-1-renzhijie2@huawei.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/Kconfig b/drivers/net/ethernet/sfc/siena/Kconfig index cb3c5cb42a53..c6ea09769873 100644 --- a/drivers/net/ethernet/sfc/siena/Kconfig +++ b/drivers/net/ethernet/sfc/siena/Kconfig @@ -2,6 +2,7 @@ config SFC_SIENA tristate "Solarflare SFC9000 support" depends on PCI + depends on PTP_1588_CLOCK select MDIO select CRC32 help -- cgit v1.2.3 From c28678162b330c8d3bfc472db8c47c8e5e7726f1 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 May 2022 13:56:03 -0700 Subject: eth: sfc: remove remnants of the out-of-tree napi_weight module param Remove napi_weight statics which are set to 64 and never modified, remnants of the out-of-tree napi_weight module param. 
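Note (illustration, not part of any patch here): a minimal sketch of the registration call left behind by the napi_weight cleanup below, with dev, napi and my_poll as placeholder names rather than sfc symbols; in this tree netif_napi_add() still takes an explicit weight argument, and 64 is the stock NAPI_POLL_WEIGHT value.

#include <linux/netdevice.h>

static int my_poll(struct napi_struct *napi, int budget);	/* poll body not shown */

static void my_init_napi(struct net_device *dev, struct napi_struct *napi)
{
	/* netif_napi_add_weight() is only needed for a non-default weight;
	 * with the tunable static gone, the stock value is passed directly.
	 */
	netif_napi_add(dev, napi, my_poll, 64);
}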
Acked-by: Edward Cree Link: https://lore.kernel.org/r/20220512205603.1536771-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/efx_channels.c | 8 +------- drivers/net/ethernet/sfc/falcon/efx.c | 8 +------- drivers/net/ethernet/sfc/siena/efx_channels.c | 8 +------- 3 files changed, 3 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 79df636d6df8..f4919e7ee77b 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -46,11 +46,6 @@ module_param(irq_adapt_high_thresh, uint, 0644); MODULE_PARM_DESC(irq_adapt_high_thresh, "Threshold score for increasing IRQ moderation"); -/* This is the weight assigned to each of the (per-channel) virtual - * NAPI devices. - */ -static int napi_weight = 64; - static const struct efx_channel_type efx_default_channel_type; /************* @@ -1320,8 +1315,7 @@ void efx_init_napi_channel(struct efx_channel *channel) struct efx_nic *efx = channel->efx; channel->napi_dev = efx->net_dev; - netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll, - napi_weight); + netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64); } void efx_init_napi(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index f619ffb26787..a63f40b09856 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -112,11 +112,6 @@ module_param(ef4_separate_tx_channels, bool, 0444); MODULE_PARM_DESC(ef4_separate_tx_channels, "Use separate channels for TX and RX"); -/* This is the weight assigned to each of the (per-channel) virtual - * NAPI devices. - */ -static int napi_weight = 64; - /* This is the time (in jiffies) between invocations of the hardware * monitor. * On Falcon-based NICs, this will: @@ -2017,8 +2012,7 @@ static void ef4_init_napi_channel(struct ef4_channel *channel) struct ef4_nic *efx = channel->efx; channel->napi_dev = efx->net_dev; - netif_napi_add_weight(channel->napi_dev, &channel->napi_str, ef4_poll, - napi_weight); + netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll, 64); } static void ef4_init_napi(struct ef4_nic *efx) diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c index 28391875de69..2465cf4d505c 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.c +++ b/drivers/net/ethernet/sfc/siena/efx_channels.c @@ -46,11 +46,6 @@ module_param(irq_adapt_high_thresh, uint, 0644); MODULE_PARM_DESC(irq_adapt_high_thresh, "Threshold score for increasing IRQ moderation"); -/* This is the weight assigned to each of the (per-channel) virtual - * NAPI devices. - */ -static int napi_weight = 64; - static const struct efx_channel_type efx_default_channel_type; /************* @@ -1324,8 +1319,7 @@ static void efx_init_napi_channel(struct efx_channel *channel) struct efx_nic *efx = channel->efx; channel->napi_dev = efx->net_dev; - netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll, - napi_weight); + netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64); } void efx_siena_init_napi(struct efx_nic *efx) -- cgit v1.2.3 From 470bcfd6039b390f1bc29d9ce12461781f5cd7af Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 12 May 2022 14:26:21 -0700 Subject: ixgbe: add xdp frags support to ndo_xdp_xmit Add the capability to map non-linear xdp frames in XDP_TX and ndo_xdp_xmit callback. 
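Note (illustration, not part of any patch here): the commit message above is terse, so here is a minimal sketch of how a multi-buffer xdp_frame is walked. The head buffer comes from xdpf->data/xdpf->len, and any fragments live in the shared_info area reached via xdp_get_shared_info_from_frame(). The helper name xdp_frame_for_each_buf() and the map_one() callback are invented for illustration; the real ixgbe_xmit_xdp_ring() in the diff below interleaves this walk with DMA mapping and descriptor setup, and uses xdp_get_frame_len() for the total byte count.

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Illustrative only, not ixgbe code: visit every buffer of a possibly
 * multi-buffer xdp_frame.  @map_one is a hypothetical per-buffer callback
 * standing in for the DMA mapping and descriptor writes a real driver does.
 */
static int xdp_frame_for_each_buf(struct xdp_frame *xdpf,
				  int (*map_one)(void *data, unsigned int len,
						 void *ctx),
				  void *ctx)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
	int err;
	u8 i;

	/* Head buffer first ... */
	err = map_one(xdpf->data, xdpf->len, ctx);
	if (err)
		return err;

	/* ... then each fragment, if the frame carries any. */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];

		err = map_one(skb_frag_address(frag), skb_frag_size(frag), ctx);
		if (err)
			return err;
	}

	return 0;
}

A transmit path must also reserve nr_frags + 1 descriptors up front and roll back already-mapped buffers on a mapping failure, exactly as the hunk below does with its descriptor-count check and unmap label.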
Signed-off-by: Lorenzo Bianconi Tested-by: Sandeep Penigalapati Signed-off-by: Tony Nguyen Link: https://lore.kernel.org/r/20220512212621.3746140-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 99 +++++++++++++++++---------- 1 file changed, 63 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 19cde928d9b7..77c2e70b0860 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2344,6 +2344,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, hard_start = page_address(rx_buffer->page) + rx_buffer->page_offset - offset; xdp_prepare_buff(&xdp, hard_start, offset, size, true); + xdp_buff_clear_frags_flag(&xdp); #if (PAGE_SIZE > 4096) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); @@ -8571,57 +8572,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf) { - struct ixgbe_tx_buffer *tx_buffer; - union ixgbe_adv_tx_desc *tx_desc; - u32 len, cmd_type; - dma_addr_t dma; - u16 i; - - len = xdpf->len; + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; + u16 i = 0, index = ring->next_to_use; + struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index]; + struct ixgbe_tx_buffer *tx_buff = tx_head; + union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index); + u32 cmd_type, len = xdpf->len; + void *data = xdpf->data; - if (unlikely(!ixgbe_desc_unused(ring))) + if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags)) return IXGBE_XDP_CONSUMED; - dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); - if (dma_mapping_error(ring->dev, dma)) - return IXGBE_XDP_CONSUMED; + tx_head->bytecount = xdp_get_frame_len(xdpf); + tx_head->gso_segs = 1; + tx_head->xdpf = xdpf; - /* record the location of the first descriptor for this packet */ - tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; - tx_buffer->bytecount = len; - tx_buffer->gso_segs = 1; - tx_buffer->protocol = 0; + tx_desc->read.olinfo_status = + cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT); - i = ring->next_to_use; - tx_desc = IXGBE_TX_DESC(ring, i); + for (;;) { + dma_addr_t dma; - dma_unmap_len_set(tx_buffer, len, len); - dma_unmap_addr_set(tx_buffer, dma, dma); - tx_buffer->xdpf = xdpf; + dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) + goto unmap; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + dma_unmap_len_set(tx_buff, len, len); + dma_unmap_addr_set(tx_buff, dma, dma); + + cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS | len; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_buff->protocol = 0; + + if (++index == ring->count) + index = 0; + + if (i == nr_frags) + break; + + tx_buff = &ring->tx_buffer_info[index]; + tx_desc = IXGBE_TX_DESC(ring, index); + tx_desc->read.olinfo_status = 0; + data = skb_frag_address(&sinfo->frags[i]); + len = skb_frag_size(&sinfo->frags[i]); + i++; + } /* put descriptor type bits */ - cmd_type = IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_DEXT | - IXGBE_ADVTXD_DCMD_IFCS; - cmd_type |= len | IXGBE_TXD_CMD; - tx_desc->read.cmd_type_len = 
cpu_to_le32(cmd_type); - tx_desc->read.olinfo_status = - cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT); + tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); /* Avoid any potential race with xdp_xmit and cleanup */ smp_wmb(); - /* set next_to_watch value indicating a packet is present */ - i++; - if (i == ring->count) - i = 0; - - tx_buffer->next_to_watch = tx_desc; - ring->next_to_use = i; + tx_head->next_to_watch = tx_desc; + ring->next_to_use = index; return IXGBE_XDP_TX; + +unmap: + for (;;) { + tx_buff = &ring->tx_buffer_info[index]; + if (dma_unmap_len(tx_buff, len)) + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma), + dma_unmap_len(tx_buff, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buff, len, 0); + if (tx_buff == tx_head) + break; + + if (!index) + index += ring->count; + index--; + } + + return IXGBE_XDP_CONSUMED; } netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, -- cgit v1.2.3 From d9713088158b23973266e07fdc85ff7d68791a8c Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Thu, 12 May 2022 14:32:49 -0700 Subject: ice: Expose RSS indirection tables for queue groups via ethtool When ADQ queue groups (TCs) are created via tc mqprio command, RSS contexts and associated RSS indirection tables are configured automatically per TC based on the queue ranges specified for each traffic class. For ex: tc qdisc add dev enp175s0f0 root mqprio num_tc 3 map 0 1 2 \ queues 2@0 8@2 4@10 hw 1 mode channel will create 3 queue groups (TC 0-2) with queue ranges 2, 8 and 4 in 3 queue groups. Each queue group is associated with its own RSS context and RSS indirection table. Add support to expose RSS indirection tables for all ADQ queue groups using ethtool RSS contexts interface. ethtool -x enp175s0f0 context Signed-off-by: Sridhar Samudrala Signed-off-by: Sudheer Mogilappagari Tested-by: Bharathi Sreenivas Signed-off-by: Tony Nguyen Link: https://lore.kernel.org/r/20220512213249.3747424-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice_ethtool.c | 69 ++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 476bd1c83c87..1e71b70f0e52 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3111,36 +3111,47 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev) return np->vsi->rss_table_size; } -/** - * ice_get_rxfh - get the Rx flow hash indirection table - * @netdev: network interface device structure - * @indir: indirection table - * @key: hash key - * @hfunc: hash function - * - * Reads the indirection table directly from the hardware. 
- */ static int -ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +ice_get_rxfh_context(struct net_device *netdev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - int err, i; + u16 qcount, offset; + int err, num_tc, i; u8 *lut; + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + netdev_warn(netdev, "RSS is not supported on this VSI!\n"); + return -EOPNOTSUPP; + } + + if (rss_context && !ice_is_adq_active(pf)) { + netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n"); + return -EINVAL; + } + + qcount = vsi->mqprio_qopt.qopt.count[rss_context]; + offset = vsi->mqprio_qopt.qopt.offset[rss_context]; + + if (rss_context && ice_is_adq_active(pf)) { + num_tc = vsi->mqprio_qopt.qopt.num_tc; + if (rss_context >= num_tc) { + netdev_err(netdev, "RSS context:%d > num_tc:%d\n", + rss_context, num_tc); + return -EINVAL; + } + /* Use channel VSI of given TC */ + vsi = vsi->tc_map_vsi[rss_context]; + } + if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { - /* RSS not supported return error here */ - netdev_warn(netdev, "RSS is not configured on this VSI!\n"); - return -EIO; - } - lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; @@ -3153,14 +3164,35 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) if (err) goto out; + if (ice_is_adq_active(pf)) { + for (i = 0; i < vsi->rss_table_size; i++) + indir[i] = offset + lut[i] % qcount; + goto out; + } + for (i = 0; i < vsi->rss_table_size; i++) - indir[i] = (u32)(lut[i]); + indir[i] = lut[i]; out: kfree(lut); return err; } +/** + * ice_get_rxfh - get the Rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function + * + * Reads the indirection table directly from the hardware. + */ +static int +ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + return ice_get_rxfh_context(netdev, indir, key, hfunc, 0); +} + /** * ice_set_rxfh - set the Rx flow hash indirection table * @netdev: network interface device structure @@ -4102,6 +4134,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .set_pauseparam = ice_set_pauseparam, .get_rxfh_key_size = ice_get_rxfh_key_size, .get_rxfh_indir_size = ice_get_rxfh_indir_size, + .get_rxfh_context = ice_get_rxfh_context, .get_rxfh = ice_get_rxfh, .set_rxfh = ice_set_rxfh, .get_channels = ice_get_channels, -- cgit v1.2.3 From 44fa75f207d8a106bc75e6230db61e961fdbf8a8 Mon Sep 17 00:00:00 2001 From: Jonas Jelonek Date: Mon, 9 May 2022 19:39:57 +0200 Subject: mac80211: extend current rate control tx status API This patch adds the new struct ieee80211_rate_status and replaces 'struct rate_info *rate' in ieee80211_tx_status with pointer and length annotation. The struct ieee80211_rate_status allows to: (1) receive tx power status feedback for transmit power control (TPC) per packet or packet retry (2) dynamic mapping of wifi chip specific multi-rate retry (mrr) chains with different lengths (3) increase the limit of annotatable rate indices to support IEEE802.11ac rate sets and beyond ieee80211_tx_info, control and status buffer, and ieee80211_tx_rate cannot be used to achieve these goals due to fixed size limitations. Our new struct contains a struct rate_info to annotate the rate that was used, retry count of the rate and tx power. 
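A hypothetical driver-side sketch of struct ieee80211_rate_status in use (not part of this patch; the example_* function and the chosen MCS/power values are made up, the field and helper names follow the definitions added below):

#include <net/mac80211.h>

/* Report one frame that was sent at MCS 7 (two attempts) and then fell
 * back to MCS 4 (one attempt), through the extended status call.
 */
static void example_report_tx_status(struct ieee80211_hw *hw,
				     struct ieee80211_sta *sta,
				     struct sk_buff *skb)
{
	struct ieee80211_rate_status rates[2] = {};
	struct ieee80211_tx_status status = {
		.sta = sta,
		.skb = skb,
		.info = IEEE80211_SKB_CB(skb),
		.rates = rates,
		.n_rates = ARRAY_SIZE(rates),
	};

	rates[0].rate_idx.flags = RATE_INFO_FLAGS_MCS;
	rates[0].rate_idx.mcs = 7;
	rates[0].try_count = 2;
	rates[0].tx_power_idx = 0;	/* index into hw->tx_power_levels */

	rates[1].rate_idx.flags = RATE_INFO_FLAGS_MCS;
	rates[1].rate_idx.mcs = 4;
	rates[1].try_count = 1;
	rates[1].tx_power_idx = 0;

	ieee80211_tx_status_ext(hw, &status);
}
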
It is intended for all information related to RC and TPC that needs to be passed from driver to mac80211 and its RC/TPC algorithms like Minstrel_HT. It corresponds to one stage in an mrr. Multiple subsequent instances of this struct can be included in struct ieee80211_tx_status via a pointer and a length variable. Those instances can be allocated on-stack. The former reference to a single instance of struct rate_info is replaced with our new annotation. An extension is introduced to struct ieee80211_hw. There are two new members called 'tx_power_levels' and 'max_txpwr_levels_idx' acting as a tx power level table. When a wifi device is registered, the driver shall supply all supported power levels in this list. This allows to support several quirks like differing power steps in power level ranges or alike. TPC can use this for algorithm and thus be designed more abstract instead of handling all possible step widths individually. Further mandatory changes in status.c, mt76 and ath11k drivers due to the removal of 'struct rate_info *rate' are also included. status.c already uses the information in ieee80211_tx_status->rate in radiotap, this is now changed to use ieee80211_rate_status->rate_idx. mt76 driver already uses struct rate_info to pass the tx rate to status path. The new members of the ieee80211_tx_status are set to NULL and 0 because the previously passed rate is not relevant to rate control and accurate information is passed via tx_info->status.rates. For ath11k, the txrate can be passed via this struct because ath11k uses firmware RC and thus the information does not interfere with software RC. Compile-Tested: current wireless-next tree with all flags on Tested-on: Xiaomi 4A Gigabit (MediaTek MT7603E, MT7612E) with OpenWrt Linux 5.10.113 Signed-off-by: Jonas Jelonek Link: https://lore.kernel.org/r/20220509173958.1398201-2-jelonek.jonas@gmail.com Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath11k/dp_tx.c | 8 ++- drivers/net/wireless/mediatek/mt76/tx.c | 5 +- include/net/mac80211.h | 33 +++++++++++- net/mac80211/status.c | 91 +++++++++++++++++++-------------- 4 files changed, 92 insertions(+), 45 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 00a45819907e..c17a2620aad7 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -520,6 +520,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, struct hal_tx_status *ts) { struct ieee80211_tx_status status = { 0 }; + struct ieee80211_rate_status status_rate = { 0 }; struct ath11k_base *ab = ar->ab; struct ieee80211_tx_info *info; struct ath11k_skb_cb *skb_cb; @@ -603,7 +604,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, status.skb = msdu; status.info = info; rate = arsta->last_txrate; - status.rate = &rate; + + status_rate.rate_idx = rate; + status_rate.try_count = 1; + + status.rates = &status_rate; + status.n_rates = 1; spin_unlock_bh(&ab->base_lock); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 6b8c9dc80542..b49ef6619829 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -66,9 +66,8 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) wcid = rcu_dereference(dev->wcid[cb->wcid]); if (wcid) { status.sta = wcid_to_sta(wcid); - - if (status.sta) - status.rate = &wcid->rate; + status.rates = NULL; + status.n_rates = 0; } hw = 
mt76_tx_status_get_hw(dev, skb); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 944b31f0eb0d..ebadb2103968 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1143,20 +1143,41 @@ ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info) return info->tx_time_est << 2; } +/*** + * struct ieee80211_rate_status - mrr stage for status path + * + * This struct is used in struct ieee80211_tx_status to provide drivers a + * dynamic way to report about used rates and power levels per packet. + * + * @rate_idx The actual used rate. + * @try_count How often the rate was tried. + * @tx_power_idx An idx into the ieee80211_hw->tx_power_levels list of the + * corresponding wifi hardware. The idx shall point to the power level + * that was used when sending the packet. + */ +struct ieee80211_rate_status { + struct rate_info rate_idx; + u8 try_count; + u8 tx_power_idx; +}; + /** * struct ieee80211_tx_status - extended tx status info for rate control * * @sta: Station that the packet was transmitted for * @info: Basic tx status information * @skb: Packet skb (can be NULL if not provided by the driver) - * @rate: The TX rate that was used when sending the packet + * @rates: Mrr stages that were used when sending the packet + * @n_rates: Number of mrr stages (count of instances for @rates) * @free_list: list where processed skbs are stored to be free'd by the driver */ struct ieee80211_tx_status { struct ieee80211_sta *sta; struct ieee80211_tx_info *info; struct sk_buff *skb; - struct rate_info *rate; + struct ieee80211_rate_status *rates; + u8 n_rates; + struct list_head *free_list; }; @@ -2657,6 +2678,12 @@ enum ieee80211_hw_flags { * refilling deficit of each TXQ. * * @max_mtu: the max mtu could be set. + * + * @tx_power_levels: a list of power levels supported by the wifi hardware. + * The power levels can be specified either as integer or fractions. + * The power level at idx 0 shall be the maximum positive power level. + * + * @max_txpwr_levels_idx: the maximum valid idx of 'tx_power_levels' list. 
*/ struct ieee80211_hw { struct ieee80211_conf conf; @@ -2695,6 +2722,8 @@ struct ieee80211_hw { u8 tx_sk_pacing_shift; u8 weight_multiplier; u32 max_mtu; + const s8 *tx_power_levels; + u8 max_txpwr_levels_idx; }; static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw, diff --git a/net/mac80211/status.c b/net/mac80211/status.c index c563fa718d84..e69272139437 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -247,15 +247,19 @@ static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn) static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info, struct ieee80211_tx_status *status) { + struct ieee80211_rate_status *status_rate = NULL; int len = sizeof(struct ieee80211_radiotap_header); + if (status && status->n_rates) + status_rate = &status->rates[status->n_rates - 1]; + /* IEEE80211_RADIOTAP_RATE rate */ - if (status && status->rate && !(status->rate->flags & - (RATE_INFO_FLAGS_MCS | - RATE_INFO_FLAGS_DMG | - RATE_INFO_FLAGS_EDMG | - RATE_INFO_FLAGS_VHT_MCS | - RATE_INFO_FLAGS_HE_MCS))) + if (status_rate && !(status_rate->rate_idx.flags & + (RATE_INFO_FLAGS_MCS | + RATE_INFO_FLAGS_DMG | + RATE_INFO_FLAGS_EDMG | + RATE_INFO_FLAGS_VHT_MCS | + RATE_INFO_FLAGS_HE_MCS))) len += 2; else if (info->status.rates[0].idx >= 0 && !(info->status.rates[0].flags & @@ -270,12 +274,12 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info, /* IEEE80211_RADIOTAP_MCS * IEEE80211_RADIOTAP_VHT */ - if (status && status->rate) { - if (status->rate->flags & RATE_INFO_FLAGS_MCS) + if (status_rate) { + if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS) len += 3; - else if (status->rate->flags & RATE_INFO_FLAGS_VHT_MCS) + else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS) len = ALIGN(len, 2) + 12; - else if (status->rate->flags & RATE_INFO_FLAGS_HE_MCS) + else if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_HE_MCS) len = ALIGN(len, 2) + 12; } else if (info->status.rates[0].idx >= 0) { if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) @@ -297,10 +301,14 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_radiotap_header *rthdr; + struct ieee80211_rate_status *status_rate = NULL; unsigned char *pos; u16 legacy_rate = 0; u16 txflags; + if (status && status->n_rates) + status_rate = &status->rates[status->n_rates - 1]; + rthdr = skb_push(skb, rtap_len); memset(rthdr, 0, rtap_len); @@ -318,13 +326,14 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, /* IEEE80211_RADIOTAP_RATE */ - if (status && status->rate) { - if (!(status->rate->flags & (RATE_INFO_FLAGS_MCS | - RATE_INFO_FLAGS_DMG | - RATE_INFO_FLAGS_EDMG | - RATE_INFO_FLAGS_VHT_MCS | - RATE_INFO_FLAGS_HE_MCS))) - legacy_rate = status->rate->legacy; + if (status_rate) { + if (!(status_rate->rate_idx.flags & + (RATE_INFO_FLAGS_MCS | + RATE_INFO_FLAGS_DMG | + RATE_INFO_FLAGS_EDMG | + RATE_INFO_FLAGS_VHT_MCS | + RATE_INFO_FLAGS_HE_MCS))) + legacy_rate = status_rate->rate_idx.legacy; } else if (info->status.rates[0].idx >= 0 && !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))) @@ -357,20 +366,21 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, *pos = retry_count; pos++; - if (status && status->rate && - (status->rate->flags & RATE_INFO_FLAGS_MCS)) { + if (status_rate && (status_rate->rate_idx.flags & RATE_INFO_FLAGS_MCS)) + { rthdr->it_present |= 
cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS | IEEE80211_RADIOTAP_MCS_HAVE_GI | IEEE80211_RADIOTAP_MCS_HAVE_BW; - if (status->rate->flags & RATE_INFO_FLAGS_SHORT_GI) + if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI) pos[1] |= IEEE80211_RADIOTAP_MCS_SGI; - if (status->rate->bw == RATE_INFO_BW_40) + if (status_rate->rate_idx.bw == RATE_INFO_BW_40) pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40; - pos[2] = status->rate->mcs; + pos[2] = status_rate->rate_idx.mcs; pos += 3; - } else if (status && status->rate && - (status->rate->flags & RATE_INFO_FLAGS_VHT_MCS)) { + } else if (status_rate && (status_rate->rate_idx.flags & + RATE_INFO_FLAGS_VHT_MCS)) + { u16 known = local->hw.radiotap_vht_details & (IEEE80211_RADIOTAP_VHT_KNOWN_GI | IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH); @@ -385,12 +395,12 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, pos += 2; /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */ - if (status->rate->flags & RATE_INFO_FLAGS_SHORT_GI) + if (status_rate->rate_idx.flags & RATE_INFO_FLAGS_SHORT_GI) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; pos++; /* u8 bandwidth */ - switch (status->rate->bw) { + switch (status_rate->rate_idx.bw) { case RATE_INFO_BW_160: *pos = 11; break; @@ -407,7 +417,8 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, pos++; /* u8 mcs_nss[4] */ - *pos = (status->rate->mcs << 4) | status->rate->nss; + *pos = (status_rate->rate_idx.mcs << 4) | + status_rate->rate_idx.nss; pos += 4; /* u8 coding */ @@ -416,8 +427,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, pos++; /* u16 partial_aid */ pos += 2; - } else if (status && status->rate && - (status->rate->flags & RATE_INFO_FLAGS_HE_MCS)) { + } else if (status_rate && (status_rate->rate_idx.flags & + RATE_INFO_FLAGS_HE_MCS)) + { struct ieee80211_radiotap_he *he; rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE)); @@ -435,7 +447,7 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) - he->data6 |= HE_PREP(DATA6_NSTS, status->rate->nss); + he->data6 |= HE_PREP(DATA6_NSTS, status_rate->rate_idx.nss); #define CHECK_GI(s) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ @@ -445,12 +457,12 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, CHECK_GI(1_6); CHECK_GI(3_2); - he->data3 |= HE_PREP(DATA3_DATA_MCS, status->rate->mcs); - he->data3 |= HE_PREP(DATA3_DATA_DCM, status->rate->he_dcm); + he->data3 |= HE_PREP(DATA3_DATA_MCS, status_rate->rate_idx.mcs); + he->data3 |= HE_PREP(DATA3_DATA_DCM, status_rate->rate_idx.he_dcm); - he->data5 |= HE_PREP(DATA5_GI, status->rate->he_gi); + he->data5 |= HE_PREP(DATA5_GI, status_rate->rate_idx.he_gi); - switch (status->rate->bw) { + switch (status_rate->rate_idx.bw) { case RATE_INFO_BW_20: he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); @@ -481,16 +493,16 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, CHECK_RU_ALLOC(2x996); he->data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, - status->rate->he_ru_alloc + 4); + status_rate->rate_idx.he_ru_alloc + 4); break; default: - WARN_ONCE(1, "Invalid SU BW %d\n", status->rate->bw); + WARN_ONCE(1, "Invalid SU BW %d\n", status_rate->rate_idx.bw); } pos += sizeof(struct ieee80211_radiotap_he); } - if ((status && status->rate) || info->status.rates[0].idx < 0) + if (status_rate || info->status.rates[0].idx < 0) return; /* IEEE80211_RADIOTAP_MCS @@ -1111,8 +1123,9 @@ void 
ieee80211_tx_status_ext(struct ieee80211_hw *hw, if (pubsta) { sta = container_of(pubsta, struct sta_info, sta); - if (status->rate) - sta->deflink.tx_stats.last_rate_info = *status->rate; + if (status->n_rates) + sta->deflink.tx_stats.last_rate_info = + status->rates[status->n_rates - 1].rate_idx; } if (skb && (tx_time_est = -- cgit v1.2.3 From cb99badde146c327f150773921ffe080abe1eb44 Mon Sep 17 00:00:00 2001 From: Phil Edworthy Date: Thu, 12 May 2022 12:47:19 +0100 Subject: ravb: Separate handling of irq enable/disable regs into feature Currently, when the HW has a single interrupt, the driver uses the GIC, TIC, RIC0 registers to enable and disable interrupts. When the HW has multiple interrupts, it uses the GIE, GID, TIE, TID, RIE0, RID0 registers. However, other devices, e.g. RZ/V2M, have multiple irqs and only have the GIC, TIC, RIC0 registers. Therefore, split this into a separate feature. Signed-off-by: Phil Edworthy Reviewed-by: Biju Das Reviewed-by: Sergey Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb.h | 1 + drivers/net/ethernet/renesas/ravb_main.c | 5 +++-- drivers/net/ethernet/renesas/ravb_ptp.c | 6 +++--- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 08062d73df10..bb82efd222c7 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1027,6 +1027,7 @@ struct ravb_hw_info { unsigned tx_counters:1; /* E-MAC has TX counters */ unsigned carrier_counters:1; /* E-MAC has carrier counters */ unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */ + unsigned irq_en_dis:1; /* Has separate irq enable and disable regs */ unsigned gptp:1; /* AVB-DMAC has gPTP support */ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */ unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 525d66f71f02..e22c0e6ed0f3 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1124,7 +1124,7 @@ static bool ravb_queue_interrupt(struct net_device *ndev, int q) if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { if (napi_schedule_prep(&priv->napi[q])) { /* Mask RX and TX interrupts */ - if (!info->multi_irqs) { + if (!info->irq_en_dis) { ravb_write(ndev, ric0 & ~BIT(q), RIC0); ravb_write(ndev, tic & ~BIT(q), TIC); } else { @@ -1306,7 +1306,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - if (!info->multi_irqs) { + if (!info->irq_en_dis) { ravb_modify(ndev, RIC0, mask, mask); ravb_modify(ndev, TIC, mask, mask); } else { @@ -2410,6 +2410,7 @@ static const struct ravb_hw_info ravb_gen3_hw_info = { .internal_delay = 1, .tx_counters = 1, .multi_irqs = 1, + .irq_en_dis = 1, .ccc_gac = 1, .nc_queues = 1, .magic_pkt = 1, diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index c099656dd75b..87c4306d66ec 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -198,7 +198,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - if (!info->multi_irqs) + if (!info->irq_en_dis) ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); else if (on) ravb_write(ndev, GIE_PTCS, GIE); @@ -254,7 +254,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - if (!info->multi_irqs) + if (!info->irq_en_dis) ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); else ravb_write(ndev, GIE_PTMS0, GIE); @@ -266,7 +266,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - if (!info->multi_irqs) + if (!info->irq_en_dis) ravb_modify(ndev, GIC, GIC_PTME, 0); else ravb_write(ndev, GID_PTMD0, GID); -- cgit v1.2.3 From b0265dcba3d6c1689e6ce315bed09192fb587403 Mon Sep 17 00:00:00 2001 From: Phil Edworthy Date: Thu, 12 May 2022 12:47:20 +0100 Subject: ravb: Support separate Line0 (Desc), Line1 (Err) and Line2 (Mgmt) irqs R-Car has a combined interrupt line, ch22 = Line0_DiA | Line1_A | Line2_A. RZ/V2M has separate interrupt lines for each of these, so add a feature that allows the driver to get these interrupts and call the common handler. Signed-off-by: Phil Edworthy Reviewed-by: Biju Das Reviewed-by: Sergey Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb.h | 3 ++ drivers/net/ethernet/renesas/ravb_main.c | 56 ++++++++++++++++++++++++++++---- 2 files changed, 53 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index bb82efd222c7..e505e8088445 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1028,6 +1028,7 @@ struct ravb_hw_info { unsigned carrier_counters:1; /* E-MAC has carrier counters */ unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */ unsigned irq_en_dis:1; /* Has separate irq enable and disable regs */ + unsigned err_mgmt_irqs:1; /* Line1 (Err) and Line2 (Mgmt) irqs are separate */ unsigned gptp:1; /* AVB-DMAC has gPTP support */ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */ unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */ @@ -1078,6 +1079,8 @@ struct ravb_private { int msg_enable; int speed; int emac_irq; + int erra_irq; + int mgmta_irq; int rx_irqs[NUM_RX_QUEUE]; int tx_irqs[NUM_TX_QUEUE]; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index e22c0e6ed0f3..8ccc817b8b5d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1798,12 +1798,23 @@ static int ravb_open(struct net_device *ndev) ndev, dev, "ch19:tx_nc"); if (error) goto out_free_irq_nc_rx; + + if (info->err_mgmt_irqs) { + error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt, + ndev, dev, "err_a"); + if (error) + goto out_free_irq_nc_tx; + error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt, + ndev, dev, "mgmt_a"); + if (error) + goto out_free_irq_erra; + } } /* Device init */ error = ravb_dmac_init(ndev); if (error) - goto out_free_irq_nc_tx; + goto out_free_irq_mgmta; ravb_emac_init(ndev); /* Initialise PTP Clock driver */ @@ -1823,9 +1834,15 @@ out_ptp_stop: /* Stop PTP Clock driver */ if (info->gptp) ravb_ptp_stop(ndev); -out_free_irq_nc_tx: +out_free_irq_mgmta: if (!info->multi_irqs) goto out_free_irq; + if (info->err_mgmt_irqs) + free_irq(priv->mgmta_irq, ndev); +out_free_irq_erra: + if (info->err_mgmt_irqs) + free_irq(priv->erra_irq, ndev); +out_free_irq_nc_tx: free_irq(priv->tx_irqs[RAVB_NC], ndev); out_free_irq_nc_rx: free_irq(priv->rx_irqs[RAVB_NC], ndev); @@ -2166,6 +2183,10 @@ static int 
ravb_close(struct net_device *ndev) free_irq(priv->tx_irqs[RAVB_BE], ndev); free_irq(priv->rx_irqs[RAVB_BE], ndev); free_irq(priv->emac_irq, ndev); + if (info->err_mgmt_irqs) { + free_irq(priv->erra_irq, ndev); + free_irq(priv->mgmta_irq, ndev); + } } free_irq(ndev->irq, ndev); @@ -2595,10 +2616,14 @@ static int ravb_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - if (info->multi_irqs) - irq = platform_get_irq_byname(pdev, "ch22"); - else + if (info->multi_irqs) { + if (info->err_mgmt_irqs) + irq = platform_get_irq_byname(pdev, "dia"); + else + irq = platform_get_irq_byname(pdev, "ch22"); + } else { irq = platform_get_irq(pdev, 0); + } if (irq < 0) { error = irq; goto out_release; @@ -2640,7 +2665,10 @@ static int ravb_probe(struct platform_device *pdev) of_property_read_bool(np, "renesas,ether-link-active-low"); if (info->multi_irqs) { - irq = platform_get_irq_byname(pdev, "ch24"); + if (info->err_mgmt_irqs) + irq = platform_get_irq_byname(pdev, "line3"); + else + irq = platform_get_irq_byname(pdev, "ch24"); if (irq < 0) { error = irq; goto out_release; @@ -2662,6 +2690,22 @@ static int ravb_probe(struct platform_device *pdev) } priv->tx_irqs[i] = irq; } + + if (info->err_mgmt_irqs) { + irq = platform_get_irq_byname(pdev, "err_a"); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->erra_irq = irq; + + irq = platform_get_irq_byname(pdev, "mgmt_a"); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->mgmta_irq = irq; + } } priv->clk = devm_clk_get(&pdev->dev, NULL); -- cgit v1.2.3 From 72069a7b2821443f57e5734f91e19936c48e4809 Mon Sep 17 00:00:00 2001 From: Phil Edworthy Date: Thu, 12 May 2022 12:47:21 +0100 Subject: ravb: Use separate clock for gPTP RZ/V2M has a separate gPTP reference clock that is used when the AVB-DMAC Mode Register (CCC) gPTP Clock Select (CSEL) bits are set to "01: High-speed peripheral bus clock". Therefore, add a feature that allows this clock to be used for gPTP. Signed-off-by: Phil Edworthy Reviewed-by: Biju Das Reviewed-by: Sergey Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/ravb.h | 2 ++ drivers/net/ethernet/renesas/ravb_main.c | 22 +++++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index e505e8088445..b980bce763d3 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1031,6 +1031,7 @@ struct ravb_hw_info { unsigned err_mgmt_irqs:1; /* Line1 (Err) and Line2 (Mgmt) irqs are separate */ unsigned gptp:1; /* AVB-DMAC has gPTP support */ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */ + unsigned gptp_ref_clk:1; /* gPTP has separate reference clock */ unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */ unsigned magic_pkt:1; /* E-MAC supports magic packet detection */ unsigned half_duplex:1; /* E-MAC supports half duplex mode */ @@ -1042,6 +1043,7 @@ struct ravb_private { void __iomem *addr; struct clk *clk; struct clk *refclk; + struct clk *gptp_clk; struct mdiobb_ctrl mdiobb; u32 num_rx_ring[NUM_RX_QUEUE]; u32 num_tx_ring[NUM_TX_QUEUE]; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 8ccc817b8b5d..9d281f74abd0 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2495,11 +2495,15 @@ MODULE_DEVICE_TABLE(of, ravb_match_table); static int ravb_set_gti(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct device *dev = ndev->dev.parent; unsigned long rate; uint64_t inc; - rate = clk_get_rate(priv->clk); + if (info->gptp_ref_clk) + rate = clk_get_rate(priv->gptp_clk); + else + rate = clk_get_rate(priv->clk); if (!rate) return -EINVAL; @@ -2721,6 +2725,15 @@ static int ravb_probe(struct platform_device *pdev) } clk_prepare_enable(priv->refclk); + if (info->gptp_ref_clk) { + priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); + if (IS_ERR(priv->gptp_clk)) { + error = PTR_ERR(priv->gptp_clk); + goto out_disable_refclk; + } + clk_prepare_enable(priv->gptp_clk); + } + ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; @@ -2742,7 +2755,7 @@ static int ravb_probe(struct platform_device *pdev) /* Set GTI value */ error = ravb_set_gti(ndev); if (error) - goto out_disable_refclk; + goto out_disable_gptp_clk; /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); @@ -2762,7 +2775,7 @@ static int ravb_probe(struct platform_device *pdev) "Cannot allocate desc base address table (size %d bytes)\n", priv->desc_bat_size); error = -ENOMEM; - goto out_disable_refclk; + goto out_disable_gptp_clk; } for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) priv->desc_bat[q].die_dt = DT_EOS; @@ -2825,6 +2838,8 @@ out_dma_free: /* Stop PTP Clock driver */ if (info->ccc_gac) ravb_ptp_stop(ndev); +out_disable_gptp_clk: + clk_disable_unprepare(priv->gptp_clk); out_disable_refclk: clk_disable_unprepare(priv->refclk); out_release: @@ -2846,6 +2861,7 @@ static int ravb_remove(struct platform_device *pdev) if (info->ccc_gac) ravb_ptp_stop(ndev); + clk_disable_unprepare(priv->gptp_clk); clk_disable_unprepare(priv->refclk); dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, -- cgit v1.2.3 From e1154be73153ab6aafa1546d998987b692ffdc02 Mon Sep 17 00:00:00 2001 From: Phil Edworthy Date: Thu, 12 May 2022 12:47:22 +0100 Subject: ravb: Add support for RZ/V2M RZ/V2M Ethernet is very similar to R-Car Gen3 
Ethernet-AVB, though some small parts are the same as R-Car Gen2. Other differences to R-Car Gen3 and Gen2 are: * It has separate data (DI), error (Line 1) and management (Line 2) irqs rather than one irq for all three. * Instead of using the High-speed peripheral bus clock for gPTP, it has a separate gPTP reference clock. Signed-off-by: Phil Edworthy Reviewed-by: Biju Das Reviewed-by: Sergey Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 9d281f74abd0..b357ac4c56c5 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2460,6 +2460,31 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .magic_pkt = 1, }; +static const struct ravb_hw_info ravb_rzv2m_hw_info = { + .rx_ring_free = ravb_rx_ring_free_rcar, + .rx_ring_format = ravb_rx_ring_format_rcar, + .alloc_rx_desc = ravb_alloc_rx_desc_rcar, + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_buf_size = SZ_2K, + .multi_irqs = 1, + .err_mgmt_irqs = 1, + .gptp = 1, + .gptp_ref_clk = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + static const struct ravb_hw_info gbeth_hw_info = { .rx_ring_free = ravb_rx_ring_free_gbeth, .rx_ring_format = ravb_rx_ring_format_gbeth, @@ -2487,6 +2512,7 @@ static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info }, { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, { } }; -- cgit v1.2.3 From 7c4e983c4f3cf94fcd879730c6caa877e0768a4d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 13 May 2022 11:33:57 -0700 Subject: net: allow gso_max_size to exceed 65536 The code for gso_max_size was added originally to allow for debugging and workaround of buggy devices that couldn't support TSO with blocks 64K in size. The original reason for limiting it to 64K was because that was the existing limits of IPv4 and non-jumbogram IPv6 length fields. With the addition of Big TCP we can remove this limit and allow the value to potentially go up to UINT_MAX and instead be limited by the tso_max_size value. So in order to support this we need to go through and clean up the remaining users of the gso_max_size value so that the values will cap at 64K for non-TCPv6 flows. In addition we can clean up the GSO_MAX_SIZE value so that 64K becomes GSO_LEGACY_MAX_SIZE and UINT_MAX will now be the upper limit for GSO_MAX_SIZE. v6: (edumazet) fixed a compile error if CONFIG_IPV6=n, in a new sk_trim_gso_size() helper. netif_set_tso_max_size() caps the requested TSO size with GSO_MAX_SIZE. Signed-off-by: Alexander Duyck Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- drivers/net/ethernet/sfc/ef100_nic.c | 3 ++- drivers/net/ethernet/sfc/falcon/tx.c | 3 ++- drivers/net/ethernet/sfc/tx_common.c | 3 ++- drivers/net/ethernet/synopsys/dwc-xlgmac.h | 3 ++- drivers/net/hyperv/rndis_filter.c | 2 +- drivers/scsi/fcoe/fcoe.c | 2 +- include/linux/netdevice.h | 4 +++- net/bpf/test_run.c | 2 +- net/core/dev.c | 7 ++++--- net/core/rtnetlink.c | 2 +- net/core/sock.c | 14 ++++++++++++++ net/ipv4/tcp_bbr.c | 2 +- net/ipv4/tcp_output.c | 2 +- net/sctp/output.c | 3 ++- 16 files changed, 40 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 607a2c90513b..d9547552ceef 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -151,7 +151,8 @@ #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) /* Descriptors required for maximum contiguous TSO/GSO packet */ -#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1) +#define XGBE_TX_MAX_SPLIT \ + ((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1) /* Maximum possible descriptors needed for an SKB: * - Maximum number of SKB frags diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index fb11081001a0..838870bc6dbd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -2038,7 +2038,7 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt) { int nr_frags = skb_shinfo(skb)->nr_frags; - return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE; + return PAGE_SIZE * nr_frags + data_bcnt <= GRO_MAX_SIZE; } static void diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index a69d756e09b9..b2536d2c218a 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -1008,7 +1008,8 @@ static int ef100_process_design_param(struct efx_nic *efx, } return 0; case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN: - nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE); + nic_data->tso_max_payload_len = min_t(u64, reader->value, + GSO_LEGACY_MAX_SIZE); netif_set_tso_max_size(efx->net_dev, nic_data->tso_max_payload_len); return 0; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index f7306e93a8b8..b9369483758c 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -98,7 +98,8 @@ unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx) /* Possibly more for PCIe page boundaries within input fragments */ if (PAGE_SIZE > EF4_PAGE_SIZE) max_descs += max_t(unsigned int, MAX_SKB_FRAGS, - DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE)); + DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE, + EF4_PAGE_SIZE)); return max_descs; } diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 9bc8281b7f5b..658ea2d34070 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -416,7 +416,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) /* Possibly more for PCIe page boundaries within input fragments */ if (PAGE_SIZE > EFX_PAGE_SIZE) max_descs += max_t(unsigned int, MAX_SKB_FRAGS, - DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE, + EFX_PAGE_SIZE)); return max_descs; } diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h 
b/drivers/net/ethernet/synopsys/dwc-xlgmac.h index 98e3a271e017..a848e10f3ea4 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h @@ -38,7 +38,8 @@ #define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3) /* Descriptors required for maximum contiguous TSO/GSO packet */ -#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1) +#define XLGMAC_TX_MAX_SPLIT \ + ((GSO_LEGACY_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1) /* Maximum possible descriptors needed for a SKB */ #define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2) diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 866af2cc27a3..6da36cb8af80 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1349,7 +1349,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, struct net_device_context *net_device_ctx = netdev_priv(net); struct ndis_offload hwcaps; struct ndis_offload_params offloads; - unsigned int gso_max_size = GSO_MAX_SIZE; + unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE; int ret; /* Find HW offload capabilities */ diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 44ca6110213c..79b2827e4081 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -667,7 +667,7 @@ static void fcoe_netdev_features_change(struct fc_lport *lport, if (netdev->features & NETIF_F_FSO) { lport->seq_offload = 1; - lport->lso_max = netdev->gso_max_size; + lport->lso_max = min(netdev->gso_max_size, GSO_LEGACY_MAX_SIZE); FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n", lport->lso_max); } else { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 536321691c72..ce780e352f43 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2272,7 +2272,9 @@ struct net_device { const struct rtnl_link_ops *rtnl_link_ops; /* for setting kernel sock attribute on TCP connection setup */ -#define GSO_MAX_SIZE 65536 +#define GSO_LEGACY_MAX_SIZE 65536u +#define GSO_MAX_SIZE UINT_MAX + unsigned int gso_max_size; #define TSO_LEGACY_MAX_SIZE 65536 #define TSO_MAX_SIZE UINT_MAX diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 8d54fef9a568..9b5a1f630bb0 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -1001,7 +1001,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) cb->pkt_len = skb->len; } else { if (__skb->wire_len < skb->len || - __skb->wire_len > GSO_MAX_SIZE) + __skb->wire_len > GSO_LEGACY_MAX_SIZE) return -EINVAL; cb->pkt_len = __skb->wire_len; } diff --git a/net/core/dev.c b/net/core/dev.c index a601da3b4a7c..830beb05161a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2998,11 +2998,12 @@ EXPORT_SYMBOL(netif_set_real_num_queues); * @size: max skb->len of a TSO frame * * Set the limit on the size of TSO super-frames the device can handle. - * Unless explicitly set the stack will assume the value of %GSO_MAX_SIZE. + * Unless explicitly set the stack will assume the value of + * %GSO_LEGACY_MAX_SIZE. 
*/ void netif_set_tso_max_size(struct net_device *dev, unsigned int size) { - dev->tso_max_size = size; + dev->tso_max_size = min(GSO_MAX_SIZE, size); if (size < READ_ONCE(dev->gso_max_size)) netif_set_gso_max_size(dev, size); } @@ -10595,7 +10596,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev_net_set(dev, &init_net); - dev->gso_max_size = GSO_MAX_SIZE; + dev->gso_max_size = GSO_LEGACY_MAX_SIZE; dev->gso_max_segs = GSO_MAX_SEGS; dev->gro_max_size = GRO_MAX_SIZE; dev->tso_max_size = TSO_LEGACY_MAX_SIZE; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f35cc21298ac..f2b0f747d3d2 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2817,7 +2817,7 @@ static int do_setlink(const struct sk_buff *skb, if (tb[IFLA_GSO_MAX_SIZE]) { u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); - if (max_size > GSO_MAX_SIZE || max_size > dev->tso_max_size) { + if (max_size > dev->tso_max_size) { err = -EINVAL; goto errout; } diff --git a/net/core/sock.c b/net/core/sock.c index 6b287eb5427b..24a46a1e4f28 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2293,6 +2293,19 @@ void sk_free_unlock_clone(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_free_unlock_clone); +static void sk_trim_gso_size(struct sock *sk) +{ + if (sk->sk_gso_max_size <= GSO_LEGACY_MAX_SIZE) + return; +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6 && + sk_is_tcp(sk) && + !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) + return; +#endif + sk->sk_gso_max_size = GSO_LEGACY_MAX_SIZE; +} + void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { u32 max_segs = 1; @@ -2312,6 +2325,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; /* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */ sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size); + sk_trim_gso_size(sk); sk->sk_gso_max_size -= (MAX_TCP_HEADER + 1); /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1); diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index c7d30a3bbd81..075e744bfb48 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -310,7 +310,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk) */ bytes = min_t(unsigned long, sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift), - GSO_MAX_SIZE - 1 - MAX_TCP_HEADER); + GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER); segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk)); return min(segs, 0x7FU); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index b092228e4342..b4b2284ed4a2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1553,7 +1553,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, * SO_SNDBUF values. * Also allow first and last skb in retransmit queue to be split. */ - limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE); + limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); if (unlikely((sk->sk_wmem_queued >> 1) > limit && tcp_queue != TCP_FRAG_IN_WRITE_QUEUE && skb != tcp_rtx_queue_head(sk) && diff --git a/net/sctp/output.c b/net/sctp/output.c index 72fe6669c50d..a63df055ac57 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -134,7 +134,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, dst_hold(tp->dst); sk_setup_caps(sk, tp->dst); } - packet->max_size = sk_can_gso(sk) ? READ_ONCE(tp->dst->dev->gso_max_size) + packet->max_size = sk_can_gso(sk) ? 
min(READ_ONCE(tp->dst->dev->gso_max_size), + GSO_LEGACY_MAX_SIZE) : asoc->pathmtu; rcu_read_unlock(); } -- cgit v1.2.3 From 0fe79f28bfaf73b66b7b1562d2468f94aa03bd12 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 13 May 2022 11:34:03 -0700 Subject: net: allow gro_max_size to exceed 65536 Allow the gro_max_size to exceed a value larger than 65536. There weren't really any external limitations that prevented this other than the fact that IPv4 only supports a 16 bit length field. Since we have the option of adding a hop-by-hop header for IPv6 we can allow IPv6 to exceed this value and for IPv4 and non-TCP flows we can cap things at 65536 via a constant rather than relying on gro_max_size. [edumazet] limit GRO_MAX_SIZE to (8 * 65535) to avoid overflows. Signed-off-by: Alexander Duyck Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 +- include/linux/netdevice.h | 6 +++++- include/net/ipv6.h | 2 +- net/core/dev.c | 2 +- net/core/gro.c | 8 ++++++++ net/core/rtnetlink.c | 8 -------- 6 files changed, 16 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 838870bc6dbd..24de37b79f5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -2038,7 +2038,7 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt) { int nr_frags = skb_shinfo(skb)->nr_frags; - return PAGE_SIZE * nr_frags + data_bcnt <= GRO_MAX_SIZE; + return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE; } static void diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index fd38847d0dc7..d57ce248004c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2161,7 +2161,11 @@ struct net_device { struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; int napi_defer_hard_irqs; -#define GRO_MAX_SIZE 65536 +#define GRO_LEGACY_MAX_SIZE 65536u +/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), + * and shinfo->gso_segs is a 16bit field. 
+ */ +#define GRO_MAX_SIZE (8 * 65535u) unsigned int gro_max_size; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index b6df0314aa02..5b38bf1a586b 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -477,7 +477,7 @@ static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb) const struct hop_jumbo_hdr *jhdr; const struct ipv6hdr *nhdr; - if (likely(skb->len <= GRO_MAX_SIZE)) + if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) return 0; if (skb->protocol != htons(ETH_P_IPV6)) diff --git a/net/core/dev.c b/net/core/dev.c index 830beb05161a..d93456c75b55 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10598,7 +10598,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev->gso_max_size = GSO_LEGACY_MAX_SIZE; dev->gso_max_segs = GSO_MAX_SEGS; - dev->gro_max_size = GRO_MAX_SIZE; + dev->gro_max_size = GRO_LEGACY_MAX_SIZE; dev->tso_max_size = TSO_LEGACY_MAX_SIZE; dev->tso_max_segs = TSO_MAX_SEGS; dev->upper_level = 1; diff --git a/net/core/gro.c b/net/core/gro.c index 78110edf5d4b..b4190eb08467 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -167,6 +167,14 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush)) return -E2BIG; + if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) { + if (p->protocol != htons(ETH_P_IPV6) || + skb_headroom(p) < sizeof(struct hop_jumbo_hdr) || + ipv6_hdr(p)->nexthdr != IPPROTO_TCP || + p->encapsulation) + return -E2BIG; + } + lp = NAPI_GRO_CB(p)->last; pinfo = skb_shinfo(lp); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f2b0f747d3d2..ac45328607f7 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2360,14 +2360,6 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], } } - if (tb[IFLA_GRO_MAX_SIZE]) { - u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]); - - if (gro_max_size > GRO_MAX_SIZE) { - NL_SET_ERR_MSG(extack, "too big gro_max_size"); - return -EINVAL; - } - } return 0; } -- cgit v1.2.3 From d6f938ce52f9adb23f4c31cc371654a5f18ff328 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 May 2022 11:34:05 -0700 Subject: net: loopback: enable BIG TCP packets Set the driver limit to GSO_MAX_SIZE (512 KB). This allows the admin/user to set a GSO limit up to this value. 
Tested: ip link set dev lo gso_max_size 200000 netperf -H ::1 -t TCP_RR -l 100 -- -r 80000,80000 & tcpdump shows : 18:28:42.962116 IP6 ::1 > ::1: HBH 40051 > 63780: Flags [P.], seq 3626480001:3626560001, ack 3626560001, win 17743, options [nop,nop,TS val 3771179265 ecr 3771179265], length 80000 18:28:42.962138 IP6 ::1.63780 > ::1.40051: Flags [.], ack 3626560001, win 17743, options [nop,nop,TS val 3771179265 ecr 3771179265], length 0 18:28:42.962152 IP6 ::1 > ::1: HBH 63780 > 40051: Flags [P.], seq 3626560001:3626640001, ack 3626560001, win 17743, options [nop,nop,TS val 3771179265 ecr 3771179265], length 80000 18:28:42.962157 IP6 ::1.40051 > ::1.63780: Flags [.], ack 3626640001, win 17743, options [nop,nop,TS val 3771179265 ecr 3771179265], length 0 18:28:42.962180 IP6 ::1 > ::1: HBH 40051 > 63780: Flags [P.], seq 3626560001:3626640001, ack 3626640001, win 17743, options [nop,nop,TS val 3771179265 ecr 3771179265], length 80000 18:28:42.962214 IP6 ::1.63780 > ::1.40051: Flags [.], ack 3626640001, win 17743, options [nop,nop,TS val 3771179266 ecr 3771179265], length 0 18:28:42.962228 IP6 ::1 > ::1: HBH 63780 > 40051: Flags [P.], seq 3626640001:3626720001, ack 3626640001, win 17743, options [nop,nop,TS val 3771179266 ecr 3771179265], length 80000 18:28:42.962233 IP6 ::1.40051 > ::1.63780: Flags [.], ack 3626720001, win 17743, options [nop,nop,TS val 3771179266 ecr 3771179266], length 0 Signed-off-by: Eric Dumazet Acked-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/loopback.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 720394c0639b..14e8d04cb434 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -191,6 +191,8 @@ static void gen_lo_setup(struct net_device *dev, dev->netdev_ops = dev_ops; dev->needs_free_netdev = true; dev->priv_destructor = dev_destructor; + + netif_set_tso_max_size(dev, GSO_MAX_SIZE); } /* The loopback device is special. There is only one instance -- cgit v1.2.3 From d406099d6a150d6ee09b46d660feca348f9f9bba Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 May 2022 11:34:06 -0700 Subject: veth: enable BIG TCP packets Set the TSO driver limit to GSO_MAX_SIZE (512 KB). This allows the admin/user to set a GSO limit up to this value. ip link set dev veth10 gso_max_size 200000 Signed-off-by: Eric Dumazet Acked-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/veth.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f474e79a7745..466da01ba2e3 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1647,6 +1647,7 @@ static void veth_setup(struct net_device *dev) dev->hw_features = VETH_FEATURES; dev->hw_enc_features = VETH_FEATURES; dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; + netif_set_tso_max_size(dev, GSO_MAX_SIZE); } /* -- cgit v1.2.3 From 1169a64265c4ea7100091228c98d4267f041b0e7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 May 2022 11:34:07 -0700 Subject: mlx4: support BIG TCP packets mlx4 supports LSOv2 just fine. IPv6 stack inserts a temporary Hop-by-Hop header with JUMBO TLV for big packets. We need to ignore the HBH header when populating TX descriptor. 
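For reference, the temporary header looks roughly like this (sketch based on the definition the BIG TCP series adds to include/net/ipv6.h; treat the exact field names here as an assumption):

/* The 8-byte Hop-by-Hop extension header carrying the Jumbo Payload TLV
 * that ip6_xmit() prepends to BIG TCP packets. mlx4 must skip these
 * 8 bytes when copying the LSO headers into the TX descriptor.
 */
struct hop_jumbo_hdr {
	u8	nexthdr;		/* IPPROTO_TCP for these packets */
	u8	hdrlen;			/* 0: extension header is 8 bytes */
	u8	tlv_type;		/* IPV6_TLV_JUMBO (0xc2) */
	u8	tlv_len;		/* 4: length of the payload field */
	__be32	jumbo_payload_len;	/* real payload length, > 65535 */
};
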
Tested: Before: (not enabling bigger TSO/GRO packets) ip link set dev eth0 gso_max_size 65536 gro_max_size 65536 netperf -H lpaa18 -t TCP_RR -T2,2 -l 10 -Cc -- -r 70000,70000 MIGRATED TCP REQUEST/RESPONSE TEST from ::0 (::) port 0 AF_INET6 to lpaa18.prod.google.com () port 0 AF_INET6 : first burst 0 : cpu bind Local /Remote Socket Size Request Resp. Elapsed Trans. CPU CPU S.dem S.dem Send Recv Size Size Time Rate local remote local remote bytes bytes bytes bytes secs. per sec % S % S us/Tr us/Tr 262144 540000 70000 70000 10.00 6591.45 0.86 1.34 62.490 97.446 262144 540000 After: (enabling bigger TSO/GRO packets) ip link set dev eth0 gso_max_size 185000 gro_max_size 185000 netperf -H lpaa18 -t TCP_RR -T2,2 -l 10 -Cc -- -r 70000,70000 MIGRATED TCP REQUEST/RESPONSE TEST from ::0 (::) port 0 AF_INET6 to lpaa18.prod.google.com () port 0 AF_INET6 : first burst 0 : cpu bind Local /Remote Socket Size Request Resp. Elapsed Trans. CPU CPU S.dem S.dem Send Recv Size Size Time Rate local remote local remote bytes bytes bytes bytes secs. per sec % S % S us/Tr us/Tr 262144 540000 70000 70000 10.00 8383.95 0.95 1.01 54.432 57.584 262144 540000 Signed-off-by: Eric Dumazet Reviewed-by: Tariq Toukan Acked-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 3 ++ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 47 +++++++++++++++++++++----- 2 files changed, 41 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index c61dc7ae0c05..ca4b93a01034 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3417,6 +3417,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = priv->max_mtu; + /* supports LSOv2 packets. */ + netif_set_tso_max_size(dev, GSO_MAX_SIZE); + mdev->pndev[port] = dev; mdev->upper[port] = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f777151d226f..af3b2b59a2a6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "mlx4_en.h" @@ -634,19 +635,28 @@ static int get_real_size(const struct sk_buff *skb, struct net_device *dev, int *lso_header_size, bool *inline_ok, - void **pfrag) + void **pfrag, + int *hopbyhop) { struct mlx4_en_priv *priv = netdev_priv(dev); int real_size; if (shinfo->gso_size) { *inline_ok = false; - if (skb->encapsulation) + *hopbyhop = 0; + if (skb->encapsulation) { *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb); - else + } else { + /* Detects large IPV6 TCP packets and prepares for removal of + * HBH header that has been pushed by ip6_xmit(), + * mainly so that tcpdump can dissect them. 
+ */ + if (ipv6_has_hopopt_jumbo(skb)) + *hopbyhop = sizeof(struct hop_jumbo_hdr); *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); + } real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + - ALIGN(*lso_header_size + 4, DS_SIZE); + ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE); if (unlikely(*lso_header_size != skb_headlen(skb))) { /* We add a segment for the skb linear buffer only if * it contains data */ @@ -873,6 +883,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) int desc_size; int real_size; u32 index, bf_index; + struct ipv6hdr *h6; __be32 op_own; int lso_header_size; void *fragptr = NULL; @@ -881,6 +892,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) bool stop_queue; bool inline_ok; u8 data_offset; + int hopbyhop; bool bf_ok; tx_ind = skb_get_queue_mapping(skb); @@ -890,7 +902,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; real_size = get_real_size(skb, shinfo, dev, &lso_header_size, - &inline_ok, &fragptr); + &inline_ok, &fragptr, &hopbyhop); if (unlikely(!real_size)) goto tx_drop_count; @@ -943,7 +955,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) data = &tx_desc->data; data_offset = offsetof(struct mlx4_en_tx_desc, data); } else { - int lso_align = ALIGN(lso_header_size + 4, DS_SIZE); + int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE); data = (void *)&tx_desc->lso + lso_align; data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align; @@ -1008,14 +1020,31 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ((ring->prod & ring->size) ? cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + lso_header_size -= hopbyhop; /* Fill in the LSO prefix */ tx_desc->lso.mss_hdr_size = cpu_to_be32( shinfo->gso_size << 16 | lso_header_size); - /* Copy headers; - * note that we already verified that it is linear */ - memcpy(tx_desc->lso.header, skb->data, lso_header_size); + if (unlikely(hopbyhop)) { + /* remove the HBH header. + * Layout: [Ethernet header][IPv6 header][HBH][TCP header] + */ + memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6)); + h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN); + h6->nexthdr = IPPROTO_TCP; + /* Copy the TCP header after the IPv6 one */ + memcpy(h6 + 1, + skb->data + ETH_HLEN + sizeof(*h6) + + sizeof(struct hop_jumbo_hdr), + tcp_hdrlen(skb)); + /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */ + } else { + /* Copy headers; + * note that we already verified that it is linear + */ + memcpy(tx_desc->lso.header, skb->data, lso_header_size); + } ring->tso_packets++; i = shinfo->gso_segs; -- cgit v1.2.3 From de78960e025f0f361db76f90de609ca2c26d1366 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 13 May 2022 11:34:08 -0700 Subject: mlx5: support BIG TCP packets mlx5 supports LSOv2. IPv6 gro/tcp stacks insert a temporary Hop-by-Hop header with JUMBO TLV for big packets. We need to ignore/skip this HBH header when populating TX descriptor. Note that ipv6_has_hopopt_jumbo() only recognizes very specific packet layout, thus mlx5e_sq_xmit_wqe() is taking care of this layout only. v7: adopt unsafe_memcpy() and MLX5_UNSAFE_MEMCPY_DISCLAIMER v2: clear hopbyhop in mlx5e_tx_get_gso_ihs() v4: fix compile error for CONFIG_MLX5_CORE_IPOIB=y Signed-off-by: Coco Li Signed-off-by: Eric Dumazet Reviewed-by: Tariq Toukan Cc: Saeed Mahameed Cc: Leon Romanovsky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 111 +++++++++++++++++----- 2 files changed, 89 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d27986869b8b..bf3bca79e160 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4920,6 +4920,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; + netif_set_tso_max_size(netdev, GSO_MAX_SIZE); mlx5e_set_netdev_dev_addr(netdev); mlx5e_ipsec_build_netdev(priv); mlx5e_ktls_build_netdev(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 5855d8f9c509..50d14cec4894 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -40,6 +40,7 @@ #include "en_accel/en_accel.h" #include "en_accel/ipsec_rxtx.h" #include "en/ptp.h" +#include static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) { @@ -91,6 +92,13 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, return min_t(u16, hlen, skb_headlen(skb)); } +#define MLX5_UNSAFE_MEMCPY_DISCLAIMER \ + "This copy has been bounds-checked earlier in " \ + "mlx5i_sq_calc_wqe_attr() and intentionally " \ + "crosses a flex array boundary. Since it is " \ + "performance sensitive, splitting the copy is " \ + "undesirable." + static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) { struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; @@ -100,7 +108,10 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) memcpy(&vhdr->addrs, skb->data, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); - memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); + unsafe_memcpy(&vhdr->h_vlan_encapsulated_proto, + skb->data + cpy1_sz, + cpy2_sz, + MLX5_UNSAFE_MEMCPY_DISCLAIMER); } static inline void @@ -130,23 +141,32 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->stats->csum_none++; } +/* Returns the number of header bytes that we plan + * to inline later in the transmit descriptor + */ static inline u16 -mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) +mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop) { struct mlx5e_sq_stats *stats = sq->stats; u16 ihs; + *hopbyhop = 0; if (skb->encapsulation) { ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); stats->tso_inner_packets++; stats->tso_inner_bytes += skb->len - ihs; } else { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { ihs = skb_transport_offset(skb) + sizeof(struct udphdr); - else + } else { ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (ipv6_has_hopopt_jumbo(skb)) { + *hopbyhop = sizeof(struct hop_jumbo_hdr); + ihs -= sizeof(struct hop_jumbo_hdr); + } + } stats->tso_packets++; - stats->tso_bytes += skb->len - ihs; + stats->tso_bytes += skb->len - ihs - *hopbyhop; } return ihs; @@ -208,6 +228,7 @@ struct mlx5e_tx_attr { __be16 mss; u16 insz; u8 opcode; + u8 hopbyhop; }; struct mlx5e_tx_wqe_attr { @@ -244,14 +265,16 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_sq_stats *stats = 
sq->stats; if (skb_is_gso(skb)) { - u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb); + int hopbyhop; + u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop); *attr = (struct mlx5e_tx_attr) { .opcode = MLX5_OPCODE_LSO, .mss = cpu_to_be16(skb_shinfo(skb)->gso_size), .ihs = ihs, .num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs, - .headlen = skb_headlen(skb) - ihs, + .headlen = skb_headlen(skb) - ihs - hopbyhop, + .hopbyhop = hopbyhop, }; stats->packets += skb_shinfo(skb)->gso_segs; @@ -365,7 +388,8 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg; struct mlx5_wqe_data_seg *dseg; struct mlx5e_tx_wqe_info *wi; - + u16 ihs = attr->ihs; + struct ipv6hdr *h6; struct mlx5e_sq_stats *stats = sq->stats; int num_dma; @@ -379,21 +403,40 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, eseg->mss = attr->mss; - if (attr->ihs) { - if (skb_vlan_tag_present(skb)) { - eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN); - mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs); + if (ihs) { + u8 *start = eseg->inline_hdr.start; + + if (unlikely(attr->hopbyhop)) { + /* remove the HBH header. + * Layout: [Ethernet header][IPv6 header][HBH][TCP header] + */ + if (skb_vlan_tag_present(skb)) { + mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6)); + ihs += VLAN_HLEN; + h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr)); + } else { + unsafe_memcpy(start, skb->data, + ETH_HLEN + sizeof(*h6), + MLX5_UNSAFE_MEMCPY_DISCLAIMER); + h6 = (struct ipv6hdr *)(start + ETH_HLEN); + } + h6->nexthdr = IPPROTO_TCP; + /* Copy the TCP header after the IPv6 one */ + memcpy(h6 + 1, + skb->data + ETH_HLEN + sizeof(*h6) + + sizeof(struct hop_jumbo_hdr), + tcp_hdrlen(skb)); + /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */ + } else if (skb_vlan_tag_present(skb)) { + mlx5e_insert_vlan(start, skb, ihs); + ihs += VLAN_HLEN; stats->added_vlan_packets++; } else { - eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs); - unsafe_memcpy(eseg->inline_hdr.start, skb->data, attr->ihs, - /* This copy has been bounds-checked earlier in - * mlx5i_sq_calc_wqe_attr() and intentionally - * crosses a flex array boundary. Since it is - * performance sensitive, splitting the copy is - * undesirable. - */); + unsafe_memcpy(eseg->inline_hdr.start, skb->data, + attr->ihs, + MLX5_UNSAFE_MEMCPY_DISCLAIMER); } + eseg->inline_hdr.sz |= cpu_to_be16(ihs); dseg += wqe_attr->ds_cnt_inl; } else if (skb_vlan_tag_present(skb)) { eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); @@ -404,7 +447,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, } dseg += wqe_attr->ds_cnt_ids; - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs, + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop, attr->headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; @@ -924,12 +967,34 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, eseg->mss = attr.mss; if (attr.ihs) { - memcpy(eseg->inline_hdr.start, skb->data, attr.ihs); + if (unlikely(attr.hopbyhop)) { + struct ipv6hdr *h6; + + /* remove the HBH header. 
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header] + */ + unsafe_memcpy(eseg->inline_hdr.start, skb->data, + ETH_HLEN + sizeof(*h6), + MLX5_UNSAFE_MEMCPY_DISCLAIMER); + h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN); + h6->nexthdr = IPPROTO_TCP; + /* Copy the TCP header after the IPv6 one */ + unsafe_memcpy(h6 + 1, + skb->data + ETH_HLEN + sizeof(*h6) + + sizeof(struct hop_jumbo_hdr), + tcp_hdrlen(skb), + MLX5_UNSAFE_MEMCPY_DISCLAIMER); + /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */ + } else { + unsafe_memcpy(eseg->inline_hdr.start, skb->data, + attr.ihs, + MLX5_UNSAFE_MEMCPY_DISCLAIMER); + } eseg->inline_hdr.sz = cpu_to_be16(attr.ihs); dseg += wqe_attr.ds_cnt_inl; } - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs, + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop, attr.headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; -- cgit v1.2.3 From 382d917bfc1e92339dae3c8a636b2730e8bb5132 Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Fri, 13 May 2022 15:09:22 +0800 Subject: net: hinic: add missing destroy_workqueue in hinic_pf_to_mgmt_init hinic_pf_to_mgmt_init misses destroy_workqueue in error path, this patch fixes that. Fixes: 6dbb89014dc3 ("hinic: fix sending mailbox timeout in aeq event work") Signed-off-by: Zheng Bin Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index ebc77771f5da..4aa1f433ed24 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -643,6 +643,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, err = alloc_msg_buf(pf_to_mgmt); if (err) { dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); + destroy_workqueue(pf_to_mgmt->workq); hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } @@ -650,6 +651,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); if (err) { dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); + destroy_workqueue(pf_to_mgmt->workq); hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } -- cgit v1.2.3 From 3daebfbeb4555cb0c113aeb88aa469192ee41d89 Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Fri, 13 May 2022 19:23:59 +0200 Subject: net: tulip: convert to devres Works fine on my HP C3600: [ 274.452394] tulip0: no phy info, aborting mtable build [ 274.499041] tulip0: MII transceiver #1 config 1000 status 782d advertising 01e1 [ 274.750691] net eth0: Digital DS21142/43 Tulip rev 65 at MMIO 0xf4008000, 00:30:6e:08:7d:21, IRQ 17 [ 283.104520] net eth0: Setting full-duplex based on MII#1 link partner capability of c1e1 Signed-off-by: Rolf Eike Beer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/eeprom.c | 7 ++-- drivers/net/ethernet/dec/tulip/tulip_core.c | 64 ++++++++--------------------- 2 files changed, 20 insertions(+), 51 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c index ba0a69b363f8..67a67cb78dbb 100644 --- a/drivers/net/ethernet/dec/tulip/eeprom.c +++ b/drivers/net/ethernet/dec/tulip/eeprom.c @@ -117,8 +117,8 @@ static void tulip_build_fake_mediatable(struct tulip_private *tp) 0x00, 0x06 /* ttm bit map */ }; - tp->mtable = kmalloc(sizeof(struct mediatable) + - sizeof(struct medialeaf), GFP_KERNEL); + tp->mtable = devm_kmalloc(&tp->pdev->pdev, sizeof(struct mediatable) + + sizeof(struct medialeaf), GFP_KERNEL); if (tp->mtable == NULL) return; /* Horrible, impossible failure. */ @@ -224,7 +224,8 @@ subsequent_board: return; } - mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL); + mtable = devm_kmalloc(&tp->pdev->dev, struct_size(mtable, mleaf, count), + GFP_KERNEL); if (mtable == NULL) return; /* Horrible, impossible failure. */ last_mediatable = tp->mtable = mtable; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 825e81f5fd22..b8e46c4849ef 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1389,7 +1389,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * And back to business */ - i = pci_enable_device(pdev); + i = pcim_enable_device(pdev); if (i) { pr_err("Cannot enable tulip board #%d, aborting\n", board_idx); return i; @@ -1398,11 +1398,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) irq = pdev->irq; /* alloc_etherdev ensures aligned and zeroed private structures */ - dev = alloc_etherdev (sizeof (*tp)); - if (!dev) { - pci_disable_device(pdev); + dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp)); + if (!dev) return -ENOMEM; - } SET_NETDEV_DEV(dev, &pdev->dev); if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { @@ -1410,18 +1408,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_name(pdev), (unsigned long long)pci_resource_len (pdev, 0), (unsigned long long)pci_resource_start (pdev, 0)); - goto err_out_free_netdev; + return -ENODEV; } /* grab all resources from both PIO and MMIO regions, as we * don't want anyone else messing around with our hardware */ - if (pci_request_regions (pdev, DRV_NAME)) - goto err_out_free_netdev; + if (pci_request_regions(pdev, DRV_NAME)) + return -ENODEV; - ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); + ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); if (!ioaddr) - goto err_out_free_res; + return -ENODEV; /* * initialize private data structure 'tp' @@ -1430,12 +1428,12 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp = netdev_priv(dev); tp->dev = dev; - tp->rx_ring = dma_alloc_coherent(&pdev->dev, - sizeof(struct tulip_rx_desc) * RX_RING_SIZE + - sizeof(struct tulip_tx_desc) * TX_RING_SIZE, - &tp->rx_ring_dma, GFP_KERNEL); + tp->rx_ring = dmam_alloc_coherent(&pdev->dev, + sizeof(struct tulip_rx_desc) * RX_RING_SIZE + + sizeof(struct tulip_tx_desc) * TX_RING_SIZE, + &tp->rx_ring_dma, GFP_KERNEL); if (!tp->rx_ring) - goto err_out_mtable; + return -ENODEV; tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE); tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) 
* RX_RING_SIZE; @@ -1695,8 +1693,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #endif dev->ethtool_ops = &ops; - if (register_netdev(dev)) - goto err_out_free_ring; + i = register_netdev(dev); + if (i) + return i; pci_set_drvdata(pdev, dev); @@ -1771,24 +1770,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tulip_set_power_state (tp, 0, 1); return 0; - -err_out_free_ring: - dma_free_coherent(&pdev->dev, - sizeof(struct tulip_rx_desc) * RX_RING_SIZE + - sizeof(struct tulip_tx_desc) * TX_RING_SIZE, - tp->rx_ring, tp->rx_ring_dma); - -err_out_mtable: - kfree (tp->mtable); - pci_iounmap(pdev, ioaddr); - -err_out_free_res: - pci_release_regions (pdev); - -err_out_free_netdev: - free_netdev (dev); - pci_disable_device(pdev); - return -ENODEV; } @@ -1888,24 +1869,11 @@ static int __maybe_unused tulip_resume(struct device *dev_d) static void tulip_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); - struct tulip_private *tp; if (!dev) return; - tp = netdev_priv(dev); unregister_netdev(dev); - dma_free_coherent(&pdev->dev, - sizeof(struct tulip_rx_desc) * RX_RING_SIZE + - sizeof(struct tulip_tx_desc) * TX_RING_SIZE, - tp->rx_ring, tp->rx_ring_dma); - kfree (tp->mtable); - pci_iounmap(pdev, tp->base_addr); - free_netdev (dev); - pci_release_regions (pdev); - pci_disable_device(pdev); - - /* pci_power_off (pdev, -1); */ } #ifdef CONFIG_NET_POLL_CONTROLLER -- cgit v1.2.3 From e68372efb9feae33467bdbea2654f3d91b2e0466 Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Fri, 13 May 2022 15:10:18 +0800 Subject: octeon_ep: add missing destroy_workqueue in octep_init_module octep_init_module misses destroy_workqueue in error path, this patch fixes that. Fixes: 862cd659a6fb ("octeon_ep: Add driver framework and device initialization") Signed-off-by: Zheng Bin Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index e020c81f3455..ebf78f6ca82b 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1149,6 +1149,7 @@ static int __init octep_init_module(void) if (ret < 0) { pr_err("%s: Failed to register PCI driver; err=%d\n", OCTEP_DRV_NAME, ret); + destroy_workqueue(octep_wq); return ret; } -- cgit v1.2.3 From 1dee43c2c6f159c26684e02a56373bb3b537ab24 Mon Sep 17 00:00:00 2001 From: Ziyang Xuan Date: Fri, 13 May 2022 15:29:28 +0800 Subject: octeon_ep: delete unnecessary NULL check vfree(NULL) is safe. NULL check before vfree() is not needed. Delete them to simplify the code. Signed-off-by: Ziyang Xuan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 3 +-- drivers/net/ethernet/marvell/octeon_ep/octep_rx.c | 3 +-- drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index ebf78f6ca82b..94409e329a9a 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -980,8 +980,7 @@ static void octep_device_cleanup(struct octep_device *oct) dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n"); for (i = 0; i < OCTEP_MAX_VF; i++) { - if (oct->mbox[i]) - vfree(oct->mbox[i]); + vfree(oct->mbox[i]); oct->mbox[i] = NULL; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c index 945947ec7723..d9ae0937d17a 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -230,8 +230,7 @@ static int octep_free_oq(struct octep_oq *oq) octep_oq_free_ring_buffers(oq); - if (oq->buff_info) - vfree(oq->buff_info); + vfree(oq->buff_info); if (oq->desc_ring) dma_free_coherent(oq->dev, diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c index 511552bc3e87..5a520d37bea0 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c @@ -270,8 +270,7 @@ static void octep_free_iq(struct octep_iq *iq) desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); - if (iq->buff_info) - vfree(iq->buff_info); + vfree(iq->buff_info); if (iq->desc_ring) dma_free_coherent(iq->dev, desc_ring_size, -- cgit v1.2.3 From b321dfafb0b99e285d14bcaae00b4f9093556eb6 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 13 May 2022 15:56:11 +0800 Subject: net: wwan: t7xx: Fix return type of t7xx_dl_add_timedout() t7xx_dl_add_timedout() now return int 'ret', but the return type is bool. Change the return type to int for furthor errcode upstream. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/wwan/t7xx/t7xx_dpmaif.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_dpmaif.c index c8bf6929af51..6d3edadecbec 100644 --- a/drivers/net/wwan/t7xx/t7xx_dpmaif.c +++ b/drivers/net/wwan/t7xx/t7xx_dpmaif.c @@ -1043,15 +1043,13 @@ unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info, return value & DPMAIF_DL_RD_WR_IDX_MSK; } -static bool t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) +static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info) { u32 value; - int ret; - ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, + return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD, value, !(value & DPMAIF_DL_ADD_NOT_READY), 0, DPMAIF_CHECK_TIMEOUT_US); - return ret; } int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt) -- cgit v1.2.3 From d887ae3247e022183f244cb325dca1dfbd0a9ed0 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Fri, 13 May 2022 08:19:18 +0000 Subject: octeontx2-pf: Remove unnecessary synchronize_irq() before free_irq() Calling synchronize_irq() right before free_irq() is quite useless. 
On one hand the IRQ can easily fire again before free_irq() is entered, on the other hand free_irq() itself calls synchronize_irq() internally (in a race condition free way), before any state associated with the IRQ is freed. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 53b2706d65a1..f83e5ca632dd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1718,7 +1718,6 @@ err_free_cints: vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); - synchronize_irq(vec); free_irq(vec, pf); err_disable_napi: otx2_disable_napi(pf); @@ -1762,7 +1761,6 @@ int otx2_stop(struct net_device *netdev) vec = pci_irq_vector(pf->pdev, pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START); otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0)); - synchronize_irq(vec); free_irq(vec, pf); /* Cleanup CQ NAPI and IRQ */ -- cgit v1.2.3 From f2ef6f7539c68c6bd6c32323d8845ee102b7c450 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 13 May 2022 08:46:12 -0300 Subject: net: phy: micrel: Allow probing without .driver_data Currently, if the .probe element is present in the phy_driver structure and the .driver_data is not, a NULL pointer dereference happens. Allow passing .probe without .driver_data by inserting NULL checks for priv->type. Signed-off-by: Fabio Estevam Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20220513114613.762810-1-festevam@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/micrel.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index c34a93403d1e..5e356e23c1b7 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -520,7 +520,7 @@ static int kszphy_config_reset(struct phy_device *phydev) } } - if (priv->led_mode >= 0) + if (priv->type && priv->led_mode >= 0) kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode); return 0; @@ -536,10 +536,10 @@ static int kszphy_config_init(struct phy_device *phydev) type = priv->type; - if (type->has_broadcast_disable) + if (type && type->has_broadcast_disable) kszphy_broadcast_disable(phydev); - if (type->has_nand_tree_disable) + if (type && type->has_nand_tree_disable) kszphy_nand_tree_disable(phydev); return kszphy_config_reset(phydev); @@ -1730,7 +1730,7 @@ static int kszphy_probe(struct phy_device *phydev) priv->type = type; - if (type->led_mode_reg) { + if (type && type->led_mode_reg) { ret = of_property_read_u32(np, "micrel,led-mode", &priv->led_mode); if (ret) @@ -1751,7 +1751,8 @@ static int kszphy_probe(struct phy_device *phydev) unsigned long rate = clk_get_rate(clk); bool rmii_ref_clk_sel_25_mhz; - priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel; + if (type) + priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel; rmii_ref_clk_sel_25_mhz = of_property_read_bool(np, "micrel,rmii-reference-clock-select-25-mhz"); -- cgit v1.2.3 From 8e6004dfecb7bab0d8945989917fbcae5d3b50bd Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 13 May 2022 08:46:13 -0300 Subject: net: phy: micrel: Use the kszphy probe/suspend/resume Now that it is possible to use .probe without having .driver_data, let 
KSZ8061 use the kszphy specific hooks for probe,suspend and resume, which is preferred. Switch to using the dedicated kszphy probe/suspend/resume functions. Signed-off-by: Fabio Estevam Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20220513114613.762810-2-festevam@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/micrel.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 5e356e23c1b7..22139901f01c 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -3019,11 +3019,12 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8061", .phy_id_mask = MICREL_PHY_ID_MASK, /* PHY_BASIC_FEATURES */ + .probe = kszphy_probe, .config_init = ksz8061_config_init, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, - .suspend = genphy_suspend, - .resume = genphy_resume, + .suspend = kszphy_suspend, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, -- cgit v1.2.3 From 94737ef56b610d94a24fadfb8386fc17dbd79ddd Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 9 May 2022 16:02:59 +0200 Subject: can: ctucanfd: Let users select instead of depend on CAN_CTUCANFD The CTU CAN-FD IP core is only useful when used with one of the corresponding PCI/PCIe or platform (FPGA, SoC) drivers, which depend on PCI resp. OF. Hence make the users select the core driver code, instead of letting then depend on it. Keep the core code config option visible when compile-testing, to maintain compile-coverage. Link: https://lore.kernel.org/all/887b7440446b6244a20a503cc6e8dc9258846706.1652104941.git.geert+renesas@glider.be Signed-off-by: Geert Uytterhoeven Acked-by: Pavel Pisa Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig index 48963efc7f19..3c383612eb17 100644 --- a/drivers/net/can/ctucanfd/Kconfig +++ b/drivers/net/can/ctucanfd/Kconfig @@ -1,5 +1,5 @@ config CAN_CTUCANFD - tristate "CTU CAN-FD IP core" + tristate "CTU CAN-FD IP core" if COMPILE_TEST help This driver adds support for the CTU CAN FD open-source IP core. More documentation and core sources at project page @@ -13,8 +13,8 @@ config CAN_CTUCANFD config CAN_CTUCANFD_PCI tristate "CTU CAN-FD IP core PCI/PCIe driver" - depends on CAN_CTUCANFD depends on PCI + select CAN_CTUCANFD help This driver adds PCI/PCIe support for CTU CAN-FD IP core. The project providing FPGA design for Intel EP4CGX15 based DB4CGX15 @@ -23,8 +23,8 @@ config CAN_CTUCANFD_PCI config CAN_CTUCANFD_PLATFORM tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver" - depends on CAN_CTUCANFD depends on OF || COMPILE_TEST + select CAN_CTUCANFD help The core has been tested together with OpenCores SJA1000 modified to be CAN FD frames tolerant on MicroZed Zynq based -- cgit v1.2.3 From 30abc929132929b52fa7203c35e60335c500bd17 Mon Sep 17 00:00:00 2001 From: Vincent Mailhol Date: Sat, 14 May 2022 23:16:47 +0900 Subject: can: slcan: slc_xmit(): use can_dropped_invalid_skb() instead of manual check slcan does a manual check in slc_xmit() to verify if the skb is valid. This check is incomplete, use instead can_dropped_invalid_skb(). 
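For illustration only (not part of the slcan patch): the usual pattern at the top of a CAN driver's ndo_start_xmit looks roughly like the sketch below. my_can_xmit() is a hypothetical name; only can_dropped_invalid_skb() comes from the CAN core.

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical xmit handler showing the recommended validity check. */
static netdev_tx_t my_can_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Checks protocol, frame length and DLC, and frees a bogus skb
	 * itself before returning true, which is more complete than
	 * comparing skb->len against CAN_MTU by hand.
	 */
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* ... hand the frame to the hardware or line discipline here ... */

	return NETDEV_TX_OK;
}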
Link: https://lore.kernel.org/all/20220514141650.1109542-2-mailhol.vincent@wanadoo.fr Signed-off-by: Vincent Mailhol Signed-off-by: Marc Kleine-Budde --- drivers/net/can/slcan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index ec294d0c5722..64a3aee8a7da 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -359,8 +359,8 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev) { struct slcan *sl = netdev_priv(dev); - if (skb->len != CAN_MTU) - goto out; + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; spin_lock(&sl->lock); if (!netif_running(dev)) { -- cgit v1.2.3 From f008f8d0305cdf930de97d4f746f2a6bc9338385 Mon Sep 17 00:00:00 2001 From: Alvin Šipraga Date: Fri, 13 May 2022 23:36:18 +0200 Subject: net: dsa: realtek: rtl8366rb: Serialize indirect PHY register access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lock the regmap during the whole PHY register access routines in rtl8366rb. Signed-off-by: Alvin Šipraga Tested-by: Linus Walleij Signed-off-by: Linus Walleij Reviewed-by: Vladimir Oltean Link: https://lore.kernel.org/r/20220513213618.2742895-1-linus.walleij@linaro.org Signed-off-by: Jakub Kicinski --- drivers/net/dsa/realtek/rtl8366rb.c | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 1a3406b9e64c..25f88022b9e4 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -1653,29 +1653,37 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum) if (phy > RTL8366RB_PHY_NO_MAX) return -EINVAL; - ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + mutex_lock(&priv->map_lock); + + ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG, RTL8366RB_PHY_CTRL_READ); if (ret) - return ret; + goto out; reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; - ret = regmap_write(priv->map, reg, 0); + ret = regmap_write(priv->map_nolock, reg, 0); if (ret) { dev_err(priv->dev, "failed to write PHY%d reg %04x @ %04x, ret %d\n", phy, regnum, reg, ret); - return ret; + goto out; } - ret = regmap_read(priv->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val); + ret = regmap_read(priv->map_nolock, RTL8366RB_PHY_ACCESS_DATA_REG, + &val); if (ret) - return ret; + goto out; + + ret = val; dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n", phy, regnum, reg, val); - return val; +out: + mutex_unlock(&priv->map_lock); + + return ret; } static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum, @@ -1687,21 +1695,26 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum, if (phy > RTL8366RB_PHY_NO_MAX) return -EINVAL; - ret = regmap_write(priv->map, RTL8366RB_PHY_ACCESS_CTRL_REG, + mutex_lock(&priv->map_lock); + + ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG, RTL8366RB_PHY_CTRL_WRITE); if (ret) - return ret; + goto out; reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum; dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n", phy, regnum, reg, val); - ret = regmap_write(priv->map, reg, val); + ret = regmap_write(priv->map_nolock, reg, val); if (ret) - return ret; + goto out; - return 0; +out: + mutex_unlock(&priv->map_lock); + + return ret; } static int 
rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum) -- cgit v1.2.3 From 262d98b1193fec68c66f3d57772b72240fc4b9da Mon Sep 17 00:00:00 2001 From: Ricardo Martinez Date: Fri, 13 May 2022 10:33:59 -0700 Subject: net: wwan: t7xx: Avoid calls to skb_data_area_size() skb_data_area_size() helper was used to calculate the size of the DMA mapped buffer passed to the HW. Instead of doing this, use the size passed to allocate the skbs. Signed-off-by: Ricardo Martinez Reviewed-by: Andy Shevchenko Reviewed-by: Sergey Ryazanov Signed-off-by: Jakub Kicinski --- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 7 +++---- drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 6 ++---- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 46066dcd2607..0c52801ed0de 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -97,8 +97,7 @@ static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma if (!req->skb) return -ENOMEM; - req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, - skb_data_area_size(req->skb), DMA_FROM_DEVICE); + req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE); if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) { dev_kfree_skb_any(req->skb); req->skb = NULL; @@ -154,7 +153,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool if (req->mapped_buff) { dma_unmap_single(md_ctrl->dev, req->mapped_buff, - skb_data_area_size(skb), DMA_FROM_DEVICE); + queue->tr_ring->pkt_size, DMA_FROM_DEVICE); req->mapped_buff = 0; } @@ -376,7 +375,7 @@ static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl, list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) { if (req_cur->mapped_buff && req_cur->skb) { dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff, - skb_data_area_size(req_cur->skb), tx_rx); + ring->pkt_size, tx_rx); req_cur->mapped_buff = 0; } diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index 35a8a0d7c1ee..91a0eb19e0d8 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -151,14 +151,12 @@ static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl, { dma_addr_t data_bus_addr; struct sk_buff *skb; - size_t data_len; skb = __dev_alloc_skb(size, GFP_KERNEL); if (!skb) return false; - data_len = skb_data_area_size(skb); - data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE); + data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE); if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) { dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n"); dev_kfree_skb_any(skb); @@ -167,7 +165,7 @@ static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl, cur_skb->skb = skb; cur_skb->data_bus_addr = data_bus_addr; - cur_skb->data_len = data_len; + cur_skb->data_len = size; return true; } -- cgit v1.2.3 From 6251264fedde83ade6f0f1f7049037469dd4de0b Mon Sep 17 00:00:00 2001 From: Wells Lu Date: Fri, 13 May 2022 19:57:16 +0800 Subject: net: ethernet: Fix unmet direct dependencies detected for NVMEM_SUNPLUS_OCOTP Removed unnecessary: select COMMON_CLK_SP7021 select RESET_SUNPLUS select NVMEM_SUNPLUS_OCOTP from Kconfig. 
Reported-by: kernel test robot Signed-off-by: Wells Lu Link: https://lore.kernel.org/r/1652443036-24731-1-git-send-email-wellslutw@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sunplus/Kconfig | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sunplus/Kconfig b/drivers/net/ethernet/sunplus/Kconfig index d0144a2ab918..be50a6b723eb 100644 --- a/drivers/net/ethernet/sunplus/Kconfig +++ b/drivers/net/ethernet/sunplus/Kconfig @@ -23,9 +23,6 @@ config SP7021_EMAC tristate "Sunplus Dual 10M/100M Ethernet devices" depends on SOC_SP7021 || COMPILE_TEST select PHYLIB - select COMMON_CLK_SP7021 - select RESET_SUNPLUS - select NVMEM_SUNPLUS_OCOTP help If you have Sunplus dual 10M/100M Ethernet devices, say Y. The network device creates two net-device interfaces. -- cgit v1.2.3 From 1588f5a91b169b5252f5d2ccee3f0dd7c13affda Mon Sep 17 00:00:00 2001 From: Bernard Zhao Date: Sun, 15 May 2022 18:52:05 -0700 Subject: ethernet/ti: delete if NULL check befort devm_kfree devm_kfree check the pointer, there is no need to check before devm_kfree call. This change is to cleanup the code a bit. Signed-off-by: Bernard Zhao Link: https://lore.kernel.org/r/20220516015208.6526-1-bernard@vivo.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/ti/am65-cpsw-qos.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index aa32dd905e2b..e162771893af 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -166,8 +166,7 @@ static void am65_cpsw_admin_to_oper(struct net_device *ndev) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); - if (port->qos.est_oper) - devm_kfree(&ndev->dev, port->qos.est_oper); + devm_kfree(&ndev->dev, port->qos.est_oper); port->qos.est_oper = port->qos.est_admin; port->qos.est_admin = NULL; @@ -434,11 +433,8 @@ static void am65_cpsw_purge_est(struct net_device *ndev) am65_cpsw_stop_est(ndev); - if (port->qos.est_admin) - devm_kfree(&ndev->dev, port->qos.est_admin); - - if (port->qos.est_oper) - devm_kfree(&ndev->dev, port->qos.est_oper); + devm_kfree(&ndev->dev, port->qos.est_admin); + devm_kfree(&ndev->dev, port->qos.est_oper); port->qos.est_oper = NULL; port->qos.est_admin = NULL; @@ -524,8 +520,7 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data) ret = am65_cpsw_configure_taprio(ndev, est_new); if (!ret) { if (taprio->enable) { - if (port->qos.est_admin) - devm_kfree(&ndev->dev, port->qos.est_admin); + devm_kfree(&ndev->dev, port->qos.est_admin); port->qos.est_admin = est_new; } else { -- cgit v1.2.3 From bcdcf2c466d30d69be822420bdb3f880651b2e55 Mon Sep 17 00:00:00 2001 From: Lu Wei Date: Mon, 16 May 2022 11:33:43 +0800 Subject: net/mlxbf_gige: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address instead of memset(). 
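For illustration only (not part of the mlxbf_gige patch): eth_zero_addr() is the etherdevice.h helper that wraps the memset, so the substitution is purely about readability. example_clear_mac() is a hypothetical function.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static void example_clear_mac(void)
{
	u8 mac[ETH_ALEN];

	/* Open-coded form used before the patch: */
	memset(mac, 0, ETH_ALEN);

	/* Preferred helper; it expands to memset(addr, 0x00, ETH_ALEN)
	 * but makes the intent (clear a MAC address) explicit.
	 */
	eth_zero_addr(mac);
}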
Signed-off-by: Lu Wei Link: https://lore.kernel.org/r/20220516033343.329178-1-luwei32@huawei.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 66ef0090755e..84621b4cb15b 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -69,7 +69,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv) u8 mac[ETH_ALEN]; u64 local_mac; - memset(mac, 0, ETH_ALEN); + eth_zero_addr(mac); mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX, &local_mac); u64_to_ether_addr(local_mac, mac); -- cgit v1.2.3 From 29fd3ca1779f6997d15c6d8f1ab119128e4bab61 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 16 May 2022 07:26:46 +0000 Subject: qed: Remove unnecessary synchronize_irq() before free_irq() Calling synchronize_irq() right before free_irq() is quite useless. On one hand the IRQ can easily fire again before free_irq() is entered, on the other hand free_irq() itself calls synchronize_irq() internally (in a race condition free way), before any state associated with the IRQ is freed. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Link: https://lore.kernel.org/r/20220516072646.1651109-1-chi.minghao@zte.com.cn Signed-off-by: Paolo Abeni --- drivers/net/ethernet/qlogic/qed/qed_main.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c5003fa1a25e..c91898be7c03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -823,7 +823,6 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev) for_each_hwfn(cdev, i) { if (!cdev->hwfns[i].b_int_requested) break; - synchronize_irq(cdev->int_params.msix_table[i].vector); free_irq(cdev->int_params.msix_table[i].vector, &cdev->hwfns[i].sp_dpc); } -- cgit v1.2.3 From bd81bfb5a1d1aa811ccb370aa5d118d0f87877bc Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 16 May 2022 08:19:14 +0000 Subject: net: vxge: Remove unnecessary synchronize_irq() before free_irq() Calling synchronize_irq() right before free_irq() is quite useless. On one hand the IRQ can easily fire again before free_irq() is entered, on the other hand free_irq() itself calls synchronize_irq() internally (in a race condition free way), before any state associated with the IRQ is freed. 
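For illustration only (not part of these patches): a minimal sketch of the teardown pattern this series converges on. my_remove_irq() and struct my_priv are hypothetical; free_irq() and synchronize_irq() are the real kernel APIs.

#include <linux/interrupt.h>

struct my_priv {
	unsigned int irq;
	void *irq_arg;
};

/* Hypothetical teardown helper. */
static void my_remove_irq(struct my_priv *priv)
{
	/* A synchronize_irq(priv->irq) here would be redundant: the IRQ
	 * can still fire before free_irq() runs, and free_irq() itself
	 * waits for any handler in flight before returning.
	 */
	free_irq(priv->irq, priv->irq_arg);
}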
Reported-by: Zeal Robot Signed-off-by: Minghao Chi Link: https://lore.kernel.org/r/20220516081914.1651281-1-chi.minghao@zte.com.cn Signed-off-by: Paolo Abeni --- drivers/net/ethernet/neterion/vxge/vxge-main.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index d2de8ac44f72..fa5d4ddf429b 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2405,7 +2405,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev) for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1); intr_cnt++) { if (vdev->vxge_entries[intr_cnt].in_use) { - synchronize_irq(vdev->entries[intr_cnt].vector); free_irq(vdev->entries[intr_cnt].vector, vdev->vxge_entries[intr_cnt].arg); vdev->vxge_entries[intr_cnt].in_use = 0; @@ -2427,7 +2426,6 @@ static void vxge_rem_isr(struct vxgedev *vdev) vdev->config.intr_type == MSI_X) { vxge_rem_msix_isr(vdev); } else if (vdev->config.intr_type == INTA) { - synchronize_irq(vdev->pdev->irq); free_irq(vdev->pdev->irq, vdev); } } -- cgit v1.2.3 From d1e7f009bfff8b739599c7ff89fb794ef6d4a44d Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Mon, 16 May 2022 08:22:51 +0000 Subject: net: qede: Remove unnecessary synchronize_irq() before free_irq() Calling synchronize_irq() right before free_irq() is quite useless. On one hand the IRQ can easily fire again before free_irq() is entered, on the other hand free_irq() itself calls synchronize_irq() internally (in a race condition free way), before any state associated with the IRQ is freed. Reported-by: Zeal Robot Signed-off-by: Minghao Chi Link: https://lore.kernel.org/r/20220516082251.1651350-1-chi.minghao@zte.com.cn Signed-off-by: Paolo Abeni --- drivers/net/ethernet/qlogic/qede/qede_main.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index b4e5a15e308b..f56b679adb4b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1916,7 +1916,6 @@ static void qede_sync_free_irqs(struct qede_dev *edev) for (i = 0; i < edev->int_info.used_cnt; i++) { if (edev->int_info.msix_cnt) { - synchronize_irq(edev->int_info.msix[i].vector); free_irq(edev->int_info.msix[i].vector, &edev->fp_array[i]); } else { -- cgit v1.2.3 From 65a9dedc11d615d8f104a48d38b4fa226967b4ed Mon Sep 17 00:00:00 2001 From: Leszek Polak Date: Mon, 16 May 2022 09:08:59 +0200 Subject: net: phy: marvell: Add errata section 5.1 for Alaska PHY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As per Errata Section 5.1, if EEE is intended to be used, some register writes must be done once after every hardware reset. This patch now adds the necessary register writes as listed in the Marvell errata. Without this fix we experience ethernet problems on some of our boards equipped with a new version of this ethernet PHY (different supplier). The fix applies to Marvell Alaska 88E1510/88E1518/88E1512/88E1514 Rev. A0. Signed-off-by: Leszek Polak Signed-off-by: Stefan Roese Cc: Marek Behún Cc: Andrew Lunn Cc: Heiner Kallweit Cc: Russell King Cc: David S. 
Miller Reviewed-by: Marek Behún Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20220516070859.549170-1-sr@denx.de Signed-off-by: Paolo Abeni --- drivers/net/phy/marvell.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 47e83c1e9051..d777c8851ed6 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1191,7 +1191,44 @@ static int m88e1318_config_init(struct phy_device *phydev) static int m88e1510_config_init(struct phy_device *phydev) { + static const struct { + u16 reg17, reg16; + } errata_vals[] = { + { 0x214b, 0x2144 }, + { 0x0c28, 0x2146 }, + { 0xb233, 0x214d }, + { 0xcc0c, 0x2159 }, + }; int err; + int i; + + /* As per Marvell Release Notes - Alaska 88E1510/88E1518/88E1512/ + * 88E1514 Rev A0, Errata Section 5.1: + * If EEE is intended to be used, the following register writes + * must be done once after every hardware reset. + */ + err = marvell_set_page(phydev, 0x00FF); + if (err < 0) + return err; + + for (i = 0; i < ARRAY_SIZE(errata_vals); ++i) { + err = phy_write(phydev, 17, errata_vals[i].reg17); + if (err) + return err; + err = phy_write(phydev, 16, errata_vals[i].reg16); + if (err) + return err; + } + + err = marvell_set_page(phydev, 0x00FB); + if (err < 0) + return err; + err = phy_write(phydev, 07, 0xC00D); + if (err < 0) + return err; + err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + if (err < 0) + return err; /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { -- cgit v1.2.3 From 8762246c7b232d280b545b0acc97d75a9c518db2 Mon Sep 17 00:00:00 2001 From: Srinivasan R Date: Fri, 13 May 2022 19:42:20 +0100 Subject: wireless: Fix Makefile to be in alphabetical order Fix quantenna to be in the right order Signed-off-by: Srinivasan R Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/MA1PR01MB26992E104B006B340C3C3A84C1CA9@MA1PR01MB2699.INDPRD01.PROD.OUTLOOK.COM --- drivers/net/wireless/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index abf3e5c87ca7..a61cf6c90343 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_WLAN_VENDOR_PURELIFI) += purelifi/ +obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/ obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/ obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/ @@ -21,7 +22,6 @@ obj-$(CONFIG_WLAN_VENDOR_SILABS) += silabs/ obj-$(CONFIG_WLAN_VENDOR_ST) += st/ obj-$(CONFIG_WLAN_VENDOR_TI) += ti/ obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/ -obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/ # 16-bit wireless PCMCIA client drivers obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o -- cgit v1.2.3 From 9d9a9edcf8edab4a151b7d4bad8cfa68e8d675ff Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 16 May 2022 08:52:10 +0800 Subject: rtw89: add ieee80211::sta_rc_update ops When peer's NSS, rate or bandwidth is changed, we update RA(rate adaptive) mask to ensure transmitting packets properly. 
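For illustration only (not part of the rtw89 patch): a minimal sketch of how a mac80211 driver can wire up .sta_rc_update and react to the changed bits from enum ieee80211_rate_control_changed. struct my_dev and my_hw_update_ra() are hypothetical stand-ins for the driver's own rate-adaptation code.

#include <net/mac80211.h>

struct my_dev {
	struct ieee80211_hw *hw;
};

/* Hypothetical driver hook that reprograms the firmware RA mask. */
static void my_hw_update_ra(struct my_dev *mydev, struct ieee80211_sta *sta,
			    bool rates_changed, bool bw_nss_changed)
{
	/* Rebuild and send the rate-adaptation configuration here. */
}

static void my_ops_sta_rc_update(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta, u32 changed)
{
	struct my_dev *mydev = hw->priv;

	/* Only refresh what mac80211 says has changed for this peer. */
	my_hw_update_ra(mydev, sta,
			changed & IEEE80211_RC_SUPP_RATES_CHANGED,
			changed & (IEEE80211_RC_BW_CHANGED |
				   IEEE80211_RC_NSS_CHANGED));
}

/* Registered alongside the other callbacks:
 *   static const struct ieee80211_ops my_ops = {
 *           ...
 *           .sta_rc_update = my_ops_sta_rc_update,
 *   };
 */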
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220516005215.5878-2-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/mac80211.c | 12 +++++++++++- drivers/net/wireless/realtek/rtw89/phy.c | 12 +++++++++--- drivers/net/wireless/realtek/rtw89/phy.h | 3 ++- 3 files changed, 22 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 8da3e117ad38..f24e4a208376 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -630,7 +630,7 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta rtwsta->use_cfg_mask = true; rtwsta->mask = *br_data->mask; - rtw89_phy_ra_updata_sta(br_data->rtwdev, sta); + rtw89_phy_ra_updata_sta(br_data->rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED); } static void rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev, @@ -759,6 +759,15 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw, mutex_unlock(&rtwdev->mutex); } +static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed) +{ + struct rtw89_dev *rtwdev = hw->priv; + + rtw89_phy_ra_updata_sta(rtwdev, sta, changed); +} + const struct ieee80211_ops rtw89_ops = { .tx = rtw89_ops_tx, .wake_tx_queue = rtw89_ops_wake_tx_queue, @@ -788,5 +797,6 @@ const struct ieee80211_ops rtw89_ops = { .hw_scan = rtw89_ops_hw_scan, .cancel_hw_scan = rtw89_ops_cancel_hw_scan, .set_sar_specs = rtw89_ops_set_sar_specs, + .sta_rc_update = rtw89_ops_sta_rc_update, }; EXPORT_SYMBOL(rtw89_ops); diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 33494e8451cf..c9a4ae989ac7 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -357,13 +357,19 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ra->csi_mode = csi_mode; } -void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta) +void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, + u32 changed) { struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; struct rtw89_ra_info *ra = &rtwsta->ra; rtw89_phy_ra_sta_update(rtwdev, sta, false); - ra->upd_mask = 1; + + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) + ra->upd_mask = 1; + if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED)) + ra->upd_bw_nss_mask = 1; + rtw89_debug(rtwdev, RTW89_DBG_RA, "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d", ra->macid, @@ -487,7 +493,7 @@ static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta) { struct rtw89_dev *rtwdev = (struct rtw89_dev *)data; - rtw89_phy_ra_updata_sta(rtwdev, sta); + rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED); } void rtw89_phy_ra_update(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index 3ca5efa4c097..291660154d58 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -471,7 +471,8 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch); void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); void rtw89_phy_ra_update(struct rtw89_dev *rtwdev); -void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta); +void 
rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, + u32 changed); void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask); -- cgit v1.2.3 From d3efeee240f8dce123eb8569dc953f6cb3af3f17 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 16 May 2022 08:52:11 +0800 Subject: rtw89: 8852c: set TX antenna path To make user space can set TX antenna via iw command. Then, we can diagnose antenna is connected properly or not, and measure TX power in single path. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220516005215.5878-3-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 77dcdbd86c63..64840c8d9efe 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -2360,19 +2360,19 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, rtw89_write32(rtwdev, reg, 0); } - if (tx_path == RF_PATH_A) { + if (tx_path == RF_A) { path_com[0].data = AX_PATH_COM0_PATHA; path_com[1].data = AX_PATH_COM1_PATHA; path_com[2].data = AX_PATH_COM2_PATHA; path_com[7].data = AX_PATH_COM7_PATHA; path_com[8].data = AX_PATH_COM8_PATHA; - } else if (tx_path == RF_PATH_B) { + } else if (tx_path == RF_B) { path_com[0].data = AX_PATH_COM0_PATHB; path_com[1].data = AX_PATH_COM1_PATHB; path_com[2].data = AX_PATH_COM2_PATHB; path_com[7].data = AX_PATH_COM7_PATHB; path_com[8].data = AX_PATH_COM8_PATHB; - } else if (tx_path == RF_PATH_AB) { + } else if (tx_path == RF_AB) { path_com[0].data = AX_PATH_COM0_PATHAB; path_com[1].data = AX_PATH_COM1_PATHAB; path_com[2].data = AX_PATH_COM2_PATHAB; @@ -2457,6 +2457,7 @@ static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en) static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB; rtw8852c_bb_cfg_rx_path(rtwdev, RF_PATH_AB); @@ -2472,7 +2473,7 @@ static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); } - rtw8852c_ctrl_tx_path_tmac(rtwdev, RF_PATH_AB, RTW89_MAC_0); + rtw8852c_ctrl_tx_path_tmac(rtwdev, ntx_path, RTW89_MAC_0); } static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) -- cgit v1.2.3 From 97df85871a5b187609d30fca6d85b912d9e02f29 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 16 May 2022 08:52:12 +0800 Subject: rtw89: cfo: check mac_id to avoid out-of-bounds Somehow, hardware reports incorrect mac_id and pollute memory. Check index before we access the array. UBSAN: array-index-out-of-bounds in rtw89/phy.c:2517:23 index 188 is out of range for type 's32 [64]' CPU: 1 PID: 51550 Comm: irq/35-rtw89_pc Tainted: G OE Call Trace: show_stack+0x52/0x58 dump_stack_lvl+0x4c/0x63 dump_stack+0x10/0x12 ubsan_epilogue+0x9/0x45 __ubsan_handle_out_of_bounds.cold+0x44/0x49 ? __alloc_skb+0x92/0x1d0 rtw89_phy_cfo_parse+0x44/0x7f [rtw89_core] rtw89_core_rx+0x261/0x871 [rtw89_core] ? __alloc_skb+0xee/0x1d0 rtw89_pci_napi_poll+0x3fa/0x4ea [rtw89_pci] __napi_poll+0x33/0x1a0 net_rx_action+0x126/0x260 ? __queue_work+0x217/0x4c0 __do_softirq+0xd9/0x315 ? 
disable_irq_nosync+0x10/0x10 do_softirq.part.0+0x6d/0x90 __local_bh_enable_ip+0x62/0x70 rtw89_pci_interrupt_threadfn+0x182/0x1a6 [rtw89_pci] irq_thread_fn+0x28/0x60 irq_thread+0xc8/0x190 ? irq_thread_fn+0x60/0x60 kthread+0x16b/0x190 ? irq_thread_check_affinity+0xe0/0xe0 ? set_kthread_struct+0x50/0x50 ret_from_fork+0x22/0x30 Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220516005215.5878-4-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/phy.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index c9a4ae989ac7..79e4c28495c8 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -2462,6 +2462,11 @@ void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; u8 macid = phy_ppdu->mac_id; + if (macid >= CFO_TRACK_MAX_USER) { + rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid); + return; + } + cfo->cfo_tail[macid] += cfo_val; cfo->cfo_cnt[macid]++; cfo->packet_count++; -- cgit v1.2.3 From aebc048d100073f8f16543ed0c4eefc11b3c0b06 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 16 May 2022 08:52:13 +0800 Subject: rtw89: 8852c: update txpwr tables to HALRF_027_00_052 Update notes: update the following to HALRF_027_00_052 TX power by rate table TX power limit table TX power limit RU table TX shape table doesn't seem to be changed on HALRF_027_00_052 Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220516005215.5878-5-pkshih@realtek.com --- .../net/wireless/realtek/rtw89/rtw8852c_table.c | 3714 ++++++++++---------- 1 file changed, 1857 insertions(+), 1857 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c index 477c46041c94..feaa83b16171 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c @@ -13678,27 +13678,27 @@ static const struct rtw89_txpwr_byrate_cfg rtw89_8852c_txpwr_byrate[] = { { 0, 1, 3, 0, 4, 0x50505050, }, { 0, 0, 4, 1, 4, 0x00000000, }, { 0, 0, 4, 0, 1, 0x00000000, }, - { 1, 0, 1, 0, 4, 0x5054585c, }, - { 1, 0, 1, 4, 4, 0x4044484c, }, - { 1, 0, 2, 0, 4, 0x4c505458, }, + { 1, 0, 1, 0, 4, 0x48484848, }, + { 1, 0, 1, 4, 4, 0x40444848, }, + { 1, 0, 2, 0, 4, 0x48484848, }, { 1, 0, 2, 4, 4, 0x3c404448, }, { 1, 0, 2, 8, 4, 0x2c303438, }, - { 1, 0, 3, 0, 4, 0x3c40484c, }, - { 1, 1, 2, 0, 4, 0x4c505458, }, + { 1, 0, 3, 0, 4, 0x48484848, }, + { 1, 1, 2, 0, 4, 0x48484848, }, { 1, 1, 2, 4, 4, 0x3c404448, }, { 1, 1, 2, 8, 4, 0x2c303438, }, - { 1, 1, 3, 0, 4, 0x3c40484c, }, + { 1, 1, 3, 0, 4, 0x48484848, }, { 1, 0, 4, 0, 4, 0x00000000, }, - { 2, 0, 1, 0, 4, 0x5054585c, }, - { 2, 0, 1, 4, 4, 0x4044484c, }, - { 2, 0, 2, 0, 4, 0x4c505458, }, - { 2, 0, 2, 4, 4, 0x3c404448, }, - { 2, 0, 2, 8, 4, 0x2c303438, }, - { 2, 0, 3, 0, 4, 0x3c40484c, }, - { 2, 1, 2, 0, 4, 0x4c505458, }, - { 2, 1, 2, 4, 4, 0x3c404448, }, - { 2, 1, 2, 8, 4, 0x2c303438, }, - { 2, 1, 3, 0, 4, 0x3c40484c, }, + { 2, 0, 1, 0, 4, 0x40404040, }, + { 2, 0, 1, 4, 4, 0x383c4040, }, + { 2, 0, 2, 0, 4, 0x40404040, }, + { 2, 0, 2, 4, 4, 0x34383c40, }, + { 2, 0, 2, 8, 4, 0x24282c30, }, + { 2, 0, 3, 0, 4, 0x40404040, }, + { 2, 1, 2, 0, 4, 0x40404040, }, + { 2, 1, 2, 4, 4, 0x34383c40, }, + { 2, 1, 2, 8, 4, 0x24282c30, }, + { 
2, 1, 3, 0, 4, 0x40404040, }, { 2, 0, 4, 0, 4, 0x00000000, }, }; @@ -13857,8 +13857,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][0][0][RTW89_WW][9] = 60, [0][0][0][0][RTW89_WW][10] = 60, [0][0][0][0][RTW89_WW][11] = 60, - [0][0][0][0][RTW89_WW][12] = 58, - [0][0][0][0][RTW89_WW][13] = 74, + [0][0][0][0][RTW89_WW][12] = 48, + [0][0][0][0][RTW89_WW][13] = 72, [0][1][0][0][RTW89_WW][0] = 48, [0][1][0][0][RTW89_WW][1] = 48, [0][1][0][0][RTW89_WW][2] = 48, @@ -13870,34 +13870,34 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][0][0][RTW89_WW][8] = 48, [0][1][0][0][RTW89_WW][9] = 48, [0][1][0][0][RTW89_WW][10] = 48, - [0][1][0][0][RTW89_WW][11] = 48, - [0][1][0][0][RTW89_WW][12] = 44, - [0][1][0][0][RTW89_WW][13] = 62, + [0][1][0][0][RTW89_WW][11] = 46, + [0][1][0][0][RTW89_WW][12] = 34, + [0][1][0][0][RTW89_WW][13] = 60, [1][0][0][0][RTW89_WW][0] = 0, [1][0][0][0][RTW89_WW][1] = 0, - [1][0][0][0][RTW89_WW][2] = 52, - [1][0][0][0][RTW89_WW][3] = 52, - [1][0][0][0][RTW89_WW][4] = 52, - [1][0][0][0][RTW89_WW][5] = 60, - [1][0][0][0][RTW89_WW][6] = 52, - [1][0][0][0][RTW89_WW][7] = 52, - [1][0][0][0][RTW89_WW][8] = 52, - [1][0][0][0][RTW89_WW][9] = 44, - [1][0][0][0][RTW89_WW][10] = 32, + [1][0][0][0][RTW89_WW][2] = 42, + [1][0][0][0][RTW89_WW][3] = 42, + [1][0][0][0][RTW89_WW][4] = 42, + [1][0][0][0][RTW89_WW][5] = 58, + [1][0][0][0][RTW89_WW][6] = 42, + [1][0][0][0][RTW89_WW][7] = 42, + [1][0][0][0][RTW89_WW][8] = 42, + [1][0][0][0][RTW89_WW][9] = 34, + [1][0][0][0][RTW89_WW][10] = 22, [1][0][0][0][RTW89_WW][11] = 0, [1][0][0][0][RTW89_WW][12] = 0, [1][0][0][0][RTW89_WW][13] = 0, [1][1][0][0][RTW89_WW][0] = 0, [1][1][0][0][RTW89_WW][1] = 0, - [1][1][0][0][RTW89_WW][2] = 48, - [1][1][0][0][RTW89_WW][3] = 48, - [1][1][0][0][RTW89_WW][4] = 48, + [1][1][0][0][RTW89_WW][2] = 38, + [1][1][0][0][RTW89_WW][3] = 38, + [1][1][0][0][RTW89_WW][4] = 38, [1][1][0][0][RTW89_WW][5] = 48, - [1][1][0][0][RTW89_WW][6] = 36, - [1][1][0][0][RTW89_WW][7] = 36, - [1][1][0][0][RTW89_WW][8] = 36, - [1][1][0][0][RTW89_WW][9] = 32, - [1][1][0][0][RTW89_WW][10] = 32, + [1][1][0][0][RTW89_WW][6] = 26, + [1][1][0][0][RTW89_WW][7] = 26, + [1][1][0][0][RTW89_WW][8] = 26, + [1][1][0][0][RTW89_WW][9] = 22, + [1][1][0][0][RTW89_WW][10] = 22, [1][1][0][0][RTW89_WW][11] = 0, [1][1][0][0][RTW89_WW][12] = 0, [1][1][0][0][RTW89_WW][13] = 0, @@ -13912,8 +13912,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][8] = 60, [0][0][1][0][RTW89_WW][9] = 60, [0][0][1][0][RTW89_WW][10] = 60, - [0][0][1][0][RTW89_WW][11] = 56, - [0][0][1][0][RTW89_WW][12] = 52, + [0][0][1][0][RTW89_WW][11] = 46, + [0][0][1][0][RTW89_WW][12] = 42, [0][0][1][0][RTW89_WW][13] = 0, [0][1][1][0][RTW89_WW][0] = 48, [0][1][1][0][RTW89_WW][1] = 48, @@ -13926,8 +13926,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][8] = 48, [0][1][1][0][RTW89_WW][9] = 48, [0][1][1][0][RTW89_WW][10] = 48, - [0][1][1][0][RTW89_WW][11] = 48, - [0][1][1][0][RTW89_WW][12] = 44, + [0][1][1][0][RTW89_WW][11] = 38, + [0][1][1][0][RTW89_WW][12] = 34, [0][1][1][0][RTW89_WW][13] = 0, [0][0][2][0][RTW89_WW][0] = 60, [0][0][2][0][RTW89_WW][1] = 60, @@ -13940,8 +13940,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][8] = 60, [0][0][2][0][RTW89_WW][9] = 60, [0][0][2][0][RTW89_WW][10] = 60, - [0][0][2][0][RTW89_WW][11] = 56, - [0][0][2][0][RTW89_WW][12] = 52, + [0][0][2][0][RTW89_WW][11] = 46, + 
[0][0][2][0][RTW89_WW][12] = 42, [0][0][2][0][RTW89_WW][13] = 0, [0][1][2][0][RTW89_WW][0] = 48, [0][1][2][0][RTW89_WW][1] = 48, @@ -13954,8 +13954,8 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_WW][8] = 48, [0][1][2][0][RTW89_WW][9] = 48, [0][1][2][0][RTW89_WW][10] = 48, - [0][1][2][0][RTW89_WW][11] = 48, - [0][1][2][0][RTW89_WW][12] = 44, + [0][1][2][0][RTW89_WW][11] = 38, + [0][1][2][0][RTW89_WW][12] = 34, [0][1][2][0][RTW89_WW][13] = 0, [0][1][2][1][RTW89_WW][0] = 36, [0][1][2][1][RTW89_WW][1] = 36, @@ -13969,7 +13969,7 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][9] = 36, [0][1][2][1][RTW89_WW][10] = 36, [0][1][2][1][RTW89_WW][11] = 36, - [0][1][2][1][RTW89_WW][12] = 36, + [0][1][2][1][RTW89_WW][12] = 34, [0][1][2][1][RTW89_WW][13] = 0, [1][0][2][0][RTW89_WW][0] = 0, [1][0][2][0][RTW89_WW][1] = 0, @@ -13981,21 +13981,21 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_WW][7] = 60, [1][0][2][0][RTW89_WW][8] = 60, [1][0][2][0][RTW89_WW][9] = 60, - [1][0][2][0][RTW89_WW][10] = 60, + [1][0][2][0][RTW89_WW][10] = 58, [1][0][2][0][RTW89_WW][11] = 0, [1][0][2][0][RTW89_WW][12] = 0, [1][0][2][0][RTW89_WW][13] = 0, [1][1][2][0][RTW89_WW][0] = 0, [1][1][2][0][RTW89_WW][1] = 0, - [1][1][2][0][RTW89_WW][2] = 48, - [1][1][2][0][RTW89_WW][3] = 48, + [1][1][2][0][RTW89_WW][2] = 46, + [1][1][2][0][RTW89_WW][3] = 46, [1][1][2][0][RTW89_WW][4] = 48, [1][1][2][0][RTW89_WW][5] = 48, [1][1][2][0][RTW89_WW][6] = 48, - [1][1][2][0][RTW89_WW][7] = 48, - [1][1][2][0][RTW89_WW][8] = 48, - [1][1][2][0][RTW89_WW][9] = 44, - [1][1][2][0][RTW89_WW][10] = 40, + [1][1][2][0][RTW89_WW][7] = 46, + [1][1][2][0][RTW89_WW][8] = 46, + [1][1][2][0][RTW89_WW][9] = 34, + [1][1][2][0][RTW89_WW][10] = 30, [1][1][2][0][RTW89_WW][11] = 0, [1][1][2][0][RTW89_WW][12] = 0, [1][1][2][0][RTW89_WW][13] = 0, @@ -14008,149 +14008,149 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_WW][6] = 36, [1][1][2][1][RTW89_WW][7] = 36, [1][1][2][1][RTW89_WW][8] = 36, - [1][1][2][1][RTW89_WW][9] = 36, - [1][1][2][1][RTW89_WW][10] = 36, + [1][1][2][1][RTW89_WW][9] = 34, + [1][1][2][1][RTW89_WW][10] = 30, [1][1][2][1][RTW89_WW][11] = 0, [1][1][2][1][RTW89_WW][12] = 0, [1][1][2][1][RTW89_WW][13] = 0, - [0][0][0][0][RTW89_FCC][0] = 80, + [0][0][0][0][RTW89_FCC][0] = 70, [0][0][0][0][RTW89_ETSI][0] = 60, - [0][0][0][0][RTW89_MKK][0] = 72, - [0][0][0][0][RTW89_IC][0] = 80, + [0][0][0][0][RTW89_MKK][0] = 68, + [0][0][0][0][RTW89_IC][0] = 74, [0][0][0][0][RTW89_ACMA][0] = 60, - [0][0][0][0][RTW89_FCC][1] = 80, + [0][0][0][0][RTW89_FCC][1] = 70, [0][0][0][0][RTW89_ETSI][1] = 60, - [0][0][0][0][RTW89_MKK][1] = 72, - [0][0][0][0][RTW89_IC][1] = 80, + [0][0][0][0][RTW89_MKK][1] = 68, + [0][0][0][0][RTW89_IC][1] = 74, [0][0][0][0][RTW89_ACMA][1] = 60, - [0][0][0][0][RTW89_FCC][2] = 80, + [0][0][0][0][RTW89_FCC][2] = 70, [0][0][0][0][RTW89_ETSI][2] = 60, - [0][0][0][0][RTW89_MKK][2] = 72, - [0][0][0][0][RTW89_IC][2] = 80, + [0][0][0][0][RTW89_MKK][2] = 68, + [0][0][0][0][RTW89_IC][2] = 74, [0][0][0][0][RTW89_ACMA][2] = 60, - [0][0][0][0][RTW89_FCC][3] = 80, + [0][0][0][0][RTW89_FCC][3] = 70, [0][0][0][0][RTW89_ETSI][3] = 60, - [0][0][0][0][RTW89_MKK][3] = 72, - [0][0][0][0][RTW89_IC][3] = 80, + [0][0][0][0][RTW89_MKK][3] = 68, + [0][0][0][0][RTW89_IC][3] = 74, [0][0][0][0][RTW89_ACMA][3] = 60, - [0][0][0][0][RTW89_FCC][4] = 80, + [0][0][0][0][RTW89_FCC][4] = 70, [0][0][0][0][RTW89_ETSI][4] = 60, 
- [0][0][0][0][RTW89_MKK][4] = 72, - [0][0][0][0][RTW89_IC][4] = 80, + [0][0][0][0][RTW89_MKK][4] = 68, + [0][0][0][0][RTW89_IC][4] = 74, [0][0][0][0][RTW89_ACMA][4] = 60, - [0][0][0][0][RTW89_FCC][5] = 80, + [0][0][0][0][RTW89_FCC][5] = 70, [0][0][0][0][RTW89_ETSI][5] = 60, - [0][0][0][0][RTW89_MKK][5] = 72, - [0][0][0][0][RTW89_IC][5] = 80, + [0][0][0][0][RTW89_MKK][5] = 68, + [0][0][0][0][RTW89_IC][5] = 74, [0][0][0][0][RTW89_ACMA][5] = 60, - [0][0][0][0][RTW89_FCC][6] = 80, + [0][0][0][0][RTW89_FCC][6] = 70, [0][0][0][0][RTW89_ETSI][6] = 60, - [0][0][0][0][RTW89_MKK][6] = 72, - [0][0][0][0][RTW89_IC][6] = 80, + [0][0][0][0][RTW89_MKK][6] = 68, + [0][0][0][0][RTW89_IC][6] = 74, [0][0][0][0][RTW89_ACMA][6] = 60, - [0][0][0][0][RTW89_FCC][7] = 80, + [0][0][0][0][RTW89_FCC][7] = 70, [0][0][0][0][RTW89_ETSI][7] = 60, - [0][0][0][0][RTW89_MKK][7] = 72, - [0][0][0][0][RTW89_IC][7] = 80, + [0][0][0][0][RTW89_MKK][7] = 68, + [0][0][0][0][RTW89_IC][7] = 74, [0][0][0][0][RTW89_ACMA][7] = 60, - [0][0][0][0][RTW89_FCC][8] = 80, + [0][0][0][0][RTW89_FCC][8] = 70, [0][0][0][0][RTW89_ETSI][8] = 60, - [0][0][0][0][RTW89_MKK][8] = 72, - [0][0][0][0][RTW89_IC][8] = 80, + [0][0][0][0][RTW89_MKK][8] = 68, + [0][0][0][0][RTW89_IC][8] = 74, [0][0][0][0][RTW89_ACMA][8] = 60, - [0][0][0][0][RTW89_FCC][9] = 80, + [0][0][0][0][RTW89_FCC][9] = 70, [0][0][0][0][RTW89_ETSI][9] = 60, - [0][0][0][0][RTW89_MKK][9] = 72, - [0][0][0][0][RTW89_IC][9] = 80, + [0][0][0][0][RTW89_MKK][9] = 68, + [0][0][0][0][RTW89_IC][9] = 74, [0][0][0][0][RTW89_ACMA][9] = 60, - [0][0][0][0][RTW89_FCC][10] = 80, + [0][0][0][0][RTW89_FCC][10] = 70, [0][0][0][0][RTW89_ETSI][10] = 60, - [0][0][0][0][RTW89_MKK][10] = 72, - [0][0][0][0][RTW89_IC][10] = 80, + [0][0][0][0][RTW89_MKK][10] = 68, + [0][0][0][0][RTW89_IC][10] = 74, [0][0][0][0][RTW89_ACMA][10] = 60, - [0][0][0][0][RTW89_FCC][11] = 72, + [0][0][0][0][RTW89_FCC][11] = 62, [0][0][0][0][RTW89_ETSI][11] = 60, - [0][0][0][0][RTW89_MKK][11] = 72, + [0][0][0][0][RTW89_MKK][11] = 68, [0][0][0][0][RTW89_IC][11] = 72, [0][0][0][0][RTW89_ACMA][11] = 60, - [0][0][0][0][RTW89_FCC][12] = 58, + [0][0][0][0][RTW89_FCC][12] = 48, [0][0][0][0][RTW89_ETSI][12] = 60, - [0][0][0][0][RTW89_MKK][12] = 72, + [0][0][0][0][RTW89_MKK][12] = 68, [0][0][0][0][RTW89_IC][12] = 58, [0][0][0][0][RTW89_ACMA][12] = 60, [0][0][0][0][RTW89_FCC][13] = 127, [0][0][0][0][RTW89_ETSI][13] = 127, - [0][0][0][0][RTW89_MKK][13] = 74, + [0][0][0][0][RTW89_MKK][13] = 72, [0][0][0][0][RTW89_IC][13] = 127, [0][0][0][0][RTW89_ACMA][13] = 127, - [0][1][0][0][RTW89_FCC][0] = 76, + [0][1][0][0][RTW89_FCC][0] = 66, [0][1][0][0][RTW89_ETSI][0] = 48, - [0][1][0][0][RTW89_MKK][0] = 60, - [0][1][0][0][RTW89_IC][0] = 76, + [0][1][0][0][RTW89_MKK][0] = 58, + [0][1][0][0][RTW89_IC][0] = 74, [0][1][0][0][RTW89_ACMA][0] = 48, - [0][1][0][0][RTW89_FCC][1] = 76, + [0][1][0][0][RTW89_FCC][1] = 66, [0][1][0][0][RTW89_ETSI][1] = 48, - [0][1][0][0][RTW89_MKK][1] = 60, - [0][1][0][0][RTW89_IC][1] = 76, + [0][1][0][0][RTW89_MKK][1] = 58, + [0][1][0][0][RTW89_IC][1] = 74, [0][1][0][0][RTW89_ACMA][1] = 48, - [0][1][0][0][RTW89_FCC][2] = 76, + [0][1][0][0][RTW89_FCC][2] = 66, [0][1][0][0][RTW89_ETSI][2] = 48, - [0][1][0][0][RTW89_MKK][2] = 60, - [0][1][0][0][RTW89_IC][2] = 76, + [0][1][0][0][RTW89_MKK][2] = 58, + [0][1][0][0][RTW89_IC][2] = 74, [0][1][0][0][RTW89_ACMA][2] = 48, - [0][1][0][0][RTW89_FCC][3] = 76, + [0][1][0][0][RTW89_FCC][3] = 66, [0][1][0][0][RTW89_ETSI][3] = 48, - [0][1][0][0][RTW89_MKK][3] = 60, - [0][1][0][0][RTW89_IC][3] = 76, + 
[0][1][0][0][RTW89_MKK][3] = 58, + [0][1][0][0][RTW89_IC][3] = 74, [0][1][0][0][RTW89_ACMA][3] = 48, - [0][1][0][0][RTW89_FCC][4] = 76, + [0][1][0][0][RTW89_FCC][4] = 66, [0][1][0][0][RTW89_ETSI][4] = 48, - [0][1][0][0][RTW89_MKK][4] = 60, - [0][1][0][0][RTW89_IC][4] = 76, + [0][1][0][0][RTW89_MKK][4] = 58, + [0][1][0][0][RTW89_IC][4] = 74, [0][1][0][0][RTW89_ACMA][4] = 48, - [0][1][0][0][RTW89_FCC][5] = 76, + [0][1][0][0][RTW89_FCC][5] = 66, [0][1][0][0][RTW89_ETSI][5] = 48, - [0][1][0][0][RTW89_MKK][5] = 60, - [0][1][0][0][RTW89_IC][5] = 76, + [0][1][0][0][RTW89_MKK][5] = 58, + [0][1][0][0][RTW89_IC][5] = 74, [0][1][0][0][RTW89_ACMA][5] = 48, - [0][1][0][0][RTW89_FCC][6] = 76, + [0][1][0][0][RTW89_FCC][6] = 66, [0][1][0][0][RTW89_ETSI][6] = 48, - [0][1][0][0][RTW89_MKK][6] = 60, - [0][1][0][0][RTW89_IC][6] = 76, + [0][1][0][0][RTW89_MKK][6] = 58, + [0][1][0][0][RTW89_IC][6] = 74, [0][1][0][0][RTW89_ACMA][6] = 48, - [0][1][0][0][RTW89_FCC][7] = 76, + [0][1][0][0][RTW89_FCC][7] = 66, [0][1][0][0][RTW89_ETSI][7] = 48, - [0][1][0][0][RTW89_MKK][7] = 60, - [0][1][0][0][RTW89_IC][7] = 76, + [0][1][0][0][RTW89_MKK][7] = 58, + [0][1][0][0][RTW89_IC][7] = 74, [0][1][0][0][RTW89_ACMA][7] = 48, - [0][1][0][0][RTW89_FCC][8] = 76, + [0][1][0][0][RTW89_FCC][8] = 66, [0][1][0][0][RTW89_ETSI][8] = 48, - [0][1][0][0][RTW89_MKK][8] = 60, - [0][1][0][0][RTW89_IC][8] = 76, + [0][1][0][0][RTW89_MKK][8] = 58, + [0][1][0][0][RTW89_IC][8] = 74, [0][1][0][0][RTW89_ACMA][8] = 48, - [0][1][0][0][RTW89_FCC][9] = 76, + [0][1][0][0][RTW89_FCC][9] = 66, [0][1][0][0][RTW89_ETSI][9] = 48, - [0][1][0][0][RTW89_MKK][9] = 60, - [0][1][0][0][RTW89_IC][9] = 76, + [0][1][0][0][RTW89_MKK][9] = 58, + [0][1][0][0][RTW89_IC][9] = 74, [0][1][0][0][RTW89_ACMA][9] = 48, - [0][1][0][0][RTW89_FCC][10] = 76, + [0][1][0][0][RTW89_FCC][10] = 66, [0][1][0][0][RTW89_ETSI][10] = 48, - [0][1][0][0][RTW89_MKK][10] = 60, - [0][1][0][0][RTW89_IC][10] = 76, + [0][1][0][0][RTW89_MKK][10] = 58, + [0][1][0][0][RTW89_IC][10] = 74, [0][1][0][0][RTW89_ACMA][10] = 48, - [0][1][0][0][RTW89_FCC][11] = 56, + [0][1][0][0][RTW89_FCC][11] = 46, [0][1][0][0][RTW89_ETSI][11] = 48, - [0][1][0][0][RTW89_MKK][11] = 60, + [0][1][0][0][RTW89_MKK][11] = 58, [0][1][0][0][RTW89_IC][11] = 56, [0][1][0][0][RTW89_ACMA][11] = 48, - [0][1][0][0][RTW89_FCC][12] = 44, + [0][1][0][0][RTW89_FCC][12] = 34, [0][1][0][0][RTW89_ETSI][12] = 48, - [0][1][0][0][RTW89_MKK][12] = 60, + [0][1][0][0][RTW89_MKK][12] = 58, [0][1][0][0][RTW89_IC][12] = 44, [0][1][0][0][RTW89_ACMA][12] = 48, [0][1][0][0][RTW89_FCC][13] = 127, [0][1][0][0][RTW89_ETSI][13] = 127, - [0][1][0][0][RTW89_MKK][13] = 62, + [0][1][0][0][RTW89_MKK][13] = 60, [0][1][0][0][RTW89_IC][13] = 127, [0][1][0][0][RTW89_ACMA][13] = 127, [1][0][0][0][RTW89_FCC][0] = 127, @@ -14163,49 +14163,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][0][0][RTW89_MKK][1] = 127, [1][0][0][0][RTW89_IC][1] = 127, [1][0][0][0][RTW89_ACMA][1] = 127, - [1][0][0][0][RTW89_FCC][2] = 52, + [1][0][0][0][RTW89_FCC][2] = 42, [1][0][0][0][RTW89_ETSI][2] = 60, - [1][0][0][0][RTW89_MKK][2] = 72, + [1][0][0][0][RTW89_MKK][2] = 66, [1][0][0][0][RTW89_IC][2] = 52, [1][0][0][0][RTW89_ACMA][2] = 60, - [1][0][0][0][RTW89_FCC][3] = 52, + [1][0][0][0][RTW89_FCC][3] = 42, [1][0][0][0][RTW89_ETSI][3] = 60, - [1][0][0][0][RTW89_MKK][3] = 72, + [1][0][0][0][RTW89_MKK][3] = 66, [1][0][0][0][RTW89_IC][3] = 52, [1][0][0][0][RTW89_ACMA][3] = 60, - [1][0][0][0][RTW89_FCC][4] = 52, + [1][0][0][0][RTW89_FCC][4] = 42, [1][0][0][0][RTW89_ETSI][4] 
= 60, - [1][0][0][0][RTW89_MKK][4] = 72, + [1][0][0][0][RTW89_MKK][4] = 66, [1][0][0][0][RTW89_IC][4] = 52, [1][0][0][0][RTW89_ACMA][4] = 60, - [1][0][0][0][RTW89_FCC][5] = 68, + [1][0][0][0][RTW89_FCC][5] = 58, [1][0][0][0][RTW89_ETSI][5] = 60, - [1][0][0][0][RTW89_MKK][5] = 72, + [1][0][0][0][RTW89_MKK][5] = 66, [1][0][0][0][RTW89_IC][5] = 68, [1][0][0][0][RTW89_ACMA][5] = 60, - [1][0][0][0][RTW89_FCC][6] = 52, + [1][0][0][0][RTW89_FCC][6] = 42, [1][0][0][0][RTW89_ETSI][6] = 60, - [1][0][0][0][RTW89_MKK][6] = 72, + [1][0][0][0][RTW89_MKK][6] = 66, [1][0][0][0][RTW89_IC][6] = 52, [1][0][0][0][RTW89_ACMA][6] = 60, - [1][0][0][0][RTW89_FCC][7] = 52, + [1][0][0][0][RTW89_FCC][7] = 42, [1][0][0][0][RTW89_ETSI][7] = 60, - [1][0][0][0][RTW89_MKK][7] = 72, + [1][0][0][0][RTW89_MKK][7] = 66, [1][0][0][0][RTW89_IC][7] = 52, [1][0][0][0][RTW89_ACMA][7] = 60, - [1][0][0][0][RTW89_FCC][8] = 52, + [1][0][0][0][RTW89_FCC][8] = 42, [1][0][0][0][RTW89_ETSI][8] = 60, - [1][0][0][0][RTW89_MKK][8] = 72, + [1][0][0][0][RTW89_MKK][8] = 66, [1][0][0][0][RTW89_IC][8] = 52, [1][0][0][0][RTW89_ACMA][8] = 60, - [1][0][0][0][RTW89_FCC][9] = 44, + [1][0][0][0][RTW89_FCC][9] = 34, [1][0][0][0][RTW89_ETSI][9] = 60, - [1][0][0][0][RTW89_MKK][9] = 72, + [1][0][0][0][RTW89_MKK][9] = 66, [1][0][0][0][RTW89_IC][9] = 44, [1][0][0][0][RTW89_ACMA][9] = 60, - [1][0][0][0][RTW89_FCC][10] = 32, + [1][0][0][0][RTW89_FCC][10] = 22, [1][0][0][0][RTW89_ETSI][10] = 60, - [1][0][0][0][RTW89_MKK][10] = 70, + [1][0][0][0][RTW89_MKK][10] = 66, [1][0][0][0][RTW89_IC][10] = 32, [1][0][0][0][RTW89_ACMA][10] = 60, [1][0][0][0][RTW89_FCC][11] = 127, @@ -14233,49 +14233,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MKK][1] = 127, [1][1][0][0][RTW89_IC][1] = 127, [1][1][0][0][RTW89_ACMA][1] = 127, - [1][1][0][0][RTW89_FCC][2] = 48, + [1][1][0][0][RTW89_FCC][2] = 38, [1][1][0][0][RTW89_ETSI][2] = 48, - [1][1][0][0][RTW89_MKK][2] = 60, + [1][1][0][0][RTW89_MKK][2] = 58, [1][1][0][0][RTW89_IC][2] = 48, [1][1][0][0][RTW89_ACMA][2] = 48, - [1][1][0][0][RTW89_FCC][3] = 48, + [1][1][0][0][RTW89_FCC][3] = 38, [1][1][0][0][RTW89_ETSI][3] = 48, - [1][1][0][0][RTW89_MKK][3] = 60, + [1][1][0][0][RTW89_MKK][3] = 58, [1][1][0][0][RTW89_IC][3] = 48, [1][1][0][0][RTW89_ACMA][3] = 48, - [1][1][0][0][RTW89_FCC][4] = 48, + [1][1][0][0][RTW89_FCC][4] = 38, [1][1][0][0][RTW89_ETSI][4] = 48, - [1][1][0][0][RTW89_MKK][4] = 60, + [1][1][0][0][RTW89_MKK][4] = 58, [1][1][0][0][RTW89_IC][4] = 48, [1][1][0][0][RTW89_ACMA][4] = 48, - [1][1][0][0][RTW89_FCC][5] = 64, + [1][1][0][0][RTW89_FCC][5] = 54, [1][1][0][0][RTW89_ETSI][5] = 48, - [1][1][0][0][RTW89_MKK][5] = 60, + [1][1][0][0][RTW89_MKK][5] = 58, [1][1][0][0][RTW89_IC][5] = 64, [1][1][0][0][RTW89_ACMA][5] = 48, - [1][1][0][0][RTW89_FCC][6] = 36, + [1][1][0][0][RTW89_FCC][6] = 26, [1][1][0][0][RTW89_ETSI][6] = 48, - [1][1][0][0][RTW89_MKK][6] = 60, + [1][1][0][0][RTW89_MKK][6] = 58, [1][1][0][0][RTW89_IC][6] = 36, [1][1][0][0][RTW89_ACMA][6] = 48, - [1][1][0][0][RTW89_FCC][7] = 36, + [1][1][0][0][RTW89_FCC][7] = 26, [1][1][0][0][RTW89_ETSI][7] = 48, - [1][1][0][0][RTW89_MKK][7] = 60, + [1][1][0][0][RTW89_MKK][7] = 58, [1][1][0][0][RTW89_IC][7] = 36, [1][1][0][0][RTW89_ACMA][7] = 48, - [1][1][0][0][RTW89_FCC][8] = 36, + [1][1][0][0][RTW89_FCC][8] = 26, [1][1][0][0][RTW89_ETSI][8] = 48, - [1][1][0][0][RTW89_MKK][8] = 60, + [1][1][0][0][RTW89_MKK][8] = 58, [1][1][0][0][RTW89_IC][8] = 36, [1][1][0][0][RTW89_ACMA][8] = 48, - [1][1][0][0][RTW89_FCC][9] = 32, + 
[1][1][0][0][RTW89_FCC][9] = 22, [1][1][0][0][RTW89_ETSI][9] = 48, - [1][1][0][0][RTW89_MKK][9] = 60, + [1][1][0][0][RTW89_MKK][9] = 58, [1][1][0][0][RTW89_IC][9] = 32, [1][1][0][0][RTW89_ACMA][9] = 48, - [1][1][0][0][RTW89_FCC][10] = 32, + [1][1][0][0][RTW89_FCC][10] = 22, [1][1][0][0][RTW89_ETSI][10] = 48, - [1][1][0][0][RTW89_MKK][10] = 58, + [1][1][0][0][RTW89_MKK][10] = 56, [1][1][0][0][RTW89_IC][10] = 32, [1][1][0][0][RTW89_ACMA][10] = 48, [1][1][0][0][RTW89_FCC][11] = 127, @@ -14293,69 +14293,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][0][0][RTW89_MKK][13] = 127, [1][1][0][0][RTW89_IC][13] = 127, [1][1][0][0][RTW89_ACMA][13] = 127, - [0][0][1][0][RTW89_FCC][0] = 78, + [0][0][1][0][RTW89_FCC][0] = 68, [0][0][1][0][RTW89_ETSI][0] = 60, [0][0][1][0][RTW89_MKK][0] = 76, [0][0][1][0][RTW89_IC][0] = 78, [0][0][1][0][RTW89_ACMA][0] = 60, - [0][0][1][0][RTW89_FCC][1] = 78, + [0][0][1][0][RTW89_FCC][1] = 68, [0][0][1][0][RTW89_ETSI][1] = 60, - [0][0][1][0][RTW89_MKK][1] = 76, + [0][0][1][0][RTW89_MKK][1] = 78, [0][0][1][0][RTW89_IC][1] = 78, [0][0][1][0][RTW89_ACMA][1] = 60, - [0][0][1][0][RTW89_FCC][2] = 80, + [0][0][1][0][RTW89_FCC][2] = 70, [0][0][1][0][RTW89_ETSI][2] = 60, - [0][0][1][0][RTW89_MKK][2] = 76, - [0][0][1][0][RTW89_IC][2] = 80, + [0][0][1][0][RTW89_MKK][2] = 78, + [0][0][1][0][RTW89_IC][2] = 78, [0][0][1][0][RTW89_ACMA][2] = 60, - [0][0][1][0][RTW89_FCC][3] = 80, + [0][0][1][0][RTW89_FCC][3] = 70, [0][0][1][0][RTW89_ETSI][3] = 60, - [0][0][1][0][RTW89_MKK][3] = 76, - [0][0][1][0][RTW89_IC][3] = 80, + [0][0][1][0][RTW89_MKK][3] = 78, + [0][0][1][0][RTW89_IC][3] = 78, [0][0][1][0][RTW89_ACMA][3] = 60, - [0][0][1][0][RTW89_FCC][4] = 80, + [0][0][1][0][RTW89_FCC][4] = 70, [0][0][1][0][RTW89_ETSI][4] = 60, - [0][0][1][0][RTW89_MKK][4] = 76, - [0][0][1][0][RTW89_IC][4] = 80, + [0][0][1][0][RTW89_MKK][4] = 78, + [0][0][1][0][RTW89_IC][4] = 78, [0][0][1][0][RTW89_ACMA][4] = 60, - [0][0][1][0][RTW89_FCC][5] = 80, + [0][0][1][0][RTW89_FCC][5] = 70, [0][0][1][0][RTW89_ETSI][5] = 60, - [0][0][1][0][RTW89_MKK][5] = 76, - [0][0][1][0][RTW89_IC][5] = 80, + [0][0][1][0][RTW89_MKK][5] = 78, + [0][0][1][0][RTW89_IC][5] = 78, [0][0][1][0][RTW89_ACMA][5] = 60, - [0][0][1][0][RTW89_FCC][6] = 80, + [0][0][1][0][RTW89_FCC][6] = 70, [0][0][1][0][RTW89_ETSI][6] = 60, [0][0][1][0][RTW89_MKK][6] = 76, - [0][0][1][0][RTW89_IC][6] = 80, + [0][0][1][0][RTW89_IC][6] = 78, [0][0][1][0][RTW89_ACMA][6] = 60, - [0][0][1][0][RTW89_FCC][7] = 80, + [0][0][1][0][RTW89_FCC][7] = 70, [0][0][1][0][RTW89_ETSI][7] = 60, - [0][0][1][0][RTW89_MKK][7] = 76, - [0][0][1][0][RTW89_IC][7] = 80, + [0][0][1][0][RTW89_MKK][7] = 78, + [0][0][1][0][RTW89_IC][7] = 78, [0][0][1][0][RTW89_ACMA][7] = 60, - [0][0][1][0][RTW89_FCC][8] = 80, + [0][0][1][0][RTW89_FCC][8] = 70, [0][0][1][0][RTW89_ETSI][8] = 60, - [0][0][1][0][RTW89_MKK][8] = 76, - [0][0][1][0][RTW89_IC][8] = 80, + [0][0][1][0][RTW89_MKK][8] = 78, + [0][0][1][0][RTW89_IC][8] = 78, [0][0][1][0][RTW89_ACMA][8] = 60, - [0][0][1][0][RTW89_FCC][9] = 76, + [0][0][1][0][RTW89_FCC][9] = 66, [0][0][1][0][RTW89_ETSI][9] = 60, - [0][0][1][0][RTW89_MKK][9] = 76, + [0][0][1][0][RTW89_MKK][9] = 78, [0][0][1][0][RTW89_IC][9] = 76, [0][0][1][0][RTW89_ACMA][9] = 60, - [0][0][1][0][RTW89_FCC][10] = 76, + [0][0][1][0][RTW89_FCC][10] = 66, [0][0][1][0][RTW89_ETSI][10] = 60, - [0][0][1][0][RTW89_MKK][10] = 76, + [0][0][1][0][RTW89_MKK][10] = 78, [0][0][1][0][RTW89_IC][10] = 76, [0][0][1][0][RTW89_ACMA][10] = 60, - [0][0][1][0][RTW89_FCC][11] = 56, + 
[0][0][1][0][RTW89_FCC][11] = 46, [0][0][1][0][RTW89_ETSI][11] = 60, - [0][0][1][0][RTW89_MKK][11] = 76, + [0][0][1][0][RTW89_MKK][11] = 78, [0][0][1][0][RTW89_IC][11] = 56, [0][0][1][0][RTW89_ACMA][11] = 60, - [0][0][1][0][RTW89_FCC][12] = 52, + [0][0][1][0][RTW89_FCC][12] = 42, [0][0][1][0][RTW89_ETSI][12] = 60, - [0][0][1][0][RTW89_MKK][12] = 76, + [0][0][1][0][RTW89_MKK][12] = 78, [0][0][1][0][RTW89_IC][12] = 52, [0][0][1][0][RTW89_ACMA][12] = 60, [0][0][1][0][RTW89_FCC][13] = 127, @@ -14363,69 +14363,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_MKK][13] = 127, [0][0][1][0][RTW89_IC][13] = 127, [0][0][1][0][RTW89_ACMA][13] = 127, - [0][1][1][0][RTW89_FCC][0] = 64, + [0][1][1][0][RTW89_FCC][0] = 54, [0][1][1][0][RTW89_ETSI][0] = 48, - [0][1][1][0][RTW89_MKK][0] = 68, + [0][1][1][0][RTW89_MKK][0] = 66, [0][1][1][0][RTW89_IC][0] = 64, [0][1][1][0][RTW89_ACMA][0] = 48, - [0][1][1][0][RTW89_FCC][1] = 64, + [0][1][1][0][RTW89_FCC][1] = 54, [0][1][1][0][RTW89_ETSI][1] = 48, - [0][1][1][0][RTW89_MKK][1] = 68, + [0][1][1][0][RTW89_MKK][1] = 66, [0][1][1][0][RTW89_IC][1] = 64, [0][1][1][0][RTW89_ACMA][1] = 48, - [0][1][1][0][RTW89_FCC][2] = 68, + [0][1][1][0][RTW89_FCC][2] = 58, [0][1][1][0][RTW89_ETSI][2] = 48, - [0][1][1][0][RTW89_MKK][2] = 68, + [0][1][1][0][RTW89_MKK][2] = 66, [0][1][1][0][RTW89_IC][2] = 68, [0][1][1][0][RTW89_ACMA][2] = 48, - [0][1][1][0][RTW89_FCC][3] = 72, + [0][1][1][0][RTW89_FCC][3] = 62, [0][1][1][0][RTW89_ETSI][3] = 48, - [0][1][1][0][RTW89_MKK][3] = 68, + [0][1][1][0][RTW89_MKK][3] = 66, [0][1][1][0][RTW89_IC][3] = 72, [0][1][1][0][RTW89_ACMA][3] = 48, - [0][1][1][0][RTW89_FCC][4] = 80, + [0][1][1][0][RTW89_FCC][4] = 70, [0][1][1][0][RTW89_ETSI][4] = 48, - [0][1][1][0][RTW89_MKK][4] = 68, - [0][1][1][0][RTW89_IC][4] = 80, + [0][1][1][0][RTW89_MKK][4] = 66, + [0][1][1][0][RTW89_IC][4] = 78, [0][1][1][0][RTW89_ACMA][4] = 48, - [0][1][1][0][RTW89_FCC][5] = 80, + [0][1][1][0][RTW89_FCC][5] = 70, [0][1][1][0][RTW89_ETSI][5] = 48, - [0][1][1][0][RTW89_MKK][5] = 68, - [0][1][1][0][RTW89_IC][5] = 80, + [0][1][1][0][RTW89_MKK][5] = 66, + [0][1][1][0][RTW89_IC][5] = 78, [0][1][1][0][RTW89_ACMA][5] = 48, - [0][1][1][0][RTW89_FCC][6] = 80, + [0][1][1][0][RTW89_FCC][6] = 70, [0][1][1][0][RTW89_ETSI][6] = 48, - [0][1][1][0][RTW89_MKK][6] = 68, - [0][1][1][0][RTW89_IC][6] = 80, + [0][1][1][0][RTW89_MKK][6] = 66, + [0][1][1][0][RTW89_IC][6] = 78, [0][1][1][0][RTW89_ACMA][6] = 48, - [0][1][1][0][RTW89_FCC][7] = 72, + [0][1][1][0][RTW89_FCC][7] = 62, [0][1][1][0][RTW89_ETSI][7] = 48, - [0][1][1][0][RTW89_MKK][7] = 68, + [0][1][1][0][RTW89_MKK][7] = 66, [0][1][1][0][RTW89_IC][7] = 72, [0][1][1][0][RTW89_ACMA][7] = 48, - [0][1][1][0][RTW89_FCC][8] = 68, + [0][1][1][0][RTW89_FCC][8] = 58, [0][1][1][0][RTW89_ETSI][8] = 48, - [0][1][1][0][RTW89_MKK][8] = 68, + [0][1][1][0][RTW89_MKK][8] = 66, [0][1][1][0][RTW89_IC][8] = 68, [0][1][1][0][RTW89_ACMA][8] = 48, - [0][1][1][0][RTW89_FCC][9] = 64, + [0][1][1][0][RTW89_FCC][9] = 54, [0][1][1][0][RTW89_ETSI][9] = 48, - [0][1][1][0][RTW89_MKK][9] = 68, + [0][1][1][0][RTW89_MKK][9] = 66, [0][1][1][0][RTW89_IC][9] = 64, [0][1][1][0][RTW89_ACMA][9] = 48, - [0][1][1][0][RTW89_FCC][10] = 64, + [0][1][1][0][RTW89_FCC][10] = 54, [0][1][1][0][RTW89_ETSI][10] = 48, - [0][1][1][0][RTW89_MKK][10] = 68, + [0][1][1][0][RTW89_MKK][10] = 66, [0][1][1][0][RTW89_IC][10] = 64, [0][1][1][0][RTW89_ACMA][10] = 48, - [0][1][1][0][RTW89_FCC][11] = 48, + [0][1][1][0][RTW89_FCC][11] = 38, [0][1][1][0][RTW89_ETSI][11] = 48, - 
[0][1][1][0][RTW89_MKK][11] = 68, + [0][1][1][0][RTW89_MKK][11] = 66, [0][1][1][0][RTW89_IC][11] = 48, [0][1][1][0][RTW89_ACMA][11] = 48, - [0][1][1][0][RTW89_FCC][12] = 44, + [0][1][1][0][RTW89_FCC][12] = 34, [0][1][1][0][RTW89_ETSI][12] = 48, - [0][1][1][0][RTW89_MKK][12] = 68, + [0][1][1][0][RTW89_MKK][12] = 66, [0][1][1][0][RTW89_IC][12] = 44, [0][1][1][0][RTW89_ACMA][12] = 48, [0][1][1][0][RTW89_FCC][13] = 127, @@ -14433,69 +14433,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_MKK][13] = 127, [0][1][1][0][RTW89_IC][13] = 127, [0][1][1][0][RTW89_ACMA][13] = 127, - [0][0][2][0][RTW89_FCC][0] = 78, + [0][0][2][0][RTW89_FCC][0] = 68, [0][0][2][0][RTW89_ETSI][0] = 60, - [0][0][2][0][RTW89_MKK][0] = 76, + [0][0][2][0][RTW89_MKK][0] = 78, [0][0][2][0][RTW89_IC][0] = 78, [0][0][2][0][RTW89_ACMA][0] = 60, - [0][0][2][0][RTW89_FCC][1] = 78, + [0][0][2][0][RTW89_FCC][1] = 68, [0][0][2][0][RTW89_ETSI][1] = 60, - [0][0][2][0][RTW89_MKK][1] = 76, + [0][0][2][0][RTW89_MKK][1] = 78, [0][0][2][0][RTW89_IC][1] = 78, [0][0][2][0][RTW89_ACMA][1] = 60, - [0][0][2][0][RTW89_FCC][2] = 80, + [0][0][2][0][RTW89_FCC][2] = 70, [0][0][2][0][RTW89_ETSI][2] = 60, - [0][0][2][0][RTW89_MKK][2] = 76, - [0][0][2][0][RTW89_IC][2] = 80, + [0][0][2][0][RTW89_MKK][2] = 78, + [0][0][2][0][RTW89_IC][2] = 78, [0][0][2][0][RTW89_ACMA][2] = 60, - [0][0][2][0][RTW89_FCC][3] = 80, + [0][0][2][0][RTW89_FCC][3] = 70, [0][0][2][0][RTW89_ETSI][3] = 60, - [0][0][2][0][RTW89_MKK][3] = 76, - [0][0][2][0][RTW89_IC][3] = 80, + [0][0][2][0][RTW89_MKK][3] = 78, + [0][0][2][0][RTW89_IC][3] = 78, [0][0][2][0][RTW89_ACMA][3] = 60, - [0][0][2][0][RTW89_FCC][4] = 80, + [0][0][2][0][RTW89_FCC][4] = 70, [0][0][2][0][RTW89_ETSI][4] = 60, - [0][0][2][0][RTW89_MKK][4] = 76, - [0][0][2][0][RTW89_IC][4] = 80, + [0][0][2][0][RTW89_MKK][4] = 78, + [0][0][2][0][RTW89_IC][4] = 78, [0][0][2][0][RTW89_ACMA][4] = 60, - [0][0][2][0][RTW89_FCC][5] = 80, + [0][0][2][0][RTW89_FCC][5] = 70, [0][0][2][0][RTW89_ETSI][5] = 60, - [0][0][2][0][RTW89_MKK][5] = 76, - [0][0][2][0][RTW89_IC][5] = 80, + [0][0][2][0][RTW89_MKK][5] = 78, + [0][0][2][0][RTW89_IC][5] = 78, [0][0][2][0][RTW89_ACMA][5] = 60, - [0][0][2][0][RTW89_FCC][6] = 80, + [0][0][2][0][RTW89_FCC][6] = 70, [0][0][2][0][RTW89_ETSI][6] = 60, - [0][0][2][0][RTW89_MKK][6] = 76, - [0][0][2][0][RTW89_IC][6] = 80, + [0][0][2][0][RTW89_MKK][6] = 78, + [0][0][2][0][RTW89_IC][6] = 78, [0][0][2][0][RTW89_ACMA][6] = 60, - [0][0][2][0][RTW89_FCC][7] = 80, + [0][0][2][0][RTW89_FCC][7] = 70, [0][0][2][0][RTW89_ETSI][7] = 60, - [0][0][2][0][RTW89_MKK][7] = 76, - [0][0][2][0][RTW89_IC][7] = 80, + [0][0][2][0][RTW89_MKK][7] = 78, + [0][0][2][0][RTW89_IC][7] = 78, [0][0][2][0][RTW89_ACMA][7] = 60, - [0][0][2][0][RTW89_FCC][8] = 78, + [0][0][2][0][RTW89_FCC][8] = 68, [0][0][2][0][RTW89_ETSI][8] = 60, - [0][0][2][0][RTW89_MKK][8] = 76, + [0][0][2][0][RTW89_MKK][8] = 78, [0][0][2][0][RTW89_IC][8] = 78, [0][0][2][0][RTW89_ACMA][8] = 60, - [0][0][2][0][RTW89_FCC][9] = 74, + [0][0][2][0][RTW89_FCC][9] = 64, [0][0][2][0][RTW89_ETSI][9] = 60, - [0][0][2][0][RTW89_MKK][9] = 76, + [0][0][2][0][RTW89_MKK][9] = 78, [0][0][2][0][RTW89_IC][9] = 74, [0][0][2][0][RTW89_ACMA][9] = 60, - [0][0][2][0][RTW89_FCC][10] = 74, + [0][0][2][0][RTW89_FCC][10] = 64, [0][0][2][0][RTW89_ETSI][10] = 60, - [0][0][2][0][RTW89_MKK][10] = 76, + [0][0][2][0][RTW89_MKK][10] = 78, [0][0][2][0][RTW89_IC][10] = 74, [0][0][2][0][RTW89_ACMA][10] = 60, - [0][0][2][0][RTW89_FCC][11] = 56, + [0][0][2][0][RTW89_FCC][11] = 46, 
[0][0][2][0][RTW89_ETSI][11] = 60, - [0][0][2][0][RTW89_MKK][11] = 76, + [0][0][2][0][RTW89_MKK][11] = 78, [0][0][2][0][RTW89_IC][11] = 56, [0][0][2][0][RTW89_ACMA][11] = 60, - [0][0][2][0][RTW89_FCC][12] = 52, + [0][0][2][0][RTW89_FCC][12] = 42, [0][0][2][0][RTW89_ETSI][12] = 60, - [0][0][2][0][RTW89_MKK][12] = 76, + [0][0][2][0][RTW89_MKK][12] = 78, [0][0][2][0][RTW89_IC][12] = 52, [0][0][2][0][RTW89_ACMA][12] = 60, [0][0][2][0][RTW89_FCC][13] = 127, @@ -14503,69 +14503,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_MKK][13] = 127, [0][0][2][0][RTW89_IC][13] = 127, [0][0][2][0][RTW89_ACMA][13] = 127, - [0][1][2][0][RTW89_FCC][0] = 60, + [0][1][2][0][RTW89_FCC][0] = 50, [0][1][2][0][RTW89_ETSI][0] = 48, - [0][1][2][0][RTW89_MKK][0] = 70, + [0][1][2][0][RTW89_MKK][0] = 68, [0][1][2][0][RTW89_IC][0] = 60, [0][1][2][0][RTW89_ACMA][0] = 48, - [0][1][2][0][RTW89_FCC][1] = 60, + [0][1][2][0][RTW89_FCC][1] = 50, [0][1][2][0][RTW89_ETSI][1] = 48, - [0][1][2][0][RTW89_MKK][1] = 70, + [0][1][2][0][RTW89_MKK][1] = 68, [0][1][2][0][RTW89_IC][1] = 60, [0][1][2][0][RTW89_ACMA][1] = 48, - [0][1][2][0][RTW89_FCC][2] = 64, + [0][1][2][0][RTW89_FCC][2] = 54, [0][1][2][0][RTW89_ETSI][2] = 48, - [0][1][2][0][RTW89_MKK][2] = 70, + [0][1][2][0][RTW89_MKK][2] = 68, [0][1][2][0][RTW89_IC][2] = 64, [0][1][2][0][RTW89_ACMA][2] = 48, - [0][1][2][0][RTW89_FCC][3] = 68, + [0][1][2][0][RTW89_FCC][3] = 58, [0][1][2][0][RTW89_ETSI][3] = 48, - [0][1][2][0][RTW89_MKK][3] = 70, + [0][1][2][0][RTW89_MKK][3] = 68, [0][1][2][0][RTW89_IC][3] = 68, [0][1][2][0][RTW89_ACMA][3] = 48, - [0][1][2][0][RTW89_FCC][4] = 74, + [0][1][2][0][RTW89_FCC][4] = 64, [0][1][2][0][RTW89_ETSI][4] = 48, - [0][1][2][0][RTW89_MKK][4] = 70, + [0][1][2][0][RTW89_MKK][4] = 68, [0][1][2][0][RTW89_IC][4] = 74, [0][1][2][0][RTW89_ACMA][4] = 48, - [0][1][2][0][RTW89_FCC][5] = 80, + [0][1][2][0][RTW89_FCC][5] = 70, [0][1][2][0][RTW89_ETSI][5] = 48, - [0][1][2][0][RTW89_MKK][5] = 70, - [0][1][2][0][RTW89_IC][5] = 80, + [0][1][2][0][RTW89_MKK][5] = 68, + [0][1][2][0][RTW89_IC][5] = 78, [0][1][2][0][RTW89_ACMA][5] = 48, - [0][1][2][0][RTW89_FCC][6] = 76, + [0][1][2][0][RTW89_FCC][6] = 66, [0][1][2][0][RTW89_ETSI][6] = 48, - [0][1][2][0][RTW89_MKK][6] = 70, + [0][1][2][0][RTW89_MKK][6] = 68, [0][1][2][0][RTW89_IC][6] = 76, [0][1][2][0][RTW89_ACMA][6] = 48, - [0][1][2][0][RTW89_FCC][7] = 68, + [0][1][2][0][RTW89_FCC][7] = 58, [0][1][2][0][RTW89_ETSI][7] = 48, - [0][1][2][0][RTW89_MKK][7] = 70, + [0][1][2][0][RTW89_MKK][7] = 68, [0][1][2][0][RTW89_IC][7] = 68, [0][1][2][0][RTW89_ACMA][7] = 48, - [0][1][2][0][RTW89_FCC][8] = 64, + [0][1][2][0][RTW89_FCC][8] = 54, [0][1][2][0][RTW89_ETSI][8] = 48, - [0][1][2][0][RTW89_MKK][8] = 70, + [0][1][2][0][RTW89_MKK][8] = 68, [0][1][2][0][RTW89_IC][8] = 64, [0][1][2][0][RTW89_ACMA][8] = 48, - [0][1][2][0][RTW89_FCC][9] = 60, + [0][1][2][0][RTW89_FCC][9] = 50, [0][1][2][0][RTW89_ETSI][9] = 48, - [0][1][2][0][RTW89_MKK][9] = 70, + [0][1][2][0][RTW89_MKK][9] = 68, [0][1][2][0][RTW89_IC][9] = 60, [0][1][2][0][RTW89_ACMA][9] = 48, - [0][1][2][0][RTW89_FCC][10] = 60, + [0][1][2][0][RTW89_FCC][10] = 50, [0][1][2][0][RTW89_ETSI][10] = 48, - [0][1][2][0][RTW89_MKK][10] = 70, + [0][1][2][0][RTW89_MKK][10] = 68, [0][1][2][0][RTW89_IC][10] = 60, [0][1][2][0][RTW89_ACMA][10] = 48, - [0][1][2][0][RTW89_FCC][11] = 48, + [0][1][2][0][RTW89_FCC][11] = 38, [0][1][2][0][RTW89_ETSI][11] = 48, - [0][1][2][0][RTW89_MKK][11] = 70, + [0][1][2][0][RTW89_MKK][11] = 68, [0][1][2][0][RTW89_IC][11] = 48, 
[0][1][2][0][RTW89_ACMA][11] = 48, - [0][1][2][0][RTW89_FCC][12] = 44, + [0][1][2][0][RTW89_FCC][12] = 34, [0][1][2][0][RTW89_ETSI][12] = 48, - [0][1][2][0][RTW89_MKK][12] = 70, + [0][1][2][0][RTW89_MKK][12] = 68, [0][1][2][0][RTW89_IC][12] = 44, [0][1][2][0][RTW89_ACMA][12] = 48, [0][1][2][0][RTW89_FCC][13] = 127, @@ -14573,69 +14573,69 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_MKK][13] = 127, [0][1][2][0][RTW89_IC][13] = 127, [0][1][2][0][RTW89_ACMA][13] = 127, - [0][1][2][1][RTW89_FCC][0] = 60, - [0][1][2][1][RTW89_ETSI][0] = 38, - [0][1][2][1][RTW89_MKK][0] = 58, + [0][1][2][1][RTW89_FCC][0] = 50, + [0][1][2][1][RTW89_ETSI][0] = 36, + [0][1][2][1][RTW89_MKK][0] = 68, [0][1][2][1][RTW89_IC][0] = 60, [0][1][2][1][RTW89_ACMA][0] = 36, - [0][1][2][1][RTW89_FCC][1] = 60, - [0][1][2][1][RTW89_ETSI][1] = 38, - [0][1][2][1][RTW89_MKK][1] = 58, + [0][1][2][1][RTW89_FCC][1] = 50, + [0][1][2][1][RTW89_ETSI][1] = 36, + [0][1][2][1][RTW89_MKK][1] = 68, [0][1][2][1][RTW89_IC][1] = 60, [0][1][2][1][RTW89_ACMA][1] = 36, - [0][1][2][1][RTW89_FCC][2] = 64, - [0][1][2][1][RTW89_ETSI][2] = 38, - [0][1][2][1][RTW89_MKK][2] = 58, + [0][1][2][1][RTW89_FCC][2] = 54, + [0][1][2][1][RTW89_ETSI][2] = 36, + [0][1][2][1][RTW89_MKK][2] = 68, [0][1][2][1][RTW89_IC][2] = 64, [0][1][2][1][RTW89_ACMA][2] = 36, - [0][1][2][1][RTW89_FCC][3] = 68, - [0][1][2][1][RTW89_ETSI][3] = 38, - [0][1][2][1][RTW89_MKK][3] = 58, + [0][1][2][1][RTW89_FCC][3] = 58, + [0][1][2][1][RTW89_ETSI][3] = 36, + [0][1][2][1][RTW89_MKK][3] = 68, [0][1][2][1][RTW89_IC][3] = 68, [0][1][2][1][RTW89_ACMA][3] = 36, - [0][1][2][1][RTW89_FCC][4] = 74, - [0][1][2][1][RTW89_ETSI][4] = 38, - [0][1][2][1][RTW89_MKK][4] = 58, + [0][1][2][1][RTW89_FCC][4] = 64, + [0][1][2][1][RTW89_ETSI][4] = 36, + [0][1][2][1][RTW89_MKK][4] = 68, [0][1][2][1][RTW89_IC][4] = 74, [0][1][2][1][RTW89_ACMA][4] = 36, - [0][1][2][1][RTW89_FCC][5] = 80, - [0][1][2][1][RTW89_ETSI][5] = 38, - [0][1][2][1][RTW89_MKK][5] = 58, - [0][1][2][1][RTW89_IC][5] = 80, + [0][1][2][1][RTW89_FCC][5] = 70, + [0][1][2][1][RTW89_ETSI][5] = 36, + [0][1][2][1][RTW89_MKK][5] = 68, + [0][1][2][1][RTW89_IC][5] = 78, [0][1][2][1][RTW89_ACMA][5] = 36, - [0][1][2][1][RTW89_FCC][6] = 76, - [0][1][2][1][RTW89_ETSI][6] = 38, - [0][1][2][1][RTW89_MKK][6] = 58, + [0][1][2][1][RTW89_FCC][6] = 66, + [0][1][2][1][RTW89_ETSI][6] = 36, + [0][1][2][1][RTW89_MKK][6] = 68, [0][1][2][1][RTW89_IC][6] = 76, [0][1][2][1][RTW89_ACMA][6] = 36, - [0][1][2][1][RTW89_FCC][7] = 68, - [0][1][2][1][RTW89_ETSI][7] = 38, - [0][1][2][1][RTW89_MKK][7] = 58, + [0][1][2][1][RTW89_FCC][7] = 58, + [0][1][2][1][RTW89_ETSI][7] = 36, + [0][1][2][1][RTW89_MKK][7] = 68, [0][1][2][1][RTW89_IC][7] = 68, [0][1][2][1][RTW89_ACMA][7] = 36, - [0][1][2][1][RTW89_FCC][8] = 64, - [0][1][2][1][RTW89_ETSI][8] = 38, - [0][1][2][1][RTW89_MKK][8] = 58, + [0][1][2][1][RTW89_FCC][8] = 54, + [0][1][2][1][RTW89_ETSI][8] = 36, + [0][1][2][1][RTW89_MKK][8] = 68, [0][1][2][1][RTW89_IC][8] = 64, [0][1][2][1][RTW89_ACMA][8] = 36, - [0][1][2][1][RTW89_FCC][9] = 60, - [0][1][2][1][RTW89_ETSI][9] = 38, - [0][1][2][1][RTW89_MKK][9] = 58, + [0][1][2][1][RTW89_FCC][9] = 50, + [0][1][2][1][RTW89_ETSI][9] = 36, + [0][1][2][1][RTW89_MKK][9] = 68, [0][1][2][1][RTW89_IC][9] = 60, [0][1][2][1][RTW89_ACMA][9] = 36, - [0][1][2][1][RTW89_FCC][10] = 60, - [0][1][2][1][RTW89_ETSI][10] = 38, - [0][1][2][1][RTW89_MKK][10] = 58, + [0][1][2][1][RTW89_FCC][10] = 50, + [0][1][2][1][RTW89_ETSI][10] = 36, + [0][1][2][1][RTW89_MKK][10] = 68, 
[0][1][2][1][RTW89_IC][10] = 60, [0][1][2][1][RTW89_ACMA][10] = 36, - [0][1][2][1][RTW89_FCC][11] = 48, - [0][1][2][1][RTW89_ETSI][11] = 38, - [0][1][2][1][RTW89_MKK][11] = 58, + [0][1][2][1][RTW89_FCC][11] = 38, + [0][1][2][1][RTW89_ETSI][11] = 36, + [0][1][2][1][RTW89_MKK][11] = 68, [0][1][2][1][RTW89_IC][11] = 48, [0][1][2][1][RTW89_ACMA][11] = 36, - [0][1][2][1][RTW89_FCC][12] = 44, - [0][1][2][1][RTW89_ETSI][12] = 38, - [0][1][2][1][RTW89_MKK][12] = 58, + [0][1][2][1][RTW89_FCC][12] = 34, + [0][1][2][1][RTW89_ETSI][12] = 36, + [0][1][2][1][RTW89_MKK][12] = 68, [0][1][2][1][RTW89_IC][12] = 44, [0][1][2][1][RTW89_ACMA][12] = 36, [0][1][2][1][RTW89_FCC][13] = 127, @@ -14653,49 +14653,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_MKK][1] = 127, [1][0][2][0][RTW89_IC][1] = 127, [1][0][2][0][RTW89_ACMA][1] = 127, - [1][0][2][0][RTW89_FCC][2] = 72, + [1][0][2][0][RTW89_FCC][2] = 62, [1][0][2][0][RTW89_ETSI][2] = 60, - [1][0][2][0][RTW89_MKK][2] = 72, + [1][0][2][0][RTW89_MKK][2] = 74, [1][0][2][0][RTW89_IC][2] = 72, [1][0][2][0][RTW89_ACMA][2] = 60, - [1][0][2][0][RTW89_FCC][3] = 72, + [1][0][2][0][RTW89_FCC][3] = 62, [1][0][2][0][RTW89_ETSI][3] = 60, - [1][0][2][0][RTW89_MKK][3] = 72, + [1][0][2][0][RTW89_MKK][3] = 74, [1][0][2][0][RTW89_IC][3] = 72, [1][0][2][0][RTW89_ACMA][3] = 60, - [1][0][2][0][RTW89_FCC][4] = 74, + [1][0][2][0][RTW89_FCC][4] = 64, [1][0][2][0][RTW89_ETSI][4] = 60, - [1][0][2][0][RTW89_MKK][4] = 72, + [1][0][2][0][RTW89_MKK][4] = 74, [1][0][2][0][RTW89_IC][4] = 74, [1][0][2][0][RTW89_ACMA][4] = 60, - [1][0][2][0][RTW89_FCC][5] = 74, + [1][0][2][0][RTW89_FCC][5] = 64, [1][0][2][0][RTW89_ETSI][5] = 60, - [1][0][2][0][RTW89_MKK][5] = 72, + [1][0][2][0][RTW89_MKK][5] = 74, [1][0][2][0][RTW89_IC][5] = 74, [1][0][2][0][RTW89_ACMA][5] = 60, - [1][0][2][0][RTW89_FCC][6] = 74, + [1][0][2][0][RTW89_FCC][6] = 64, [1][0][2][0][RTW89_ETSI][6] = 60, - [1][0][2][0][RTW89_MKK][6] = 72, + [1][0][2][0][RTW89_MKK][6] = 74, [1][0][2][0][RTW89_IC][6] = 74, [1][0][2][0][RTW89_ACMA][6] = 60, - [1][0][2][0][RTW89_FCC][7] = 70, + [1][0][2][0][RTW89_FCC][7] = 60, [1][0][2][0][RTW89_ETSI][7] = 60, - [1][0][2][0][RTW89_MKK][7] = 72, + [1][0][2][0][RTW89_MKK][7] = 74, [1][0][2][0][RTW89_IC][7] = 70, [1][0][2][0][RTW89_ACMA][7] = 60, - [1][0][2][0][RTW89_FCC][8] = 70, + [1][0][2][0][RTW89_FCC][8] = 60, [1][0][2][0][RTW89_ETSI][8] = 60, - [1][0][2][0][RTW89_MKK][8] = 72, + [1][0][2][0][RTW89_MKK][8] = 74, [1][0][2][0][RTW89_IC][8] = 70, [1][0][2][0][RTW89_ACMA][8] = 60, - [1][0][2][0][RTW89_FCC][9] = 70, + [1][0][2][0][RTW89_FCC][9] = 60, [1][0][2][0][RTW89_ETSI][9] = 60, - [1][0][2][0][RTW89_MKK][9] = 72, + [1][0][2][0][RTW89_MKK][9] = 74, [1][0][2][0][RTW89_IC][9] = 70, [1][0][2][0][RTW89_ACMA][9] = 60, - [1][0][2][0][RTW89_FCC][10] = 68, + [1][0][2][0][RTW89_FCC][10] = 58, [1][0][2][0][RTW89_ETSI][10] = 60, - [1][0][2][0][RTW89_MKK][10] = 72, + [1][0][2][0][RTW89_MKK][10] = 74, [1][0][2][0][RTW89_IC][10] = 68, [1][0][2][0][RTW89_ACMA][10] = 60, [1][0][2][0][RTW89_FCC][11] = 127, @@ -14723,49 +14723,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_MKK][1] = 127, [1][1][2][0][RTW89_IC][1] = 127, [1][1][2][0][RTW89_ACMA][1] = 127, - [1][1][2][0][RTW89_FCC][2] = 56, + [1][1][2][0][RTW89_FCC][2] = 46, [1][1][2][0][RTW89_ETSI][2] = 48, - [1][1][2][0][RTW89_MKK][2] = 70, + [1][1][2][0][RTW89_MKK][2] = 68, [1][1][2][0][RTW89_IC][2] = 56, [1][1][2][0][RTW89_ACMA][2] = 48, - [1][1][2][0][RTW89_FCC][3] = 56, + 
[1][1][2][0][RTW89_FCC][3] = 46, [1][1][2][0][RTW89_ETSI][3] = 48, - [1][1][2][0][RTW89_MKK][3] = 70, + [1][1][2][0][RTW89_MKK][3] = 68, [1][1][2][0][RTW89_IC][3] = 56, [1][1][2][0][RTW89_ACMA][3] = 48, - [1][1][2][0][RTW89_FCC][4] = 60, + [1][1][2][0][RTW89_FCC][4] = 50, [1][1][2][0][RTW89_ETSI][4] = 48, - [1][1][2][0][RTW89_MKK][4] = 70, + [1][1][2][0][RTW89_MKK][4] = 68, [1][1][2][0][RTW89_IC][4] = 60, [1][1][2][0][RTW89_ACMA][4] = 48, - [1][1][2][0][RTW89_FCC][5] = 68, + [1][1][2][0][RTW89_FCC][5] = 58, [1][1][2][0][RTW89_ETSI][5] = 48, - [1][1][2][0][RTW89_MKK][5] = 70, + [1][1][2][0][RTW89_MKK][5] = 68, [1][1][2][0][RTW89_IC][5] = 68, [1][1][2][0][RTW89_ACMA][5] = 48, - [1][1][2][0][RTW89_FCC][6] = 60, + [1][1][2][0][RTW89_FCC][6] = 50, [1][1][2][0][RTW89_ETSI][6] = 48, - [1][1][2][0][RTW89_MKK][6] = 70, + [1][1][2][0][RTW89_MKK][6] = 68, [1][1][2][0][RTW89_IC][6] = 60, [1][1][2][0][RTW89_ACMA][6] = 48, - [1][1][2][0][RTW89_FCC][7] = 56, + [1][1][2][0][RTW89_FCC][7] = 46, [1][1][2][0][RTW89_ETSI][7] = 48, - [1][1][2][0][RTW89_MKK][7] = 70, + [1][1][2][0][RTW89_MKK][7] = 68, [1][1][2][0][RTW89_IC][7] = 56, [1][1][2][0][RTW89_ACMA][7] = 48, - [1][1][2][0][RTW89_FCC][8] = 56, + [1][1][2][0][RTW89_FCC][8] = 46, [1][1][2][0][RTW89_ETSI][8] = 48, - [1][1][2][0][RTW89_MKK][8] = 70, + [1][1][2][0][RTW89_MKK][8] = 68, [1][1][2][0][RTW89_IC][8] = 56, [1][1][2][0][RTW89_ACMA][8] = 48, - [1][1][2][0][RTW89_FCC][9] = 44, + [1][1][2][0][RTW89_FCC][9] = 34, [1][1][2][0][RTW89_ETSI][9] = 48, - [1][1][2][0][RTW89_MKK][9] = 70, + [1][1][2][0][RTW89_MKK][9] = 68, [1][1][2][0][RTW89_IC][9] = 44, [1][1][2][0][RTW89_ACMA][9] = 48, - [1][1][2][0][RTW89_FCC][10] = 40, + [1][1][2][0][RTW89_FCC][10] = 30, [1][1][2][0][RTW89_ETSI][10] = 48, - [1][1][2][0][RTW89_MKK][10] = 70, + [1][1][2][0][RTW89_MKK][10] = 68, [1][1][2][0][RTW89_IC][10] = 40, [1][1][2][0][RTW89_ACMA][10] = 48, [1][1][2][0][RTW89_FCC][11] = 127, @@ -14793,49 +14793,49 @@ const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_MKK][1] = 127, [1][1][2][1][RTW89_IC][1] = 127, [1][1][2][1][RTW89_ACMA][1] = 127, - [1][1][2][1][RTW89_FCC][2] = 56, - [1][1][2][1][RTW89_ETSI][2] = 38, - [1][1][2][1][RTW89_MKK][2] = 58, + [1][1][2][1][RTW89_FCC][2] = 46, + [1][1][2][1][RTW89_ETSI][2] = 36, + [1][1][2][1][RTW89_MKK][2] = 68, [1][1][2][1][RTW89_IC][2] = 56, [1][1][2][1][RTW89_ACMA][2] = 36, - [1][1][2][1][RTW89_FCC][3] = 56, - [1][1][2][1][RTW89_ETSI][3] = 38, - [1][1][2][1][RTW89_MKK][3] = 58, + [1][1][2][1][RTW89_FCC][3] = 46, + [1][1][2][1][RTW89_ETSI][3] = 36, + [1][1][2][1][RTW89_MKK][3] = 68, [1][1][2][1][RTW89_IC][3] = 56, [1][1][2][1][RTW89_ACMA][3] = 36, - [1][1][2][1][RTW89_FCC][4] = 60, - [1][1][2][1][RTW89_ETSI][4] = 38, - [1][1][2][1][RTW89_MKK][4] = 58, + [1][1][2][1][RTW89_FCC][4] = 50, + [1][1][2][1][RTW89_ETSI][4] = 36, + [1][1][2][1][RTW89_MKK][4] = 68, [1][1][2][1][RTW89_IC][4] = 60, [1][1][2][1][RTW89_ACMA][4] = 36, - [1][1][2][1][RTW89_FCC][5] = 68, - [1][1][2][1][RTW89_ETSI][5] = 38, - [1][1][2][1][RTW89_MKK][5] = 58, + [1][1][2][1][RTW89_FCC][5] = 58, + [1][1][2][1][RTW89_ETSI][5] = 36, + [1][1][2][1][RTW89_MKK][5] = 68, [1][1][2][1][RTW89_IC][5] = 68, [1][1][2][1][RTW89_ACMA][5] = 36, - [1][1][2][1][RTW89_FCC][6] = 60, - [1][1][2][1][RTW89_ETSI][6] = 38, - [1][1][2][1][RTW89_MKK][6] = 58, + [1][1][2][1][RTW89_FCC][6] = 50, + [1][1][2][1][RTW89_ETSI][6] = 36, + [1][1][2][1][RTW89_MKK][6] = 68, [1][1][2][1][RTW89_IC][6] = 60, [1][1][2][1][RTW89_ACMA][6] = 36, - [1][1][2][1][RTW89_FCC][7] = 56, - 
[1][1][2][1][RTW89_ETSI][7] = 38, - [1][1][2][1][RTW89_MKK][7] = 58, + [1][1][2][1][RTW89_FCC][7] = 46, + [1][1][2][1][RTW89_ETSI][7] = 36, + [1][1][2][1][RTW89_MKK][7] = 68, [1][1][2][1][RTW89_IC][7] = 56, [1][1][2][1][RTW89_ACMA][7] = 36, - [1][1][2][1][RTW89_FCC][8] = 56, - [1][1][2][1][RTW89_ETSI][8] = 38, - [1][1][2][1][RTW89_MKK][8] = 58, + [1][1][2][1][RTW89_FCC][8] = 46, + [1][1][2][1][RTW89_ETSI][8] = 36, + [1][1][2][1][RTW89_MKK][8] = 68, [1][1][2][1][RTW89_IC][8] = 56, [1][1][2][1][RTW89_ACMA][8] = 36, - [1][1][2][1][RTW89_FCC][9] = 44, - [1][1][2][1][RTW89_ETSI][9] = 38, - [1][1][2][1][RTW89_MKK][9] = 58, + [1][1][2][1][RTW89_FCC][9] = 34, + [1][1][2][1][RTW89_ETSI][9] = 36, + [1][1][2][1][RTW89_MKK][9] = 68, [1][1][2][1][RTW89_IC][9] = 44, [1][1][2][1][RTW89_ACMA][9] = 36, - [1][1][2][1][RTW89_FCC][10] = 40, - [1][1][2][1][RTW89_ETSI][10] = 38, - [1][1][2][1][RTW89_MKK][10] = 58, + [1][1][2][1][RTW89_FCC][10] = 30, + [1][1][2][1][RTW89_ETSI][10] = 36, + [1][1][2][1][RTW89_MKK][10] = 68, [1][1][2][1][RTW89_IC][10] = 40, [1][1][2][1][RTW89_ACMA][10] = 36, [1][1][2][1][RTW89_FCC][11] = 127, @@ -14871,21 +14871,21 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_WW][19] = 60, [0][0][1][0][RTW89_WW][21] = 60, [0][0][1][0][RTW89_WW][23] = 60, - [0][0][1][0][RTW89_WW][25] = 60, - [0][0][1][0][RTW89_WW][27] = 60, - [0][0][1][0][RTW89_WW][29] = 60, + [0][0][1][0][RTW89_WW][25] = 66, + [0][0][1][0][RTW89_WW][27] = 66, + [0][0][1][0][RTW89_WW][29] = 66, [0][0][1][0][RTW89_WW][31] = 60, [0][0][1][0][RTW89_WW][33] = 60, [0][0][1][0][RTW89_WW][35] = 60, - [0][0][1][0][RTW89_WW][37] = 78, + [0][0][1][0][RTW89_WW][37] = 70, [0][0][1][0][RTW89_WW][38] = 30, [0][0][1][0][RTW89_WW][40] = 30, [0][0][1][0][RTW89_WW][42] = 30, [0][0][1][0][RTW89_WW][44] = 30, [0][0][1][0][RTW89_WW][46] = 30, - [0][0][1][0][RTW89_WW][48] = 80, - [0][0][1][0][RTW89_WW][50] = 80, - [0][0][1][0][RTW89_WW][52] = 80, + [0][0][1][0][RTW89_WW][48] = 70, + [0][0][1][0][RTW89_WW][50] = 70, + [0][0][1][0][RTW89_WW][52] = 70, [0][1][1][0][RTW89_WW][0] = 42, [0][1][1][0][RTW89_WW][2] = 42, [0][1][1][0][RTW89_WW][4] = 42, @@ -14899,26 +14899,26 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_WW][19] = 48, [0][1][1][0][RTW89_WW][21] = 48, [0][1][1][0][RTW89_WW][23] = 48, - [0][1][1][0][RTW89_WW][25] = 48, - [0][1][1][0][RTW89_WW][27] = 48, - [0][1][1][0][RTW89_WW][29] = 48, + [0][1][1][0][RTW89_WW][25] = 54, + [0][1][1][0][RTW89_WW][27] = 54, + [0][1][1][0][RTW89_WW][29] = 54, [0][1][1][0][RTW89_WW][31] = 48, [0][1][1][0][RTW89_WW][33] = 48, [0][1][1][0][RTW89_WW][35] = 48, - [0][1][1][0][RTW89_WW][37] = 70, + [0][1][1][0][RTW89_WW][37] = 60, [0][1][1][0][RTW89_WW][38] = 18, [0][1][1][0][RTW89_WW][40] = 16, [0][1][1][0][RTW89_WW][42] = 18, [0][1][1][0][RTW89_WW][44] = 16, [0][1][1][0][RTW89_WW][46] = 18, - [0][1][1][0][RTW89_WW][48] = 58, - [0][1][1][0][RTW89_WW][50] = 58, - [0][1][1][0][RTW89_WW][52] = 58, + [0][1][1][0][RTW89_WW][48] = 48, + [0][1][1][0][RTW89_WW][50] = 48, + [0][1][1][0][RTW89_WW][52] = 48, [0][0][2][0][RTW89_WW][0] = 62, [0][0][2][0][RTW89_WW][2] = 62, [0][0][2][0][RTW89_WW][4] = 62, - [0][0][2][0][RTW89_WW][6] = 62, - [0][0][2][0][RTW89_WW][8] = 62, + [0][0][2][0][RTW89_WW][6] = 60, + [0][0][2][0][RTW89_WW][8] = 58, [0][0][2][0][RTW89_WW][10] = 62, [0][0][2][0][RTW89_WW][12] = 62, [0][0][2][0][RTW89_WW][14] = 62, @@ -14927,26 +14927,26 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_WW][19] 
= 62, [0][0][2][0][RTW89_WW][21] = 62, [0][0][2][0][RTW89_WW][23] = 62, - [0][0][2][0][RTW89_WW][25] = 62, - [0][0][2][0][RTW89_WW][27] = 62, - [0][0][2][0][RTW89_WW][29] = 62, + [0][0][2][0][RTW89_WW][25] = 66, + [0][0][2][0][RTW89_WW][27] = 66, + [0][0][2][0][RTW89_WW][29] = 66, [0][0][2][0][RTW89_WW][31] = 62, [0][0][2][0][RTW89_WW][33] = 62, [0][0][2][0][RTW89_WW][35] = 62, - [0][0][2][0][RTW89_WW][37] = 78, + [0][0][2][0][RTW89_WW][37] = 70, [0][0][2][0][RTW89_WW][38] = 30, [0][0][2][0][RTW89_WW][40] = 30, [0][0][2][0][RTW89_WW][42] = 30, [0][0][2][0][RTW89_WW][44] = 30, [0][0][2][0][RTW89_WW][46] = 30, - [0][0][2][0][RTW89_WW][48] = 80, - [0][0][2][0][RTW89_WW][50] = 80, - [0][0][2][0][RTW89_WW][52] = 80, + [0][0][2][0][RTW89_WW][48] = 70, + [0][0][2][0][RTW89_WW][50] = 70, + [0][0][2][0][RTW89_WW][52] = 70, [0][1][2][0][RTW89_WW][0] = 44, [0][1][2][0][RTW89_WW][2] = 44, [0][1][2][0][RTW89_WW][4] = 44, [0][1][2][0][RTW89_WW][6] = 44, - [0][1][2][0][RTW89_WW][8] = 50, + [0][1][2][0][RTW89_WW][8] = 42, [0][1][2][0][RTW89_WW][10] = 50, [0][1][2][0][RTW89_WW][12] = 50, [0][1][2][0][RTW89_WW][14] = 50, @@ -14955,21 +14955,21 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_WW][19] = 50, [0][1][2][0][RTW89_WW][21] = 50, [0][1][2][0][RTW89_WW][23] = 50, - [0][1][2][0][RTW89_WW][25] = 50, - [0][1][2][0][RTW89_WW][27] = 50, - [0][1][2][0][RTW89_WW][29] = 50, + [0][1][2][0][RTW89_WW][25] = 54, + [0][1][2][0][RTW89_WW][27] = 54, + [0][1][2][0][RTW89_WW][29] = 54, [0][1][2][0][RTW89_WW][31] = 50, [0][1][2][0][RTW89_WW][33] = 50, [0][1][2][0][RTW89_WW][35] = 50, - [0][1][2][0][RTW89_WW][37] = 72, + [0][1][2][0][RTW89_WW][37] = 62, [0][1][2][0][RTW89_WW][38] = 18, [0][1][2][0][RTW89_WW][40] = 18, [0][1][2][0][RTW89_WW][42] = 18, [0][1][2][0][RTW89_WW][44] = 18, [0][1][2][0][RTW89_WW][46] = 18, - [0][1][2][0][RTW89_WW][48] = 60, - [0][1][2][0][RTW89_WW][50] = 60, - [0][1][2][0][RTW89_WW][52] = 60, + [0][1][2][0][RTW89_WW][48] = 50, + [0][1][2][0][RTW89_WW][50] = 50, + [0][1][2][0][RTW89_WW][52] = 50, [0][1][2][1][RTW89_WW][0] = 38, [0][1][2][1][RTW89_WW][2] = 38, [0][1][2][1][RTW89_WW][4] = 38, @@ -14983,1149 +14983,1149 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_WW][19] = 38, [0][1][2][1][RTW89_WW][21] = 38, [0][1][2][1][RTW89_WW][23] = 38, - [0][1][2][1][RTW89_WW][25] = 42, - [0][1][2][1][RTW89_WW][27] = 42, - [0][1][2][1][RTW89_WW][29] = 42, + [0][1][2][1][RTW89_WW][25] = 40, + [0][1][2][1][RTW89_WW][27] = 40, + [0][1][2][1][RTW89_WW][29] = 40, [0][1][2][1][RTW89_WW][31] = 38, [0][1][2][1][RTW89_WW][33] = 38, [0][1][2][1][RTW89_WW][35] = 38, - [0][1][2][1][RTW89_WW][37] = 70, - [0][1][2][1][RTW89_WW][38] = 8, - [0][1][2][1][RTW89_WW][40] = 8, - [0][1][2][1][RTW89_WW][42] = 8, - [0][1][2][1][RTW89_WW][44] = 8, - [0][1][2][1][RTW89_WW][46] = 8, - [0][1][2][1][RTW89_WW][48] = 60, - [0][1][2][1][RTW89_WW][50] = 60, - [0][1][2][1][RTW89_WW][52] = 60, - [1][0][2][0][RTW89_WW][1] = 66, + [0][1][2][1][RTW89_WW][37] = 60, + [0][1][2][1][RTW89_WW][38] = 6, + [0][1][2][1][RTW89_WW][40] = 6, + [0][1][2][1][RTW89_WW][42] = 6, + [0][1][2][1][RTW89_WW][44] = 6, + [0][1][2][1][RTW89_WW][46] = 6, + [0][1][2][1][RTW89_WW][48] = 50, + [0][1][2][1][RTW89_WW][50] = 50, + [0][1][2][1][RTW89_WW][52] = 50, + [1][0][2][0][RTW89_WW][1] = 58, [1][0][2][0][RTW89_WW][5] = 66, [1][0][2][0][RTW89_WW][9] = 66, - [1][0][2][0][RTW89_WW][13] = 66, - [1][0][2][0][RTW89_WW][16] = 66, + [1][0][2][0][RTW89_WW][13] = 58, + [1][0][2][0][RTW89_WW][16] = 
56, [1][0][2][0][RTW89_WW][20] = 66, [1][0][2][0][RTW89_WW][24] = 66, [1][0][2][0][RTW89_WW][28] = 66, [1][0][2][0][RTW89_WW][32] = 66, - [1][0][2][0][RTW89_WW][36] = 76, + [1][0][2][0][RTW89_WW][36] = 66, [1][0][2][0][RTW89_WW][39] = 30, [1][0][2][0][RTW89_WW][43] = 30, - [1][0][2][0][RTW89_WW][47] = 80, - [1][0][2][0][RTW89_WW][51] = 72, - [1][1][2][0][RTW89_WW][1] = 54, - [1][1][2][0][RTW89_WW][5] = 54, - [1][1][2][0][RTW89_WW][9] = 54, - [1][1][2][0][RTW89_WW][13] = 54, - [1][1][2][0][RTW89_WW][16] = 54, + [1][0][2][0][RTW89_WW][47] = 68, + [1][0][2][0][RTW89_WW][51] = 68, + [1][1][2][0][RTW89_WW][1] = 48, + [1][1][2][0][RTW89_WW][5] = 52, + [1][1][2][0][RTW89_WW][9] = 52, + [1][1][2][0][RTW89_WW][13] = 52, + [1][1][2][0][RTW89_WW][16] = 48, [1][1][2][0][RTW89_WW][20] = 54, [1][1][2][0][RTW89_WW][24] = 54, [1][1][2][0][RTW89_WW][28] = 54, [1][1][2][0][RTW89_WW][32] = 54, - [1][1][2][0][RTW89_WW][36] = 72, + [1][1][2][0][RTW89_WW][36] = 66, [1][1][2][0][RTW89_WW][39] = 18, [1][1][2][0][RTW89_WW][43] = 18, - [1][1][2][0][RTW89_WW][47] = 70, - [1][1][2][0][RTW89_WW][51] = 68, - [1][1][2][1][RTW89_WW][1] = 42, - [1][1][2][1][RTW89_WW][5] = 42, - [1][1][2][1][RTW89_WW][9] = 42, - [1][1][2][1][RTW89_WW][13] = 42, - [1][1][2][1][RTW89_WW][16] = 42, - [1][1][2][1][RTW89_WW][20] = 42, - [1][1][2][1][RTW89_WW][24] = 42, - [1][1][2][1][RTW89_WW][28] = 42, - [1][1][2][1][RTW89_WW][32] = 42, - [1][1][2][1][RTW89_WW][36] = 70, - [1][1][2][1][RTW89_WW][39] = 8, - [1][1][2][1][RTW89_WW][43] = 8, - [1][1][2][1][RTW89_WW][47] = 70, - [1][1][2][1][RTW89_WW][51] = 68, - [2][0][2][0][RTW89_WW][3] = 64, - [2][0][2][0][RTW89_WW][11] = 66, - [2][0][2][0][RTW89_WW][18] = 64, - [2][0][2][0][RTW89_WW][26] = 66, - [2][0][2][0][RTW89_WW][34] = 72, + [1][1][2][0][RTW89_WW][47] = 60, + [1][1][2][0][RTW89_WW][51] = 58, + [1][1][2][1][RTW89_WW][1] = 40, + [1][1][2][1][RTW89_WW][5] = 40, + [1][1][2][1][RTW89_WW][9] = 40, + [1][1][2][1][RTW89_WW][13] = 40, + [1][1][2][1][RTW89_WW][16] = 40, + [1][1][2][1][RTW89_WW][20] = 40, + [1][1][2][1][RTW89_WW][24] = 40, + [1][1][2][1][RTW89_WW][28] = 40, + [1][1][2][1][RTW89_WW][32] = 40, + [1][1][2][1][RTW89_WW][36] = 60, + [1][1][2][1][RTW89_WW][39] = 6, + [1][1][2][1][RTW89_WW][43] = 6, + [1][1][2][1][RTW89_WW][47] = 60, + [1][1][2][1][RTW89_WW][51] = 58, + [2][0][2][0][RTW89_WW][3] = 56, + [2][0][2][0][RTW89_WW][11] = 58, + [2][0][2][0][RTW89_WW][18] = 54, + [2][0][2][0][RTW89_WW][26] = 60, + [2][0][2][0][RTW89_WW][34] = 60, [2][0][2][0][RTW89_WW][41] = 30, - [2][0][2][0][RTW89_WW][49] = 66, - [2][1][2][0][RTW89_WW][3] = 54, - [2][1][2][0][RTW89_WW][11] = 54, - [2][1][2][0][RTW89_WW][18] = 54, + [2][0][2][0][RTW89_WW][49] = 56, + [2][1][2][0][RTW89_WW][3] = 48, + [2][1][2][0][RTW89_WW][11] = 52, + [2][1][2][0][RTW89_WW][18] = 48, [2][1][2][0][RTW89_WW][26] = 54, - [2][1][2][0][RTW89_WW][34] = 72, + [2][1][2][0][RTW89_WW][34] = 60, [2][1][2][0][RTW89_WW][41] = 18, - [2][1][2][0][RTW89_WW][49] = 60, - [2][1][2][1][RTW89_WW][3] = 42, - [2][1][2][1][RTW89_WW][11] = 42, - [2][1][2][1][RTW89_WW][18] = 42, - [2][1][2][1][RTW89_WW][26] = 44, - [2][1][2][1][RTW89_WW][34] = 70, - [2][1][2][1][RTW89_WW][41] = 8, - [2][1][2][1][RTW89_WW][49] = 60, - [3][0][2][0][RTW89_WW][7] = 56, - [3][0][2][0][RTW89_WW][22] = 56, - [3][0][2][0][RTW89_WW][45] = 56, - [3][1][2][0][RTW89_WW][7] = 44, - [3][1][2][0][RTW89_WW][22] = 44, - [3][1][2][0][RTW89_WW][45] = 44, - [3][1][2][1][RTW89_WW][7] = 32, - [3][1][2][1][RTW89_WW][22] = 32, - [3][1][2][1][RTW89_WW][45] = 32, - [0][0][1][0][RTW89_FCC][0] = 80, 
- [0][0][1][0][RTW89_ETSI][0] = 60, - [0][0][1][0][RTW89_MKK][0] = 62, + [2][1][2][0][RTW89_WW][49] = 50, + [2][1][2][1][RTW89_WW][3] = 40, + [2][1][2][1][RTW89_WW][11] = 40, + [2][1][2][1][RTW89_WW][18] = 40, + [2][1][2][1][RTW89_WW][26] = 42, + [2][1][2][1][RTW89_WW][34] = 60, + [2][1][2][1][RTW89_WW][41] = 6, + [2][1][2][1][RTW89_WW][49] = 50, + [3][0][2][0][RTW89_WW][7] = 38, + [3][0][2][0][RTW89_WW][22] = 50, + [3][0][2][0][RTW89_WW][45] = 0, + [3][1][2][0][RTW89_WW][7] = 26, + [3][1][2][0][RTW89_WW][22] = 42, + [3][1][2][0][RTW89_WW][45] = 0, + [3][1][2][1][RTW89_WW][7] = 14, + [3][1][2][1][RTW89_WW][22] = 30, + [3][1][2][1][RTW89_WW][45] = 0, + [0][0][1][0][RTW89_FCC][0] = 70, + [0][0][1][0][RTW89_ETSI][0] = 66, + [0][0][1][0][RTW89_MKK][0] = 66, [0][0][1][0][RTW89_IC][0] = 62, [0][0][1][0][RTW89_ACMA][0] = 60, - [0][0][1][0][RTW89_FCC][2] = 80, - [0][0][1][0][RTW89_ETSI][2] = 60, - [0][0][1][0][RTW89_MKK][2] = 62, + [0][0][1][0][RTW89_FCC][2] = 70, + [0][0][1][0][RTW89_ETSI][2] = 66, + [0][0][1][0][RTW89_MKK][2] = 66, [0][0][1][0][RTW89_IC][2] = 62, [0][0][1][0][RTW89_ACMA][2] = 60, - [0][0][1][0][RTW89_FCC][4] = 80, - [0][0][1][0][RTW89_ETSI][4] = 60, - [0][0][1][0][RTW89_MKK][4] = 62, + [0][0][1][0][RTW89_FCC][4] = 70, + [0][0][1][0][RTW89_ETSI][4] = 66, + [0][0][1][0][RTW89_MKK][4] = 66, [0][0][1][0][RTW89_IC][4] = 62, [0][0][1][0][RTW89_ACMA][4] = 60, - [0][0][1][0][RTW89_FCC][6] = 80, - [0][0][1][0][RTW89_ETSI][6] = 60, - [0][0][1][0][RTW89_MKK][6] = 62, + [0][0][1][0][RTW89_FCC][6] = 70, + [0][0][1][0][RTW89_ETSI][6] = 66, + [0][0][1][0][RTW89_MKK][6] = 66, [0][0][1][0][RTW89_IC][6] = 62, [0][0][1][0][RTW89_ACMA][6] = 60, - [0][0][1][0][RTW89_FCC][8] = 80, - [0][0][1][0][RTW89_ETSI][8] = 60, - [0][0][1][0][RTW89_MKK][8] = 64, + [0][0][1][0][RTW89_FCC][8] = 70, + [0][0][1][0][RTW89_ETSI][8] = 66, + [0][0][1][0][RTW89_MKK][8] = 66, [0][0][1][0][RTW89_IC][8] = 66, [0][0][1][0][RTW89_ACMA][8] = 60, - [0][0][1][0][RTW89_FCC][10] = 80, - [0][0][1][0][RTW89_ETSI][10] = 60, - [0][0][1][0][RTW89_MKK][10] = 64, + [0][0][1][0][RTW89_FCC][10] = 70, + [0][0][1][0][RTW89_ETSI][10] = 66, + [0][0][1][0][RTW89_MKK][10] = 66, [0][0][1][0][RTW89_IC][10] = 66, [0][0][1][0][RTW89_ACMA][10] = 60, - [0][0][1][0][RTW89_FCC][12] = 80, - [0][0][1][0][RTW89_ETSI][12] = 60, - [0][0][1][0][RTW89_MKK][12] = 64, + [0][0][1][0][RTW89_FCC][12] = 70, + [0][0][1][0][RTW89_ETSI][12] = 66, + [0][0][1][0][RTW89_MKK][12] = 66, [0][0][1][0][RTW89_IC][12] = 66, [0][0][1][0][RTW89_ACMA][12] = 60, - [0][0][1][0][RTW89_FCC][14] = 80, - [0][0][1][0][RTW89_ETSI][14] = 60, - [0][0][1][0][RTW89_MKK][14] = 62, + [0][0][1][0][RTW89_FCC][14] = 70, + [0][0][1][0][RTW89_ETSI][14] = 66, + [0][0][1][0][RTW89_MKK][14] = 66, [0][0][1][0][RTW89_IC][14] = 66, [0][0][1][0][RTW89_ACMA][14] = 60, - [0][0][1][0][RTW89_FCC][15] = 78, - [0][0][1][0][RTW89_ETSI][15] = 60, - [0][0][1][0][RTW89_MKK][15] = 78, - [0][0][1][0][RTW89_IC][15] = 78, + [0][0][1][0][RTW89_FCC][15] = 68, + [0][0][1][0][RTW89_ETSI][15] = 66, + [0][0][1][0][RTW89_MKK][15] = 70, + [0][0][1][0][RTW89_IC][15] = 70, [0][0][1][0][RTW89_ACMA][15] = 60, - [0][0][1][0][RTW89_FCC][17] = 80, - [0][0][1][0][RTW89_ETSI][17] = 60, - [0][0][1][0][RTW89_MKK][17] = 78, - [0][0][1][0][RTW89_IC][17] = 80, + [0][0][1][0][RTW89_FCC][17] = 70, + [0][0][1][0][RTW89_ETSI][17] = 66, + [0][0][1][0][RTW89_MKK][17] = 70, + [0][0][1][0][RTW89_IC][17] = 70, [0][0][1][0][RTW89_ACMA][17] = 60, - [0][0][1][0][RTW89_FCC][19] = 80, - [0][0][1][0][RTW89_ETSI][19] = 60, - [0][0][1][0][RTW89_MKK][19] = 78, 
- [0][0][1][0][RTW89_IC][19] = 80, + [0][0][1][0][RTW89_FCC][19] = 70, + [0][0][1][0][RTW89_ETSI][19] = 66, + [0][0][1][0][RTW89_MKK][19] = 70, + [0][0][1][0][RTW89_IC][19] = 70, [0][0][1][0][RTW89_ACMA][19] = 60, - [0][0][1][0][RTW89_FCC][21] = 80, - [0][0][1][0][RTW89_ETSI][21] = 60, - [0][0][1][0][RTW89_MKK][21] = 78, - [0][0][1][0][RTW89_IC][21] = 80, + [0][0][1][0][RTW89_FCC][21] = 70, + [0][0][1][0][RTW89_ETSI][21] = 66, + [0][0][1][0][RTW89_MKK][21] = 70, + [0][0][1][0][RTW89_IC][21] = 70, [0][0][1][0][RTW89_ACMA][21] = 60, - [0][0][1][0][RTW89_FCC][23] = 80, - [0][0][1][0][RTW89_ETSI][23] = 60, - [0][0][1][0][RTW89_MKK][23] = 78, - [0][0][1][0][RTW89_IC][23] = 80, + [0][0][1][0][RTW89_FCC][23] = 70, + [0][0][1][0][RTW89_ETSI][23] = 66, + [0][0][1][0][RTW89_MKK][23] = 70, + [0][0][1][0][RTW89_IC][23] = 70, [0][0][1][0][RTW89_ACMA][23] = 60, - [0][0][1][0][RTW89_FCC][25] = 80, - [0][0][1][0][RTW89_ETSI][25] = 60, - [0][0][1][0][RTW89_MKK][25] = 78, + [0][0][1][0][RTW89_FCC][25] = 70, + [0][0][1][0][RTW89_ETSI][25] = 66, + [0][0][1][0][RTW89_MKK][25] = 70, [0][0][1][0][RTW89_IC][25] = 127, [0][0][1][0][RTW89_ACMA][25] = 127, - [0][0][1][0][RTW89_FCC][27] = 80, - [0][0][1][0][RTW89_ETSI][27] = 60, - [0][0][1][0][RTW89_MKK][27] = 78, + [0][0][1][0][RTW89_FCC][27] = 70, + [0][0][1][0][RTW89_ETSI][27] = 66, + [0][0][1][0][RTW89_MKK][27] = 70, [0][0][1][0][RTW89_IC][27] = 127, [0][0][1][0][RTW89_ACMA][27] = 127, - [0][0][1][0][RTW89_FCC][29] = 80, - [0][0][1][0][RTW89_ETSI][29] = 60, - [0][0][1][0][RTW89_MKK][29] = 78, + [0][0][1][0][RTW89_FCC][29] = 70, + [0][0][1][0][RTW89_ETSI][29] = 66, + [0][0][1][0][RTW89_MKK][29] = 70, [0][0][1][0][RTW89_IC][29] = 127, [0][0][1][0][RTW89_ACMA][29] = 127, - [0][0][1][0][RTW89_FCC][31] = 80, - [0][0][1][0][RTW89_ETSI][31] = 60, - [0][0][1][0][RTW89_MKK][31] = 78, - [0][0][1][0][RTW89_IC][31] = 80, + [0][0][1][0][RTW89_FCC][31] = 70, + [0][0][1][0][RTW89_ETSI][31] = 66, + [0][0][1][0][RTW89_MKK][31] = 70, + [0][0][1][0][RTW89_IC][31] = 70, [0][0][1][0][RTW89_ACMA][31] = 60, - [0][0][1][0][RTW89_FCC][33] = 80, - [0][0][1][0][RTW89_ETSI][33] = 60, - [0][0][1][0][RTW89_MKK][33] = 78, - [0][0][1][0][RTW89_IC][33] = 80, + [0][0][1][0][RTW89_FCC][33] = 70, + [0][0][1][0][RTW89_ETSI][33] = 66, + [0][0][1][0][RTW89_MKK][33] = 70, + [0][0][1][0][RTW89_IC][33] = 70, [0][0][1][0][RTW89_ACMA][33] = 60, - [0][0][1][0][RTW89_FCC][35] = 72, - [0][0][1][0][RTW89_ETSI][35] = 60, - [0][0][1][0][RTW89_MKK][35] = 78, - [0][0][1][0][RTW89_IC][35] = 72, + [0][0][1][0][RTW89_FCC][35] = 62, + [0][0][1][0][RTW89_ETSI][35] = 66, + [0][0][1][0][RTW89_MKK][35] = 70, + [0][0][1][0][RTW89_IC][35] = 70, [0][0][1][0][RTW89_ACMA][35] = 60, - [0][0][1][0][RTW89_FCC][37] = 80, + [0][0][1][0][RTW89_FCC][37] = 70, [0][0][1][0][RTW89_ETSI][37] = 127, - [0][0][1][0][RTW89_MKK][37] = 78, - [0][0][1][0][RTW89_IC][37] = 80, - [0][0][1][0][RTW89_ACMA][37] = 78, - [0][0][1][0][RTW89_FCC][38] = 80, + [0][0][1][0][RTW89_MKK][37] = 70, + [0][0][1][0][RTW89_IC][37] = 70, + [0][0][1][0][RTW89_ACMA][37] = 70, + [0][0][1][0][RTW89_FCC][38] = 70, [0][0][1][0][RTW89_ETSI][38] = 30, [0][0][1][0][RTW89_MKK][38] = 127, - [0][0][1][0][RTW89_IC][38] = 80, - [0][0][1][0][RTW89_ACMA][38] = 78, - [0][0][1][0][RTW89_FCC][40] = 80, + [0][0][1][0][RTW89_IC][38] = 70, + [0][0][1][0][RTW89_ACMA][38] = 70, + [0][0][1][0][RTW89_FCC][40] = 70, [0][0][1][0][RTW89_ETSI][40] = 30, [0][0][1][0][RTW89_MKK][40] = 127, - [0][0][1][0][RTW89_IC][40] = 80, - [0][0][1][0][RTW89_ACMA][40] = 78, - [0][0][1][0][RTW89_FCC][42] = 80, + 
[0][0][1][0][RTW89_IC][40] = 70, + [0][0][1][0][RTW89_ACMA][40] = 70, + [0][0][1][0][RTW89_FCC][42] = 70, [0][0][1][0][RTW89_ETSI][42] = 30, [0][0][1][0][RTW89_MKK][42] = 127, - [0][0][1][0][RTW89_IC][42] = 80, - [0][0][1][0][RTW89_ACMA][42] = 78, - [0][0][1][0][RTW89_FCC][44] = 80, + [0][0][1][0][RTW89_IC][42] = 70, + [0][0][1][0][RTW89_ACMA][42] = 70, + [0][0][1][0][RTW89_FCC][44] = 70, [0][0][1][0][RTW89_ETSI][44] = 30, [0][0][1][0][RTW89_MKK][44] = 127, - [0][0][1][0][RTW89_IC][44] = 80, - [0][0][1][0][RTW89_ACMA][44] = 78, - [0][0][1][0][RTW89_FCC][46] = 80, + [0][0][1][0][RTW89_IC][44] = 70, + [0][0][1][0][RTW89_ACMA][44] = 70, + [0][0][1][0][RTW89_FCC][46] = 70, [0][0][1][0][RTW89_ETSI][46] = 30, [0][0][1][0][RTW89_MKK][46] = 127, - [0][0][1][0][RTW89_IC][46] = 80, - [0][0][1][0][RTW89_ACMA][46] = 78, - [0][0][1][0][RTW89_FCC][48] = 80, + [0][0][1][0][RTW89_IC][46] = 70, + [0][0][1][0][RTW89_ACMA][46] = 70, + [0][0][1][0][RTW89_FCC][48] = 70, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, [0][0][1][0][RTW89_IC][48] = 127, [0][0][1][0][RTW89_ACMA][48] = 127, - [0][0][1][0][RTW89_FCC][50] = 80, + [0][0][1][0][RTW89_FCC][50] = 70, [0][0][1][0][RTW89_ETSI][50] = 127, [0][0][1][0][RTW89_MKK][50] = 127, [0][0][1][0][RTW89_IC][50] = 127, [0][0][1][0][RTW89_ACMA][50] = 127, - [0][0][1][0][RTW89_FCC][52] = 80, + [0][0][1][0][RTW89_FCC][52] = 70, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, [0][0][1][0][RTW89_IC][52] = 127, [0][0][1][0][RTW89_ACMA][52] = 127, - [0][1][1][0][RTW89_FCC][0] = 70, - [0][1][1][0][RTW89_ETSI][0] = 48, - [0][1][1][0][RTW89_MKK][0] = 50, + [0][1][1][0][RTW89_FCC][0] = 60, + [0][1][1][0][RTW89_ETSI][0] = 54, + [0][1][1][0][RTW89_MKK][0] = 54, [0][1][1][0][RTW89_IC][0] = 42, [0][1][1][0][RTW89_ACMA][0] = 48, - [0][1][1][0][RTW89_FCC][2] = 70, - [0][1][1][0][RTW89_ETSI][2] = 48, - [0][1][1][0][RTW89_MKK][2] = 50, + [0][1][1][0][RTW89_FCC][2] = 60, + [0][1][1][0][RTW89_ETSI][2] = 54, + [0][1][1][0][RTW89_MKK][2] = 54, [0][1][1][0][RTW89_IC][2] = 42, [0][1][1][0][RTW89_ACMA][2] = 48, - [0][1][1][0][RTW89_FCC][4] = 70, - [0][1][1][0][RTW89_ETSI][4] = 48, - [0][1][1][0][RTW89_MKK][4] = 50, + [0][1][1][0][RTW89_FCC][4] = 60, + [0][1][1][0][RTW89_ETSI][4] = 54, + [0][1][1][0][RTW89_MKK][4] = 54, [0][1][1][0][RTW89_IC][4] = 42, [0][1][1][0][RTW89_ACMA][4] = 48, - [0][1][1][0][RTW89_FCC][6] = 70, - [0][1][1][0][RTW89_ETSI][6] = 48, - [0][1][1][0][RTW89_MKK][6] = 50, + [0][1][1][0][RTW89_FCC][6] = 60, + [0][1][1][0][RTW89_ETSI][6] = 54, + [0][1][1][0][RTW89_MKK][6] = 54, [0][1][1][0][RTW89_IC][6] = 42, [0][1][1][0][RTW89_ACMA][6] = 48, - [0][1][1][0][RTW89_FCC][8] = 70, - [0][1][1][0][RTW89_ETSI][8] = 48, - [0][1][1][0][RTW89_MKK][8] = 50, + [0][1][1][0][RTW89_FCC][8] = 60, + [0][1][1][0][RTW89_ETSI][8] = 54, + [0][1][1][0][RTW89_MKK][8] = 52, [0][1][1][0][RTW89_IC][8] = 54, [0][1][1][0][RTW89_ACMA][8] = 48, - [0][1][1][0][RTW89_FCC][10] = 70, - [0][1][1][0][RTW89_ETSI][10] = 48, - [0][1][1][0][RTW89_MKK][10] = 50, + [0][1][1][0][RTW89_FCC][10] = 60, + [0][1][1][0][RTW89_ETSI][10] = 54, + [0][1][1][0][RTW89_MKK][10] = 54, [0][1][1][0][RTW89_IC][10] = 54, [0][1][1][0][RTW89_ACMA][10] = 48, - [0][1][1][0][RTW89_FCC][12] = 70, - [0][1][1][0][RTW89_ETSI][12] = 48, - [0][1][1][0][RTW89_MKK][12] = 50, + [0][1][1][0][RTW89_FCC][12] = 60, + [0][1][1][0][RTW89_ETSI][12] = 54, + [0][1][1][0][RTW89_MKK][12] = 54, [0][1][1][0][RTW89_IC][12] = 54, [0][1][1][0][RTW89_ACMA][12] = 48, - [0][1][1][0][RTW89_FCC][14] = 70, - 
[0][1][1][0][RTW89_ETSI][14] = 48, - [0][1][1][0][RTW89_MKK][14] = 50, + [0][1][1][0][RTW89_FCC][14] = 60, + [0][1][1][0][RTW89_ETSI][14] = 54, + [0][1][1][0][RTW89_MKK][14] = 54, [0][1][1][0][RTW89_IC][14] = 54, [0][1][1][0][RTW89_ACMA][14] = 48, - [0][1][1][0][RTW89_FCC][15] = 68, - [0][1][1][0][RTW89_ETSI][15] = 48, + [0][1][1][0][RTW89_FCC][15] = 58, + [0][1][1][0][RTW89_ETSI][15] = 54, [0][1][1][0][RTW89_MKK][15] = 70, [0][1][1][0][RTW89_IC][15] = 68, [0][1][1][0][RTW89_ACMA][15] = 48, - [0][1][1][0][RTW89_FCC][17] = 70, - [0][1][1][0][RTW89_ETSI][17] = 48, - [0][1][1][0][RTW89_MKK][17] = 72, + [0][1][1][0][RTW89_FCC][17] = 60, + [0][1][1][0][RTW89_ETSI][17] = 54, + [0][1][1][0][RTW89_MKK][17] = 70, [0][1][1][0][RTW89_IC][17] = 70, [0][1][1][0][RTW89_ACMA][17] = 48, - [0][1][1][0][RTW89_FCC][19] = 70, - [0][1][1][0][RTW89_ETSI][19] = 48, - [0][1][1][0][RTW89_MKK][19] = 72, + [0][1][1][0][RTW89_FCC][19] = 60, + [0][1][1][0][RTW89_ETSI][19] = 54, + [0][1][1][0][RTW89_MKK][19] = 70, [0][1][1][0][RTW89_IC][19] = 70, [0][1][1][0][RTW89_ACMA][19] = 48, - [0][1][1][0][RTW89_FCC][21] = 70, - [0][1][1][0][RTW89_ETSI][21] = 48, - [0][1][1][0][RTW89_MKK][21] = 72, + [0][1][1][0][RTW89_FCC][21] = 60, + [0][1][1][0][RTW89_ETSI][21] = 54, + [0][1][1][0][RTW89_MKK][21] = 70, [0][1][1][0][RTW89_IC][21] = 70, [0][1][1][0][RTW89_ACMA][21] = 48, - [0][1][1][0][RTW89_FCC][23] = 70, - [0][1][1][0][RTW89_ETSI][23] = 48, - [0][1][1][0][RTW89_MKK][23] = 72, + [0][1][1][0][RTW89_FCC][23] = 60, + [0][1][1][0][RTW89_ETSI][23] = 54, + [0][1][1][0][RTW89_MKK][23] = 70, [0][1][1][0][RTW89_IC][23] = 70, [0][1][1][0][RTW89_ACMA][23] = 48, - [0][1][1][0][RTW89_FCC][25] = 70, - [0][1][1][0][RTW89_ETSI][25] = 48, + [0][1][1][0][RTW89_FCC][25] = 60, + [0][1][1][0][RTW89_ETSI][25] = 54, [0][1][1][0][RTW89_MKK][25] = 70, [0][1][1][0][RTW89_IC][25] = 127, [0][1][1][0][RTW89_ACMA][25] = 127, - [0][1][1][0][RTW89_FCC][27] = 70, - [0][1][1][0][RTW89_ETSI][27] = 48, - [0][1][1][0][RTW89_MKK][27] = 72, + [0][1][1][0][RTW89_FCC][27] = 60, + [0][1][1][0][RTW89_ETSI][27] = 54, + [0][1][1][0][RTW89_MKK][27] = 70, [0][1][1][0][RTW89_IC][27] = 127, [0][1][1][0][RTW89_ACMA][27] = 127, - [0][1][1][0][RTW89_FCC][29] = 70, - [0][1][1][0][RTW89_ETSI][29] = 48, - [0][1][1][0][RTW89_MKK][29] = 72, + [0][1][1][0][RTW89_FCC][29] = 60, + [0][1][1][0][RTW89_ETSI][29] = 54, + [0][1][1][0][RTW89_MKK][29] = 70, [0][1][1][0][RTW89_IC][29] = 127, [0][1][1][0][RTW89_ACMA][29] = 127, - [0][1][1][0][RTW89_FCC][31] = 70, - [0][1][1][0][RTW89_ETSI][31] = 48, - [0][1][1][0][RTW89_MKK][31] = 72, + [0][1][1][0][RTW89_FCC][31] = 60, + [0][1][1][0][RTW89_ETSI][31] = 54, + [0][1][1][0][RTW89_MKK][31] = 70, [0][1][1][0][RTW89_IC][31] = 70, [0][1][1][0][RTW89_ACMA][31] = 48, - [0][1][1][0][RTW89_FCC][33] = 70, - [0][1][1][0][RTW89_ETSI][33] = 48, - [0][1][1][0][RTW89_MKK][33] = 72, + [0][1][1][0][RTW89_FCC][33] = 60, + [0][1][1][0][RTW89_ETSI][33] = 54, + [0][1][1][0][RTW89_MKK][33] = 70, [0][1][1][0][RTW89_IC][33] = 70, [0][1][1][0][RTW89_ACMA][33] = 48, - [0][1][1][0][RTW89_FCC][35] = 68, - [0][1][1][0][RTW89_ETSI][35] = 48, - [0][1][1][0][RTW89_MKK][35] = 72, + [0][1][1][0][RTW89_FCC][35] = 58, + [0][1][1][0][RTW89_ETSI][35] = 54, + [0][1][1][0][RTW89_MKK][35] = 70, [0][1][1][0][RTW89_IC][35] = 68, [0][1][1][0][RTW89_ACMA][35] = 48, - [0][1][1][0][RTW89_FCC][37] = 70, + [0][1][1][0][RTW89_FCC][37] = 60, [0][1][1][0][RTW89_ETSI][37] = 127, - [0][1][1][0][RTW89_MKK][37] = 72, + [0][1][1][0][RTW89_MKK][37] = 70, [0][1][1][0][RTW89_IC][37] = 70, - 
[0][1][1][0][RTW89_ACMA][37] = 72, - [0][1][1][0][RTW89_FCC][38] = 80, + [0][1][1][0][RTW89_ACMA][37] = 70, + [0][1][1][0][RTW89_FCC][38] = 70, [0][1][1][0][RTW89_ETSI][38] = 18, [0][1][1][0][RTW89_MKK][38] = 127, - [0][1][1][0][RTW89_IC][38] = 80, - [0][1][1][0][RTW89_ACMA][38] = 74, - [0][1][1][0][RTW89_FCC][40] = 80, + [0][1][1][0][RTW89_IC][38] = 70, + [0][1][1][0][RTW89_ACMA][38] = 70, + [0][1][1][0][RTW89_FCC][40] = 70, [0][1][1][0][RTW89_ETSI][40] = 18, [0][1][1][0][RTW89_MKK][40] = 127, - [0][1][1][0][RTW89_IC][40] = 80, + [0][1][1][0][RTW89_IC][40] = 70, [0][1][1][0][RTW89_ACMA][40] = 16, - [0][1][1][0][RTW89_FCC][42] = 80, + [0][1][1][0][RTW89_FCC][42] = 70, [0][1][1][0][RTW89_ETSI][42] = 18, [0][1][1][0][RTW89_MKK][42] = 127, - [0][1][1][0][RTW89_IC][42] = 80, - [0][1][1][0][RTW89_ACMA][42] = 78, - [0][1][1][0][RTW89_FCC][44] = 80, + [0][1][1][0][RTW89_IC][42] = 70, + [0][1][1][0][RTW89_ACMA][42] = 70, + [0][1][1][0][RTW89_FCC][44] = 70, [0][1][1][0][RTW89_ETSI][44] = 18, [0][1][1][0][RTW89_MKK][44] = 127, - [0][1][1][0][RTW89_IC][44] = 80, + [0][1][1][0][RTW89_IC][44] = 70, [0][1][1][0][RTW89_ACMA][44] = 16, - [0][1][1][0][RTW89_FCC][46] = 80, + [0][1][1][0][RTW89_FCC][46] = 70, [0][1][1][0][RTW89_ETSI][46] = 18, [0][1][1][0][RTW89_MKK][46] = 127, - [0][1][1][0][RTW89_IC][46] = 80, - [0][1][1][0][RTW89_ACMA][46] = 78, - [0][1][1][0][RTW89_FCC][48] = 58, + [0][1][1][0][RTW89_IC][46] = 70, + [0][1][1][0][RTW89_ACMA][46] = 70, + [0][1][1][0][RTW89_FCC][48] = 48, [0][1][1][0][RTW89_ETSI][48] = 127, [0][1][1][0][RTW89_MKK][48] = 127, [0][1][1][0][RTW89_IC][48] = 127, [0][1][1][0][RTW89_ACMA][48] = 127, - [0][1][1][0][RTW89_FCC][50] = 58, + [0][1][1][0][RTW89_FCC][50] = 48, [0][1][1][0][RTW89_ETSI][50] = 127, [0][1][1][0][RTW89_MKK][50] = 127, [0][1][1][0][RTW89_IC][50] = 127, [0][1][1][0][RTW89_ACMA][50] = 127, - [0][1][1][0][RTW89_FCC][52] = 58, + [0][1][1][0][RTW89_FCC][52] = 48, [0][1][1][0][RTW89_ETSI][52] = 127, [0][1][1][0][RTW89_MKK][52] = 127, [0][1][1][0][RTW89_IC][52] = 127, [0][1][1][0][RTW89_ACMA][52] = 127, - [0][0][2][0][RTW89_FCC][0] = 80, - [0][0][2][0][RTW89_ETSI][0] = 62, - [0][0][2][0][RTW89_MKK][0] = 64, + [0][0][2][0][RTW89_FCC][0] = 70, + [0][0][2][0][RTW89_ETSI][0] = 66, + [0][0][2][0][RTW89_MKK][0] = 68, [0][0][2][0][RTW89_IC][0] = 66, [0][0][2][0][RTW89_ACMA][0] = 62, - [0][0][2][0][RTW89_FCC][2] = 80, - [0][0][2][0][RTW89_ETSI][2] = 62, - [0][0][2][0][RTW89_MKK][2] = 64, + [0][0][2][0][RTW89_FCC][2] = 70, + [0][0][2][0][RTW89_ETSI][2] = 66, + [0][0][2][0][RTW89_MKK][2] = 68, [0][0][2][0][RTW89_IC][2] = 66, [0][0][2][0][RTW89_ACMA][2] = 62, - [0][0][2][0][RTW89_FCC][4] = 80, - [0][0][2][0][RTW89_ETSI][4] = 62, - [0][0][2][0][RTW89_MKK][4] = 64, + [0][0][2][0][RTW89_FCC][4] = 70, + [0][0][2][0][RTW89_ETSI][4] = 66, + [0][0][2][0][RTW89_MKK][4] = 68, [0][0][2][0][RTW89_IC][4] = 66, [0][0][2][0][RTW89_ACMA][4] = 62, - [0][0][2][0][RTW89_FCC][6] = 80, - [0][0][2][0][RTW89_ETSI][6] = 62, - [0][0][2][0][RTW89_MKK][6] = 64, + [0][0][2][0][RTW89_FCC][6] = 70, + [0][0][2][0][RTW89_ETSI][6] = 66, + [0][0][2][0][RTW89_MKK][6] = 60, [0][0][2][0][RTW89_IC][6] = 66, [0][0][2][0][RTW89_ACMA][6] = 62, - [0][0][2][0][RTW89_FCC][8] = 80, - [0][0][2][0][RTW89_ETSI][8] = 62, - [0][0][2][0][RTW89_MKK][8] = 64, + [0][0][2][0][RTW89_FCC][8] = 70, + [0][0][2][0][RTW89_ETSI][8] = 66, + [0][0][2][0][RTW89_MKK][8] = 58, [0][0][2][0][RTW89_IC][8] = 66, [0][0][2][0][RTW89_ACMA][8] = 62, - [0][0][2][0][RTW89_FCC][10] = 80, - [0][0][2][0][RTW89_ETSI][10] = 62, - 
[0][0][2][0][RTW89_MKK][10] = 64, + [0][0][2][0][RTW89_FCC][10] = 70, + [0][0][2][0][RTW89_ETSI][10] = 66, + [0][0][2][0][RTW89_MKK][10] = 70, [0][0][2][0][RTW89_IC][10] = 66, [0][0][2][0][RTW89_ACMA][10] = 62, - [0][0][2][0][RTW89_FCC][12] = 80, - [0][0][2][0][RTW89_ETSI][12] = 62, - [0][0][2][0][RTW89_MKK][12] = 64, + [0][0][2][0][RTW89_FCC][12] = 70, + [0][0][2][0][RTW89_ETSI][12] = 66, + [0][0][2][0][RTW89_MKK][12] = 70, [0][0][2][0][RTW89_IC][12] = 66, [0][0][2][0][RTW89_ACMA][12] = 62, - [0][0][2][0][RTW89_FCC][14] = 80, - [0][0][2][0][RTW89_ETSI][14] = 62, - [0][0][2][0][RTW89_MKK][14] = 64, + [0][0][2][0][RTW89_FCC][14] = 70, + [0][0][2][0][RTW89_ETSI][14] = 66, + [0][0][2][0][RTW89_MKK][14] = 70, [0][0][2][0][RTW89_IC][14] = 66, [0][0][2][0][RTW89_ACMA][14] = 62, - [0][0][2][0][RTW89_FCC][15] = 76, - [0][0][2][0][RTW89_ETSI][15] = 62, - [0][0][2][0][RTW89_MKK][15] = 78, - [0][0][2][0][RTW89_IC][15] = 76, + [0][0][2][0][RTW89_FCC][15] = 66, + [0][0][2][0][RTW89_ETSI][15] = 66, + [0][0][2][0][RTW89_MKK][15] = 70, + [0][0][2][0][RTW89_IC][15] = 70, [0][0][2][0][RTW89_ACMA][15] = 62, - [0][0][2][0][RTW89_FCC][17] = 80, - [0][0][2][0][RTW89_ETSI][17] = 62, - [0][0][2][0][RTW89_MKK][17] = 78, - [0][0][2][0][RTW89_IC][17] = 80, + [0][0][2][0][RTW89_FCC][17] = 70, + [0][0][2][0][RTW89_ETSI][17] = 66, + [0][0][2][0][RTW89_MKK][17] = 70, + [0][0][2][0][RTW89_IC][17] = 70, [0][0][2][0][RTW89_ACMA][17] = 62, - [0][0][2][0][RTW89_FCC][19] = 80, - [0][0][2][0][RTW89_ETSI][19] = 62, - [0][0][2][0][RTW89_MKK][19] = 78, - [0][0][2][0][RTW89_IC][19] = 80, + [0][0][2][0][RTW89_FCC][19] = 70, + [0][0][2][0][RTW89_ETSI][19] = 66, + [0][0][2][0][RTW89_MKK][19] = 70, + [0][0][2][0][RTW89_IC][19] = 70, [0][0][2][0][RTW89_ACMA][19] = 62, - [0][0][2][0][RTW89_FCC][21] = 80, - [0][0][2][0][RTW89_ETSI][21] = 62, - [0][0][2][0][RTW89_MKK][21] = 78, - [0][0][2][0][RTW89_IC][21] = 80, + [0][0][2][0][RTW89_FCC][21] = 70, + [0][0][2][0][RTW89_ETSI][21] = 66, + [0][0][2][0][RTW89_MKK][21] = 70, + [0][0][2][0][RTW89_IC][21] = 70, [0][0][2][0][RTW89_ACMA][21] = 62, - [0][0][2][0][RTW89_FCC][23] = 80, - [0][0][2][0][RTW89_ETSI][23] = 62, - [0][0][2][0][RTW89_MKK][23] = 78, - [0][0][2][0][RTW89_IC][23] = 80, + [0][0][2][0][RTW89_FCC][23] = 70, + [0][0][2][0][RTW89_ETSI][23] = 66, + [0][0][2][0][RTW89_MKK][23] = 70, + [0][0][2][0][RTW89_IC][23] = 70, [0][0][2][0][RTW89_ACMA][23] = 62, - [0][0][2][0][RTW89_FCC][25] = 80, - [0][0][2][0][RTW89_ETSI][25] = 62, - [0][0][2][0][RTW89_MKK][25] = 78, + [0][0][2][0][RTW89_FCC][25] = 70, + [0][0][2][0][RTW89_ETSI][25] = 66, + [0][0][2][0][RTW89_MKK][25] = 70, [0][0][2][0][RTW89_IC][25] = 127, [0][0][2][0][RTW89_ACMA][25] = 127, - [0][0][2][0][RTW89_FCC][27] = 80, - [0][0][2][0][RTW89_ETSI][27] = 62, - [0][0][2][0][RTW89_MKK][27] = 78, + [0][0][2][0][RTW89_FCC][27] = 70, + [0][0][2][0][RTW89_ETSI][27] = 66, + [0][0][2][0][RTW89_MKK][27] = 70, [0][0][2][0][RTW89_IC][27] = 127, [0][0][2][0][RTW89_ACMA][27] = 127, - [0][0][2][0][RTW89_FCC][29] = 80, - [0][0][2][0][RTW89_ETSI][29] = 62, - [0][0][2][0][RTW89_MKK][29] = 78, + [0][0][2][0][RTW89_FCC][29] = 70, + [0][0][2][0][RTW89_ETSI][29] = 66, + [0][0][2][0][RTW89_MKK][29] = 70, [0][0][2][0][RTW89_IC][29] = 127, [0][0][2][0][RTW89_ACMA][29] = 127, - [0][0][2][0][RTW89_FCC][31] = 80, - [0][0][2][0][RTW89_ETSI][31] = 62, - [0][0][2][0][RTW89_MKK][31] = 78, - [0][0][2][0][RTW89_IC][31] = 80, + [0][0][2][0][RTW89_FCC][31] = 70, + [0][0][2][0][RTW89_ETSI][31] = 66, + [0][0][2][0][RTW89_MKK][31] = 70, + [0][0][2][0][RTW89_IC][31] = 70, 
[0][0][2][0][RTW89_ACMA][31] = 62, - [0][0][2][0][RTW89_FCC][33] = 80, - [0][0][2][0][RTW89_ETSI][33] = 62, - [0][0][2][0][RTW89_MKK][33] = 78, - [0][0][2][0][RTW89_IC][33] = 80, + [0][0][2][0][RTW89_FCC][33] = 70, + [0][0][2][0][RTW89_ETSI][33] = 66, + [0][0][2][0][RTW89_MKK][33] = 70, + [0][0][2][0][RTW89_IC][33] = 70, [0][0][2][0][RTW89_ACMA][33] = 62, - [0][0][2][0][RTW89_FCC][35] = 72, - [0][0][2][0][RTW89_ETSI][35] = 62, - [0][0][2][0][RTW89_MKK][35] = 78, - [0][0][2][0][RTW89_IC][35] = 72, + [0][0][2][0][RTW89_FCC][35] = 62, + [0][0][2][0][RTW89_ETSI][35] = 66, + [0][0][2][0][RTW89_MKK][35] = 70, + [0][0][2][0][RTW89_IC][35] = 70, [0][0][2][0][RTW89_ACMA][35] = 62, - [0][0][2][0][RTW89_FCC][37] = 80, + [0][0][2][0][RTW89_FCC][37] = 70, [0][0][2][0][RTW89_ETSI][37] = 127, - [0][0][2][0][RTW89_MKK][37] = 78, - [0][0][2][0][RTW89_IC][37] = 80, - [0][0][2][0][RTW89_ACMA][37] = 78, - [0][0][2][0][RTW89_FCC][38] = 80, + [0][0][2][0][RTW89_MKK][37] = 70, + [0][0][2][0][RTW89_IC][37] = 70, + [0][0][2][0][RTW89_ACMA][37] = 70, + [0][0][2][0][RTW89_FCC][38] = 70, [0][0][2][0][RTW89_ETSI][38] = 30, [0][0][2][0][RTW89_MKK][38] = 127, - [0][0][2][0][RTW89_IC][38] = 80, - [0][0][2][0][RTW89_ACMA][38] = 78, - [0][0][2][0][RTW89_FCC][40] = 80, + [0][0][2][0][RTW89_IC][38] = 70, + [0][0][2][0][RTW89_ACMA][38] = 70, + [0][0][2][0][RTW89_FCC][40] = 70, [0][0][2][0][RTW89_ETSI][40] = 30, [0][0][2][0][RTW89_MKK][40] = 127, - [0][0][2][0][RTW89_IC][40] = 80, - [0][0][2][0][RTW89_ACMA][40] = 78, - [0][0][2][0][RTW89_FCC][42] = 80, + [0][0][2][0][RTW89_IC][40] = 70, + [0][0][2][0][RTW89_ACMA][40] = 70, + [0][0][2][0][RTW89_FCC][42] = 70, [0][0][2][0][RTW89_ETSI][42] = 30, [0][0][2][0][RTW89_MKK][42] = 127, - [0][0][2][0][RTW89_IC][42] = 80, - [0][0][2][0][RTW89_ACMA][42] = 78, - [0][0][2][0][RTW89_FCC][44] = 80, + [0][0][2][0][RTW89_IC][42] = 70, + [0][0][2][0][RTW89_ACMA][42] = 70, + [0][0][2][0][RTW89_FCC][44] = 70, [0][0][2][0][RTW89_ETSI][44] = 30, [0][0][2][0][RTW89_MKK][44] = 127, - [0][0][2][0][RTW89_IC][44] = 80, - [0][0][2][0][RTW89_ACMA][44] = 78, - [0][0][2][0][RTW89_FCC][46] = 80, + [0][0][2][0][RTW89_IC][44] = 70, + [0][0][2][0][RTW89_ACMA][44] = 70, + [0][0][2][0][RTW89_FCC][46] = 70, [0][0][2][0][RTW89_ETSI][46] = 30, [0][0][2][0][RTW89_MKK][46] = 127, - [0][0][2][0][RTW89_IC][46] = 80, - [0][0][2][0][RTW89_ACMA][46] = 78, - [0][0][2][0][RTW89_FCC][48] = 80, + [0][0][2][0][RTW89_IC][46] = 70, + [0][0][2][0][RTW89_ACMA][46] = 70, + [0][0][2][0][RTW89_FCC][48] = 70, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, [0][0][2][0][RTW89_IC][48] = 127, [0][0][2][0][RTW89_ACMA][48] = 127, - [0][0][2][0][RTW89_FCC][50] = 80, + [0][0][2][0][RTW89_FCC][50] = 70, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, [0][0][2][0][RTW89_IC][50] = 127, [0][0][2][0][RTW89_ACMA][50] = 127, - [0][0][2][0][RTW89_FCC][52] = 80, + [0][0][2][0][RTW89_FCC][52] = 70, [0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, [0][0][2][0][RTW89_IC][52] = 127, [0][0][2][0][RTW89_ACMA][52] = 127, - [0][1][2][0][RTW89_FCC][0] = 72, - [0][1][2][0][RTW89_ETSI][0] = 50, - [0][1][2][0][RTW89_MKK][0] = 52, + [0][1][2][0][RTW89_FCC][0] = 62, + [0][1][2][0][RTW89_ETSI][0] = 54, + [0][1][2][0][RTW89_MKK][0] = 54, [0][1][2][0][RTW89_IC][0] = 44, [0][1][2][0][RTW89_ACMA][0] = 50, - [0][1][2][0][RTW89_FCC][2] = 72, - [0][1][2][0][RTW89_ETSI][2] = 50, - [0][1][2][0][RTW89_MKK][2] = 52, + [0][1][2][0][RTW89_FCC][2] = 62, + [0][1][2][0][RTW89_ETSI][2] = 54, + 
[0][1][2][0][RTW89_MKK][2] = 54, [0][1][2][0][RTW89_IC][2] = 44, [0][1][2][0][RTW89_ACMA][2] = 50, - [0][1][2][0][RTW89_FCC][4] = 72, - [0][1][2][0][RTW89_ETSI][4] = 50, - [0][1][2][0][RTW89_MKK][4] = 52, + [0][1][2][0][RTW89_FCC][4] = 62, + [0][1][2][0][RTW89_ETSI][4] = 54, + [0][1][2][0][RTW89_MKK][4] = 54, [0][1][2][0][RTW89_IC][4] = 44, [0][1][2][0][RTW89_ACMA][4] = 50, - [0][1][2][0][RTW89_FCC][6] = 72, - [0][1][2][0][RTW89_ETSI][6] = 50, - [0][1][2][0][RTW89_MKK][6] = 52, + [0][1][2][0][RTW89_FCC][6] = 62, + [0][1][2][0][RTW89_ETSI][6] = 54, + [0][1][2][0][RTW89_MKK][6] = 50, [0][1][2][0][RTW89_IC][6] = 44, [0][1][2][0][RTW89_ACMA][6] = 50, - [0][1][2][0][RTW89_FCC][8] = 72, - [0][1][2][0][RTW89_ETSI][8] = 50, - [0][1][2][0][RTW89_MKK][8] = 52, + [0][1][2][0][RTW89_FCC][8] = 62, + [0][1][2][0][RTW89_ETSI][8] = 54, + [0][1][2][0][RTW89_MKK][8] = 42, [0][1][2][0][RTW89_IC][8] = 54, [0][1][2][0][RTW89_ACMA][8] = 50, - [0][1][2][0][RTW89_FCC][10] = 72, - [0][1][2][0][RTW89_ETSI][10] = 50, - [0][1][2][0][RTW89_MKK][10] = 52, + [0][1][2][0][RTW89_FCC][10] = 62, + [0][1][2][0][RTW89_ETSI][10] = 54, + [0][1][2][0][RTW89_MKK][10] = 54, [0][1][2][0][RTW89_IC][10] = 54, [0][1][2][0][RTW89_ACMA][10] = 50, - [0][1][2][0][RTW89_FCC][12] = 72, - [0][1][2][0][RTW89_ETSI][12] = 50, - [0][1][2][0][RTW89_MKK][12] = 52, + [0][1][2][0][RTW89_FCC][12] = 62, + [0][1][2][0][RTW89_ETSI][12] = 54, + [0][1][2][0][RTW89_MKK][12] = 54, [0][1][2][0][RTW89_IC][12] = 54, [0][1][2][0][RTW89_ACMA][12] = 50, - [0][1][2][0][RTW89_FCC][14] = 72, - [0][1][2][0][RTW89_ETSI][14] = 50, - [0][1][2][0][RTW89_MKK][14] = 52, + [0][1][2][0][RTW89_FCC][14] = 62, + [0][1][2][0][RTW89_ETSI][14] = 54, + [0][1][2][0][RTW89_MKK][14] = 54, [0][1][2][0][RTW89_IC][14] = 54, [0][1][2][0][RTW89_ACMA][14] = 50, - [0][1][2][0][RTW89_FCC][15] = 70, - [0][1][2][0][RTW89_ETSI][15] = 50, - [0][1][2][0][RTW89_MKK][15] = 72, + [0][1][2][0][RTW89_FCC][15] = 60, + [0][1][2][0][RTW89_ETSI][15] = 54, + [0][1][2][0][RTW89_MKK][15] = 68, [0][1][2][0][RTW89_IC][15] = 70, [0][1][2][0][RTW89_ACMA][15] = 50, - [0][1][2][0][RTW89_FCC][17] = 72, - [0][1][2][0][RTW89_ETSI][17] = 50, - [0][1][2][0][RTW89_MKK][17] = 72, - [0][1][2][0][RTW89_IC][17] = 72, + [0][1][2][0][RTW89_FCC][17] = 62, + [0][1][2][0][RTW89_ETSI][17] = 54, + [0][1][2][0][RTW89_MKK][17] = 68, + [0][1][2][0][RTW89_IC][17] = 70, [0][1][2][0][RTW89_ACMA][17] = 50, - [0][1][2][0][RTW89_FCC][19] = 72, - [0][1][2][0][RTW89_ETSI][19] = 50, - [0][1][2][0][RTW89_MKK][19] = 72, - [0][1][2][0][RTW89_IC][19] = 72, + [0][1][2][0][RTW89_FCC][19] = 62, + [0][1][2][0][RTW89_ETSI][19] = 54, + [0][1][2][0][RTW89_MKK][19] = 68, + [0][1][2][0][RTW89_IC][19] = 70, [0][1][2][0][RTW89_ACMA][19] = 50, - [0][1][2][0][RTW89_FCC][21] = 72, - [0][1][2][0][RTW89_ETSI][21] = 50, - [0][1][2][0][RTW89_MKK][21] = 72, - [0][1][2][0][RTW89_IC][21] = 72, + [0][1][2][0][RTW89_FCC][21] = 62, + [0][1][2][0][RTW89_ETSI][21] = 54, + [0][1][2][0][RTW89_MKK][21] = 68, + [0][1][2][0][RTW89_IC][21] = 70, [0][1][2][0][RTW89_ACMA][21] = 50, - [0][1][2][0][RTW89_FCC][23] = 72, - [0][1][2][0][RTW89_ETSI][23] = 50, - [0][1][2][0][RTW89_MKK][23] = 72, - [0][1][2][0][RTW89_IC][23] = 72, + [0][1][2][0][RTW89_FCC][23] = 62, + [0][1][2][0][RTW89_ETSI][23] = 54, + [0][1][2][0][RTW89_MKK][23] = 68, + [0][1][2][0][RTW89_IC][23] = 70, [0][1][2][0][RTW89_ACMA][23] = 50, - [0][1][2][0][RTW89_FCC][25] = 72, - [0][1][2][0][RTW89_ETSI][25] = 50, - [0][1][2][0][RTW89_MKK][25] = 72, + [0][1][2][0][RTW89_FCC][25] = 62, + [0][1][2][0][RTW89_ETSI][25] = 54, + 
[0][1][2][0][RTW89_MKK][25] = 68, [0][1][2][0][RTW89_IC][25] = 127, [0][1][2][0][RTW89_ACMA][25] = 127, - [0][1][2][0][RTW89_FCC][27] = 72, - [0][1][2][0][RTW89_ETSI][27] = 50, - [0][1][2][0][RTW89_MKK][27] = 72, + [0][1][2][0][RTW89_FCC][27] = 62, + [0][1][2][0][RTW89_ETSI][27] = 54, + [0][1][2][0][RTW89_MKK][27] = 68, [0][1][2][0][RTW89_IC][27] = 127, [0][1][2][0][RTW89_ACMA][27] = 127, - [0][1][2][0][RTW89_FCC][29] = 72, - [0][1][2][0][RTW89_ETSI][29] = 50, - [0][1][2][0][RTW89_MKK][29] = 72, + [0][1][2][0][RTW89_FCC][29] = 62, + [0][1][2][0][RTW89_ETSI][29] = 54, + [0][1][2][0][RTW89_MKK][29] = 68, [0][1][2][0][RTW89_IC][29] = 127, [0][1][2][0][RTW89_ACMA][29] = 127, - [0][1][2][0][RTW89_FCC][31] = 72, - [0][1][2][0][RTW89_ETSI][31] = 50, - [0][1][2][0][RTW89_MKK][31] = 72, - [0][1][2][0][RTW89_IC][31] = 72, + [0][1][2][0][RTW89_FCC][31] = 62, + [0][1][2][0][RTW89_ETSI][31] = 54, + [0][1][2][0][RTW89_MKK][31] = 68, + [0][1][2][0][RTW89_IC][31] = 70, [0][1][2][0][RTW89_ACMA][31] = 50, - [0][1][2][0][RTW89_FCC][33] = 72, - [0][1][2][0][RTW89_ETSI][33] = 50, - [0][1][2][0][RTW89_MKK][33] = 72, - [0][1][2][0][RTW89_IC][33] = 72, + [0][1][2][0][RTW89_FCC][33] = 62, + [0][1][2][0][RTW89_ETSI][33] = 54, + [0][1][2][0][RTW89_MKK][33] = 68, + [0][1][2][0][RTW89_IC][33] = 70, [0][1][2][0][RTW89_ACMA][33] = 50, - [0][1][2][0][RTW89_FCC][35] = 68, - [0][1][2][0][RTW89_ETSI][35] = 50, - [0][1][2][0][RTW89_MKK][35] = 72, + [0][1][2][0][RTW89_FCC][35] = 58, + [0][1][2][0][RTW89_ETSI][35] = 54, + [0][1][2][0][RTW89_MKK][35] = 68, [0][1][2][0][RTW89_IC][35] = 68, [0][1][2][0][RTW89_ACMA][35] = 50, - [0][1][2][0][RTW89_FCC][37] = 72, + [0][1][2][0][RTW89_FCC][37] = 62, [0][1][2][0][RTW89_ETSI][37] = 127, - [0][1][2][0][RTW89_MKK][37] = 72, - [0][1][2][0][RTW89_IC][37] = 72, - [0][1][2][0][RTW89_ACMA][37] = 72, - [0][1][2][0][RTW89_FCC][38] = 80, + [0][1][2][0][RTW89_MKK][37] = 68, + [0][1][2][0][RTW89_IC][37] = 70, + [0][1][2][0][RTW89_ACMA][37] = 70, + [0][1][2][0][RTW89_FCC][38] = 70, [0][1][2][0][RTW89_ETSI][38] = 18, [0][1][2][0][RTW89_MKK][38] = 127, - [0][1][2][0][RTW89_IC][38] = 80, - [0][1][2][0][RTW89_ACMA][38] = 76, - [0][1][2][0][RTW89_FCC][40] = 80, + [0][1][2][0][RTW89_IC][38] = 70, + [0][1][2][0][RTW89_ACMA][38] = 70, + [0][1][2][0][RTW89_FCC][40] = 70, [0][1][2][0][RTW89_ETSI][40] = 18, [0][1][2][0][RTW89_MKK][40] = 127, - [0][1][2][0][RTW89_IC][40] = 80, - [0][1][2][0][RTW89_ACMA][40] = 76, - [0][1][2][0][RTW89_FCC][42] = 80, + [0][1][2][0][RTW89_IC][40] = 70, + [0][1][2][0][RTW89_ACMA][40] = 70, + [0][1][2][0][RTW89_FCC][42] = 70, [0][1][2][0][RTW89_ETSI][42] = 18, [0][1][2][0][RTW89_MKK][42] = 127, - [0][1][2][0][RTW89_IC][42] = 80, - [0][1][2][0][RTW89_ACMA][42] = 78, - [0][1][2][0][RTW89_FCC][44] = 80, + [0][1][2][0][RTW89_IC][42] = 70, + [0][1][2][0][RTW89_ACMA][42] = 70, + [0][1][2][0][RTW89_FCC][44] = 70, [0][1][2][0][RTW89_ETSI][44] = 18, [0][1][2][0][RTW89_MKK][44] = 127, - [0][1][2][0][RTW89_IC][44] = 80, - [0][1][2][0][RTW89_ACMA][44] = 78, - [0][1][2][0][RTW89_FCC][46] = 80, + [0][1][2][0][RTW89_IC][44] = 70, + [0][1][2][0][RTW89_ACMA][44] = 70, + [0][1][2][0][RTW89_FCC][46] = 70, [0][1][2][0][RTW89_ETSI][46] = 18, [0][1][2][0][RTW89_MKK][46] = 127, - [0][1][2][0][RTW89_IC][46] = 80, - [0][1][2][0][RTW89_ACMA][46] = 78, - [0][1][2][0][RTW89_FCC][48] = 60, + [0][1][2][0][RTW89_IC][46] = 70, + [0][1][2][0][RTW89_ACMA][46] = 70, + [0][1][2][0][RTW89_FCC][48] = 50, [0][1][2][0][RTW89_ETSI][48] = 127, [0][1][2][0][RTW89_MKK][48] = 127, [0][1][2][0][RTW89_IC][48] = 127, 
[0][1][2][0][RTW89_ACMA][48] = 127, - [0][1][2][0][RTW89_FCC][50] = 60, + [0][1][2][0][RTW89_FCC][50] = 50, [0][1][2][0][RTW89_ETSI][50] = 127, [0][1][2][0][RTW89_MKK][50] = 127, [0][1][2][0][RTW89_IC][50] = 127, [0][1][2][0][RTW89_ACMA][50] = 127, - [0][1][2][0][RTW89_FCC][52] = 60, + [0][1][2][0][RTW89_FCC][52] = 50, [0][1][2][0][RTW89_ETSI][52] = 127, [0][1][2][0][RTW89_MKK][52] = 127, [0][1][2][0][RTW89_IC][52] = 127, [0][1][2][0][RTW89_ACMA][52] = 127, - [0][1][2][1][RTW89_FCC][0] = 70, - [0][1][2][1][RTW89_ETSI][0] = 42, - [0][1][2][1][RTW89_MKK][0] = 52, + [0][1][2][1][RTW89_FCC][0] = 60, + [0][1][2][1][RTW89_ETSI][0] = 40, + [0][1][2][1][RTW89_MKK][0] = 54, [0][1][2][1][RTW89_IC][0] = 42, [0][1][2][1][RTW89_ACMA][0] = 38, - [0][1][2][1][RTW89_FCC][2] = 70, - [0][1][2][1][RTW89_ETSI][2] = 42, - [0][1][2][1][RTW89_MKK][2] = 52, + [0][1][2][1][RTW89_FCC][2] = 60, + [0][1][2][1][RTW89_ETSI][2] = 40, + [0][1][2][1][RTW89_MKK][2] = 54, [0][1][2][1][RTW89_IC][2] = 42, [0][1][2][1][RTW89_ACMA][2] = 38, - [0][1][2][1][RTW89_FCC][4] = 70, - [0][1][2][1][RTW89_ETSI][4] = 42, - [0][1][2][1][RTW89_MKK][4] = 52, + [0][1][2][1][RTW89_FCC][4] = 60, + [0][1][2][1][RTW89_ETSI][4] = 40, + [0][1][2][1][RTW89_MKK][4] = 54, [0][1][2][1][RTW89_IC][4] = 42, [0][1][2][1][RTW89_ACMA][4] = 38, - [0][1][2][1][RTW89_FCC][6] = 70, - [0][1][2][1][RTW89_ETSI][6] = 42, - [0][1][2][1][RTW89_MKK][6] = 52, + [0][1][2][1][RTW89_FCC][6] = 60, + [0][1][2][1][RTW89_ETSI][6] = 40, + [0][1][2][1][RTW89_MKK][6] = 50, [0][1][2][1][RTW89_IC][6] = 42, [0][1][2][1][RTW89_ACMA][6] = 38, - [0][1][2][1][RTW89_FCC][8] = 70, - [0][1][2][1][RTW89_ETSI][8] = 42, - [0][1][2][1][RTW89_MKK][8] = 52, + [0][1][2][1][RTW89_FCC][8] = 60, + [0][1][2][1][RTW89_ETSI][8] = 40, + [0][1][2][1][RTW89_MKK][8] = 42, [0][1][2][1][RTW89_IC][8] = 42, [0][1][2][1][RTW89_ACMA][8] = 38, - [0][1][2][1][RTW89_FCC][10] = 70, - [0][1][2][1][RTW89_ETSI][10] = 42, - [0][1][2][1][RTW89_MKK][10] = 52, + [0][1][2][1][RTW89_FCC][10] = 60, + [0][1][2][1][RTW89_ETSI][10] = 40, + [0][1][2][1][RTW89_MKK][10] = 66, [0][1][2][1][RTW89_IC][10] = 42, [0][1][2][1][RTW89_ACMA][10] = 38, - [0][1][2][1][RTW89_FCC][12] = 70, - [0][1][2][1][RTW89_ETSI][12] = 42, - [0][1][2][1][RTW89_MKK][12] = 52, + [0][1][2][1][RTW89_FCC][12] = 60, + [0][1][2][1][RTW89_ETSI][12] = 40, + [0][1][2][1][RTW89_MKK][12] = 66, [0][1][2][1][RTW89_IC][12] = 42, [0][1][2][1][RTW89_ACMA][12] = 38, - [0][1][2][1][RTW89_FCC][14] = 70, - [0][1][2][1][RTW89_ETSI][14] = 42, - [0][1][2][1][RTW89_MKK][14] = 52, + [0][1][2][1][RTW89_FCC][14] = 60, + [0][1][2][1][RTW89_ETSI][14] = 40, + [0][1][2][1][RTW89_MKK][14] = 66, [0][1][2][1][RTW89_IC][14] = 42, [0][1][2][1][RTW89_ACMA][14] = 38, - [0][1][2][1][RTW89_FCC][15] = 70, - [0][1][2][1][RTW89_ETSI][15] = 42, - [0][1][2][1][RTW89_MKK][15] = 72, + [0][1][2][1][RTW89_FCC][15] = 60, + [0][1][2][1][RTW89_ETSI][15] = 40, + [0][1][2][1][RTW89_MKK][15] = 68, [0][1][2][1][RTW89_IC][15] = 70, [0][1][2][1][RTW89_ACMA][15] = 38, - [0][1][2][1][RTW89_FCC][17] = 70, - [0][1][2][1][RTW89_ETSI][17] = 42, - [0][1][2][1][RTW89_MKK][17] = 72, + [0][1][2][1][RTW89_FCC][17] = 60, + [0][1][2][1][RTW89_ETSI][17] = 40, + [0][1][2][1][RTW89_MKK][17] = 68, [0][1][2][1][RTW89_IC][17] = 70, [0][1][2][1][RTW89_ACMA][17] = 38, - [0][1][2][1][RTW89_FCC][19] = 70, - [0][1][2][1][RTW89_ETSI][19] = 42, - [0][1][2][1][RTW89_MKK][19] = 72, + [0][1][2][1][RTW89_FCC][19] = 60, + [0][1][2][1][RTW89_ETSI][19] = 40, + [0][1][2][1][RTW89_MKK][19] = 68, [0][1][2][1][RTW89_IC][19] = 70, 
[0][1][2][1][RTW89_ACMA][19] = 38, - [0][1][2][1][RTW89_FCC][21] = 70, - [0][1][2][1][RTW89_ETSI][21] = 42, - [0][1][2][1][RTW89_MKK][21] = 72, + [0][1][2][1][RTW89_FCC][21] = 60, + [0][1][2][1][RTW89_ETSI][21] = 40, + [0][1][2][1][RTW89_MKK][21] = 68, [0][1][2][1][RTW89_IC][21] = 70, [0][1][2][1][RTW89_ACMA][21] = 38, - [0][1][2][1][RTW89_FCC][23] = 70, - [0][1][2][1][RTW89_ETSI][23] = 42, - [0][1][2][1][RTW89_MKK][23] = 72, + [0][1][2][1][RTW89_FCC][23] = 60, + [0][1][2][1][RTW89_ETSI][23] = 40, + [0][1][2][1][RTW89_MKK][23] = 68, [0][1][2][1][RTW89_IC][23] = 70, [0][1][2][1][RTW89_ACMA][23] = 38, - [0][1][2][1][RTW89_FCC][25] = 68, - [0][1][2][1][RTW89_ETSI][25] = 42, - [0][1][2][1][RTW89_MKK][25] = 72, + [0][1][2][1][RTW89_FCC][25] = 58, + [0][1][2][1][RTW89_ETSI][25] = 40, + [0][1][2][1][RTW89_MKK][25] = 68, [0][1][2][1][RTW89_IC][25] = 127, [0][1][2][1][RTW89_ACMA][25] = 127, - [0][1][2][1][RTW89_FCC][27] = 68, - [0][1][2][1][RTW89_ETSI][27] = 42, - [0][1][2][1][RTW89_MKK][27] = 72, + [0][1][2][1][RTW89_FCC][27] = 58, + [0][1][2][1][RTW89_ETSI][27] = 40, + [0][1][2][1][RTW89_MKK][27] = 68, [0][1][2][1][RTW89_IC][27] = 127, [0][1][2][1][RTW89_ACMA][27] = 127, - [0][1][2][1][RTW89_FCC][29] = 68, - [0][1][2][1][RTW89_ETSI][29] = 42, - [0][1][2][1][RTW89_MKK][29] = 72, + [0][1][2][1][RTW89_FCC][29] = 58, + [0][1][2][1][RTW89_ETSI][29] = 40, + [0][1][2][1][RTW89_MKK][29] = 68, [0][1][2][1][RTW89_IC][29] = 127, [0][1][2][1][RTW89_ACMA][29] = 127, - [0][1][2][1][RTW89_FCC][31] = 68, - [0][1][2][1][RTW89_ETSI][31] = 42, - [0][1][2][1][RTW89_MKK][31] = 72, + [0][1][2][1][RTW89_FCC][31] = 58, + [0][1][2][1][RTW89_ETSI][31] = 40, + [0][1][2][1][RTW89_MKK][31] = 68, [0][1][2][1][RTW89_IC][31] = 68, [0][1][2][1][RTW89_ACMA][31] = 38, - [0][1][2][1][RTW89_FCC][33] = 68, - [0][1][2][1][RTW89_ETSI][33] = 42, - [0][1][2][1][RTW89_MKK][33] = 72, + [0][1][2][1][RTW89_FCC][33] = 58, + [0][1][2][1][RTW89_ETSI][33] = 40, + [0][1][2][1][RTW89_MKK][33] = 68, [0][1][2][1][RTW89_IC][33] = 68, [0][1][2][1][RTW89_ACMA][33] = 38, - [0][1][2][1][RTW89_FCC][35] = 68, - [0][1][2][1][RTW89_ETSI][35] = 42, - [0][1][2][1][RTW89_MKK][35] = 72, + [0][1][2][1][RTW89_FCC][35] = 58, + [0][1][2][1][RTW89_ETSI][35] = 40, + [0][1][2][1][RTW89_MKK][35] = 68, [0][1][2][1][RTW89_IC][35] = 68, [0][1][2][1][RTW89_ACMA][35] = 38, - [0][1][2][1][RTW89_FCC][37] = 70, + [0][1][2][1][RTW89_FCC][37] = 60, [0][1][2][1][RTW89_ETSI][37] = 127, - [0][1][2][1][RTW89_MKK][37] = 72, + [0][1][2][1][RTW89_MKK][37] = 68, [0][1][2][1][RTW89_IC][37] = 70, - [0][1][2][1][RTW89_ACMA][37] = 72, - [0][1][2][1][RTW89_FCC][38] = 80, - [0][1][2][1][RTW89_ETSI][38] = 8, + [0][1][2][1][RTW89_ACMA][37] = 70, + [0][1][2][1][RTW89_FCC][38] = 70, + [0][1][2][1][RTW89_ETSI][38] = 6, [0][1][2][1][RTW89_MKK][38] = 127, - [0][1][2][1][RTW89_IC][38] = 80, - [0][1][2][1][RTW89_ACMA][38] = 76, - [0][1][2][1][RTW89_FCC][40] = 80, - [0][1][2][1][RTW89_ETSI][40] = 8, + [0][1][2][1][RTW89_IC][38] = 70, + [0][1][2][1][RTW89_ACMA][38] = 70, + [0][1][2][1][RTW89_FCC][40] = 70, + [0][1][2][1][RTW89_ETSI][40] = 6, [0][1][2][1][RTW89_MKK][40] = 127, - [0][1][2][1][RTW89_IC][40] = 80, - [0][1][2][1][RTW89_ACMA][40] = 76, - [0][1][2][1][RTW89_FCC][42] = 80, - [0][1][2][1][RTW89_ETSI][42] = 8, + [0][1][2][1][RTW89_IC][40] = 70, + [0][1][2][1][RTW89_ACMA][40] = 70, + [0][1][2][1][RTW89_FCC][42] = 70, + [0][1][2][1][RTW89_ETSI][42] = 6, [0][1][2][1][RTW89_MKK][42] = 127, - [0][1][2][1][RTW89_IC][42] = 80, - [0][1][2][1][RTW89_ACMA][42] = 78, - [0][1][2][1][RTW89_FCC][44] = 80, - 
[0][1][2][1][RTW89_ETSI][44] = 8, + [0][1][2][1][RTW89_IC][42] = 70, + [0][1][2][1][RTW89_ACMA][42] = 70, + [0][1][2][1][RTW89_FCC][44] = 70, + [0][1][2][1][RTW89_ETSI][44] = 6, [0][1][2][1][RTW89_MKK][44] = 127, - [0][1][2][1][RTW89_IC][44] = 80, - [0][1][2][1][RTW89_ACMA][44] = 78, - [0][1][2][1][RTW89_FCC][46] = 80, - [0][1][2][1][RTW89_ETSI][46] = 8, + [0][1][2][1][RTW89_IC][44] = 70, + [0][1][2][1][RTW89_ACMA][44] = 70, + [0][1][2][1][RTW89_FCC][46] = 70, + [0][1][2][1][RTW89_ETSI][46] = 6, [0][1][2][1][RTW89_MKK][46] = 127, - [0][1][2][1][RTW89_IC][46] = 80, - [0][1][2][1][RTW89_ACMA][46] = 78, - [0][1][2][1][RTW89_FCC][48] = 60, + [0][1][2][1][RTW89_IC][46] = 70, + [0][1][2][1][RTW89_ACMA][46] = 70, + [0][1][2][1][RTW89_FCC][48] = 50, [0][1][2][1][RTW89_ETSI][48] = 127, [0][1][2][1][RTW89_MKK][48] = 127, [0][1][2][1][RTW89_IC][48] = 127, [0][1][2][1][RTW89_ACMA][48] = 127, - [0][1][2][1][RTW89_FCC][50] = 60, + [0][1][2][1][RTW89_FCC][50] = 50, [0][1][2][1][RTW89_ETSI][50] = 127, [0][1][2][1][RTW89_MKK][50] = 127, [0][1][2][1][RTW89_IC][50] = 127, [0][1][2][1][RTW89_ACMA][50] = 127, - [0][1][2][1][RTW89_FCC][52] = 60, + [0][1][2][1][RTW89_FCC][52] = 50, [0][1][2][1][RTW89_ETSI][52] = 127, [0][1][2][1][RTW89_MKK][52] = 127, [0][1][2][1][RTW89_IC][52] = 127, [0][1][2][1][RTW89_ACMA][52] = 127, - [1][0][2][0][RTW89_FCC][1] = 68, + [1][0][2][0][RTW89_FCC][1] = 58, [1][0][2][0][RTW89_ETSI][1] = 66, - [1][0][2][0][RTW89_MKK][1] = 72, - [1][0][2][0][RTW89_IC][1] = 72, - [1][0][2][0][RTW89_ACMA][1] = 72, - [1][0][2][0][RTW89_FCC][5] = 80, + [1][0][2][0][RTW89_MKK][1] = 66, + [1][0][2][0][RTW89_IC][1] = 66, + [1][0][2][0][RTW89_ACMA][1] = 66, + [1][0][2][0][RTW89_FCC][5] = 68, [1][0][2][0][RTW89_ETSI][5] = 66, - [1][0][2][0][RTW89_MKK][5] = 72, - [1][0][2][0][RTW89_IC][5] = 72, - [1][0][2][0][RTW89_ACMA][5] = 72, - [1][0][2][0][RTW89_FCC][9] = 80, + [1][0][2][0][RTW89_MKK][5] = 66, + [1][0][2][0][RTW89_IC][5] = 66, + [1][0][2][0][RTW89_ACMA][5] = 66, + [1][0][2][0][RTW89_FCC][9] = 68, [1][0][2][0][RTW89_ETSI][9] = 66, - [1][0][2][0][RTW89_MKK][9] = 72, - [1][0][2][0][RTW89_IC][9] = 72, - [1][0][2][0][RTW89_ACMA][9] = 72, - [1][0][2][0][RTW89_FCC][13] = 68, + [1][0][2][0][RTW89_MKK][9] = 66, + [1][0][2][0][RTW89_IC][9] = 66, + [1][0][2][0][RTW89_ACMA][9] = 66, + [1][0][2][0][RTW89_FCC][13] = 58, [1][0][2][0][RTW89_ETSI][13] = 66, - [1][0][2][0][RTW89_MKK][13] = 72, - [1][0][2][0][RTW89_IC][13] = 72, - [1][0][2][0][RTW89_ACMA][13] = 72, - [1][0][2][0][RTW89_FCC][16] = 66, + [1][0][2][0][RTW89_MKK][13] = 66, + [1][0][2][0][RTW89_IC][13] = 66, + [1][0][2][0][RTW89_ACMA][13] = 66, + [1][0][2][0][RTW89_FCC][16] = 56, [1][0][2][0][RTW89_ETSI][16] = 66, - [1][0][2][0][RTW89_MKK][16] = 76, - [1][0][2][0][RTW89_IC][16] = 72, - [1][0][2][0][RTW89_ACMA][16] = 72, - [1][0][2][0][RTW89_FCC][20] = 80, + [1][0][2][0][RTW89_MKK][16] = 66, + [1][0][2][0][RTW89_IC][16] = 66, + [1][0][2][0][RTW89_ACMA][16] = 66, + [1][0][2][0][RTW89_FCC][20] = 68, [1][0][2][0][RTW89_ETSI][20] = 66, - [1][0][2][0][RTW89_MKK][20] = 76, - [1][0][2][0][RTW89_IC][20] = 80, - [1][0][2][0][RTW89_ACMA][20] = 72, - [1][0][2][0][RTW89_FCC][24] = 80, + [1][0][2][0][RTW89_MKK][20] = 66, + [1][0][2][0][RTW89_IC][20] = 66, + [1][0][2][0][RTW89_ACMA][20] = 66, + [1][0][2][0][RTW89_FCC][24] = 68, [1][0][2][0][RTW89_ETSI][24] = 66, - [1][0][2][0][RTW89_MKK][24] = 76, + [1][0][2][0][RTW89_MKK][24] = 66, [1][0][2][0][RTW89_IC][24] = 127, [1][0][2][0][RTW89_ACMA][24] = 127, - [1][0][2][0][RTW89_FCC][28] = 80, + [1][0][2][0][RTW89_FCC][28] = 68, 
[1][0][2][0][RTW89_ETSI][28] = 66, - [1][0][2][0][RTW89_MKK][28] = 76, + [1][0][2][0][RTW89_MKK][28] = 66, [1][0][2][0][RTW89_IC][28] = 127, [1][0][2][0][RTW89_ACMA][28] = 127, - [1][0][2][0][RTW89_FCC][32] = 78, + [1][0][2][0][RTW89_FCC][32] = 68, [1][0][2][0][RTW89_ETSI][32] = 66, - [1][0][2][0][RTW89_MKK][32] = 76, - [1][0][2][0][RTW89_IC][32] = 78, + [1][0][2][0][RTW89_MKK][32] = 66, + [1][0][2][0][RTW89_IC][32] = 66, [1][0][2][0][RTW89_ACMA][32] = 66, - [1][0][2][0][RTW89_FCC][36] = 80, + [1][0][2][0][RTW89_FCC][36] = 68, [1][0][2][0][RTW89_ETSI][36] = 127, - [1][0][2][0][RTW89_MKK][36] = 76, - [1][0][2][0][RTW89_IC][36] = 80, - [1][0][2][0][RTW89_ACMA][36] = 76, - [1][0][2][0][RTW89_FCC][39] = 80, + [1][0][2][0][RTW89_MKK][36] = 66, + [1][0][2][0][RTW89_IC][36] = 66, + [1][0][2][0][RTW89_ACMA][36] = 66, + [1][0][2][0][RTW89_FCC][39] = 68, [1][0][2][0][RTW89_ETSI][39] = 30, [1][0][2][0][RTW89_MKK][39] = 127, - [1][0][2][0][RTW89_IC][39] = 80, - [1][0][2][0][RTW89_ACMA][39] = 76, - [1][0][2][0][RTW89_FCC][43] = 80, + [1][0][2][0][RTW89_IC][39] = 66, + [1][0][2][0][RTW89_ACMA][39] = 66, + [1][0][2][0][RTW89_FCC][43] = 68, [1][0][2][0][RTW89_ETSI][43] = 30, [1][0][2][0][RTW89_MKK][43] = 127, - [1][0][2][0][RTW89_IC][43] = 80, - [1][0][2][0][RTW89_ACMA][43] = 76, - [1][0][2][0][RTW89_FCC][47] = 80, + [1][0][2][0][RTW89_IC][43] = 66, + [1][0][2][0][RTW89_ACMA][43] = 66, + [1][0][2][0][RTW89_FCC][47] = 68, [1][0][2][0][RTW89_ETSI][47] = 127, [1][0][2][0][RTW89_MKK][47] = 127, [1][0][2][0][RTW89_IC][47] = 127, [1][0][2][0][RTW89_ACMA][47] = 127, - [1][0][2][0][RTW89_FCC][51] = 72, + [1][0][2][0][RTW89_FCC][51] = 68, [1][0][2][0][RTW89_ETSI][51] = 127, [1][0][2][0][RTW89_MKK][51] = 127, [1][0][2][0][RTW89_IC][51] = 127, [1][0][2][0][RTW89_ACMA][51] = 127, - [1][1][2][0][RTW89_FCC][1] = 64, + [1][1][2][0][RTW89_FCC][1] = 54, [1][1][2][0][RTW89_ETSI][1] = 54, - [1][1][2][0][RTW89_MKK][1] = 60, + [1][1][2][0][RTW89_MKK][1] = 48, [1][1][2][0][RTW89_IC][1] = 60, [1][1][2][0][RTW89_ACMA][1] = 60, - [1][1][2][0][RTW89_FCC][5] = 78, + [1][1][2][0][RTW89_FCC][5] = 68, [1][1][2][0][RTW89_ETSI][5] = 54, - [1][1][2][0][RTW89_MKK][5] = 60, + [1][1][2][0][RTW89_MKK][5] = 52, [1][1][2][0][RTW89_IC][5] = 60, [1][1][2][0][RTW89_ACMA][5] = 60, - [1][1][2][0][RTW89_FCC][9] = 78, + [1][1][2][0][RTW89_FCC][9] = 68, [1][1][2][0][RTW89_ETSI][9] = 54, - [1][1][2][0][RTW89_MKK][9] = 60, + [1][1][2][0][RTW89_MKK][9] = 52, [1][1][2][0][RTW89_IC][9] = 60, [1][1][2][0][RTW89_ACMA][9] = 60, - [1][1][2][0][RTW89_FCC][13] = 64, + [1][1][2][0][RTW89_FCC][13] = 54, [1][1][2][0][RTW89_ETSI][13] = 54, - [1][1][2][0][RTW89_MKK][13] = 60, + [1][1][2][0][RTW89_MKK][13] = 52, [1][1][2][0][RTW89_IC][13] = 60, [1][1][2][0][RTW89_ACMA][13] = 60, - [1][1][2][0][RTW89_FCC][16] = 58, + [1][1][2][0][RTW89_FCC][16] = 48, [1][1][2][0][RTW89_ETSI][16] = 54, - [1][1][2][0][RTW89_MKK][16] = 72, + [1][1][2][0][RTW89_MKK][16] = 66, [1][1][2][0][RTW89_IC][16] = 58, [1][1][2][0][RTW89_ACMA][16] = 60, - [1][1][2][0][RTW89_FCC][20] = 78, + [1][1][2][0][RTW89_FCC][20] = 68, [1][1][2][0][RTW89_ETSI][20] = 54, - [1][1][2][0][RTW89_MKK][20] = 72, - [1][1][2][0][RTW89_IC][20] = 78, + [1][1][2][0][RTW89_MKK][20] = 66, + [1][1][2][0][RTW89_IC][20] = 66, [1][1][2][0][RTW89_ACMA][20] = 60, - [1][1][2][0][RTW89_FCC][24] = 78, + [1][1][2][0][RTW89_FCC][24] = 68, [1][1][2][0][RTW89_ETSI][24] = 54, - [1][1][2][0][RTW89_MKK][24] = 72, + [1][1][2][0][RTW89_MKK][24] = 66, [1][1][2][0][RTW89_IC][24] = 127, [1][1][2][0][RTW89_ACMA][24] = 127, - 
[1][1][2][0][RTW89_FCC][28] = 78, + [1][1][2][0][RTW89_FCC][28] = 68, [1][1][2][0][RTW89_ETSI][28] = 54, - [1][1][2][0][RTW89_MKK][28] = 72, + [1][1][2][0][RTW89_MKK][28] = 66, [1][1][2][0][RTW89_IC][28] = 127, [1][1][2][0][RTW89_ACMA][28] = 127, - [1][1][2][0][RTW89_FCC][32] = 70, + [1][1][2][0][RTW89_FCC][32] = 60, [1][1][2][0][RTW89_ETSI][32] = 54, - [1][1][2][0][RTW89_MKK][32] = 72, - [1][1][2][0][RTW89_IC][32] = 70, + [1][1][2][0][RTW89_MKK][32] = 66, + [1][1][2][0][RTW89_IC][32] = 66, [1][1][2][0][RTW89_ACMA][32] = 54, - [1][1][2][0][RTW89_FCC][36] = 78, + [1][1][2][0][RTW89_FCC][36] = 68, [1][1][2][0][RTW89_ETSI][36] = 127, - [1][1][2][0][RTW89_MKK][36] = 72, - [1][1][2][0][RTW89_IC][36] = 78, - [1][1][2][0][RTW89_ACMA][36] = 76, - [1][1][2][0][RTW89_FCC][39] = 80, + [1][1][2][0][RTW89_MKK][36] = 66, + [1][1][2][0][RTW89_IC][36] = 66, + [1][1][2][0][RTW89_ACMA][36] = 66, + [1][1][2][0][RTW89_FCC][39] = 68, [1][1][2][0][RTW89_ETSI][39] = 18, [1][1][2][0][RTW89_MKK][39] = 127, - [1][1][2][0][RTW89_IC][39] = 80, - [1][1][2][0][RTW89_ACMA][39] = 74, - [1][1][2][0][RTW89_FCC][43] = 80, + [1][1][2][0][RTW89_IC][39] = 66, + [1][1][2][0][RTW89_ACMA][39] = 66, + [1][1][2][0][RTW89_FCC][43] = 68, [1][1][2][0][RTW89_ETSI][43] = 18, [1][1][2][0][RTW89_MKK][43] = 127, - [1][1][2][0][RTW89_IC][43] = 80, - [1][1][2][0][RTW89_ACMA][43] = 76, - [1][1][2][0][RTW89_FCC][47] = 70, + [1][1][2][0][RTW89_IC][43] = 66, + [1][1][2][0][RTW89_ACMA][43] = 66, + [1][1][2][0][RTW89_FCC][47] = 60, [1][1][2][0][RTW89_ETSI][47] = 127, [1][1][2][0][RTW89_MKK][47] = 127, [1][1][2][0][RTW89_IC][47] = 127, [1][1][2][0][RTW89_ACMA][47] = 127, - [1][1][2][0][RTW89_FCC][51] = 68, + [1][1][2][0][RTW89_FCC][51] = 58, [1][1][2][0][RTW89_ETSI][51] = 127, [1][1][2][0][RTW89_MKK][51] = 127, [1][1][2][0][RTW89_IC][51] = 127, [1][1][2][0][RTW89_ACMA][51] = 127, - [1][1][2][1][RTW89_FCC][1] = 64, - [1][1][2][1][RTW89_ETSI][1] = 42, - [1][1][2][1][RTW89_MKK][1] = 60, + [1][1][2][1][RTW89_FCC][1] = 54, + [1][1][2][1][RTW89_ETSI][1] = 40, + [1][1][2][1][RTW89_MKK][1] = 48, [1][1][2][1][RTW89_IC][1] = 48, [1][1][2][1][RTW89_ACMA][1] = 48, - [1][1][2][1][RTW89_FCC][5] = 70, - [1][1][2][1][RTW89_ETSI][5] = 42, - [1][1][2][1][RTW89_MKK][5] = 60, + [1][1][2][1][RTW89_FCC][5] = 60, + [1][1][2][1][RTW89_ETSI][5] = 40, + [1][1][2][1][RTW89_MKK][5] = 52, [1][1][2][1][RTW89_IC][5] = 48, [1][1][2][1][RTW89_ACMA][5] = 48, - [1][1][2][1][RTW89_FCC][9] = 70, - [1][1][2][1][RTW89_ETSI][9] = 42, - [1][1][2][1][RTW89_MKK][9] = 60, + [1][1][2][1][RTW89_FCC][9] = 60, + [1][1][2][1][RTW89_ETSI][9] = 40, + [1][1][2][1][RTW89_MKK][9] = 52, [1][1][2][1][RTW89_IC][9] = 48, [1][1][2][1][RTW89_ACMA][9] = 48, - [1][1][2][1][RTW89_FCC][13] = 64, - [1][1][2][1][RTW89_ETSI][13] = 42, - [1][1][2][1][RTW89_MKK][13] = 60, + [1][1][2][1][RTW89_FCC][13] = 54, + [1][1][2][1][RTW89_ETSI][13] = 40, + [1][1][2][1][RTW89_MKK][13] = 52, [1][1][2][1][RTW89_IC][13] = 48, [1][1][2][1][RTW89_ACMA][13] = 48, - [1][1][2][1][RTW89_FCC][16] = 58, - [1][1][2][1][RTW89_ETSI][16] = 42, - [1][1][2][1][RTW89_MKK][16] = 72, + [1][1][2][1][RTW89_FCC][16] = 48, + [1][1][2][1][RTW89_ETSI][16] = 40, + [1][1][2][1][RTW89_MKK][16] = 66, [1][1][2][1][RTW89_IC][16] = 58, [1][1][2][1][RTW89_ACMA][16] = 48, - [1][1][2][1][RTW89_FCC][20] = 70, - [1][1][2][1][RTW89_ETSI][20] = 42, - [1][1][2][1][RTW89_MKK][20] = 72, - [1][1][2][1][RTW89_IC][20] = 70, + [1][1][2][1][RTW89_FCC][20] = 60, + [1][1][2][1][RTW89_ETSI][20] = 40, + [1][1][2][1][RTW89_MKK][20] = 66, + [1][1][2][1][RTW89_IC][20] = 66, 
[1][1][2][1][RTW89_ACMA][20] = 48, - [1][1][2][1][RTW89_FCC][24] = 70, - [1][1][2][1][RTW89_ETSI][24] = 42, - [1][1][2][1][RTW89_MKK][24] = 72, + [1][1][2][1][RTW89_FCC][24] = 60, + [1][1][2][1][RTW89_ETSI][24] = 40, + [1][1][2][1][RTW89_MKK][24] = 66, [1][1][2][1][RTW89_IC][24] = 127, [1][1][2][1][RTW89_ACMA][24] = 127, - [1][1][2][1][RTW89_FCC][28] = 70, - [1][1][2][1][RTW89_ETSI][28] = 42, - [1][1][2][1][RTW89_MKK][28] = 72, + [1][1][2][1][RTW89_FCC][28] = 60, + [1][1][2][1][RTW89_ETSI][28] = 40, + [1][1][2][1][RTW89_MKK][28] = 66, [1][1][2][1][RTW89_IC][28] = 127, [1][1][2][1][RTW89_ACMA][28] = 127, - [1][1][2][1][RTW89_FCC][32] = 70, - [1][1][2][1][RTW89_ETSI][32] = 42, - [1][1][2][1][RTW89_MKK][32] = 72, - [1][1][2][1][RTW89_IC][32] = 70, + [1][1][2][1][RTW89_FCC][32] = 60, + [1][1][2][1][RTW89_ETSI][32] = 40, + [1][1][2][1][RTW89_MKK][32] = 66, + [1][1][2][1][RTW89_IC][32] = 66, [1][1][2][1][RTW89_ACMA][32] = 42, - [1][1][2][1][RTW89_FCC][36] = 70, + [1][1][2][1][RTW89_FCC][36] = 60, [1][1][2][1][RTW89_ETSI][36] = 127, - [1][1][2][1][RTW89_MKK][36] = 72, - [1][1][2][1][RTW89_IC][36] = 70, - [1][1][2][1][RTW89_ACMA][36] = 72, - [1][1][2][1][RTW89_FCC][39] = 80, - [1][1][2][1][RTW89_ETSI][39] = 8, + [1][1][2][1][RTW89_MKK][36] = 66, + [1][1][2][1][RTW89_IC][36] = 66, + [1][1][2][1][RTW89_ACMA][36] = 66, + [1][1][2][1][RTW89_FCC][39] = 68, + [1][1][2][1][RTW89_ETSI][39] = 6, [1][1][2][1][RTW89_MKK][39] = 127, - [1][1][2][1][RTW89_IC][39] = 80, - [1][1][2][1][RTW89_ACMA][39] = 74, - [1][1][2][1][RTW89_FCC][43] = 80, - [1][1][2][1][RTW89_ETSI][43] = 8, + [1][1][2][1][RTW89_IC][39] = 66, + [1][1][2][1][RTW89_ACMA][39] = 66, + [1][1][2][1][RTW89_FCC][43] = 68, + [1][1][2][1][RTW89_ETSI][43] = 6, [1][1][2][1][RTW89_MKK][43] = 127, - [1][1][2][1][RTW89_IC][43] = 80, - [1][1][2][1][RTW89_ACMA][43] = 76, - [1][1][2][1][RTW89_FCC][47] = 70, + [1][1][2][1][RTW89_IC][43] = 66, + [1][1][2][1][RTW89_ACMA][43] = 66, + [1][1][2][1][RTW89_FCC][47] = 60, [1][1][2][1][RTW89_ETSI][47] = 127, [1][1][2][1][RTW89_MKK][47] = 127, [1][1][2][1][RTW89_IC][47] = 127, [1][1][2][1][RTW89_ACMA][47] = 127, - [1][1][2][1][RTW89_FCC][51] = 68, + [1][1][2][1][RTW89_FCC][51] = 58, [1][1][2][1][RTW89_ETSI][51] = 127, [1][1][2][1][RTW89_MKK][51] = 127, [1][1][2][1][RTW89_IC][51] = 127, [1][1][2][1][RTW89_ACMA][51] = 127, - [2][0][2][0][RTW89_FCC][3] = 66, - [2][0][2][0][RTW89_ETSI][3] = 66, - [2][0][2][0][RTW89_MKK][3] = 66, - [2][0][2][0][RTW89_IC][3] = 64, - [2][0][2][0][RTW89_ACMA][3] = 66, - [2][0][2][0][RTW89_FCC][11] = 68, - [2][0][2][0][RTW89_ETSI][11] = 66, - [2][0][2][0][RTW89_MKK][11] = 66, - [2][0][2][0][RTW89_IC][11] = 66, - [2][0][2][0][RTW89_ACMA][11] = 66, - [2][0][2][0][RTW89_FCC][18] = 64, - [2][0][2][0][RTW89_ETSI][18] = 66, - [2][0][2][0][RTW89_MKK][18] = 72, - [2][0][2][0][RTW89_IC][18] = 64, - [2][0][2][0][RTW89_ACMA][18] = 66, - [2][0][2][0][RTW89_FCC][26] = 76, - [2][0][2][0][RTW89_ETSI][26] = 66, - [2][0][2][0][RTW89_MKK][26] = 72, + [2][0][2][0][RTW89_FCC][3] = 56, + [2][0][2][0][RTW89_ETSI][3] = 60, + [2][0][2][0][RTW89_MKK][3] = 60, + [2][0][2][0][RTW89_IC][3] = 60, + [2][0][2][0][RTW89_ACMA][3] = 60, + [2][0][2][0][RTW89_FCC][11] = 58, + [2][0][2][0][RTW89_ETSI][11] = 60, + [2][0][2][0][RTW89_MKK][11] = 60, + [2][0][2][0][RTW89_IC][11] = 60, + [2][0][2][0][RTW89_ACMA][11] = 60, + [2][0][2][0][RTW89_FCC][18] = 54, + [2][0][2][0][RTW89_ETSI][18] = 60, + [2][0][2][0][RTW89_MKK][18] = 60, + [2][0][2][0][RTW89_IC][18] = 60, + [2][0][2][0][RTW89_ACMA][18] = 60, + [2][0][2][0][RTW89_FCC][26] = 62, + 
[2][0][2][0][RTW89_ETSI][26] = 60, + [2][0][2][0][RTW89_MKK][26] = 60, [2][0][2][0][RTW89_IC][26] = 127, [2][0][2][0][RTW89_ACMA][26] = 127, - [2][0][2][0][RTW89_FCC][34] = 76, + [2][0][2][0][RTW89_FCC][34] = 62, [2][0][2][0][RTW89_ETSI][34] = 127, - [2][0][2][0][RTW89_MKK][34] = 72, - [2][0][2][0][RTW89_IC][34] = 76, - [2][0][2][0][RTW89_ACMA][34] = 72, - [2][0][2][0][RTW89_FCC][41] = 76, + [2][0][2][0][RTW89_MKK][34] = 60, + [2][0][2][0][RTW89_IC][34] = 60, + [2][0][2][0][RTW89_ACMA][34] = 60, + [2][0][2][0][RTW89_FCC][41] = 62, [2][0][2][0][RTW89_ETSI][41] = 30, [2][0][2][0][RTW89_MKK][41] = 127, - [2][0][2][0][RTW89_IC][41] = 76, - [2][0][2][0][RTW89_ACMA][41] = 72, - [2][0][2][0][RTW89_FCC][49] = 66, + [2][0][2][0][RTW89_IC][41] = 60, + [2][0][2][0][RTW89_ACMA][41] = 60, + [2][0][2][0][RTW89_FCC][49] = 56, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, [2][0][2][0][RTW89_IC][49] = 127, [2][0][2][0][RTW89_ACMA][49] = 127, - [2][1][2][0][RTW89_FCC][3] = 58, + [2][1][2][0][RTW89_FCC][3] = 48, [2][1][2][0][RTW89_ETSI][3] = 54, - [2][1][2][0][RTW89_MKK][3] = 54, - [2][1][2][0][RTW89_IC][3] = 54, - [2][1][2][0][RTW89_ACMA][3] = 54, - [2][1][2][0][RTW89_FCC][11] = 64, + [2][1][2][0][RTW89_MKK][3] = 56, + [2][1][2][0][RTW89_IC][3] = 52, + [2][1][2][0][RTW89_ACMA][3] = 52, + [2][1][2][0][RTW89_FCC][11] = 54, [2][1][2][0][RTW89_ETSI][11] = 54, [2][1][2][0][RTW89_MKK][11] = 54, - [2][1][2][0][RTW89_IC][11] = 54, - [2][1][2][0][RTW89_ACMA][11] = 54, - [2][1][2][0][RTW89_FCC][18] = 58, + [2][1][2][0][RTW89_IC][11] = 52, + [2][1][2][0][RTW89_ACMA][11] = 52, + [2][1][2][0][RTW89_FCC][18] = 48, [2][1][2][0][RTW89_ETSI][18] = 54, - [2][1][2][0][RTW89_MKK][18] = 72, + [2][1][2][0][RTW89_MKK][18] = 60, [2][1][2][0][RTW89_IC][18] = 58, - [2][1][2][0][RTW89_ACMA][18] = 54, - [2][1][2][0][RTW89_FCC][26] = 72, + [2][1][2][0][RTW89_ACMA][18] = 52, + [2][1][2][0][RTW89_FCC][26] = 62, [2][1][2][0][RTW89_ETSI][26] = 54, - [2][1][2][0][RTW89_MKK][26] = 72, + [2][1][2][0][RTW89_MKK][26] = 56, [2][1][2][0][RTW89_IC][26] = 127, [2][1][2][0][RTW89_ACMA][26] = 127, - [2][1][2][0][RTW89_FCC][34] = 76, + [2][1][2][0][RTW89_FCC][34] = 62, [2][1][2][0][RTW89_ETSI][34] = 127, - [2][1][2][0][RTW89_MKK][34] = 72, - [2][1][2][0][RTW89_IC][34] = 76, - [2][1][2][0][RTW89_ACMA][34] = 72, - [2][1][2][0][RTW89_FCC][41] = 76, + [2][1][2][0][RTW89_MKK][34] = 60, + [2][1][2][0][RTW89_IC][34] = 60, + [2][1][2][0][RTW89_ACMA][34] = 60, + [2][1][2][0][RTW89_FCC][41] = 62, [2][1][2][0][RTW89_ETSI][41] = 18, [2][1][2][0][RTW89_MKK][41] = 127, - [2][1][2][0][RTW89_IC][41] = 76, - [2][1][2][0][RTW89_ACMA][41] = 72, - [2][1][2][0][RTW89_FCC][49] = 60, + [2][1][2][0][RTW89_IC][41] = 60, + [2][1][2][0][RTW89_ACMA][41] = 60, + [2][1][2][0][RTW89_FCC][49] = 50, [2][1][2][0][RTW89_ETSI][49] = 127, [2][1][2][0][RTW89_MKK][49] = 127, [2][1][2][0][RTW89_IC][49] = 127, [2][1][2][0][RTW89_ACMA][49] = 127, - [2][1][2][1][RTW89_FCC][3] = 58, - [2][1][2][1][RTW89_ETSI][3] = 42, - [2][1][2][1][RTW89_MKK][3] = 54, - [2][1][2][1][RTW89_IC][3] = 42, - [2][1][2][1][RTW89_ACMA][3] = 42, - [2][1][2][1][RTW89_FCC][11] = 64, - [2][1][2][1][RTW89_ETSI][11] = 42, + [2][1][2][1][RTW89_FCC][3] = 48, + [2][1][2][1][RTW89_ETSI][3] = 40, + [2][1][2][1][RTW89_MKK][3] = 56, + [2][1][2][1][RTW89_IC][3] = 40, + [2][1][2][1][RTW89_ACMA][3] = 40, + [2][1][2][1][RTW89_FCC][11] = 54, + [2][1][2][1][RTW89_ETSI][11] = 40, [2][1][2][1][RTW89_MKK][11] = 54, - [2][1][2][1][RTW89_IC][11] = 42, - [2][1][2][1][RTW89_ACMA][11] = 42, - [2][1][2][1][RTW89_FCC][18] 
= 58, - [2][1][2][1][RTW89_ETSI][18] = 42, - [2][1][2][1][RTW89_MKK][18] = 72, + [2][1][2][1][RTW89_IC][11] = 40, + [2][1][2][1][RTW89_ACMA][11] = 40, + [2][1][2][1][RTW89_FCC][18] = 48, + [2][1][2][1][RTW89_ETSI][18] = 40, + [2][1][2][1][RTW89_MKK][18] = 60, [2][1][2][1][RTW89_IC][18] = 58, - [2][1][2][1][RTW89_ACMA][18] = 42, - [2][1][2][1][RTW89_FCC][26] = 70, - [2][1][2][1][RTW89_ETSI][26] = 44, - [2][1][2][1][RTW89_MKK][26] = 72, + [2][1][2][1][RTW89_ACMA][18] = 40, + [2][1][2][1][RTW89_FCC][26] = 60, + [2][1][2][1][RTW89_ETSI][26] = 42, + [2][1][2][1][RTW89_MKK][26] = 56, [2][1][2][1][RTW89_IC][26] = 127, [2][1][2][1][RTW89_ACMA][26] = 127, - [2][1][2][1][RTW89_FCC][34] = 70, + [2][1][2][1][RTW89_FCC][34] = 60, [2][1][2][1][RTW89_ETSI][34] = 127, - [2][1][2][1][RTW89_MKK][34] = 72, - [2][1][2][1][RTW89_IC][34] = 70, - [2][1][2][1][RTW89_ACMA][34] = 72, - [2][1][2][1][RTW89_FCC][41] = 76, - [2][1][2][1][RTW89_ETSI][41] = 8, + [2][1][2][1][RTW89_MKK][34] = 60, + [2][1][2][1][RTW89_IC][34] = 60, + [2][1][2][1][RTW89_ACMA][34] = 60, + [2][1][2][1][RTW89_FCC][41] = 62, + [2][1][2][1][RTW89_ETSI][41] = 6, [2][1][2][1][RTW89_MKK][41] = 127, - [2][1][2][1][RTW89_IC][41] = 76, - [2][1][2][1][RTW89_ACMA][41] = 72, - [2][1][2][1][RTW89_FCC][49] = 60, + [2][1][2][1][RTW89_IC][41] = 60, + [2][1][2][1][RTW89_ACMA][41] = 60, + [2][1][2][1][RTW89_FCC][49] = 50, [2][1][2][1][RTW89_ETSI][49] = 127, [2][1][2][1][RTW89_MKK][49] = 127, [2][1][2][1][RTW89_IC][49] = 127, [2][1][2][1][RTW89_ACMA][49] = 127, - [3][0][2][0][RTW89_FCC][7] = 56, - [3][0][2][0][RTW89_ETSI][7] = 56, - [3][0][2][0][RTW89_MKK][7] = 56, - [3][0][2][0][RTW89_IC][7] = 56, - [3][0][2][0][RTW89_ACMA][7] = 56, - [3][0][2][0][RTW89_FCC][22] = 56, - [3][0][2][0][RTW89_ETSI][22] = 56, - [3][0][2][0][RTW89_MKK][22] = 56, - [3][0][2][0][RTW89_IC][22] = 56, - [3][0][2][0][RTW89_ACMA][22] = 56, - [3][0][2][0][RTW89_FCC][45] = 56, + [3][0][2][0][RTW89_FCC][7] = 38, + [3][0][2][0][RTW89_ETSI][7] = 50, + [3][0][2][0][RTW89_MKK][7] = 50, + [3][0][2][0][RTW89_IC][7] = 50, + [3][0][2][0][RTW89_ACMA][7] = 50, + [3][0][2][0][RTW89_FCC][22] = 52, + [3][0][2][0][RTW89_ETSI][22] = 50, + [3][0][2][0][RTW89_MKK][22] = 50, + [3][0][2][0][RTW89_IC][22] = 50, + [3][0][2][0][RTW89_ACMA][22] = 50, + [3][0][2][0][RTW89_FCC][45] = 127, [3][0][2][0][RTW89_ETSI][45] = 127, [3][0][2][0][RTW89_MKK][45] = 127, [3][0][2][0][RTW89_IC][45] = 127, [3][0][2][0][RTW89_ACMA][45] = 127, - [3][1][2][0][RTW89_FCC][7] = 44, - [3][1][2][0][RTW89_ETSI][7] = 44, - [3][1][2][0][RTW89_MKK][7] = 44, + [3][1][2][0][RTW89_FCC][7] = 26, + [3][1][2][0][RTW89_ETSI][7] = 50, + [3][1][2][0][RTW89_MKK][7] = 36, [3][1][2][0][RTW89_IC][7] = 44, [3][1][2][0][RTW89_ACMA][7] = 44, - [3][1][2][0][RTW89_FCC][22] = 44, - [3][1][2][0][RTW89_ETSI][22] = 44, - [3][1][2][0][RTW89_MKK][22] = 44, + [3][1][2][0][RTW89_FCC][22] = 42, + [3][1][2][0][RTW89_ETSI][22] = 50, + [3][1][2][0][RTW89_MKK][22] = 48, [3][1][2][0][RTW89_IC][22] = 44, [3][1][2][0][RTW89_ACMA][22] = 44, - [3][1][2][0][RTW89_FCC][45] = 44, + [3][1][2][0][RTW89_FCC][45] = 127, [3][1][2][0][RTW89_ETSI][45] = 127, [3][1][2][0][RTW89_MKK][45] = 127, [3][1][2][0][RTW89_IC][45] = 127, [3][1][2][0][RTW89_ACMA][45] = 127, - [3][1][2][1][RTW89_FCC][7] = 32, - [3][1][2][1][RTW89_ETSI][7] = 32, - [3][1][2][1][RTW89_MKK][7] = 32, + [3][1][2][1][RTW89_FCC][7] = 14, + [3][1][2][1][RTW89_ETSI][7] = 42, + [3][1][2][1][RTW89_MKK][7] = 36, [3][1][2][1][RTW89_IC][7] = 32, [3][1][2][1][RTW89_ACMA][7] = 32, - [3][1][2][1][RTW89_FCC][22] = 32, - 
[3][1][2][1][RTW89_ETSI][22] = 32, - [3][1][2][1][RTW89_MKK][22] = 32, + [3][1][2][1][RTW89_FCC][22] = 30, + [3][1][2][1][RTW89_ETSI][22] = 42, + [3][1][2][1][RTW89_MKK][22] = 48, [3][1][2][1][RTW89_IC][22] = 32, [3][1][2][1][RTW89_ACMA][22] = 32, - [3][1][2][1][RTW89_FCC][45] = 32, + [3][1][2][1][RTW89_FCC][45] = 127, [3][1][2][1][RTW89_ETSI][45] = 127, [3][1][2][1][RTW89_MKK][45] = 127, [3][1][2][1][RTW89_IC][45] = 127, @@ -17127,7 +17127,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][9] = 32, [0][0][RTW89_WW][10] = 32, [0][0][RTW89_WW][11] = 32, - [0][0][RTW89_WW][12] = 32, + [0][0][RTW89_WW][12] = 24, [0][0][RTW89_WW][13] = 0, [0][1][RTW89_WW][0] = 20, [0][1][RTW89_WW][1] = 22, @@ -17154,8 +17154,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][8] = 44, [1][0][RTW89_WW][9] = 44, [1][0][RTW89_WW][10] = 44, - [1][0][RTW89_WW][11] = 44, - [1][0][RTW89_WW][12] = 38, + [1][0][RTW89_WW][11] = 42, + [1][0][RTW89_WW][12] = 30, [1][0][RTW89_WW][13] = 0, [1][1][RTW89_WW][0] = 32, [1][1][RTW89_WW][1] = 32, @@ -17168,8 +17168,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][8] = 32, [1][1][RTW89_WW][9] = 32, [1][1][RTW89_WW][10] = 32, - [1][1][RTW89_WW][11] = 32, - [1][1][RTW89_WW][12] = 32, + [1][1][RTW89_WW][11] = 30, + [1][1][RTW89_WW][12] = 24, [1][1][RTW89_WW][13] = 0, [2][0][RTW89_WW][0] = 56, [2][0][RTW89_WW][1] = 56, @@ -17182,8 +17182,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_WW][8] = 56, [2][0][RTW89_WW][9] = 56, [2][0][RTW89_WW][10] = 56, - [2][0][RTW89_WW][11] = 56, - [2][0][RTW89_WW][12] = 56, + [2][0][RTW89_WW][11] = 42, + [2][0][RTW89_WW][12] = 38, [2][0][RTW89_WW][13] = 0, [2][1][RTW89_WW][0] = 44, [2][1][RTW89_WW][1] = 44, @@ -17196,72 +17196,72 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][8] = 44, [2][1][RTW89_WW][9] = 44, [2][1][RTW89_WW][10] = 44, - [2][1][RTW89_WW][11] = 44, - [2][1][RTW89_WW][12] = 42, + [2][1][RTW89_WW][11] = 30, + [2][1][RTW89_WW][12] = 26, [2][1][RTW89_WW][13] = 0, - [0][0][RTW89_FCC][0] = 68, - [0][0][RTW89_ETSI][0] = 36, - [0][0][RTW89_MKK][0] = 38, + [0][0][RTW89_FCC][0] = 60, + [0][0][RTW89_ETSI][0] = 34, + [0][0][RTW89_MKK][0] = 36, [0][0][RTW89_IC][0] = 68, [0][0][RTW89_ACMA][0] = 32, - [0][0][RTW89_FCC][1] = 68, - [0][0][RTW89_ETSI][1] = 40, - [0][0][RTW89_MKK][1] = 44, + [0][0][RTW89_FCC][1] = 60, + [0][0][RTW89_ETSI][1] = 38, + [0][0][RTW89_MKK][1] = 40, [0][0][RTW89_IC][1] = 68, [0][0][RTW89_ACMA][1] = 32, - [0][0][RTW89_FCC][2] = 72, - [0][0][RTW89_ETSI][2] = 40, - [0][0][RTW89_MKK][2] = 44, + [0][0][RTW89_FCC][2] = 64, + [0][0][RTW89_ETSI][2] = 38, + [0][0][RTW89_MKK][2] = 40, [0][0][RTW89_IC][2] = 72, [0][0][RTW89_ACMA][2] = 32, - [0][0][RTW89_FCC][3] = 76, - [0][0][RTW89_ETSI][3] = 40, - [0][0][RTW89_MKK][3] = 44, + [0][0][RTW89_FCC][3] = 68, + [0][0][RTW89_ETSI][3] = 38, + [0][0][RTW89_MKK][3] = 40, [0][0][RTW89_IC][3] = 76, [0][0][RTW89_ACMA][3] = 32, - [0][0][RTW89_FCC][4] = 76, - [0][0][RTW89_ETSI][4] = 40, - [0][0][RTW89_MKK][4] = 44, + [0][0][RTW89_FCC][4] = 68, + [0][0][RTW89_ETSI][4] = 38, + [0][0][RTW89_MKK][4] = 40, [0][0][RTW89_IC][4] = 76, [0][0][RTW89_ACMA][4] = 32, - [0][0][RTW89_FCC][5] = 84, - [0][0][RTW89_ETSI][5] = 40, - [0][0][RTW89_MKK][5] = 44, + [0][0][RTW89_FCC][5] = 76, + [0][0][RTW89_ETSI][5] = 38, + [0][0][RTW89_MKK][5] = 40, [0][0][RTW89_IC][5] = 84, [0][0][RTW89_ACMA][5] = 32, - [0][0][RTW89_FCC][6] = 74, - 
[0][0][RTW89_ETSI][6] = 40, - [0][0][RTW89_MKK][6] = 44, + [0][0][RTW89_FCC][6] = 66, + [0][0][RTW89_ETSI][6] = 38, + [0][0][RTW89_MKK][6] = 40, [0][0][RTW89_IC][6] = 74, [0][0][RTW89_ACMA][6] = 32, - [0][0][RTW89_FCC][7] = 74, - [0][0][RTW89_ETSI][7] = 40, - [0][0][RTW89_MKK][7] = 44, + [0][0][RTW89_FCC][7] = 66, + [0][0][RTW89_ETSI][7] = 38, + [0][0][RTW89_MKK][7] = 40, [0][0][RTW89_IC][7] = 74, [0][0][RTW89_ACMA][7] = 32, - [0][0][RTW89_FCC][8] = 70, - [0][0][RTW89_ETSI][8] = 40, - [0][0][RTW89_MKK][8] = 44, + [0][0][RTW89_FCC][8] = 62, + [0][0][RTW89_ETSI][8] = 38, + [0][0][RTW89_MKK][8] = 40, [0][0][RTW89_IC][8] = 70, [0][0][RTW89_ACMA][8] = 32, - [0][0][RTW89_FCC][9] = 66, - [0][0][RTW89_ETSI][9] = 40, - [0][0][RTW89_MKK][9] = 44, + [0][0][RTW89_FCC][9] = 58, + [0][0][RTW89_ETSI][9] = 38, + [0][0][RTW89_MKK][9] = 40, [0][0][RTW89_IC][9] = 66, [0][0][RTW89_ACMA][9] = 32, - [0][0][RTW89_FCC][10] = 66, - [0][0][RTW89_ETSI][10] = 40, - [0][0][RTW89_MKK][10] = 44, + [0][0][RTW89_FCC][10] = 58, + [0][0][RTW89_ETSI][10] = 38, + [0][0][RTW89_MKK][10] = 40, [0][0][RTW89_IC][10] = 66, [0][0][RTW89_ACMA][10] = 32, - [0][0][RTW89_FCC][11] = 56, - [0][0][RTW89_ETSI][11] = 40, - [0][0][RTW89_MKK][11] = 44, + [0][0][RTW89_FCC][11] = 42, + [0][0][RTW89_ETSI][11] = 38, + [0][0][RTW89_MKK][11] = 40, [0][0][RTW89_IC][11] = 56, [0][0][RTW89_ACMA][11] = 32, - [0][0][RTW89_FCC][12] = 32, - [0][0][RTW89_ETSI][12] = 36, - [0][0][RTW89_MKK][12] = 38, + [0][0][RTW89_FCC][12] = 24, + [0][0][RTW89_ETSI][12] = 34, + [0][0][RTW89_MKK][12] = 36, [0][0][RTW89_IC][12] = 32, [0][0][RTW89_ACMA][12] = 32, [0][0][RTW89_FCC][13] = 127, @@ -17269,69 +17269,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_MKK][13] = 127, [0][0][RTW89_IC][13] = 127, [0][0][RTW89_ACMA][13] = 127, - [0][1][RTW89_FCC][0] = 62, - [0][1][RTW89_ETSI][0] = 24, - [0][1][RTW89_MKK][0] = 26, + [0][1][RTW89_FCC][0] = 46, + [0][1][RTW89_ETSI][0] = 22, + [0][1][RTW89_MKK][0] = 24, [0][1][RTW89_IC][0] = 62, [0][1][RTW89_ACMA][0] = 20, - [0][1][RTW89_FCC][1] = 62, - [0][1][RTW89_ETSI][1] = 26, - [0][1][RTW89_MKK][1] = 32, + [0][1][RTW89_FCC][1] = 46, + [0][1][RTW89_ETSI][1] = 24, + [0][1][RTW89_MKK][1] = 30, [0][1][RTW89_IC][1] = 62, [0][1][RTW89_ACMA][1] = 22, - [0][1][RTW89_FCC][2] = 66, - [0][1][RTW89_ETSI][2] = 26, - [0][1][RTW89_MKK][2] = 32, + [0][1][RTW89_FCC][2] = 50, + [0][1][RTW89_ETSI][2] = 24, + [0][1][RTW89_MKK][2] = 30, [0][1][RTW89_IC][2] = 66, [0][1][RTW89_ACMA][2] = 22, - [0][1][RTW89_FCC][3] = 70, - [0][1][RTW89_ETSI][3] = 26, - [0][1][RTW89_MKK][3] = 32, + [0][1][RTW89_FCC][3] = 54, + [0][1][RTW89_ETSI][3] = 24, + [0][1][RTW89_MKK][3] = 30, [0][1][RTW89_IC][3] = 70, [0][1][RTW89_ACMA][3] = 22, - [0][1][RTW89_FCC][4] = 74, - [0][1][RTW89_ETSI][4] = 26, - [0][1][RTW89_MKK][4] = 32, + [0][1][RTW89_FCC][4] = 58, + [0][1][RTW89_ETSI][4] = 24, + [0][1][RTW89_MKK][4] = 30, [0][1][RTW89_IC][4] = 74, [0][1][RTW89_ACMA][4] = 22, - [0][1][RTW89_FCC][5] = 74, - [0][1][RTW89_ETSI][5] = 26, - [0][1][RTW89_MKK][5] = 32, + [0][1][RTW89_FCC][5] = 66, + [0][1][RTW89_ETSI][5] = 24, + [0][1][RTW89_MKK][5] = 30, [0][1][RTW89_IC][5] = 74, [0][1][RTW89_ACMA][5] = 22, - [0][1][RTW89_FCC][6] = 72, - [0][1][RTW89_ETSI][6] = 26, - [0][1][RTW89_MKK][6] = 32, + [0][1][RTW89_FCC][6] = 58, + [0][1][RTW89_ETSI][6] = 24, + [0][1][RTW89_MKK][6] = 30, [0][1][RTW89_IC][6] = 72, [0][1][RTW89_ACMA][6] = 22, - [0][1][RTW89_FCC][7] = 68, - [0][1][RTW89_ETSI][7] = 26, - [0][1][RTW89_MKK][7] = 32, + [0][1][RTW89_FCC][7] = 54, + 
[0][1][RTW89_ETSI][7] = 24, + [0][1][RTW89_MKK][7] = 30, [0][1][RTW89_IC][7] = 68, [0][1][RTW89_ACMA][7] = 22, - [0][1][RTW89_FCC][8] = 64, - [0][1][RTW89_ETSI][8] = 26, - [0][1][RTW89_MKK][8] = 32, + [0][1][RTW89_FCC][8] = 50, + [0][1][RTW89_ETSI][8] = 24, + [0][1][RTW89_MKK][8] = 30, [0][1][RTW89_IC][8] = 64, [0][1][RTW89_ACMA][8] = 22, - [0][1][RTW89_FCC][9] = 60, - [0][1][RTW89_ETSI][9] = 26, - [0][1][RTW89_MKK][9] = 32, + [0][1][RTW89_FCC][9] = 46, + [0][1][RTW89_ETSI][9] = 24, + [0][1][RTW89_MKK][9] = 30, [0][1][RTW89_IC][9] = 60, [0][1][RTW89_ACMA][9] = 22, - [0][1][RTW89_FCC][10] = 60, - [0][1][RTW89_ETSI][10] = 26, - [0][1][RTW89_MKK][10] = 32, + [0][1][RTW89_FCC][10] = 46, + [0][1][RTW89_ETSI][10] = 24, + [0][1][RTW89_MKK][10] = 30, [0][1][RTW89_IC][10] = 60, [0][1][RTW89_ACMA][10] = 22, - [0][1][RTW89_FCC][11] = 52, - [0][1][RTW89_ETSI][11] = 26, - [0][1][RTW89_MKK][11] = 32, + [0][1][RTW89_FCC][11] = 30, + [0][1][RTW89_ETSI][11] = 24, + [0][1][RTW89_MKK][11] = 30, [0][1][RTW89_IC][11] = 52, [0][1][RTW89_ACMA][11] = 22, - [0][1][RTW89_FCC][12] = 30, - [0][1][RTW89_ETSI][12] = 22, - [0][1][RTW89_MKK][12] = 26, + [0][1][RTW89_FCC][12] = 22, + [0][1][RTW89_ETSI][12] = 20, + [0][1][RTW89_MKK][12] = 24, [0][1][RTW89_IC][12] = 30, [0][1][RTW89_ACMA][12] = 20, [0][1][RTW89_FCC][13] = 127, @@ -17339,69 +17339,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_MKK][13] = 127, [0][1][RTW89_IC][13] = 127, [0][1][RTW89_ACMA][13] = 127, - [1][0][RTW89_FCC][0] = 78, - [1][0][RTW89_ETSI][0] = 48, + [1][0][RTW89_FCC][0] = 64, + [1][0][RTW89_ETSI][0] = 46, [1][0][RTW89_MKK][0] = 48, [1][0][RTW89_IC][0] = 78, [1][0][RTW89_ACMA][0] = 42, - [1][0][RTW89_FCC][1] = 78, - [1][0][RTW89_ETSI][1] = 48, + [1][0][RTW89_FCC][1] = 64, + [1][0][RTW89_ETSI][1] = 46, [1][0][RTW89_MKK][1] = 48, [1][0][RTW89_IC][1] = 78, [1][0][RTW89_ACMA][1] = 44, - [1][0][RTW89_FCC][2] = 82, - [1][0][RTW89_ETSI][2] = 48, + [1][0][RTW89_FCC][2] = 68, + [1][0][RTW89_ETSI][2] = 46, [1][0][RTW89_MKK][2] = 48, [1][0][RTW89_IC][2] = 82, [1][0][RTW89_ACMA][2] = 44, - [1][0][RTW89_FCC][3] = 84, - [1][0][RTW89_ETSI][3] = 48, + [1][0][RTW89_FCC][3] = 70, + [1][0][RTW89_ETSI][3] = 46, [1][0][RTW89_MKK][3] = 48, [1][0][RTW89_IC][3] = 84, [1][0][RTW89_ACMA][3] = 44, - [1][0][RTW89_FCC][4] = 84, - [1][0][RTW89_ETSI][4] = 48, + [1][0][RTW89_FCC][4] = 70, + [1][0][RTW89_ETSI][4] = 46, [1][0][RTW89_MKK][4] = 48, [1][0][RTW89_IC][4] = 84, [1][0][RTW89_ACMA][4] = 44, - [1][0][RTW89_FCC][5] = 84, - [1][0][RTW89_ETSI][5] = 48, + [1][0][RTW89_FCC][5] = 76, + [1][0][RTW89_ETSI][5] = 46, [1][0][RTW89_MKK][5] = 48, [1][0][RTW89_IC][5] = 84, [1][0][RTW89_ACMA][5] = 44, - [1][0][RTW89_FCC][6] = 78, - [1][0][RTW89_ETSI][6] = 46, + [1][0][RTW89_FCC][6] = 64, + [1][0][RTW89_ETSI][6] = 44, [1][0][RTW89_MKK][6] = 48, [1][0][RTW89_IC][6] = 78, [1][0][RTW89_ACMA][6] = 44, - [1][0][RTW89_FCC][7] = 78, - [1][0][RTW89_ETSI][7] = 48, + [1][0][RTW89_FCC][7] = 64, + [1][0][RTW89_ETSI][7] = 46, [1][0][RTW89_MKK][7] = 48, [1][0][RTW89_IC][7] = 78, [1][0][RTW89_ACMA][7] = 44, - [1][0][RTW89_FCC][8] = 78, - [1][0][RTW89_ETSI][8] = 48, + [1][0][RTW89_FCC][8] = 64, + [1][0][RTW89_ETSI][8] = 46, [1][0][RTW89_MKK][8] = 48, [1][0][RTW89_IC][8] = 78, [1][0][RTW89_ACMA][8] = 44, - [1][0][RTW89_FCC][9] = 74, - [1][0][RTW89_ETSI][9] = 48, + [1][0][RTW89_FCC][9] = 60, + [1][0][RTW89_ETSI][9] = 46, [1][0][RTW89_MKK][9] = 48, [1][0][RTW89_IC][9] = 74, [1][0][RTW89_ACMA][9] = 44, - [1][0][RTW89_FCC][10] = 74, - [1][0][RTW89_ETSI][10] = 48, + 
[1][0][RTW89_FCC][10] = 60, + [1][0][RTW89_ETSI][10] = 46, [1][0][RTW89_MKK][10] = 48, [1][0][RTW89_IC][10] = 74, [1][0][RTW89_ACMA][10] = 44, - [1][0][RTW89_FCC][11] = 72, - [1][0][RTW89_ETSI][11] = 48, + [1][0][RTW89_FCC][11] = 42, + [1][0][RTW89_ETSI][11] = 46, [1][0][RTW89_MKK][11] = 48, [1][0][RTW89_IC][11] = 72, [1][0][RTW89_ACMA][11] = 44, - [1][0][RTW89_FCC][12] = 38, - [1][0][RTW89_ETSI][12] = 48, - [1][0][RTW89_MKK][12] = 48, + [1][0][RTW89_FCC][12] = 30, + [1][0][RTW89_ETSI][12] = 46, + [1][0][RTW89_MKK][12] = 46, [1][0][RTW89_IC][12] = 38, [1][0][RTW89_ACMA][12] = 42, [1][0][RTW89_FCC][13] = 127, @@ -17409,69 +17409,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_MKK][13] = 127, [1][0][RTW89_IC][13] = 127, [1][0][RTW89_ACMA][13] = 127, - [1][1][RTW89_FCC][0] = 66, - [1][1][RTW89_ETSI][0] = 34, - [1][1][RTW89_MKK][0] = 36, + [1][1][RTW89_FCC][0] = 46, + [1][1][RTW89_ETSI][0] = 32, + [1][1][RTW89_MKK][0] = 34, [1][1][RTW89_IC][0] = 66, [1][1][RTW89_ACMA][0] = 32, - [1][1][RTW89_FCC][1] = 66, - [1][1][RTW89_ETSI][1] = 36, - [1][1][RTW89_MKK][1] = 36, + [1][1][RTW89_FCC][1] = 46, + [1][1][RTW89_ETSI][1] = 34, + [1][1][RTW89_MKK][1] = 34, [1][1][RTW89_IC][1] = 66, [1][1][RTW89_ACMA][1] = 32, - [1][1][RTW89_FCC][2] = 70, - [1][1][RTW89_ETSI][2] = 36, - [1][1][RTW89_MKK][2] = 36, + [1][1][RTW89_FCC][2] = 50, + [1][1][RTW89_ETSI][2] = 34, + [1][1][RTW89_MKK][2] = 34, [1][1][RTW89_IC][2] = 70, [1][1][RTW89_ACMA][2] = 32, - [1][1][RTW89_FCC][3] = 74, - [1][1][RTW89_ETSI][3] = 36, - [1][1][RTW89_MKK][3] = 36, + [1][1][RTW89_FCC][3] = 54, + [1][1][RTW89_ETSI][3] = 34, + [1][1][RTW89_MKK][3] = 34, [1][1][RTW89_IC][3] = 74, [1][1][RTW89_ACMA][3] = 32, - [1][1][RTW89_FCC][4] = 74, - [1][1][RTW89_ETSI][4] = 36, - [1][1][RTW89_MKK][4] = 36, + [1][1][RTW89_FCC][4] = 58, + [1][1][RTW89_ETSI][4] = 34, + [1][1][RTW89_MKK][4] = 34, [1][1][RTW89_IC][4] = 74, [1][1][RTW89_ACMA][4] = 32, - [1][1][RTW89_FCC][5] = 74, - [1][1][RTW89_ETSI][5] = 36, - [1][1][RTW89_MKK][5] = 36, + [1][1][RTW89_FCC][5] = 66, + [1][1][RTW89_ETSI][5] = 34, + [1][1][RTW89_MKK][5] = 34, [1][1][RTW89_IC][5] = 74, [1][1][RTW89_ACMA][5] = 32, - [1][1][RTW89_FCC][6] = 74, - [1][1][RTW89_ETSI][6] = 36, - [1][1][RTW89_MKK][6] = 36, + [1][1][RTW89_FCC][6] = 58, + [1][1][RTW89_ETSI][6] = 34, + [1][1][RTW89_MKK][6] = 34, [1][1][RTW89_IC][6] = 74, [1][1][RTW89_ACMA][6] = 32, - [1][1][RTW89_FCC][7] = 74, - [1][1][RTW89_ETSI][7] = 36, - [1][1][RTW89_MKK][7] = 36, + [1][1][RTW89_FCC][7] = 54, + [1][1][RTW89_ETSI][7] = 34, + [1][1][RTW89_MKK][7] = 34, [1][1][RTW89_IC][7] = 74, [1][1][RTW89_ACMA][7] = 32, - [1][1][RTW89_FCC][8] = 70, - [1][1][RTW89_ETSI][8] = 36, - [1][1][RTW89_MKK][8] = 36, + [1][1][RTW89_FCC][8] = 50, + [1][1][RTW89_ETSI][8] = 34, + [1][1][RTW89_MKK][8] = 34, [1][1][RTW89_IC][8] = 70, [1][1][RTW89_ACMA][8] = 32, - [1][1][RTW89_FCC][9] = 66, - [1][1][RTW89_ETSI][9] = 36, - [1][1][RTW89_MKK][9] = 36, + [1][1][RTW89_FCC][9] = 46, + [1][1][RTW89_ETSI][9] = 34, + [1][1][RTW89_MKK][9] = 34, [1][1][RTW89_IC][9] = 66, [1][1][RTW89_ACMA][9] = 32, - [1][1][RTW89_FCC][10] = 66, - [1][1][RTW89_ETSI][10] = 36, - [1][1][RTW89_MKK][10] = 36, + [1][1][RTW89_FCC][10] = 46, + [1][1][RTW89_ETSI][10] = 34, + [1][1][RTW89_MKK][10] = 34, [1][1][RTW89_IC][10] = 66, [1][1][RTW89_ACMA][10] = 32, - [1][1][RTW89_FCC][11] = 48, - [1][1][RTW89_ETSI][11] = 36, - [1][1][RTW89_MKK][11] = 36, + [1][1][RTW89_FCC][11] = 30, + [1][1][RTW89_ETSI][11] = 34, + [1][1][RTW89_MKK][11] = 34, [1][1][RTW89_IC][11] = 48, 
[1][1][RTW89_ACMA][11] = 32, - [1][1][RTW89_FCC][12] = 32, - [1][1][RTW89_ETSI][12] = 36, - [1][1][RTW89_MKK][12] = 36, + [1][1][RTW89_FCC][12] = 24, + [1][1][RTW89_ETSI][12] = 34, + [1][1][RTW89_MKK][12] = 34, [1][1][RTW89_IC][12] = 32, [1][1][RTW89_ACMA][12] = 32, [1][1][RTW89_FCC][13] = 127, @@ -17479,69 +17479,69 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_MKK][13] = 127, [1][1][RTW89_IC][13] = 127, [1][1][RTW89_ACMA][13] = 127, - [2][0][RTW89_FCC][0] = 78, - [2][0][RTW89_ETSI][0] = 60, - [2][0][RTW89_MKK][0] = 60, + [2][0][RTW89_FCC][0] = 64, + [2][0][RTW89_ETSI][0] = 58, + [2][0][RTW89_MKK][0] = 58, [2][0][RTW89_IC][0] = 78, [2][0][RTW89_ACMA][0] = 56, - [2][0][RTW89_FCC][1] = 78, - [2][0][RTW89_ETSI][1] = 60, - [2][0][RTW89_MKK][1] = 60, + [2][0][RTW89_FCC][1] = 64, + [2][0][RTW89_ETSI][1] = 58, + [2][0][RTW89_MKK][1] = 58, [2][0][RTW89_IC][1] = 78, [2][0][RTW89_ACMA][1] = 56, - [2][0][RTW89_FCC][2] = 80, - [2][0][RTW89_ETSI][2] = 60, - [2][0][RTW89_MKK][2] = 60, + [2][0][RTW89_FCC][2] = 66, + [2][0][RTW89_ETSI][2] = 58, + [2][0][RTW89_MKK][2] = 58, [2][0][RTW89_IC][2] = 80, [2][0][RTW89_ACMA][2] = 56, - [2][0][RTW89_FCC][3] = 80, - [2][0][RTW89_ETSI][3] = 60, - [2][0][RTW89_MKK][3] = 60, + [2][0][RTW89_FCC][3] = 66, + [2][0][RTW89_ETSI][3] = 58, + [2][0][RTW89_MKK][3] = 58, [2][0][RTW89_IC][3] = 80, [2][0][RTW89_ACMA][3] = 56, - [2][0][RTW89_FCC][4] = 80, - [2][0][RTW89_ETSI][4] = 60, - [2][0][RTW89_MKK][4] = 60, + [2][0][RTW89_FCC][4] = 66, + [2][0][RTW89_ETSI][4] = 58, + [2][0][RTW89_MKK][4] = 58, [2][0][RTW89_IC][4] = 80, [2][0][RTW89_ACMA][4] = 56, - [2][0][RTW89_FCC][5] = 84, - [2][0][RTW89_ETSI][5] = 60, - [2][0][RTW89_MKK][5] = 60, + [2][0][RTW89_FCC][5] = 76, + [2][0][RTW89_ETSI][5] = 58, + [2][0][RTW89_MKK][5] = 58, [2][0][RTW89_IC][5] = 84, [2][0][RTW89_ACMA][5] = 56, - [2][0][RTW89_FCC][6] = 76, - [2][0][RTW89_ETSI][6] = 58, - [2][0][RTW89_MKK][6] = 60, + [2][0][RTW89_FCC][6] = 62, + [2][0][RTW89_ETSI][6] = 56, + [2][0][RTW89_MKK][6] = 58, [2][0][RTW89_IC][6] = 76, [2][0][RTW89_ACMA][6] = 56, - [2][0][RTW89_FCC][7] = 76, - [2][0][RTW89_ETSI][7] = 60, - [2][0][RTW89_MKK][7] = 60, + [2][0][RTW89_FCC][7] = 62, + [2][0][RTW89_ETSI][7] = 58, + [2][0][RTW89_MKK][7] = 58, [2][0][RTW89_IC][7] = 76, [2][0][RTW89_ACMA][7] = 56, - [2][0][RTW89_FCC][8] = 76, - [2][0][RTW89_ETSI][8] = 60, - [2][0][RTW89_MKK][8] = 60, + [2][0][RTW89_FCC][8] = 62, + [2][0][RTW89_ETSI][8] = 58, + [2][0][RTW89_MKK][8] = 58, [2][0][RTW89_IC][8] = 76, [2][0][RTW89_ACMA][8] = 56, - [2][0][RTW89_FCC][9] = 74, - [2][0][RTW89_ETSI][9] = 60, - [2][0][RTW89_MKK][9] = 60, + [2][0][RTW89_FCC][9] = 60, + [2][0][RTW89_ETSI][9] = 58, + [2][0][RTW89_MKK][9] = 58, [2][0][RTW89_IC][9] = 74, [2][0][RTW89_ACMA][9] = 56, - [2][0][RTW89_FCC][10] = 74, - [2][0][RTW89_ETSI][10] = 60, - [2][0][RTW89_MKK][10] = 60, + [2][0][RTW89_FCC][10] = 60, + [2][0][RTW89_ETSI][10] = 58, + [2][0][RTW89_MKK][10] = 58, [2][0][RTW89_IC][10] = 74, [2][0][RTW89_ACMA][10] = 56, - [2][0][RTW89_FCC][11] = 66, - [2][0][RTW89_ETSI][11] = 60, - [2][0][RTW89_MKK][11] = 60, + [2][0][RTW89_FCC][11] = 42, + [2][0][RTW89_ETSI][11] = 58, + [2][0][RTW89_MKK][11] = 58, [2][0][RTW89_IC][11] = 66, [2][0][RTW89_ACMA][11] = 56, - [2][0][RTW89_FCC][12] = 56, - [2][0][RTW89_ETSI][12] = 60, - [2][0][RTW89_MKK][12] = 60, + [2][0][RTW89_FCC][12] = 38, + [2][0][RTW89_ETSI][12] = 58, + [2][0][RTW89_MKK][12] = 58, [2][0][RTW89_IC][12] = 56, [2][0][RTW89_ACMA][12] = 56, [2][0][RTW89_FCC][13] = 127, @@ -17549,69 +17549,69 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_MKK][13] = 127, [2][0][RTW89_IC][13] = 127, [2][0][RTW89_ACMA][13] = 127, - [2][1][RTW89_FCC][0] = 70, - [2][1][RTW89_ETSI][0] = 48, - [2][1][RTW89_MKK][0] = 48, + [2][1][RTW89_FCC][0] = 46, + [2][1][RTW89_ETSI][0] = 46, + [2][1][RTW89_MKK][0] = 46, [2][1][RTW89_IC][0] = 70, [2][1][RTW89_ACMA][0] = 44, - [2][1][RTW89_FCC][1] = 70, - [2][1][RTW89_ETSI][1] = 48, - [2][1][RTW89_MKK][1] = 48, + [2][1][RTW89_FCC][1] = 46, + [2][1][RTW89_ETSI][1] = 46, + [2][1][RTW89_MKK][1] = 46, [2][1][RTW89_IC][1] = 70, [2][1][RTW89_ACMA][1] = 44, - [2][1][RTW89_FCC][2] = 74, - [2][1][RTW89_ETSI][2] = 48, - [2][1][RTW89_MKK][2] = 48, + [2][1][RTW89_FCC][2] = 50, + [2][1][RTW89_ETSI][2] = 46, + [2][1][RTW89_MKK][2] = 46, [2][1][RTW89_IC][2] = 74, [2][1][RTW89_ACMA][2] = 44, - [2][1][RTW89_FCC][3] = 78, - [2][1][RTW89_ETSI][3] = 48, - [2][1][RTW89_MKK][3] = 48, + [2][1][RTW89_FCC][3] = 54, + [2][1][RTW89_ETSI][3] = 46, + [2][1][RTW89_MKK][3] = 46, [2][1][RTW89_IC][3] = 78, [2][1][RTW89_ACMA][3] = 44, - [2][1][RTW89_FCC][4] = 80, - [2][1][RTW89_ETSI][4] = 48, - [2][1][RTW89_MKK][4] = 48, + [2][1][RTW89_FCC][4] = 56, + [2][1][RTW89_ETSI][4] = 46, + [2][1][RTW89_MKK][4] = 46, [2][1][RTW89_IC][4] = 80, [2][1][RTW89_ACMA][4] = 44, - [2][1][RTW89_FCC][5] = 80, - [2][1][RTW89_ETSI][5] = 48, - [2][1][RTW89_MKK][5] = 48, + [2][1][RTW89_FCC][5] = 72, + [2][1][RTW89_ETSI][5] = 46, + [2][1][RTW89_MKK][5] = 46, [2][1][RTW89_IC][5] = 80, [2][1][RTW89_ACMA][5] = 44, - [2][1][RTW89_FCC][6] = 78, - [2][1][RTW89_ETSI][6] = 46, - [2][1][RTW89_MKK][6] = 48, + [2][1][RTW89_FCC][6] = 54, + [2][1][RTW89_ETSI][6] = 44, + [2][1][RTW89_MKK][6] = 46, [2][1][RTW89_IC][6] = 78, [2][1][RTW89_ACMA][6] = 44, - [2][1][RTW89_FCC][7] = 78, - [2][1][RTW89_ETSI][7] = 48, - [2][1][RTW89_MKK][7] = 48, + [2][1][RTW89_FCC][7] = 54, + [2][1][RTW89_ETSI][7] = 46, + [2][1][RTW89_MKK][7] = 46, [2][1][RTW89_IC][7] = 78, [2][1][RTW89_ACMA][7] = 44, - [2][1][RTW89_FCC][8] = 74, - [2][1][RTW89_ETSI][8] = 48, - [2][1][RTW89_MKK][8] = 48, + [2][1][RTW89_FCC][8] = 50, + [2][1][RTW89_ETSI][8] = 46, + [2][1][RTW89_MKK][8] = 46, [2][1][RTW89_IC][8] = 74, [2][1][RTW89_ACMA][8] = 44, - [2][1][RTW89_FCC][9] = 70, - [2][1][RTW89_ETSI][9] = 48, - [2][1][RTW89_MKK][9] = 48, + [2][1][RTW89_FCC][9] = 46, + [2][1][RTW89_ETSI][9] = 46, + [2][1][RTW89_MKK][9] = 46, [2][1][RTW89_IC][9] = 70, [2][1][RTW89_ACMA][9] = 44, - [2][1][RTW89_FCC][10] = 70, - [2][1][RTW89_ETSI][10] = 48, - [2][1][RTW89_MKK][10] = 48, + [2][1][RTW89_FCC][10] = 46, + [2][1][RTW89_ETSI][10] = 46, + [2][1][RTW89_MKK][10] = 46, [2][1][RTW89_IC][10] = 70, [2][1][RTW89_ACMA][10] = 44, - [2][1][RTW89_FCC][11] = 60, - [2][1][RTW89_ETSI][11] = 48, - [2][1][RTW89_MKK][11] = 48, + [2][1][RTW89_FCC][11] = 30, + [2][1][RTW89_ETSI][11] = 46, + [2][1][RTW89_MKK][11] = 46, [2][1][RTW89_IC][11] = 60, [2][1][RTW89_ACMA][11] = 44, - [2][1][RTW89_FCC][12] = 44, - [2][1][RTW89_ETSI][12] = 46, - [2][1][RTW89_MKK][12] = 48, + [2][1][RTW89_FCC][12] = 26, + [2][1][RTW89_ETSI][12] = 44, + [2][1][RTW89_MKK][12] = 46, [2][1][RTW89_IC][12] = 44, [2][1][RTW89_ACMA][12] = 42, [2][1][RTW89_FCC][13] = 127, @@ -17625,10 +17625,10 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = { [0][0][RTW89_WW][0] = 24, [0][0][RTW89_WW][2] = 24, - [0][0][RTW89_WW][4] = 24, - [0][0][RTW89_WW][6] = 24, - [0][0][RTW89_WW][8] = 24, - [0][0][RTW89_WW][10] = 24, + [0][0][RTW89_WW][4] = 22, + [0][0][RTW89_WW][6] = 22, + 
[0][0][RTW89_WW][8] = 18, + [0][0][RTW89_WW][10] = 18, [0][0][RTW89_WW][12] = 24, [0][0][RTW89_WW][14] = 24, [0][0][RTW89_WW][15] = 24, @@ -17636,21 +17636,21 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_WW][19] = 24, [0][0][RTW89_WW][21] = 24, [0][0][RTW89_WW][23] = 24, - [0][0][RTW89_WW][25] = 32, - [0][0][RTW89_WW][27] = 32, - [0][0][RTW89_WW][29] = 32, + [0][0][RTW89_WW][25] = 30, + [0][0][RTW89_WW][27] = 30, + [0][0][RTW89_WW][29] = 30, [0][0][RTW89_WW][31] = 24, [0][0][RTW89_WW][33] = 24, [0][0][RTW89_WW][35] = 24, [0][0][RTW89_WW][37] = 44, - [0][0][RTW89_WW][38] = 30, - [0][0][RTW89_WW][40] = 30, - [0][0][RTW89_WW][42] = 30, - [0][0][RTW89_WW][44] = 30, - [0][0][RTW89_WW][46] = 30, - [0][0][RTW89_WW][48] = 32, - [0][0][RTW89_WW][50] = 32, - [0][0][RTW89_WW][52] = 32, + [0][0][RTW89_WW][38] = 28, + [0][0][RTW89_WW][40] = 28, + [0][0][RTW89_WW][42] = 28, + [0][0][RTW89_WW][44] = 28, + [0][0][RTW89_WW][46] = 28, + [0][0][RTW89_WW][48] = 24, + [0][0][RTW89_WW][50] = 24, + [0][0][RTW89_WW][52] = 24, [0][1][RTW89_WW][0] = 0, [0][1][RTW89_WW][2] = 4, [0][1][RTW89_WW][4] = 0, @@ -17664,21 +17664,21 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_WW][19] = 12, [0][1][RTW89_WW][21] = 12, [0][1][RTW89_WW][23] = 12, - [0][1][RTW89_WW][25] = 20, - [0][1][RTW89_WW][27] = 18, - [0][1][RTW89_WW][29] = 18, + [0][1][RTW89_WW][25] = 18, + [0][1][RTW89_WW][27] = 16, + [0][1][RTW89_WW][29] = 16, [0][1][RTW89_WW][31] = 12, [0][1][RTW89_WW][33] = 12, [0][1][RTW89_WW][35] = 12, - [0][1][RTW89_WW][37] = 34, - [0][1][RTW89_WW][38] = 18, - [0][1][RTW89_WW][40] = 18, - [0][1][RTW89_WW][42] = 18, - [0][1][RTW89_WW][44] = 18, - [0][1][RTW89_WW][46] = 18, - [0][1][RTW89_WW][48] = 20, - [0][1][RTW89_WW][50] = 20, - [0][1][RTW89_WW][52] = 20, + [0][1][RTW89_WW][37] = 30, + [0][1][RTW89_WW][38] = 16, + [0][1][RTW89_WW][40] = 16, + [0][1][RTW89_WW][42] = 16, + [0][1][RTW89_WW][44] = 16, + [0][1][RTW89_WW][46] = 16, + [0][1][RTW89_WW][48] = 12, + [0][1][RTW89_WW][50] = 12, + [0][1][RTW89_WW][52] = 12, [1][0][RTW89_WW][0] = 34, [1][0][RTW89_WW][2] = 34, [1][0][RTW89_WW][4] = 34, @@ -17692,21 +17692,21 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_WW][19] = 34, [1][0][RTW89_WW][21] = 34, [1][0][RTW89_WW][23] = 34, - [1][0][RTW89_WW][25] = 42, - [1][0][RTW89_WW][27] = 44, - [1][0][RTW89_WW][29] = 44, + [1][0][RTW89_WW][25] = 40, + [1][0][RTW89_WW][27] = 42, + [1][0][RTW89_WW][29] = 42, [1][0][RTW89_WW][31] = 34, [1][0][RTW89_WW][33] = 34, [1][0][RTW89_WW][35] = 34, - [1][0][RTW89_WW][37] = 52, - [1][0][RTW89_WW][38] = 30, - [1][0][RTW89_WW][40] = 30, - [1][0][RTW89_WW][42] = 30, - [1][0][RTW89_WW][44] = 30, - [1][0][RTW89_WW][46] = 30, - [1][0][RTW89_WW][48] = 44, - [1][0][RTW89_WW][50] = 44, - [1][0][RTW89_WW][52] = 44, + [1][0][RTW89_WW][37] = 56, + [1][0][RTW89_WW][38] = 28, + [1][0][RTW89_WW][40] = 28, + [1][0][RTW89_WW][42] = 28, + [1][0][RTW89_WW][44] = 28, + [1][0][RTW89_WW][46] = 28, + [1][0][RTW89_WW][48] = 36, + [1][0][RTW89_WW][50] = 36, + [1][0][RTW89_WW][52] = 36, [1][1][RTW89_WW][0] = 10, [1][1][RTW89_WW][2] = 14, [1][1][RTW89_WW][4] = 10, @@ -17720,55 +17720,55 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_WW][19] = 22, [1][1][RTW89_WW][21] = 22, [1][1][RTW89_WW][23] = 22, - [1][1][RTW89_WW][25] = 30, - [1][1][RTW89_WW][27] = 32, - [1][1][RTW89_WW][29] = 32, + [1][1][RTW89_WW][25] = 28, + [1][1][RTW89_WW][27] = 30, + [1][1][RTW89_WW][29] = 30, [1][1][RTW89_WW][31] 
= 22, [1][1][RTW89_WW][33] = 22, [1][1][RTW89_WW][35] = 22, - [1][1][RTW89_WW][37] = 42, - [1][1][RTW89_WW][38] = 18, - [1][1][RTW89_WW][40] = 18, - [1][1][RTW89_WW][42] = 18, - [1][1][RTW89_WW][44] = 18, - [1][1][RTW89_WW][46] = 18, - [1][1][RTW89_WW][48] = 32, - [1][1][RTW89_WW][50] = 32, - [1][1][RTW89_WW][52] = 32, + [1][1][RTW89_WW][37] = 40, + [1][1][RTW89_WW][38] = 16, + [1][1][RTW89_WW][40] = 16, + [1][1][RTW89_WW][42] = 16, + [1][1][RTW89_WW][44] = 16, + [1][1][RTW89_WW][46] = 16, + [1][1][RTW89_WW][48] = 24, + [1][1][RTW89_WW][50] = 24, + [1][1][RTW89_WW][52] = 24, [2][0][RTW89_WW][0] = 46, [2][0][RTW89_WW][2] = 46, [2][0][RTW89_WW][4] = 46, [2][0][RTW89_WW][6] = 46, - [2][0][RTW89_WW][8] = 48, - [2][0][RTW89_WW][10] = 48, - [2][0][RTW89_WW][12] = 46, - [2][0][RTW89_WW][14] = 46, + [2][0][RTW89_WW][8] = 44, + [2][0][RTW89_WW][10] = 44, + [2][0][RTW89_WW][12] = 48, + [2][0][RTW89_WW][14] = 48, [2][0][RTW89_WW][15] = 48, [2][0][RTW89_WW][17] = 48, [2][0][RTW89_WW][19] = 48, [2][0][RTW89_WW][21] = 48, [2][0][RTW89_WW][23] = 48, - [2][0][RTW89_WW][25] = 54, - [2][0][RTW89_WW][27] = 54, - [2][0][RTW89_WW][29] = 54, + [2][0][RTW89_WW][25] = 52, + [2][0][RTW89_WW][27] = 52, + [2][0][RTW89_WW][29] = 52, [2][0][RTW89_WW][31] = 48, [2][0][RTW89_WW][33] = 48, [2][0][RTW89_WW][35] = 48, - [2][0][RTW89_WW][37] = 66, - [2][0][RTW89_WW][38] = 30, - [2][0][RTW89_WW][40] = 30, - [2][0][RTW89_WW][42] = 30, - [2][0][RTW89_WW][44] = 30, - [2][0][RTW89_WW][46] = 30, - [2][0][RTW89_WW][48] = 56, - [2][0][RTW89_WW][50] = 56, - [2][0][RTW89_WW][52] = 56, + [2][0][RTW89_WW][37] = 62, + [2][0][RTW89_WW][38] = 28, + [2][0][RTW89_WW][40] = 28, + [2][0][RTW89_WW][42] = 28, + [2][0][RTW89_WW][44] = 28, + [2][0][RTW89_WW][46] = 28, + [2][0][RTW89_WW][48] = 48, + [2][0][RTW89_WW][50] = 48, + [2][0][RTW89_WW][52] = 48, [2][1][RTW89_WW][0] = 20, [2][1][RTW89_WW][2] = 18, [2][1][RTW89_WW][4] = 22, [2][1][RTW89_WW][6] = 22, - [2][1][RTW89_WW][8] = 34, - [2][1][RTW89_WW][10] = 34, + [2][1][RTW89_WW][8] = 32, + [2][1][RTW89_WW][10] = 32, [2][1][RTW89_WW][12] = 36, [2][1][RTW89_WW][14] = 36, [2][1][RTW89_WW][15] = 36, @@ -17776,857 +17776,857 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_WW][19] = 36, [2][1][RTW89_WW][21] = 36, [2][1][RTW89_WW][23] = 36, - [2][1][RTW89_WW][25] = 42, - [2][1][RTW89_WW][27] = 42, - [2][1][RTW89_WW][29] = 42, + [2][1][RTW89_WW][25] = 40, + [2][1][RTW89_WW][27] = 40, + [2][1][RTW89_WW][29] = 40, [2][1][RTW89_WW][31] = 36, [2][1][RTW89_WW][33] = 36, [2][1][RTW89_WW][35] = 36, - [2][1][RTW89_WW][37] = 50, - [2][1][RTW89_WW][38] = 18, - [2][1][RTW89_WW][40] = 18, - [2][1][RTW89_WW][42] = 18, - [2][1][RTW89_WW][44] = 18, - [2][1][RTW89_WW][46] = 18, - [2][1][RTW89_WW][48] = 44, - [2][1][RTW89_WW][50] = 44, - [2][1][RTW89_WW][52] = 44, - [0][0][RTW89_FCC][0] = 52, - [0][0][RTW89_ETSI][0] = 32, - [0][0][RTW89_MKK][0] = 26, + [2][1][RTW89_WW][37] = 42, + [2][1][RTW89_WW][38] = 16, + [2][1][RTW89_WW][40] = 16, + [2][1][RTW89_WW][42] = 16, + [2][1][RTW89_WW][44] = 16, + [2][1][RTW89_WW][46] = 16, + [2][1][RTW89_WW][48] = 36, + [2][1][RTW89_WW][50] = 36, + [2][1][RTW89_WW][52] = 36, + [0][0][RTW89_FCC][0] = 44, + [0][0][RTW89_ETSI][0] = 30, + [0][0][RTW89_MKK][0] = 36, [0][0][RTW89_IC][0] = 24, [0][0][RTW89_ACMA][0] = 24, - [0][0][RTW89_FCC][2] = 52, - [0][0][RTW89_ETSI][2] = 32, - [0][0][RTW89_MKK][2] = 26, + [0][0][RTW89_FCC][2] = 44, + [0][0][RTW89_ETSI][2] = 30, + [0][0][RTW89_MKK][2] = 36, [0][0][RTW89_IC][2] = 24, [0][0][RTW89_ACMA][2] = 24, - 
[0][0][RTW89_FCC][4] = 52, - [0][0][RTW89_ETSI][4] = 32, - [0][0][RTW89_MKK][4] = 26, + [0][0][RTW89_FCC][4] = 44, + [0][0][RTW89_ETSI][4] = 30, + [0][0][RTW89_MKK][4] = 22, [0][0][RTW89_IC][4] = 24, [0][0][RTW89_ACMA][4] = 24, - [0][0][RTW89_FCC][6] = 52, - [0][0][RTW89_ETSI][6] = 32, - [0][0][RTW89_MKK][6] = 26, + [0][0][RTW89_FCC][6] = 44, + [0][0][RTW89_ETSI][6] = 30, + [0][0][RTW89_MKK][6] = 22, [0][0][RTW89_IC][6] = 24, [0][0][RTW89_ACMA][6] = 24, - [0][0][RTW89_FCC][8] = 52, - [0][0][RTW89_ETSI][8] = 30, - [0][0][RTW89_MKK][8] = 26, + [0][0][RTW89_FCC][8] = 44, + [0][0][RTW89_ETSI][8] = 28, + [0][0][RTW89_MKK][8] = 18, [0][0][RTW89_IC][8] = 52, [0][0][RTW89_ACMA][8] = 24, - [0][0][RTW89_FCC][10] = 52, - [0][0][RTW89_ETSI][10] = 30, - [0][0][RTW89_MKK][10] = 26, + [0][0][RTW89_FCC][10] = 44, + [0][0][RTW89_ETSI][10] = 28, + [0][0][RTW89_MKK][10] = 18, [0][0][RTW89_IC][10] = 52, [0][0][RTW89_ACMA][10] = 24, - [0][0][RTW89_FCC][12] = 52, - [0][0][RTW89_ETSI][12] = 30, - [0][0][RTW89_MKK][12] = 24, + [0][0][RTW89_FCC][12] = 44, + [0][0][RTW89_ETSI][12] = 28, + [0][0][RTW89_MKK][12] = 34, [0][0][RTW89_IC][12] = 52, [0][0][RTW89_ACMA][12] = 24, - [0][0][RTW89_FCC][14] = 52, - [0][0][RTW89_ETSI][14] = 30, - [0][0][RTW89_MKK][14] = 24, + [0][0][RTW89_FCC][14] = 44, + [0][0][RTW89_ETSI][14] = 28, + [0][0][RTW89_MKK][14] = 34, [0][0][RTW89_IC][14] = 52, [0][0][RTW89_ACMA][14] = 24, - [0][0][RTW89_FCC][15] = 52, - [0][0][RTW89_ETSI][15] = 32, - [0][0][RTW89_MKK][15] = 46, + [0][0][RTW89_FCC][15] = 44, + [0][0][RTW89_ETSI][15] = 30, + [0][0][RTW89_MKK][15] = 56, [0][0][RTW89_IC][15] = 52, [0][0][RTW89_ACMA][15] = 24, - [0][0][RTW89_FCC][17] = 52, - [0][0][RTW89_ETSI][17] = 32, - [0][0][RTW89_MKK][17] = 48, + [0][0][RTW89_FCC][17] = 44, + [0][0][RTW89_ETSI][17] = 30, + [0][0][RTW89_MKK][17] = 58, [0][0][RTW89_IC][17] = 52, [0][0][RTW89_ACMA][17] = 24, - [0][0][RTW89_FCC][19] = 52, - [0][0][RTW89_ETSI][19] = 32, - [0][0][RTW89_MKK][19] = 48, + [0][0][RTW89_FCC][19] = 44, + [0][0][RTW89_ETSI][19] = 30, + [0][0][RTW89_MKK][19] = 58, [0][0][RTW89_IC][19] = 52, [0][0][RTW89_ACMA][19] = 24, - [0][0][RTW89_FCC][21] = 52, - [0][0][RTW89_ETSI][21] = 32, - [0][0][RTW89_MKK][21] = 48, + [0][0][RTW89_FCC][21] = 44, + [0][0][RTW89_ETSI][21] = 30, + [0][0][RTW89_MKK][21] = 58, [0][0][RTW89_IC][21] = 52, [0][0][RTW89_ACMA][21] = 24, - [0][0][RTW89_FCC][23] = 52, - [0][0][RTW89_ETSI][23] = 32, - [0][0][RTW89_MKK][23] = 48, + [0][0][RTW89_FCC][23] = 44, + [0][0][RTW89_ETSI][23] = 30, + [0][0][RTW89_MKK][23] = 58, [0][0][RTW89_IC][23] = 52, [0][0][RTW89_ACMA][23] = 24, - [0][0][RTW89_FCC][25] = 52, - [0][0][RTW89_ETSI][25] = 32, - [0][0][RTW89_MKK][25] = 48, + [0][0][RTW89_FCC][25] = 44, + [0][0][RTW89_ETSI][25] = 30, + [0][0][RTW89_MKK][25] = 58, [0][0][RTW89_IC][25] = 127, [0][0][RTW89_ACMA][25] = 127, - [0][0][RTW89_FCC][27] = 52, - [0][0][RTW89_ETSI][27] = 32, - [0][0][RTW89_MKK][27] = 48, + [0][0][RTW89_FCC][27] = 44, + [0][0][RTW89_ETSI][27] = 30, + [0][0][RTW89_MKK][27] = 58, [0][0][RTW89_IC][27] = 127, [0][0][RTW89_ACMA][27] = 127, - [0][0][RTW89_FCC][29] = 52, - [0][0][RTW89_ETSI][29] = 32, - [0][0][RTW89_MKK][29] = 48, + [0][0][RTW89_FCC][29] = 44, + [0][0][RTW89_ETSI][29] = 30, + [0][0][RTW89_MKK][29] = 58, [0][0][RTW89_IC][29] = 127, [0][0][RTW89_ACMA][29] = 127, - [0][0][RTW89_FCC][31] = 52, - [0][0][RTW89_ETSI][31] = 32, - [0][0][RTW89_MKK][31] = 48, + [0][0][RTW89_FCC][31] = 44, + [0][0][RTW89_ETSI][31] = 30, + [0][0][RTW89_MKK][31] = 58, [0][0][RTW89_IC][31] = 52, [0][0][RTW89_ACMA][31] = 24, - 
[0][0][RTW89_FCC][33] = 52, - [0][0][RTW89_ETSI][33] = 32, - [0][0][RTW89_MKK][33] = 48, + [0][0][RTW89_FCC][33] = 44, + [0][0][RTW89_ETSI][33] = 30, + [0][0][RTW89_MKK][33] = 58, [0][0][RTW89_IC][33] = 52, [0][0][RTW89_ACMA][33] = 24, - [0][0][RTW89_FCC][35] = 52, - [0][0][RTW89_ETSI][35] = 32, - [0][0][RTW89_MKK][35] = 48, + [0][0][RTW89_FCC][35] = 44, + [0][0][RTW89_ETSI][35] = 30, + [0][0][RTW89_MKK][35] = 58, [0][0][RTW89_IC][35] = 52, [0][0][RTW89_ACMA][35] = 24, - [0][0][RTW89_FCC][37] = 52, + [0][0][RTW89_FCC][37] = 44, [0][0][RTW89_ETSI][37] = 127, - [0][0][RTW89_MKK][37] = 44, + [0][0][RTW89_MKK][37] = 58, [0][0][RTW89_IC][37] = 52, [0][0][RTW89_ACMA][37] = 52, - [0][0][RTW89_FCC][38] = 84, - [0][0][RTW89_ETSI][38] = 30, + [0][0][RTW89_FCC][38] = 76, + [0][0][RTW89_ETSI][38] = 28, [0][0][RTW89_MKK][38] = 127, [0][0][RTW89_IC][38] = 84, [0][0][RTW89_ACMA][38] = 84, - [0][0][RTW89_FCC][40] = 84, - [0][0][RTW89_ETSI][40] = 30, + [0][0][RTW89_FCC][40] = 76, + [0][0][RTW89_ETSI][40] = 28, [0][0][RTW89_MKK][40] = 127, [0][0][RTW89_IC][40] = 84, [0][0][RTW89_ACMA][40] = 84, - [0][0][RTW89_FCC][42] = 84, - [0][0][RTW89_ETSI][42] = 30, + [0][0][RTW89_FCC][42] = 76, + [0][0][RTW89_ETSI][42] = 28, [0][0][RTW89_MKK][42] = 127, [0][0][RTW89_IC][42] = 84, [0][0][RTW89_ACMA][42] = 84, - [0][0][RTW89_FCC][44] = 84, - [0][0][RTW89_ETSI][44] = 30, + [0][0][RTW89_FCC][44] = 76, + [0][0][RTW89_ETSI][44] = 28, [0][0][RTW89_MKK][44] = 127, [0][0][RTW89_IC][44] = 84, [0][0][RTW89_ACMA][44] = 84, - [0][0][RTW89_FCC][46] = 84, - [0][0][RTW89_ETSI][46] = 30, + [0][0][RTW89_FCC][46] = 76, + [0][0][RTW89_ETSI][46] = 28, [0][0][RTW89_MKK][46] = 127, [0][0][RTW89_IC][46] = 84, [0][0][RTW89_ACMA][46] = 84, - [0][0][RTW89_FCC][48] = 32, + [0][0][RTW89_FCC][48] = 24, [0][0][RTW89_ETSI][48] = 127, [0][0][RTW89_MKK][48] = 127, [0][0][RTW89_IC][48] = 127, [0][0][RTW89_ACMA][48] = 127, - [0][0][RTW89_FCC][50] = 32, + [0][0][RTW89_FCC][50] = 24, [0][0][RTW89_ETSI][50] = 127, [0][0][RTW89_MKK][50] = 127, [0][0][RTW89_IC][50] = 127, [0][0][RTW89_ACMA][50] = 127, - [0][0][RTW89_FCC][52] = 32, + [0][0][RTW89_FCC][52] = 24, [0][0][RTW89_ETSI][52] = 127, [0][0][RTW89_MKK][52] = 127, [0][0][RTW89_IC][52] = 127, [0][0][RTW89_ACMA][52] = 127, - [0][1][RTW89_FCC][0] = 34, - [0][1][RTW89_ETSI][0] = 20, - [0][1][RTW89_MKK][0] = 12, + [0][1][RTW89_FCC][0] = 26, + [0][1][RTW89_ETSI][0] = 18, + [0][1][RTW89_MKK][0] = 20, [0][1][RTW89_IC][0] = 0, [0][1][RTW89_ACMA][0] = 12, - [0][1][RTW89_FCC][2] = 38, - [0][1][RTW89_ETSI][2] = 20, - [0][1][RTW89_MKK][2] = 12, + [0][1][RTW89_FCC][2] = 30, + [0][1][RTW89_ETSI][2] = 18, + [0][1][RTW89_MKK][2] = 20, [0][1][RTW89_IC][2] = 4, [0][1][RTW89_ACMA][2] = 12, - [0][1][RTW89_FCC][4] = 34, - [0][1][RTW89_ETSI][4] = 20, - [0][1][RTW89_MKK][4] = 14, + [0][1][RTW89_FCC][4] = 26, + [0][1][RTW89_ETSI][4] = 18, + [0][1][RTW89_MKK][4] = 8, [0][1][RTW89_IC][4] = 0, [0][1][RTW89_ACMA][4] = 12, - [0][1][RTW89_FCC][6] = 34, - [0][1][RTW89_ETSI][6] = 20, - [0][1][RTW89_MKK][6] = 14, + [0][1][RTW89_FCC][6] = 26, + [0][1][RTW89_ETSI][6] = 18, + [0][1][RTW89_MKK][6] = 8, [0][1][RTW89_IC][6] = 0, [0][1][RTW89_ACMA][6] = 12, - [0][1][RTW89_FCC][8] = 34, - [0][1][RTW89_ETSI][8] = 18, - [0][1][RTW89_MKK][8] = 14, + [0][1][RTW89_FCC][8] = 26, + [0][1][RTW89_ETSI][8] = 16, + [0][1][RTW89_MKK][8] = 20, [0][1][RTW89_IC][8] = 34, [0][1][RTW89_ACMA][8] = 12, - [0][1][RTW89_FCC][10] = 34, - [0][1][RTW89_ETSI][10] = 18, - [0][1][RTW89_MKK][10] = 14, + [0][1][RTW89_FCC][10] = 26, + [0][1][RTW89_ETSI][10] = 16, + 
[0][1][RTW89_MKK][10] = 20, [0][1][RTW89_IC][10] = 34, [0][1][RTW89_ACMA][10] = 12, - [0][1][RTW89_FCC][12] = 38, - [0][1][RTW89_ETSI][12] = 18, - [0][1][RTW89_MKK][12] = 12, + [0][1][RTW89_FCC][12] = 30, + [0][1][RTW89_ETSI][12] = 16, + [0][1][RTW89_MKK][12] = 34, [0][1][RTW89_IC][12] = 38, [0][1][RTW89_ACMA][12] = 12, - [0][1][RTW89_FCC][14] = 34, - [0][1][RTW89_ETSI][14] = 18, - [0][1][RTW89_MKK][14] = 12, + [0][1][RTW89_FCC][14] = 26, + [0][1][RTW89_ETSI][14] = 16, + [0][1][RTW89_MKK][14] = 34, [0][1][RTW89_IC][14] = 34, [0][1][RTW89_ACMA][14] = 12, - [0][1][RTW89_FCC][15] = 34, - [0][1][RTW89_ETSI][15] = 20, - [0][1][RTW89_MKK][15] = 32, + [0][1][RTW89_FCC][15] = 26, + [0][1][RTW89_ETSI][15] = 18, + [0][1][RTW89_MKK][15] = 44, [0][1][RTW89_IC][15] = 34, [0][1][RTW89_ACMA][15] = 12, - [0][1][RTW89_FCC][17] = 34, - [0][1][RTW89_ETSI][17] = 20, - [0][1][RTW89_MKK][17] = 34, + [0][1][RTW89_FCC][17] = 26, + [0][1][RTW89_ETSI][17] = 18, + [0][1][RTW89_MKK][17] = 44, [0][1][RTW89_IC][17] = 34, [0][1][RTW89_ACMA][17] = 12, - [0][1][RTW89_FCC][19] = 38, - [0][1][RTW89_ETSI][19] = 20, - [0][1][RTW89_MKK][19] = 34, + [0][1][RTW89_FCC][19] = 30, + [0][1][RTW89_ETSI][19] = 18, + [0][1][RTW89_MKK][19] = 44, [0][1][RTW89_IC][19] = 38, [0][1][RTW89_ACMA][19] = 12, - [0][1][RTW89_FCC][21] = 38, - [0][1][RTW89_ETSI][21] = 20, - [0][1][RTW89_MKK][21] = 34, + [0][1][RTW89_FCC][21] = 30, + [0][1][RTW89_ETSI][21] = 18, + [0][1][RTW89_MKK][21] = 44, [0][1][RTW89_IC][21] = 38, [0][1][RTW89_ACMA][21] = 12, - [0][1][RTW89_FCC][23] = 38, - [0][1][RTW89_ETSI][23] = 20, - [0][1][RTW89_MKK][23] = 34, + [0][1][RTW89_FCC][23] = 30, + [0][1][RTW89_ETSI][23] = 18, + [0][1][RTW89_MKK][23] = 44, [0][1][RTW89_IC][23] = 38, [0][1][RTW89_ACMA][23] = 12, - [0][1][RTW89_FCC][25] = 38, - [0][1][RTW89_ETSI][25] = 20, - [0][1][RTW89_MKK][25] = 34, + [0][1][RTW89_FCC][25] = 30, + [0][1][RTW89_ETSI][25] = 18, + [0][1][RTW89_MKK][25] = 44, [0][1][RTW89_IC][25] = 127, [0][1][RTW89_ACMA][25] = 127, - [0][1][RTW89_FCC][27] = 38, - [0][1][RTW89_ETSI][27] = 18, - [0][1][RTW89_MKK][27] = 34, + [0][1][RTW89_FCC][27] = 30, + [0][1][RTW89_ETSI][27] = 16, + [0][1][RTW89_MKK][27] = 44, [0][1][RTW89_IC][27] = 127, [0][1][RTW89_ACMA][27] = 127, - [0][1][RTW89_FCC][29] = 38, - [0][1][RTW89_ETSI][29] = 18, - [0][1][RTW89_MKK][29] = 34, + [0][1][RTW89_FCC][29] = 30, + [0][1][RTW89_ETSI][29] = 16, + [0][1][RTW89_MKK][29] = 44, [0][1][RTW89_IC][29] = 127, [0][1][RTW89_ACMA][29] = 127, - [0][1][RTW89_FCC][31] = 38, - [0][1][RTW89_ETSI][31] = 18, - [0][1][RTW89_MKK][31] = 34, + [0][1][RTW89_FCC][31] = 30, + [0][1][RTW89_ETSI][31] = 16, + [0][1][RTW89_MKK][31] = 44, [0][1][RTW89_IC][31] = 34, [0][1][RTW89_ACMA][31] = 12, - [0][1][RTW89_FCC][33] = 34, - [0][1][RTW89_ETSI][33] = 18, - [0][1][RTW89_MKK][33] = 34, + [0][1][RTW89_FCC][33] = 26, + [0][1][RTW89_ETSI][33] = 16, + [0][1][RTW89_MKK][33] = 44, [0][1][RTW89_IC][33] = 34, [0][1][RTW89_ACMA][33] = 12, - [0][1][RTW89_FCC][35] = 34, - [0][1][RTW89_ETSI][35] = 18, - [0][1][RTW89_MKK][35] = 34, + [0][1][RTW89_FCC][35] = 26, + [0][1][RTW89_ETSI][35] = 16, + [0][1][RTW89_MKK][35] = 44, [0][1][RTW89_IC][35] = 34, [0][1][RTW89_ACMA][35] = 12, - [0][1][RTW89_FCC][37] = 38, + [0][1][RTW89_FCC][37] = 30, [0][1][RTW89_ETSI][37] = 127, - [0][1][RTW89_MKK][37] = 34, + [0][1][RTW89_MKK][37] = 44, [0][1][RTW89_IC][37] = 38, [0][1][RTW89_ACMA][37] = 38, - [0][1][RTW89_FCC][38] = 82, - [0][1][RTW89_ETSI][38] = 18, + [0][1][RTW89_FCC][38] = 74, + [0][1][RTW89_ETSI][38] = 16, [0][1][RTW89_MKK][38] = 127, 
[0][1][RTW89_IC][38] = 82, [0][1][RTW89_ACMA][38] = 84, - [0][1][RTW89_FCC][40] = 82, - [0][1][RTW89_ETSI][40] = 18, + [0][1][RTW89_FCC][40] = 74, + [0][1][RTW89_ETSI][40] = 16, [0][1][RTW89_MKK][40] = 127, [0][1][RTW89_IC][40] = 82, [0][1][RTW89_ACMA][40] = 84, - [0][1][RTW89_FCC][42] = 82, - [0][1][RTW89_ETSI][42] = 18, + [0][1][RTW89_FCC][42] = 74, + [0][1][RTW89_ETSI][42] = 16, [0][1][RTW89_MKK][42] = 127, [0][1][RTW89_IC][42] = 82, [0][1][RTW89_ACMA][42] = 84, - [0][1][RTW89_FCC][44] = 82, - [0][1][RTW89_ETSI][44] = 18, + [0][1][RTW89_FCC][44] = 74, + [0][1][RTW89_ETSI][44] = 16, [0][1][RTW89_MKK][44] = 127, [0][1][RTW89_IC][44] = 82, [0][1][RTW89_ACMA][44] = 84, - [0][1][RTW89_FCC][46] = 82, - [0][1][RTW89_ETSI][46] = 18, + [0][1][RTW89_FCC][46] = 74, + [0][1][RTW89_ETSI][46] = 16, [0][1][RTW89_MKK][46] = 127, [0][1][RTW89_IC][46] = 82, [0][1][RTW89_ACMA][46] = 84, - [0][1][RTW89_FCC][48] = 20, + [0][1][RTW89_FCC][48] = 12, [0][1][RTW89_ETSI][48] = 127, [0][1][RTW89_MKK][48] = 127, [0][1][RTW89_IC][48] = 127, [0][1][RTW89_ACMA][48] = 127, - [0][1][RTW89_FCC][50] = 20, + [0][1][RTW89_FCC][50] = 12, [0][1][RTW89_ETSI][50] = 127, [0][1][RTW89_MKK][50] = 127, [0][1][RTW89_IC][50] = 127, [0][1][RTW89_ACMA][50] = 127, - [0][1][RTW89_FCC][52] = 20, + [0][1][RTW89_FCC][52] = 12, [0][1][RTW89_ETSI][52] = 127, [0][1][RTW89_MKK][52] = 127, [0][1][RTW89_IC][52] = 127, [0][1][RTW89_ACMA][52] = 127, - [1][0][RTW89_FCC][0] = 62, - [1][0][RTW89_ETSI][0] = 42, - [1][0][RTW89_MKK][0] = 36, + [1][0][RTW89_FCC][0] = 54, + [1][0][RTW89_ETSI][0] = 40, + [1][0][RTW89_MKK][0] = 48, [1][0][RTW89_IC][0] = 36, [1][0][RTW89_ACMA][0] = 34, - [1][0][RTW89_FCC][2] = 62, - [1][0][RTW89_ETSI][2] = 42, - [1][0][RTW89_MKK][2] = 36, + [1][0][RTW89_FCC][2] = 54, + [1][0][RTW89_ETSI][2] = 40, + [1][0][RTW89_MKK][2] = 48, [1][0][RTW89_IC][2] = 36, [1][0][RTW89_ACMA][2] = 34, - [1][0][RTW89_FCC][4] = 62, - [1][0][RTW89_ETSI][4] = 42, - [1][0][RTW89_MKK][4] = 34, + [1][0][RTW89_FCC][4] = 54, + [1][0][RTW89_ETSI][4] = 40, + [1][0][RTW89_MKK][4] = 40, [1][0][RTW89_IC][4] = 36, [1][0][RTW89_ACMA][4] = 34, - [1][0][RTW89_FCC][6] = 62, - [1][0][RTW89_ETSI][6] = 42, - [1][0][RTW89_MKK][6] = 34, + [1][0][RTW89_FCC][6] = 54, + [1][0][RTW89_ETSI][6] = 40, + [1][0][RTW89_MKK][6] = 40, [1][0][RTW89_IC][6] = 36, [1][0][RTW89_ACMA][6] = 34, - [1][0][RTW89_FCC][8] = 62, - [1][0][RTW89_ETSI][8] = 42, - [1][0][RTW89_MKK][8] = 36, + [1][0][RTW89_FCC][8] = 54, + [1][0][RTW89_ETSI][8] = 40, + [1][0][RTW89_MKK][8] = 34, [1][0][RTW89_IC][8] = 62, [1][0][RTW89_ACMA][8] = 34, - [1][0][RTW89_FCC][10] = 62, - [1][0][RTW89_ETSI][10] = 42, - [1][0][RTW89_MKK][10] = 36, + [1][0][RTW89_FCC][10] = 54, + [1][0][RTW89_ETSI][10] = 40, + [1][0][RTW89_MKK][10] = 34, [1][0][RTW89_IC][10] = 62, [1][0][RTW89_ACMA][10] = 34, - [1][0][RTW89_FCC][12] = 64, - [1][0][RTW89_ETSI][12] = 42, - [1][0][RTW89_MKK][12] = 36, + [1][0][RTW89_FCC][12] = 56, + [1][0][RTW89_ETSI][12] = 40, + [1][0][RTW89_MKK][12] = 46, [1][0][RTW89_IC][12] = 64, [1][0][RTW89_ACMA][12] = 34, - [1][0][RTW89_FCC][14] = 62, - [1][0][RTW89_ETSI][14] = 42, - [1][0][RTW89_MKK][14] = 36, + [1][0][RTW89_FCC][14] = 54, + [1][0][RTW89_ETSI][14] = 40, + [1][0][RTW89_MKK][14] = 46, [1][0][RTW89_IC][14] = 62, [1][0][RTW89_ACMA][14] = 34, - [1][0][RTW89_FCC][15] = 62, - [1][0][RTW89_ETSI][15] = 42, - [1][0][RTW89_MKK][15] = 54, + [1][0][RTW89_FCC][15] = 54, + [1][0][RTW89_ETSI][15] = 40, + [1][0][RTW89_MKK][15] = 62, [1][0][RTW89_IC][15] = 62, [1][0][RTW89_ACMA][15] = 34, - [1][0][RTW89_FCC][17] = 62, - 
[1][0][RTW89_ETSI][17] = 42, - [1][0][RTW89_MKK][17] = 58, + [1][0][RTW89_FCC][17] = 54, + [1][0][RTW89_ETSI][17] = 40, + [1][0][RTW89_MKK][17] = 68, [1][0][RTW89_IC][17] = 62, [1][0][RTW89_ACMA][17] = 34, - [1][0][RTW89_FCC][19] = 62, - [1][0][RTW89_ETSI][19] = 42, - [1][0][RTW89_MKK][19] = 58, + [1][0][RTW89_FCC][19] = 54, + [1][0][RTW89_ETSI][19] = 40, + [1][0][RTW89_MKK][19] = 68, [1][0][RTW89_IC][19] = 62, [1][0][RTW89_ACMA][19] = 34, - [1][0][RTW89_FCC][21] = 62, - [1][0][RTW89_ETSI][21] = 42, - [1][0][RTW89_MKK][21] = 58, + [1][0][RTW89_FCC][21] = 54, + [1][0][RTW89_ETSI][21] = 40, + [1][0][RTW89_MKK][21] = 68, [1][0][RTW89_IC][21] = 62, [1][0][RTW89_ACMA][21] = 34, - [1][0][RTW89_FCC][23] = 62, - [1][0][RTW89_ETSI][23] = 42, - [1][0][RTW89_MKK][23] = 58, + [1][0][RTW89_FCC][23] = 54, + [1][0][RTW89_ETSI][23] = 40, + [1][0][RTW89_MKK][23] = 68, [1][0][RTW89_IC][23] = 62, [1][0][RTW89_ACMA][23] = 34, - [1][0][RTW89_FCC][25] = 62, - [1][0][RTW89_ETSI][25] = 42, - [1][0][RTW89_MKK][25] = 58, + [1][0][RTW89_FCC][25] = 54, + [1][0][RTW89_ETSI][25] = 40, + [1][0][RTW89_MKK][25] = 68, [1][0][RTW89_IC][25] = 127, [1][0][RTW89_ACMA][25] = 127, - [1][0][RTW89_FCC][27] = 62, - [1][0][RTW89_ETSI][27] = 44, - [1][0][RTW89_MKK][27] = 58, + [1][0][RTW89_FCC][27] = 54, + [1][0][RTW89_ETSI][27] = 42, + [1][0][RTW89_MKK][27] = 68, [1][0][RTW89_IC][27] = 127, [1][0][RTW89_ACMA][27] = 127, - [1][0][RTW89_FCC][29] = 62, - [1][0][RTW89_ETSI][29] = 44, - [1][0][RTW89_MKK][29] = 58, + [1][0][RTW89_FCC][29] = 54, + [1][0][RTW89_ETSI][29] = 42, + [1][0][RTW89_MKK][29] = 68, [1][0][RTW89_IC][29] = 127, [1][0][RTW89_ACMA][29] = 127, - [1][0][RTW89_FCC][31] = 62, - [1][0][RTW89_ETSI][31] = 44, - [1][0][RTW89_MKK][31] = 58, + [1][0][RTW89_FCC][31] = 54, + [1][0][RTW89_ETSI][31] = 42, + [1][0][RTW89_MKK][31] = 68, [1][0][RTW89_IC][31] = 62, [1][0][RTW89_ACMA][31] = 34, - [1][0][RTW89_FCC][33] = 62, - [1][0][RTW89_ETSI][33] = 44, - [1][0][RTW89_MKK][33] = 58, + [1][0][RTW89_FCC][33] = 54, + [1][0][RTW89_ETSI][33] = 42, + [1][0][RTW89_MKK][33] = 68, [1][0][RTW89_IC][33] = 62, [1][0][RTW89_ACMA][33] = 34, - [1][0][RTW89_FCC][35] = 62, - [1][0][RTW89_ETSI][35] = 44, - [1][0][RTW89_MKK][35] = 58, + [1][0][RTW89_FCC][35] = 54, + [1][0][RTW89_ETSI][35] = 42, + [1][0][RTW89_MKK][35] = 68, [1][0][RTW89_IC][35] = 62, [1][0][RTW89_ACMA][35] = 34, - [1][0][RTW89_FCC][37] = 64, + [1][0][RTW89_FCC][37] = 56, [1][0][RTW89_ETSI][37] = 127, - [1][0][RTW89_MKK][37] = 52, + [1][0][RTW89_MKK][37] = 68, [1][0][RTW89_IC][37] = 64, [1][0][RTW89_ACMA][37] = 64, - [1][0][RTW89_FCC][38] = 84, - [1][0][RTW89_ETSI][38] = 30, + [1][0][RTW89_FCC][38] = 76, + [1][0][RTW89_ETSI][38] = 28, [1][0][RTW89_MKK][38] = 127, [1][0][RTW89_IC][38] = 84, [1][0][RTW89_ACMA][38] = 84, - [1][0][RTW89_FCC][40] = 84, - [1][0][RTW89_ETSI][40] = 30, + [1][0][RTW89_FCC][40] = 76, + [1][0][RTW89_ETSI][40] = 28, [1][0][RTW89_MKK][40] = 127, [1][0][RTW89_IC][40] = 84, [1][0][RTW89_ACMA][40] = 84, - [1][0][RTW89_FCC][42] = 84, - [1][0][RTW89_ETSI][42] = 30, + [1][0][RTW89_FCC][42] = 76, + [1][0][RTW89_ETSI][42] = 28, [1][0][RTW89_MKK][42] = 127, [1][0][RTW89_IC][42] = 84, [1][0][RTW89_ACMA][42] = 84, - [1][0][RTW89_FCC][44] = 84, - [1][0][RTW89_ETSI][44] = 30, + [1][0][RTW89_FCC][44] = 76, + [1][0][RTW89_ETSI][44] = 28, [1][0][RTW89_MKK][44] = 127, [1][0][RTW89_IC][44] = 84, [1][0][RTW89_ACMA][44] = 84, - [1][0][RTW89_FCC][46] = 84, - [1][0][RTW89_ETSI][46] = 30, + [1][0][RTW89_FCC][46] = 76, + [1][0][RTW89_ETSI][46] = 28, [1][0][RTW89_MKK][46] = 127, 
[1][0][RTW89_IC][46] = 84, [1][0][RTW89_ACMA][46] = 84, - [1][0][RTW89_FCC][48] = 44, + [1][0][RTW89_FCC][48] = 36, [1][0][RTW89_ETSI][48] = 127, [1][0][RTW89_MKK][48] = 127, [1][0][RTW89_IC][48] = 127, [1][0][RTW89_ACMA][48] = 127, - [1][0][RTW89_FCC][50] = 44, + [1][0][RTW89_FCC][50] = 36, [1][0][RTW89_ETSI][50] = 127, [1][0][RTW89_MKK][50] = 127, [1][0][RTW89_IC][50] = 127, [1][0][RTW89_ACMA][50] = 127, - [1][0][RTW89_FCC][52] = 44, + [1][0][RTW89_FCC][52] = 36, [1][0][RTW89_ETSI][52] = 127, [1][0][RTW89_MKK][52] = 127, [1][0][RTW89_IC][52] = 127, [1][0][RTW89_ACMA][52] = 127, - [1][1][RTW89_FCC][0] = 42, - [1][1][RTW89_ETSI][0] = 32, - [1][1][RTW89_MKK][0] = 22, + [1][1][RTW89_FCC][0] = 34, + [1][1][RTW89_ETSI][0] = 30, + [1][1][RTW89_MKK][0] = 34, [1][1][RTW89_IC][0] = 10, [1][1][RTW89_ACMA][0] = 22, - [1][1][RTW89_FCC][2] = 44, - [1][1][RTW89_ETSI][2] = 32, - [1][1][RTW89_MKK][2] = 22, + [1][1][RTW89_FCC][2] = 36, + [1][1][RTW89_ETSI][2] = 30, + [1][1][RTW89_MKK][2] = 34, [1][1][RTW89_IC][2] = 14, [1][1][RTW89_ACMA][2] = 22, - [1][1][RTW89_FCC][4] = 42, - [1][1][RTW89_ETSI][4] = 32, - [1][1][RTW89_MKK][4] = 20, + [1][1][RTW89_FCC][4] = 34, + [1][1][RTW89_ETSI][4] = 30, + [1][1][RTW89_MKK][4] = 26, [1][1][RTW89_IC][4] = 10, [1][1][RTW89_ACMA][4] = 22, - [1][1][RTW89_FCC][6] = 42, - [1][1][RTW89_ETSI][6] = 32, - [1][1][RTW89_MKK][6] = 20, + [1][1][RTW89_FCC][6] = 34, + [1][1][RTW89_ETSI][6] = 30, + [1][1][RTW89_MKK][6] = 26, [1][1][RTW89_IC][6] = 10, [1][1][RTW89_ACMA][6] = 22, - [1][1][RTW89_FCC][8] = 44, - [1][1][RTW89_ETSI][8] = 32, + [1][1][RTW89_FCC][8] = 36, + [1][1][RTW89_ETSI][8] = 30, [1][1][RTW89_MKK][8] = 20, [1][1][RTW89_IC][8] = 44, [1][1][RTW89_ACMA][8] = 22, - [1][1][RTW89_FCC][10] = 44, - [1][1][RTW89_ETSI][10] = 32, + [1][1][RTW89_FCC][10] = 36, + [1][1][RTW89_ETSI][10] = 30, [1][1][RTW89_MKK][10] = 20, [1][1][RTW89_IC][10] = 44, [1][1][RTW89_ACMA][10] = 22, - [1][1][RTW89_FCC][12] = 46, - [1][1][RTW89_ETSI][12] = 32, - [1][1][RTW89_MKK][12] = 22, + [1][1][RTW89_FCC][12] = 38, + [1][1][RTW89_ETSI][12] = 30, + [1][1][RTW89_MKK][12] = 34, [1][1][RTW89_IC][12] = 46, [1][1][RTW89_ACMA][12] = 22, - [1][1][RTW89_FCC][14] = 42, - [1][1][RTW89_ETSI][14] = 32, - [1][1][RTW89_MKK][14] = 22, + [1][1][RTW89_FCC][14] = 34, + [1][1][RTW89_ETSI][14] = 30, + [1][1][RTW89_MKK][14] = 34, [1][1][RTW89_IC][14] = 40, [1][1][RTW89_ACMA][14] = 22, - [1][1][RTW89_FCC][15] = 42, - [1][1][RTW89_ETSI][15] = 30, - [1][1][RTW89_MKK][15] = 42, + [1][1][RTW89_FCC][15] = 34, + [1][1][RTW89_ETSI][15] = 28, + [1][1][RTW89_MKK][15] = 56, [1][1][RTW89_IC][15] = 42, [1][1][RTW89_ACMA][15] = 22, - [1][1][RTW89_FCC][17] = 42, - [1][1][RTW89_ETSI][17] = 30, - [1][1][RTW89_MKK][17] = 44, + [1][1][RTW89_FCC][17] = 34, + [1][1][RTW89_ETSI][17] = 28, + [1][1][RTW89_MKK][17] = 58, [1][1][RTW89_IC][17] = 42, [1][1][RTW89_ACMA][17] = 22, - [1][1][RTW89_FCC][19] = 42, - [1][1][RTW89_ETSI][19] = 30, - [1][1][RTW89_MKK][19] = 44, + [1][1][RTW89_FCC][19] = 34, + [1][1][RTW89_ETSI][19] = 28, + [1][1][RTW89_MKK][19] = 58, [1][1][RTW89_IC][19] = 42, [1][1][RTW89_ACMA][19] = 22, - [1][1][RTW89_FCC][21] = 42, - [1][1][RTW89_ETSI][21] = 30, - [1][1][RTW89_MKK][21] = 44, + [1][1][RTW89_FCC][21] = 34, + [1][1][RTW89_ETSI][21] = 28, + [1][1][RTW89_MKK][21] = 58, [1][1][RTW89_IC][21] = 42, [1][1][RTW89_ACMA][21] = 22, - [1][1][RTW89_FCC][23] = 42, - [1][1][RTW89_ETSI][23] = 30, - [1][1][RTW89_MKK][23] = 44, + [1][1][RTW89_FCC][23] = 34, + [1][1][RTW89_ETSI][23] = 28, + [1][1][RTW89_MKK][23] = 58, [1][1][RTW89_IC][23] = 42, 
[1][1][RTW89_ACMA][23] = 22, - [1][1][RTW89_FCC][25] = 42, - [1][1][RTW89_ETSI][25] = 30, - [1][1][RTW89_MKK][25] = 44, + [1][1][RTW89_FCC][25] = 34, + [1][1][RTW89_ETSI][25] = 28, + [1][1][RTW89_MKK][25] = 58, [1][1][RTW89_IC][25] = 127, [1][1][RTW89_ACMA][25] = 127, - [1][1][RTW89_FCC][27] = 42, - [1][1][RTW89_ETSI][27] = 32, - [1][1][RTW89_MKK][27] = 44, + [1][1][RTW89_FCC][27] = 34, + [1][1][RTW89_ETSI][27] = 30, + [1][1][RTW89_MKK][27] = 58, [1][1][RTW89_IC][27] = 127, [1][1][RTW89_ACMA][27] = 127, - [1][1][RTW89_FCC][29] = 42, - [1][1][RTW89_ETSI][29] = 32, - [1][1][RTW89_MKK][29] = 44, + [1][1][RTW89_FCC][29] = 34, + [1][1][RTW89_ETSI][29] = 30, + [1][1][RTW89_MKK][29] = 58, [1][1][RTW89_IC][29] = 127, [1][1][RTW89_ACMA][29] = 127, - [1][1][RTW89_FCC][31] = 42, - [1][1][RTW89_ETSI][31] = 32, - [1][1][RTW89_MKK][31] = 44, + [1][1][RTW89_FCC][31] = 34, + [1][1][RTW89_ETSI][31] = 30, + [1][1][RTW89_MKK][31] = 58, [1][1][RTW89_IC][31] = 38, [1][1][RTW89_ACMA][31] = 22, - [1][1][RTW89_FCC][33] = 40, - [1][1][RTW89_ETSI][33] = 32, - [1][1][RTW89_MKK][33] = 44, + [1][1][RTW89_FCC][33] = 32, + [1][1][RTW89_ETSI][33] = 30, + [1][1][RTW89_MKK][33] = 58, [1][1][RTW89_IC][33] = 38, [1][1][RTW89_ACMA][33] = 22, - [1][1][RTW89_FCC][35] = 40, - [1][1][RTW89_ETSI][35] = 32, - [1][1][RTW89_MKK][35] = 44, + [1][1][RTW89_FCC][35] = 32, + [1][1][RTW89_ETSI][35] = 30, + [1][1][RTW89_MKK][35] = 58, [1][1][RTW89_IC][35] = 38, [1][1][RTW89_ACMA][35] = 22, - [1][1][RTW89_FCC][37] = 48, + [1][1][RTW89_FCC][37] = 40, [1][1][RTW89_ETSI][37] = 127, - [1][1][RTW89_MKK][37] = 42, + [1][1][RTW89_MKK][37] = 58, [1][1][RTW89_IC][37] = 48, [1][1][RTW89_ACMA][37] = 48, - [1][1][RTW89_FCC][38] = 84, - [1][1][RTW89_ETSI][38] = 18, + [1][1][RTW89_FCC][38] = 76, + [1][1][RTW89_ETSI][38] = 16, [1][1][RTW89_MKK][38] = 127, [1][1][RTW89_IC][38] = 84, [1][1][RTW89_ACMA][38] = 82, - [1][1][RTW89_FCC][40] = 84, - [1][1][RTW89_ETSI][40] = 18, + [1][1][RTW89_FCC][40] = 76, + [1][1][RTW89_ETSI][40] = 16, [1][1][RTW89_MKK][40] = 127, [1][1][RTW89_IC][40] = 84, [1][1][RTW89_ACMA][40] = 82, - [1][1][RTW89_FCC][42] = 84, - [1][1][RTW89_ETSI][42] = 18, + [1][1][RTW89_FCC][42] = 76, + [1][1][RTW89_ETSI][42] = 16, [1][1][RTW89_MKK][42] = 127, [1][1][RTW89_IC][42] = 84, [1][1][RTW89_ACMA][42] = 84, - [1][1][RTW89_FCC][44] = 84, - [1][1][RTW89_ETSI][44] = 18, + [1][1][RTW89_FCC][44] = 76, + [1][1][RTW89_ETSI][44] = 16, [1][1][RTW89_MKK][44] = 127, [1][1][RTW89_IC][44] = 84, [1][1][RTW89_ACMA][44] = 84, - [1][1][RTW89_FCC][46] = 84, - [1][1][RTW89_ETSI][46] = 18, + [1][1][RTW89_FCC][46] = 76, + [1][1][RTW89_ETSI][46] = 16, [1][1][RTW89_MKK][46] = 127, [1][1][RTW89_IC][46] = 84, [1][1][RTW89_ACMA][46] = 84, - [1][1][RTW89_FCC][48] = 32, + [1][1][RTW89_FCC][48] = 24, [1][1][RTW89_ETSI][48] = 127, [1][1][RTW89_MKK][48] = 127, [1][1][RTW89_IC][48] = 127, [1][1][RTW89_ACMA][48] = 127, - [1][1][RTW89_FCC][50] = 32, + [1][1][RTW89_FCC][50] = 24, [1][1][RTW89_ETSI][50] = 127, [1][1][RTW89_MKK][50] = 127, [1][1][RTW89_IC][50] = 127, [1][1][RTW89_ACMA][50] = 127, - [1][1][RTW89_FCC][52] = 32, + [1][1][RTW89_FCC][52] = 24, [1][1][RTW89_ETSI][52] = 127, [1][1][RTW89_MKK][52] = 127, [1][1][RTW89_IC][52] = 127, [1][1][RTW89_ACMA][52] = 127, - [2][0][RTW89_FCC][0] = 70, - [2][0][RTW89_ETSI][0] = 54, - [2][0][RTW89_MKK][0] = 48, + [2][0][RTW89_FCC][0] = 62, + [2][0][RTW89_ETSI][0] = 52, + [2][0][RTW89_MKK][0] = 60, [2][0][RTW89_IC][0] = 46, [2][0][RTW89_ACMA][0] = 48, - [2][0][RTW89_FCC][2] = 70, - [2][0][RTW89_ETSI][2] = 54, - [2][0][RTW89_MKK][2] = 48, + 
[2][0][RTW89_FCC][2] = 62, + [2][0][RTW89_ETSI][2] = 52, + [2][0][RTW89_MKK][2] = 60, [2][0][RTW89_IC][2] = 46, [2][0][RTW89_ACMA][2] = 48, - [2][0][RTW89_FCC][4] = 70, - [2][0][RTW89_ETSI][4] = 54, - [2][0][RTW89_MKK][4] = 48, + [2][0][RTW89_FCC][4] = 62, + [2][0][RTW89_ETSI][4] = 52, + [2][0][RTW89_MKK][4] = 50, [2][0][RTW89_IC][4] = 46, [2][0][RTW89_ACMA][4] = 48, - [2][0][RTW89_FCC][6] = 70, - [2][0][RTW89_ETSI][6] = 54, - [2][0][RTW89_MKK][6] = 48, + [2][0][RTW89_FCC][6] = 62, + [2][0][RTW89_ETSI][6] = 52, + [2][0][RTW89_MKK][6] = 50, [2][0][RTW89_IC][6] = 46, [2][0][RTW89_ACMA][6] = 48, - [2][0][RTW89_FCC][8] = 70, - [2][0][RTW89_ETSI][8] = 54, - [2][0][RTW89_MKK][8] = 48, + [2][0][RTW89_FCC][8] = 62, + [2][0][RTW89_ETSI][8] = 52, + [2][0][RTW89_MKK][8] = 44, [2][0][RTW89_IC][8] = 66, [2][0][RTW89_ACMA][8] = 48, - [2][0][RTW89_FCC][10] = 70, - [2][0][RTW89_ETSI][10] = 54, - [2][0][RTW89_MKK][10] = 48, + [2][0][RTW89_FCC][10] = 62, + [2][0][RTW89_ETSI][10] = 52, + [2][0][RTW89_MKK][10] = 44, [2][0][RTW89_IC][10] = 66, [2][0][RTW89_ACMA][10] = 48, - [2][0][RTW89_FCC][12] = 70, - [2][0][RTW89_ETSI][12] = 54, - [2][0][RTW89_MKK][12] = 46, + [2][0][RTW89_FCC][12] = 62, + [2][0][RTW89_ETSI][12] = 52, + [2][0][RTW89_MKK][12] = 58, [2][0][RTW89_IC][12] = 66, [2][0][RTW89_ACMA][12] = 48, - [2][0][RTW89_FCC][14] = 70, - [2][0][RTW89_ETSI][14] = 54, - [2][0][RTW89_MKK][14] = 46, + [2][0][RTW89_FCC][14] = 62, + [2][0][RTW89_ETSI][14] = 52, + [2][0][RTW89_MKK][14] = 58, [2][0][RTW89_IC][14] = 66, [2][0][RTW89_ACMA][14] = 48, - [2][0][RTW89_FCC][15] = 70, - [2][0][RTW89_ETSI][15] = 54, + [2][0][RTW89_FCC][15] = 62, + [2][0][RTW89_ETSI][15] = 52, [2][0][RTW89_MKK][15] = 68, [2][0][RTW89_IC][15] = 70, [2][0][RTW89_ACMA][15] = 48, - [2][0][RTW89_FCC][17] = 70, - [2][0][RTW89_ETSI][17] = 54, - [2][0][RTW89_MKK][17] = 70, + [2][0][RTW89_FCC][17] = 62, + [2][0][RTW89_ETSI][17] = 52, + [2][0][RTW89_MKK][17] = 74, [2][0][RTW89_IC][17] = 70, [2][0][RTW89_ACMA][17] = 48, - [2][0][RTW89_FCC][19] = 70, - [2][0][RTW89_ETSI][19] = 54, - [2][0][RTW89_MKK][19] = 70, + [2][0][RTW89_FCC][19] = 62, + [2][0][RTW89_ETSI][19] = 52, + [2][0][RTW89_MKK][19] = 74, [2][0][RTW89_IC][19] = 70, [2][0][RTW89_ACMA][19] = 48, - [2][0][RTW89_FCC][21] = 70, - [2][0][RTW89_ETSI][21] = 54, - [2][0][RTW89_MKK][21] = 70, + [2][0][RTW89_FCC][21] = 62, + [2][0][RTW89_ETSI][21] = 52, + [2][0][RTW89_MKK][21] = 74, [2][0][RTW89_IC][21] = 70, [2][0][RTW89_ACMA][21] = 48, - [2][0][RTW89_FCC][23] = 70, - [2][0][RTW89_ETSI][23] = 54, - [2][0][RTW89_MKK][23] = 70, + [2][0][RTW89_FCC][23] = 62, + [2][0][RTW89_ETSI][23] = 52, + [2][0][RTW89_MKK][23] = 74, [2][0][RTW89_IC][23] = 70, [2][0][RTW89_ACMA][23] = 48, - [2][0][RTW89_FCC][25] = 70, - [2][0][RTW89_ETSI][25] = 54, - [2][0][RTW89_MKK][25] = 70, + [2][0][RTW89_FCC][25] = 62, + [2][0][RTW89_ETSI][25] = 52, + [2][0][RTW89_MKK][25] = 74, [2][0][RTW89_IC][25] = 127, [2][0][RTW89_ACMA][25] = 127, - [2][0][RTW89_FCC][27] = 70, - [2][0][RTW89_ETSI][27] = 54, - [2][0][RTW89_MKK][27] = 70, + [2][0][RTW89_FCC][27] = 62, + [2][0][RTW89_ETSI][27] = 52, + [2][0][RTW89_MKK][27] = 74, [2][0][RTW89_IC][27] = 127, [2][0][RTW89_ACMA][27] = 127, - [2][0][RTW89_FCC][29] = 70, - [2][0][RTW89_ETSI][29] = 54, - [2][0][RTW89_MKK][29] = 70, + [2][0][RTW89_FCC][29] = 62, + [2][0][RTW89_ETSI][29] = 52, + [2][0][RTW89_MKK][29] = 74, [2][0][RTW89_IC][29] = 127, [2][0][RTW89_ACMA][29] = 127, - [2][0][RTW89_FCC][31] = 70, - [2][0][RTW89_ETSI][31] = 54, - [2][0][RTW89_MKK][31] = 70, + [2][0][RTW89_FCC][31] = 62, + 
[2][0][RTW89_ETSI][31] = 52, + [2][0][RTW89_MKK][31] = 74, [2][0][RTW89_IC][31] = 72, [2][0][RTW89_ACMA][31] = 48, - [2][0][RTW89_FCC][33] = 72, - [2][0][RTW89_ETSI][33] = 54, - [2][0][RTW89_MKK][33] = 70, + [2][0][RTW89_FCC][33] = 64, + [2][0][RTW89_ETSI][33] = 52, + [2][0][RTW89_MKK][33] = 74, [2][0][RTW89_IC][33] = 72, [2][0][RTW89_ACMA][33] = 48, - [2][0][RTW89_FCC][35] = 72, - [2][0][RTW89_ETSI][35] = 54, - [2][0][RTW89_MKK][35] = 70, + [2][0][RTW89_FCC][35] = 64, + [2][0][RTW89_ETSI][35] = 52, + [2][0][RTW89_MKK][35] = 74, [2][0][RTW89_IC][35] = 72, [2][0][RTW89_ACMA][35] = 48, - [2][0][RTW89_FCC][37] = 70, + [2][0][RTW89_FCC][37] = 62, [2][0][RTW89_ETSI][37] = 127, - [2][0][RTW89_MKK][37] = 66, + [2][0][RTW89_MKK][37] = 74, [2][0][RTW89_IC][37] = 70, [2][0][RTW89_ACMA][37] = 76, - [2][0][RTW89_FCC][38] = 84, - [2][0][RTW89_ETSI][38] = 30, + [2][0][RTW89_FCC][38] = 76, + [2][0][RTW89_ETSI][38] = 28, [2][0][RTW89_MKK][38] = 127, [2][0][RTW89_IC][38] = 84, [2][0][RTW89_ACMA][38] = 84, - [2][0][RTW89_FCC][40] = 84, - [2][0][RTW89_ETSI][40] = 30, + [2][0][RTW89_FCC][40] = 76, + [2][0][RTW89_ETSI][40] = 28, [2][0][RTW89_MKK][40] = 127, [2][0][RTW89_IC][40] = 84, [2][0][RTW89_ACMA][40] = 84, - [2][0][RTW89_FCC][42] = 84, - [2][0][RTW89_ETSI][42] = 30, + [2][0][RTW89_FCC][42] = 76, + [2][0][RTW89_ETSI][42] = 28, [2][0][RTW89_MKK][42] = 127, [2][0][RTW89_IC][42] = 84, [2][0][RTW89_ACMA][42] = 84, - [2][0][RTW89_FCC][44] = 84, - [2][0][RTW89_ETSI][44] = 30, + [2][0][RTW89_FCC][44] = 76, + [2][0][RTW89_ETSI][44] = 28, [2][0][RTW89_MKK][44] = 127, [2][0][RTW89_IC][44] = 84, [2][0][RTW89_ACMA][44] = 84, - [2][0][RTW89_FCC][46] = 84, - [2][0][RTW89_ETSI][46] = 30, + [2][0][RTW89_FCC][46] = 76, + [2][0][RTW89_ETSI][46] = 28, [2][0][RTW89_MKK][46] = 127, [2][0][RTW89_IC][46] = 84, [2][0][RTW89_ACMA][46] = 84, - [2][0][RTW89_FCC][48] = 56, + [2][0][RTW89_FCC][48] = 48, [2][0][RTW89_ETSI][48] = 127, [2][0][RTW89_MKK][48] = 127, [2][0][RTW89_IC][48] = 127, [2][0][RTW89_ACMA][48] = 127, - [2][0][RTW89_FCC][50] = 56, + [2][0][RTW89_FCC][50] = 48, [2][0][RTW89_ETSI][50] = 127, [2][0][RTW89_MKK][50] = 127, [2][0][RTW89_IC][50] = 127, [2][0][RTW89_ACMA][50] = 127, - [2][0][RTW89_FCC][52] = 56, + [2][0][RTW89_FCC][52] = 48, [2][0][RTW89_ETSI][52] = 127, [2][0][RTW89_MKK][52] = 127, [2][0][RTW89_IC][52] = 127, [2][0][RTW89_ACMA][52] = 127, - [2][1][RTW89_FCC][0] = 50, - [2][1][RTW89_ETSI][0] = 42, - [2][1][RTW89_MKK][0] = 36, + [2][1][RTW89_FCC][0] = 42, + [2][1][RTW89_ETSI][0] = 40, + [2][1][RTW89_MKK][0] = 44, [2][1][RTW89_IC][0] = 20, [2][1][RTW89_ACMA][0] = 36, - [2][1][RTW89_FCC][2] = 50, - [2][1][RTW89_ETSI][2] = 42, - [2][1][RTW89_MKK][2] = 36, + [2][1][RTW89_FCC][2] = 42, + [2][1][RTW89_ETSI][2] = 40, + [2][1][RTW89_MKK][2] = 44, [2][1][RTW89_IC][2] = 18, [2][1][RTW89_ACMA][2] = 36, - [2][1][RTW89_FCC][4] = 50, - [2][1][RTW89_ETSI][4] = 42, + [2][1][RTW89_FCC][4] = 42, + [2][1][RTW89_ETSI][4] = 40, [2][1][RTW89_MKK][4] = 36, [2][1][RTW89_IC][4] = 22, [2][1][RTW89_ACMA][4] = 36, - [2][1][RTW89_FCC][6] = 50, - [2][1][RTW89_ETSI][6] = 42, + [2][1][RTW89_FCC][6] = 42, + [2][1][RTW89_ETSI][6] = 40, [2][1][RTW89_MKK][6] = 36, [2][1][RTW89_IC][6] = 22, [2][1][RTW89_ACMA][6] = 36, - [2][1][RTW89_FCC][8] = 50, - [2][1][RTW89_ETSI][8] = 42, - [2][1][RTW89_MKK][8] = 34, + [2][1][RTW89_FCC][8] = 42, + [2][1][RTW89_ETSI][8] = 40, + [2][1][RTW89_MKK][8] = 32, [2][1][RTW89_IC][8] = 50, [2][1][RTW89_ACMA][8] = 36, - [2][1][RTW89_FCC][10] = 50, - [2][1][RTW89_ETSI][10] = 42, - [2][1][RTW89_MKK][10] = 34, + 
[2][1][RTW89_FCC][10] = 42, + [2][1][RTW89_ETSI][10] = 40, + [2][1][RTW89_MKK][10] = 32, [2][1][RTW89_IC][10] = 50, [2][1][RTW89_ACMA][10] = 36, - [2][1][RTW89_FCC][12] = 52, - [2][1][RTW89_ETSI][12] = 42, - [2][1][RTW89_MKK][12] = 36, + [2][1][RTW89_FCC][12] = 44, + [2][1][RTW89_ETSI][12] = 40, + [2][1][RTW89_MKK][12] = 44, [2][1][RTW89_IC][12] = 52, [2][1][RTW89_ACMA][12] = 36, - [2][1][RTW89_FCC][14] = 52, - [2][1][RTW89_ETSI][14] = 42, - [2][1][RTW89_MKK][14] = 36, + [2][1][RTW89_FCC][14] = 44, + [2][1][RTW89_ETSI][14] = 40, + [2][1][RTW89_MKK][14] = 44, [2][1][RTW89_IC][14] = 52, [2][1][RTW89_ACMA][14] = 36, - [2][1][RTW89_FCC][15] = 50, - [2][1][RTW89_ETSI][15] = 42, - [2][1][RTW89_MKK][15] = 54, + [2][1][RTW89_FCC][15] = 42, + [2][1][RTW89_ETSI][15] = 40, + [2][1][RTW89_MKK][15] = 66, [2][1][RTW89_IC][15] = 50, [2][1][RTW89_ACMA][15] = 36, - [2][1][RTW89_FCC][17] = 50, - [2][1][RTW89_ETSI][17] = 42, - [2][1][RTW89_MKK][17] = 56, + [2][1][RTW89_FCC][17] = 42, + [2][1][RTW89_ETSI][17] = 40, + [2][1][RTW89_MKK][17] = 66, [2][1][RTW89_IC][17] = 50, [2][1][RTW89_ACMA][17] = 36, - [2][1][RTW89_FCC][19] = 50, - [2][1][RTW89_ETSI][19] = 42, - [2][1][RTW89_MKK][19] = 56, + [2][1][RTW89_FCC][19] = 42, + [2][1][RTW89_ETSI][19] = 40, + [2][1][RTW89_MKK][19] = 66, [2][1][RTW89_IC][19] = 50, [2][1][RTW89_ACMA][19] = 36, - [2][1][RTW89_FCC][21] = 50, - [2][1][RTW89_ETSI][21] = 42, - [2][1][RTW89_MKK][21] = 56, + [2][1][RTW89_FCC][21] = 42, + [2][1][RTW89_ETSI][21] = 40, + [2][1][RTW89_MKK][21] = 66, [2][1][RTW89_IC][21] = 50, [2][1][RTW89_ACMA][21] = 36, - [2][1][RTW89_FCC][23] = 50, - [2][1][RTW89_ETSI][23] = 42, - [2][1][RTW89_MKK][23] = 56, + [2][1][RTW89_FCC][23] = 42, + [2][1][RTW89_ETSI][23] = 40, + [2][1][RTW89_MKK][23] = 66, [2][1][RTW89_IC][23] = 50, [2][1][RTW89_ACMA][23] = 36, - [2][1][RTW89_FCC][25] = 50, - [2][1][RTW89_ETSI][25] = 42, - [2][1][RTW89_MKK][25] = 56, + [2][1][RTW89_FCC][25] = 42, + [2][1][RTW89_ETSI][25] = 40, + [2][1][RTW89_MKK][25] = 66, [2][1][RTW89_IC][25] = 127, [2][1][RTW89_ACMA][25] = 127, - [2][1][RTW89_FCC][27] = 50, - [2][1][RTW89_ETSI][27] = 42, - [2][1][RTW89_MKK][27] = 56, + [2][1][RTW89_FCC][27] = 42, + [2][1][RTW89_ETSI][27] = 40, + [2][1][RTW89_MKK][27] = 66, [2][1][RTW89_IC][27] = 127, [2][1][RTW89_ACMA][27] = 127, - [2][1][RTW89_FCC][29] = 50, - [2][1][RTW89_ETSI][29] = 42, - [2][1][RTW89_MKK][29] = 56, + [2][1][RTW89_FCC][29] = 42, + [2][1][RTW89_ETSI][29] = 40, + [2][1][RTW89_MKK][29] = 66, [2][1][RTW89_IC][29] = 127, [2][1][RTW89_ACMA][29] = 127, - [2][1][RTW89_FCC][31] = 50, - [2][1][RTW89_ETSI][31] = 42, - [2][1][RTW89_MKK][31] = 56, + [2][1][RTW89_FCC][31] = 42, + [2][1][RTW89_ETSI][31] = 40, + [2][1][RTW89_MKK][31] = 66, [2][1][RTW89_IC][31] = 50, [2][1][RTW89_ACMA][31] = 36, - [2][1][RTW89_FCC][33] = 50, - [2][1][RTW89_ETSI][33] = 42, - [2][1][RTW89_MKK][33] = 56, + [2][1][RTW89_FCC][33] = 42, + [2][1][RTW89_ETSI][33] = 40, + [2][1][RTW89_MKK][33] = 66, [2][1][RTW89_IC][33] = 50, [2][1][RTW89_ACMA][33] = 36, - [2][1][RTW89_FCC][35] = 50, - [2][1][RTW89_ETSI][35] = 42, - [2][1][RTW89_MKK][35] = 56, + [2][1][RTW89_FCC][35] = 42, + [2][1][RTW89_ETSI][35] = 40, + [2][1][RTW89_MKK][35] = 66, [2][1][RTW89_IC][35] = 50, [2][1][RTW89_ACMA][35] = 36, - [2][1][RTW89_FCC][37] = 50, + [2][1][RTW89_FCC][37] = 42, [2][1][RTW89_ETSI][37] = 127, - [2][1][RTW89_MKK][37] = 54, + [2][1][RTW89_MKK][37] = 66, [2][1][RTW89_IC][37] = 50, [2][1][RTW89_ACMA][37] = 60, - [2][1][RTW89_FCC][38] = 84, - [2][1][RTW89_ETSI][38] = 18, + [2][1][RTW89_FCC][38] = 76, + 
[2][1][RTW89_ETSI][38] = 16, [2][1][RTW89_MKK][38] = 127, [2][1][RTW89_IC][38] = 84, [2][1][RTW89_ACMA][38] = 84, - [2][1][RTW89_FCC][40] = 84, - [2][1][RTW89_ETSI][40] = 18, + [2][1][RTW89_FCC][40] = 76, + [2][1][RTW89_ETSI][40] = 16, [2][1][RTW89_MKK][40] = 127, [2][1][RTW89_IC][40] = 84, [2][1][RTW89_ACMA][40] = 84, - [2][1][RTW89_FCC][42] = 84, - [2][1][RTW89_ETSI][42] = 18, + [2][1][RTW89_FCC][42] = 76, + [2][1][RTW89_ETSI][42] = 16, [2][1][RTW89_MKK][42] = 127, [2][1][RTW89_IC][42] = 84, [2][1][RTW89_ACMA][42] = 84, - [2][1][RTW89_FCC][44] = 84, - [2][1][RTW89_ETSI][44] = 18, + [2][1][RTW89_FCC][44] = 76, + [2][1][RTW89_ETSI][44] = 16, [2][1][RTW89_MKK][44] = 127, [2][1][RTW89_IC][44] = 84, [2][1][RTW89_ACMA][44] = 84, - [2][1][RTW89_FCC][46] = 84, - [2][1][RTW89_ETSI][46] = 18, + [2][1][RTW89_FCC][46] = 76, + [2][1][RTW89_ETSI][46] = 16, [2][1][RTW89_MKK][46] = 127, [2][1][RTW89_IC][46] = 84, [2][1][RTW89_ACMA][46] = 84, - [2][1][RTW89_FCC][48] = 44, + [2][1][RTW89_FCC][48] = 36, [2][1][RTW89_ETSI][48] = 127, [2][1][RTW89_MKK][48] = 127, [2][1][RTW89_IC][48] = 127, [2][1][RTW89_ACMA][48] = 127, - [2][1][RTW89_FCC][50] = 44, + [2][1][RTW89_FCC][50] = 36, [2][1][RTW89_ETSI][50] = 127, [2][1][RTW89_MKK][50] = 127, [2][1][RTW89_IC][50] = 127, [2][1][RTW89_ACMA][50] = 127, - [2][1][RTW89_FCC][52] = 44, + [2][1][RTW89_FCC][52] = 36, [2][1][RTW89_ETSI][52] = 127, [2][1][RTW89_MKK][52] = 127, [2][1][RTW89_IC][52] = 127, -- cgit v1.2.3 From a06d2dd7e22f93f98beb15c5ae919f6a58fd2635 Mon Sep 17 00:00:00 2001 From: Zong-Zhe Yang Date: Mon, 16 May 2022 08:52:14 +0800 Subject: rtw89: convert rtw89_band to nl80211_band precisely Before 6 GHz band was supported, i.e. only 2 GHz and 5 GHz, they were the same from the numerical point of view. However, after 6 GHz band support, we need to do this conversion logically. 
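For context, the numerical mismatch comes from the two enum layouts: nl80211_band places NL80211_BAND_60GHZ before NL80211_BAND_6GHZ, while rtw89_band has no 60 GHz entry, so casting a driver band value straight into an nl80211 band only works for 2 GHz and 5 GHz. A minimal sketch of the layouts as assumed here (values taken from the enum definitions at the time, shown for illustration only, not a new definition):

	/* driver-internal band enumeration (rtw89) */
	enum rtw89_band {
		RTW89_BAND_2G = 0,
		RTW89_BAND_5G = 1,
		RTW89_BAND_6G = 2,	/* no 60 GHz entry in between */
	};

	/* cfg80211/nl80211 band enumeration */
	enum nl80211_band {
		NL80211_BAND_2GHZ  = 0,
		NL80211_BAND_5GHZ  = 1,
		NL80211_BAND_60GHZ = 2,
		NL80211_BAND_6GHZ  = 3,	/* != RTW89_BAND_6G, hence the explicit conversion */
	};

Without the conversion helper introduced by this patch, passing RTW89_BAND_6G (2) to ieee80211_channel_to_frequency() would be interpreted as NL80211_BAND_60GHZ.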
Signed-off-by: Zong-Zhe Yang Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220516005215.5878-6-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/core.c | 11 +++++++---- drivers/net/wireless/realtek/rtw89/core.h | 14 ++++++++++++++ drivers/net/wireless/realtek/rtw89/fw.c | 2 +- drivers/net/wireless/realtek/rtw89/phy.c | 13 +++++++------ 4 files changed, 29 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index e3317deafa1d..a6a90572e74b 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -1608,10 +1608,13 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev, if (rtwdev->scanning && RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { - rx_status->freq = - ieee80211_channel_to_frequency(hal->current_channel, - hal->current_band_type); - rx_status->band = rtwdev->hal.current_band_type; + u8 chan = hal->current_channel; + u8 band = hal->current_band_type; + enum nl80211_band nl_band; + + nl_band = rtw89_hw_to_nl80211_band(band); + rx_status->freq = ieee80211_channel_to_frequency(chan, nl_band); + rx_status->band = nl_band; } if (desc_info->icv_err || desc_info->crc32_err) diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 2921814842ff..e8a77225a90f 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -3480,6 +3480,20 @@ static inline u8 rtw89_hw_to_rate_info_bw(enum rtw89_bandwidth hw_bw) return RATE_INFO_BW_20; } +static inline +enum nl80211_band rtw89_hw_to_nl80211_band(enum rtw89_band hw_band) +{ + switch (hw_band) { + default: + case RTW89_BAND_2G: + return NL80211_BAND_2GHZ; + case RTW89_BAND_5G: + return NL80211_BAND_5GHZ; + case RTW89_BAND_6G: + return NL80211_BAND_6GHZ; + } +} + static inline enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width) { diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index e4be785709d1..4718aced1428 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -2068,7 +2068,7 @@ static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) struct rtw89_pktofld_info *info, *tmp; u8 idx; - for (idx = RTW89_BAND_2G; idx < NUM_NL80211_BANDS; idx++) { + for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { if (!(rtwdev->chip->support_bands & BIT(idx))) continue; diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 79e4c28495c8..762cdba9d3cf 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -429,27 +429,28 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, RTW89_HW_RATE_MCS16, RTW89_HW_RATE_MCS24}; u8 band = rtwdev->hal.current_band_type; + enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); u8 tx_nss = rtwdev->hal.tx_nss; u8 i; for (i = 0; i < tx_nss; i++) if (!__check_rate_pattern(&next_pattern, hw_rate_he[i], RA_MASK_HE_RATES, RTW89_RA_MODE_HE, - mask->control[band].he_mcs[i], + mask->control[nl_band].he_mcs[i], 0, true)) goto out; for (i = 0; i < tx_nss; i++) if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i], RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT, - mask->control[band].vht_mcs[i], + mask->control[nl_band].vht_mcs[i], 0, true)) goto out; for (i = 0; i < tx_nss; i++) if 
(!__check_rate_pattern(&next_pattern, hw_rate_ht[i], RA_MASK_HT_RATES, RTW89_RA_MODE_HT, - mask->control[band].ht_mcs[i], + mask->control[nl_band].ht_mcs[i], 0, true)) goto out; @@ -457,18 +458,18 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, * require at least one basic rate for ieee80211_set_bitrate_mask, * so the decision just depends on if all bitrates are set or not. */ - sband = rtwdev->hw->wiphy->bands[band]; + sband = rtwdev->hw->wiphy->bands[nl_band]; if (band == RTW89_BAND_2G) { if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1, RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES, RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM, - mask->control[band].legacy, + mask->control[nl_band].legacy, BIT(sband->n_bitrates) - 1, false)) goto out; } else { if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6, RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM, - mask->control[band].legacy, + mask->control[nl_band].legacy, BIT(sband->n_bitrates) - 1, false)) goto out; } -- cgit v1.2.3 From a456021c6f1482b94dce77ebaeef77412cd37dba Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 16 May 2022 08:52:15 +0800 Subject: rtw89: pci: only mask out INT indicator register for disable interrupt v1 The design of INT indicator register (R_AX_PCIE_HIMR00_V1) is to reduce IO during frequent interrupts, because it can stop chip sending interrupt to host if we just set this indicator to 0, not all IMR(s). This indicator register looks like a root interrupt controller of wifi chip. However, we can't set all other IMR(s) to 0 during we are running on interrupt service routine, or the indicator register can't reflect the status of certain interrupt happened during this period, and then miss some interrupts especially SER interrupt events. Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20220516005215.5878-7-pkshih@realtek.com --- drivers/net/wireless/realtek/rtw89/pci.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 2bdce7024f25..0ef7821b2e0f 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -682,9 +682,6 @@ EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) { rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); - rtw89_write32(rtwdev, R_AX_HIMR0, 0); - rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, 0); - rtw89_write32(rtwdev, R_AX_HIMR1, 0); } EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); -- cgit v1.2.3 From 76e1e5df4b7cb8a87ca8b0242aabf06c8dc0d09d Mon Sep 17 00:00:00 2001 From: Xiu Jianfeng Date: Mon, 16 May 2022 17:23:37 +0800 Subject: octeontx2-pf: Use memset_startat() helper in otx2_stop() Use memset_startat() helper to simplify the code, there is no functional change in this patch. 
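For context, a short sketch of what the helper expresses; the struct and field names below are hypothetical stand-ins, not the otx2 definitions:

#include <stddef.h>
#include <string.h>

struct qset_like {
        unsigned int rq_size;   /* ring size setting that must survive */
        unsigned int sqe_cnt;   /* zeroed from here ... */
        unsigned int rqe_cnt;   /* ... through to the end of the struct */
};

/* Open-coded equivalent of memset_startat(q, 0, sqe_cnt): clear everything
 * from the named member to the end of *q, leaving earlier members intact. */
static void clear_from_sqe_cnt(struct qset_like *q)
{
        memset((char *)q + offsetof(struct qset_like, sqe_cnt), 0,
               sizeof(*q) - offsetof(struct qset_like, sqe_cnt));
}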
Signed-off-by: Xiu Jianfeng Link: https://lore.kernel.org/r/20220516092337.131653-1-xiujianfeng@huawei.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index f83e5ca632dd..e193ffbc1fab 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1794,8 +1794,7 @@ int otx2_stop(struct net_device *netdev) kfree(qset->rq); kfree(qset->napi); /* Do not clear RQ/SQ ringsize settings */ - memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0, - sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt)); + memset_startat(qset, 0, sqe_cnt); return 0; } EXPORT_SYMBOL(otx2_stop); -- cgit v1.2.3 From ab4d6357c95f5aa6473cc39382271916091378e3 Mon Sep 17 00:00:00 2001 From: Haowen Bai Date: Mon, 16 May 2022 18:16:52 +0800 Subject: net: thunderx: remove null check after call container_of() container_of() will never return NULL, so remove useless code. Signed-off-by: Haowen Bai Link: https://lore.kernel.org/r/1652696212-17516-1-git-send-email-baihaowen@meizu.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a04aa206fddc..768ea426d49f 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2024,9 +2024,6 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) u8 mode; struct xcast_addr_list *mc; - if (!vf_work) - return; - /* Save message data locally to prevent them from * being overwritten by next ndo_set_rx_mode call(). */ -- cgit v1.2.3 From 5ff0348b7f755aac2770bbfc244f5371e4e55224 Mon Sep 17 00:00:00 2001 From: Guo Zhengkui Date: Mon, 16 May 2022 19:56:25 +0800 Subject: net: smc911x: replace ternary operator with min() Fix the following coccicheck warning: drivers/net/ethernet/smsc/smc911x.c:483:20-22: WARNING opportunity for min() Signed-off-by: Guo Zhengkui Link: https://lore.kernel.org/r/20220516115627.66363-1-guozhengkui@vivo.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/smsc/smc911x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index fc9cef9dcefc..2694287770e6 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -480,7 +480,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) SMC_SET_TX_FIFO(lp, cmdB); DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n"); - PRINT_PKT(buf, len <= 64 ? len : 64); + PRINT_PKT(buf, min(len, 64)); /* Send pkt via PIO or DMA */ #ifdef SMC_USE_DMA -- cgit v1.2.3 From bec67592521ec816371f5f072b1a340e1c2ad434 Mon Sep 17 00:00:00 2001 From: Min Li Date: Mon, 16 May 2022 10:47:06 -0400 Subject: ptp: ptp_clockmatrix: Add PTP_CLK_REQ_EXTTS support Use TOD_READ_SECONDARY for extts to keep TOD_READ_PRIMARY for gettime and settime exclusively. Before this change, TOD_READ_PRIMARY was used for both extts and gettime/settime, which would result in changing TOD read/write triggers between operations. 
Using TOD_READ_SECONDARY would make extts independent of gettime/settime operation Signed-off-by: Min Li Acked-by: Richard Cochran Link: https://lore.kernel.org/r/1652712427-14703-1-git-send-email-min.li.xe@renesas.com Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_clockmatrix.c | 289 ++++++++++++++++++++++++--------------- drivers/ptp/ptp_clockmatrix.h | 5 + include/linux/mfd/idt8a340_reg.h | 12 +- 3 files changed, 196 insertions(+), 110 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index 08e429a06922..25075764eea4 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -239,73 +239,97 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm) return -EBUSY; } -static int _idtcm_set_scsr_read_trig(struct idtcm_channel *channel, - enum scsr_read_trig_sel trig, u8 ref) +static int arm_tod_read_trig_sel_refclk(struct idtcm_channel *channel, u8 ref) { struct idtcm *idtcm = channel->idtcm; - u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD); - u8 val; + u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_SECONDARY_CMD); + u8 val = 0; int err; - if (trig == SCSR_TOD_READ_TRIG_SEL_REFCLK) { - err = idtcm_read(idtcm, channel->tod_read_primary, - TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val)); - if (err) - return err; - - val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT); - val |= (ref << WR_REF_INDEX_SHIFT); - - err = idtcm_write(idtcm, channel->tod_read_primary, - TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val)); - if (err) - return err; - } + val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT); + val |= (ref << WR_REF_INDEX_SHIFT); - err = idtcm_read(idtcm, channel->tod_read_primary, - tod_read_cmd, &val, sizeof(val)); + err = idtcm_write(idtcm, channel->tod_read_secondary, + TOD_READ_SECONDARY_SEL_CFG_0, &val, sizeof(val)); if (err) return err; - val &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT); - val |= (trig << TOD_READ_TRIGGER_SHIFT); - val &= ~TOD_READ_TRIGGER_MODE; /* single shot */ + val = 0 | (SCSR_TOD_READ_TRIG_SEL_REFCLK << TOD_READ_TRIGGER_SHIFT); + + err = idtcm_write(idtcm, channel->tod_read_secondary, tod_read_cmd, + &val, sizeof(val)); + if (err) + dev_err(idtcm->dev, "%s: err = %d", __func__, err); - err = idtcm_write(idtcm, channel->tod_read_primary, - tod_read_cmd, &val, sizeof(val)); return err; } -static int idtcm_enable_extts(struct idtcm_channel *channel, u8 todn, u8 ref, - bool enable) +static bool is_single_shot(u8 mask) { - struct idtcm *idtcm = channel->idtcm; - u8 old_mask = idtcm->extts_mask; - u8 mask = 1 << todn; + /* Treat single bit ToD masks as continuous trigger */ + return mask <= 8 && is_power_of_2(mask); +} + +static int idtcm_extts_enable(struct idtcm_channel *channel, + struct ptp_clock_request *rq, int on) +{ + u8 index = rq->extts.index; + struct idtcm *idtcm; + u8 mask = 1 << index; int err = 0; + u8 old_mask; + int ref; + + idtcm = channel->idtcm; + old_mask = idtcm->extts_mask; - if (todn >= MAX_TOD) + /* Reject requests with unsupported flags */ + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + /* Reject requests to enable time stamping on falling edge */ + if ((rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_FALLING_EDGE)) + return -EOPNOTSUPP; + + if (index >= MAX_TOD) return -EINVAL; - if (enable) { - if (ref > 0xF) /* E_REF_CLK15 */ - return -EINVAL; - if (idtcm->extts_mask & mask) - return 0; - err = 
_idtcm_set_scsr_read_trig(&idtcm->channel[todn], - SCSR_TOD_READ_TRIG_SEL_REFCLK, - ref); + if (on) { + /* Support triggering more than one TOD_0/1/2/3 by same pin */ + /* Use the pin configured for the channel */ + ref = ptp_find_pin(channel->ptp_clock, PTP_PF_EXTTS, channel->tod); + + if (ref < 0) { + dev_err(idtcm->dev, "%s: No valid pin found for TOD%d!\n", + __func__, channel->tod); + return -EBUSY; + } + + err = arm_tod_read_trig_sel_refclk(&idtcm->channel[index], ref); + if (err == 0) { idtcm->extts_mask |= mask; - idtcm->event_channel[todn] = channel; - idtcm->channel[todn].refn = ref; + idtcm->event_channel[index] = channel; + idtcm->channel[index].refn = ref; + idtcm->extts_single_shot = is_single_shot(idtcm->extts_mask); + + if (old_mask) + return 0; + + schedule_delayed_work(&idtcm->extts_work, + msecs_to_jiffies(EXTTS_PERIOD_MS)); } - } else + } else { idtcm->extts_mask &= ~mask; + idtcm->extts_single_shot = is_single_shot(idtcm->extts_mask); - if (old_mask == 0 && idtcm->extts_mask) - schedule_delayed_work(&idtcm->extts_work, - msecs_to_jiffies(EXTTS_PERIOD_MS)); + if (idtcm->extts_mask == 0) + cancel_delayed_work(&idtcm->extts_work); + } return err; } @@ -371,6 +395,31 @@ static void wait_for_chip_ready(struct idtcm *idtcm) "Continuing while SYS APLL/DPLL is not locked"); } +static int _idtcm_gettime_triggered(struct idtcm_channel *channel, + struct timespec64 *ts) +{ + struct idtcm *idtcm = channel->idtcm; + u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_SECONDARY_CMD); + u8 buf[TOD_BYTE_COUNT]; + u8 trigger; + int err; + + err = idtcm_read(idtcm, channel->tod_read_secondary, + tod_read_cmd, &trigger, sizeof(trigger)); + if (err) + return err; + + if (trigger & TOD_READ_TRIGGER_MASK) + return -EBUSY; + + err = idtcm_read(idtcm, channel->tod_read_secondary, + TOD_READ_SECONDARY_BASE, buf, sizeof(buf)); + if (err) + return err; + + return char_array_to_timespec(buf, sizeof(buf), ts); +} + static int _idtcm_gettime(struct idtcm_channel *channel, struct timespec64 *ts, u8 timeout) { @@ -396,7 +445,7 @@ static int _idtcm_gettime(struct idtcm_channel *channel, } while (trigger & TOD_READ_TRIGGER_MASK); err = idtcm_read(idtcm, channel->tod_read_primary, - TOD_READ_PRIMARY, buf, sizeof(buf)); + TOD_READ_PRIMARY_BASE, buf, sizeof(buf)); if (err) return err; @@ -415,67 +464,38 @@ static int idtcm_extts_check_channel(struct idtcm *idtcm, u8 todn) extts_channel = &idtcm->channel[todn]; ptp_channel = idtcm->event_channel[todn]; + if (extts_channel == ptp_channel) dco_delay = ptp_channel->dco_delay; - err = _idtcm_gettime(extts_channel, &ts, 1); - if (err == 0) { - event.type = PTP_CLOCK_EXTTS; - event.index = todn; - event.timestamp = timespec64_to_ns(&ts) - dco_delay; - ptp_clock_event(ptp_channel->ptp_clock, &event); - } - return err; -} - -static u8 idtcm_enable_extts_mask(struct idtcm_channel *channel, - u8 extts_mask, bool enable) -{ - struct idtcm *idtcm = channel->idtcm; - int i, err; + err = _idtcm_gettime_triggered(extts_channel, &ts); + if (err) + return err; - for (i = 0; i < MAX_TOD; i++) { - u8 mask = 1 << i; - u8 refn = idtcm->channel[i].refn; - - if (extts_mask & mask) { - /* check extts before disabling it */ - if (enable == false) { - err = idtcm_extts_check_channel(idtcm, i); - /* trigger happened so we won't re-enable it */ - if (err == 0) - extts_mask &= ~mask; - } - (void)idtcm_enable_extts(channel, i, refn, enable); - } - } + /* Triggered - save timestamp */ + event.type = PTP_CLOCK_EXTTS; + event.index = todn; + event.timestamp = 
timespec64_to_ns(&ts) - dco_delay; + ptp_clock_event(ptp_channel->ptp_clock, &event); - return extts_mask; + return err; } static int _idtcm_gettime_immediate(struct idtcm_channel *channel, struct timespec64 *ts) { struct idtcm *idtcm = channel->idtcm; - u8 extts_mask = 0; - int err; - /* Disable extts */ - if (idtcm->extts_mask) { - extts_mask = idtcm_enable_extts_mask(channel, idtcm->extts_mask, - false); - } - - err = _idtcm_set_scsr_read_trig(channel, - SCSR_TOD_READ_TRIG_SEL_IMMEDIATE, 0); - if (err == 0) - err = _idtcm_gettime(channel, ts, 10); + u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD); + u8 val = (SCSR_TOD_READ_TRIG_SEL_IMMEDIATE << TOD_READ_TRIGGER_SHIFT); + int err; - /* Re-enable extts */ - if (extts_mask) - idtcm_enable_extts_mask(channel, extts_mask, true); + err = idtcm_write(idtcm, channel->tod_read_primary, + tod_read_cmd, &val, sizeof(val)); + if (err) + return err; - return err; + return _idtcm_gettime(channel, ts, 10); } static int _sync_pll_output(struct idtcm *idtcm, @@ -1702,6 +1722,9 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel) /* * Maximum absolute value for write phase offset in picoseconds * + * @channel: channel + * @delta_ns: delta in nanoseconds + * * Destination signed register is 32-bit register in resolution of 50ps * * 0x7fffffff * 50 = 2147483647 * 50 = 107374182350 @@ -1958,8 +1981,7 @@ static int idtcm_enable(struct ptp_clock_info *ptp, err = idtcm_perout_enable(channel, &rq->perout, true); break; case PTP_CLK_REQ_EXTTS: - err = idtcm_enable_extts(channel, rq->extts.index, - rq->extts.rsv[0], on); + err = idtcm_extts_enable(channel, rq, on); break; default: break; @@ -1982,13 +2004,6 @@ static int idtcm_enable_tod(struct idtcm_channel *channel) u8 cfg; int err; - /* STEELAI-366 - Temporary workaround for ts2phc compatibility */ - if (0) { - err = idtcm_output_mask_enable(channel, false); - if (err) - return err; - } - /* * Start the TOD clock ticking. 
*/ @@ -2038,17 +2053,35 @@ static void idtcm_set_version_info(struct idtcm *idtcm) product_id, hw_rev_id, config_select); } +static int idtcm_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + break; + case PTP_PF_PEROUT: + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +static struct ptp_pin_desc pin_config[MAX_TOD][MAX_REF_CLK]; + static const struct ptp_clock_info idtcm_caps = { .owner = THIS_MODULE, .max_adj = 244000, .n_per_out = 12, .n_ext_ts = MAX_TOD, + .n_pins = MAX_REF_CLK, .adjphase = &idtcm_adjphase, .adjfine = &idtcm_adjfine, .adjtime = &idtcm_adjtime, .gettime64 = &idtcm_gettime, .settime64 = &idtcm_settime, .enable = &idtcm_enable, + .verify = &idtcm_verify_pin, .do_aux_work = &idtcm_work_handler, }; @@ -2057,12 +2090,14 @@ static const struct ptp_clock_info idtcm_caps_deprecated = { .max_adj = 244000, .n_per_out = 12, .n_ext_ts = MAX_TOD, + .n_pins = MAX_REF_CLK, .adjphase = &idtcm_adjphase, .adjfine = &idtcm_adjfine, .adjtime = &idtcm_adjtime_deprecated, .gettime64 = &idtcm_gettime, .settime64 = &idtcm_settime_deprecated, .enable = &idtcm_enable, + .verify = &idtcm_verify_pin, .do_aux_work = &idtcm_work_handler, }; @@ -2174,8 +2209,9 @@ static u32 idtcm_get_dco_delay(struct idtcm_channel *channel) n = 1; fodFreq = (u32)div_u64(m, n); + if (fodFreq >= 500000000) - return 18 * (u32)div_u64(NSEC_PER_SEC, fodFreq); + return (u32)div_u64(18 * (u64)NSEC_PER_SEC, fodFreq); return 0; } @@ -2188,24 +2224,28 @@ static int configure_channel_tod(struct idtcm_channel *channel, u32 index) switch (index) { case 0: channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_0); + channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_0); channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_0); channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_0); channel->sync_src = SYNC_SOURCE_DPLL0_TOD_PPS; break; case 1: channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_1); + channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_1); channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_1); channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_1); channel->sync_src = SYNC_SOURCE_DPLL1_TOD_PPS; break; case 2: channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_2); + channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_2); channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_2); channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_2); channel->sync_src = SYNC_SOURCE_DPLL2_TOD_PPS; break; case 3: channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_3); + channel->tod_read_secondary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_SECONDARY_3); channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_3); channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_3); channel->sync_src = SYNC_SOURCE_DPLL3_TOD_PPS; @@ -2221,6 +2261,7 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) { struct idtcm_channel *channel; int err; + int i; if (!(index < MAX_TOD)) return -EINVAL; @@ -2248,6 +2289,17 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) snprintf(channel->caps.name, sizeof(channel->caps.name), "IDT CM TOD%u", index); + channel->caps.pin_config = pin_config[index]; + + for (i = 0; i < channel->caps.n_pins; ++i) { + struct ptp_pin_desc *ppd = &channel->caps.pin_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "input_ref%d", i); + 
ppd->index = i; + ppd->func = PTP_PF_NONE; + ppd->chan = index; + } + err = initialize_dco_operating_mode(channel); if (err) return err; @@ -2302,26 +2354,40 @@ static int idtcm_enable_extts_channel(struct idtcm *idtcm, u32 index) static void idtcm_extts_check(struct work_struct *work) { struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work); - int err, i; + struct idtcm_channel *channel; + u8 mask; + int err; + int i; if (idtcm->extts_mask == 0) return; mutex_lock(idtcm->lock); + for (i = 0; i < MAX_TOD; i++) { - u8 mask = 1 << i; + mask = 1 << i; + + if ((idtcm->extts_mask & mask) == 0) + continue; + + err = idtcm_extts_check_channel(idtcm, i); - if (idtcm->extts_mask & mask) { - err = idtcm_extts_check_channel(idtcm, i); + if (err == 0) { /* trigger clears itself, so clear the mask */ - if (err == 0) + if (idtcm->extts_single_shot) { idtcm->extts_mask &= ~mask; + } else { + /* Re-arm */ + channel = &idtcm->channel[i]; + arm_tod_read_trig_sel_refclk(channel, channel->refn); + } } } if (idtcm->extts_mask) schedule_delayed_work(&idtcm->extts_work, msecs_to_jiffies(EXTTS_PERIOD_MS)); + mutex_unlock(idtcm->lock); } @@ -2342,6 +2408,11 @@ static void set_default_masks(struct idtcm *idtcm) idtcm->tod_mask = DEFAULT_TOD_MASK; idtcm->extts_mask = 0; + idtcm->channel[0].tod = 0; + idtcm->channel[1].tod = 1; + idtcm->channel[2].tod = 2; + idtcm->channel[3].tod = 3; + idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL; idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL; idtcm->channel[2].pll = DEFAULT_TOD2_PTP_PLL; @@ -2420,8 +2491,8 @@ static int idtcm_remove(struct platform_device *pdev) { struct idtcm *idtcm = platform_get_drvdata(pdev); + idtcm->extts_mask = 0; ptp_clock_unregister_all(idtcm); - cancel_delayed_work_sync(&idtcm->extts_work); return 0; diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h index 0f3059ae1fff..43796508031f 100644 --- a/drivers/ptp/ptp_clockmatrix.h +++ b/drivers/ptp/ptp_clockmatrix.h @@ -10,11 +10,13 @@ #include #include +#include #include #define FW_FILENAME "idtcm.bin" #define MAX_TOD (4) #define MAX_PLL (8) +#define MAX_REF_CLK (16) #define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL) @@ -90,6 +92,7 @@ struct idtcm_channel { u16 dpll_ctrl_n; u16 dpll_phase_pull_in; u16 tod_read_primary; + u16 tod_read_secondary; u16 tod_write; u16 tod_n; u16 hw_dpll_n; @@ -105,6 +108,7 @@ struct idtcm_channel { /* last input trigger for extts */ u8 refn; u8 pll; + u8 tod; u16 output_mask; }; @@ -116,6 +120,7 @@ struct idtcm { enum fw_version fw_ver; /* Polls for external time stamps */ u8 extts_mask; + bool extts_single_shot; struct delayed_work extts_work; /* Remember the ptp channel to report extts */ struct idtcm_channel *event_channel[MAX_TOD]; diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h index a18c1539a152..0c706085c205 100644 --- a/include/linux/mfd/idt8a340_reg.h +++ b/include/linux/mfd/idt8a340_reg.h @@ -407,7 +407,7 @@ #define TOD_READ_PRIMARY_0 0xcc40 #define TOD_READ_PRIMARY_0_V520 0xcc50 /* 8-bit subns, 32-bit ns, 48-bit seconds */ -#define TOD_READ_PRIMARY 0x0000 +#define TOD_READ_PRIMARY_BASE 0x0000 /* Counter increments after TOD write is completed */ #define TOD_READ_PRIMARY_COUNTER 0x000b /* Read trigger configuration */ @@ -424,6 +424,16 @@ #define TOD_READ_SECONDARY_0 0xcc90 #define TOD_READ_SECONDARY_0_V520 0xcca0 +/* 8-bit subns, 32-bit ns, 48-bit seconds */ +#define TOD_READ_SECONDARY_BASE 0x0000 +/* Counter increments after TOD write is completed */ +#define TOD_READ_SECONDARY_COUNTER 0x000b 
+/* Read trigger configuration */ +#define TOD_READ_SECONDARY_SEL_CFG_0 0x000c +/* Read trigger selection */ +#define TOD_READ_SECONDARY_CMD 0x000e +#define TOD_READ_SECONDARY_CMD_V520 0x000f + #define TOD_READ_SECONDARY_1 0xcca0 #define TOD_READ_SECONDARY_1_V520 0xccb0 #define TOD_READ_SECONDARY_2 0xccb0 -- cgit v1.2.3 From 7c7dcd66c5e0537b33b4e217ddb347d768a4b294 Mon Sep 17 00:00:00 2001 From: Min Li Date: Mon, 16 May 2022 10:47:07 -0400 Subject: ptp: ptp_clockmatrix: return -EBUSY if phase pull-in is in progress Also removes PEROUT_ENABLE_OUTPUT_MASK Signed-off-by: Min Li Acked-by: Richard Cochran Link: https://lore.kernel.org/r/1652712427-14703-2-git-send-email-min.li.xe@renesas.com Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_clockmatrix.c | 32 ++------------------------------ drivers/ptp/ptp_clockmatrix.h | 2 -- 2 files changed, 2 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index 25075764eea4..cb258e1448d5 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -1352,43 +1352,15 @@ static int idtcm_output_enable(struct idtcm_channel *channel, return idtcm_write(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val)); } -static int idtcm_output_mask_enable(struct idtcm_channel *channel, - bool enable) -{ - u16 mask; - int err; - u8 outn; - - mask = channel->output_mask; - outn = 0; - - while (mask) { - if (mask & 0x1) { - err = idtcm_output_enable(channel, enable, outn); - if (err) - return err; - } - - mask >>= 0x1; - outn++; - } - - return 0; -} - static int idtcm_perout_enable(struct idtcm_channel *channel, struct ptp_perout_request *perout, bool enable) { struct idtcm *idtcm = channel->idtcm; - unsigned int flags = perout->flags; struct timespec64 ts = {0, 0}; int err; - if (flags == PEROUT_ENABLE_OUTPUT_MASK) - err = idtcm_output_mask_enable(channel, enable); - else - err = idtcm_output_enable(channel, enable, perout->index); + err = idtcm_output_enable(channel, enable, perout->index); if (err) { dev_err(idtcm->dev, "Unable to set output enable"); @@ -1892,7 +1864,7 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) int err; if (channel->phase_pull_in == true) - return 0; + return -EBUSY; mutex_lock(idtcm->lock); diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h index 43796508031f..bf1e49409844 100644 --- a/drivers/ptp/ptp_clockmatrix.h +++ b/drivers/ptp/ptp_clockmatrix.h @@ -54,8 +54,6 @@ #define LOCK_TIMEOUT_MS (2000) #define LOCK_POLL_INTERVAL_MS (10) -#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) - #define IDTCM_MAX_WRITE_COUNT (512) #define PHASE_PULL_IN_MAX_PPB (144000) -- cgit v1.2.3 From 6e144b47f560edc25744498f360835b1042b73dd Mon Sep 17 00:00:00 2001 From: Suman Ghosh Date: Tue, 17 May 2022 10:10:55 +0530 Subject: octeontx2-pf: Add support for adaptive interrupt coalescing Added support for adaptive IRQ coalescing. It uses net_dim algorithm to find the suitable delay/IRQ count based on the current packet rate. 
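The wiring follows the usual DIM pattern from <linux/dim.h>; below is a hedged sketch of that pattern with placeholder driver-side names (my_queue, my_hw_set_coalesce), not the otx2 code itself:

#include <linux/dim.h>
#include <linux/workqueue.h>

struct my_queue {                       /* placeholder per-interrupt context */
        struct dim dim;
        u16 irq_events;
        u64 rx_packets;
        u64 rx_bytes;
};

static void my_hw_set_coalesce(struct my_queue *q, u16 usec, u16 pkts)
{
        /* program the device interrupt coalescing registers here */
}

/* NAPI poll path: feed cumulative counters to DIM; it may schedule dim.work
 * when the measured rate suggests a different moderation profile. */
static void my_rx_dim_update(struct my_queue *q)
{
        struct dim_sample sample;

        dim_update_sample(q->irq_events, q->rx_packets, q->rx_bytes, &sample);
        net_dim(&q->dim, sample);
}

/* Work handler: apply the profile DIM selected, then restart measurement. */
static void my_dim_work(struct work_struct *w)
{
        struct dim *dim = container_of(w, struct dim, work);
        struct my_queue *q = container_of(dim, struct my_queue, dim);
        struct dim_cq_moder m = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

        my_hw_set_coalesce(q, m.usec, m.pkts);
        dim->state = DIM_START_MEASURE;
}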
Signed-off-by: Suman Ghosh Link: https://lore.kernel.org/r/20220517044055.876158-1-sumang@marvell.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/octeontx2/Kconfig | 1 + .../ethernet/marvell/octeontx2/nic/otx2_common.c | 5 --- .../ethernet/marvell/octeontx2/nic/otx2_common.h | 10 +++++ .../ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 45 ++++++++++++++++++++-- .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 22 +++++++++++ .../net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 23 +++++++++++ .../net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1 + 7 files changed, 99 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 639893d87055..e1036b0eb6b1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -33,6 +33,7 @@ config OCTEONTX2_PF select OCTEONTX2_MBOX select NET_DEVLINK depends on (64BIT && COMPILE_TEST) || ARM64 + select DIMLIB depends on PCI depends on PTP_1588_CLOCK_OPTIONAL help diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index b9d7601138ca..fb8db5888d2f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -97,11 +97,6 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf) { struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; -#define OTX2_GET_RX_STATS(reg) \ - otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) -#define OTX2_GET_TX_STATS(reg) \ - otx2_read64(pfvf, NIX_LF_TX_STATX(reg)) - dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index c587c14ac2a3..ce2766317c0b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,11 @@ enum arua_mapped_qtypes { /* Send skid of 2000 packets required for CQ size of 4K CQEs. */ #define SEND_CQ_SKID 2000 +#define OTX2_GET_RX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) +#define OTX2_GET_TX_STATS(reg) \ + otx2_read64(pfvf, NIX_LF_TX_STATX(reg)) + struct otx2_lmt_info { u64 lmt_addr; u16 lmt_id; @@ -351,6 +357,7 @@ struct otx2_nic { #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) #define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) +#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16) u64 flags; u64 *cq_op_addr; @@ -408,6 +415,9 @@ struct otx2_nic { u8 pfc_en; u8 *queue_to_pfc_map; #endif + + /* napi event count. It is needed for adaptive irq coalescing. 
*/ + u32 napi_events; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index fc328de5345e..bc614a4def9e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -455,6 +455,14 @@ static int otx2_get_coalesce(struct net_device *netdev, cmd->rx_max_coalesced_frames = hw->cq_ecount_wait; cmd->tx_coalesce_usecs = hw->cq_time_wait; cmd->tx_max_coalesced_frames = hw->cq_ecount_wait; + if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == + OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { + cmd->use_adaptive_rx_coalesce = 1; + cmd->use_adaptive_tx_coalesce = 1; + } else { + cmd->use_adaptive_rx_coalesce = 0; + cmd->use_adaptive_tx_coalesce = 0; + } return 0; } @@ -466,11 +474,30 @@ static int otx2_set_coalesce(struct net_device *netdev, { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; + u8 priv_coalesce_status; int qidx; if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames) return 0; + if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) { + netdev_err(netdev, + "adaptive-rx should be same as adaptive-tx"); + return -EINVAL; + } + + /* Check and update coalesce status */ + if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == + OTX2_FLAG_ADPTV_INT_COAL_ENABLED) { + priv_coalesce_status = 1; + if (!ec->use_adaptive_rx_coalesce) + pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED; + } else { + priv_coalesce_status = 0; + if (ec->use_adaptive_rx_coalesce) + pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED; + } + /* 'cq_time_wait' is 8bit and is in multiple of 100ns, * so clamp the user given value to the range of 1 to 25usec. */ @@ -494,9 +521,9 @@ static int otx2_set_coalesce(struct net_device *netdev, * so clamp the user given value to the range of 1 to 64k. */ ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames, - 1, U16_MAX); + 1, NAPI_POLL_WEIGHT); ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames, - 1, U16_MAX); + 1, NAPI_POLL_WEIGHT); /* Rx and Tx are mapped to same CQ, check which one * is changed, if both then choose the min. @@ -509,6 +536,17 @@ static int otx2_set_coalesce(struct net_device *netdev, hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames, ec->tx_max_coalesced_frames); + /* Reset 'cq_time_wait' and 'cq_ecount_wait' to + * default values if coalesce status changed from + * 'on' to 'off'. 
+ */ + if (priv_coalesce_status && + ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) != + OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { + hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT; + hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT; + } + if (netif_running(netdev)) { for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++) otx2_config_irq_coalescing(pfvf, qidx); @@ -1230,7 +1268,8 @@ end: static const struct ethtool_ops otx2_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e193ffbc1fab..fe3472e04c23 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1254,6 +1254,7 @@ static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); /* Schedule NAPI */ + pf->napi_events++; napi_schedule_irqoff(&cq_poll->napi); return IRQ_HANDLED; @@ -1267,6 +1268,7 @@ static void otx2_disable_napi(struct otx2_nic *pf) for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { cq_poll = &qset->napi[qidx]; + cancel_work_sync(&cq_poll->dim.work); napi_disable(&cq_poll->napi); netif_napi_del(&cq_poll->napi); } @@ -1546,6 +1548,24 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) mutex_unlock(&pf->mbox.lock); } +static void otx2_dim_work(struct work_struct *w) +{ + struct dim_cq_moder cur_moder; + struct otx2_cq_poll *cq_poll; + struct otx2_nic *pfvf; + struct dim *dim; + + dim = container_of(w, struct dim, work); + cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + cq_poll = container_of(dim, struct otx2_cq_poll, dim); + pfvf = (struct otx2_nic *)cq_poll->dev; + pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ? + CQ_TIMER_THRESH_MAX : cur_moder.usec; + pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ? 
+ NAPI_POLL_WEIGHT : cur_moder.pkts; + dim->state = DIM_START_MEASURE; +} + int otx2_open(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); @@ -1612,6 +1632,8 @@ int otx2_open(struct net_device *netdev) cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; cq_poll->dev = (void *)pf; + cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; + INIT_WORK(&cq_poll->dim.work, otx2_dim_work); netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler, NAPI_POLL_WEIGHT); napi_enable(&cq_poll->napi); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index c26de15b2ac3..3baeafc40807 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -484,6 +484,18 @@ process_cqe: return 0; } +static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll) +{ + struct dim_sample dim_sample; + u64 rx_frames, rx_bytes; + + rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) + + OTX2_GET_RX_STATS(RX_UCAST); + rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); + dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample); + net_dim(&cq_poll->dim, dim_sample); +} + int otx2_napi_handler(struct napi_struct *napi, int budget) { struct otx2_cq_queue *rx_cq = NULL; @@ -521,6 +533,17 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (pfvf->flags & OTX2_FLAG_INTF_DOWN) return workdone; + /* Check for adaptive interrupt coalesce */ + if (workdone != 0 && + ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) == + OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) { + /* Adjust irq coalese using net_dim */ + otx2_adjust_adaptive_coalese(pfvf, cq_poll); + /* Update irq coalescing */ + for (i = 0; i < pfvf->hw.cint_cnt; i++) + otx2_config_irq_coalescing(pfvf, i); + } + /* Re-enable interrupts */ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index f1a04cf9210c..c88e8a436029 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -109,6 +109,7 @@ struct otx2_cq_poll { #define CINT_INVALID_CQ 255 u8 cint_idx; u8 cq_ids[CQS_PER_CINT]; + struct dim dim; struct napi_struct napi; }; -- cgit v1.2.3 From 4c7c8a6d87a83d5e7ffb9bc166ebf865b9360040 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 18 Apr 2022 12:32:15 -0700 Subject: net/mlx5: sparse: error: context imbalance in 'mlx5_vf_get_core_dev' Removing the annotation resolves the issue for some reason. Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 84f75aa25214..87f1552b5d73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1886,7 +1886,6 @@ static struct pci_driver mlx5_core_driver = { * Return: Pointer to the associated mlx5_core_dev or NULL. */ struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev) - __acquires(&mdev->intf_state_mutex) { struct mlx5_core_dev *mdev; @@ -1912,7 +1911,6 @@ EXPORT_SYMBOL(mlx5_vf_get_core_dev); * access the mdev any more. 
*/ void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev) - __releases(&mdev->intf_state_mutex) { mutex_unlock(&mdev->intf_state_mutex); } -- cgit v1.2.3 From 1d2c717bc7f7fd3c9cf38d4a0d5d7ede06adf05b Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Fri, 13 May 2022 06:19:31 +0300 Subject: net/mlx5: Add last command failure syndrome to debugfs Add syndrome of last command failure per command type to debugfs to ease debugging of such failure. last_failed_syndrome - last command failed syndrome returned by FW. Signed-off-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 7 +++++-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 2 ++ include/linux/mlx5/driver.h | 2 ++ 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 26ba94cb432e..0377392848d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1887,7 +1887,8 @@ out_in: return err; } -static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err) +static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, + u32 syndrome, int err) { struct mlx5_cmd_stats *stats; @@ -1902,6 +1903,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int if (err == -EREMOTEIO) { stats->failed_mbox_status++; stats->last_failed_mbox_status = status; + stats->last_failed_syndrome = syndrome; } spin_unlock_irq(&stats->lock); } @@ -1909,6 +1911,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out) { + u32 syndrome = MLX5_GET(mbox_out, out, syndrome); u8 status = MLX5_GET(mbox_out, out, status); if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ @@ -1917,7 +1920,7 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void * if (!err && status != MLX5_CMD_STAT_OK) err = -EREMOTEIO; - cmd_status_log(dev, opcode, status, err); + cmd_status_log(dev, opcode, status, syndrome, err); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 3d3e55a5cb11..9caa1b52321b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -192,6 +192,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) &stats->last_failed_errno); debugfs_create_u8("last_failed_mbox_status", 0400, stats->root, &stats->last_failed_mbox_status); + debugfs_create_x32("last_failed_syndrome", 0400, stats->root, + &stats->last_failed_syndrome); } } } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d6bac3976913..74c8cfb771a2 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -272,6 +272,8 @@ struct mlx5_cmd_stats { u32 last_failed_errno; /* last bad status returned by FW */ u8 last_failed_mbox_status; + /* last command failed syndrome returned by FW */ + u32 last_failed_syndrome; struct dentry *root; /* protect command average calculations */ spinlock_t lock; -- cgit v1.2.3 From 9b45bde82c229fda94618896ff530dcba9d66fe0 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 25 Jan 2022 14:47:36 +0200 Subject: net/mlx5: Inline db alloc API function Take the wrapper version which picks 
default node into a header file. This reduces the number of exported functions. Signed-off-by: Tariq Toukan Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 6 ------ include/linux/mlx5/driver.h | 7 ++++++- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index e52b0bac09da..6aca004e88cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -213,12 +213,6 @@ out: } EXPORT_SYMBOL_GPL(mlx5_db_alloc_node); -int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) -{ - return mlx5_db_alloc_node(dev, db, dev->priv.numa_node); -} -EXPORT_SYMBOL_GPL(mlx5_db_alloc); - void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) { u32 db_per_page = PAGE_SIZE / cache_line_size(); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 74c8cfb771a2..b064bc278f52 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1053,9 +1053,14 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node); + +static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + return mlx5_db_alloc_node(dev, db, dev->priv.numa_node); +} + void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); const char *mlx5_command_str(int command); -- cgit v1.2.3 From 773c104d5333aed1c7966e79ee3c75c4c5ac5515 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 27 Apr 2021 16:25:56 +0300 Subject: net/mlx5: Allocate virtually contiguous memory in vport.c Physical continuity is not necessary, and requested allocation size might be larger than PAGE_SIZE. Hence, use v-alloc/free API. 
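A hedged sketch of the allocator swap applied throughout this series; the surrounding function is a placeholder, not the mlx5 code:

#include <linux/slab.h>
#include <linux/mm.h>   /* kvzalloc()/kvfree(), header varies by kernel version */

static int query_something(size_t out_sz)
{
        void *out;

        out = kvzalloc(out_sz, GFP_KERNEL);     /* contiguous if small, vmalloc-backed if large */
        if (!out)
                return -ENOMEM;

        /* ... fill and parse 'out' exactly as before ... */

        kvfree(out);                            /* correct for either backing */
        return 0;
}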
Signed-off-by: Tariq Toukan Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 52 ++++++++++++------------- 1 file changed, 26 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 8846d30a380a..ac020cb78072 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -280,7 +280,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); - out = kzalloc(out_sz, GFP_KERNEL); + out = kvzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; @@ -307,7 +307,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, ether_addr_copy(addr_list[i], mac_addr); } out: - kfree(out); + kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list); @@ -335,7 +335,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + list_size * MLX5_ST_SZ_BYTES(mac_address_layout); - in = kzalloc(in_sz, GFP_KERNEL); + in = kvzalloc(in_sz, GFP_KERNEL); if (!in) return -ENOMEM; @@ -360,7 +360,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, } err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); - kfree(in); + kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list); @@ -386,7 +386,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, list_size * MLX5_ST_SZ_BYTES(vlan_layout); memset(out, 0, sizeof(out)); - in = kzalloc(in_sz, GFP_KERNEL); + in = kvzalloc(in_sz, GFP_KERNEL); if (!in) return -ENOMEM; @@ -411,7 +411,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, } err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); - kfree(in); + kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans); @@ -542,8 +542,8 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, out_sz += nout * sizeof(*gid); - in = kzalloc(in_sz, GFP_KERNEL); - out = kzalloc(out_sz, GFP_KERNEL); + in = kvzalloc(in_sz, GFP_KERNEL); + out = kvzalloc(out_sz, GFP_KERNEL); if (!in || !out) { err = -ENOMEM; goto out; @@ -573,8 +573,8 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, gid->global.interface_id = tmp->global.interface_id; out: - kfree(in); - kfree(out); + kvfree(in); + kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid); @@ -607,8 +607,8 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, out_sz += nout * MLX5_ST_SZ_BYTES(pkey); - in = kzalloc(in_sz, GFP_KERNEL); - out = kzalloc(out_sz, GFP_KERNEL); + in = kvzalloc(in_sz, GFP_KERNEL); + out = kvzalloc(out_sz, GFP_KERNEL); if (!in || !out) { err = -ENOMEM; goto out; @@ -638,8 +638,8 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, *pkey = MLX5_GET_PR(pkey, pkarr, pkey); out: - kfree(in); - kfree(out); + kvfree(in); + kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey); @@ -658,7 +658,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); - out = kzalloc(out_sz, GFP_KERNEL); + out = kvzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; @@ -717,7 +717,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, system_image_guid); ex: - kfree(out); + kvfree(out); return err; } 
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context); @@ -728,7 +728,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, struct mlx5_hca_vport_context *rep; int err; - rep = kzalloc(sizeof(*rep), GFP_KERNEL); + rep = kvzalloc(sizeof(*rep), GFP_KERNEL); if (!rep) return -ENOMEM; @@ -736,7 +736,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, if (!err) *sys_image_guid = rep->sys_image_guid; - kfree(rep); + kvfree(rep); return err; } EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid); @@ -747,7 +747,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, struct mlx5_hca_vport_context *rep; int err; - rep = kzalloc(sizeof(*rep), GFP_KERNEL); + rep = kvzalloc(sizeof(*rep), GFP_KERNEL); if (!rep) return -ENOMEM; @@ -755,7 +755,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, if (!err) *node_guid = rep->node_guid; - kfree(rep); + kvfree(rep); return err; } EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); @@ -770,7 +770,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); int err; - out = kzalloc(outlen, GFP_KERNEL); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -786,7 +786,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, nic_vport_context.promisc_all); out: - kfree(out); + kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc); @@ -874,7 +874,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status) int value; int err; - out = kzalloc(outlen, GFP_KERNEL); + out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; @@ -891,7 +891,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status) *status = !value; out: - kfree(out); + kvfree(out); return err; } EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb); @@ -1033,7 +1033,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, mlx5_core_dbg(dev, "vf %d\n", vf); is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); - in = kzalloc(in_sz, GFP_KERNEL); + in = kvzalloc(in_sz, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1065,7 +1065,7 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, req->cap_mask1_perm); err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in); ex: - kfree(in); + kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context); -- cgit v1.2.3 From 88468311c07a8bb34321166341f93981f7337a7d Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 27 Apr 2021 17:23:16 +0300 Subject: net/mlx5: Allocate virtually contiguous memory in pci_irq.c Physical continuity is not necessary, and requested allocation size might be larger than PAGE_SIZE. Hence, use v-alloc/free API. 
Signed-off-by: Tariq Toukan Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index db77f1d2eeb4..662f1d55e30e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -94,8 +94,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id, if (msix_vec_count > max_msix) return -EOVERFLOW; - query_cap = kzalloc(query_sz, GFP_KERNEL); - hca_cap = kzalloc(set_sz, GFP_KERNEL); + query_cap = kvzalloc(query_sz, GFP_KERNEL); + hca_cap = kvzalloc(set_sz, GFP_KERNEL); if (!hca_cap || !query_cap) { ret = -ENOMEM; goto out; @@ -118,8 +118,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1); ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap); out: - kfree(hca_cap); - kfree(query_cap); + kvfree(hca_cap); + kvfree(query_cap); return ret; } -- cgit v1.2.3 From 035e0dd573929306994a1856061e95998699cd69 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 27 Apr 2021 16:11:12 +0300 Subject: net/mlx5e: Allocate virtually contiguous memory for VLANs list Physical continuity is not necessary, and requested allocation size might be larger than PAGE_SIZE. Hence, use v-alloc/free API. Signed-off-by: Tariq Toukan Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index aeff1d972a46..d2f0773f95c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -155,7 +155,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) list_size = max_list_size; } - vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL); + vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL); if (!vlans) return -ENOMEM; @@ -171,7 +171,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n", err); - kfree(vlans); + kvfree(vlans); return err; } -- cgit v1.2.3 From 597c112326197786abcc3a38090b08856bf860af Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 27 Apr 2021 16:41:42 +0300 Subject: net/mlx5e: Allocate virtually contiguous memory for reps structures Physical continuity is not necessary, and requested allocation size might be larger than PAGE_SIZE. Hence, use v-alloc/free API. 
Signed-off-by: Tariq Toukan Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 47f7b4c034cc..ce3b9e65c808 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -412,8 +412,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)); nch = priv->channels.num + ptp_sq; - sqs = kcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs), - GFP_KERNEL); + sqs = kvcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs), + GFP_KERNEL); if (!sqs) goto out; @@ -430,7 +430,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) } err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs); - kfree(sqs); + kvfree(sqs); out: if (err) @@ -1269,7 +1269,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) struct mlx5e_rep_priv *rpriv; int err; - rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + rpriv = kvzalloc(sizeof(*rpriv), GFP_KERNEL); if (!rpriv) return -ENOMEM; @@ -1284,7 +1284,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) err = mlx5e_vport_vf_rep_load(dev, rep); if (err) - kfree(rpriv); + kvfree(rpriv); return err; } @@ -1312,7 +1312,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) priv->profile->cleanup(priv); mlx5e_destroy_netdev(priv); free_ppriv: - kfree(ppriv); /* mlx5e_rep_priv */ + kvfree(ppriv); /* mlx5e_rep_priv */ } static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) -- cgit v1.2.3 From 675b9d51d6fbc8d08842bdce84e4d38a400e357d Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 11 Apr 2022 15:44:01 +0300 Subject: net/mlx5e: IPoIB, Improve ethtool rxnfc callback structure in IPoIB Followup commit 79ce39be1d63 ("net/mlx5e: Improve ethtool rxnfc callback structure") and handle CONFIG_MLX5_EN_RXNFC enabled/disabled inside the fs layer so the ethtool callbacks are always available. The fs layer will provide stubs when CONFIG_MLX5_EN_RXNFC is compiled out. Signed-off-by: Gal Pressman Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index f4f7eaf16446..8da73ef5680f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -221,7 +221,6 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev, return 0; } -#ifdef CONFIG_MLX5_EN_RXNFC static u32 mlx5i_flow_type_mask(u32 flow_type) { return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); @@ -243,9 +242,18 @@ static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, { struct mlx5e_priv *priv = mlx5i_epriv(dev); + /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part + * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc, + * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc + * is compiled out via CONFIG_MLX5_EN_RXNFC=n. 
+ */ + if (info->cmd == ETHTOOL_GRXRINGS) { + info->data = priv->channels.params.num_channels; + return 0; + } + return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs); } -#endif const struct ethtool_ops mlx5i_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | @@ -263,10 +271,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = { .get_coalesce = mlx5i_get_coalesce, .set_coalesce = mlx5i_set_coalesce, .get_ts_info = mlx5i_get_ts_info, -#ifdef CONFIG_MLX5_EN_RXNFC .get_rxnfc = mlx5i_get_rxnfc, .set_rxnfc = mlx5i_set_rxnfc, -#endif .get_link_ksettings = mlx5i_get_link_ksettings, .get_link = ethtool_op_get_link, }; -- cgit v1.2.3 From 682adfa6ca80f86dcdc5cebe85b11d30d15944d9 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 31 Mar 2022 09:26:00 +0300 Subject: net/mlx5e: Support partial GSO for tunnels over vlans Offloading outer checksum on tunnels requires GSO partial, add it to 'vlan_features' to allow offloading tunnels over vlans. For example, running GENEVE over vlan & ipv6 (mandatory UDP checksum) now allows for hardware TSO instead of software segmentation in GSO only. Signed-off-by: Gal Pressman Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bf3bca79e160..29e1e1f8f49e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4812,6 +4812,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_RXCSUM; netdev->vlan_features |= NETIF_F_RXHASH; + netdev->vlan_features |= NETIF_F_GSO_PARTIAL; netdev->mpls_features |= NETIF_F_SG; netdev->mpls_features |= NETIF_F_HW_CSUM; @@ -4877,7 +4878,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_IPXIP6; } - netdev->hw_features |= NETIF_F_GSO_PARTIAL; netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; netdev->hw_features |= NETIF_F_GSO_UDP_L4; netdev->features |= NETIF_F_GSO_UDP_L4; -- cgit v1.2.3 From f05ec8d9d0d62367b6e1f2cb50d7d2a45e7747cf Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Mon, 11 Apr 2022 09:27:39 +0300 Subject: net/mlx5e: Allow relaxed ordering over VFs By PCI spec, the config space of the VF always report relaxed ordering not supported while it inherits this property from its PF. Hence using pcie_relaxed_ordering_enable(), always disables the relaxed ordering on all VFs. Remove this check and rely on the firmware which queries the config space of the PF and set the capability bit accordingly. 
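As background, a rough sketch (not mlx5 code) of what the dropped pcie_relaxed_ordering_enabled() check effectively reports: the Relaxed Ordering enable bit of the device's own PCIe Device Control register, which a VF does not mirror from its PF, so the check always reads as disabled for VFs:

    #include <linux/pci.h>

    /* Roughly equivalent to the helper the patch stops relying on. */
    static bool example_relaxed_ordering_enabled(struct pci_dev *pdev)
    {
            u16 devctl;

            pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
            return !!(devctl & PCI_EXP_DEVCTL_RELAX_EN);
    }

Deferring to the firmware capability bits instead lets the device report what the VF actually supports.
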
Signed-off-by: Aya Levin Reviewed-by: Gal Pressman Reviewed-by: Marina Varshaver Reviewed-by: Gal Shalom Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 3c1edfa33aa7..68364484a435 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -565,8 +565,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; - bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && - MLX5_CAP_GEN(mdev, relaxed_ordering_write); + bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write); return ro && lro_en ? MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index c0f409c195bf..43a536cb81db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -38,12 +38,11 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) { - bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read); - MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read); - MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write); + MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read); + MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, -- cgit v1.2.3 From 77422a8f6f61be1ef64978e9a94f40fed0d1634e Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 17 May 2022 20:13:09 -0700 Subject: net/mlx5e: CT: Add ct driver counters Connection offload is translated to multiple rules over several hardware flow tables. Unhandled end-cases may cause a hardware resource leak causing multiple system symptoms such as a host memory leak, decreased performance and other scale related issues. Export the current number of firmware FTEs related to the CT table as a debugfs counter. Also add a dropped packets counter to help debug packets dropped on restore failure. 
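A minimal sketch of the debugfs plumbing this uses; the directory layout matches the paths shown below, but the function and variable names here are illustrative:

    #include <linux/atomic.h>
    #include <linux/debugfs.h>

    static atomic_t example_offloaded = ATOMIC_INIT(0);
    static atomic_t example_rx_dropped = ATOMIC_INIT(0);

    /* Expose read-only counters under <debugfs>/<dev>/ct_nic/. */
    static void example_create_ct_counters(struct dentry *dev_root)
    {
            struct dentry *dir = debugfs_create_dir("ct_nic", dev_root);

            debugfs_create_atomic_t("offloaded", 0400, dir, &example_offloaded);
            debugfs_create_atomic_t("rx_dropped", 0400, dir, &example_rx_dropped);
    }
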
To show the offloaded count: cat /sys/kernel/debug/mlx5//ct_nic/offloaded To show the dropped count: cat /sys/kernel/debug/mlx5//ct_nic/rx_dropped Signed-off-by: Paul Blakey Signed-off-by: Roi Dayan Signed-off-by: Saeed Mahameed Reviewed-by: Oz Shlomo Reviewed-by: Paul Blakey --- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 52 ++++++++++++++++++++-- 1 file changed, 48 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 228fbd2c20d4..bceea7a1589e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "lib/fs_chains.h" #include "en/tc_ct.h" @@ -47,6 +48,15 @@ #define ct_dbg(fmt, args...)\ netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args) +struct mlx5_tc_ct_debugfs { + struct { + atomic_t offloaded; + atomic_t rx_dropped; + } stats; + + struct dentry *root; +}; + struct mlx5_tc_ct_priv { struct mlx5_core_dev *dev; const struct net_device *netdev; @@ -66,6 +76,8 @@ struct mlx5_tc_ct_priv { struct mlx5_ct_fs *fs; struct mlx5_ct_fs_ops *fs_ops; spinlock_t ht_lock; /* protects ft entries */ + + struct mlx5_tc_ct_debugfs debugfs; }; struct mlx5_ct_flow { @@ -520,6 +532,8 @@ mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv, { mlx5_tc_ct_entry_del_rule(ct_priv, entry, true); mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + + atomic_dec(&ct_priv->debugfs.stats.offloaded); } static struct flow_action_entry * @@ -1040,6 +1054,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, if (err) goto err_nat; + atomic_inc(&ct_priv->debugfs.stats.offloaded); return 0; err_nat: @@ -2064,6 +2079,29 @@ out_err: return err; } +static void +mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv) +{ + bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB; + struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs; + char dirname[16] = {}; + + if (snprintf(dirname, sizeof(dirname), "ct_%s", is_fdb ?
"fdb" : "nic") < 0) + return; + + ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev)); + debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root, + &ct_dbgfs->stats.offloaded); + debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root, + &ct_dbgfs->stats.rx_dropped); +} + +static void +mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv) +{ + debugfs_remove_recursive(ct_priv->debugfs.root); +} + #define INIT_ERR_PREFIX "tc ct offload init failed" struct mlx5_tc_ct_priv * @@ -2139,6 +2177,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (err) goto err_init_fs; + mlx5_ct_tc_create_dbgfs(ct_priv); return ct_priv; err_init_fs: @@ -2171,6 +2210,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) if (!ct_priv) return; + mlx5_ct_tc_remove_dbgfs(ct_priv); chains = ct_priv->chains; ct_priv->fs_ops->destroy(ct_priv->fs); @@ -2200,22 +2240,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, return true; if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone)) - return false; + goto out_inc_drop; if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone)) - return false; + goto out_inc_drop; spin_lock(&ct_priv->ht_lock); entry = mlx5_tc_ct_entry_get(ct_priv, &tuple); if (!entry) { spin_unlock(&ct_priv->ht_lock); - return false; + goto out_inc_drop; } if (IS_ERR(entry)) { spin_unlock(&ct_priv->ht_lock); - return false; + goto out_inc_drop; } spin_unlock(&ct_priv->ht_lock); @@ -2223,4 +2263,8 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, __mlx5_tc_ct_entry_put(entry); return true; + +out_inc_drop: + atomic_inc(&ct_priv->debugfs.stats.rx_dropped); + return false; } -- cgit v1.2.3 From 6d0ba49321a40a8dada22c223bbe91c063b08db4 Mon Sep 17 00:00:00 2001 From: Moshe Tal Date: Wed, 27 Apr 2022 18:26:52 +0300 Subject: net/mlx5e: Correct the calculation of max channels for rep Correct the calculation of maximum channels of rep to better utilize the hardware resources and allow a larger scale of reps. This will allow creation of all virtual ports configured. 
Fixes: 473baf2e9e8c ("net/mlx5e: Allow profile-specific limitation on max num of channels") Signed-off-by: Moshe Tal Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 9 +++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 10 ++++++++-- 3 files changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b90902db7819..65d3c4865abf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1220,6 +1220,7 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev) MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe); } +int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev); int mlx5e_priv_init(struct mlx5e_priv *priv, const struct mlx5e_profile *profile, struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 29e1e1f8f49e..6816a024db8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5221,6 +5221,15 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev, return max_nch; } +int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev) +{ + /* Indirect TIRS: 2 sets of TTCs (inner + outer steering) + * and 1 set of direct TIRS + */ + return 2 * MLX5E_NUM_INDIR_TIRS + + mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile); +} + /* mlx5e generic netdev management API (move to en_common.c) */ int mlx5e_priv_init(struct mlx5e_priv *priv, const struct mlx5e_profile *profile, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index ce3b9e65c808..aa32b1062f7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -604,10 +604,16 @@ bool mlx5e_eswitch_vf_rep(const struct net_device *netdev) return netdev->netdev_ops == &mlx5e_netdev_ops_rep; } +/* One indirect TIR set for outer. Inner not supported in reps. */ +#define REP_NUM_INDIR_TIRS MLX5E_NUM_INDIR_TIRS + static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev) { - return (1 << MLX5_CAP_GEN(mdev, log_max_tir)) / - mlx5_eswitch_get_total_vports(mdev); + int max_tir_num = 1 << MLX5_CAP_GEN(mdev, log_max_tir); + int num_vports = mlx5_eswitch_get_total_vports(mdev); + + return (max_tir_num - mlx5e_get_pf_num_tirs(mdev) + - (num_vports * REP_NUM_INDIR_TIRS)) / num_vports; } static void mlx5e_build_rep_params(struct net_device *netdev) -- cgit v1.2.3 From 65810a2d2ab3c2a640e14a3e249c87e50675e6ce Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 2 Mar 2022 10:02:59 +0200 Subject: net/mlx5e: Add XDP SQs to uplink representors steering tables This patch adds the XDP SQs to the uplink representors steering tables in swichdev mode and enables XDP usage on them. 
Signed-off-by: Gal Pressman Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index aa32b1062f7a..eb90e79388f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -399,7 +399,9 @@ out_err: int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { + int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params); struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + bool is_uplink_rep = mlx5e_is_uplink_rep(priv); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; int n, tc, nch, num_sqs = 0; @@ -411,9 +413,13 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) ptp_sq = !!(priv->channels.ptp && MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)); nch = priv->channels.num + ptp_sq; + /* +2 for xdpsqs, they don't exist on the ptp channel but will not be + * counted for by num_sqs. + */ + if (is_uplink_rep) + sqs_per_channel += 2; - sqs = kvcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs), - GFP_KERNEL); + sqs = kvcalloc(nch * sqs_per_channel, sizeof(*sqs), GFP_KERNEL); if (!sqs) goto out; @@ -421,6 +427,13 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) c = priv->channels.c[n]; for (tc = 0; tc < c->num_tc; tc++) sqs[num_sqs++] = c->sq[tc].sqn; + + if (is_uplink_rep) { + if (c->xdp) + sqs[num_sqs++] = c->rq_xdpsq.sqn; + + sqs[num_sqs++] = c->xdpsq.sqn; + } } if (ptp_sq) { struct mlx5e_ptp *ptp_ch = priv->channels.ptp; -- cgit v1.2.3 From ef9a3a4a813ac0debfd2d64b71db0a55dbc3efd7 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Mon, 24 Jan 2022 11:30:46 +0200 Subject: net/mlx5: Lag, refactor lag state machine LAG state machine is implemented using bit flags. However, all these bit flags, except for MLX5_LAG_FLAG_HASH_BASED, are really mutual exclusive. In addition, MLX5_LAG_FLAG_READY is used by bonding to mark if we have our netdevices successfully added to lag and does not really belong in the same flags variable as the other flags. Rename MLX5_LAG_FLAG_READY to MLX5_LAG_FLAG_NDEVS_READY to better reflect its purpose and put it in a new flags variable. For the rest of the flags, we introduce a mode enum to hold the state of the LAG. Remove the shared fdb boolean flag from struct mlx5_lag and store this configuration as a mode flag. Change all flag related operations to use standard Linux APIs. 
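A simplified sketch of the resulting layout (names are illustrative, condensed from the patch): the mutually exclusive states collapse into one enum, while the independent modifiers live in separate flag words handled with the standard bitops helpers:

    #include <linux/bitops.h>

    enum example_lag_mode {                 /* mutually exclusive states */
            EXAMPLE_LAG_MODE_NONE,
            EXAMPLE_LAG_MODE_ROCE,
            EXAMPLE_LAG_MODE_SRIOV,
            EXAMPLE_LAG_MODE_MULTIPATH,
    };

    enum {                                  /* modifiers of the active mode */
            EXAMPLE_LAG_MODE_FLAG_HASH_BASED,
            EXAMPLE_LAG_MODE_FLAG_SHARED_FDB,
    };

    struct example_lag {
            enum example_lag_mode mode;
            unsigned long mode_flags;
            unsigned long state_flags;      /* e.g. "netdevs ready" */
    };

    static bool example_lag_is_shared_fdb(struct example_lag *ldev)
    {
            return ldev->mode != EXAMPLE_LAG_MODE_NONE &&
                   test_bit(EXAMPLE_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
    }
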
Signed-off-by: Eli Cohen Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/lag/debugfs.c | 12 +-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 112 ++++++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 33 +++--- drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c | 4 +- 4 files changed, 93 insertions(+), 68 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c index 443daf6e3d4b..6e7001c0cfd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c @@ -5,11 +5,11 @@ static char *get_str_mode_type(struct mlx5_lag *ldev) { - if (ldev->flags & MLX5_LAG_FLAG_ROCE) + if (ldev->mode == MLX5_LAG_MODE_ROCE) return "roce"; - if (ldev->flags & MLX5_LAG_FLAG_SRIOV) + if (ldev->mode == MLX5_LAG_MODE_SRIOV) return "switchdev"; - if (ldev->flags & MLX5_LAG_FLAG_MULTIPATH) + if (ldev->mode == MLX5_LAG_MODE_MULTIPATH) return "multipath"; return NULL; @@ -43,7 +43,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv) ldev = dev->priv.lag; mutex_lock(&ldev->lock); if (__mlx5_lag_is_active(ldev)) - mode = get_str_port_sel_mode(ldev->flags); + mode = get_str_port_sel_mode(ldev->mode_flags); else ret = -EINVAL; mutex_unlock(&ldev->lock); @@ -79,7 +79,7 @@ static int flags_show(struct seq_file *file, void *priv) mutex_lock(&ldev->lock); lag_active = __mlx5_lag_is_active(ldev); if (lag_active) - shared_fdb = ldev->shared_fdb; + shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); mutex_unlock(&ldev->lock); if (!lag_active) @@ -103,7 +103,7 @@ static int mapping_show(struct seq_file *file, void *priv) mutex_lock(&ldev->lock); lag_active = __mlx5_lag_is_active(ldev); if (lag_active) { - if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) { + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) { mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports, &num_ports); hash = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index b6dd9043061f..3eb195cba205 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -53,21 +53,30 @@ enum { */ static DEFINE_SPINLOCK(lag_lock); -static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, bool shared_fdb, u8 flags) +static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags) { + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) + return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT; + + return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY; +} + +static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode, + unsigned long flags) +{ + bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); + int port_sel_mode = get_port_sel_mode(mode, flags); u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; - void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); + void *lag_ctx; + lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); - MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); - if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) { + if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) { MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); - } else { - MLX5_SET(lagc, lag_ctx, port_select_mode, - 
MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT); } + MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode); return mlx5_cmd_exec_in(dev, create_lag, in); } @@ -139,7 +148,7 @@ void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, struct mlx5_lag *ldev, struct lag_tracker *tracker, - u8 flags) + unsigned long flags) { char buf[MLX5_MAX_PORTS * 10 + 1] = {}; u8 enabled_ports[MLX5_MAX_PORTS] = {}; @@ -150,7 +159,7 @@ static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev, int i; int j; - if (flags & MLX5_LAG_FLAG_HASH_BASED) { + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { mlx5_infer_tx_enabled(tracker, ldev->ports, enabled_ports, &num_enabled); for (i = 0; i < num_enabled; i++) { @@ -227,6 +236,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev) ldev->nb.notifier_call = NULL; mlx5_core_err(dev, "Failed to register LAG netdev notifier\n"); } + ldev->mode = MLX5_LAG_MODE_NONE; err = mlx5_lag_mp_init(ldev); if (err) @@ -252,12 +262,12 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev) { - return !!(ldev->flags & MLX5_LAG_FLAG_ROCE); + return ldev->mode == MLX5_LAG_MODE_ROCE; } static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) { - return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV); + return ldev->mode == MLX5_LAG_MODE_SRIOV; } /* Create a mapping between steering slots and active ports. @@ -372,7 +382,7 @@ static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) return mlx5_lag_port_sel_modify(ldev, ports); return mlx5_cmd_modify_lag(dev0, ldev->ports, ports); } @@ -404,19 +414,19 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, memcpy(ldev->v2p_map, ports, sizeof(ports)); mlx5_lag_print_mapping(dev0, ldev, tracker, - ldev->flags); + ldev->mode_flags); break; } } if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && - !(ldev->flags & MLX5_LAG_FLAG_ROCE)) + !(ldev->mode == MLX5_LAG_MODE_ROCE)) mlx5_lag_drop_rule_setup(ldev, tracker); } #define MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED 4 static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, - struct lag_tracker *tracker, u8 *flags) + unsigned long *flags) { struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; @@ -424,7 +434,7 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, /* Four ports are support only in hash mode */ if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table)) return -EINVAL; - *flags |= MLX5_LAG_FLAG_HASH_BASED; + set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags); if (ldev->ports > 2) ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS; } @@ -433,38 +443,46 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, } static int mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, - struct lag_tracker *tracker, u8 *flags) + struct lag_tracker *tracker, unsigned long *flags) { struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) - *flags |= MLX5_LAG_FLAG_HASH_BASED; + set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags); return 0; } -static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev, - struct lag_tracker *tracker, u8 *flags) +static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, + struct lag_tracker *tracker, bool shared_fdb, + 
unsigned long *flags) { - bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE); + bool roce_lag = mode == MLX5_LAG_MODE_ROCE; + + *flags = 0; + if (shared_fdb) + set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags); if (roce_lag) - return mlx5_lag_set_port_sel_mode_roce(ldev, tracker, flags); + return mlx5_lag_set_port_sel_mode_roce(ldev, flags); + return mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, flags); } -char *get_str_port_sel_mode(u8 flags) +char *get_str_port_sel_mode(unsigned long flags) { - if (flags & MLX5_LAG_FLAG_HASH_BASED) + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) return "hash"; return "queue_affinity"; } static int mlx5_create_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - bool shared_fdb, u8 flags) + enum mlx5_lag_mode mode, + unsigned long flags) { + bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; @@ -474,7 +492,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", shared_fdb, get_str_port_sel_mode(flags)); - err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, shared_fdb, flags); + err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags); if (err) { mlx5_core_err(dev0, "Failed to create LAG (%d)\n", @@ -503,20 +521,20 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, int mlx5_activate_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - u8 flags, + enum mlx5_lag_mode mode, bool shared_fdb) { - bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE); + bool roce_lag = mode == MLX5_LAG_MODE_ROCE; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + unsigned long flags; int err; - err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags); + err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags); if (err) return err; mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map); - - if (flags & MLX5_LAG_FLAG_HASH_BASED) { + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, ldev->v2p_map); if (err) { @@ -527,9 +545,9 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, } } - err = mlx5_create_lag(ldev, tracker, shared_fdb, flags); + err = mlx5_create_lag(ldev, tracker, mode, flags); if (err) { - if (flags & MLX5_LAG_FLAG_HASH_BASED) + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) mlx5_lag_port_sel_destroy(ldev); if (roce_lag) mlx5_core_err(dev0, @@ -545,8 +563,8 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, !roce_lag) mlx5_lag_drop_rule_setup(ldev, tracker); - ldev->flags |= flags; - ldev->shared_fdb = shared_fdb; + ldev->mode = mode; + ldev->mode_flags = flags; return 0; } @@ -556,16 +574,17 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; bool roce_lag = __mlx5_lag_is_roce(ldev); - u8 flags = ldev->flags; + unsigned long flags = ldev->mode_flags; int err; - ldev->flags &= ~MLX5_LAG_MODE_FLAGS; + ldev->mode = MLX5_LAG_MODE_NONE; + ldev->mode_flags = 0; mlx5_lag_mp_reset(ldev); - if (ldev->shared_fdb) { + if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) { mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch, dev1->priv.eswitch); - ldev->shared_fdb = false; + clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); } MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); @@ -582,7 +601,7 @@ static int 
mlx5_deactivate_lag(struct mlx5_lag *ldev) return err; } - if (flags & MLX5_LAG_FLAG_HASH_BASED) + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) mlx5_lag_port_sel_destroy(ldev); if (mlx5_lag_has_drop_rule(ldev)) mlx5_lag_drop_rule_cleanup(ldev); @@ -658,9 +677,9 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) static void mlx5_disable_lag(struct mlx5_lag *ldev) { + bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; - bool shared_fdb = ldev->shared_fdb; bool roce_lag; int err; int i; @@ -759,8 +778,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_lag_remove_devices(ldev); err = mlx5_activate_lag(ldev, &tracker, - roce_lag ? MLX5_LAG_FLAG_ROCE : - MLX5_LAG_FLAG_SRIOV, + roce_lag ? MLX5_LAG_MODE_ROCE : + MLX5_LAG_MODE_SRIOV, shared_fdb); if (err) { if (shared_fdb || roce_lag) @@ -1156,7 +1175,7 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, mutex_lock(&ldev->lock); mlx5_ldev_remove_netdev(ldev, netdev); - ldev->flags &= ~MLX5_LAG_FLAG_READY; + clear_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags); lag_is_active = __mlx5_lag_is_active(ldev); mutex_unlock(&ldev->lock); @@ -1183,7 +1202,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, break; if (i >= ldev->ports) - ldev->flags |= MLX5_LAG_FLAG_READY; + set_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags); mutex_unlock(&ldev->lock); mlx5_queue_bond_work(ldev, 0); } @@ -1252,7 +1271,8 @@ bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev) spin_lock(&lag_lock); ldev = mlx5_lag_dev(dev); - res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb; + res = ldev && __mlx5_lag_is_sriov(ldev) && + test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); spin_unlock(&lag_lock); return res; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 46683b84ff84..244b548e1420 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -17,16 +17,20 @@ enum { }; enum { - MLX5_LAG_FLAG_ROCE = 1 << 0, - MLX5_LAG_FLAG_SRIOV = 1 << 1, - MLX5_LAG_FLAG_MULTIPATH = 1 << 2, - MLX5_LAG_FLAG_READY = 1 << 3, - MLX5_LAG_FLAG_HASH_BASED = 1 << 4, + MLX5_LAG_FLAG_NDEVS_READY, }; -#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\ - MLX5_LAG_FLAG_MULTIPATH | \ - MLX5_LAG_FLAG_HASH_BASED) +enum { + MLX5_LAG_MODE_FLAG_HASH_BASED, + MLX5_LAG_MODE_FLAG_SHARED_FDB, +}; + +enum mlx5_lag_mode { + MLX5_LAG_MODE_NONE, + MLX5_LAG_MODE_ROCE, + MLX5_LAG_MODE_SRIOV, + MLX5_LAG_MODE_MULTIPATH, +}; struct lag_func { struct mlx5_core_dev *dev; @@ -47,11 +51,12 @@ struct lag_tracker { * It serves both its phys functions. 
*/ struct mlx5_lag { - u8 flags; + enum mlx5_lag_mode mode; + unsigned long mode_flags; + unsigned long state_flags; u8 ports; u8 buckets; int mode_changes_in_progress; - bool shared_fdb; u8 v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS]; struct kref ref; struct lag_func pf[MLX5_MAX_PORTS]; @@ -74,25 +79,25 @@ mlx5_lag_dev(struct mlx5_core_dev *dev) static inline bool __mlx5_lag_is_active(struct mlx5_lag *ldev) { - return !!(ldev->flags & MLX5_LAG_MODE_FLAGS); + return ldev->mode != MLX5_LAG_MODE_NONE; } static inline bool mlx5_lag_is_ready(struct mlx5_lag *ldev) { - return ldev->flags & MLX5_LAG_FLAG_READY; + return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags); } void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker); int mlx5_activate_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - u8 flags, + enum mlx5_lag_mode mode, bool shared_fdb); int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, struct net_device *ndev); -char *get_str_port_sel_mode(u8 flags); +char *get_str_port_sel_mode(unsigned long flags); void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, u8 *ports, int *num_enabled); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c index d6c3e6dfd71f..0259a149a64c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c @@ -11,7 +11,7 @@ static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev) { - return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH); + return ldev->mode == MLX5_LAG_MODE_MULTIPATH; } static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev) @@ -179,7 +179,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, struct lag_tracker tracker; tracker = ldev->tracker; - mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH, false); + mlx5_activate_lag(ldev, &tracker, MLX5_LAG_MODE_MULTIPATH, false); } mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY); -- cgit v1.2.3 From a4a9c87ebb689c8c5e6609b8b4dda0b6da5b1ebe Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Sun, 6 Feb 2022 14:57:42 +0200 Subject: net/mlx5: Remove unused argument Argument ndev is not used in mlx5_handle_changeupper_event() Remove it. 
Signed-off-by: Eli Cohen Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 3eb195cba205..5c3900586d23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -850,7 +850,6 @@ static void mlx5_do_bond_work(struct work_struct *work) static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, struct lag_tracker *tracker, - struct net_device *ndev, struct netdev_notifier_changeupper_info *info) { struct net_device *upper = info->upper_dev, *ndev_tmp; @@ -1006,8 +1005,7 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, switch (event) { case NETDEV_CHANGEUPPER: - changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev, - ptr); + changed = mlx5_handle_changeupper_event(ldev, &tracker, ptr); break; case NETDEV_CHANGELOWERSTATE: changed = mlx5_handle_changelowerstate_event(ldev, &tracker, -- cgit v1.2.3 From 94db3317781922ba52722c58061e0e8517d4d80d Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Mon, 31 Jan 2022 07:49:51 +0200 Subject: net/mlx5: Support multiport eswitch mode Multiport eswitch mode is a LAG mode that allows to add rules that forward traffic to a specific physical port without being affected by LAG affinity configuration. This mode of operation is mutual exclusive with the other LAG modes used by multipath and bonding. To make the transition between the modes, we maintain a counter on the number of rules specifying one of the uplink representors as the target of mirred egress redirect action. An example of such rule would be: $ tc filter add dev enp8s0f0_0 prot all root flower dst_mac \ 00:11:22:33:44:55 action mirred egress redirect dev enp8s0f0 If the reference count just grows to one and LAG is not in use, we create the LAG in multiport eswitch mode. Other mode changes are not allowed while in this mode. When the reference count reaches zero, we destroy the LAG and let other modes be used if needed. logic also changed such that if forwarding to some uplink destination cannot be guaranteed, we fail the operation so the rule will eventually be in software and not in hardware. 
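A condensed sketch of the reference counting described above (illustrative names; error handling and the actual LAG activation are omitted): only the first mirred-to-uplink rule switches the LAG into multiport eswitch mode, and only the last removal tears it down:

    #include <linux/atomic.h>
    #include <linux/mutex.h>

    struct example_lag {
            struct mutex lock;              /* protects mode changes */
            atomic_t mpesw_rule_count;
            bool mpesw_active;
    };

    static int example_add_mpesw_rule(struct example_lag *ldev)
    {
            mutex_lock(&ldev->lock);
            if (atomic_add_return(1, &ldev->mpesw_rule_count) == 1)
                    ldev->mpesw_active = true;   /* the real code activates the LAG here */
            mutex_unlock(&ldev->lock);
            return 0;
    }

    static void example_del_mpesw_rule(struct example_lag *ldev)
    {
            mutex_lock(&ldev->lock);
            if (atomic_dec_return(&ldev->mpesw_rule_count) == 0)
                    ldev->mpesw_active = false;  /* the real code destroys the LAG here */
            mutex_unlock(&ldev->lock);
    }

While the count is non-zero, other mode changes are rejected, which is why rule insertion fails instead of silently falling back when forwarding to the requested uplink cannot be guaranteed.
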
Signed-off-by: Eli Cohen Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 14 +++ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 28 +++++- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 7 ++ .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 3 + .../net/ethernet/mellanox/mlx5/core/lag/debugfs.c | 17 ++-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 86 ++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 10 +- .../net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 101 +++++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/lag/mpesw.h | 26 ++++++ include/linux/mlx5/mlx5_ifc.h | 5 +- 11 files changed, 259 insertions(+), 40 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 7895ed7cc285..9ea867a45764 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -39,7 +39,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \ en_rep.o en/rep/bond.o en/mod_hdr.o \ - en/mapping.o + en/mapping.o lag/mpesw.o mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ lib/fs_chains.o en/tc_tun.o \ esw/indir_table.o en/tc_tun_encap.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c index 2b002c6a2e73..4ac7de3f6afa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c @@ -10,6 +10,7 @@ #include "en/tc_tun_encap.h" #include "en/tc_priv.h" #include "en_rep.h" +#include "lag/lag.h" static bool same_vf_reps(struct mlx5e_priv *priv, struct net_device *out_dev) @@ -215,6 +216,7 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, struct net_device *uplink_dev; struct mlx5e_priv *out_priv; struct mlx5_eswitch *esw; + bool is_uplink_rep; int *ifindexes; int if_count; int err; @@ -229,6 +231,10 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, parse_state->ifindexes[if_count] = out_dev->ifindex; parse_state->if_count++; + is_uplink_rep = mlx5e_eswitch_uplink_rep(out_dev); + err = mlx5_lag_do_mirred(priv->mdev, out_dev); + if (err) + return err; out_dev = get_fdb_out_dev(uplink_dev, out_dev); if (!out_dev) @@ -268,6 +274,14 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state, rpriv = out_priv->ppriv; esw_attr->dests[esw_attr->out_count].rep = rpriv->rep; esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev; + + /* If output device is bond master then rules are not explicit + * so we don't attempt to count them. 
+ */ + if (is_uplink_rep && MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) && + MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up)) + attr->lag.count = true; + esw_attr->out_count++; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ac0f73074f7a..49dea02a12d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1740,6 +1740,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, free_flow_post_acts(flow); + if (flow->attr->lag.count) + mlx5_lag_del_mpesw_rule(esw->dev); + kvfree(attr->esw_attr->rx_tun_attr); kvfree(attr->parse_attr); kfree(flow->attr); @@ -3788,12 +3791,25 @@ static bool is_lag_dev(struct mlx5e_priv *priv, same_hw_reps(priv, peer_netdev)); } +static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev) +{ + if (mlx5e_eswitch_uplink_rep(out_dev) && + MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) && + MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up)) + return true; + + return false; +} + bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, struct net_device *out_dev) { if (is_merged_eswitch_vfs(priv, out_dev)) return true; + if (is_multiport_eligible(priv, out_dev)) + return true; + if (is_lag_dev(priv, out_dev)) return true; @@ -4050,6 +4066,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5_core_dev *in_mdev) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct netlink_ext_ack *extack = f->common.extack; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow *flow; @@ -4085,17 +4102,26 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; + if (flow->attr->lag.count) { + err = mlx5_lag_add_mpesw_rule(esw->dev); + if (err) + goto err_free; + } + err = mlx5e_tc_add_fdb_flow(priv, flow, extack); complete_all(&flow->init_done); if (err) { if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev))) - goto err_free; + goto err_lag; add_unready_flow(flow); } return flow; +err_lag: + if (flow->attr->lag.count) + mlx5_lag_del_mpesw_rule(esw->dev); err_free: mlx5e_flow_put(priv, flow); out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index a80b00946f1b..e2a1250aeca1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -85,6 +85,13 @@ struct mlx5_flow_attr { u32 flags; struct list_head list; struct mlx5e_post_act_handle *post_act_handle; + struct { + /* Indicate whether the parsed flow should be counted for lag mode decision + * making + */ + bool count; + } lag; + /* keep this union last */ union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3b151332e2f8..217cac29057f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -49,6 +49,7 @@ #include "en_tc.h" #include "en/mapping.h" #include "devlink.h" +#include "lag/lag.h" #define mlx5_esw_for_each_rep(esw, i, rep) \ xa_for_each(&((esw)->offloads.vport_reps), i, rep) @@ -418,6 +419,8 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f dest[dest_idx].vport.vhca_id = 
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id); dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + if (mlx5_lag_mpesw_is_activated(esw->dev)) + dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; } if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) { if (pkt_reformat) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c index 6e7001c0cfd4..15e41dc84d53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c @@ -5,12 +5,13 @@ static char *get_str_mode_type(struct mlx5_lag *ldev) { - if (ldev->mode == MLX5_LAG_MODE_ROCE) - return "roce"; - if (ldev->mode == MLX5_LAG_MODE_SRIOV) - return "switchdev"; - if (ldev->mode == MLX5_LAG_MODE_MULTIPATH) - return "multipath"; + switch (ldev->mode) { + case MLX5_LAG_MODE_ROCE: return "roce"; + case MLX5_LAG_MODE_SRIOV: return "switchdev"; + case MLX5_LAG_MODE_MULTIPATH: return "multipath"; + case MLX5_LAG_MODE_MPESW: return "multiport_eswitch"; + default: return "invalid"; + } return NULL; } @@ -43,11 +44,11 @@ static int port_sel_mode_show(struct seq_file *file, void *priv) ldev = dev->priv.lag; mutex_lock(&ldev->lock); if (__mlx5_lag_is_active(ldev)) - mode = get_str_port_sel_mode(ldev->mode_flags); + mode = mlx5_get_str_port_sel_mode(ldev); else ret = -EINVAL; mutex_unlock(&ldev->lock); - if (ret || !mode) + if (ret) return ret; seq_printf(file, "%s\n", mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 5c3900586d23..552b6e26e701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -41,6 +41,7 @@ #include "esw/acl/ofld.h" #include "lag.h" #include "mp.h" +#include "mpesw.h" enum { MLX5_LAG_EGRESS_PORT_1 = 1, @@ -58,6 +59,9 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags) if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT; + if (mode == MLX5_LAG_MODE_MPESW) + return MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW; + return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY; } @@ -196,7 +200,8 @@ static void mlx5_ldev_free(struct kref *ref) if (ldev->nb.notifier_call) unregister_netdevice_notifier_net(&init_net, &ldev->nb); mlx5_lag_mp_cleanup(ldev); - cancel_delayed_work_sync(&ldev->bond_work); + mlx5_lag_mpesw_cleanup(ldev); + cancel_work_sync(&ldev->mpesw_work); destroy_workqueue(ldev->wq); mutex_destroy(&ldev->lock); kfree(ldev); @@ -242,6 +247,8 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev) if (err) mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", err); + + mlx5_lag_mpesw_init(ldev); ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports); ldev->buckets = 1; @@ -442,16 +449,19 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, return 0; } -static int mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, - struct lag_tracker *tracker, unsigned long *flags) +static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + enum mlx5_lag_mode mode, + unsigned long *flags) { struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; + if (mode == MLX5_LAG_MODE_MPESW) + return; + if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags); - - return 0; } static int mlx5_lag_set_flags(struct mlx5_lag 
*ldev, enum mlx5_lag_mode mode, @@ -467,14 +477,20 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, if (roce_lag) return mlx5_lag_set_port_sel_mode_roce(ldev, flags); - return mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, flags); + mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags); + return 0; } -char *get_str_port_sel_mode(unsigned long flags) +char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev) { - if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) - return "hash"; - return "queue_affinity"; + int port_sel_mode = get_port_sel_mode(ldev->mode, ldev->mode_flags); + + switch (port_sel_mode) { + case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity"; + case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: return "hash"; + case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW: return "mpesw"; + default: return "invalid"; + } } static int mlx5_create_lag(struct mlx5_lag *ldev, @@ -488,9 +504,10 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; int err; - mlx5_lag_print_mapping(dev0, ldev, tracker, flags); + if (tracker) + mlx5_lag_print_mapping(dev0, ldev, tracker, flags); mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", - shared_fdb, get_str_port_sel_mode(flags)); + shared_fdb, mlx5_get_str_port_sel_mode(ldev)); err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags); if (err) { @@ -526,22 +543,24 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, { bool roce_lag = mode == MLX5_LAG_MODE_ROCE; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; - unsigned long flags; + unsigned long flags = 0; int err; err = mlx5_lag_set_flags(ldev, mode, tracker, shared_fdb, &flags); if (err) return err; - mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map); - if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { - err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, - ldev->v2p_map); - if (err) { - mlx5_core_err(dev0, - "Failed to create LAG port selection(%d)\n", - err); - return err; + if (mode != MLX5_LAG_MODE_MPESW) { + mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->buckets, ldev->v2p_map); + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) { + err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, + ldev->v2p_map); + if (err) { + mlx5_core_err(dev0, + "Failed to create LAG port selection(%d)\n", + err); + return err; + } } } @@ -559,7 +578,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, return err; } - if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && + if (tracker && tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && !roce_lag) mlx5_lag_drop_rule_setup(ldev, tracker); @@ -675,7 +694,7 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) } } -static void mlx5_disable_lag(struct mlx5_lag *ldev) +void mlx5_disable_lag(struct mlx5_lag *ldev) { bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; @@ -712,7 +731,7 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) } } -static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) +bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; @@ -748,6 +767,18 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev) return roce_lag; } +static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond) +{ + return do_bond && __mlx5_lag_is_active(ldev) && + ldev->mode != 
MLX5_LAG_MODE_MPESW; +} + +static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond) +{ + return !do_bond && __mlx5_lag_is_active(ldev) && + ldev->mode != MLX5_LAG_MODE_MPESW; +} + static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; @@ -810,9 +841,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) return; } } - } else if (do_bond && __mlx5_lag_is_active(ldev)) { + } else if (mlx5_lag_should_modify_lag(ldev, do_bond)) { mlx5_modify_lag(ldev, &tracker); - } else if (!do_bond && __mlx5_lag_is_active(ldev)) { + } else if (mlx5_lag_should_disable_lag(ldev, do_bond)) { mlx5_disable_lag(ldev); } } @@ -986,6 +1017,7 @@ static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev, return 1; } +/* this handler is always registered to netdev events */ static int mlx5_lag_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 244b548e1420..72f70fad4641 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -10,6 +10,7 @@ #include "mlx5_core.h" #include "mp.h" #include "port_sel.h" +#include "mpesw.h" enum { MLX5_LAG_P1, @@ -30,6 +31,7 @@ enum mlx5_lag_mode { MLX5_LAG_MODE_ROCE, MLX5_LAG_MODE_SRIOV, MLX5_LAG_MODE_MULTIPATH, + MLX5_LAG_MODE_MPESW, }; struct lag_func { @@ -63,11 +65,13 @@ struct mlx5_lag { struct lag_tracker tracker; struct workqueue_struct *wq; struct delayed_work bond_work; + struct work_struct mpesw_work; struct notifier_block nb; struct lag_mp lag_mp; struct mlx5_lag_port_sel port_sel; /* Protect lag fields/state changes */ struct mutex lock; + struct lag_mpesw lag_mpesw; }; static inline struct mlx5_lag * @@ -96,12 +100,16 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, bool shared_fdb); int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, struct net_device *ndev); +bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev); +void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev); +int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev); -char *get_str_port_sel_mode(unsigned long flags); +char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev); void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports, u8 *ports, int *num_enabled); void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev); void mlx5_ldev_remove_debugfs(struct dentry *dbg); +void mlx5_disable_lag(struct mlx5_lag *ldev); #endif /* __MLX5_LAG_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c new file mode 100644 index 000000000000..ee4b25a50315 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#include +#include +#include "lag/lag.h" +#include "eswitch.h" +#include "lib/mlx5.h" + +void mlx5_mpesw_work(struct work_struct *work) +{ + struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work); + + mutex_lock(&ldev->lock); + mlx5_disable_lag(ldev); + mutex_unlock(&ldev->lock); +} + +static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev = dev->priv.lag; + + if (!queue_work(ldev->wq, &ldev->mpesw_work)) + mlx5_core_warn(dev, "failed to queue work\n"); +} + +void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev = dev->priv.lag; + + if (!ldev) + return; + + mutex_lock(&ldev->lock); + if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) && + ldev->mode == MLX5_LAG_MODE_MPESW) + mlx5_lag_disable_mpesw(dev); + mutex_unlock(&ldev->lock); +} + +int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev = dev->priv.lag; + bool shared_fdb; + int err = 0; + + if (!ldev) + return 0; + + mutex_lock(&ldev->lock); + if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1) + goto out; + + if (ldev->mode != MLX5_LAG_MODE_NONE) { + err = -EINVAL; + goto out; + } + shared_fdb = mlx5_shared_fdb_supported(ldev); + err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb); + if (err) + mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err); + +out: + mutex_unlock(&ldev->lock); + return err; +} + +int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev) +{ + struct mlx5_lag *ldev = mdev->priv.lag; + + if (!netif_is_bond_master(out_dev) || !ldev) + return 0; + + mutex_lock(&ldev->lock); + if (ldev->mode == MLX5_LAG_MODE_MPESW) { + mutex_unlock(&ldev->lock); + return -EOPNOTSUPP; + } + mutex_unlock(&ldev->lock); + return 0; +} + +bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev) +{ + bool ret; + + ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW; + return ret; +} + +void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) +{ + INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work); + atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0); +} + +void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) +{ + cancel_delayed_work_sync(&ldev->bond_work); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h new file mode 100644 index 000000000000..d39a02280e29 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef __MLX5_LAG_MPESW_H__ +#define __MLX5_LAG_MPESW_H__ + +#include "lag.h" +#include "mlx5_core.h" + +struct lag_mpesw { + struct work_struct mpesw_work; + atomic_t mpesw_rule_count; +}; + +void mlx5_mpesw_work(struct work_struct *work); +int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev); +bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev); +#if IS_ENABLED(CONFIG_MLX5_ESWITCH) +void mlx5_lag_mpesw_init(struct mlx5_lag *ldev); +void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev); +#else +void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) {} +void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) {} +#endif + +#endif /* __MLX5_LAG_MPESW_H__ */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 7bab3e51c61e..78b3d3465dd7 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1359,7 +1359,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vhca_resource_manager[0x1]; u8 hca_cap_2[0x1]; - u8 reserved_at_21[0x1]; + u8 create_lag_when_not_master_up[0x1]; u8 dtor[0x1]; u8 event_on_vhca_state_teardown_request[0x1]; u8 event_on_vhca_state_in_use[0x1]; @@ -10816,7 +10816,8 @@ struct mlx5_ifc_dcbx_param_bits { enum { MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0, - MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT, + MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT = 1, + MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW = 2, }; struct mlx5_ifc_lagc_bits { -- cgit v1.2.3 From c1918196427b917d1fbf064b07538410807f8b03 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 17 May 2022 12:05:05 +0300 Subject: iwlwifi: pcie: simplify MSI-X cause mapping We're currently manually encoding a calculation here since the HW just maps all the bits of specific registers to specific offsets, which led to the bug fixed here previously with the Bz SW_ERROR interrupt. Clean up the code to only know about the mapping offset (-16 or 16 depending on the register) to avoid such issues in the future. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120044.19abe9a4d171.I934356911277f9b2a955808763f317986f69a461@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 48 +++++++++++++++---------- 1 file changed, 29 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 8be3c3c8c68b..6fc69c42f36e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1085,34 +1085,44 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) } struct iwl_causes_list { - u32 cause_num; - u32 mask_reg; + u16 mask_reg; + u8 bit; u8 addr; }; +#define CAUSE(reg, mask) \ + { \ + .mask_reg = reg, \ + .bit = ilog2(mask), \ + .addr = ilog2(mask) + \ + ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \ + (reg) == CSR_MSIX_HW_INT_MASK_AD ? 
16 : \ + 0xffff), /* causes overflow warning */ \ + } + static const struct iwl_causes_list causes_list_common[] = { - {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, - {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, - {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, - {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, - {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, - {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, - {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12}, - {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, - {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, - {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, - {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, - {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, - {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, - {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, + CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM), + CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM), + CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D), + CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR), + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP), }; static const struct iwl_causes_list causes_list_pre_bz[] = { - {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR), }; static const struct iwl_causes_list causes_list_bz[] = { - {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15}, + CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ), }; static void iwl_pcie_map_list(struct iwl_trans *trans, @@ -1124,7 +1134,7 @@ static void iwl_pcie_map_list(struct iwl_trans *trans, for (i = 0; i < arr_size; i++) { iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); iwl_clear_bit(trans, causes[i].mask_reg, - causes[i].cause_num); + BIT(causes[i].bit)); } } -- cgit v1.2.3 From 537b76d26cbbeb696ec7b47536fc9ce58f5ea22b Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Tue, 17 May 2022 12:05:06 +0300 Subject: iwlwifi: mvm: use NULL instead of ERR_PTR when parsing wowlan status We anyway don't differentiate between the errors so it is pointless, returning NULL will be simpler in this case. 
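For contrast, a small illustration of the two return conventions (hypothetical helpers, not the iwlwifi parsers); since no caller ever consumes the encoded error value, a plain NULL check is all that is needed:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct example_status { u32 wakeup_reasons; };

    /* Old style: callers must use IS_ERR() although the code is ignored. */
    static struct example_status *example_parse_err_ptr(const void *buf, size_t len)
    {
            if (len < sizeof(struct example_status))
                    return ERR_PTR(-EIO);
            return kmemdup(buf, len, GFP_KERNEL) ?: ERR_PTR(-ENOMEM);
    }

    /* New style: a NULL check is enough. */
    static struct example_status *example_parse_null(const void *buf, size_t len)
    {
            if (len < sizeof(struct example_status))
                    return NULL;
            return kmemdup(buf, len, GFP_KERNEL);
    }
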
Signed-off-by: Haim Dreyfuss Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120044.78a7651327bb.I77480de7c26db850680f96a3440fb6a1b45dd9d2@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index bcc4ed20fe5b..61f9136a333d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1956,18 +1956,18 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ \ if (len < sizeof(*data)) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ - return ERR_PTR(-EIO); \ + return NULL; \ } \ \ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ if (len != sizeof(*data) + data_size) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ - return ERR_PTR(-EIO); \ + return NULL; \ } \ \ status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ if (!status) \ - return ERR_PTR(-ENOMEM); \ + return NULL; \ \ /* copy all the common fields */ \ status->replay_ctr = le64_to_cpu(data->replay_ctr); \ @@ -2097,7 +2097,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > @@ -2128,7 +2128,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc); @@ -2141,7 +2141,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) * difference is only in a few not used (reserved) fields. 
*/ status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc); @@ -2153,7 +2153,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc); @@ -2165,7 +2165,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) IWL_ERR(mvm, "Firmware advertises unknown WoWLAN status response %d!\n", notif_ver); - status = ERR_PTR(-EIO); + status = NULL; } out_free_resp: @@ -2203,7 +2203,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_ap_sta; status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); - if (IS_ERR(status)) + if (!status) goto out_unlock; IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", @@ -2370,7 +2370,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, int i, n_matches, ret; status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); - if (!IS_ERR(status)) { + if (status) { reasons = status->wakeup_reasons; kfree(status); } -- cgit v1.2.3 From 51e073c23b46236d8dfbc1cbf7d95b0a5ff6e6e7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 17 May 2022 12:05:07 +0300 Subject: iwlwifi: mvm: clean up authorized condition We track in mvmvif->authorized when the AP STA becomes authorized and no longer authorized, so we don't need the complex condition with station lookup. Simplify the code. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120044.41f528383a6b.I1cdf165581b781c53c8e6ac8779a2282b1f67c59@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 5aa4520b70ac..e7f18f549ca9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -567,7 +567,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && !force_assoc_off) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u8 ap_sta_id = mvmvif->ap_sta_id; u32 dtim_offs; /* @@ -614,24 +613,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, * allow multicast data frames only as long as the station is * authorized, i.e., GTK keys are already installed (if needed) */ - if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { - struct ieee80211_sta *sta; - - rcu_read_lock(); - - sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); - if (!IS_ERR_OR_NULL(sta)) { - struct iwl_mvm_sta *mvmsta = - iwl_mvm_sta_from_mac80211(sta); - - if (mvmsta->sta_state == - IEEE80211_STA_AUTHORIZED) - cmd.filter_flags |= - cpu_to_le32(MAC_FILTER_ACCEPT_GRP); - } - - rcu_read_unlock(); - } + if (mvmvif->authorized) + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP); } else { 
ctxt_sta->is_assoc = cpu_to_le32(0); -- cgit v1.2.3 From d1f6530c3e373ddd7c76b05646052a27eead14ad Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 17 May 2022 12:05:08 +0300 Subject: iwlwifi: fw: init SAR GEO table only if data is present When no table data was read from ACPI, then filling the data and returning success here will fill zero values, which means transmit power will be limited to 0 dBm. This is clearly not intended. Return an error from iwl_sar_geo_init() if there's no data to fill into the command structure. Cc: stable@vger.kernel.org Signed-off-by: Johannes Berg Fixes: 78a19d5285d9 ("iwlwifi: mvm: Read the PPAG and SAR tables at INIT stage") Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120044.bc45923b74e9.Id2b4362234b7f8ced82c591b95d4075dd2ec12f4@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 33aae639ad37..e6d64152c81a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -937,6 +937,9 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, { int i, j; + if (!fwrt->geo_enabled) + return -ENODATA; + if (!iwl_sar_geo_support(fwrt)) return -EOPNOTSUPP; -- cgit v1.2.3 From 9d096e3d3061dbf4ee10e2b59fc2c06e05bdb997 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 17 May 2022 12:05:09 +0300 Subject: iwlwifi: mvm: fix assert 1F04 upon reconfig When we reconfig we must not send the MAC_POWER command that relates to a MAC that was not yet added to the firmware. Ignore those in the iterator. Cc: stable@vger.kernel.org Signed-off-by: Emmanuel Grumbach Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120044.ed2ffc8ce732.If786e19512d0da4334a6382ea6148703422c7d7b@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index b2ea2fca5376..b9bd81242b21 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -563,6 +563,9 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, struct iwl_power_vifs *power_iterator = _data; bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX; + if (!mvmvif->uploaded) + return; + switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: break; -- cgit v1.2.3 From 184f10db5f8f15902ac8687b26507b558ac3204d Mon Sep 17 00:00:00 2001 From: Mordechay Goodstein Date: Tue, 17 May 2022 12:05:10 +0300 Subject: iwlwifi: mvm: add OTP info in case of init failure This helps to understand HW issues that can happen while initializing the nic. 
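The hunk below folds the repeated IWL_ERR() calls into a small helper macro built on C preprocessor stringification, where #reg_name expands to the literal register name. A throwaway userspace sketch of that idiom (PRINT_REG, read_reg() and the register constants here are made-up placeholders, not iwlwifi symbols):

#include <stdio.h>

#define REG_OTP_CFG_ADDR 0x1000
#define REG_OTP_CFG_DATA 0x1004

/* stand-in for the real register read */
static unsigned int read_reg(unsigned int addr)
{
	return addr ^ 0xdeadbeefu;
}

/* '#reg' stringifies the argument, so the printed label always matches the name */
#define PRINT_REG(reg) printf(#reg ": 0x%x\n", read_reg(reg))

int main(void)
{
	PRINT_REG(REG_OTP_CFG_ADDR);	/* prints "REG_OTP_CFG_ADDR: 0x..." */
	PRINT_REG(REG_OTP_CFG_DATA);
	return 0;
}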
Signed-off-by: Mordechay Goodstein Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120045.48464938b27a.I9b381f0da5e0636ad6a5f6c13f98edb9031b50fb@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 15 +++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index a22788a68168..157d1f31c487 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -389,6 +389,8 @@ enum { #define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c #define WFPM_ARC1_PD_NOTIFICATION 0xa03044 #define HPM_SECONDARY_DEVICE_STATE 0xa03404 +#define WFPM_MAC_OTP_CFG7_ADDR 0xa03338 +#define WFPM_MAC_OTP_CFG7_DATA 0xa0333c /* For UMAG_GEN_HW_STATUS reg check */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index e842816134f1..f041e77af059 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -287,6 +287,9 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) { +#define IWL_FW_PRINT_REG_INFO(reg_name) \ + IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name)) + struct iwl_trans *trans = mvm->trans; enum iwl_device_family device_family = trans->trans_cfg->device_family; @@ -294,15 +297,15 @@ static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) return; if (device_family <= IWL_DEVICE_FAMILY_9000) - IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n", - iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION)); + IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION); else - IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n", - iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION)); + IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION); - IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n", - iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE)); + IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE); + /* print OPT info */ + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR); + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA); } static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, -- cgit v1.2.3 From 147eb05f24e6f603dc2dc7f2e0675ba1707c538f Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 17 May 2022 12:05:11 +0300 Subject: iwlwifi: mvm: always tell the firmware to accept MCAST frames in BSS Make the firmware's life easier and always accept MCAST frames. If needed, drop them in the driver. We need to filter out MCAST frames in order not to have false positives in the decryption check. If we accept MCAST frames before we have the GKT installed, we'll end up complaining that we can't decrypt the frame. Implement the same filtering, but in the driver. 
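A condensed, self-contained sketch of the driver-side filter the Rx-path hunk further below adds (plain C with hypothetical helper names; the real code uses vif->type, mvmvif->authorized and is_multicast_ether_addr()): on a station interface that is not yet authorized, any frame whose destination address has the multicast bit set (the least-significant bit of the first octet) is dropped before the decryption sanity check can complain about it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the kernel's is_multicast_ether_addr(): LSB of the first octet */
static bool is_mcast(const uint8_t da[6])
{
	return da[0] & 0x01;
}

/* drop MCAST data until the station interface is authorized (GTK installed) */
static bool should_drop(bool station_vif, bool authorized, const uint8_t da[6])
{
	return station_vif && !authorized && is_mcast(da);
}

int main(void)
{
	const uint8_t mcast_da[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("drop before auth: %d, after auth: %d\n",
	       should_drop(true, false, mcast_da),
	       should_drop(true, true, mcast_da));
	return 0;
}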
Signed-off-by: Emmanuel Grumbach Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120045.479956a46317.I21fac7ede9eca85a662671d694872898df884f0b@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 13 ++++--- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 44 ++++++++++++++++------- 2 files changed, 38 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index e7f18f549ca9..56fa20596f16 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -552,6 +552,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); + /* + * We always want to hear MCAST frames, if we're not authorized yet, + * we'll drop them. + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + if (vif->p2p) { struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; @@ -608,13 +614,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) ctxt_sta->data_policy |= cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE); - - /* - * allow multicast data frames only as long as the station is - * authorized, i.e., GTK keys are already installed (if needed) - */ - if (mvmvif->authorized) - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP); } else { ctxt_sta->is_assoc = cpu_to_le32(0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 78198da7e55b..49ca1e168fc5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -326,17 +326,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); - /* - * drop the packet if it has failed being decrypted by HW - */ - if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, - &crypt_len)) { - IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", - rx_pkt_status); - kfree_skb(skb); - return; - } - /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. @@ -386,6 +375,37 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } + if (sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_vif *vif = mvmsta->vif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* + * Don't even try to decrypt a MCAST frame that was received + * before the managed vif is authorized, we'd fail anyway. 
+ */ + if (vif->type == NL80211_IFTYPE_STATION && + !mvmvif->authorized && + is_multicast_ether_addr(hdr->addr1)) { + IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n"); + kfree_skb(skb); + rcu_read_unlock(); + return; + } + } + + /* + * drop the packet if it has failed being decrypted by HW + */ + if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, + &crypt_len)) { + IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", + rx_pkt_status); + kfree_skb(skb); + rcu_read_unlock(); + return; + } + if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = -- cgit v1.2.3 From 98c0de7b26a1872f000ffae5661d2709b1d01932 Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Tue, 17 May 2022 12:05:12 +0300 Subject: iwlwifi: mvm: remove vif_count We used to count the number of ieee80211_vifs in mvm. This was needed for the legacy PM API, which is no longer supported. Remove it. Signed-off-by: Miri Korenblit Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120045.8c91ae023b15.Ia6145e4930b1d28f3fcedc316b4f177295b00557@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 17 +++-------------- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 - 2 files changed, 3 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 4fda6c3ba9f3..bb9bd2165355 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -976,7 +976,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ieee80211_wake_queues(mvm->hw); - mvm->vif_count = 0; mvm->rx_ba_sessions = 0; mvm->fwrt.dump.conf = FW_DBG_INVALID; mvm->monitor_on = false; @@ -1380,10 +1379,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); - /* Counting number of interfaces is needed for legacy PM */ - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count++; - /* * The AP binding flow can be done only after the beacon * template is configured (which happens only in the mac80211 @@ -1400,7 +1395,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) { IWL_ERR(mvm, "Failed to allocate bcast sta\n"); - goto out_release; + goto out_unlock; } /* @@ -1411,7 +1406,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 0, vif->type, IWL_STA_MULTICAST); if (ret) - goto out_release; + goto out_unlock; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; @@ -1421,7 +1416,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) - goto out_release; + goto out_unlock; ret = iwl_mvm_power_update_mac(mvm); if (ret) @@ -1498,9 +1493,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_remove_mac: mvmvif->phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); - out_release: - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; out_unlock: mutex_unlock(&mvm->mutex); @@ -1582,9 +1574,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvmvif->phy_ctxt = NULL; } - if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; - iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 
index c6bc85d4600a..bf35e130c876 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -934,7 +934,6 @@ struct iwl_mvm { unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; u8 fw_key_deleted[STA_KEY_MAX_NUM]; - u8 vif_count; struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; /* -1 for always, 0 for never, >0 for that many times */ -- cgit v1.2.3 From 55cf10488d7a9fa1b1b473a5e44a80666932e094 Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Tue, 17 May 2022 12:05:13 +0300 Subject: iwlwifi: mei: clear the sap data header before sending The SAP data header has some fields that are marked as reserved but are actually in use by CSME. Clear those fields before sending the data to avoid having random values in those fields. Cc: stable@vger.kernel.org Signed-off-by: Avraham Stern Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120045.8dd3423cf683.I02976028eaa6aab395cb2e701fa7127212762eb7@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mei/main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index b4f45234cfc8..3d2eb15a9662 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -493,6 +493,7 @@ void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx) if (cb_tx) { struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr)); + memset(cb_hdr, 0, sizeof(*cb_hdr)); cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET); cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr)); cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); -- cgit v1.2.3 From 78488a64aea94a3336ee97f345c1496e9bc5ebdf Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 17 May 2022 12:05:14 +0300 Subject: iwlwifi: mei: fix potential NULL-ptr deref If SKB allocation fails, continue rather than using the NULL pointer. Coverity CID: 1497650 Cc: stable@vger.kernel.org Fixes: 2da4366f9e2c ("iwlwifi: mei: add the driver to allow cooperation with CSME") Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20220517120045.90c1b1fd534e.Ibb42463e74d0ec7d36ec81df22e171ae1f6268b0@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mei/main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c index 3d2eb15a9662..357f14626cf4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/main.c +++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c @@ -1020,6 +1020,8 @@ static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev, /* We need enough room for the WiFi header + SNAP + IV */ skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN); + if (!skb) + continue; skb_reserve(skb, QOS_HDR_IV_SNAP_LEN); ethhdr = skb_push(skb, sizeof(*ethhdr)); -- cgit v1.2.3 From a30bf805592ed56af7a9482b4339d9c9bb092784 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:25 +0530 Subject: net: dsa: microchip: ksz8795: update the port_cnt value in ksz_chip_data The port_cnt value in the structure is not used in the switch_init. Instead it uses the fls(chip->cpu_port), this is due to one of port in the ksz8794 unavailable. 
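For reference, the arithmetic behind that fls() usage, shown as a throwaway userspace sketch (fls_demo() mimics the kernel's fls(), which returns the 1-based index of the highest set bit):

#include <stdio.h>

/* mimics the kernel's fls(): 1-based index of the most-significant set bit */
static int fls_demo(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	printf("fls(0x10) = %d\n", fls_demo(0x10));	/* KSZ879x cpu_ports -> 5 */
	printf("fls(0x04) = %d\n", fls_demo(0x04));	/* KSZ8863/8873 cpu_ports -> 3 */
	return 0;
}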
The cpu_port for the 8794 is 0x10, fls(0x10) = 5, hence updating it directly in the ksz_chip_data structure in order to same with all the other switches in ksz8795.c and ksz9477.c files. Signed-off-by: Arun Ramadoss Reviewed-by: Florian Fainelli Reviewed-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/microchip/ksz8795.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index f91deea9368e..83bcabf2dc54 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1607,6 +1607,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = { * KSZ8794 0,1,2 4 * KSZ8795 0,1,2,3 4 * KSZ8765 0,1,2,3 4 + * port_cnt is configured as 5, even though it is 4 */ .chip_id = 0x8794, .dev_name = "KSZ8794", @@ -1614,7 +1615,7 @@ static const struct ksz_chip_data ksz8_switch_chips[] = { .num_alus = 0, .num_statics = 8, .cpu_ports = 0x10, /* can be configured as cpu port */ - .port_cnt = 4, /* total cpu and user ports */ + .port_cnt = 5, /* total cpu and user ports */ .ksz87xx_eee_link_erratum = true, }, { @@ -1653,7 +1654,7 @@ static int ksz8_switch_init(struct ksz_device *dev) dev->num_vlans = chip->num_vlans; dev->num_alus = chip->num_alus; dev->num_statics = chip->num_statics; - dev->port_cnt = fls(chip->cpu_ports); + dev->port_cnt = chip->port_cnt; dev->cpu_port = fls(chip->cpu_ports) - 1; dev->phy_port_cnt = dev->port_cnt - 1; dev->cpu_ports = chip->cpu_ports; -- cgit v1.2.3 From 462d525018f068338a4d0b571aa515e26a8320e7 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:26 +0530 Subject: net: dsa: microchip: move ksz_chip_data to ksz_common This patch moves the ksz_chip_data in ksz8795 and ksz9477 to ksz_common. At present, the dev->chip_id is iterated with the ksz_chip_data and then copy its value to the ksz_dev structure. These values are declared as constant. Instead of copying the values and referencing it, this patch update the dev->info to the ksz_chip_data based on the chip_id in the init function. And also update the ksz_chip_data values for the LAN937x based switches. Signed-off-by: Arun Ramadoss Reviewed-by: Florian Fainelli Reviewed-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/microchip/ksz8795.c | 114 +++----------------- drivers/net/dsa/microchip/ksz9477.c | 108 ++++--------------- drivers/net/dsa/microchip/ksz_common.c | 190 +++++++++++++++++++++++++++++++-- drivers/net/dsa/microchip/ksz_common.h | 53 +++++++-- 4 files changed, 259 insertions(+), 206 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 83bcabf2dc54..b6032b65afc2 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1036,13 +1036,13 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) int first, index, cnt; struct ksz_port *p; - if ((uint)port < dev->port_cnt) { + if ((uint)port < dev->info->port_cnt) { first = port; cnt = port + 1; } else { /* Flush all ports. */ first = 0; - cnt = dev->port_cnt; + cnt = dev->info->port_cnt; } for (index = first; index < cnt; index++) { p = &dev->ports[index]; @@ -1118,7 +1118,7 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port, * Remove Tag flag to be changed, unless there are no * other VLANs currently configured. 
*/ - for (vid = 1; vid < dev->num_vlans; ++vid) { + for (vid = 1; vid < dev->info->num_vlans; ++vid) { /* Skip the VID we are going to add or reconfigure */ if (vid == vlan->vid) continue; @@ -1389,7 +1389,7 @@ static int ksz8_handle_global_errata(struct dsa_switch *ds) * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in * the link dropping. */ - if (dev->ksz87xx_eee_link_erratum) + if (dev->info->ksz87xx_eee_link_erratum) ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0); return ret; @@ -1402,7 +1402,7 @@ static int ksz8_setup(struct dsa_switch *ds) int i, ret = 0; dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), - dev->num_vlans, GFP_KERNEL); + dev->info->num_vlans, GFP_KERNEL); if (!dev->vlan_cache) return -ENOMEM; @@ -1446,7 +1446,7 @@ static int ksz8_setup(struct dsa_switch *ds) (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100); - for (i = 0; i < (dev->num_vlans / 4); i++) + for (i = 0; i < (dev->info->num_vlans / 4); i++) ksz8_r_vlan_entries(dev, i); /* Setup STP address for STP operation. */ @@ -1571,74 +1571,6 @@ static int ksz8_switch_detect(struct ksz_device *dev) return 0; } -struct ksz_chip_data { - u16 chip_id; - const char *dev_name; - int num_vlans; - int num_alus; - int num_statics; - int cpu_ports; - int port_cnt; - bool ksz87xx_eee_link_erratum; -}; - -static const struct ksz_chip_data ksz8_switch_chips[] = { - { - .chip_id = 0x8795, - .dev_name = "KSZ8795", - .num_vlans = 4096, - .num_alus = 0, - .num_statics = 8, - .cpu_ports = 0x10, /* can be configured as cpu port */ - .port_cnt = 5, /* total cpu and user ports */ - .ksz87xx_eee_link_erratum = true, - }, - { - /* - * WARNING - * ======= - * KSZ8794 is similar to KSZ8795, except the port map - * contains a gap between external and CPU ports, the - * port map is NOT continuous. The per-port register - * map is shifted accordingly too, i.e. registers at - * offset 0x40 are NOT used on KSZ8794 and they ARE - * used on KSZ8795 for external port 3. 
- * external cpu - * KSZ8794 0,1,2 4 - * KSZ8795 0,1,2,3 4 - * KSZ8765 0,1,2,3 4 - * port_cnt is configured as 5, even though it is 4 - */ - .chip_id = 0x8794, - .dev_name = "KSZ8794", - .num_vlans = 4096, - .num_alus = 0, - .num_statics = 8, - .cpu_ports = 0x10, /* can be configured as cpu port */ - .port_cnt = 5, /* total cpu and user ports */ - .ksz87xx_eee_link_erratum = true, - }, - { - .chip_id = 0x8765, - .dev_name = "KSZ8765", - .num_vlans = 4096, - .num_alus = 0, - .num_statics = 8, - .cpu_ports = 0x10, /* can be configured as cpu port */ - .port_cnt = 5, /* total cpu and user ports */ - .ksz87xx_eee_link_erratum = true, - }, - { - .chip_id = 0x8830, - .dev_name = "KSZ8863/KSZ8873", - .num_vlans = 16, - .num_alus = 0, - .num_statics = 8, - .cpu_ports = 0x4, /* can be configured as cpu port */ - .port_cnt = 3, - }, -}; - static int ksz8_switch_init(struct ksz_device *dev) { struct ksz8 *ksz8 = dev->priv; @@ -1646,30 +1578,10 @@ static int ksz8_switch_init(struct ksz_device *dev) dev->ds->ops = &ksz8_switch_ops; - for (i = 0; i < ARRAY_SIZE(ksz8_switch_chips); i++) { - const struct ksz_chip_data *chip = &ksz8_switch_chips[i]; - - if (dev->chip_id == chip->chip_id) { - dev->name = chip->dev_name; - dev->num_vlans = chip->num_vlans; - dev->num_alus = chip->num_alus; - dev->num_statics = chip->num_statics; - dev->port_cnt = chip->port_cnt; - dev->cpu_port = fls(chip->cpu_ports) - 1; - dev->phy_port_cnt = dev->port_cnt - 1; - dev->cpu_ports = chip->cpu_ports; - dev->host_mask = chip->cpu_ports; - dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | - chip->cpu_ports; - dev->ksz87xx_eee_link_erratum = - chip->ksz87xx_eee_link_erratum; - break; - } - } - - /* no switch found */ - if (!dev->cpu_ports) - return -ENODEV; + dev->cpu_port = fls(dev->info->cpu_ports) - 1; + dev->host_mask = dev->info->cpu_ports; + dev->phy_port_cnt = dev->info->port_cnt - 1; + dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports; if (ksz_is_ksz88x3(dev)) { ksz8->regs = ksz8863_regs; @@ -1688,11 +1600,11 @@ static int ksz8_switch_init(struct ksz_device *dev) dev->reg_mib_cnt = MIB_COUNTER_NUM; dev->ports = devm_kzalloc(dev->dev, - dev->port_cnt * sizeof(struct ksz_port), + dev->info->port_cnt * sizeof(struct ksz_port), GFP_KERNEL); if (!dev->ports) return -ENOMEM; - for (i = 0; i < dev->port_cnt; i++) { + for (i = 0; i < dev->info->port_cnt; i++) { mutex_init(&dev->ports[i].mib.cnt_mutex); dev->ports[i].mib.counters = devm_kzalloc(dev->dev, @@ -1704,7 +1616,7 @@ static int ksz8_switch_init(struct ksz_device *dev) } /* set the real number of ports */ - dev->ds->num_ports = dev->port_cnt; + dev->ds->num_ports = dev->info->port_cnt; /* We rely on software untagging on the CPU port, so that we * can support both tagged and untagged VLANs diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 61dd0fa97748..c712a0011367 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -100,7 +100,7 @@ static int ksz9477_change_mtu(struct dsa_switch *ds, int port, int mtu) /* Cache the per-port MTU setting */ dev->ports[port].max_frame = frame_size; - for (i = 0; i < dev->port_cnt; i++) + for (i = 0; i < dev->info->port_cnt; i++) max_frame = max(max_frame, dev->ports[i].max_frame); return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, @@ -434,7 +434,7 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S, SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S); - if (port < dev->port_cnt) 
{ + if (port < dev->info->port_cnt) { /* flush individual port */ ksz_pread8(dev, port, P_STP_CTRL, &data); if (!(data & PORT_LEARN_DISABLE)) @@ -756,7 +756,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port, mutex_lock(&dev->alu_mutex); - for (index = 0; index < dev->num_statics; index++) { + for (index = 0; index < dev->info->num_statics; index++) { /* find empty slot first */ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_READ | ALU_STAT_START; @@ -787,7 +787,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port, } /* no available entry */ - if (index == dev->num_statics) { + if (index == dev->info->num_statics) { err = -ENOSPC; goto exit; } @@ -832,7 +832,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port, mutex_lock(&dev->alu_mutex); - for (index = 0; index < dev->num_statics; index++) { + for (index = 0; index < dev->info->num_statics; index++) { /* find empty slot first */ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_READ | ALU_STAT_START; @@ -861,7 +861,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port, } /* no available entry */ - if (index == dev->num_statics) + if (index == dev->info->num_statics) goto exit; /* clear port */ @@ -903,7 +903,7 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port, * Check if any of the port is already set for sniffing * If yes, instruct the user to remove the previous entry & exit */ - for (p = 0; p < dev->port_cnt; p++) { + for (p = 0; p < dev->info->port_cnt; p++) { /* Skip the current sniffing port */ if (p == mirror->to_local_port) continue; @@ -946,7 +946,7 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, /* Check if any of the port is still referring to sniffer port */ - for (p = 0; p < dev->port_cnt; p++) { + for (p = 0; p < dev->info->port_cnt; p++) { ksz_pread8(dev, p, P_MIRROR_CTRL, &data); if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) { @@ -1194,7 +1194,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, false); - if (dev->phy_errata_9477) + if (dev->info->phy_errata_9477) ksz9477_phy_errata_setup(dev, port); } else { /* force flow control */ @@ -1259,8 +1259,9 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) struct ksz_port *p; int i; - for (i = 0; i < dev->port_cnt; i++) { - if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) { + for (i = 0; i < dev->info->port_cnt; i++) { + if (dsa_is_cpu_port(ds, i) && + (dev->info->cpu_ports & (1 << i))) { phy_interface_t interface; const char *prev_msg; const char *prev_mode; @@ -1304,7 +1305,7 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) } } - for (i = 0; i < dev->port_cnt; i++) { + for (i = 0; i < dev->info->port_cnt; i++) { if (i == dev->cpu_port) continue; p = &dev->ports[i]; @@ -1328,7 +1329,7 @@ static int ksz9477_setup(struct dsa_switch *ds) int ret = 0; dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), - dev->num_vlans, GFP_KERNEL); + dev->info->num_vlans, GFP_KERNEL); if (!dev->vlan_cache) return -ENOMEM; @@ -1470,96 +1471,23 @@ static int ksz9477_switch_detect(struct ksz_device *dev) return 0; } -struct ksz_chip_data { - u32 chip_id; - const char *dev_name; - int num_vlans; - int num_alus; - int num_statics; - int cpu_ports; - int port_cnt; - bool phy_errata_9477; -}; - -static const struct ksz_chip_data ksz9477_switch_chips[] = { - { - .chip_id = 0x00947700, - .dev_name = "KSZ9477", - .num_vlans = 4096, - .num_alus = 4096, - .num_statics = 
16, - .cpu_ports = 0x7F, /* can be configured as cpu port */ - .port_cnt = 7, /* total physical port count */ - .phy_errata_9477 = true, - }, - { - .chip_id = 0x00989700, - .dev_name = "KSZ9897", - .num_vlans = 4096, - .num_alus = 4096, - .num_statics = 16, - .cpu_ports = 0x7F, /* can be configured as cpu port */ - .port_cnt = 7, /* total physical port count */ - .phy_errata_9477 = true, - }, - { - .chip_id = 0x00989300, - .dev_name = "KSZ9893", - .num_vlans = 4096, - .num_alus = 4096, - .num_statics = 16, - .cpu_ports = 0x07, /* can be configured as cpu port */ - .port_cnt = 3, /* total port count */ - }, - { - .chip_id = 0x00956700, - .dev_name = "KSZ9567", - .num_vlans = 4096, - .num_alus = 4096, - .num_statics = 16, - .cpu_ports = 0x7F, /* can be configured as cpu port */ - .port_cnt = 7, /* total physical port count */ - .phy_errata_9477 = true, - }, -}; - static int ksz9477_switch_init(struct ksz_device *dev) { int i; dev->ds->ops = &ksz9477_switch_ops; - for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) { - const struct ksz_chip_data *chip = &ksz9477_switch_chips[i]; - - if (dev->chip_id == chip->chip_id) { - dev->name = chip->dev_name; - dev->num_vlans = chip->num_vlans; - dev->num_alus = chip->num_alus; - dev->num_statics = chip->num_statics; - dev->port_cnt = chip->port_cnt; - dev->cpu_ports = chip->cpu_ports; - dev->phy_errata_9477 = chip->phy_errata_9477; - - break; - } - } - - /* no switch found */ - if (!dev->port_cnt) - return -ENODEV; - - dev->port_mask = (1 << dev->port_cnt) - 1; + dev->port_mask = (1 << dev->info->port_cnt) - 1; dev->reg_mib_cnt = SWITCH_COUNTER_NUM; dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM; dev->ports = devm_kzalloc(dev->dev, - dev->port_cnt * sizeof(struct ksz_port), + dev->info->port_cnt * sizeof(struct ksz_port), GFP_KERNEL); if (!dev->ports) return -ENOMEM; - for (i = 0; i < dev->port_cnt; i++) { + for (i = 0; i < dev->info->port_cnt; i++) { spin_lock_init(&dev->ports[i].mib.stats64_lock); mutex_init(&dev->ports[i].mib.cnt_mutex); dev->ports[i].mib.counters = @@ -1572,7 +1500,7 @@ static int ksz9477_switch_init(struct ksz_device *dev) } /* set the real number of ports */ - dev->ds->num_ports = dev->port_cnt; + dev->ds->num_ports = dev->info->port_cnt; return 0; } diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 10f127b09e58..ebb4753051d4 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -59,6 +59,172 @@ struct ksz_stats_raw { u64 tx_discards; }; +static const struct ksz_chip_data ksz_switch_chips[] = { + [KSZ8795] = { + .chip_id = KSZ8795_CHIP_ID, + .dev_name = "KSZ8795", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 5, /* total cpu and user ports */ + .ksz87xx_eee_link_erratum = true, + }, + + [KSZ8794] = { + /* WARNING + * ======= + * KSZ8794 is similar to KSZ8795, except the port map + * contains a gap between external and CPU ports, the + * port map is NOT continuous. The per-port register + * map is shifted accordingly too, i.e. registers at + * offset 0x40 are NOT used on KSZ8794 and they ARE + * used on KSZ8795 for external port 3. 
+ * external cpu + * KSZ8794 0,1,2 4 + * KSZ8795 0,1,2,3 4 + * KSZ8765 0,1,2,3 4 + * port_cnt is configured as 5, even though it is 4 + */ + .chip_id = KSZ8794_CHIP_ID, + .dev_name = "KSZ8794", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 5, /* total cpu and user ports */ + .ksz87xx_eee_link_erratum = true, + }, + + [KSZ8765] = { + .chip_id = KSZ8765_CHIP_ID, + .dev_name = "KSZ8765", + .num_vlans = 4096, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 5, /* total cpu and user ports */ + .ksz87xx_eee_link_erratum = true, + }, + + [KSZ8830] = { + .chip_id = KSZ8830_CHIP_ID, + .dev_name = "KSZ8863/KSZ8873", + .num_vlans = 16, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x4, /* can be configured as cpu port */ + .port_cnt = 3, + }, + + [KSZ9477] = { + .chip_id = KSZ9477_CHIP_ID, + .dev_name = "KSZ9477", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + .phy_errata_9477 = true, + }, + + [KSZ9897] = { + .chip_id = KSZ9897_CHIP_ID, + .dev_name = "KSZ9897", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + .phy_errata_9477 = true, + }, + + [KSZ9893] = { + .chip_id = KSZ9893_CHIP_ID, + .dev_name = "KSZ9893", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x07, /* can be configured as cpu port */ + .port_cnt = 3, /* total port count */ + }, + + [KSZ9567] = { + .chip_id = KSZ9567_CHIP_ID, + .dev_name = "KSZ9567", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + .phy_errata_9477 = true, + }, + + [LAN9370] = { + .chip_id = LAN9370_CHIP_ID, + .dev_name = "LAN9370", + .num_vlans = 4096, + .num_alus = 1024, + .num_statics = 256, + .cpu_ports = 0x10, /* can be configured as cpu port */ + .port_cnt = 5, /* total physical port count */ + }, + + [LAN9371] = { + .chip_id = LAN9371_CHIP_ID, + .dev_name = "LAN9371", + .num_vlans = 4096, + .num_alus = 1024, + .num_statics = 256, + .cpu_ports = 0x30, /* can be configured as cpu port */ + .port_cnt = 6, /* total physical port count */ + }, + + [LAN9372] = { + .chip_id = LAN9372_CHIP_ID, + .dev_name = "LAN9372", + .num_vlans = 4096, + .num_alus = 1024, + .num_statics = 256, + .cpu_ports = 0x30, /* can be configured as cpu port */ + .port_cnt = 8, /* total physical port count */ + }, + + [LAN9373] = { + .chip_id = LAN9373_CHIP_ID, + .dev_name = "LAN9373", + .num_vlans = 4096, + .num_alus = 1024, + .num_statics = 256, + .cpu_ports = 0x38, /* can be configured as cpu port */ + .port_cnt = 5, /* total physical port count */ + }, + + [LAN9374] = { + .chip_id = LAN9374_CHIP_ID, + .dev_name = "LAN9374", + .num_vlans = 4096, + .num_alus = 1024, + .num_statics = 256, + .cpu_ports = 0x30, /* can be configured as cpu port */ + .port_cnt = 8, /* total physical port count */ + }, +}; + +static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { + const struct ksz_chip_data *chip = &ksz_switch_chips[i]; + + if (chip->chip_id == prod_num) + return chip; + } + + return NULL; +} + void ksz_r_mib_stats64(struct ksz_device *dev, int port) { struct rtnl_link_stats64 
*stats; @@ -207,7 +373,7 @@ static void ksz_mib_read_work(struct work_struct *work) struct ksz_port *p; int i; - for (i = 0; i < dev->port_cnt; i++) { + for (i = 0; i < dev->info->port_cnt; i++) { if (dsa_is_unused_port(dev->ds, i)) continue; @@ -242,7 +408,7 @@ void ksz_init_mib_timer(struct ksz_device *dev) INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); - for (i = 0; i < dev->port_cnt; i++) + for (i = 0; i < dev->info->port_cnt; i++) dev->dev_ops->port_init_cnt(dev, i); } EXPORT_SYMBOL_GPL(ksz_init_mib_timer); @@ -382,7 +548,7 @@ int ksz_port_mdb_add(struct dsa_switch *ds, int port, int empty = 0; alu.port_forward = 0; - for (index = 0; index < dev->num_statics; index++) { + for (index = 0; index < dev->info->num_statics; index++) { if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) { /* Found one already in static MAC table. */ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) && @@ -395,11 +561,11 @@ int ksz_port_mdb_add(struct dsa_switch *ds, int port, } /* no available entry */ - if (index == dev->num_statics && !empty) + if (index == dev->info->num_statics && !empty) return -ENOSPC; /* add entry */ - if (index == dev->num_statics) { + if (index == dev->info->num_statics) { index = empty - 1; memset(&alu, 0, sizeof(alu)); memcpy(alu.mac, mdb->addr, ETH_ALEN); @@ -426,7 +592,7 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port, struct alu_struct alu; int index; - for (index = 0; index < dev->num_statics; index++) { + for (index = 0; index < dev->info->num_statics; index++) { if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) { /* Found one already in static MAC table. */ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) && @@ -436,7 +602,7 @@ int ksz_port_mdb_del(struct dsa_switch *ds, int port, } /* no available entry */ - if (index == dev->num_statics) + if (index == dev->info->num_statics) goto exit; /* clear port */ @@ -537,6 +703,7 @@ EXPORT_SYMBOL(ksz_switch_alloc); int ksz_switch_register(struct ksz_device *dev, const struct ksz_dev_ops *ops) { + const struct ksz_chip_data *info; struct device_node *port, *ports; phy_interface_t interface; unsigned int port_num; @@ -567,6 +734,13 @@ int ksz_switch_register(struct ksz_device *dev, if (dev->dev_ops->detect(dev)) return -EINVAL; + info = ksz_lookup_info(dev->chip_id); + if (!info) + return -ENODEV; + + /* Update the compatible info with the probed one */ + dev->info = info; + ret = dev->dev_ops->init(dev); if (ret) return ret; @@ -574,7 +748,7 @@ int ksz_switch_register(struct ksz_device *dev, /* Host port interface will be self detected, or specifically set in * device tree. 
*/ - for (port_num = 0; port_num < dev->port_cnt; ++port_num) + for (port_num = 0; port_num < dev->info->port_cnt; ++port_num) dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; if (dev->dev->of_node) { ret = of_get_phy_mode(dev->dev->of_node, &interface); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 28cda79b090f..a8f73e9a63ac 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -26,6 +26,18 @@ struct ksz_port_mib { struct spinlock stats64_lock; }; +struct ksz_chip_data { + u32 chip_id; + const char *dev_name; + int num_vlans; + int num_alus; + int num_statics; + int cpu_ports; + int port_cnt; + bool phy_errata_9477; + bool ksz87xx_eee_link_erratum; +}; + struct ksz_port { bool remove_tag; /* Remove Tag flag set, for ksz8795 only */ int stp_state; @@ -48,6 +60,7 @@ struct ksz_device { struct dsa_switch *ds; struct ksz_platform_data *pdata; const char *name; + const struct ksz_chip_data *info; struct mutex dev_mutex; /* device access */ struct mutex regmap_mutex; /* regmap access */ @@ -64,20 +77,13 @@ struct ksz_device { /* chip specific data */ u32 chip_id; - int num_vlans; - int num_alus; - int num_statics; int cpu_port; /* port connected to CPU */ - int cpu_ports; /* port bitmap can be cpu port */ int phy_port_cnt; - int port_cnt; u8 reg_mib_cnt; int mib_cnt; const struct mib_names *mib_names; phy_interface_t compat_interface; u32 regs_size; - bool phy_errata_9477; - bool ksz87xx_eee_link_erratum; bool synclko_125; bool synclko_disable; @@ -94,6 +100,39 @@ struct ksz_device { u16 port_mask; }; +/* List of supported models */ +enum ksz_model { + KSZ8795, + KSZ8794, + KSZ8765, + KSZ8830, + KSZ9477, + KSZ9897, + KSZ9893, + KSZ9567, + LAN9370, + LAN9371, + LAN9372, + LAN9373, + LAN9374, +}; + +enum ksz_chip_id { + KSZ8795_CHIP_ID = 0x8795, + KSZ8794_CHIP_ID = 0x8794, + KSZ8765_CHIP_ID = 0x8765, + KSZ8830_CHIP_ID = 0x8830, + KSZ9477_CHIP_ID = 0x00947700, + KSZ9897_CHIP_ID = 0x00989700, + KSZ9893_CHIP_ID = 0x00989300, + KSZ9567_CHIP_ID = 0x00956700, + LAN9370_CHIP_ID = 0x00937000, + LAN9371_CHIP_ID = 0x00937100, + LAN9372_CHIP_ID = 0x00937200, + LAN9373_CHIP_ID = 0x00937300, + LAN9374_CHIP_ID = 0x00937400, +}; + struct alu_struct { /* entry 1 */ u8 is_static:1; -- cgit v1.2.3 From eee16b147121cec77db51455ef30f4ad5102b270 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:27 +0530 Subject: net: dsa: microchip: perform the compatibility check for dev probed This patch perform the compatibility check for the device after the chip detect is done. It is to prevent the mismatch between the device compatible specified in the device tree and actual device found during the detect. The ksz9477 device doesn't use any .data in the of_device_id. But the ksz8795_spi uses .data for assigning the regmap between 8830 family and 87xx family switch. Changed the regmap assignment based on the chip_id from the .data. Signed-off-by: Arun Ramadoss Signed-off-by: David S. 
Miller --- drivers/net/dsa/microchip/ksz8795_spi.c | 35 ++++++++++++++++++++++++++------- drivers/net/dsa/microchip/ksz8863_smi.c | 10 ++++++++-- drivers/net/dsa/microchip/ksz9477_i2c.c | 30 ++++++++++++++++++++++------ drivers/net/dsa/microchip/ksz9477_spi.c | 30 ++++++++++++++++++++++------ drivers/net/dsa/microchip/ksz_common.c | 25 ++++++++++++++++++++++- drivers/net/dsa/microchip/ksz_common.h | 1 + 6 files changed, 109 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 5f8d94aee774..961a74c359a8 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -34,6 +34,7 @@ KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT, static int ksz8795_spi_probe(struct spi_device *spi) { const struct regmap_config *regmap_config; + const struct ksz_chip_data *chip; struct device *ddev = &spi->dev; struct regmap_config rc; struct ksz_device *dev; @@ -50,10 +51,15 @@ static int ksz8795_spi_probe(struct spi_device *spi) if (!dev) return -ENOMEM; - regmap_config = device_get_match_data(ddev); - if (!regmap_config) + chip = device_get_match_data(ddev); + if (!chip) return -EINVAL; + if (chip->chip_id == KSZ8830_CHIP_ID) + regmap_config = ksz8863_regmap_config; + else + regmap_config = ksz8795_regmap_config; + for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) { rc = regmap_config[i]; rc.lock_arg = &dev->regmap_mutex; @@ -113,11 +119,26 @@ static void ksz8795_spi_shutdown(struct spi_device *spi) } static const struct of_device_id ksz8795_dt_ids[] = { - { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config }, - { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config }, - { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config }, - { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config }, - { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config }, + { + .compatible = "microchip,ksz8765", + .data = &ksz_switch_chips[KSZ8765] + }, + { + .compatible = "microchip,ksz8794", + .data = &ksz_switch_chips[KSZ8794] + }, + { + .compatible = "microchip,ksz8795", + .data = &ksz_switch_chips[KSZ8795] + }, + { + .compatible = "microchip,ksz8863", + .data = &ksz_switch_chips[KSZ8830] + }, + { + .compatible = "microchip,ksz8873", + .data = &ksz_switch_chips[KSZ8830] + }, {}, }; MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c index 5883fa7edda2..b6f99e641dca 100644 --- a/drivers/net/dsa/microchip/ksz8863_smi.c +++ b/drivers/net/dsa/microchip/ksz8863_smi.c @@ -206,8 +206,14 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev) } static const struct of_device_id ksz8863_dt_ids[] = { - { .compatible = "microchip,ksz8863" }, - { .compatible = "microchip,ksz8873" }, + { + .compatible = "microchip,ksz8863", + .data = &ksz_switch_chips[KSZ8830] + }, + { + .compatible = "microchip,ksz8873", + .data = &ksz_switch_chips[KSZ8830] + }, { }, }; MODULE_DEVICE_TABLE(of, ksz8863_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index cbc0b20e7e1b..faa3163c86b0 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -87,12 +87,30 @@ static const struct i2c_device_id ksz9477_i2c_id[] = { MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id); static const struct of_device_id ksz9477_dt_ids[] = { - { .compatible = "microchip,ksz9477" }, - { 
.compatible = "microchip,ksz9897" }, - { .compatible = "microchip,ksz9893" }, - { .compatible = "microchip,ksz9563" }, - { .compatible = "microchip,ksz9567" }, - { .compatible = "microchip,ksz8563" }, + { + .compatible = "microchip,ksz9477", + .data = &ksz_switch_chips[KSZ9477] + }, + { + .compatible = "microchip,ksz9897", + .data = &ksz_switch_chips[KSZ9897] + }, + { + .compatible = "microchip,ksz9893", + .data = &ksz_switch_chips[KSZ9893] + }, + { + .compatible = "microchip,ksz9563", + .data = &ksz_switch_chips[KSZ9893] + }, + { + .compatible = "microchip,ksz8563", + .data = &ksz_switch_chips[KSZ9893] + }, + { + .compatible = "microchip,ksz9567", + .data = &ksz_switch_chips[KSZ9567] + }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 87ca464dad32..1bc8b0cbe458 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -86,12 +86,30 @@ static void ksz9477_spi_shutdown(struct spi_device *spi) } static const struct of_device_id ksz9477_dt_ids[] = { - { .compatible = "microchip,ksz9477" }, - { .compatible = "microchip,ksz9897" }, - { .compatible = "microchip,ksz9893" }, - { .compatible = "microchip,ksz9563" }, - { .compatible = "microchip,ksz8563" }, - { .compatible = "microchip,ksz9567" }, + { + .compatible = "microchip,ksz9477", + .data = &ksz_switch_chips[KSZ9477] + }, + { + .compatible = "microchip,ksz9897", + .data = &ksz_switch_chips[KSZ9897] + }, + { + .compatible = "microchip,ksz9893", + .data = &ksz_switch_chips[KSZ9893] + }, + { + .compatible = "microchip,ksz9563", + .data = &ksz_switch_chips[KSZ9893] + }, + { + .compatible = "microchip,ksz8563", + .data = &ksz_switch_chips[KSZ9893] + }, + { + .compatible = "microchip,ksz9567", + .data = &ksz_switch_chips[KSZ9567] + }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index ebb4753051d4..09048e5e7ff2 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -59,7 +60,7 @@ struct ksz_stats_raw { u64 tx_discards; }; -static const struct ksz_chip_data ksz_switch_chips[] = { +const struct ksz_chip_data ksz_switch_chips[] = { [KSZ8795] = { .chip_id = KSZ8795_CHIP_ID, .dev_name = "KSZ8795", @@ -210,6 +211,7 @@ static const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 8, /* total physical port count */ }, }; +EXPORT_SYMBOL_GPL(ksz_switch_chips); static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num) { @@ -225,6 +227,23 @@ static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num) return NULL; } +static int ksz_check_device_id(struct ksz_device *dev) +{ + const struct ksz_chip_data *dt_chip_data; + + dt_chip_data = of_device_get_match_data(dev->dev); + + /* Check for Device Tree and Chip ID */ + if (dt_chip_data->chip_id != dev->chip_id) { + dev_err(dev->dev, + "Device tree specifies chip %s but found %s, please fix it!\n", + dt_chip_data->dev_name, dev->info->dev_name); + return -ENODEV; + } + + return 0; +} + void ksz_r_mib_stats64(struct ksz_device *dev, int port) { struct rtnl_link_stats64 *stats; @@ -741,6 +760,10 @@ int ksz_switch_register(struct ksz_device *dev, /* Update the compatible info with the probed one */ dev->info = info; + ret = ksz_check_device_id(dev); + if (ret) + return ret; + ret = dev->dev_ops->init(dev); if (ret) 
return ret; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index a8f73e9a63ac..b526f8027eea 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -193,6 +193,7 @@ void ksz_init_mib_timer(struct ksz_device *dev); void ksz_r_mib_stats64(struct ksz_device *dev, int port); void ksz_get_stats64(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); +extern const struct ksz_chip_data ksz_switch_chips[]; /* Common DSA access functions */ -- cgit v1.2.3 From a530e6f2204afe28d745adc5ecbba77e3d6a78b0 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:28 +0530 Subject: net: dsa: microchip: move struct mib_names to ksz_chip_data The ksz88xx family has one set of mib_names. The ksz87xx, ksz9477, LAN937x based switches has one set of mib_names. In order to remove redundant declaration, moved the struct mib_names to ksz_chip_data structure. Signed-off-by: Arun Ramadoss Signed-off-by: David S. Miller --- drivers/net/dsa/microchip/ksz8795.c | 106 +++----------------------- drivers/net/dsa/microchip/ksz9477.c | 58 ++------------- drivers/net/dsa/microchip/ksz_common.c | 131 +++++++++++++++++++++++++++++++-- drivers/net/dsa/microchip/ksz_common.h | 11 ++- 4 files changed, 149 insertions(+), 157 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index b6032b65afc2..764e16b4f24b 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -126,86 +126,6 @@ static u8 ksz8863_shifts[] = { [DYNAMIC_MAC_SRC_PORT] = 20, }; -struct mib_names { - char string[ETH_GSTRING_LEN]; -}; - -static const struct mib_names ksz87xx_mib_names[] = { - { "rx_hi" }, - { "rx_undersize" }, - { "rx_fragments" }, - { "rx_oversize" }, - { "rx_jabbers" }, - { "rx_symbol_err" }, - { "rx_crc_err" }, - { "rx_align_err" }, - { "rx_mac_ctrl" }, - { "rx_pause" }, - { "rx_bcast" }, - { "rx_mcast" }, - { "rx_ucast" }, - { "rx_64_or_less" }, - { "rx_65_127" }, - { "rx_128_255" }, - { "rx_256_511" }, - { "rx_512_1023" }, - { "rx_1024_1522" }, - { "rx_1523_2000" }, - { "rx_2001" }, - { "tx_hi" }, - { "tx_late_col" }, - { "tx_pause" }, - { "tx_bcast" }, - { "tx_mcast" }, - { "tx_ucast" }, - { "tx_deferred" }, - { "tx_total_col" }, - { "tx_exc_col" }, - { "tx_single_col" }, - { "tx_mult_col" }, - { "rx_total" }, - { "tx_total" }, - { "rx_discards" }, - { "tx_discards" }, -}; - -static const struct mib_names ksz88xx_mib_names[] = { - { "rx" }, - { "rx_hi" }, - { "rx_undersize" }, - { "rx_fragments" }, - { "rx_oversize" }, - { "rx_jabbers" }, - { "rx_symbol_err" }, - { "rx_crc_err" }, - { "rx_align_err" }, - { "rx_mac_ctrl" }, - { "rx_pause" }, - { "rx_bcast" }, - { "rx_mcast" }, - { "rx_ucast" }, - { "rx_64_or_less" }, - { "rx_65_127" }, - { "rx_128_255" }, - { "rx_256_511" }, - { "rx_512_1023" }, - { "rx_1024_1522" }, - { "tx" }, - { "tx_hi" }, - { "tx_late_col" }, - { "tx_pause" }, - { "tx_bcast" }, - { "tx_mcast" }, - { "tx_ucast" }, - { "tx_deferred" }, - { "tx_total_col" }, - { "tx_exc_col" }, - { "tx_single_col" }, - { "tx_mult_col" }, - { "rx_discards" }, - { "tx_discards" }, -}; - static bool ksz_is_ksz88x3(struct ksz_device *dev) { return dev->chip_id == 0x8830; @@ -306,7 +226,7 @@ static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt) masks = ksz8->masks; regs = ksz8->regs; - ctrl_addr = addr + dev->reg_mib_cnt * port; + ctrl_addr = addr + dev->info->reg_mib_cnt * port; ctrl_addr |= 
IND_ACC_TABLE(TABLE_MIB | TABLE_READ); mutex_lock(&dev->alu_mutex); @@ -343,7 +263,7 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, masks = ksz8->masks; regs = ksz8->regs; - addr -= dev->reg_mib_cnt; + addr -= dev->info->reg_mib_cnt; ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port; ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0; ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); @@ -392,7 +312,7 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, u32 data; u32 cur; - addr -= dev->reg_mib_cnt; + addr -= dev->info->reg_mib_cnt; ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 : KSZ8863_MIB_PACKET_DROPPED_RX_0; ctrl_addr += port; @@ -453,23 +373,23 @@ static void ksz8_port_init_cnt(struct ksz_device *dev, int port) mib->cnt_ptr = 0; /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */ - while (mib->cnt_ptr < dev->reg_mib_cnt) { + while (mib->cnt_ptr < dev->info->reg_mib_cnt) { dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, &mib->counters[mib->cnt_ptr]); ++mib->cnt_ptr; } /* last one in storage */ - dropped = &mib->counters[dev->mib_cnt]; + dropped = &mib->counters[dev->info->mib_cnt]; /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */ - while (mib->cnt_ptr < dev->mib_cnt) { + while (mib->cnt_ptr < dev->info->mib_cnt) { dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, dropped, &mib->counters[mib->cnt_ptr]); ++mib->cnt_ptr; } mib->cnt_ptr = 0; - memset(mib->counters, 0, dev->mib_cnt * sizeof(u64)); + memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); } static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data) @@ -1009,9 +929,9 @@ static void ksz8_get_strings(struct dsa_switch *ds, int port, struct ksz_device *dev = ds->priv; int i; - for (i = 0; i < dev->mib_cnt; i++) { + for (i = 0; i < dev->info->mib_cnt; i++) { memcpy(buf + i * ETH_GSTRING_LEN, - dev->mib_names[i].string, ETH_GSTRING_LEN); + dev->info->mib_names[i].string, ETH_GSTRING_LEN); } } @@ -1587,18 +1507,12 @@ static int ksz8_switch_init(struct ksz_device *dev) ksz8->regs = ksz8863_regs; ksz8->masks = ksz8863_masks; ksz8->shifts = ksz8863_shifts; - dev->mib_cnt = ARRAY_SIZE(ksz88xx_mib_names); - dev->mib_names = ksz88xx_mib_names; } else { ksz8->regs = ksz8795_regs; ksz8->masks = ksz8795_masks; ksz8->shifts = ksz8795_shifts; - dev->mib_cnt = ARRAY_SIZE(ksz87xx_mib_names); - dev->mib_names = ksz87xx_mib_names; } - dev->reg_mib_cnt = MIB_COUNTER_NUM; - dev->ports = devm_kzalloc(dev->dev, dev->info->port_cnt * sizeof(struct ksz_port), GFP_KERNEL); @@ -1609,7 +1523,7 @@ static int ksz8_switch_init(struct ksz_device *dev) dev->ports[i].mib.counters = devm_kzalloc(dev->dev, sizeof(u64) * - (dev->mib_cnt + 1), + (dev->info->mib_cnt + 1), GFP_KERNEL); if (!dev->ports[i].mib.counters) return -ENOMEM; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index c712a0011367..f7d4a3498e5d 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -23,48 +23,6 @@ #define NEW_XMII BIT(1) #define IS_9893 BIT(2) -static const struct { - int index; - char string[ETH_GSTRING_LEN]; -} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = { - { 0x00, "rx_hi" }, - { 0x01, "rx_undersize" }, - { 0x02, "rx_fragments" }, - { 0x03, "rx_oversize" }, - { 0x04, "rx_jabbers" }, - { 0x05, "rx_symbol_err" }, - { 0x06, "rx_crc_err" }, - { 0x07, "rx_align_err" }, - { 0x08, "rx_mac_ctrl" }, - { 0x09, "rx_pause" }, - { 0x0A, "rx_bcast" }, - { 0x0B, "rx_mcast" }, - { 
0x0C, "rx_ucast" }, - { 0x0D, "rx_64_or_less" }, - { 0x0E, "rx_65_127" }, - { 0x0F, "rx_128_255" }, - { 0x10, "rx_256_511" }, - { 0x11, "rx_512_1023" }, - { 0x12, "rx_1024_1522" }, - { 0x13, "rx_1523_2000" }, - { 0x14, "rx_2001" }, - { 0x15, "tx_hi" }, - { 0x16, "tx_late_col" }, - { 0x17, "tx_pause" }, - { 0x18, "tx_bcast" }, - { 0x19, "tx_mcast" }, - { 0x1A, "tx_ucast" }, - { 0x1B, "tx_deferred" }, - { 0x1C, "tx_total_col" }, - { 0x1D, "tx_exc_col" }, - { 0x1E, "tx_single_col" }, - { 0x1F, "tx_mult_col" }, - { 0x80, "rx_total" }, - { 0x81, "tx_total" }, - { 0x82, "rx_discards" }, - { 0x83, "tx_discards" }, -}; - static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); @@ -287,7 +245,7 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, u64 *dropped, u64 *cnt) { - addr = ksz9477_mib_names[addr].index; + addr = dev->info->mib_names[addr].index; ksz9477_r_mib_cnt(dev, port, addr, cnt); } @@ -318,7 +276,7 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port) mutex_unlock(&mib->cnt_mutex); mib->cnt_ptr = 0; - memset(mib->counters, 0, dev->mib_cnt * sizeof(u64)); + memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); } static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds, @@ -403,14 +361,15 @@ static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg, static void ksz9477_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *buf) { + struct ksz_device *dev = ds->priv; int i; if (stringset != ETH_SS_STATS) return; - for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { - memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string, - ETH_GSTRING_LEN); + for (i = 0; i < dev->info->mib_cnt; i++) { + memcpy(buf + i * ETH_GSTRING_LEN, + dev->info->mib_names[i].string, ETH_GSTRING_LEN); } } @@ -1479,9 +1438,6 @@ static int ksz9477_switch_init(struct ksz_device *dev) dev->port_mask = (1 << dev->info->port_cnt) - 1; - dev->reg_mib_cnt = SWITCH_COUNTER_NUM; - dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM; - dev->ports = devm_kzalloc(dev->dev, dev->info->port_cnt * sizeof(struct ksz_port), GFP_KERNEL); @@ -1493,7 +1449,7 @@ static int ksz9477_switch_init(struct ksz_device *dev) dev->ports[i].mib.counters = devm_kzalloc(dev->dev, sizeof(u64) * - (TOTAL_SWITCH_COUNTER_NUM + 1), + (dev->info->mib_cnt + 1), GFP_KERNEL); if (!dev->ports[i].mib.counters) return -ENOMEM; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 09048e5e7ff2..243032b29ff2 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -21,6 +21,8 @@ #include "ksz_common.h" +#define MIB_COUNTER_NUM 0x20 + struct ksz_stats_raw { u64 rx_hi; u64 rx_undersize; @@ -60,6 +62,82 @@ struct ksz_stats_raw { u64 tx_discards; }; +static const struct ksz_mib_names ksz88xx_mib_names[] = { + { 0x00, "rx" }, + { 0x01, "rx_hi" }, + { 0x02, "rx_undersize" }, + { 0x03, "rx_fragments" }, + { 0x04, "rx_oversize" }, + { 0x05, "rx_jabbers" }, + { 0x06, "rx_symbol_err" }, + { 0x07, "rx_crc_err" }, + { 0x08, "rx_align_err" }, + { 0x09, "rx_mac_ctrl" }, + { 0x0a, "rx_pause" }, + { 0x0b, "rx_bcast" }, + { 0x0c, "rx_mcast" }, + { 0x0d, "rx_ucast" }, + { 0x0e, "rx_64_or_less" }, + { 0x0f, "rx_65_127" }, + { 0x10, "rx_128_255" }, + { 0x11, "rx_256_511" }, + { 0x12, "rx_512_1023" }, + { 0x13, "rx_1024_1522" }, + { 0x14, "tx" }, + { 0x15, 
"tx_hi" }, + { 0x16, "tx_late_col" }, + { 0x17, "tx_pause" }, + { 0x18, "tx_bcast" }, + { 0x19, "tx_mcast" }, + { 0x1a, "tx_ucast" }, + { 0x1b, "tx_deferred" }, + { 0x1c, "tx_total_col" }, + { 0x1d, "tx_exc_col" }, + { 0x1e, "tx_single_col" }, + { 0x1f, "tx_mult_col" }, + { 0x100, "rx_discards" }, + { 0x101, "tx_discards" }, +}; + +static const struct ksz_mib_names ksz9477_mib_names[] = { + { 0x00, "rx_hi" }, + { 0x01, "rx_undersize" }, + { 0x02, "rx_fragments" }, + { 0x03, "rx_oversize" }, + { 0x04, "rx_jabbers" }, + { 0x05, "rx_symbol_err" }, + { 0x06, "rx_crc_err" }, + { 0x07, "rx_align_err" }, + { 0x08, "rx_mac_ctrl" }, + { 0x09, "rx_pause" }, + { 0x0A, "rx_bcast" }, + { 0x0B, "rx_mcast" }, + { 0x0C, "rx_ucast" }, + { 0x0D, "rx_64_or_less" }, + { 0x0E, "rx_65_127" }, + { 0x0F, "rx_128_255" }, + { 0x10, "rx_256_511" }, + { 0x11, "rx_512_1023" }, + { 0x12, "rx_1024_1522" }, + { 0x13, "rx_1523_2000" }, + { 0x14, "rx_2001" }, + { 0x15, "tx_hi" }, + { 0x16, "tx_late_col" }, + { 0x17, "tx_pause" }, + { 0x18, "tx_bcast" }, + { 0x19, "tx_mcast" }, + { 0x1A, "tx_ucast" }, + { 0x1B, "tx_deferred" }, + { 0x1C, "tx_total_col" }, + { 0x1D, "tx_exc_col" }, + { 0x1E, "tx_single_col" }, + { 0x1F, "tx_mult_col" }, + { 0x80, "rx_total" }, + { 0x81, "tx_total" }, + { 0x82, "rx_discards" }, + { 0x83, "tx_discards" }, +}; + const struct ksz_chip_data ksz_switch_chips[] = { [KSZ8795] = { .chip_id = KSZ8795_CHIP_ID, @@ -70,6 +148,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .ksz87xx_eee_link_erratum = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [KSZ8794] = { @@ -95,6 +176,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .ksz87xx_eee_link_erratum = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [KSZ8765] = { @@ -106,6 +190,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .ksz87xx_eee_link_erratum = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [KSZ8830] = { @@ -116,6 +203,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_statics = 8, .cpu_ports = 0x4, /* can be configured as cpu port */ .port_cnt = 3, + .mib_names = ksz88xx_mib_names, + .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [KSZ9477] = { @@ -127,6 +217,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x7F, /* can be configured as cpu port */ .port_cnt = 7, /* total physical port count */ .phy_errata_9477 = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [KSZ9897] = { @@ -138,6 +231,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x7F, /* can be configured as cpu port */ .port_cnt = 7, /* total physical port count */ .phy_errata_9477 = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [KSZ9893] = { @@ -148,6 +244,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_statics = 16, .cpu_ports = 0x07, /* can be configured as cpu port */ .port_cnt = 3, /* total port count */ + .mib_names 
= ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [KSZ9567] = { @@ -159,6 +258,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x7F, /* can be configured as cpu port */ .port_cnt = 7, /* total physical port count */ .phy_errata_9477 = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [LAN9370] = { @@ -169,6 +271,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_statics = 256, .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total physical port count */ + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [LAN9371] = { @@ -179,6 +284,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_statics = 256, .cpu_ports = 0x30, /* can be configured as cpu port */ .port_cnt = 6, /* total physical port count */ + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [LAN9372] = { @@ -189,6 +297,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_statics = 256, .cpu_ports = 0x30, /* can be configured as cpu port */ .port_cnt = 8, /* total physical port count */ + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [LAN9373] = { @@ -199,6 +310,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_statics = 256, .cpu_ports = 0x38, /* can be configured as cpu port */ .port_cnt = 5, /* total physical port count */ + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, [LAN9374] = { @@ -209,6 +323,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .num_statics = 256, .cpu_ports = 0x30, /* can be configured as cpu port */ .port_cnt = 8, /* total physical port count */ + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, }, }; EXPORT_SYMBOL_GPL(ksz_switch_chips); @@ -366,17 +483,17 @@ static void port_r_cnt(struct ksz_device *dev, int port) u64 *dropped; /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */ - while (mib->cnt_ptr < dev->reg_mib_cnt) { + while (mib->cnt_ptr < dev->info->reg_mib_cnt) { dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, &mib->counters[mib->cnt_ptr]); ++mib->cnt_ptr; } /* last one in storage */ - dropped = &mib->counters[dev->mib_cnt]; + dropped = &mib->counters[dev->info->mib_cnt]; /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */ - while (mib->cnt_ptr < dev->mib_cnt) { + while (mib->cnt_ptr < dev->info->mib_cnt) { dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, dropped, &mib->counters[mib->cnt_ptr]); ++mib->cnt_ptr; @@ -407,7 +524,7 @@ static void ksz_mib_read_work(struct work_struct *work) const struct dsa_port *dp = dsa_to_port(dev->ds, i); if (!netif_carrier_ok(dp->slave)) - mib->cnt_ptr = dev->reg_mib_cnt; + mib->cnt_ptr = dev->info->reg_mib_cnt; } port_r_cnt(dev, i); p->read = false; @@ -474,7 +591,7 @@ int ksz_sset_count(struct dsa_switch *ds, int port, int sset) if (sset != ETH_SS_STATS) return 0; - return dev->mib_cnt; + return dev->info->mib_cnt; } EXPORT_SYMBOL_GPL(ksz_sset_count); @@ -489,9 +606,9 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf) /* Only read dropped counters if no link. 
*/ if (!netif_carrier_ok(dp->slave)) - mib->cnt_ptr = dev->reg_mib_cnt; + mib->cnt_ptr = dev->info->reg_mib_cnt; port_r_cnt(dev, port); - memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64)); + memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64)); mutex_unlock(&mib->cnt_mutex); } EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index b526f8027eea..7c0158f20d34 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -26,6 +26,11 @@ struct ksz_port_mib { struct spinlock stats64_lock; }; +struct ksz_mib_names { + int index; + char string[ETH_GSTRING_LEN]; +}; + struct ksz_chip_data { u32 chip_id; const char *dev_name; @@ -36,6 +41,9 @@ struct ksz_chip_data { int port_cnt; bool phy_errata_9477; bool ksz87xx_eee_link_erratum; + const struct ksz_mib_names *mib_names; + int mib_cnt; + u8 reg_mib_cnt; }; struct ksz_port { @@ -79,9 +87,6 @@ struct ksz_device { u32 chip_id; int cpu_port; /* port connected to CPU */ int phy_port_cnt; - u8 reg_mib_cnt; - int mib_cnt; - const struct mib_names *mib_names; phy_interface_t compat_interface; u32 regs_size; bool synclko_125; -- cgit v1.2.3 From 198b34783ab10d0c58bf2ae2574e7fc704129058 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:29 +0530 Subject: net: dsa: microchip: move port memory allocation to ksz_common ksz8795 and ksz9477 init function initializes the memory to dev->ports, mib counters and assigns the ds real number of ports. Since both the routines are same, moved the allocation of port memory to ksz_switch_register after init. Signed-off-by: Arun Ramadoss Signed-off-by: David S. Miller --- drivers/net/dsa/microchip/ksz8795.c | 20 -------------------- drivers/net/dsa/microchip/ksz9477.c | 22 ---------------------- drivers/net/dsa/microchip/ksz_common.c | 21 +++++++++++++++++++++ 3 files changed, 21 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 764e16b4f24b..3490b6072641 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1494,7 +1494,6 @@ static int ksz8_switch_detect(struct ksz_device *dev) static int ksz8_switch_init(struct ksz_device *dev) { struct ksz8 *ksz8 = dev->priv; - int i; dev->ds->ops = &ksz8_switch_ops; @@ -1513,25 +1512,6 @@ static int ksz8_switch_init(struct ksz_device *dev) ksz8->shifts = ksz8795_shifts; } - dev->ports = devm_kzalloc(dev->dev, - dev->info->port_cnt * sizeof(struct ksz_port), - GFP_KERNEL); - if (!dev->ports) - return -ENOMEM; - for (i = 0; i < dev->info->port_cnt; i++) { - mutex_init(&dev->ports[i].mib.cnt_mutex); - dev->ports[i].mib.counters = - devm_kzalloc(dev->dev, - sizeof(u64) * - (dev->info->mib_cnt + 1), - GFP_KERNEL); - if (!dev->ports[i].mib.counters) - return -ENOMEM; - } - - /* set the real number of ports */ - dev->ds->num_ports = dev->info->port_cnt; - /* We rely on software untagging on the CPU port, so that we * can support both tagged and untagged VLANs */ diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index f7d4a3498e5d..d4729f0dd831 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1432,32 +1432,10 @@ static int ksz9477_switch_detect(struct ksz_device *dev) static int ksz9477_switch_init(struct ksz_device *dev) { - int i; - dev->ds->ops = &ksz9477_switch_ops; dev->port_mask = (1 << dev->info->port_cnt) - 1; - 
dev->ports = devm_kzalloc(dev->dev, - dev->info->port_cnt * sizeof(struct ksz_port), - GFP_KERNEL); - if (!dev->ports) - return -ENOMEM; - for (i = 0; i < dev->info->port_cnt; i++) { - spin_lock_init(&dev->ports[i].mib.stats64_lock); - mutex_init(&dev->ports[i].mib.cnt_mutex); - dev->ports[i].mib.counters = - devm_kzalloc(dev->dev, - sizeof(u64) * - (dev->info->mib_cnt + 1), - GFP_KERNEL); - if (!dev->ports[i].mib.counters) - return -ENOMEM; - } - - /* set the real number of ports */ - dev->ds->num_ports = dev->info->port_cnt; - return 0; } diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 243032b29ff2..8f90bf29fd4c 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -844,6 +844,7 @@ int ksz_switch_register(struct ksz_device *dev, phy_interface_t interface; unsigned int port_num; int ret; + int i; if (dev->pdata) dev->chip_id = dev->pdata->chip_id; @@ -885,6 +886,26 @@ int ksz_switch_register(struct ksz_device *dev, if (ret) return ret; + dev->ports = devm_kzalloc(dev->dev, + dev->info->port_cnt * sizeof(struct ksz_port), + GFP_KERNEL); + if (!dev->ports) + return -ENOMEM; + + for (i = 0; i < dev->info->port_cnt; i++) { + spin_lock_init(&dev->ports[i].mib.stats64_lock); + mutex_init(&dev->ports[i].mib.cnt_mutex); + dev->ports[i].mib.counters = + devm_kzalloc(dev->dev, + sizeof(u64) * (dev->info->mib_cnt + 1), + GFP_KERNEL); + if (!dev->ports[i].mib.counters) + return -ENOMEM; + } + + /* set the real number of ports */ + dev->ds->num_ports = dev->info->port_cnt; + /* Host port interface will be self detected, or specifically set in * device tree. */ -- cgit v1.2.3 From 997d2126ac61128360f024ab4b7d72a006cf6094 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:30 +0530 Subject: net: dsa: microchip: move get_strings to ksz_common ksz8795 and ksz9477 uses the same algorithm for copying the ethtool strings. Hence moved to ksz_common to remove the redundant code. Signed-off-by: Arun Ramadoss Reviewed-by: Florian Fainelli Reviewed-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/microchip/ksz8795.c | 14 +------------- drivers/net/dsa/microchip/ksz9477.c | 17 +---------------- drivers/net/dsa/microchip/ksz_common.c | 16 ++++++++++++++++ drivers/net/dsa/microchip/ksz_common.h | 2 ++ 4 files changed, 20 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 3490b6072641..251048ffd3d4 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -923,18 +923,6 @@ static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port) return 0; } -static void ksz8_get_strings(struct dsa_switch *ds, int port, - u32 stringset, uint8_t *buf) -{ - struct ksz_device *dev = ds->priv; - int i; - - for (i = 0; i < dev->info->mib_cnt; i++) { - memcpy(buf + i * ETH_GSTRING_LEN, - dev->info->mib_names[i].string, ETH_GSTRING_LEN); - } -} - static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) { u8 data; @@ -1424,7 +1412,7 @@ static const struct dsa_switch_ops ksz8_switch_ops = { .phylink_get_caps = ksz8_get_caps, .phylink_mac_link_down = ksz_mac_link_down, .port_enable = ksz_enable_port, - .get_strings = ksz8_get_strings, + .get_strings = ksz_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, .get_sset_count = ksz_sset_count, .port_bridge_join = ksz_port_bridge_join, diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index d4729f0dd831..7cffc3388106 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -358,21 +358,6 @@ static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg, return 0; } -static void ksz9477_get_strings(struct dsa_switch *ds, int port, - u32 stringset, uint8_t *buf) -{ - struct ksz_device *dev = ds->priv; - int i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < dev->info->mib_cnt; i++) { - memcpy(buf + i * ETH_GSTRING_LEN, - dev->info->mib_names[i].string, ETH_GSTRING_LEN); - } -} - static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member) { @@ -1341,7 +1326,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .phy_write = ksz9477_phy_write16, .phylink_mac_link_down = ksz_mac_link_down, .port_enable = ksz_enable_port, - .get_strings = ksz9477_get_strings, + .get_strings = ksz_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, .get_sset_count = ksz_sset_count, .port_bridge_join = ksz_port_bridge_join, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8f90bf29fd4c..c0acb12fab6d 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -418,6 +418,22 @@ void ksz_get_stats64(struct dsa_switch *ds, int port, } EXPORT_SYMBOL_GPL(ksz_get_stats64); +void ksz_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) +{ + struct ksz_device *dev = ds->priv; + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < dev->info->mib_cnt; i++) { + memcpy(buf + i * ETH_GSTRING_LEN, + dev->info->mib_names[i].string, ETH_GSTRING_LEN); + } +} +EXPORT_SYMBOL_GPL(ksz_get_strings); + void ksz_update_port_member(struct ksz_device *dev, int port) { struct ksz_port *p = &dev->ports[port]; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 7c0158f20d34..8b2a30ef0664 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -225,6 +225,8 @@ int 
ksz_port_mdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb, struct dsa_db db); int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); +void ksz_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf); /* Common register access functions */ -- cgit v1.2.3 From b094c679662c0125f763d996a46c687c437b42a0 Mon Sep 17 00:00:00 2001 From: Prasanna Vengateshan Date: Tue, 17 May 2022 15:13:31 +0530 Subject: net: dsa: move mib->cnt_ptr reset code to ksz_common.c mib->cnt_ptr resetting is handled in multiple places as part of port_init_cnt(). Hence moved mib->cnt_ptr code to ksz common layer and removed from individual product files. Signed-off-by: Prasanna Vengateshan Signed-off-by: Arun Ramadoss Reviewed-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/microchip/ksz8795.c | 2 -- drivers/net/dsa/microchip/ksz9477.c | 3 --- drivers/net/dsa/microchip/ksz_common.c | 8 +++++++- 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 251048ffd3d4..d6162b00e4fb 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -388,8 +388,6 @@ static void ksz8_port_init_cnt(struct ksz_device *dev, int port) dropped, &mib->counters[mib->cnt_ptr]); ++mib->cnt_ptr; } - mib->cnt_ptr = 0; - memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); } static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data) diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 7cffc3388106..22ef56e2cb7b 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -274,9 +274,6 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port) ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH); ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0); mutex_unlock(&mib->cnt_mutex); - - mib->cnt_ptr = 0; - memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); } static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index c0acb12fab6d..873a6469934f 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -560,8 +560,14 @@ void ksz_init_mib_timer(struct ksz_device *dev) INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); - for (i = 0; i < dev->info->port_cnt; i++) + for (i = 0; i < dev->info->port_cnt; i++) { + struct ksz_port_mib *mib = &dev->ports[i].mib; + dev->dev_ops->port_init_cnt(dev, i); + + mib->cnt_ptr = 0; + memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64)); + } } EXPORT_SYMBOL_GPL(ksz_init_mib_timer); -- cgit v1.2.3 From 65ac79e1812016d7c5760872736802f985ec7455 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:32 +0530 Subject: net: dsa: microchip: add the phylink get_caps This patch add the support for phylink_get_caps for ksz8795 and ksz9477 series switch. It updates the struct ksz_switch_chip with the details of the internal phys and xmii interface. Then during the get_caps based on the bits set in the structure, corresponding phy mode is set. Signed-off-by: Arun Ramadoss Reviewed-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/microchip/ksz8795.c | 10 +--- drivers/net/dsa/microchip/ksz9477.c | 10 ++++ drivers/net/dsa/microchip/ksz_common.c | 98 ++++++++++++++++++++++++++++++++++ drivers/net/dsa/microchip/ksz_common.h | 8 +++ 4 files changed, 117 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index d6162b00e4fb..9d6d3c69fd47 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1376,15 +1376,7 @@ static void ksz8_get_caps(struct dsa_switch *ds, int port, { struct ksz_device *dev = ds->priv; - if (port == dev->cpu_port) { - __set_bit(PHY_INTERFACE_MODE_RMII, - config->supported_interfaces); - __set_bit(PHY_INTERFACE_MODE_MII, - config->supported_interfaces); - } else { - __set_bit(PHY_INTERFACE_MODE_INTERNAL, - config->supported_interfaces); - } + ksz_phylink_get_caps(ds, port, config); config->mac_capabilities = MAC_10 | MAC_100; diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 22ef56e2cb7b..ab40b700cf1a 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1097,6 +1097,15 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port) ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee); } +static void ksz9477_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + ksz_phylink_get_caps(ds, port, config); + + config->mac_capabilities = MAC_10 | MAC_100 | MAC_1000FD | + MAC_ASYM_PAUSE | MAC_SYM_PAUSE; +} + static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) { struct ksz_port *p = &dev->ports[port]; @@ -1322,6 +1331,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .phy_read = ksz9477_phy_read16, .phy_write = ksz9477_phy_write16, .phylink_mac_link_down = ksz_mac_link_down, + .phylink_get_caps = ksz9477_get_caps, .port_enable = ksz_enable_port, .get_strings = ksz_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 873a6469934f..9ca8c8d7740f 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -151,6 +151,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, true}, + .supports_rmii = {false, false, false, false, true}, + .supports_rgmii = {false, false, false, false, true}, + .internal_phy = {true, true, true, true, false}, }, [KSZ8794] = { @@ -179,6 +183,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, true}, + .supports_rmii = {false, false, false, false, true}, + .supports_rgmii = {false, false, false, false, true}, + .internal_phy = {true, true, true, false, false}, }, [KSZ8765] = { @@ -193,6 +201,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, true}, + .supports_rmii = {false, false, false, false, true}, + .supports_rgmii = {false, false, false, false, true}, + .internal_phy = {true, true, true, true, false}, }, [KSZ8830] = { @@ -206,6 +218,9 @@ const struct ksz_chip_data 
ksz_switch_chips[] = { .mib_names = ksz88xx_mib_names, .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, true}, + .supports_rmii = {false, false, true}, + .internal_phy = {true, true, false}, }, [KSZ9477] = { @@ -220,6 +235,14 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, + false, true, false}, + .supports_rmii = {false, false, false, false, + false, true, false}, + .supports_rgmii = {false, false, false, false, + false, true, false}, + .internal_phy = {true, true, true, true, + true, false, false}, }, [KSZ9897] = { @@ -234,6 +257,14 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, + false, true, true}, + .supports_rmii = {false, false, false, false, + false, true, true}, + .supports_rgmii = {false, false, false, false, + false, true, true}, + .internal_phy = {true, true, true, true, + true, false, false}, }, [KSZ9893] = { @@ -247,6 +278,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, true}, + .supports_rmii = {false, false, true}, + .supports_rgmii = {false, false, true}, + .internal_phy = {true, true, false}, }, [KSZ9567] = { @@ -261,6 +296,14 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, + false, true, true}, + .supports_rmii = {false, false, false, false, + false, true, true}, + .supports_rgmii = {false, false, false, false, + false, true, true}, + .internal_phy = {true, true, true, true, + true, false, false}, }, [LAN9370] = { @@ -274,6 +317,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, true}, + .supports_rmii = {false, false, false, false, true}, + .supports_rgmii = {false, false, false, false, true}, + .internal_phy = {true, true, true, true, false}, }, [LAN9371] = { @@ -287,6 +334,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, true, true}, + .supports_rmii = {false, false, false, false, true, true}, + .supports_rgmii = {false, false, false, false, true, true}, + .internal_phy = {true, true, true, true, false, false}, }, [LAN9372] = { @@ -300,6 +351,14 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, + true, true, false, false}, + .supports_rmii = {false, false, false, false, + true, true, false, false}, + .supports_rgmii = {false, false, false, false, + true, true, false, false}, + .internal_phy = {true, true, true, true, + false, false, true, true}, }, [LAN9373] = { @@ -313,6 +372,14 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, 
+ .supports_mii = {false, false, false, false, + true, true, false, false}, + .supports_rmii = {false, false, false, false, + true, true, false, false}, + .supports_rgmii = {false, false, false, false, + true, true, false, false}, + .internal_phy = {true, true, true, false, + false, false, true, true}, }, [LAN9374] = { @@ -326,6 +393,14 @@ const struct ksz_chip_data ksz_switch_chips[] = { .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, + .supports_mii = {false, false, false, false, + true, true, false, false}, + .supports_rmii = {false, false, false, false, + true, true, false, false}, + .supports_rgmii = {false, false, false, false, + true, true, false, false}, + .internal_phy = {true, true, true, true, + false, false, true, true}, }, }; EXPORT_SYMBOL_GPL(ksz_switch_chips); @@ -361,6 +436,29 @@ static int ksz_check_device_id(struct ksz_device *dev) return 0; } +void ksz_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + struct ksz_device *dev = ds->priv; + + config->legacy_pre_march2020 = false; + + if (dev->info->supports_mii[port]) + __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); + + if (dev->info->supports_rmii[port]) + __set_bit(PHY_INTERFACE_MODE_RMII, + config->supported_interfaces); + + if (dev->info->supports_rgmii[port]) + phy_interface_set_rgmii(config->supported_interfaces); + + if (dev->info->internal_phy[port]) + __set_bit(PHY_INTERFACE_MODE_INTERNAL, + config->supported_interfaces); +} +EXPORT_SYMBOL_GPL(ksz_phylink_get_caps); + void ksz_r_mib_stats64(struct ksz_device *dev, int port) { struct rtnl_link_stats64 *stats; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 8b2a30ef0664..de963f0f83e2 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -14,6 +14,8 @@ #include #include +#define KSZ_MAX_NUM_PORTS 8 + struct vlan_table { u32 table[3]; }; @@ -44,6 +46,10 @@ struct ksz_chip_data { const struct ksz_mib_names *mib_names; int mib_cnt; u8 reg_mib_cnt; + bool supports_mii[KSZ_MAX_NUM_PORTS]; + bool supports_rmii[KSZ_MAX_NUM_PORTS]; + bool supports_rgmii[KSZ_MAX_NUM_PORTS]; + bool internal_phy[KSZ_MAX_NUM_PORTS]; }; struct ksz_port { @@ -198,6 +204,8 @@ void ksz_init_mib_timer(struct ksz_device *dev); void ksz_r_mib_stats64(struct ksz_device *dev, int port); void ksz_get_stats64(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); +void ksz_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config); extern const struct ksz_chip_data ksz_switch_chips[]; /* Common DSA access functions */ -- cgit v1.2.3 From 008db08b64f44dfc69efb788e8b4bfa43ba91700 Mon Sep 17 00:00:00 2001 From: Arun Ramadoss Date: Tue, 17 May 2022 15:13:33 +0530 Subject: net: dsa: microchip: remove unused members in ksz_device The name, regs_size and overrides members in struct ksz_device are unused. Hence remove it. And host_mask is used in only place of ksz8795.c file, which can be replaced by dev->info->cpu_ports Signed-off-by: Arun Ramadoss Reviewed-by: Florian Fainelli Reviewed-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/microchip/ksz8795.c | 3 +-- drivers/net/dsa/microchip/ksz_common.h | 4 ---- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 9d6d3c69fd47..12a599d5e61a 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1360,7 +1360,7 @@ static int ksz8_setup(struct dsa_switch *ds) ether_addr_copy(alu.mac, eth_stp_addr); alu.is_static = true; alu.is_override = true; - alu.port_forward = dev->host_mask; + alu.port_forward = dev->info->cpu_ports; ksz8_w_sta_mac_table(dev, 0, &alu); @@ -1476,7 +1476,6 @@ static int ksz8_switch_init(struct ksz_device *dev) dev->ds->ops = &ksz8_switch_ops; dev->cpu_port = fls(dev->info->cpu_ports) - 1; - dev->host_mask = dev->info->cpu_ports; dev->phy_port_cnt = dev->info->port_cnt - 1; dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index de963f0f83e2..8500eaedad67 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -73,7 +73,6 @@ struct ksz_port { struct ksz_device { struct dsa_switch *ds; struct ksz_platform_data *pdata; - const char *name; const struct ksz_chip_data *info; struct mutex dev_mutex; /* device access */ @@ -94,7 +93,6 @@ struct ksz_device { int cpu_port; /* port connected to CPU */ int phy_port_cnt; phy_interface_t compat_interface; - u32 regs_size; bool synclko_125; bool synclko_disable; @@ -106,8 +104,6 @@ struct ksz_device { u16 mirror_rx; u16 mirror_tx; u32 features; /* chip specific features */ - u32 overrides; /* chip functions set by user */ - u16 host_mask; u16 port_mask; }; -- cgit v1.2.3 From e991d0ed0b7a4c135236ec41ba7df7ea99ea9247 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Tue, 17 May 2022 10:16:01 +0200 Subject: net: stmmac: remove unused get_addr() callback The last caller of the stmmac_desc_ops::get_addr() callback was removed a while ago, so remove the unused callback. Note that the callback also only gets half the descriptor address on systems with 64-bit descriptor addresses, so that should be fixed if it needs to be resurrected later. Fixes: ec222003bd948de8f3 ("net: stmmac: Prepare to add Split Header support") Signed-off-by: Vincent Whitchurch Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 6 ------ drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 6 ------ drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 6 ------ drivers/net/ethernet/stmicro/stmmac/hwif.h | 4 ---- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 6 ------ 5 files changed, 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index d3b4765c1a5b..8cc80b1db4cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -462,11 +462,6 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); } -static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr) -{ - *addr = le32_to_cpu(p->des0); -} - static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) { p->des0 = cpu_to_le32(lower_32_bits(addr)); @@ -575,7 +570,6 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .init_tx_desc = dwmac4_rd_init_tx_desc, .display_ring = dwmac4_display_ring, .set_mss = dwmac4_set_mss_ctxt, - .get_addr = dwmac4_get_addr, .set_addr = dwmac4_set_addr, .clear = dwmac4_clear, .set_sarc = dwmac4_set_sarc, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c index ccfb0102dde4..b1f0c3984a09 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c @@ -239,11 +239,6 @@ static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss) p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV); } -static void dwxgmac2_get_addr(struct dma_desc *p, unsigned int *addr) -{ - *addr = le32_to_cpu(p->des0); -} - static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr) { p->des0 = cpu_to_le32(lower_32_bits(addr)); @@ -366,7 +361,6 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = { .init_rx_desc = dwxgmac2_init_rx_desc, .init_tx_desc = dwxgmac2_init_tx_desc, .set_mss = dwxgmac2_set_mss, - .get_addr = dwxgmac2_get_addr, .set_addr = dwxgmac2_set_addr, .clear = dwxgmac2_clear, .get_rx_hash = dwxgmac2_get_rx_hash, diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 6650edfab5bc..1bcbbd724fb5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -440,11 +440,6 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx, pr_info("\n"); } -static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr) -{ - *addr = le32_to_cpu(p->des2); -} - static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr) { p->des2 = cpu_to_le32(addr); @@ -475,7 +470,6 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_timestamp = enh_desc_get_timestamp, .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, .display_ring = enh_desc_display_ring, - .get_addr = enh_desc_get_addr, .set_addr = enh_desc_set_addr, .clear = enh_desc_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index f7dc447f05a0..592b4067f9b8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -82,8 +82,6 @@ struct stmmac_desc_ops { dma_addr_t dma_rx_phy, unsigned int desc_size); /* set MSS via context descriptor */ void (*set_mss)(struct dma_desc *p, unsigned int 
mss); - /* get descriptor skbuff address */ - void (*get_addr)(struct dma_desc *p, unsigned int *addr); /* set descriptor skbuff address */ void (*set_addr)(struct dma_desc *p, dma_addr_t addr); /* clear descriptor */ @@ -142,8 +140,6 @@ struct stmmac_desc_ops { stmmac_do_void_callback(__priv, desc, display_ring, __args) #define stmmac_set_mss(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, set_mss, __args) -#define stmmac_get_desc_addr(__priv, __args...) \ - stmmac_do_void_callback(__priv, desc, get_addr, __args) #define stmmac_set_desc_addr(__priv, __args...) \ stmmac_do_void_callback(__priv, desc, set_addr, __args) #define stmmac_clear_desc(__priv, __args...) \ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 98ef43f35802..e3da4da242ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -292,11 +292,6 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx, pr_info("\n"); } -static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr) -{ - *addr = le32_to_cpu(p->des2); -} - static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr) { p->des2 = cpu_to_le32(addr); @@ -326,7 +321,6 @@ const struct stmmac_desc_ops ndesc_ops = { .get_timestamp = ndesc_get_timestamp, .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, .display_ring = ndesc_display_ring, - .get_addr = ndesc_get_addr, .set_addr = ndesc_set_addr, .clear = ndesc_clear, }; -- cgit v1.2.3 From 32329216ca1d6ee29c41215f18b3053bb6158541 Mon Sep 17 00:00:00 2001 From: Martin Liška Date: Wed, 18 May 2022 09:18:53 +0200 Subject: eth: sun: cassini: remove dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the following GCC warning: drivers/net/ethernet/sun/cassini.c:1316:29: error: comparison between two arrays [-Werror=array-compare] drivers/net/ethernet/sun/cassini.c:3783:34: error: comparison between two arrays [-Werror=array-compare] Note that 2 arrays should be compared by comparing of their addresses: note: use ‘&cas_prog_workaroundtab[0] == &cas_prog_null[0]’ to compare the addresses Signed-off-by: Martin Liska Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/cassini.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index b04a6a7bf566..435dc00d04e5 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1313,7 +1313,7 @@ static void cas_init_rx_dma(struct cas *cp) writel(val, cp->regs + REG_RX_PAGE_SIZE); /* enable the header parser if desired */ - if (CAS_HP_FIRMWARE == cas_prog_null) + if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0]) return; val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS); @@ -3780,7 +3780,7 @@ static void cas_reset(struct cas *cp, int blkflag) /* program header parser */ if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || - (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { + (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) { cas_load_firmware(cp, CAS_HP_FIRMWARE); } else { cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); -- cgit v1.2.3 From 223153ea6c79a0d78dc2a04e3300de1deb336f14 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Wed, 18 May 2022 10:08:12 +0800 Subject: net: ethernet: sunplus: add missing of_node_put() in spl2sw_mdio_init() of_get_child_by_name() returns device node pointer with refcount incremented. 
The refcount should be decremented before returning from spl2sw_mdio_init(). Fixes: fd3040b9394c ("net: ethernet: Add driver for Sunplus SP7021") Reported-by: Hulk Robot Signed-off-by: Yang Yingliang Reviewed-by: Wells Lu Signed-off-by: David S. Miller --- drivers/net/ethernet/sunplus/spl2sw_mdio.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.c b/drivers/net/ethernet/sunplus/spl2sw_mdio.c index 139ac8f2685e..733ae1704269 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_mdio.c +++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.c @@ -97,8 +97,10 @@ u32 spl2sw_mdio_init(struct spl2sw_common *comm) /* Allocate and register mdio bus. */ mii_bus = devm_mdiobus_alloc(&comm->pdev->dev); - if (!mii_bus) - return -ENOMEM; + if (!mii_bus) { + ret = -ENOMEM; + goto out; + } mii_bus->name = "sunplus_mii_bus"; mii_bus->parent = &comm->pdev->dev; @@ -110,10 +112,13 @@ u32 spl2sw_mdio_init(struct spl2sw_common *comm) ret = of_mdiobus_register(mii_bus, mdio_np); if (ret) { dev_err(&comm->pdev->dev, "Failed to register mdiobus!\n"); - return ret; + goto out; } comm->mii_bus = mii_bus; + +out: + of_node_put(mdio_np); return ret; } -- cgit v1.2.3 From a3641ca416a3da7cbeae5bcf1fc26ba9797a1438 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 18 May 2022 11:04:19 +0200 Subject: net: smc911x: Fix min() use in debug code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If ENABLE_SMC_DEBUG_PKTS=1: drivers/net/ethernet/smsc/smc911x.c: In function ‘smc911x_hardware_send_pkt’: include/linux/minmax.h:20:28: error: comparison of distinct pointer types lacks a cast [-Werror] 20 | (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) | ^~ drivers/net/ethernet/smsc/smc911x.c:483:17: note: in expansion of macro ‘min’ 483 | PRINT_PKT(buf, min(len, 64)); Fix this by making the constant unsigned, to match the type of "len". While at it, replace the other missed ternary operator by min(), too. Convert the dummy PRINT_PKT() from a macro to a static inline function, to catch mistakes like this without having to enable debug options manually. Fixes: 5ff0348b7f755aac ("net: smc911x: replace ternary operator with min()") Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. Miller --- drivers/net/ethernet/smsc/smc911x.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 2694287770e6..24d66af797d4 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -140,7 +140,7 @@ static void PRINT_PKT(u_char *buf, int length) pr_cont("\n"); } #else -#define PRINT_PKT(x...) do { } while (0) +static inline void PRINT_PKT(u_char *buf, int length) { } #endif @@ -430,7 +430,7 @@ static inline void smc911x_rcv(struct net_device *dev) SMC_PULL_DATA(lp, data, pkt_len+2+3); DBG(SMC_DEBUG_PKTS, dev, "Received packet\n"); - PRINT_PKT(data, ((pkt_len - 4) <= 64) ? 
pkt_len - 4 : 64); + PRINT_PKT(data, min(pkt_len - 4, 64U)); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; @@ -480,7 +480,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) SMC_SET_TX_FIFO(lp, cmdB); DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n"); - PRINT_PKT(buf, min(len, 64)); + PRINT_PKT(buf, min(len, 64U)); /* Send pkt via PIO or DMA */ #ifdef SMC_USE_DMA -- cgit v1.2.3 From ce3342161edcb9ccac60ed6964652d37de8788c8 Mon Sep 17 00:00:00 2001 From: Josua Mayer Date: Tue, 17 May 2022 11:54:30 +0300 Subject: net: phy: adin: add support for clock output The ADIN1300 supports generating certain clocks on its GP_CLK pin, as well as providing the reference clock on CLK25_REF. Add support for selecting the clock via device-tree properties. Technically the phy also supports a recovered 125MHz clock for synchronous ethernet. SyncE should be configured dynamically at runtime, however Linux does not currently have a toggle for this, so support is explicitly omitted. Co-developed-by: Alvaro Karsz Signed-off-by: Alvaro Karsz Signed-off-by: Josua Mayer Signed-off-by: Jakub Kicinski --- drivers/net/phy/adin.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c index 5ce6da62cc8e..ee374a85544a 100644 --- a/drivers/net/phy/adin.c +++ b/drivers/net/phy/adin.c @@ -99,6 +99,15 @@ #define ADIN1300_GE_SOFT_RESET_REG 0xff0c #define ADIN1300_GE_SOFT_RESET BIT(0) +#define ADIN1300_GE_CLK_CFG_REG 0xff1f +#define ADIN1300_GE_CLK_CFG_MASK GENMASK(5, 0) +#define ADIN1300_GE_CLK_CFG_RCVR_125 BIT(5) +#define ADIN1300_GE_CLK_CFG_FREE_125 BIT(4) +#define ADIN1300_GE_CLK_CFG_REF_EN BIT(3) +#define ADIN1300_GE_CLK_CFG_HRT_RCVR BIT(2) +#define ADIN1300_GE_CLK_CFG_HRT_FREE BIT(1) +#define ADIN1300_GE_CLK_CFG_25 BIT(0) + #define ADIN1300_GE_RGMII_CFG_REG 0xff23 #define ADIN1300_GE_RGMII_RX_MSK GENMASK(8, 6) #define ADIN1300_GE_RGMII_RX_SEL(x) \ @@ -433,6 +442,33 @@ static int adin_set_tunable(struct phy_device *phydev, } } +static int adin_config_clk_out(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + const char *val = NULL; + u8 sel = 0; + + device_property_read_string(dev, "adi,phy-output-clock", &val); + if (!val) { + /* property not present, do not enable GP_CLK pin */ + } else if (strcmp(val, "25mhz-reference") == 0) { + sel |= ADIN1300_GE_CLK_CFG_25; + } else if (strcmp(val, "125mhz-free-running") == 0) { + sel |= ADIN1300_GE_CLK_CFG_FREE_125; + } else if (strcmp(val, "adaptive-free-running") == 0) { + sel |= ADIN1300_GE_CLK_CFG_HRT_FREE; + } else { + phydev_err(phydev, "invalid adi,phy-output-clock\n"); + return -EINVAL; + } + + if (device_property_read_bool(dev, "adi,phy-output-reference-clock")) + sel |= ADIN1300_GE_CLK_CFG_REF_EN; + + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_CLK_CFG_REG, + ADIN1300_GE_CLK_CFG_MASK, sel); +} + static int adin_config_init(struct phy_device *phydev) { int rc; @@ -455,6 +491,10 @@ static int adin_config_init(struct phy_device *phydev) if (rc < 0) return rc; + rc = adin_config_clk_out(phydev); + if (rc < 0) + return rc; + phydev_dbg(phydev, "PHY is using mode '%s'\n", phy_modes(phydev->interface)); -- cgit v1.2.3 From 4d3bf6fb533461227ea532024b050cc31461e025 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 17 May 2022 22:59:51 +0200 Subject: octeon_ep: Fix a memory leak in the error handling path of octep_request_irqs() 'oct->non_ioq_irq_names' is not freed in the error 
handling path of octep_request_irqs(). Add the missing kfree(). Fixes: 37d79d059606 ("octeon_ep: add Tx/Rx processing and interrupt support") Signed-off-by: Christophe JAILLET Acked-by: Veerasenareddy Burru Reviewed-by: Dan Carpenter Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 94409e329a9a..4e8db659c0d6 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -267,6 +267,8 @@ non_ioq_irq_err: --i; free_irq(oct->msix_entries[i].vector, oct); } + kfree(oct->non_ioq_irq_names); + oct->non_ioq_irq_names = NULL; alloc_err: return -1; } -- cgit v1.2.3 From 3588c189e45aac69aa0deeedfd0d5de364030f37 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 17 May 2022 22:59:59 +0200 Subject: octeon_ep: Fix irq releasing in the error handling path of octep_request_irqs() When taken, the error handling path does not undo correctly what has already been allocated. Introduce a new loop index, 'j', in order to simplify the error handling path and rewrite part of it. It is now written with the same logic and intermediate variables used when resources are allocated. This is much more straightforward. Fixes: 37d79d059606 ("octeon_ep: add Tx/Rx processing and interrupt support") Signed-off-by: Christophe JAILLET Reviewed-by: Dan Carpenter Signed-off-by: Jakub Kicinski --- .../net/ethernet/marvell/octeon_ep/octep_main.c | 25 ++++++++++++---------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 4e8db659c0d6..97f080c66dd4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -202,7 +202,7 @@ static int octep_request_irqs(struct octep_device *oct) struct msix_entry *msix_entry; char **non_ioq_msix_names; int num_non_ioq_msix; - int ret, i; + int ret, i, j; num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf); non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf); @@ -233,23 +233,23 @@ static int octep_request_irqs(struct octep_device *oct) } /* Request IRQs for Tx/Rx queues */ - for (i = 0; i < oct->num_oqs; i++) { - ioq_vector = oct->ioq_vector[i]; - msix_entry = &oct->msix_entries[i + num_non_ioq_msix]; + for (j = 0; j < oct->num_oqs; j++) { + ioq_vector = oct->ioq_vector[j]; + msix_entry = &oct->msix_entries[j + num_non_ioq_msix]; snprintf(ioq_vector->name, sizeof(ioq_vector->name), - "%s-q%d", netdev->name, i); + "%s-q%d", netdev->name, j); ret = request_irq(msix_entry->vector, octep_ioq_intr_handler, 0, ioq_vector->name, ioq_vector); if (ret) { netdev_err(netdev, "request_irq failed for Q-%d; err=%d", - i, ret); + j, ret); goto ioq_irq_err; } - cpumask_set_cpu(i % num_online_cpus(), + cpumask_set_cpu(j % num_online_cpus(), &ioq_vector->affinity_mask); irq_set_affinity_hint(msix_entry->vector, &ioq_vector->affinity_mask); @@ -257,10 +257,13 @@ static int octep_request_irqs(struct octep_device *oct) return 0; ioq_irq_err: - while (i > num_non_ioq_msix) { - --i; - irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); - free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]); + while (j) { + --j; + ioq_vector = oct->ioq_vector[j]; + msix_entry = &oct->msix_entries[j + num_non_ioq_msix]; 
+ + irq_set_affinity_hint(msix_entry->vector, NULL); + free_irq(msix_entry->vector, ioq_vector); } non_ioq_irq_err: while (i) { -- cgit v1.2.3 From 309ec443079b0c6fcfb93a62c2554a164ade3f3e Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 17 May 2022 23:58:20 -0700 Subject: sfc: siena: Have a unique wrapper ifndef for efx channels header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both sfc/efx_channels.h and sfc/siena/efx_channels.h used the same wrapper #ifndef EFX_CHANNELS_H, this patch changes the siena define to be EFX_SIENA_CHANNELS_H to avoid build system confusion. This fixes the following build break: drivers/net/ethernet/sfc/ptp.c:2191:28: error: ‘efx_copy_channel’ undeclared here (not in a function); did you mean ‘efx_ptp_channel’? 2191 | .copy = efx_copy_channel, Fixes: 6e173d3b4af9 ("sfc: Copy shared files needed for Siena (part 1)") Signed-off-by: Saeed Mahameed Cc: Edward Cree Acked-by: Martin Habets Link: https://lore.kernel.org/r/20220518065820.131611-1-saeed@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/efx_channels.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.h b/drivers/net/ethernet/sfc/siena/efx_channels.h index 10d78049b885..c4b95a2d770f 100644 --- a/drivers/net/ethernet/sfc/siena/efx_channels.h +++ b/drivers/net/ethernet/sfc/siena/efx_channels.h @@ -8,8 +8,8 @@ * by the Free Software Foundation, incorporated herein by reference. */ -#ifndef EFX_CHANNELS_H -#define EFX_CHANNELS_H +#ifndef EFX_SIENA_CHANNELS_H +#define EFX_SIENA_CHANNELS_H extern unsigned int efx_siena_interrupt_mode; extern unsigned int efx_siena_rss_cpus; -- cgit v1.2.3 From d935053a62fa11d06c757c1725782e46e7e823db Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 18 May 2022 11:30:22 -0700 Subject: net/mlx5: fix multiple definitions of mlx5_lag_mpesw_init / mlx5_lag_mpesw_cleanup static inline is needed in the header. Fixes: 94db33177819 ("net/mlx5: Support multiport eswitch mode") Acked-by: Saeed Mahameed Link: https://lore.kernel.org/r/20220518183022.2034373-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h index d39a02280e29..be4abcb8fcd5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h @@ -19,8 +19,8 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev); void mlx5_lag_mpesw_init(struct mlx5_lag *ldev); void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev); #else -void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) {} -void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) {} +static inline void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) {} +static inline void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) {} #endif #endif /* __MLX5_LAG_MPESW_H__ */ -- cgit v1.2.3 From b885aab3d39d1c81709e957324c7fb9aeac02c38 Mon Sep 17 00:00:00 2001 From: Michael Trimarchi Date: Wed, 18 May 2022 08:20:07 +0200 Subject: net: fec: Avoid allocating rx buffer using ATOMIC in ndo_open Make ndo_open less sensitive to memory pressure. 
Signed-off-by: Michael Trimarchi Reviewed-by: Jakub Kicinski Link: https://lore.kernel.org/r/20220518062007.10056-1-michael@amarulasolutions.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/freescale/fec_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 6e52f3ad182f..3cb298f1d69c 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3076,7 +3076,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) rxq = fep->rx_queue[queue]; bdp = rxq->bd.base; for (i = 0; i < rxq->bd.ring_size; i++) { - skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); + skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL); if (!skb) goto err_alloc; -- cgit v1.2.3 From e43d940f480b69ab98dd5d14e9fefdef8851db1a Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Wed, 18 May 2022 09:50:55 +0200 Subject: nfp: flower: support ct merging when mangle action exists Current implementation of ct merging doesn't support the case that the fields mangling in pre_ct rules are matched in post_ct rules. This change is to support merging when mangling mac address, ip address, tos, ttl and l4 port. VLAN and MPLS mangling is not involved yet. Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Simon Horman Link: https://lore.kernel.org/r/20220518075055.130649-1-simon.horman@corigine.com Signed-off-by: Paolo Abeni --- .../net/ethernet/netronome/nfp/flower/conntrack.c | 243 +++++++++++++-------- drivers/net/ethernet/netronome/nfp/flower/match.c | 51 +++-- 2 files changed, 189 insertions(+), 105 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 1edcd9f86c9c..443a5d6eb57b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -76,12 +76,119 @@ bool is_post_ct_flow(struct flow_cls_offload *flow) return false; } +/** + * get_mangled_key() - Mangle the key if mangle act exists + * @rule: rule that carries the actions + * @buf: pointer to key to be mangled + * @offset: used to adjust mangled offset in L2/L3/L4 header + * @key_sz: key size + * @htype: mangling type + * + * Returns buf where the mangled key stores. + */ +static void *get_mangled_key(struct flow_rule *rule, void *buf, + u32 offset, size_t key_sz, + enum flow_action_mangle_base htype) +{ + struct flow_action_entry *act; + u32 *val = (u32 *)buf; + u32 off, msk, key; + int i; + + flow_action_for_each(i, act, &rule->action) { + if (act->id == FLOW_ACTION_MANGLE && + act->mangle.htype == htype) { + off = act->mangle.offset - offset; + msk = act->mangle.mask; + key = act->mangle.val; + + /* Mangling is supposed to be u32 aligned */ + if (off % 4 || off >= key_sz) + continue; + + val[off >> 2] &= msk; + val[off >> 2] |= key; + } + } + + return buf; +} + +/* Only tos and ttl are involved in flow_match_ip structure, which + * doesn't conform to the layout of ip/ipv6 header definition. So + * they need particular process here: fill them into the ip/ipv6 + * header, so that mangling actions can work directly. 
+ */ +#define NFP_IPV4_TOS_MASK GENMASK(23, 16) +#define NFP_IPV4_TTL_MASK GENMASK(31, 24) +#define NFP_IPV6_TCLASS_MASK GENMASK(27, 20) +#define NFP_IPV6_HLIMIT_MASK GENMASK(7, 0) +static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf, + bool is_v6) +{ + struct flow_match_ip match; + /* IPv4's ttl field is in third dword. */ + __be32 ip_hdr[3]; + u32 tmp, hdr_len; + + flow_rule_match_ip(rule, &match); + + if (is_v6) { + tmp = FIELD_PREP(NFP_IPV6_TCLASS_MASK, match.key->tos); + ip_hdr[0] = cpu_to_be32(tmp); + tmp = FIELD_PREP(NFP_IPV6_HLIMIT_MASK, match.key->ttl); + ip_hdr[1] = cpu_to_be32(tmp); + hdr_len = 2 * sizeof(__be32); + } else { + tmp = FIELD_PREP(NFP_IPV4_TOS_MASK, match.key->tos); + ip_hdr[0] = cpu_to_be32(tmp); + tmp = FIELD_PREP(NFP_IPV4_TTL_MASK, match.key->ttl); + ip_hdr[2] = cpu_to_be32(tmp); + hdr_len = 3 * sizeof(__be32); + } + + get_mangled_key(rule, ip_hdr, 0, hdr_len, + is_v6 ? FLOW_ACT_MANGLE_HDR_TYPE_IP6 : + FLOW_ACT_MANGLE_HDR_TYPE_IP4); + + match.key = buf; + + if (is_v6) { + tmp = be32_to_cpu(ip_hdr[0]); + match.key->tos = FIELD_GET(NFP_IPV6_TCLASS_MASK, tmp); + tmp = be32_to_cpu(ip_hdr[1]); + match.key->ttl = FIELD_GET(NFP_IPV6_HLIMIT_MASK, tmp); + } else { + tmp = be32_to_cpu(ip_hdr[0]); + match.key->tos = FIELD_GET(NFP_IPV4_TOS_MASK, tmp); + tmp = be32_to_cpu(ip_hdr[2]); + match.key->ttl = FIELD_GET(NFP_IPV4_TTL_MASK, tmp); + } + + return buf; +} + +/* Note entry1 and entry2 are not swappable, entry1 should be + * the former flow whose mangle action need be taken into account + * if existed, and entry2 should be the latter flow whose action + * we don't care. + */ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, struct nfp_fl_ct_flow_entry *entry2) { unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys & entry2->rule->match.dissector->used_keys; - bool out; + bool out, is_v6 = false; + u8 ip_proto = 0; + /* Temporary buffer for mangling keys, 64 is enough to cover max + * struct size of key in various fields that may be mangled. + * Supported fileds to mangle: + * mac_src/mac_dst(struct flow_match_eth_addrs, 12B) + * nw_tos/nw_ttl(struct flow_match_ip, 2B) + * nw_src/nw_dst(struct flow_match_ipv4/6_addrs, 32B) + * tp_src/tp_dst(struct flow_match_ports, 4B) + */ + char buf[64]; if (entry1->netdev && entry2->netdev && entry1->netdev != entry2->netdev) @@ -105,6 +212,14 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, flow_rule_match_basic(entry1->rule, &match1); flow_rule_match_basic(entry2->rule, &match2); + + /* n_proto field is a must in ct-related flows, + * it should be either ipv4 or ipv6. 
+ */ + is_v6 = match1.key->n_proto == htons(ETH_P_IPV6); + /* ip_proto field is a must when port field is cared */ + ip_proto = match1.key->ip_proto; + COMPARE_UNMASKED_FIELDS(match1, match2, &out); if (out) goto check_failed; @@ -115,6 +230,13 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, flow_rule_match_ipv4_addrs(entry1->rule, &match1); flow_rule_match_ipv4_addrs(entry2->rule, &match2); + + memcpy(buf, match1.key, sizeof(*match1.key)); + match1.key = get_mangled_key(entry1->rule, buf, + offsetof(struct iphdr, saddr), + sizeof(*match1.key), + FLOW_ACT_MANGLE_HDR_TYPE_IP4); + COMPARE_UNMASKED_FIELDS(match1, match2, &out); if (out) goto check_failed; @@ -125,16 +247,34 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, flow_rule_match_ipv6_addrs(entry1->rule, &match1); flow_rule_match_ipv6_addrs(entry2->rule, &match2); + + memcpy(buf, match1.key, sizeof(*match1.key)); + match1.key = get_mangled_key(entry1->rule, buf, + offsetof(struct ipv6hdr, saddr), + sizeof(*match1.key), + FLOW_ACT_MANGLE_HDR_TYPE_IP6); + COMPARE_UNMASKED_FIELDS(match1, match2, &out); if (out) goto check_failed; } if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) { + enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC; struct flow_match_ports match1, match2; flow_rule_match_ports(entry1->rule, &match1); flow_rule_match_ports(entry2->rule, &match2); + + if (ip_proto == IPPROTO_UDP) + htype = FLOW_ACT_MANGLE_HDR_TYPE_UDP; + else if (ip_proto == IPPROTO_TCP) + htype = FLOW_ACT_MANGLE_HDR_TYPE_TCP; + + memcpy(buf, match1.key, sizeof(*match1.key)); + match1.key = get_mangled_key(entry1->rule, buf, 0, + sizeof(*match1.key), htype); + COMPARE_UNMASKED_FIELDS(match1, match2, &out); if (out) goto check_failed; @@ -145,6 +285,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, flow_rule_match_eth_addrs(entry1->rule, &match1); flow_rule_match_eth_addrs(entry2->rule, &match2); + + memcpy(buf, match1.key, sizeof(*match1.key)); + match1.key = get_mangled_key(entry1->rule, buf, 0, + sizeof(*match1.key), + FLOW_ACT_MANGLE_HDR_TYPE_ETH); + COMPARE_UNMASKED_FIELDS(match1, match2, &out); if (out) goto check_failed; @@ -185,6 +331,8 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1, flow_rule_match_ip(entry1->rule, &match1); flow_rule_match_ip(entry2->rule, &match2); + + match1.key = get_mangled_tos_ttl(entry1->rule, buf, is_v6); COMPARE_UNMASKED_FIELDS(match1, match2, &out); if (out) goto check_failed; @@ -256,98 +404,16 @@ check_failed: return -EINVAL; } -static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in, - struct flow_rule *rule) -{ - enum flow_action_mangle_base htype = a_in->mangle.htype; - u32 offset = a_in->mangle.offset; - - switch (htype) { - case FLOW_ACT_MANGLE_HDR_TYPE_ETH: - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) - return -EOPNOTSUPP; - break; - case FLOW_ACT_MANGLE_HDR_TYPE_IP4: - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { - struct flow_match_ip match; - - flow_rule_match_ip(rule, &match); - if (offset == offsetof(struct iphdr, ttl) && - match.mask->ttl) - return -EOPNOTSUPP; - if (offset == round_down(offsetof(struct iphdr, tos), 4) && - match.mask->tos) - return -EOPNOTSUPP; - } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - struct flow_match_ipv4_addrs match; - - flow_rule_match_ipv4_addrs(rule, &match); - if (offset == offsetof(struct iphdr, saddr) && - match.mask->src) - return -EOPNOTSUPP; - if (offset == offsetof(struct iphdr, daddr) && - match.mask->dst) - 
return -EOPNOTSUPP; - } - break; - case FLOW_ACT_MANGLE_HDR_TYPE_IP6: - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { - struct flow_match_ip match; - - flow_rule_match_ip(rule, &match); - if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) && - match.mask->ttl) - return -EOPNOTSUPP; - /* for ipv6, tos and flow_lbl are in the same word */ - if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) && - match.mask->tos) - return -EOPNOTSUPP; - } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - struct flow_match_ipv6_addrs match; - - flow_rule_match_ipv6_addrs(rule, &match); - if (offset >= offsetof(struct ipv6hdr, saddr) && - offset < offsetof(struct ipv6hdr, daddr) && - memchr_inv(&match.mask->src, 0, sizeof(match.mask->src))) - return -EOPNOTSUPP; - if (offset >= offsetof(struct ipv6hdr, daddr) && - offset < sizeof(struct ipv6hdr) && - memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst))) - return -EOPNOTSUPP; - } - break; - case FLOW_ACT_MANGLE_HDR_TYPE_TCP: - case FLOW_ACT_MANGLE_HDR_TYPE_UDP: - /* currently only can modify ports */ - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) - return -EOPNOTSUPP; - break; - default: - break; - } - return 0; -} - static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry, struct nfp_fl_ct_flow_entry *post_ct_entry, struct nfp_fl_ct_flow_entry *nft_entry) { struct flow_action_entry *act; - int err, i; + int i; /* Check for pre_ct->action conflicts */ flow_action_for_each(i, act, &pre_ct_entry->rule->action) { switch (act->id) { - case FLOW_ACTION_MANGLE: - err = nfp_ct_check_mangle_merge(act, nft_entry->rule); - if (err) - return err; - err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule); - if (err) - return err; - break; case FLOW_ACTION_VLAN_PUSH: case FLOW_ACTION_VLAN_POP: case FLOW_ACTION_VLAN_MANGLE: @@ -363,11 +429,6 @@ static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry, /* Check for nft->action conflicts */ flow_action_for_each(i, act, &nft_entry->rule->action) { switch (act->id) { - case FLOW_ACTION_MANGLE: - err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule); - if (err) - return err; - break; case FLOW_ACTION_VLAN_PUSH: case FLOW_ACTION_VLAN_POP: case FLOW_ACTION_VLAN_MANGLE: @@ -924,7 +985,7 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt, err = nfp_ct_merge_check(pre_ct_entry, nft_entry); if (err) return err; - err = nfp_ct_merge_check(post_ct_entry, nft_entry); + err = nfp_ct_merge_check(nft_entry, post_ct_entry); if (err) return err; err = nfp_ct_check_meta(post_ct_entry, nft_entry); @@ -1009,7 +1070,7 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt, if (post_ct_entry->chain_index != pre_ct_entry->chain_index) return -EINVAL; - err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry); + err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry); if (err) return err; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 9d86eea4dc16..193a167a6762 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -98,16 +98,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; + u8 tmp; int i; flow_rule_match_eth_addrs(rule, &match); /* Populate mac frame. 
*/ for (i = 0; i < ETH_ALEN; i++) { - ext->mac_dst[i] |= match.key->dst[i] & - match.mask->dst[i]; + tmp = match.key->dst[i] & match.mask->dst[i]; + ext->mac_dst[i] |= tmp & (~msk->mac_dst[i]); msk->mac_dst[i] |= match.mask->dst[i]; - ext->mac_src[i] |= match.key->src[i] & - match.mask->src[i]; + + tmp = match.key->src[i] & match.mask->src[i]; + ext->mac_src[i] |= tmp & (~msk->mac_src[i]); msk->mac_src[i] |= match.mask->src[i]; } } @@ -189,11 +191,16 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; + __be16 tmp; flow_rule_match_ports(rule, &match); - ext->port_src |= match.key->src & match.mask->src; - ext->port_dst |= match.key->dst & match.mask->dst; + + tmp = match.key->src & match.mask->src; + ext->port_src |= tmp & (~msk->port_src); msk->port_src |= match.mask->src; + + tmp = match.key->dst & match.mask->dst; + ext->port_dst |= tmp & (~msk->port_dst); msk->port_dst |= match.mask->dst; } } @@ -212,11 +219,16 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match; + u8 tmp; flow_rule_match_ip(rule, &match); - ext->tos |= match.key->tos & match.mask->tos; - ext->ttl |= match.key->ttl & match.mask->ttl; + + tmp = match.key->tos & match.mask->tos; + ext->tos |= tmp & (~msk->tos); msk->tos |= match.mask->tos; + + tmp = match.key->ttl & match.mask->ttl; + ext->ttl |= tmp & (~msk->ttl); msk->ttl |= match.mask->ttl; } @@ -325,11 +337,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { struct flow_match_ipv4_addrs match; + __be32 tmp; flow_rule_match_ipv4_addrs(rule, &match); - ext->ipv4_src |= match.key->src & match.mask->src; - ext->ipv4_dst |= match.key->dst & match.mask->dst; + + tmp = match.key->src & match.mask->src; + ext->ipv4_src |= tmp & (~msk->ipv4_src); msk->ipv4_src |= match.mask->src; + + tmp = match.key->dst & match.mask->dst; + ext->ipv4_dst |= tmp & (~msk->ipv4_dst); msk->ipv4_dst |= match.mask->dst; } @@ -342,15 +359,21 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; + u8 tmp; int i; flow_rule_match_ipv6_addrs(rule, &match); for (i = 0; i < sizeof(ext->ipv6_src); i++) { - ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] & - match.mask->src.s6_addr[i]; - ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] & - match.mask->dst.s6_addr[i]; + tmp = match.key->src.s6_addr[i] & + match.mask->src.s6_addr[i]; + ext->ipv6_src.s6_addr[i] |= tmp & + (~msk->ipv6_src.s6_addr[i]); msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i]; + + tmp = match.key->dst.s6_addr[i] & + match.mask->dst.s6_addr[i]; + ext->ipv6_dst.s6_addr[i] |= tmp & + (~msk->ipv6_dst.s6_addr[i]); msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i]; } } -- cgit v1.2.3 From caf6b7f81e053dfdc5f16e943b355bc954e0de34 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 16 May 2022 17:23:45 -0700 Subject: can: can-dev: move to netif_napi_add_weight() We want to remove the weight argument from the basic version of the netif_napi_add() call. Move all the callers in drivers/net/can that pass a custom weight (i.e. not NAPI_POLL_WEIGHT or 64) to the netif_napi_add_weight() API. 
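For reference (a sketch with hypothetical names, not from the patch): the weight registered here becomes the budget handed to the driver's NAPI poll callback, which is why drivers with a hardware-specific value, such as a mailbox count or FIFO depth, now state it explicitly through netif_napi_add_weight():

| #include <linux/netdevice.h>
|
| static bool example_rx_one_frame(struct net_device *ndev); /* hypothetical */
|
| static int example_poll(struct napi_struct *napi, int budget)
| {
|         int work_done = 0;
|
|         /* process at most 'budget' (== the registered weight) frames */
|         while (work_done < budget && example_rx_one_frame(napi->dev))
|                 work_done++;
|
|         if (work_done < budget)
|                 napi_complete_done(napi, work_done);
|         return work_done;
| }
|
| static void example_setup_napi(struct net_device *ndev, struct napi_struct *napi)
| {
|         /* 16 is illustrative, e.g. the number of RX mailboxes */
|         netif_napi_add_weight(ndev, napi, example_poll, 16);
| }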
Link: https://lore.kernel.org/all/20220517002345.1812104-1-kuba@kernel.org Signed-off-by: Jakub Kicinski Signed-off-by: Marc Kleine-Budde --- drivers/net/can/at91_can.c | 2 +- drivers/net/can/c_can/c_can_main.c | 3 ++- drivers/net/can/dev/rx-offload.c | 3 ++- drivers/net/can/grcan.c | 2 +- drivers/net/can/janz-ican3.c | 2 +- drivers/net/can/mscan/mscan.c | 2 +- drivers/net/can/pch_can.c | 2 +- drivers/net/can/rcar/rcar_can.c | 4 ++-- drivers/net/can/rcar/rcar_canfd.c | 4 ++-- drivers/net/can/xilinx_can.c | 2 +- 10 files changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index a00655ccda02..953aef8d3978 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1317,7 +1317,7 @@ static int at91_can_probe(struct platform_device *pdev) priv->pdata = dev_get_platdata(&pdev->dev); priv->mb0_id = 0x7ff; - netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); + netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); if (at91_is_sam9263(priv)) dev->sysfs_groups[0] = &at91_sysfs_attr_group; diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index faa217f26771..2034dbe30958 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -1246,7 +1246,8 @@ struct net_device *alloc_c_can_dev(int msg_obj_num) priv->tx.tail = 0; priv->tx.obj_num = msg_obj_tx_num; - netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num); + netif_napi_add_weight(dev, &priv->napi, c_can_poll, + priv->msg_obj_rx_num); priv->dev = dev; priv->can.bittiming_const = &c_can_bittiming_const; diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index 6d0dc18c03e7..509d21170c8c 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -337,7 +337,8 @@ static int can_rx_offload_init_queue(struct net_device *dev, skb_queue_head_init(&offload->skb_queue); __skb_queue_head_init(&offload->skb_irq_queue); - netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight); + netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll, + weight); dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n", __func__, offload->skb_queue_len_max); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 5215bd9b2c80..76df4807d366 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1609,7 +1609,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev, timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0); } - netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT); + netif_napi_add_weight(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT); SET_NETDEV_DEV(dev, &ofdev->dev); dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n", diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 808c105cf8f7..35bfb82d6929 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1910,7 +1910,7 @@ static int ican3_probe(struct platform_device *pdev) mod = netdev_priv(ndev); mod->ndev = ndev; mod->num = pdata->modno; - netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); + netif_napi_add_weight(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); skb_queue_head_init(&mod->echoq); spin_lock_init(&mod->lock); init_completion(&mod->termination_comp); diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 5b5802fac772..78a21ab63601 100644 --- 
a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -679,7 +679,7 @@ struct net_device *alloc_mscandev(void) dev->flags |= IFF_ECHO; /* we support local echo */ - netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8); + netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8); priv->can.bittiming_const = &mscan_bittiming_const; priv->can.do_set_bittiming = mscan_do_set_bittiming; diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 888bef03de09..fde3ac516d26 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -1189,7 +1189,7 @@ static int pch_can_probe(struct pci_dev *pdev, ndev->netdev_ops = &pch_can_netdev_ops; priv->can.clock.freq = PCH_CAN_CLK; /* Hz */ - netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END); + netif_napi_add_weight(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END); rc = pci_enable_msi(priv->dev); if (rc) { diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 33e37395379d..103f1a92eee2 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -803,8 +803,8 @@ static int rcar_can_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); - netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll, - RCAR_CAN_NAPI_WEIGHT); + netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll, + RCAR_CAN_NAPI_WEIGHT); err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed, error %d\n", diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 1e121e04208c..f75ebaa35519 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1789,8 +1789,8 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, priv->gpriv = gpriv; SET_NETDEV_DEV(ndev, &pdev->dev); - netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, - RCANFD_NAPI_WEIGHT); + netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll, + RCANFD_NAPI_WEIGHT); spin_lock_init(&priv->tx_lock); devm_can_led_init(ndev); gpriv->ch[priv->channel] = priv; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 43f0c6a064ba..7f730b471e9f 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1804,7 +1804,7 @@ static int xcan_probe(struct platform_device *pdev) priv->can.clock.freq = clk_get_rate(priv->can_clk); - netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max); + netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max); ret = register_candev(ndev); if (ret) { -- cgit v1.2.3 From 6c1e423a3c84953edcf91ff03ab97829b287184a Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Wed, 18 May 2022 17:45:27 +0200 Subject: can: can-dev: remove obsolete CAN LED support Since commit 30f3b42147ba6f ("can: mark led trigger as broken") the CAN specific LED support was disabled and marked as BROKEN. As the common LED support with CONFIG_LEDS_TRIGGER_NETDEV should do this work now the code can be removed as preparation for a CAN netdevice Kconfig rework. 
Link: https://lore.kernel.org/all/20220518154527.29046-1-socketcan@hartkopp.net Suggested-by: Vincent Mailhol Signed-off-by: Oliver Hartkopp [mkl: remove led.h from MAINTAINERS] Signed-off-by: Marc Kleine-Budde --- MAINTAINERS | 1 - drivers/net/can/Kconfig | 17 ---- drivers/net/can/at91_can.c | 10 --- drivers/net/can/c_can/c_can_main.c | 16 +--- drivers/net/can/ctucanfd/ctucanfd_base.c | 10 --- drivers/net/can/dev/Makefile | 2 - drivers/net/can/dev/dev.c | 5 -- drivers/net/can/dev/rx-offload.c | 2 - drivers/net/can/flexcan/flexcan-core.c | 7 -- drivers/net/can/ifi_canfd/ifi_canfd.c | 9 -- drivers/net/can/led.c | 140 ------------------------------- drivers/net/can/m_can/m_can.c | 11 --- drivers/net/can/m_can/m_can.h | 1 - drivers/net/can/rcar/rcar_can.c | 8 -- drivers/net/can/rcar/rcar_canfd.c | 7 -- drivers/net/can/sja1000/sja1000.c | 11 --- drivers/net/can/spi/hi311x.c | 8 -- drivers/net/can/spi/mcp251x.c | 10 --- drivers/net/can/sun4i_can.c | 7 -- drivers/net/can/ti_hecc.c | 8 -- drivers/net/can/usb/mcba_usb.c | 8 -- drivers/net/can/usb/usb_8dev.c | 11 --- drivers/net/can/xilinx_can.c | 10 +-- include/linux/can/dev.h | 10 --- include/linux/can/led.h | 51 ----------- 25 files changed, 2 insertions(+), 378 deletions(-) delete mode 100644 drivers/net/can/led.c delete mode 100644 include/linux/can/led.h (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index 3a29bf2452f4..234380c959c8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4376,7 +4376,6 @@ F: drivers/net/can/ F: drivers/phy/phy-can-transceiver.c F: include/linux/can/bittiming.h F: include/linux/can/dev.h -F: include/linux/can/led.h F: include/linux/can/length.h F: include/linux/can/platform/ F: include/linux/can/rx-offload.h diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index ac760fd39282..b2dcc1e5a388 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -71,23 +71,6 @@ config CAN_CALC_BITTIMING arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw". If unsure, say Y. -config CAN_LEDS - bool "Enable LED triggers for Netlink based drivers" - depends on LEDS_CLASS - # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do - # everything that this driver is doing. This is marked as broken - # because it uses stuff that is intended to be changed or removed. - # Please consider switching to the netdev trigger and confirm it - # fulfills your needs instead of fixing this driver. - depends on BROKEN - select LEDS_TRIGGERS - help - This option adds two LED triggers for packet receive and transmit - events on each supported CAN device. - - Say Y here if you are working on a system with led-class supported - LEDs and you want to use them as canbus activity indicators. 
- config CAN_AT91 tristate "Atmel AT91 onchip CAN controller" depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 953aef8d3978..29ed0d3cd171 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -23,7 +23,6 @@ #include #include -#include #define AT91_MB_MASK(i) ((1 << (i)) - 1) @@ -618,8 +617,6 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) stats->rx_bytes += cf->len; netif_receive_skb(skb); - - can_led_event(dev, CAN_LED_EVENT_RX); } /** @@ -854,7 +851,6 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr) mb - get_mb_tx_first(priv), NULL); dev->stats.tx_packets++; - can_led_event(dev, CAN_LED_EVENT_TX); } } @@ -1101,8 +1097,6 @@ static int at91_open(struct net_device *dev) goto out_close; } - can_led_event(dev, CAN_LED_EVENT_OPEN); - /* start chip and queuing */ at91_chip_start(dev); napi_enable(&priv->napi); @@ -1133,8 +1127,6 @@ static int at91_close(struct net_device *dev) close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); - return 0; } @@ -1331,8 +1323,6 @@ static int at91_can_probe(struct platform_device *pdev) goto exit_free; } - devm_can_led_init(dev); - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->reg_base, dev->irq); diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index 2034dbe30958..a7362af0babb 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -40,7 +40,6 @@ #include #include #include -#include #include "c_can.h" @@ -759,7 +758,6 @@ static void c_can_do_tx(struct net_device *dev) stats->tx_bytes += bytes; stats->tx_packets += pkts; - can_led_event(dev, CAN_LED_EVENT_TX); tail = c_can_get_tx_tail(tx_ring); @@ -906,9 +904,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota) quota -= n; } - if (pkts) - can_led_event(dev, CAN_LED_EVENT_RX); - return pkts; } @@ -1182,8 +1177,6 @@ static int c_can_open(struct net_device *dev) if (err) goto exit_start_fail; - can_led_event(dev, CAN_LED_EVENT_OPEN); - napi_enable(&priv->napi); /* enable status change, error and module interrupts */ c_can_irq_control(priv, true); @@ -1214,8 +1207,6 @@ static int c_can_close(struct net_device *dev) c_can_reset_ram(priv, false); c_can_pm_runtime_put_sync(priv); - can_led_event(dev, CAN_LED_EVENT_STOP); - return 0; } @@ -1365,8 +1356,6 @@ static const struct net_device_ops c_can_netdev_ops = { int register_c_can_dev(struct net_device *dev) { - int err; - /* Deactivate pins to prevent DRA7 DCAN IP from being * stuck in transition when module is disabled. 
* Pins are activated in c_can_start() and deactivated @@ -1378,10 +1367,7 @@ int register_c_can_dev(struct net_device *dev) dev->netdev_ops = &c_can_netdev_ops; c_can_set_ethtool_ops(dev); - err = register_candev(dev); - if (!err) - devm_can_led_init(dev); - return err; + return register_candev(dev); } EXPORT_SYMBOL_GPL(register_c_can_dev); diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index 2ada097d1ede..64990bf20fdc 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include "ctucanfd.h" @@ -957,9 +956,6 @@ static int ctucan_rx_poll(struct napi_struct *napi, int quota) ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO); } - if (work_done) - can_led_event(ndev, CAN_LED_EVENT_RX); - if (!framecnt && res != 0) { if (napi_complete_done(napi, work_done)) { /* Clear and enable RBNEI. It is level-triggered, so @@ -1079,8 +1075,6 @@ clear: } } while (some_buffers_processed); - can_led_event(ndev, CAN_LED_EVENT_TX); - spin_lock_irqsave(&priv->tx_lock, flags); /* Check if at least one TX buffer is free */ @@ -1236,7 +1230,6 @@ static int ctucan_open(struct net_device *ndev) } netdev_info(ndev, "ctu_can_fd device registered\n"); - can_led_event(ndev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); netif_start_queue(ndev); @@ -1269,7 +1262,6 @@ static int ctucan_close(struct net_device *ndev) free_irq(ndev->irq, ndev); close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); pm_runtime_put(priv->dev); return 0; @@ -1434,8 +1426,6 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne goto err_deviceoff; } - devm_can_led_init(ndev); - pm_runtime_put(dev); netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. 
of txt buffers:%d\n", diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile index 3e2e207861fc..af2901db473c 100644 --- a/drivers/net/can/dev/Makefile +++ b/drivers/net/can/dev/Makefile @@ -7,5 +7,3 @@ can-dev-y += length.o can-dev-y += netlink.o can-dev-y += rx-offload.o can-dev-y += skb.o - -can-dev-$(CONFIG_CAN_LEDS) += led.o diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index e7ab45f1c43b..96c9d9db00cf 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -512,8 +511,6 @@ static __init int can_dev_init(void) { int err; - can_led_notifier_init(); - err = can_netlink_register(); if (!err) pr_info(MOD_DESC "\n"); @@ -525,8 +522,6 @@ module_init(can_dev_init); static __exit void can_dev_exit(void) { can_netlink_unregister(); - - can_led_notifier_exit(); } module_exit(can_dev_exit); diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c index 509d21170c8c..a32a01c172d4 100644 --- a/drivers/net/can/dev/rx-offload.c +++ b/drivers/net/can/dev/rx-offload.c @@ -70,8 +70,6 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) napi_reschedule(&offload->napi); } - can_led_event(offload->dev, CAN_LED_EVENT_RX); - return work_done; } diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index fe9bda0f5ec4..d060088047f1 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -1081,7 +1080,6 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) can_rx_offload_get_echo_skb(&priv->offload, 0, reg_ctrl << 16, NULL); stats->tx_packets++; - can_led_event(dev, CAN_LED_EVENT_TX); /* after sending a RTR frame MB is in RX mode */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, @@ -1738,8 +1736,6 @@ static int flexcan_open(struct net_device *dev) flexcan_chip_interrupts_enable(dev); - can_led_event(dev, CAN_LED_EVENT_OPEN); - netif_start_queue(dev); return 0; @@ -1785,8 +1781,6 @@ static int flexcan_close(struct net_device *dev) pm_runtime_put(priv->dev); - can_led_event(dev, CAN_LED_EVENT_STOP); - return 0; } @@ -2189,7 +2183,6 @@ static int flexcan_probe(struct platform_device *pdev) } of_can_transceiver(dev); - devm_can_led_init(dev); return 0; diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index b0a3473f211d..968ed6d7316b 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -345,9 +345,6 @@ static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota) rxst = readl(priv->base + IFI_CANFD_RXSTCMD); } - if (pkts) - can_led_event(ndev, CAN_LED_EVENT_RX); - return pkts; } @@ -626,7 +623,6 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) { stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL); stats->tx_packets++; - can_led_event(ndev, CAN_LED_EVENT_TX); } if (isr & tx_irq_mask) @@ -830,7 +826,6 @@ static int ifi_canfd_open(struct net_device *ndev) ifi_canfd_start(ndev); - can_led_event(ndev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); netif_start_queue(ndev); @@ -853,8 +848,6 @@ static int ifi_canfd_close(struct net_device *ndev) close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); - return 0; } @@ -1004,8 +997,6 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) goto err_reg; } - devm_can_led_init(ndev); - 
dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n", priv->base, ndev->irq, priv->can.clock.freq); diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c deleted file mode 100644 index db14897f8e16..000000000000 --- a/drivers/net/can/led.c +++ /dev/null @@ -1,140 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2012, Fabio Baltieri - * Copyright 2012, Kurt Van Dijck - */ - -#include -#include -#include -#include -#include -#include - -#include - -static unsigned long led_delay = 50; -module_param(led_delay, ulong, 0644); -MODULE_PARM_DESC(led_delay, - "blink delay time for activity leds (msecs, default: 50)."); - -/* Trigger a LED event in response to a CAN device event */ -void can_led_event(struct net_device *netdev, enum can_led_event event) -{ - struct can_priv *priv = netdev_priv(netdev); - - switch (event) { - case CAN_LED_EVENT_OPEN: - led_trigger_event(priv->tx_led_trig, LED_FULL); - led_trigger_event(priv->rx_led_trig, LED_FULL); - led_trigger_event(priv->rxtx_led_trig, LED_FULL); - break; - case CAN_LED_EVENT_STOP: - led_trigger_event(priv->tx_led_trig, LED_OFF); - led_trigger_event(priv->rx_led_trig, LED_OFF); - led_trigger_event(priv->rxtx_led_trig, LED_OFF); - break; - case CAN_LED_EVENT_TX: - if (led_delay) { - led_trigger_blink_oneshot(priv->tx_led_trig, - &led_delay, &led_delay, 1); - led_trigger_blink_oneshot(priv->rxtx_led_trig, - &led_delay, &led_delay, 1); - } - break; - case CAN_LED_EVENT_RX: - if (led_delay) { - led_trigger_blink_oneshot(priv->rx_led_trig, - &led_delay, &led_delay, 1); - led_trigger_blink_oneshot(priv->rxtx_led_trig, - &led_delay, &led_delay, 1); - } - break; - } -} -EXPORT_SYMBOL_GPL(can_led_event); - -static void can_led_release(struct device *gendev, void *res) -{ - struct can_priv *priv = netdev_priv(to_net_dev(gendev)); - - led_trigger_unregister_simple(priv->tx_led_trig); - led_trigger_unregister_simple(priv->rx_led_trig); - led_trigger_unregister_simple(priv->rxtx_led_trig); -} - -/* Register CAN LED triggers for a CAN device - * - * This is normally called from a driver's probe function - */ -void devm_can_led_init(struct net_device *netdev) -{ - struct can_priv *priv = netdev_priv(netdev); - void *res; - - res = devres_alloc(can_led_release, 0, GFP_KERNEL); - if (!res) { - netdev_err(netdev, "cannot register LED triggers\n"); - return; - } - - snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name), - "%s-tx", netdev->name); - snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name), - "%s-rx", netdev->name); - snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name), - "%s-rxtx", netdev->name); - - led_trigger_register_simple(priv->tx_led_trig_name, - &priv->tx_led_trig); - led_trigger_register_simple(priv->rx_led_trig_name, - &priv->rx_led_trig); - led_trigger_register_simple(priv->rxtx_led_trig_name, - &priv->rxtx_led_trig); - - devres_add(&netdev->dev, res); -} -EXPORT_SYMBOL_GPL(devm_can_led_init); - -/* NETDEV rename notifier to rename the associated led triggers too */ -static int can_led_notifier(struct notifier_block *nb, unsigned long msg, - void *ptr) -{ - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct can_priv *priv = safe_candev_priv(netdev); - char name[CAN_LED_NAME_SZ]; - - if (!priv) - return NOTIFY_DONE; - - if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig) - return NOTIFY_DONE; - - if (msg == NETDEV_CHANGENAME) { - snprintf(name, sizeof(name), "%s-tx", netdev->name); - led_trigger_rename_static(name, priv->tx_led_trig); - 
- snprintf(name, sizeof(name), "%s-rx", netdev->name); - led_trigger_rename_static(name, priv->rx_led_trig); - - snprintf(name, sizeof(name), "%s-rxtx", netdev->name); - led_trigger_rename_static(name, priv->rxtx_led_trig); - } - - return NOTIFY_DONE; -} - -/* notifier block for netdevice event */ -static struct notifier_block can_netdev_notifier __read_mostly = { - .notifier_call = can_led_notifier, -}; - -int __init can_led_notifier_init(void) -{ - return register_netdevice_notifier(&can_netdev_notifier); -} - -void __exit can_led_notifier_exit(void) -{ - unregister_netdevice_notifier(&can_netdev_notifier); -} diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 7582908e1192..5d0c82d8b9a9 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -565,9 +565,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) rxfs = m_can_read(cdev, M_CAN_RXF0S); } - if (pkts) - can_led_event(dev, CAN_LED_EVENT_RX); - return pkts; } @@ -1087,8 +1084,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) if (cdev->is_peripheral) timestamp = m_can_get_timestamp(cdev); m_can_tx_update_stats(cdev, 0, timestamp); - - can_led_event(dev, CAN_LED_EVENT_TX); netif_wake_queue(dev); } } else { @@ -1097,7 +1092,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id) if (m_can_echo_tx_event(dev) != 0) goto out_fail; - can_led_event(dev, CAN_LED_EVENT_TX); if (netif_queue_stopped(dev) && !m_can_tx_fifo_full(cdev)) netif_wake_queue(dev); @@ -1562,7 +1556,6 @@ static int m_can_close(struct net_device *dev) can_rx_offload_disable(&cdev->offload); close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); phy_power_off(cdev->transceiver); @@ -1806,8 +1799,6 @@ static int m_can_open(struct net_device *dev) /* start the m_can controller */ m_can_start(dev); - can_led_event(dev, CAN_LED_EVENT_OPEN); - if (!cdev->is_peripheral) napi_enable(&cdev->napi); @@ -1995,8 +1986,6 @@ int m_can_class_register(struct m_can_classdev *cdev) goto rx_offload_del; } - devm_can_led_init(cdev->net); - of_can_transceiver(cdev->net); dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n", diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index d18b515e6ccc..4c0267f9f297 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -7,7 +7,6 @@ #define _CAN_M_CAN_H_ #include -#include #include #include #include diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 103f1a92eee2..d45762f1cf6b 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -389,7 +388,6 @@ static void rcar_can_tx_done(struct net_device *ndev) /* Clear interrupt */ isr = readb(&priv->regs->isr); writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr); - can_led_event(ndev, CAN_LED_EVENT_TX); } static irqreturn_t rcar_can_interrupt(int irq, void *dev_id) @@ -531,7 +529,6 @@ static int rcar_can_open(struct net_device *ndev) ndev->irq, err); goto out_close; } - can_led_event(ndev, CAN_LED_EVENT_OPEN); rcar_can_start(ndev); netif_start_queue(ndev); return 0; @@ -581,7 +578,6 @@ static int rcar_can_close(struct net_device *ndev) clk_disable_unprepare(priv->can_clk); clk_disable_unprepare(priv->clk); close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); return 0; } @@ -666,8 +662,6 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv) } stats->rx_packets++; - can_led_event(priv->ndev, CAN_LED_EVENT_RX); - 
netif_receive_skb(skb); } @@ -812,8 +806,6 @@ static int rcar_can_probe(struct platform_device *pdev) goto fail_candev; } - devm_can_led_init(ndev); - dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq); return 0; diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index f75ebaa35519..40a11445d021 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -1128,7 +1127,6 @@ static void rcar_canfd_tx_done(struct net_device *ndev) /* Clear interrupt */ rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts & ~RCANFD_CFSTS_CFTXIF); - can_led_event(ndev, CAN_LED_EVENT_TX); } static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch) @@ -1419,7 +1417,6 @@ static int rcar_canfd_open(struct net_device *ndev) if (err) goto out_close; netif_start_queue(ndev); - can_led_event(ndev, CAN_LED_EVENT_OPEN); return 0; out_close: napi_disable(&priv->napi); @@ -1469,7 +1466,6 @@ static int rcar_canfd_close(struct net_device *ndev) napi_disable(&priv->napi); clk_disable_unprepare(gpriv->can_clk); close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); return 0; } @@ -1619,8 +1615,6 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) */ rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff); - can_led_event(priv->ndev, CAN_LED_EVENT_RX); - if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; stats->rx_packets++; @@ -1792,7 +1786,6 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll, RCANFD_NAPI_WEIGHT); spin_lock_init(&priv->tx_lock); - devm_can_led_init(ndev); gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 966316479485..2e7638f98cf1 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -60,7 +60,6 @@ #include #include -#include #include "sja1000.h" @@ -383,8 +382,6 @@ static void sja1000_rx(struct net_device *dev) sja1000_write_cmdreg(priv, CMD_RRB); netif_rx(skb); - - can_led_event(dev, CAN_LED_EVENT_RX); } static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) @@ -531,7 +528,6 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) stats->tx_packets++; } netif_wake_queue(dev); - can_led_event(dev, CAN_LED_EVENT_TX); } if (isrc & IRQ_RI) { /* receive interrupt */ @@ -587,8 +583,6 @@ static int sja1000_open(struct net_device *dev) /* init and start chi */ sja1000_start(dev); - can_led_event(dev, CAN_LED_EVENT_OPEN); - netif_start_queue(dev); return 0; @@ -606,8 +600,6 @@ static int sja1000_close(struct net_device *dev) close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); - return 0; } @@ -673,9 +665,6 @@ int register_sja1000dev(struct net_device *dev) ret = register_candev(dev); - if (!ret) - devm_can_led_init(dev); - return ret; } EXPORT_SYMBOL_GPL(register_sja1000dev); diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index a5b2952b8d0f..ebc4ebb44c98 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -16,7 +16,6 @@ #include #include -#include #include #include #include @@ -354,8 +353,6 @@ static void hi3110_hw_rx(struct spi_device *spi) } priv->net->stats.rx_packets++; - can_led_event(priv->net, CAN_LED_EVENT_RX); - netif_rx(skb); } @@ -567,8 +564,6 @@ 
static int hi3110_stop(struct net_device *net) mutex_unlock(&priv->hi3110_lock); - can_led_event(net, CAN_LED_EVENT_STOP); - return 0; } @@ -725,7 +720,6 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) if (priv->tx_busy && statf & HI3110_STAT_TXMTY) { net->stats.tx_packets++; net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL); - can_led_event(net, CAN_LED_EVENT_TX); priv->tx_busy = false; netif_wake_queue(net); } @@ -783,7 +777,6 @@ static int hi3110_open(struct net_device *net) if (ret) goto out_free_wq; - can_led_event(net, CAN_LED_EVENT_OPEN); netif_wake_queue(net); mutex_unlock(&priv->hi3110_lock); @@ -931,7 +924,6 @@ static int hi3110_can_probe(struct spi_device *spi) if (ret) goto error_probe; - devm_can_led_init(net); netdev_info(net, "%x successfully initialized.\n", priv->model); return 0; diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index fc747bff5eeb..666a4505a55a 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -738,8 +737,6 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) } priv->net->stats.rx_packets++; - can_led_event(priv->net, CAN_LED_EVENT_RX); - netif_rx(skb); } @@ -973,8 +970,6 @@ static int mcp251x_stop(struct net_device *net) mutex_unlock(&priv->mcp_lock); - can_led_event(net, CAN_LED_EVENT_STOP); - return 0; } @@ -1177,7 +1172,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) break; if (intf & CANINTF_TX) { - can_led_event(net, CAN_LED_EVENT_TX); if (priv->tx_busy) { net->stats.tx_packets++; net->stats.tx_bytes += can_get_echo_skb(net, 0, @@ -1232,8 +1226,6 @@ static int mcp251x_open(struct net_device *net) if (ret) goto out_free_irq; - can_led_event(net, CAN_LED_EVENT_OPEN); - netif_wake_queue(net); mutex_unlock(&priv->mcp_lock); @@ -1403,8 +1395,6 @@ static int mcp251x_can_probe(struct spi_device *spi) if (ret) goto error_probe; - devm_can_led_init(net); - ret = mcp251x_gpio_setup(priv); if (ret) goto error_probe; diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 25d6d81ab4f4..155b90f6c767 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -516,8 +515,6 @@ static void sun4i_can_rx(struct net_device *dev) sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF); netif_rx(skb); - - can_led_event(dev, CAN_LED_EVENT_RX); } static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) @@ -664,7 +661,6 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; netif_wake_queue(dev); - can_led_event(dev, CAN_LED_EVENT_TX); } if ((isrc & SUN4I_INT_RBUF_VLD) && !(isrc & SUN4I_INT_DATA_OR)) { @@ -729,7 +725,6 @@ static int sun4ican_open(struct net_device *dev) goto exit_can_start; } - can_led_event(dev, CAN_LED_EVENT_OPEN); netif_start_queue(dev); return 0; @@ -756,7 +751,6 @@ static int sun4ican_close(struct net_device *dev) free_irq(dev->irq, dev); close_candev(dev); - can_led_event(dev, CAN_LED_EVENT_STOP); return 0; } @@ -883,7 +877,6 @@ static int sun4ican_probe(struct platform_device *pdev) DRV_NAME, err); goto exit_free; } - devm_can_led_init(dev); dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n", priv->base, dev->irq); diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index bb3f2e3b004c..debe17bfd0f0 100644 --- a/drivers/net/can/ti_hecc.c 
+++ b/drivers/net/can/ti_hecc.c @@ -34,7 +34,6 @@ #include #include -#include #include #define DRV_NAME "ti_hecc" @@ -759,7 +758,6 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) can_rx_offload_get_echo_skb(&priv->offload, mbxno, stamp, NULL); stats->tx_packets++; - can_led_event(ndev, CAN_LED_EVENT_TX); --priv->tx_tail; } @@ -814,8 +812,6 @@ static int ti_hecc_open(struct net_device *ndev) return err; } - can_led_event(ndev, CAN_LED_EVENT_OPEN); - ti_hecc_start(ndev); can_rx_offload_enable(&priv->offload); netif_start_queue(ndev); @@ -834,8 +830,6 @@ static int ti_hecc_close(struct net_device *ndev) close_candev(ndev); ti_hecc_transceiver_switch(priv, 0); - can_led_event(ndev, CAN_LED_EVENT_STOP); - return 0; } @@ -954,8 +948,6 @@ static int ti_hecc_probe(struct platform_device *pdev) goto probe_exit_offload; } - devm_can_led_init(ndev); - dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", priv->base, (u32)ndev->irq); diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index c45a814e1de2..792ab9da317d 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -232,8 +231,6 @@ static void mcba_usb_write_bulk_callback(struct urb *urb) netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx, NULL); - - can_led_event(netdev, CAN_LED_EVENT_TX); } if (urb->status) @@ -452,7 +449,6 @@ static void mcba_usb_process_can(struct mcba_priv *priv, } stats->rx_packets++; - can_led_event(priv->netdev, CAN_LED_EVENT_RX); netif_rx(skb); } @@ -700,7 +696,6 @@ static int mcba_usb_open(struct net_device *netdev) priv->can_speed_check = true; priv->can.state = CAN_STATE_ERROR_ACTIVE; - can_led_event(netdev, CAN_LED_EVENT_OPEN); netif_start_queue(netdev); return 0; @@ -732,7 +727,6 @@ static int mcba_usb_close(struct net_device *netdev) mcba_urb_unlink(priv); close_candev(netdev); - can_led_event(netdev, CAN_LED_EVENT_STOP); return 0; } @@ -857,8 +851,6 @@ static int mcba_usb_probe(struct usb_interface *intf, priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress); priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress); - devm_can_led_init(netdev); - /* Start USB dev only if we have successfully registered CAN device */ err = mcba_usb_start(priv); if (err) { diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index b638604bf1ee..f3363575bf32 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -21,7 +21,6 @@ #include #include #include -#include /* driver constants */ #define MAX_RX_URBS 20 @@ -480,8 +479,6 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, stats->rx_packets++; netif_rx(skb); - - can_led_event(priv->netdev, CAN_LED_EVENT_RX); } else { netdev_warn(priv->netdev, "frame type %d unknown", msg->type); @@ -582,8 +579,6 @@ static void usb_8dev_write_bulk_callback(struct urb *urb) netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); - can_led_event(netdev, CAN_LED_EVENT_TX); - /* Release context */ context->echo_index = MAX_TX_URBS; @@ -807,8 +802,6 @@ static int usb_8dev_open(struct net_device *netdev) if (err) return err; - can_led_event(netdev, CAN_LED_EVENT_OPEN); - /* finally start device */ err = usb_8dev_start(priv); if (err) { @@ -865,8 +858,6 @@ static int usb_8dev_close(struct net_device *netdev) close_candev(netdev); - can_led_event(netdev, CAN_LED_EVENT_STOP); 
- return err; } @@ -974,8 +965,6 @@ static int usb_8dev_probe(struct usb_interface *intf, (version>>8) & 0xff, version & 0xff); } - devm_can_led_init(netdev); - return 0; cleanup_unregister_candev: diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 7f730b471e9f..8a3b7b103ca4 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #define DRIVER_NAME "xilinx_can" @@ -1209,10 +1208,8 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) XCAN_IXR_RXNEMP_MASK); } - if (work_done) { - can_led_event(ndev, CAN_LED_EVENT_RX); + if (work_done) xcan_update_error_state_after_rxtx(ndev); - } if (work_done < quota) { if (napi_complete_done(napi, work_done)) { @@ -1298,7 +1295,6 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) spin_unlock_irqrestore(&priv->tx_lock, flags); - can_led_event(ndev, CAN_LED_EVENT_TX); xcan_update_error_state_after_rxtx(ndev); } @@ -1420,7 +1416,6 @@ static int xcan_open(struct net_device *ndev) goto err_candev; } - can_led_event(ndev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); netif_start_queue(ndev); @@ -1452,7 +1447,6 @@ static int xcan_close(struct net_device *ndev) free_irq(ndev->irq, ndev); close_candev(ndev); - can_led_event(ndev, CAN_LED_EVENT_STOP); pm_runtime_put(priv->dev); return 0; @@ -1812,8 +1806,6 @@ static int xcan_probe(struct platform_device *pdev) goto err_disableclks; } - devm_can_led_init(ndev); - pm_runtime_put(&pdev->dev); if (priv->devtype.flags & XCAN_FLAG_CANFD_2) { diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index c2ea47f30046..e22dc03c850e 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -85,15 +84,6 @@ struct can_priv { int (*do_get_berr_counter)(const struct net_device *dev, struct can_berr_counter *bec); int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv); - -#ifdef CONFIG_CAN_LEDS - struct led_trigger *tx_led_trig; - char tx_led_trig_name[CAN_LED_NAME_SZ]; - struct led_trigger *rx_led_trig; - char rx_led_trig_name[CAN_LED_NAME_SZ]; - struct led_trigger *rxtx_led_trig; - char rxtx_led_trig_name[CAN_LED_NAME_SZ]; -#endif }; static inline bool can_tdc_is_enabled(const struct can_priv *priv) diff --git a/include/linux/can/led.h b/include/linux/can/led.h deleted file mode 100644 index 7c3cfd798c56..000000000000 --- a/include/linux/can/led.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2012, Fabio Baltieri - */ - -#ifndef _CAN_LED_H -#define _CAN_LED_H - -#include -#include -#include - -enum can_led_event { - CAN_LED_EVENT_OPEN, - CAN_LED_EVENT_STOP, - CAN_LED_EVENT_TX, - CAN_LED_EVENT_RX, -}; - -#ifdef CONFIG_CAN_LEDS - -/* keep space for interface name + "-tx"/"-rx"/"-rxtx" - * suffix and null terminator - */ -#define CAN_LED_NAME_SZ (IFNAMSIZ + 6) - -void can_led_event(struct net_device *netdev, enum can_led_event event); -void devm_can_led_init(struct net_device *netdev); -int __init can_led_notifier_init(void); -void __exit can_led_notifier_exit(void); - -#else - -static inline void can_led_event(struct net_device *netdev, - enum can_led_event event) -{ -} -static inline void devm_can_led_init(struct net_device *netdev) -{ -} -static inline int can_led_notifier_init(void) -{ - return 0; -} -static inline void can_led_notifier_exit(void) -{ -} - -#endif - -#endif /* !_CAN_LED_H */ -- cgit v1.2.3 From 
1a6dd9996699889313327be03981716a8337656b Mon Sep 17 00:00:00 2001 From: Vincent Mailhol Date: Wed, 18 May 2022 20:43:57 +0900 Subject: can: mcp251xfd: silence clang's -Wunaligned-access warning clang emits a -Wunaligned-access warning on union mcp251xfd_tx_ojb_load_buf. The reason is that field hw_tx_obj (not declared as packed) is being packed right after a 16 bits field inside a packed struct: | union mcp251xfd_tx_obj_load_buf { | struct __packed { | struct mcp251xfd_buf_cmd cmd; | /* ^ 16 bits fields */ | struct mcp251xfd_hw_tx_obj_raw hw_tx_obj; | /* ^ not declared as packed */ | } nocrc; | struct __packed { | struct mcp251xfd_buf_cmd_crc cmd; | struct mcp251xfd_hw_tx_obj_raw hw_tx_obj; | __be16 crc; | } crc; | } ____cacheline_aligned; Starting from LLVM 14, having an unpacked struct nested in a packed struct triggers a warning. c.f. [1]. This is a false positive because the field is always being accessed with the relevant put_unaligned_*() function. Adding __packed to the structure declaration silences the warning. [1] https://github.com/llvm/llvm-project/issues/55520 Link: https://lore.kernel.org/all/20220518114357.55452-1-mailhol.vincent@wanadoo.fr Signed-off-by: Vincent Mailhol Reported-by: kernel test robot Tested-by: Nathan Chancellor # build Signed-off-by: Marc Kleine-Budde --- drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index 1d43bccc29bf..2b0309fedfac 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -441,7 +441,7 @@ struct mcp251xfd_hw_tef_obj { /* The tx_obj_raw version is used in spi async, i.e. without * regmap. We have to take care of endianness ourselves. */ -struct mcp251xfd_hw_tx_obj_raw { +struct __packed mcp251xfd_hw_tx_obj_raw { __le32 id; __le32 flags; u8 data[sizeof_field(struct canfd_frame, data)]; -- cgit v1.2.3 From 5cebb40bc9554aafcc492431181f43c6231b0459 Mon Sep 17 00:00:00 2001 From: Harini Katakam Date: Wed, 18 May 2022 22:37:56 +0530 Subject: net: macb: Fix PTP one step sync support PTP one step sync packets cannot have CSUM padding and insertion in SW since time stamp is inserted on the fly by HW. In addition, ptp4l version 3.0 and above report an error when skb timestamps are reported for packets that not processed for TX TS after transmission. Add a helper to identify PTP one step sync and fix the above two errors. Add a common mask for PTP header flag field "twoStepflag". Also reset ptp OSS bit when one step is not selected. 
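As background (a condensed sketch of the detection logic this patch adds as ptp_one_step_sync(); it omits the patch's early SKBTX_HW_TSTAMP short-circuit): a one-step Sync is a PTP Sync message whose twoStepFlag, bit 1 of the first flag byte in the PTP header, is clear, so the MAC inserts the timestamp on the fly and the driver must not touch the frame's checksum/FCS in software:

| #include <linux/ptp_classify.h>
|
| /* Sketch: true if skb carries a PTP Sync with twoStepFlag clear. */
| static bool example_is_one_step_sync(struct sk_buff *skb)
| {
|         unsigned int type = ptp_classify_raw(skb);
|         struct ptp_header *hdr;
|
|         if (type == PTP_CLASS_NONE)
|                 return false;
|
|         hdr = ptp_parse_header(skb, type);
|         if (!hdr)
|                 return false;
|
|         return !(hdr->flag_field[0] & PTP_FLAG_TWOSTEP) &&
|                ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
| }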
Fixes: ab91f0a9b5f4 ("net: macb: Add hardware PTP support") Fixes: 653e92a9175e ("net: macb: add support for padding and fcs computation") Signed-off-by: Harini Katakam Reviewed-by: Radhey Shyam Pandey Reviewed-by: Claudiu Beznea Link: https://lore.kernel.org/r/20220518170756.7752-1-harini.katakam@xilinx.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/cadence/macb_main.c | 40 ++++++++++++++++++++++++++++---- drivers/net/ethernet/cadence/macb_ptp.c | 4 +++- include/linux/ptp_classify.h | 3 +++ 3 files changed, 42 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 61284baa0496..3a1b5ac48ca5 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include "macb.h" @@ -1124,6 +1125,36 @@ static void macb_tx_error_task(struct work_struct *work) spin_unlock_irqrestore(&bp->lock, flags); } +static bool ptp_one_step_sync(struct sk_buff *skb) +{ + struct ptp_header *hdr; + unsigned int ptp_class; + u8 msgtype; + + /* No need to parse packet if PTP TS is not involved */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + goto not_oss; + + /* Identify and return whether PTP one step sync is being processed */ + ptp_class = ptp_classify_raw(skb); + if (ptp_class == PTP_CLASS_NONE) + goto not_oss; + + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + goto not_oss; + + if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP) + goto not_oss; + + msgtype = ptp_get_msgtype(hdr, ptp_class); + if (msgtype == PTP_MSGTYPE_SYNC) + return true; + +not_oss: + return false; +} + static void macb_tx_interrupt(struct macb_queue *queue) { unsigned int tail; @@ -1168,8 +1199,8 @@ static void macb_tx_interrupt(struct macb_queue *queue) /* First, update TX stats if needed */ if (skb) { - if (unlikely(skb_shinfo(skb)->tx_flags & - SKBTX_HW_TSTAMP) && + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + !ptp_one_step_sync(skb) && gem_ptp_do_txstamp(queue, skb, desc) == 0) { /* skb now belongs to timestamp buffer * and will be removed later @@ -1999,7 +2030,8 @@ static unsigned int macb_tx_map(struct macb *bp, ctrl |= MACB_BF(TX_LSO, lso_ctrl); ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); if ((bp->dev->features & NETIF_F_HW_CSUM) && - skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) + skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl && + !ptp_one_step_sync(skb)) ctrl |= MACB_BIT(TX_NOCRC); } else /* Only set MSS/MFS on payload descriptors @@ -2097,7 +2129,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) if (!(ndev->features & NETIF_F_HW_CSUM) || !((*skb)->ip_summed != CHECKSUM_PARTIAL) || - skb_shinfo(*skb)->gso_size) /* Not available for GSO */ + skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb)) return 0; if (padlen <= 0) { diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index fb6b27f46b15..9559c16078f9 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -470,8 +470,10 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd) case HWTSTAMP_TX_ONESTEP_SYNC: if (gem_ptp_set_one_step_sync(bp, 1) != 0) return -ERANGE; - fallthrough; + tx_bd_control = TSTAMP_ALL_FRAMES; + break; case HWTSTAMP_TX_ON: + gem_ptp_set_one_step_sync(bp, 0); tx_bd_control = TSTAMP_ALL_FRAMES; break; default: diff --git a/include/linux/ptp_classify.h 
b/include/linux/ptp_classify.h index fefa7790dc46..2b6ea36ad162 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -43,6 +43,9 @@ #define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */ #define OFF_PTP_SEQUENCE_ID 30 +/* PTP header flag fields */ +#define PTP_FLAG_TWOSTEP BIT(1) + /* Below defines should actually be removed at some point in time. */ #define IP6_HLEN 40 #define UDP_HLEN 8 -- cgit v1.2.3 From f01cdcf891a569dee187a5de0c25cd5766151524 Mon Sep 17 00:00:00 2001 From: David Ober Date: Tue, 17 May 2022 14:05:39 -0400 Subject: net: usb: r8152: Add in new Devices that are supported for Mac-Passthru Lenovo Thunderbolt 4 Dock, and other Lenovo USB Docks are using the original Realtek USB ethernet Vendor and Product IDs If the Network device is Realtek verify that it is on a Lenovo USB hub before enabling the passthru feature This also adds in the device IDs for the Lenovo USB Dongle and one other USB-C dock V2 fix formating of code V3 remove Generic define for Device ID 0x8153 and change it to use value V4 rearrange defines and case statement to put them in better order v5 create helper function to do the testing work as suggested Signed-off-by: David Ober Link: https://lore.kernel.org/r/20220517180539.25839-1-dober6023@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/usb/r8152.c | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index c2da3438387c..7389d6ef8569 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -771,7 +771,9 @@ enum rtl8152_flags { }; #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 +#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 +#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062 struct tally_counter { __le64 tx_packets; @@ -9562,6 +9564,29 @@ u8 rtl8152_get_version(struct usb_interface *intf) } EXPORT_SYMBOL_GPL(rtl8152_get_version); +static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) +{ + int parent_vendor_id = le16_to_cpu(udev->parent->descriptor.idVendor); + int product_id = le16_to_cpu(udev->descriptor.idProduct); + int vendor_id = le16_to_cpu(udev->descriptor.idVendor); + + if (vendor_id == VENDOR_ID_LENOVO) { + switch (product_id) { + case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: + case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: + case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3: + case DEVICE_ID_THINKPAD_USB_C_DONGLE: + return 1; + } + } else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) { + switch (product_id) { + case 0x8153: + return 1; + } + } + return 0; +} + static int rtl8152_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -9642,13 +9667,7 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->hw_features &= ~NETIF_F_RXCSUM; } - if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) { - switch (le16_to_cpu(udev->descriptor.idProduct)) { - case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: - case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: - tp->lenovo_macpassthru = 1; - } - } + tp->lenovo_macpassthru = rtl8152_supports_lenovo_macpassthru(udev); if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial && (!strcmp(udev->serial, "000001000000") || -- cgit v1.2.3 From df98714e432abf5cbdac3e4c1a13f94c65ddb8d3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 18 May 2022 20:37:15 +0300 Subject: net: ethernet: SP7021: fix a use after free of skb->len 
The netif_receive_skb() function frees "skb" so store skb->len before it is freed. Fixes: fd3040b9394c ("net: ethernet: Add driver for Sunplus SP7021") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/YoUuy4iTjFAcSn03@kili Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sunplus/spl2sw_int.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sunplus/spl2sw_int.c b/drivers/net/ethernet/sunplus/spl2sw_int.c index 69b1e2e0271e..a37c9a4c281f 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_int.c +++ b/drivers/net/ethernet/sunplus/spl2sw_int.c @@ -29,6 +29,7 @@ int spl2sw_rx_poll(struct napi_struct *napi, int budget) u32 mask; int port; u32 cmd; + u32 len; /* Process high-priority queue and then low-priority queue. */ for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) { @@ -63,10 +64,11 @@ int spl2sw_rx_poll(struct napi_struct *napi, int budget) skb_put(skb, pkg_len - 4); /* Minus FCS */ skb->ip_summed = CHECKSUM_NONE; skb->protocol = eth_type_trans(skb, comm->ndev[port]); + len = skb->len; netif_receive_skb(skb); stats->rx_packets++; - stats->rx_bytes += skb->len; + stats->rx_bytes += len; /* Allocate a new skb for receiving. */ new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size); -- cgit v1.2.3 From b413b0cb008646e9f24ce5253cb3cf7ee217aff6 Mon Sep 17 00:00:00 2001 From: Duoming Zhou Date: Wed, 18 May 2022 19:57:33 +0800 Subject: NFC: hci: fix sleep in atomic context bugs in nfc_hci_hcp_message_tx There are sleep in atomic context bugs when the request to secure element of st21nfca is timeout. The root cause is that kzalloc and alloc_skb with GFP_KERNEL parameter and mutex_lock are called in st21nfca_se_wt_timeout which is a timer handler. The call tree shows the execution paths that could lead to bugs: (Interrupt context) st21nfca_se_wt_timeout nfc_hci_send_event nfc_hci_hcp_message_tx kzalloc(..., GFP_KERNEL) //may sleep alloc_skb(..., GFP_KERNEL) //may sleep mutex_lock() //may sleep This patch moves the operations that may sleep into a work item. The work item will run in another kernel thread which is in process context to execute the bottom half of the interrupt. So it could prevent atomic context from sleeping. 
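The fix follows the standard timer-to-workqueue deferral pattern. A minimal, generic sketch of that pattern (hypothetical foo_* names, not the st21nfca code, which follows in the diff):

	#include <linux/timer.h>
	#include <linux/workqueue.h>
	#include <linux/mutex.h>

	struct foo {
		struct timer_list timer;
		struct work_struct timeout_work;
		struct mutex lock;
	};

	static void foo_timeout_work(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, timeout_work);

		/* Runs in process context: mutexes, GFP_KERNEL allocations
		 * and other sleeping calls are allowed here.
		 */
		mutex_lock(&f->lock);
		/* ... recover from the timeout ... */
		mutex_unlock(&f->lock);
	}

	static void foo_timer_fn(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);

		/* Timer callbacks run in atomic context: only schedule the
		 * sleeping work, never perform it here.
		 */
		schedule_work(&f->timeout_work);
	}

	static void foo_init(struct foo *f)
	{
		mutex_init(&f->lock);
		INIT_WORK(&f->timeout_work, foo_timeout_work);
		timer_setup(&f->timer, foo_timer_fn, 0);
	}
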
Fixes: 2130fb97fecf ("NFC: st21nfca: Adding support for secure element") Signed-off-by: Duoming Zhou Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20220518115733.62111-1-duoming@zju.edu.cn Signed-off-by: Jakub Kicinski --- drivers/nfc/st21nfca/se.c | 17 ++++++++++++++--- drivers/nfc/st21nfca/st21nfca.h | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index c922f10d0d7b..7e213f8ddc98 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -241,7 +241,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx, } EXPORT_SYMBOL(st21nfca_hci_se_io); -static void st21nfca_se_wt_timeout(struct timer_list *t) +static void st21nfca_se_wt_work(struct work_struct *work) { /* * No answer from the secure element @@ -254,8 +254,9 @@ static void st21nfca_se_wt_timeout(struct timer_list *t) */ /* hardware reset managed through VCC_UICC_OUT power supply */ u8 param = 0x01; - struct st21nfca_hci_info *info = from_timer(info, t, - se_info.bwi_timer); + struct st21nfca_hci_info *info = container_of(work, + struct st21nfca_hci_info, + se_info.timeout_work); info->se_info.bwi_active = false; @@ -271,6 +272,13 @@ static void st21nfca_se_wt_timeout(struct timer_list *t) info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); } +static void st21nfca_se_wt_timeout(struct timer_list *t) +{ + struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer); + + schedule_work(&info->se_info.timeout_work); +} + static void st21nfca_se_activation_timeout(struct timer_list *t) { struct st21nfca_hci_info *info = from_timer(info, t, @@ -360,6 +368,7 @@ int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev, switch (event) { case ST21NFCA_EVT_TRANSMIT_DATA: del_timer_sync(&info->se_info.bwi_timer); + cancel_work_sync(&info->se_info.timeout_work); info->se_info.bwi_active = false; r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0); @@ -389,6 +398,7 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev) struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); init_completion(&info->se_info.req_completion); + INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work); /* initialize timers */ timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0); info->se_info.bwi_active = false; @@ -416,6 +426,7 @@ void st21nfca_se_deinit(struct nfc_hci_dev *hdev) if (info->se_info.se_active) del_timer_sync(&info->se_info.se_active_timer); + cancel_work_sync(&info->se_info.timeout_work); info->se_info.bwi_active = false; info->se_info.se_active = false; } diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h index cb6ad916be91..ae6771cc9894 100644 --- a/drivers/nfc/st21nfca/st21nfca.h +++ b/drivers/nfc/st21nfca/st21nfca.h @@ -141,6 +141,7 @@ struct st21nfca_se_info { se_io_cb_t cb; void *cb_context; + struct work_struct timeout_work; }; struct st21nfca_hci_info { -- cgit v1.2.3 From 0600bdde1fae75fb9bad72033d28edddc72b44b2 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:54:31 +0100 Subject: net: mtk_eth_soc: remove unused mac->mode mac->mode is only ever written to in one location, and is thus superflous. Remove it. 
Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1 - drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 - 2 files changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 31c5da5d6b72..b45469ad5ce7 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2982,7 +2982,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) /* mac config is not set */ mac->interface = PHY_INTERFACE_MODE_NA; - mac->mode = MLO_AN_PHY; mac->speed = SPEED_UNKNOWN; mac->phylink_config.dev = ð->netdev[id]->dev; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index b04977fa84f6..b292ee2d6575 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -999,7 +999,6 @@ struct mtk_eth { struct mtk_mac { int id; phy_interface_t interface; - unsigned int mode; int speed; struct device_node *of_node; struct phylink *phylink; -- cgit v1.2.3 From 5a7a2f4b29d7546244da7d8bbc1962fce5b230f2 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:54:36 +0100 Subject: net: mtk_eth_soc: remove unused sgmii flags The "flags" member of struct mtk_sgmii appears to be unused, as are the MTK_SGMII_PHYSPEED_* and MTK_HAS_FLAGS() macros. Remove them. Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index b292ee2d6575..085f53d23f25 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -867,23 +867,15 @@ struct mtk_soc_data { /* currently no SoC has more than 2 macs */ #define MTK_MAX_DEVS 2 -#define MTK_SGMII_PHYSPEED_AN BIT(31) -#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0) -#define MTK_SGMII_PHYSPEED_1000 BIT(0) -#define MTK_SGMII_PHYSPEED_2500 BIT(1) -#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x)) - /* struct mtk_sgmii - This is the structure holding sgmii regmap and its * characteristics * @regmap: The register map pointing at the range used to setup * SGMII modes - * @flags: The enum refers to which mode the sgmii wants to run on * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap */ struct mtk_sgmii { struct regmap *regmap[MTK_MAX_DEVS]; - u32 flags[MTK_MAX_DEVS]; u32 ana_rgc3; }; -- cgit v1.2.3 From bc5e93e0cd22e360eda23859b939280205567580 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:54:42 +0100 Subject: net: mtk_eth_soc: add mask and update PCS speed definitions The PCS speed setting is a two bit field, but it is defined as two separate bits. Add a bitfield mask for the speed definitions, an use the FIELD_PREP() macro to define each PCS speed. 
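As a quick illustration of the helpers involved (generic example, not the driver code): GENMASK() describes the field and FIELD_PREP() shifts a value into it, so consecutive field values replace hand-picked single bits.

	#include <linux/bits.h>
	#include <linux/bitfield.h>
	#include <linux/types.h>

	#define EXAMPLE_SPEED_MASK	GENMASK(3, 2)	/* two-bit field, bits 3:2 */

	static u32 example_set_speed(u32 reg, u32 speed_code)
	{
		/* FIELD_PREP(GENMASK(3, 2), 2) == BIT(3), so the new
		 * FIELD_PREP-based definitions encode the same register
		 * values as the old single-bit ones.
		 */
		reg &= ~EXAMPLE_SPEED_MASK;
		reg |= FIELD_PREP(EXAMPLE_SPEED_MASK, speed_code);
		return reg;
	}
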
Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 085f53d23f25..4f97195159f3 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -17,6 +17,7 @@ #include #include #include +#include #include "mtk_ppe.h" #define MTK_QDMA_PAGE_SIZE 2048 @@ -493,9 +494,10 @@ #define SGMSYS_SGMII_MODE 0x20 #define SGMII_IF_MODE_BIT0 BIT(0) #define SGMII_SPEED_DUPLEX_AN BIT(1) -#define SGMII_SPEED_10 0x0 -#define SGMII_SPEED_100 BIT(2) -#define SGMII_SPEED_1000 BIT(3) +#define SGMII_SPEED_MASK GENMASK(3, 2) +#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0) +#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1) +#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2) #define SGMII_DUPLEX_FULL BIT(4) #define SGMII_IF_MODE_BIT5 BIT(5) #define SGMII_REMOTE_FAULT_DIS BIT(8) -- cgit v1.2.3 From 7da3f901f8ecb425105fad39a0f5de73306abe52 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:54:47 +0100 Subject: net: mtk_eth_soc: correct 802.3z speed setting Phylink does not guarantee that state->speed will be set correctly in the mac_config() call, so it's a bug that the driver makes use of it. Moreover, it is making use of it in a function that is only ever called for 1000BASE-X and 2500BASE-X which operate at a fixed speed which happens to be the same setting irrespective of the interface mode. We can simply remove the switch statement and just set the SGMII interface speed. Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_sgmii.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index 5897940a418b..d378ecdf56cc 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -34,6 +34,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) return 0; } +/* For SGMII interface mode */ int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) { unsigned int val; @@ -60,6 +61,9 @@ int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) return 0; } +/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a + * fixed speed. + */ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, const struct phylink_link_state *state) { @@ -82,19 +86,7 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, /* SGMII force mode setting */ regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); val &= ~SGMII_IF_MODE_MASK; - - switch (state->speed) { - case SPEED_10: - val |= SGMII_SPEED_10; - break; - case SPEED_100: - val |= SGMII_SPEED_100; - break; - case SPEED_2500: - case SPEED_1000: - val |= SGMII_SPEED_1000; - break; - } + val |= SGMII_SPEED_1000; if (state->duplex == DUPLEX_FULL) val |= SGMII_DUPLEX_FULL; -- cgit v1.2.3 From a459187390bb221827f9c07866c3a5ffbdf9622b Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 18 May 2022 15:54:52 +0100 Subject: net: mtk_eth_soc: correct 802.3z duplex setting Phylink does not guarantee that state->duplex will be set correctly in the mac_config() call, so it's a bug that the driver makes use of it. Move the 802.3z PCS duplex configuration to mac_link_up(). 
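Both this and the previous fix come down to the same phylink rule; a rough sketch (hypothetical example_* names, not the mediatek code): per-link parameters are only trustworthy in mac_link_up(), whose arguments carry the resolved link settings.

	#include <linux/phylink.h>

	static void example_mac_link_up(struct phylink_config *config,
					struct phy_device *phy,
					unsigned int mode,
					phy_interface_t interface,
					int speed, int duplex,
					bool tx_pause, bool rx_pause)
	{
		/* speed/duplex here are the resolved link parameters;
		 * state->speed and state->duplex seen in mac_config() are
		 * not guaranteed to be valid and must not be programmed
		 * from there.
		 */
		if (duplex == DUPLEX_FULL) {
			/* program full duplex in the MAC/PCS */
		}
	}
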
Signed-off-by: Russell King Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 ++++++++++++---- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 + drivers/net/ethernet/mediatek/mtk_sgmii.c | 22 ++++++++++++++++------ 3 files changed, 29 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b45469ad5ce7..6c3d5f165e3d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -485,8 +485,18 @@ static void mtk_mac_link_up(struct phylink_config *config, { struct mtk_mac *mac = container_of(config, struct mtk_mac, phylink_config); - u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + u32 mcr; + + if (phy_interface_mode_is_8023z(interface)) { + struct mtk_eth *eth = mac->hw; + + /* Decide how GMAC and SGMIISYS be mapped */ + int sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? + 0 : mac->id; + mtk_sgmii_link_up(eth->sgmii, sid, speed, duplex); + } + mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | MAC_MCR_FORCE_RX_FC); @@ -2986,9 +2996,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->phylink_config.dev = ð->netdev[id]->dev; mac->phylink_config.type = PHYLINK_NETDEV; - /* This driver makes use of state->speed/state->duplex in - * mac_config - */ + /* This driver makes use of state->speed in mac_config */ mac->phylink_config.legacy_pre_march2020 = true; mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 4f97195159f3..7540dcb06a72 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1017,6 +1017,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id); int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, const struct phylink_link_state *state); +void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index d378ecdf56cc..f07a9d50a770 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -83,14 +83,10 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, val &= ~SGMII_AN_ENABLE; regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); - /* SGMII force mode setting */ + /* Set the speed etc but leave the duplex unchanged */ regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); - val &= ~SGMII_IF_MODE_MASK; + val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK; val |= SGMII_SPEED_1000; - - if (state->duplex == DUPLEX_FULL) - val |= SGMII_DUPLEX_FULL; - regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); /* Release PHYA power down state */ @@ -101,6 +97,20 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, return 0; } +/* For 1000BASE-X and 2500BASE-X interface modes */ +void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) +{ + unsigned int val; + + /* SGMII force duplex setting */ + regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); + val &= 
~SGMII_DUPLEX_FULL; + if (duplex == DUPLEX_FULL) + val |= SGMII_DUPLEX_FULL; + + regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); +} + void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) { struct mtk_sgmii *ss = eth->sgmii; -- cgit v1.2.3 From 4ce5a0bd3958ed248f0325bfcb95339f7c74feb2 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:54:57 +0100 Subject: net: mtk_eth_soc: stop passing phylink state to sgmii setup Now that mtk_sgmii_setup_mode_force() only uses the interface mode from the phylink state, pass just the interface mode into this function. Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- drivers/net/ethernet/mediatek/mtk_sgmii.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6c3d5f165e3d..080f7e356591 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -390,7 +390,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, /* Setup SGMIISYS with the determined property */ if (state->interface != PHY_INTERFACE_MODE_SGMII) err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, - state); + state->interface); else if (phylink_autoneg_inband(mode)) err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 7540dcb06a72..455b1460160e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1016,7 +1016,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, u32 ana_rgc3); int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id); int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, - const struct phylink_link_state *state); + phy_interface_t interface); void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index f07a9d50a770..38d52386c88f 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -65,7 +65,7 @@ int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) * fixed speed. */ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, - const struct phylink_link_state *state) + phy_interface_t interface) { unsigned int val; @@ -74,7 +74,7 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, regmap_read(ss->regmap[id], ss->ana_rgc3, &val); val &= ~RG_PHY_SPEED_MASK; - if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + if (interface == PHY_INTERFACE_MODE_2500BASEX) val |= RG_PHY_SPEED_3_125G; regmap_write(ss->regmap[id], ss->ana_rgc3, val); -- cgit v1.2.3 From 1ec619ee4a052fb9ac48b57554ac2722a0bfe73c Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:55:02 +0100 Subject: net: mtk_eth_soc: provide mtk_sgmii_config() Provide mtk_sgmii_config() to wrap up the decisions about which SGMII configuration will be called. 
Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 7 +------ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 5 ++--- drivers/net/ethernet/mediatek/mtk_sgmii.c | 20 +++++++++++++++++--- 3 files changed, 20 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 080f7e356591..87b5a837695f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -388,12 +388,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, 0 : mac->id; /* Setup SGMIISYS with the determined property */ - if (state->interface != PHY_INTERFACE_MODE_SGMII) - err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, - state->interface); - else if (phylink_autoneg_inband(mode)) - err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); - + err = mtk_sgmii_config(eth->sgmii, sid, mode, state->interface); if (err) goto init_err; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 455b1460160e..fa3c085bf7ed 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1014,9 +1014,8 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg); int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, u32 ana_rgc3); -int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id); -int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, - phy_interface_t interface); +int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, + phy_interface_t interface); void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index 38d52386c88f..4defc1ca41b6 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -35,7 +35,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) } /* For SGMII interface mode */ -int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) +static int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) { unsigned int val; @@ -64,8 +64,8 @@ int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) /* For 1000BASE-X and 2500BASE-X interface modes, which operate at a * fixed speed. 
*/ -int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, - phy_interface_t interface) +static int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, + phy_interface_t interface) { unsigned int val; @@ -97,6 +97,20 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, return 0; } +int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, + phy_interface_t interface) +{ + int err = 0; + + /* Setup SGMIISYS with the determined property */ + if (interface != PHY_INTERFACE_MODE_SGMII) + err = mtk_sgmii_setup_mode_force(ss, id, interface); + else if (phylink_autoneg_inband(mode)) + err = mtk_sgmii_setup_mode_an(ss, id); + + return err; +} + /* For 1000BASE-X and 2500BASE-X interface modes */ void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) { -- cgit v1.2.3 From 650a49bc65df6b0e0051a8f62d7c22d95a8f350d Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:55:07 +0100 Subject: net: mtk_eth_soc: add fixme comment for state->speed use Add a fixme comment for the last remaining incorrect usage of state->speed in the mac_config() method, which is strangely in a code path which is only run when the PHY interface mode changes. This means if we are in RGMII mode, changes in state->speed will not cause the INTF_MODE, TRGMII_RCK_CTRL and TRGMII_TCK_CTRL registers to be set according to the speed, nor will the TRGPLL clock be set to the correct value. Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 87b5a837695f..211457a8831d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -327,6 +327,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, state->interface)) goto err_phy; } else { + /* FIXME: this is incorrect. Not only does it + * use state->speed (which is not guaranteed + * to be correct) but it also makes use of it + * in a code path that will only be reachable + * when the PHY interface mode changes, not + * when the speed changes. Consequently, RGMII + * is probably broken. + */ mtk_gmac0_rgmii_adjust(mac->hw, state->interface, state->speed); -- cgit v1.2.3 From 0e37ad71b2ff772009595002da2860999e98e14e Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:55:12 +0100 Subject: net: mtk_eth_soc: move MAC_MCR setting to mac_finish() Move the setting of the MTK_MAC_MCR register from the end of mac_config into the phylink mac_finish() method, to keep it as the very last write that is done during configuration. 
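For orientation, a simplified sketch of where mac_finish() sits in phylink's configuration sequence (example_mac_finish is a hypothetical stub; the real implementation is added in the diff below):

	#include <linux/phylink.h>

	/* Rough order of callbacks during a major reconfiguration:
	 *
	 *   mac_prepare() -> mac_config() -> pcs_config() -> mac_finish()
	 *
	 * followed by mac_link_up() once the link comes up, so a register
	 * write placed in mac_finish() lands after any PCS programming.
	 */
	static int example_mac_finish(struct phylink_config *config,
				      unsigned int mode,
				      phy_interface_t interface)
	{
		/* final MAC register writes go here */
		return 0;
	}
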
Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 33 +++++++++++++++++++---------- 1 file changed, 22 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 211457a8831d..d9947faa2cd3 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -269,8 +269,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, struct mtk_mac *mac = container_of(config, struct mtk_mac, phylink_config); struct mtk_eth *eth = mac->hw; - u32 mcr_cur, mcr_new, sid, i; int val, ge_mode, err = 0; + u32 sid, i; /* MT76x8 has no hardware settings between for the MAC */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && @@ -408,16 +408,6 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, return; } - /* Setup gmac */ - mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr_new = mcr_cur; - mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | - MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; - - /* Only update control register when needed! */ - if (mcr_new != mcr_cur) - mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); - return; err_phy: @@ -430,6 +420,26 @@ init_err: mac->id, phy_modes(state->interface), err); } +static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr_cur, mcr_new; + + /* Setup gmac */ + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur; + mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; + + /* Only update control register when needed! */ + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); + + return 0; +} + static void mtk_mac_pcs_get_state(struct phylink_config *config, struct phylink_link_state *state) { @@ -534,6 +544,7 @@ static const struct phylink_mac_ops mtk_phylink_ops = { .mac_pcs_get_state = mtk_mac_pcs_get_state, .mac_an_restart = mtk_mac_an_restart, .mac_config = mtk_mac_config, + .mac_finish = mtk_mac_finish, .mac_link_down = mtk_mac_link_down, .mac_link_up = mtk_mac_link_up, }; -- cgit v1.2.3 From 21089867278deb2a110b685e3cd33f64f9ce41e2 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:55:17 +0100 Subject: net: mtk_eth_soc: move restoration of SYSCFG0 to mac_finish() The SGMIISYS configuration is performed while ETHSYS_SYSCFG0 is in a disabled state. In order to preserve this when we switch to phylink_pcs we need to move the restoration of this register to the mac_finish() callback. 
Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 +++++++++-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 1 + 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index d9947faa2cd3..6dca98de0067 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -400,8 +400,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, if (err) goto init_err; - regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, - SYSCFG0_SGMII_MASK, val); + /* Save the syscfg0 value for mac_finish */ + mac->syscfg0 = val; } else if (phylink_autoneg_inband(mode)) { dev_err(eth->dev, "In-band mode not supported in non SGMII mode!\n"); @@ -425,8 +425,15 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, { struct mtk_mac *mac = container_of(config, struct mtk_mac, phylink_config); + struct mtk_eth *eth = mac->hw; u32 mcr_cur, mcr_new; + /* Enable SGMII */ + if (interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(interface)) + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, mac->syscfg0); + /* Setup gmac */ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); mcr_new = mcr_cur; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index fa3c085bf7ed..1ad5c26f01fb 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1001,6 +1001,7 @@ struct mtk_mac { struct mtk_hw_stats *hw_stats; __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT]; int hwlro_ip_cnt; + unsigned int syscfg0; }; /* the struct describing the SoC. these are declared in the soc_xyz.c files */ -- cgit v1.2.3 From 901f3fbe13c3e56f0742e02717ccbfabbc95c463 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:55:22 +0100 Subject: net: mtk_eth_soc: convert code structure to suit split PCS support Provide a mtk_pcs structure which encapsulates everything that the PCS functions need (the regmap and ana_rgc3 offset), and use this in the PCS functions. Provide shim functions to convert from the existing "mtk_sgmii_*" interface to the converted PCS functions. 
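This encapsulation prepares the usual phylink PCS idiom that the follow-up patch completes: embed a struct phylink_pcs in the per-instance structure and recover the instance with container_of(). A generic sketch of that idiom (hypothetical example_* names):

	#include <linux/phylink.h>
	#include <linux/regmap.h>

	struct example_pcs {
		struct regmap *regmap;		/* per-instance resources */
		u32 ana_rgc3;
		struct phylink_pcs pcs;		/* handed out to phylink */
	};

	static struct example_pcs *pcs_to_example_pcs(struct phylink_pcs *pcs)
	{
		/* phylink passes back only the embedded phylink_pcs pointer;
		 * container_of() recovers the enclosing driver structure.
		 */
		return container_of(pcs, struct example_pcs, pcs);
	}
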
Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 15 +++- drivers/net/ethernet/mediatek/mtk_sgmii.c | 123 +++++++++++++++------------- 2 files changed, 79 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 1ad5c26f01fb..8bf2d53cbec8 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -869,16 +869,23 @@ struct mtk_soc_data { /* currently no SoC has more than 2 macs */ #define MTK_MAX_DEVS 2 -/* struct mtk_sgmii - This is the structure holding sgmii regmap and its - * characteristics +/* struct mtk_pcs - This structure holds each sgmii regmap and associated + * data * @regmap: The register map pointing at the range used to setup * SGMII modes * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap */ +struct mtk_pcs { + struct regmap *regmap; + u32 ana_rgc3; +}; +/* struct mtk_sgmii - This is the structure holding sgmii regmap and its + * characteristics + * @pcs Array of individual PCS structures + */ struct mtk_sgmii { - struct regmap *regmap[MTK_MAX_DEVS]; - u32 ana_rgc3; + struct mtk_pcs pcs[MTK_MAX_DEVS]; }; /* struct mtk_eth - This is the main datasructure for holding the state diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index 4defc1ca41b6..7f257f492926 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -9,90 +9,71 @@ #include #include +#include #include #include "mtk_eth_soc.h" -int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) -{ - struct device_node *np; - int i; - - ss->ana_rgc3 = ana_rgc3; - - for (i = 0; i < MTK_MAX_DEVS; i++) { - np = of_parse_phandle(r, "mediatek,sgmiisys", i); - if (!np) - break; - - ss->regmap[i] = syscon_node_to_regmap(np); - of_node_put(np); - if (IS_ERR(ss->regmap[i])) - return PTR_ERR(ss->regmap[i]); - } - - return 0; -} - /* For SGMII interface mode */ -static int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id) +static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs) { unsigned int val; - if (!ss->regmap[id]) + if (!mpcs->regmap) return -EINVAL; /* Setup the link timer and QPHY power up inside SGMIISYS */ - regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER, + regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, SGMII_LINK_TIMER_DEFAULT); - regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); + regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); val |= SGMII_REMOTE_FAULT_DIS; - regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); + regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); - regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val); + regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); val |= SGMII_AN_RESTART; - regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); + regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); - regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val); + regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); val &= ~SGMII_PHYA_PWD; - regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val); + regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); return 0; + } /* For 1000BASE-X and 2500BASE-X interface modes, which operate at a * fixed speed. 
*/ -static int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, - phy_interface_t interface) +static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs, + phy_interface_t interface) { unsigned int val; - if (!ss->regmap[id]) + if (!mpcs->regmap) return -EINVAL; - regmap_read(ss->regmap[id], ss->ana_rgc3, &val); + regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val); val &= ~RG_PHY_SPEED_MASK; if (interface == PHY_INTERFACE_MODE_2500BASEX) val |= RG_PHY_SPEED_3_125G; - regmap_write(ss->regmap[id], ss->ana_rgc3, val); + regmap_write(mpcs->regmap, mpcs->ana_rgc3, val); /* Disable SGMII AN */ - regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val); + regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); val &= ~SGMII_AN_ENABLE; - regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val); + regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); /* Set the speed etc but leave the duplex unchanged */ - regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); + regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK; val |= SGMII_SPEED_1000; - regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); + regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); /* Release PHYA power down state */ - regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val); + regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); val &= ~SGMII_PHYA_PWD; - regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val); + regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); return 0; } @@ -100,44 +81,76 @@ static int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id, int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, phy_interface_t interface) { + struct mtk_pcs *mpcs = &ss->pcs[id]; int err = 0; /* Setup SGMIISYS with the determined property */ if (interface != PHY_INTERFACE_MODE_SGMII) - err = mtk_sgmii_setup_mode_force(ss, id, interface); + err = mtk_pcs_setup_mode_force(mpcs, interface); else if (phylink_autoneg_inband(mode)) - err = mtk_sgmii_setup_mode_an(ss, id); + err = mtk_pcs_setup_mode_an(mpcs); return err; } -/* For 1000BASE-X and 2500BASE-X interface modes */ -void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) +static void mtk_pcs_restart_an(struct mtk_pcs *mpcs) +{ + unsigned int val; + + if (!mpcs->regmap) + return; + + regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); +} + +static void mtk_pcs_link_up(struct mtk_pcs *mpcs, int speed, int duplex) { unsigned int val; /* SGMII force duplex setting */ - regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val); + regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); val &= ~SGMII_DUPLEX_FULL; if (duplex == DUPLEX_FULL) val |= SGMII_DUPLEX_FULL; - regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val); + regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); +} + +/* For 1000BASE-X and 2500BASE-X interface modes */ +void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) +{ + mtk_pcs_link_up(&ss->pcs[id], speed, duplex); +} + +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) +{ + struct device_node *np; + int i; + + for (i = 0; i < MTK_MAX_DEVS; i++) { + np = of_parse_phandle(r, "mediatek,sgmiisys", i); + if (!np) + break; + + ss->pcs[i].ana_rgc3 = ana_rgc3; + ss->pcs[i].regmap = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(ss->pcs[i].regmap)) + return PTR_ERR(ss->pcs[i].regmap); + } + + return 0; } void 
mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) { - struct mtk_sgmii *ss = eth->sgmii; - unsigned int val, sid; + unsigned int sid; /* Decide how GMAC and SGMIISYS be mapped */ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id; - if (!ss->regmap[sid]) - return; - - regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val); - val |= SGMII_AN_RESTART; - regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val); + mtk_pcs_restart_an(ð->sgmii->pcs[sid]); } -- cgit v1.2.3 From 14a44ab0330d290fade1403a920e299cc56d7300 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Wed, 18 May 2022 15:55:28 +0100 Subject: net: mtk_eth_soc: partially convert to phylink_pcs Partially convert mtk_eth_soc to phylink_pcs, moving the configuration, link up and AN restart over. However, it seems mac_pcs_get_state() doesn't actually get the state from the PCS, so we can't convert that over without a better understanding of the hardware. Signed-off-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 49 +++++++++++-------------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 7 ++-- drivers/net/ethernet/mediatek/mtk_sgmii.c | 55 +++++++++++++++-------------- 3 files changed, 53 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6dca98de0067..16f131445d8b 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -263,6 +263,25 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, mtk_w32(eth, val, TRGMII_TCK_CTRL); } +static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + unsigned int sid; + + if (interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(interface)) { + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? + 0 : mac->id; + + return mtk_sgmii_select_pcs(eth->sgmii, sid); + } + + return NULL; +} + static void mtk_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -270,7 +289,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, phylink_config); struct mtk_eth *eth = mac->hw; int val, ge_mode, err = 0; - u32 sid, i; + u32 i; /* MT76x8 has no hardware settings between for the MAC */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && @@ -391,15 +410,6 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK); - /* Decide how GMAC and SGMIISYS be mapped */ - sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 
- 0 : mac->id; - - /* Setup SGMIISYS with the determined property */ - err = mtk_sgmii_config(eth->sgmii, sid, mode, state->interface); - if (err) - goto init_err; - /* Save the syscfg0 value for mac_finish */ mac->syscfg0 = val; } else if (phylink_autoneg_inband(mode)) { @@ -479,14 +489,6 @@ static void mtk_mac_pcs_get_state(struct phylink_config *config, state->pause |= MLO_PAUSE_TX; } -static void mtk_mac_an_restart(struct phylink_config *config) -{ - struct mtk_mac *mac = container_of(config, struct mtk_mac, - phylink_config); - - mtk_sgmii_restart_an(mac->hw, mac->id); -} - static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { @@ -507,15 +509,6 @@ static void mtk_mac_link_up(struct phylink_config *config, phylink_config); u32 mcr; - if (phy_interface_mode_is_8023z(interface)) { - struct mtk_eth *eth = mac->hw; - - /* Decide how GMAC and SGMIISYS be mapped */ - int sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? - 0 : mac->id; - mtk_sgmii_link_up(eth->sgmii, sid, speed, duplex); - } - mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | @@ -548,8 +541,8 @@ static void mtk_mac_link_up(struct phylink_config *config, static const struct phylink_mac_ops mtk_phylink_ops = { .validate = phylink_generic_validate, + .mac_select_pcs = mtk_mac_select_pcs, .mac_pcs_get_state = mtk_mac_pcs_get_state, - .mac_an_restart = mtk_mac_an_restart, .mac_config = mtk_mac_config, .mac_finish = mtk_mac_finish, .mac_link_down = mtk_mac_link_down, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 8bf2d53cbec8..c1d46eb281ea 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -874,10 +874,12 @@ struct mtk_soc_data { * @regmap: The register map pointing at the range used to setup * SGMII modes * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap + * @pcs: Phylink PCS structure */ struct mtk_pcs { struct regmap *regmap; u32 ana_rgc3; + struct phylink_pcs pcs; }; /* struct mtk_sgmii - This is the structure holding sgmii regmap and its @@ -1020,12 +1022,9 @@ void mtk_stats_update_mac(struct mtk_mac *mac); void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); u32 mtk_r32(struct mtk_eth *eth, unsigned reg); +struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id); int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, u32 ana_rgc3); -int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, - phy_interface_t interface); -void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex); -void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id); int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index 7f257f492926..736839c84130 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -14,14 +14,16 @@ #include "mtk_eth_soc.h" +static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mtk_pcs, pcs); +} + /* For SGMII interface mode */ static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs) { unsigned int val; - if (!mpcs->regmap) - return -EINVAL; - /* Setup the link timer and QPHY power up inside SGMIISYS */ 
regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, SGMII_LINK_TIMER_DEFAULT); @@ -50,9 +52,6 @@ static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs, { unsigned int val; - if (!mpcs->regmap) - return -EINVAL; - regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val); val &= ~RG_PHY_SPEED_MASK; if (interface == PHY_INTERFACE_MODE_2500BASEX) @@ -78,10 +77,12 @@ static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs, return 0; } -int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, - phy_interface_t interface) +static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) { - struct mtk_pcs *mpcs = &ss->pcs[id]; + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); int err = 0; /* Setup SGMIISYS with the determined property */ @@ -93,22 +94,25 @@ int mtk_sgmii_config(struct mtk_sgmii *ss, int id, unsigned int mode, return err; } -static void mtk_pcs_restart_an(struct mtk_pcs *mpcs) +static void mtk_pcs_restart_an(struct phylink_pcs *pcs) { + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); unsigned int val; - if (!mpcs->regmap) - return; - regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); val |= SGMII_AN_RESTART; regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); } -static void mtk_pcs_link_up(struct mtk_pcs *mpcs, int speed, int duplex) +static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex) { + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); unsigned int val; + if (!phy_interface_mode_is_8023z(interface)) + return; + /* SGMII force duplex setting */ regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); val &= ~SGMII_DUPLEX_FULL; @@ -118,11 +122,11 @@ static void mtk_pcs_link_up(struct mtk_pcs *mpcs, int speed, int duplex) regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); } -/* For 1000BASE-X and 2500BASE-X interface modes */ -void mtk_sgmii_link_up(struct mtk_sgmii *ss, int id, int speed, int duplex) -{ - mtk_pcs_link_up(&ss->pcs[id], speed, duplex); -} +static const struct phylink_pcs_ops mtk_pcs_ops = { + .pcs_config = mtk_pcs_config, + .pcs_an_restart = mtk_pcs_restart_an, + .pcs_link_up = mtk_pcs_link_up, +}; int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) { @@ -139,18 +143,17 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) of_node_put(np); if (IS_ERR(ss->pcs[i].regmap)) return PTR_ERR(ss->pcs[i].regmap); + + ss->pcs[i].pcs.ops = &mtk_pcs_ops; } return 0; } -void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id) +struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id) { - unsigned int sid; - - /* Decide how GMAC and SGMIISYS be mapped */ - sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? - 0 : mac_id; + if (!ss->pcs[id].regmap) + return NULL; - mtk_pcs_restart_an(ð->sgmii->pcs[sid]); + return &ss->pcs[id].pcs; } -- cgit v1.2.3 From 86afd5a0e78eb9b84b158b33d85f711c5f748fd1 Mon Sep 17 00:00:00 2001 From: Ricardo Martinez Date: Wed, 18 May 2022 12:55:29 -0700 Subject: net: wwan: t7xx: Fix smatch errors t7xx_request_irq() error: uninitialized symbol 'ret'. t7xx_core_hk_handler() error: potentially dereferencing uninitialized 'event'. If the condition to enter the loop that waits for the handshake event is false on the first iteration then the uninitialized 'event' will be dereferenced, fix this by initializing 'event' to NULL. t7xx_port_proxy_recv_skb() warn: variable dereferenced before check 'skb'. 
No need to check skb at t7xx_port_proxy_recv_skb() since we know it is always called with a valid skb by t7xx_cldma_gpd_rx_from_q(). Reported-by: Dan Carpenter Signed-off-by: Ricardo Martinez Link: https://lore.kernel.org/r/20220518195529.126246-1-ricardo.martinez@linux.intel.com Signed-off-by: Jakub Kicinski --- drivers/net/wwan/t7xx/t7xx_modem_ops.c | 4 ++-- drivers/net/wwan/t7xx/t7xx_pci.c | 2 +- drivers/net/wwan/t7xx/t7xx_port_proxy.c | 3 --- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c index 1056ad9bf34f..3458af31e864 100644 --- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c +++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c @@ -458,9 +458,9 @@ static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl enum t7xx_fsm_event_state event_id, enum t7xx_fsm_event_state err_detect) { + struct t7xx_fsm_event *event = NULL, *event_next; struct t7xx_sys_info *core_info = &md->core_md; struct device *dev = &md->t7xx_dev->pdev->dev; - struct t7xx_fsm_event *event, *event_next; unsigned long flags; int ret; @@ -493,7 +493,7 @@ static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl goto err_free_event; } - if (ctl->exp_flg) + if (!event || ctl->exp_flg) goto err_free_event; ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length); diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 5f1bb8d6afb6..871f2a27a398 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -568,7 +568,7 @@ static const struct dev_pm_ops t7xx_pci_pm_ops = { static int t7xx_request_irq(struct pci_dev *pdev) { struct t7xx_pci_dev *t7xx_dev; - int ret, i; + int ret = 0, i; t7xx_dev = pci_get_drvdata(pdev); diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c index 7d2c0e81e33d..d4de047ff0d4 100644 --- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c +++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c @@ -350,9 +350,6 @@ static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *s u16 seq_num, channel; int ret; - if (!skb) - return -EINVAL; - channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status)); if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) { dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel); -- cgit v1.2.3 From 7b4149bdee6a6363cb4eca3599296d41a7b3700d Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Thu, 19 May 2022 00:00:50 +0200 Subject: net: dsa: lantiq_gswip: Fix start index in gswip_port_fdb() The first N entries in priv->vlans are reserved for managing ports which are not part of a bridge. Use priv->hw_info->max_ports to consistently access per-bridge entries at index 7. Starting at priv->hw_info->cpu_port (6) is harmless in this case because priv->vlan[6].bridge is always NULL so the comparison result is always false (which results in this entry being skipped). 
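Restated as a sketch of the table layout the fix depends on (equivalent to the corrected loop in the diff below): entries [0, max_ports) of priv->vlans describe standalone ports, and bridge entries start at max_ports, so that is where the FID lookup must begin.

	int fid = -1;
	unsigned int i;

	for (i = priv->hw_info->max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
		if (priv->vlans[i].bridge == bridge) {
			fid = priv->vlans[i].fid;	/* bridge already has a FID */
			break;
		}
	}
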
Acked-by: Hauke Mehrtens Signed-off-by: Martin Blumenstingl Reviewed-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/lantiq_gswip.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 12c15da55664..0c313db23451 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1360,7 +1360,7 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port, struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port)); struct gswip_priv *priv = ds->priv; struct gswip_pce_table_entry mac_bridge = {0,}; - unsigned int cpu_port = priv->hw_info->cpu_port; + unsigned int max_ports = priv->hw_info->max_ports; int fid = -1; int i; int err; @@ -1368,7 +1368,7 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port, if (!bridge) return -EINVAL; - for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) { + for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) { if (priv->vlans[i].bridge == bridge) { fid = priv->vlans[i].fid; break; -- cgit v1.2.3 From 4951995dbe9dd0c3fbe1fbfdb760b05a797700f1 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Thu, 19 May 2022 00:00:51 +0200 Subject: net: dsa: lantiq_gswip: Fix typo in gswip_port_fdb_dump() error print gswip_port_fdb_dump() reads the MAC bridge entries. The error message should say "failed to read mac bridge entry". While here, also add the index to the error print so humans can get to the cause of the problem easier. Acked-by: Hauke Mehrtens Reviewed-by: Vladimir Oltean Signed-off-by: Martin Blumenstingl Signed-off-by: Jakub Kicinski --- drivers/net/dsa/lantiq_gswip.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 0c313db23451..8af4def38a98 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1426,8 +1426,9 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, err = gswip_pce_table_entry_read(priv, &mac_bridge); if (err) { - dev_err(priv->dev, "failed to write mac bridge: %d\n", - err); + dev_err(priv->dev, + "failed to read mac bridge entry %d: %d\n", + i, err); return err; } -- cgit v1.2.3 From dbbc7d04c549a43ad343c69e17b27a57e2102041 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 18 May 2022 17:43:42 -0700 Subject: net: wwan: iosm: remove pointless null check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 12 warns: drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c: In function ‘ipc_protocol_dl_td_process’: drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c:406:13: warning: the comparison will always evaluate as ‘true’ for the address of ‘cb’ will never be NULL [-Waddress] 406 | if (!IPC_CB(skb)) { | ^ Indeed the check seems entirely pointless. Hopefully the other validation checks will catch if the cb is bad, but it can't be NULL. 
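The warning reduces to a basic C property, shown here as a tiny standalone example (hypothetical pkt/PKT_CB names, unrelated to the driver): an array member of a valid object decays to a non-NULL pointer, so comparing it against NULL is dead code.

	struct pkt_cb {
		unsigned long long mapping;
	};

	struct pkt {
		unsigned char cb[48];	/* control-buffer style array member */
	};

	#define PKT_CB(p)	((struct pkt_cb *)((p)->cb))

	static int cb_is_null(struct pkt *p)
	{
		/* (p)->cb decays to the address of an array member, which can
		 * never be NULL for a valid object; GCC 12 flags this with
		 * -Waddress, the same situation as IPC_CB(skb) above.
		 */
		return !PKT_CB(p);
	}
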
Reviewed-by: M Chetan Kumar Link: https://lore.kernel.org/r/20220519004342.2109832-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c index c6b032f95d2e..4627847c6daa 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c @@ -372,8 +372,6 @@ bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol, struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol, struct ipc_pipe *pipe) { - u32 tail = - le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]); struct ipc_protocol_td *p_td; struct sk_buff *skb; @@ -403,14 +401,6 @@ struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol, goto ret; } - if (!IPC_CB(skb)) { - dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL", - pipe->pipe_nr, tail); - ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); - skb = NULL; - goto ret; - } - if (p_td->buffer.address != IPC_CB(skb)->mapping) { dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p", (unsigned long long)p_td->buffer.address, skb->data); -- cgit v1.2.3 From 1172aa6e4a195aaf5941ae89a93068cb9dd07b47 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 18 May 2022 17:44:17 -0700 Subject: net: ipa: don't proceed to out-of-bound write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 12 seems upset that we check ipa_irq against array bound but then proceed, anyway: drivers/net/ipa/ipa_interrupt.c: In function ‘ipa_interrupt_add’: drivers/net/ipa/ipa_interrupt.c:196:27: warning: array subscript 30 is above array bounds of ‘void (*[30])(struct ipa *, enum ipa_irq_id)’ [-Warray-bounds] 196 | interrupt->handler[ipa_irq] = handler; | ~~~~~~~~~~~~~~~~~~^~~~~~~~~ drivers/net/ipa/ipa_interrupt.c:42:27: note: while referencing ‘handler’ 42 | ipa_irq_handler_t handler[IPA_IRQ_COUNT]; | ^~~~~~~ Reviewed-by: Alex Elder Link: https://lore.kernel.org/r/20220519004417.2109886-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ipa/ipa_interrupt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index b35170a93b0f..307bed2ee707 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -191,7 +191,8 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt, struct ipa *ipa = interrupt->ipa; u32 offset; - WARN_ON(ipa_irq >= IPA_IRQ_COUNT); + if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT)) + return; interrupt->handler[ipa_irq] = handler; @@ -208,7 +209,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq) struct ipa *ipa = interrupt->ipa; u32 offset; - WARN_ON(ipa_irq >= IPA_IRQ_COUNT); + if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT)) + return; /* Update the IPA interrupt mask to disable it */ interrupt->enabled &= ~BIT(ipa_irq); -- cgit v1.2.3 From 9b80ccda233fa6c59de411bf889cc4d0e028f2c7 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Thu, 19 May 2022 10:01:48 +0800 Subject: bonding: fix missed rcu protection When removing the rcu_read_lock in bond_ethtool_get_ts_info() as discussed [1], I didn't notice it could be called via setsockopt, which doesn't hold rcu lock, as syzbot pointed: stack backtrace: CPU: 0 PID: 3599 Comm: syz-executor317 Not tainted 
5.18.0-rc5-syzkaller-01392-g01f4685797a5 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 bond_option_active_slave_get_rcu include/net/bonding.h:353 [inline] bond_ethtool_get_ts_info+0x32c/0x3a0 drivers/net/bonding/bond_main.c:5595 __ethtool_get_ts_info+0x173/0x240 net/ethtool/common.c:554 ethtool_get_phc_vclocks+0x99/0x110 net/ethtool/common.c:568 sock_timestamping_bind_phc net/core/sock.c:869 [inline] sock_set_timestamping+0x3a3/0x7e0 net/core/sock.c:916 sock_setsockopt+0x543/0x2ec0 net/core/sock.c:1221 __sys_setsockopt+0x55e/0x6a0 net/socket.c:2223 __do_sys_setsockopt net/socket.c:2238 [inline] __se_sys_setsockopt net/socket.c:2235 [inline] __x64_sys_setsockopt+0xba/0x150 net/socket.c:2235 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x7f8902c8eb39 Fix it by adding rcu_read_lock and take a ref on the real_dev. Since dev_hold() and dev_put() can take NULL these days, we can skip checking if real_dev exist. [1] https://lore.kernel.org/netdev/27565.1642742439@famine/ Reported-by: syzbot+92beb3d46aab498710fa@syzkaller.appspotmail.com Fixes: aa6034678e87 ("bonding: use rcu_dereference_rtnl when get bonding active slave") Suggested-by: Vladimir Oltean Suggested-by: Jakub Kicinski Signed-off-by: Hangbin Liu Reviewed-by: Vladimir Oltean Link: https://lore.kernel.org/r/20220519020148.1058344-1-liuhangbin@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/bonding/bond_main.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 38e152548126..b5c5196e03ee 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5591,16 +5591,23 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev, const struct ethtool_ops *ops; struct net_device *real_dev; struct phy_device *phydev; + int ret = 0; + rcu_read_lock(); real_dev = bond_option_active_slave_get_rcu(bond); + dev_hold(real_dev); + rcu_read_unlock(); + if (real_dev) { ops = real_dev->ethtool_ops; phydev = real_dev->phydev; if (phy_has_tsinfo(phydev)) { - return phy_ts_info(phydev, info); + ret = phy_ts_info(phydev, info); + goto out; } else if (ops->get_ts_info) { - return ops->get_ts_info(real_dev, info); + ret = ops->get_ts_info(real_dev, info); + goto out; } } @@ -5608,7 +5615,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev, SOF_TIMESTAMPING_SOFTWARE; info->phc_index = -1; - return 0; +out: + dev_put(real_dev); + return ret; } static const struct ethtool_ops bond_ethtool_ops = { -- cgit v1.2.3 From 16ea52c44e7a56c983a2a49e218af5a9dbbd8965 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 19 May 2022 18:25:55 -0700 Subject: eth: mtk_ppe: fix up after merge I missed this in the barrage of GCC 12 warnings. Commit cf2df74e202d ("net: fix dev_fill_forward_path with pppoe + bridge") changed the pointer into an array. 
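A minimal, compilable model of the change described above, with invented types standing in for the real context structure: once a member changes from a pointer to a fixed-size array, the designated initializer that stored the pointer has to become a memcpy() into the array.

/*
 * Illustrative model only (invented types): pointer member vs.
 * fixed-size array member, and how each is filled in.
 */
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

struct path_ctx_old { const unsigned char *daddr; };   /* before the change */
struct path_ctx_new { unsigned char daddr[ETH_ALEN]; };/* after the change  */

int main(void)
{
        const unsigned char addr[ETH_ALEN] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

        struct path_ctx_old old_ctx = { .daddr = addr };       /* pointer: assignment is fine */
        struct path_ctx_new new_ctx = { 0 };

        /* array member: the bytes must be copied, not the address assigned */
        memcpy(new_ctx.daddr, addr, sizeof(new_ctx.daddr));

        printf("%02x == %02x\n", old_ctx.daddr[0], new_ctx.daddr[0]);
        return 0;
}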
Fixes: d7e6f5836038 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net") Link: https://lore.kernel.org/r/20220520012555.2262461-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 1fe31058b0f2..c9353071f96a 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -90,10 +90,11 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i { struct net_device_path_ctx ctx = { .dev = dev, - .daddr = addr, }; struct net_device_path path = {}; + memcpy(ctx.daddr, addr, sizeof(ctx.daddr)); + if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) return -1; -- cgit v1.2.3 From c15f950d149564989aa5a134b42e83e57511e628 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 19 May 2022 10:12:11 -0500 Subject: net: ipa: drop an unneeded transaction reference In gsi_channel_update(), a reference count is taken on the last completed transaction "to keep it from completing" before we give the event back to the hardware. Completion processing for that transaction (and any other "new" ones) will not occur until after this function returns, so there's no risk it completing early. So there's no need to take and drop the additional transaction reference. Use local variables in the call to gsi_evt_ring_doorbell(). Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/gsi.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index bc6e04bd00e1..1512a569900f 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1492,12 +1492,8 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel) if (index == ring->index % ring->count) return NULL; - /* Get the transaction for the latest completed event. Take a - * reference to keep it from completing before we give the events - * for this and previous transactions back to the hardware. - */ + /* Get the transaction for the latest completed event. */ trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1)); - refcount_inc(&trans->refcount); /* For RX channels, update each completed transaction with the number * of bytes that were actually received. For TX channels, report @@ -1512,9 +1508,7 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel) gsi_trans_move_complete(trans); /* Tell the hardware we've handled these events */ - gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); - - gsi_trans_free(trans); + gsi_evt_ring_doorbell(gsi, evt_ring_id, index); return gsi_channel_trans_complete(channel); } -- cgit v1.2.3 From c9d92cf28c0cda6e3bd76b75e6e343b86075cd2d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 19 May 2022 10:12:12 -0500 Subject: net: ipa: rename a GSI error code The CHANNEL_NOT_RUNNING error condition has been generalized, so rename it to be INCORRECT_CHANNEL_STATE. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/gsi.c | 6 +++--- drivers/net/ipa/gsi_reg.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 1512a569900f..9cfe84319ee4 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1179,15 +1179,15 @@ static void gsi_isr_gp_int1(struct gsi *gsi) * Similarly, we could get an error back when updating flow control * on a channel because it's not in the proper state. * - * In either case, we silently ignore a CHANNEL_NOT_RUNNING error - * if we receive it. + * In either case, we silently ignore a INCORRECT_CHANNEL_STATE + * error if we receive it. */ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK); switch (result) { case GENERIC_EE_SUCCESS: - case GENERIC_EE_CHANNEL_NOT_RUNNING: + case GENERIC_EE_INCORRECT_CHANNEL_STATE: gsi->result = 0; break; diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index 8906f4381032..5bd8b31656d3 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -515,7 +515,7 @@ enum gsi_err_type { /** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */ enum gsi_generic_ee_result { GENERIC_EE_SUCCESS = 0x1, - GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2, + GENERIC_EE_INCORRECT_CHANNEL_STATE = 0x2, GENERIC_EE_INCORRECT_DIRECTION = 0x3, GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4, GENERIC_EE_INCORRECT_CHANNEL = 0x5, -- cgit v1.2.3 From 332ef7c814bdd60f08d0d9013d0e1104798b2d23 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 19 May 2022 10:12:13 -0500 Subject: net: ipa: ignore endianness if there is no header If we program an RX endpoint to have no header (header length is 0), header-related endpoint configuration values are meaningless and are ignored. The only case we support that defines a header is QMAP endpoints. In ipa_endpoint_init_hdr_ext() we set the endianness mask value unconditionally, but it should not be done if there is no header (meaning it is not configured for QMAP). Set the endianness conditionally, and rearrange the logic in that function slightly to avoid testing the qmap flag twice. Delete an incorrect comment in ipa_endpoint_init_aggr(). Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 53764f3c0c7e..55322800ba58 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -587,19 +587,23 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) struct ipa *ipa = endpoint->ipa; u32 val = 0; - val |= HDR_ENDIANNESS_FMASK; /* big endian */ - - /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet - * driver assumes this field is meaningful in packets it receives, - * and assumes the header's payload length includes that padding. - * The RMNet driver does *not* pad packets it sends, however, so - * the pad field (although 0) should be ignored. 
- */ - if (endpoint->data->qmap && !endpoint->toward_ipa) { - val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; - /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ - val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK; - /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ + if (endpoint->data->qmap) { + /* We have a header, so we must specify its endianness */ + val |= HDR_ENDIANNESS_FMASK; /* big endian */ + + /* A QMAP header contains a 6 bit pad field at offset 0. + * The RMNet driver assumes this field is meaningful in + * packets it receives, and assumes the header's payload + * length includes that padding. The RMNet driver does + * *not* pad packets it sends, however, so the pad field + * (although 0) should be ignored. + */ + if (!endpoint->toward_ipa) { + val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; + /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ + val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK; + /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ + } } /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */ @@ -759,8 +763,6 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) close_eof = rx_data->aggr_close_eof; val |= aggr_sw_eof_active_encoded(version, close_eof); - - /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */ } else { val |= u32_encode_bits(IPA_ENABLE_DEAGGR, AGGR_EN_FMASK); -- cgit v1.2.3 From 75944b040bbcce0fc1d1432787efe8a96b51b595 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 19 May 2022 10:12:14 -0500 Subject: net: ipa: open-code ether_setup() About half of the fields set by the call in ipa_modem_netdev_setup() are overwritten after the call. Instead, just skip the call, and open-code the (other) assignments it makes to the net_device structure fields. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_modem.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index 27d87097433f..dd6464ced254 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include @@ -203,15 +205,20 @@ static const struct net_device_ops ipa_modem_ops = { static void ipa_modem_netdev_setup(struct net_device *netdev) { netdev->netdev_ops = &ipa_modem_ops; - ether_setup(netdev); - /* No header ops (override value set by ether_setup()) */ + netdev->header_ops = NULL; netdev->type = ARPHRD_RAWIP; netdev->hard_header_len = 0; + netdev->min_header_len = ETH_HLEN; + netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = IPA_MTU; netdev->mtu = netdev->max_mtu; netdev->addr_len = 0; + netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + netdev->priv_flags |= IFF_TX_SKB_SHARING; + eth_broadcast_addr(netdev->broadcast); + /* The endpoint is configured for QMAP */ netdev->needed_headroom = sizeof(struct rmnet_map_header); netdev->needed_tailroom = IPA_NETDEV_TAILROOM; -- cgit v1.2.3 From f0488c540e8a36e515c744a531f27a6e994888d6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 19 May 2022 10:12:15 -0500 Subject: net: ipa: move endpoint configuration data definitions Move the definitions of the structures defining endpoint-specific configuration data out of "ipa_data.h" and into "ipa_endpoint.h". This is a trivial movement of code without any other change, to prepare for the next few patches. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_data.h | 62 ------------------------------------------ drivers/net/ipa/ipa_endpoint.h | 62 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 62 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index dbbeecf6df29..d611b5e96497 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -95,68 +95,6 @@ struct gsi_channel_data { u8 tlv_count; }; -/** - * struct ipa_endpoint_tx_data - configuration data for TX endpoints - * @seq_type: primary packet processing sequencer type - * @seq_rep_type: sequencer type for replication processing - * @status_endpoint: endpoint to which status elements are sent - * - * The @status_endpoint is only valid if the endpoint's @status_enable - * flag is set. - */ -struct ipa_endpoint_tx_data { - enum ipa_seq_type seq_type; - enum ipa_seq_rep_type seq_rep_type; - enum ipa_endpoint_name status_endpoint; -}; - -/** - * struct ipa_endpoint_rx_data - configuration data for RX endpoints - * @buffer_size: requested receive buffer size (bytes) - * @pad_align: power-of-2 boundary to which packet payload is aligned - * @aggr_close_eof: whether aggregation closes on end-of-frame - * - * With each packet it transfers, the IPA hardware can perform certain - * transformations of its packet data. One of these is adding pad bytes - * to the end of the packet data so the result ends on a power-of-2 boundary. - * - * It is also able to aggregate multiple packets into a single receive buffer. - * Aggregation is "open" while a buffer is being filled, and "closes" when - * certain criteria are met. One of those criteria is the sender indicating - * a "frame" consisting of several transfers has ended. - */ -struct ipa_endpoint_rx_data { - u32 buffer_size; - u32 pad_align; - bool aggr_close_eof; -}; - -/** - * struct ipa_endpoint_config_data - IPA endpoint hardware configuration - * @resource_group: resource group to assign endpoint to - * @checksum: whether checksum offload is enabled - * @qmap: whether endpoint uses QMAP protocol - * @aggregation: whether endpoint supports aggregation - * @status_enable: whether endpoint uses status elements - * @dma_mode: whether endpoint operates in DMA mode - * @dma_endpoint: peer endpoint, if operating in DMA mode - * @tx: TX-specific endpoint information (see above) - * @rx: RX-specific endpoint information (see above) - */ -struct ipa_endpoint_config_data { - u32 resource_group; - bool checksum; - bool qmap; - bool aggregation; - bool status_enable; - bool dma_mode; - enum ipa_endpoint_name dma_endpoint; - union { - struct ipa_endpoint_tx_data tx; - struct ipa_endpoint_rx_data rx; - }; -}; - /** * struct ipa_endpoint_data - IPA endpoint configuration data * @filter_support: whether endpoint supports filtering diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 12fd5b16c18e..85fe15b5d983 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -40,6 +40,68 @@ enum ipa_endpoint_name { #define IPA_ENDPOINT_MAX 32 /* Max supported by driver */ +/** + * struct ipa_endpoint_tx_data - configuration data for TX endpoints + * @seq_type: primary packet processing sequencer type + * @seq_rep_type: sequencer type for replication processing + * @status_endpoint: endpoint to which status elements are sent + * + * The @status_endpoint is only valid if the endpoint's @status_enable + * flag is set. 
+ */ +struct ipa_endpoint_tx_data { + enum ipa_seq_type seq_type; + enum ipa_seq_rep_type seq_rep_type; + enum ipa_endpoint_name status_endpoint; +}; + +/** + * struct ipa_endpoint_rx_data - configuration data for RX endpoints + * @buffer_size: requested receive buffer size (bytes) + * @pad_align: power-of-2 boundary to which packet payload is aligned + * @aggr_close_eof: whether aggregation closes on end-of-frame + * + * With each packet it transfers, the IPA hardware can perform certain + * transformations of its packet data. One of these is adding pad bytes + * to the end of the packet data so the result ends on a power-of-2 boundary. + * + * It is also able to aggregate multiple packets into a single receive buffer. + * Aggregation is "open" while a buffer is being filled, and "closes" when + * certain criteria are met. One of those criteria is the sender indicating + * a "frame" consisting of several transfers has ended. + */ +struct ipa_endpoint_rx_data { + u32 buffer_size; + u32 pad_align; + bool aggr_close_eof; +}; + +/** + * struct ipa_endpoint_config_data - IPA endpoint hardware configuration + * @resource_group: resource group to assign endpoint to + * @checksum: whether checksum offload is enabled + * @qmap: whether endpoint uses QMAP protocol + * @aggregation: whether endpoint supports aggregation + * @status_enable: whether endpoint uses status elements + * @dma_mode: whether endpoint operates in DMA mode + * @dma_endpoint: peer endpoint, if operating in DMA mode + * @tx: TX-specific endpoint information (see above) + * @rx: RX-specific endpoint information (see above) + */ +struct ipa_endpoint_config_data { + u32 resource_group; + bool checksum; + bool qmap; + bool aggregation; + bool status_enable; + bool dma_mode; + enum ipa_endpoint_name dma_endpoint; + union { + struct ipa_endpoint_tx_data tx; + struct ipa_endpoint_rx_data rx; + }; +}; + /** * enum ipa_replenish_flag: RX buffer replenish flags * -- cgit v1.2.3 From cf4e73a1667e620ef703ff1f86ae96471ffa80f6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 19 May 2022 10:12:16 -0500 Subject: net: ipa: rename a few endpoint config data types Rename the just-moved data structure types to drop the "_data" suffix, to make it more obvious they are no longer meant to be used just as read-only initialization data. Rename the fields and variables of these types to use "config" instead of "data" in the name. This is another small step meant to facilitate review. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_data.h | 8 +++--- drivers/net/ipa/ipa_endpoint.c | 58 +++++++++++++++++++++--------------------- drivers/net/ipa/ipa_endpoint.h | 26 +++++++++---------- drivers/net/ipa/ipa_modem.c | 2 +- 4 files changed, 47 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index d611b5e96497..e15eb3cd3e33 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -98,7 +98,7 @@ struct gsi_channel_data { /** * struct ipa_endpoint_data - IPA endpoint configuration data * @filter_support: whether endpoint supports filtering - * @config: hardware configuration (see above) + * @config: hardware configuration * * Not all endpoints support the IPA filtering capability. A filter table * defines the filters to apply for those endpoints that support it. The @@ -106,12 +106,12 @@ struct gsi_channel_data { * for non-AP endpoints. 
For this reason we define *all* endpoints used * in the system, and indicate whether they support filtering. * - * The remaining endpoint configuration data applies only to AP endpoints. + * The remaining endpoint configuration data specifies default hardware + * configuration values that apply only to AP endpoints. */ struct ipa_endpoint_data { bool filter_support; - /* Everything else is specified only for AP endpoints */ - struct ipa_endpoint_config_data config; + struct ipa_endpoint_config config; }; /** diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 55322800ba58..361f9e17e869 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -333,7 +333,7 @@ static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) { struct ipa *ipa = endpoint->ipa; - if (!endpoint->data->aggregation) + if (!endpoint->config->aggregation) return; /* Nothing to do if the endpoint doesn't have aggregation open */ @@ -453,7 +453,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) u32 val = 0; /* FRAG_OFFLOAD_EN is 0 */ - if (endpoint->data->checksum) { + if (endpoint->config->checksum) { enum ipa_version version = endpoint->ipa->version; if (endpoint->toward_ipa) { @@ -502,7 +502,7 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) u32 header_size = sizeof(struct rmnet_map_header); /* Without checksum offload, we just have the MAP header */ - if (!endpoint->data->checksum) + if (!endpoint->config->checksum) return header_size; if (version < IPA_VERSION_4_5) { @@ -544,7 +544,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) struct ipa *ipa = endpoint->ipa; u32 val = 0; - if (endpoint->data->qmap) { + if (endpoint->config->qmap) { enum ipa_version version = ipa->version; size_t header_size; @@ -583,11 +583,11 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); - u32 pad_align = endpoint->data->rx.pad_align; + u32 pad_align = endpoint->config->rx.pad_align; struct ipa *ipa = endpoint->ipa; u32 val = 0; - if (endpoint->data->qmap) { + if (endpoint->config->qmap) { /* We have a header, so we must specify its endianness */ val |= HDR_ENDIANNESS_FMASK; /* big endian */ @@ -615,7 +615,7 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) */ if (ipa->version >= IPA_VERSION_4_5) { /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ - if (endpoint->data->qmap && !endpoint->toward_ipa) { + if (endpoint->config->qmap && !endpoint->toward_ipa) { u32 offset; offset = offsetof(struct rmnet_map_header, pkt_len); @@ -640,7 +640,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); /* Note that HDR_ENDIANNESS indicates big endian header fields */ - if (endpoint->data->qmap) + if (endpoint->config->qmap) val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); iowrite32(val, endpoint->ipa->reg_virt + offset); @@ -654,8 +654,8 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) return; /* Register not valid for RX endpoints */ - if (endpoint->data->dma_mode) { - enum ipa_endpoint_name name = endpoint->data->dma_endpoint; + if (endpoint->config->dma_mode) { + enum ipa_endpoint_name name = endpoint->config->dma_endpoint; u32 dma_endpoint_id; dma_endpoint_id = 
endpoint->ipa->name_map[name]->endpoint_id; @@ -741,18 +741,18 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) enum ipa_version version = endpoint->ipa->version; u32 val = 0; - if (endpoint->data->aggregation) { + if (endpoint->config->aggregation) { if (!endpoint->toward_ipa) { - const struct ipa_endpoint_rx_data *rx_data; + const struct ipa_endpoint_rx *rx_config; u32 buffer_size; bool close_eof; u32 limit; - rx_data = &endpoint->data->rx; + rx_config = &endpoint->config->rx; val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); - buffer_size = rx_data->buffer_size; + buffer_size = rx_config->buffer_size; limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD); val |= aggr_byte_limit_encoded(version, limit); @@ -761,7 +761,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) /* AGGR_PKT_LIMIT is 0 (unlimited) */ - close_eof = rx_data->aggr_close_eof; + close_eof = rx_config->aggr_close_eof; val |= aggr_sw_eof_active_encoded(version, close_eof); } else { val |= u32_encode_bits(IPA_ENABLE_DEAGGR, @@ -947,7 +947,7 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) struct ipa *ipa = endpoint->ipa; u32 val; - val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group); + val = rsrc_grp_encoded(ipa->version, endpoint->config->resource_group); iowrite32(val, ipa->reg_virt + offset); } @@ -960,10 +960,10 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) return; /* Register not valid for RX endpoints */ /* Low-order byte configures primary packet processing */ - val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); + val |= u32_encode_bits(endpoint->config->tx.seq_type, SEQ_TYPE_FMASK); /* Second byte configures replicated packet processing */ - val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, + val |= u32_encode_bits(endpoint->config->tx.seq_rep_type, SEQ_REP_TYPE_FMASK); iowrite32(val, endpoint->ipa->reg_virt + offset); @@ -1021,13 +1021,13 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint) offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); - if (endpoint->data->status_enable) { + if (endpoint->config->status_enable) { val |= STATUS_EN_FMASK; if (endpoint->toward_ipa) { enum ipa_endpoint_name name; u32 status_endpoint_id; - name = endpoint->data->tx.status_endpoint; + name = endpoint->config->tx.status_endpoint; status_endpoint_id = ipa->name_map[name]->endpoint_id; val |= u32_encode_bits(status_endpoint_id, @@ -1051,7 +1051,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint, u32 len; int ret; - buffer_size = endpoint->data->rx.buffer_size; + buffer_size = endpoint->config->rx.buffer_size; page = dev_alloc_pages(get_order(buffer_size)); if (!page) return -ENOMEM; @@ -1168,7 +1168,7 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, struct page *page, u32 len) { - u32 buffer_size = endpoint->data->rx.buffer_size; + u32 buffer_size = endpoint->config->rx.buffer_size; struct sk_buff *skb; /* Nothing to do if there's no netdev */ @@ -1275,7 +1275,7 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, struct page *page, u32 total_len) { - u32 buffer_size = endpoint->data->rx.buffer_size; + u32 buffer_size = endpoint->config->rx.buffer_size; void *data = page_address(page) + NET_SKB_PAD; u32 unused = buffer_size - total_len; u32 resid = 
total_len; @@ -1305,10 +1305,10 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, * And if checksum offload is enabled a trailer containing * computed checksum information will be appended. */ - align = endpoint->data->rx.pad_align ? : 1; + align = endpoint->config->rx.pad_align ? : 1; len = le16_to_cpu(status->pkt_len); len = sizeof(*status) + ALIGN(len, align); - if (endpoint->data->checksum) + if (endpoint->config->checksum) len += sizeof(struct rmnet_map_dl_csum_trailer); if (!ipa_endpoint_status_drop(endpoint, status)) { @@ -1352,7 +1352,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, /* Parse or build a socket buffer using the actual received length */ page = trans->data; - if (endpoint->data->status_enable) + if (endpoint->config->status_enable) ipa_endpoint_status_parse(endpoint, page, trans->len); else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) trans->data = NULL; /* Pages have been consumed */ @@ -1386,7 +1386,7 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, struct page *page = trans->data; if (page) { - u32 buffer_size = endpoint->data->rx.buffer_size; + u32 buffer_size = endpoint->config->rx.buffer_size; __free_pages(page, get_order(buffer_size)); } @@ -1520,7 +1520,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) * All other cases just need to reset the underlying GSI channel. */ special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && - endpoint->data->aggregation; + endpoint->config->aggregation; if (special && ipa_endpoint_aggr_active(endpoint)) ret = ipa_endpoint_reset_rx_aggr(endpoint); else @@ -1835,7 +1835,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, endpoint->channel_id = data->channel_id; endpoint->endpoint_id = data->endpoint_id; endpoint->toward_ipa = data->toward_ipa; - endpoint->data = &data->endpoint.config; + endpoint->config = &data->endpoint.config; ipa->initialized |= BIT(endpoint->endpoint_id); } diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 85fe15b5d983..e8d1300a6002 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -41,7 +41,7 @@ enum ipa_endpoint_name { #define IPA_ENDPOINT_MAX 32 /* Max supported by driver */ /** - * struct ipa_endpoint_tx_data - configuration data for TX endpoints + * struct ipa_endpoint_tx - Endpoint configuration for TX endpoints * @seq_type: primary packet processing sequencer type * @seq_rep_type: sequencer type for replication processing * @status_endpoint: endpoint to which status elements are sent @@ -49,17 +49,17 @@ enum ipa_endpoint_name { * The @status_endpoint is only valid if the endpoint's @status_enable * flag is set. */ -struct ipa_endpoint_tx_data { +struct ipa_endpoint_tx { enum ipa_seq_type seq_type; enum ipa_seq_rep_type seq_rep_type; enum ipa_endpoint_name status_endpoint; }; /** - * struct ipa_endpoint_rx_data - configuration data for RX endpoints - * @buffer_size: requested receive buffer size (bytes) - * @pad_align: power-of-2 boundary to which packet payload is aligned - * @aggr_close_eof: whether aggregation closes on end-of-frame + * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints + * @buffer_size: requested receive buffer size (bytes) + * @pad_align: power-of-2 boundary to which packet payload is aligned + * @aggr_close_eof: whether aggregation closes on end-of-frame * * With each packet it transfers, the IPA hardware can perform certain * transformations of its packet data. 
One of these is adding pad bytes @@ -70,14 +70,14 @@ struct ipa_endpoint_tx_data { * certain criteria are met. One of those criteria is the sender indicating * a "frame" consisting of several transfers has ended. */ -struct ipa_endpoint_rx_data { +struct ipa_endpoint_rx { u32 buffer_size; u32 pad_align; bool aggr_close_eof; }; /** - * struct ipa_endpoint_config_data - IPA endpoint hardware configuration + * struct ipa_endpoint_config - IPA endpoint hardware configuration * @resource_group: resource group to assign endpoint to * @checksum: whether checksum offload is enabled * @qmap: whether endpoint uses QMAP protocol @@ -88,7 +88,7 @@ struct ipa_endpoint_rx_data { * @tx: TX-specific endpoint information (see above) * @rx: RX-specific endpoint information (see above) */ -struct ipa_endpoint_config_data { +struct ipa_endpoint_config { u32 resource_group; bool checksum; bool qmap; @@ -97,8 +97,8 @@ struct ipa_endpoint_config_data { bool dma_mode; enum ipa_endpoint_name dma_endpoint; union { - struct ipa_endpoint_tx_data tx; - struct ipa_endpoint_rx_data rx; + struct ipa_endpoint_tx tx; + struct ipa_endpoint_rx rx; }; }; @@ -122,7 +122,7 @@ enum ipa_replenish_flag { * @channel_id: GSI channel used by the endpoint * @endpoint_id: IPA endpoint number * @toward_ipa: Endpoint direction (true = TX, false = RX) - * @data: Endpoint configuration data + * @config: Default endpoint configuration * @trans_tre_max: Maximum number of TRE descriptors per transaction * @evt_ring_id: GSI event ring used by the endpoint * @netdev: Network device pointer, if endpoint uses one @@ -136,7 +136,7 @@ struct ipa_endpoint { u32 channel_id; u32 endpoint_id; bool toward_ipa; - const struct ipa_endpoint_config_data *data; + const struct ipa_endpoint_config *config; u32 trans_tre_max; u32 evt_ring_id; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index dd6464ced254..7975e324690b 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -129,7 +129,7 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) goto err_drop_skb; endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]; - if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP)) + if (endpoint->config->qmap && skb->protocol != htons(ETH_P_MAP)) goto err_drop_skb; /* The hardware must be powered for us to transmit */ -- cgit v1.2.3 From 660e52d651ab7faa20d1ba08ae90a306b023e395 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 19 May 2022 10:12:17 -0500 Subject: net: ipa: save a copy of endpoint default config All elements of the default endpoint configuration are used in the code when programming an endpoint for use. But none of the other configuration data is ever needed once things are initialized. So rather than saving a pointer to *all* of the configuration data, save a copy of only the endpoint configuration portion. This will eventually allow endpoint configuration to be modifiable at runtime. But even before that it means we won't keep a pointer to configuration data after when no longer needed. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_endpoint.c | 52 +++++++++++++++++++++--------------------- drivers/net/ipa/ipa_endpoint.h | 2 +- drivers/net/ipa/ipa_modem.c | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 361f9e17e869..0f489723689c 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -333,7 +333,7 @@ static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) { struct ipa *ipa = endpoint->ipa; - if (!endpoint->config->aggregation) + if (!endpoint->config.aggregation) return; /* Nothing to do if the endpoint doesn't have aggregation open */ @@ -453,7 +453,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) u32 val = 0; /* FRAG_OFFLOAD_EN is 0 */ - if (endpoint->config->checksum) { + if (endpoint->config.checksum) { enum ipa_version version = endpoint->ipa->version; if (endpoint->toward_ipa) { @@ -502,7 +502,7 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) u32 header_size = sizeof(struct rmnet_map_header); /* Without checksum offload, we just have the MAP header */ - if (!endpoint->config->checksum) + if (!endpoint->config.checksum) return header_size; if (version < IPA_VERSION_4_5) { @@ -544,7 +544,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) struct ipa *ipa = endpoint->ipa; u32 val = 0; - if (endpoint->config->qmap) { + if (endpoint->config.qmap) { enum ipa_version version = ipa->version; size_t header_size; @@ -583,11 +583,11 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); - u32 pad_align = endpoint->config->rx.pad_align; + u32 pad_align = endpoint->config.rx.pad_align; struct ipa *ipa = endpoint->ipa; u32 val = 0; - if (endpoint->config->qmap) { + if (endpoint->config.qmap) { /* We have a header, so we must specify its endianness */ val |= HDR_ENDIANNESS_FMASK; /* big endian */ @@ -615,7 +615,7 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) */ if (ipa->version >= IPA_VERSION_4_5) { /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ - if (endpoint->config->qmap && !endpoint->toward_ipa) { + if (endpoint->config.qmap && !endpoint->toward_ipa) { u32 offset; offset = offsetof(struct rmnet_map_header, pkt_len); @@ -640,7 +640,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); /* Note that HDR_ENDIANNESS indicates big endian header fields */ - if (endpoint->config->qmap) + if (endpoint->config.qmap) val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); iowrite32(val, endpoint->ipa->reg_virt + offset); @@ -654,8 +654,8 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) return; /* Register not valid for RX endpoints */ - if (endpoint->config->dma_mode) { - enum ipa_endpoint_name name = endpoint->config->dma_endpoint; + if (endpoint->config.dma_mode) { + enum ipa_endpoint_name name = endpoint->config.dma_endpoint; u32 dma_endpoint_id; dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; @@ -741,14 +741,14 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) enum ipa_version version = endpoint->ipa->version; u32 val = 0; - if (endpoint->config->aggregation) { + if (endpoint->config.aggregation) { if 
(!endpoint->toward_ipa) { const struct ipa_endpoint_rx *rx_config; u32 buffer_size; bool close_eof; u32 limit; - rx_config = &endpoint->config->rx; + rx_config = &endpoint->config.rx; val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); @@ -947,7 +947,7 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) struct ipa *ipa = endpoint->ipa; u32 val; - val = rsrc_grp_encoded(ipa->version, endpoint->config->resource_group); + val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group); iowrite32(val, ipa->reg_virt + offset); } @@ -960,10 +960,10 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) return; /* Register not valid for RX endpoints */ /* Low-order byte configures primary packet processing */ - val |= u32_encode_bits(endpoint->config->tx.seq_type, SEQ_TYPE_FMASK); + val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK); /* Second byte configures replicated packet processing */ - val |= u32_encode_bits(endpoint->config->tx.seq_rep_type, + val |= u32_encode_bits(endpoint->config.tx.seq_rep_type, SEQ_REP_TYPE_FMASK); iowrite32(val, endpoint->ipa->reg_virt + offset); @@ -1021,13 +1021,13 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint) offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); - if (endpoint->config->status_enable) { + if (endpoint->config.status_enable) { val |= STATUS_EN_FMASK; if (endpoint->toward_ipa) { enum ipa_endpoint_name name; u32 status_endpoint_id; - name = endpoint->config->tx.status_endpoint; + name = endpoint->config.tx.status_endpoint; status_endpoint_id = ipa->name_map[name]->endpoint_id; val |= u32_encode_bits(status_endpoint_id, @@ -1051,7 +1051,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint, u32 len; int ret; - buffer_size = endpoint->config->rx.buffer_size; + buffer_size = endpoint->config.rx.buffer_size; page = dev_alloc_pages(get_order(buffer_size)); if (!page) return -ENOMEM; @@ -1168,7 +1168,7 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, struct page *page, u32 len) { - u32 buffer_size = endpoint->config->rx.buffer_size; + u32 buffer_size = endpoint->config.rx.buffer_size; struct sk_buff *skb; /* Nothing to do if there's no netdev */ @@ -1275,7 +1275,7 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, struct page *page, u32 total_len) { - u32 buffer_size = endpoint->config->rx.buffer_size; + u32 buffer_size = endpoint->config.rx.buffer_size; void *data = page_address(page) + NET_SKB_PAD; u32 unused = buffer_size - total_len; u32 resid = total_len; @@ -1305,10 +1305,10 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, * And if checksum offload is enabled a trailer containing * computed checksum information will be appended. */ - align = endpoint->config->rx.pad_align ? : 1; + align = endpoint->config.rx.pad_align ? 
: 1; len = le16_to_cpu(status->pkt_len); len = sizeof(*status) + ALIGN(len, align); - if (endpoint->config->checksum) + if (endpoint->config.checksum) len += sizeof(struct rmnet_map_dl_csum_trailer); if (!ipa_endpoint_status_drop(endpoint, status)) { @@ -1352,7 +1352,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, /* Parse or build a socket buffer using the actual received length */ page = trans->data; - if (endpoint->config->status_enable) + if (endpoint->config.status_enable) ipa_endpoint_status_parse(endpoint, page, trans->len); else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) trans->data = NULL; /* Pages have been consumed */ @@ -1386,7 +1386,7 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, struct page *page = trans->data; if (page) { - u32 buffer_size = endpoint->config->rx.buffer_size; + u32 buffer_size = endpoint->config.rx.buffer_size; __free_pages(page, get_order(buffer_size)); } @@ -1520,7 +1520,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) * All other cases just need to reset the underlying GSI channel. */ special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && - endpoint->config->aggregation; + endpoint->config.aggregation; if (special && ipa_endpoint_aggr_active(endpoint)) ret = ipa_endpoint_reset_rx_aggr(endpoint); else @@ -1835,7 +1835,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, endpoint->channel_id = data->channel_id; endpoint->endpoint_id = data->endpoint_id; endpoint->toward_ipa = data->toward_ipa; - endpoint->config = &data->endpoint.config; + endpoint->config = data->endpoint.config; ipa->initialized |= BIT(endpoint->endpoint_id); } diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index e8d1300a6002..39a12c249f66 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -136,7 +136,7 @@ struct ipa_endpoint { u32 channel_id; u32 endpoint_id; bool toward_ipa; - const struct ipa_endpoint_config *config; + struct ipa_endpoint_config config; u32 trans_tre_max; u32 evt_ring_id; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index 7975e324690b..c8b1c4d9c507 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -129,7 +129,7 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) goto err_drop_skb; endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]; - if (endpoint->config->qmap && skb->protocol != htons(ETH_P_MAP)) + if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP)) goto err_drop_skb; /* The hardware must be powered for us to transmit */ -- cgit v1.2.3 From cc398a34d16fd90a2dcc59b1105c634f038ea53b Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Thu, 19 May 2022 08:17:15 +0100 Subject: sfc/siena: Remove duplicate check on segments Siena only supports software TSO. 
This means more code can be deleted, as pointed out by the Smatch static checker warning: drivers/net/ethernet/sfc/siena/tx.c:184 __efx_siena_enqueue_skb() warn: duplicate check 'segments' (previous on line 158) Fixes: 956f2d86cb37 ("sfc/siena: Remove build references to missing functionality") Reported-by: Dan Carpenter Link: https://lore.kernel.org/kernel-janitors/YoH5tJMnwuGTrn1Z@kili/ Signed-off-by: Martin Habets Link: https://lore.kernel.org/r/165294463549.23865.4557617334650441347.stgit@palantir17.mph.net Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/siena/tx.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c index b84b9e348c13..e166dcb9b99c 100644 --- a/drivers/net/ethernet/sfc/siena/tx.c +++ b/drivers/net/ethernet/sfc/siena/tx.c @@ -181,14 +181,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) efx_tx_send_pending(tx_queue->channel); - if (segments) { - tx_queue->tso_bursts++; - tx_queue->tso_packets += segments; - tx_queue->tx_packets += segments; - } else { - tx_queue->tx_packets++; - } - + tx_queue->tx_packets++; return NETDEV_TX_OK; -- cgit v1.2.3 From fe5c5fc145edcf98a759b895f52b646730eeb7be Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 18 May 2022 17:43:05 -0700 Subject: net: stmmac: fix out-of-bounds access in a selftest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 12 points out that struct tc_action is smaller than struct tcf_action: drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c: In function ‘stmmac_test_rxp’: drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c:1132:21: warning: array subscript ‘struct tcf_gact[0]’ is partly outside array bounds of ‘unsigned char[272]’ [-Warray-bounds] 1132 | gact->tcf_action = TC_ACT_SHOT; | ^~ Fixes: ccfc639a94f2 ("net: stmmac: selftests: Add a selftest for Flexible RX Parser") Link: https://lore.kernel.org/r/20220519004305.2109708-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 9f1759593b94..2fc51dc5eb0b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1084,8 +1084,9 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00}; struct tc_cls_u32_offload cls_u32 = { }; struct stmmac_packet_attrs attr = { }; - struct tc_action **actions, *act; + struct tc_action **actions; struct tc_u32_sel *sel; + struct tcf_gact *gact; struct tcf_exts *exts; int ret, i, nk = 1; @@ -1110,8 +1111,8 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) goto cleanup_exts; } - act = kcalloc(nk, sizeof(*act), GFP_KERNEL); - if (!act) { + gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL); + if (!gact) { ret = -ENOMEM; goto cleanup_actions; } @@ -1126,9 +1127,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) exts->nr_actions = nk; exts->actions = actions; for (i = 0; i < nk; i++) { - struct tcf_gact *gact = to_gact(&act[i]); - - actions[i] = &act[i]; + actions[i] = (struct tc_action *)&gact[i]; gact->tcf_action = TC_ACT_SHOT; } @@ -1152,7 +1151,7 
@@ static int stmmac_test_rxp(struct stmmac_priv *priv) stmmac_tc_setup_cls_u32(priv, priv, &cls_u32); cleanup_act: - kfree(act); + kfree(gact); cleanup_actions: kfree(actions); cleanup_exts: -- cgit v1.2.3 From 937956ba404e70a765ca5aa39d3d7564d86a8872 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Thu, 19 May 2022 03:15:54 +0000 Subject: amt: fix gateway mode stuck If a gateway can not receive any response to requests from a relay, gateway resets status from SENT_REQUEST to INIT and variable about a relay as well. And then it should start the full establish step from sending a discovery message and receiving advertisement message. But, after failure in amt_req_work() it continues sending a request message step with flushed(invalid) relay information and sets SENT_REQUEST. So, a gateway can't be established with a relay. In order to avoid this situation, it stops sending the request message step if it fails. Fixes: cbc21dc1cfe9 ("amt: add data plane of amt interface") Signed-off-by: Taehee Yoo Signed-off-by: Jakub Kicinski --- drivers/net/amt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 10455c9b9da0..2b4ce3869f08 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -943,7 +943,7 @@ static void amt_req_work(struct work_struct *work) if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT) goto out; - if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) { + if (amt->req_cnt > AMT_MAX_REQ_COUNT) { netdev_dbg(amt->dev, "Gateway is not ready"); amt->qi = AMT_INIT_REQ_TIMEOUT; amt->ready4 = false; @@ -951,13 +951,15 @@ static void amt_req_work(struct work_struct *work) amt->remote_ip = 0; __amt_update_gw_status(amt, AMT_STATUS_INIT, false); amt->req_cnt = 0; + goto out; } spin_unlock_bh(&amt->lock); amt_send_request(amt, false); amt_send_request(amt, true); - amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); spin_lock_bh(&amt->lock); + __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); + amt->req_cnt++; out: exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT); mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000)); -- cgit v1.2.3 From fe29794c3585d039fefebaa2b5a4932a627ad4fd Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Thu, 19 May 2022 03:15:55 +0000 Subject: amt: fix memory leak for advertisement message When a gateway receives an advertisement message, it extracts relay information and then it should be freed. But the advertisement handler doesn't free it. So, memory leak would occur. Fixes: cbc21dc1cfe9 ("amt: add data plane of amt interface") Signed-off-by: Taehee Yoo Signed-off-by: Jakub Kicinski --- drivers/net/amt.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 2b4ce3869f08..de4ea518c793 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -2698,9 +2698,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) err = true; goto drop; } - if (amt_advertisement_handler(amt, skb)) - amt->dev->stats.rx_dropped++; - goto out; + err = amt_advertisement_handler(amt, skb); + break; case AMT_MSG_MULTICAST_DATA: if (iph->saddr != amt->remote_ip) { netdev_dbg(amt->dev, "Invalid Relay IP\n"); -- cgit v1.2.3 From dc2df00af951569689ec39ce0a41b1dff7e3595e Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Fri, 20 May 2022 15:28:21 +0200 Subject: net: tulip: fix build with CONFIG_GSC Fix typo which breaks build for parisc. 
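As a side note on why the corrected argument is &tp->pdev->dev: a PCI device embeds the generic struct device that the devm_* allocators expect. The sketch below uses simplified stand-in types, not the kernel definitions.

/*
 * Illustrative only: pci_dev embeds a struct device, and managed
 * allocators take a pointer to that embedded device.
 */
struct device { int dummy; };
struct pci_dev { struct device dev; };  /* embeds the generic device */

static struct device *managed_dev(struct pci_dev *pdev)
{
        return &pdev->dev;      /* what devm_kmalloc() and friends want */
}

int main(void)
{
        struct pci_dev pdev = { .dev = { .dummy = 0 } };

        return managed_dev(&pdev)->dummy;       /* exits 0 */
}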
Fixes: 3daebfbeb455 ("net: tulip: convert to devres") Reported-by: Linux Kernel Functional Testing Link: https://lore.kernel.org/all/CA+G9fYuCzU5VZ_nc+6NEdBXJdVCH=J2SB1Na1G_NS_0BNdGYtg@mail.gmail.com/ Reported-by: kernel test robot Signed-off-by: Rolf Eike Beer Link: https://lore.kernel.org/r/4719560.GXAFRqVoOG@eto.sf-tec.de Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/dec/tulip/eeprom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c index 67a67cb78dbb..d5657ff15e3c 100644 --- a/drivers/net/ethernet/dec/tulip/eeprom.c +++ b/drivers/net/ethernet/dec/tulip/eeprom.c @@ -117,7 +117,7 @@ static void tulip_build_fake_mediatable(struct tulip_private *tp) 0x00, 0x06 /* ttm bit map */ }; - tp->mtable = devm_kmalloc(&tp->pdev->pdev, sizeof(struct mediatable) + + tp->mtable = devm_kmalloc(&tp->pdev->dev, sizeof(struct mediatable) + sizeof(struct medialeaf), GFP_KERNEL); if (tp->mtable == NULL) -- cgit v1.2.3 From 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Thu, 19 May 2022 11:21:08 +0800 Subject: net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr() Sometimes t7xx_cldma_gpd_set_next_ptr() is called under spin lock, so add 'gfp_mask' parameter in t7xx_cldma_gpd_set_next_ptr() to pass the flag. Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface") Reported-by: Hulk Robot Signed-off-by: Yang Yingliang Reviewed-by: Loic Poulain Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com Signed-off-by: Jakub Kicinski --- drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c index 0c52801ed0de..6ff30cb8eb16 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c @@ -91,9 +91,9 @@ static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_p } static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req, - size_t size) + size_t size, gfp_t gfp_mask) { - req->skb = __dev_alloc_skb(size, GFP_KERNEL); + req->skb = __dev_alloc_skb(size, gfp_mask); if (!req->skb) return -ENOMEM; @@ -174,7 +174,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool spin_unlock_irqrestore(&queue->ring_lock, flags); req = queue->rx_refill; - ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size); + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL); if (ret) return ret; @@ -402,7 +402,7 @@ static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, s if (!req->gpd) goto err_free_req; - val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size); + val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL); if (val) goto err_free_pool; @@ -801,7 +801,7 @@ static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum) if (req->skb) continue; - ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size); + ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC); if (ret) break; -- cgit v1.2.3 From dbb2f362c7835660a4e39eadd7481563e2e176b7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 19 May 2022 23:19:55 -0700 Subject: eth: bnxt: make ulp_id unsigned to make GCC 12 
happy GCC array bounds checking complains that ulp_id is validated only against upper bound. Make it unsigned. Reviewed-by: Michael Chan Link: https://lore.kernel.org/r/20220520061955.2312968-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 12 ++++++------ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index fde0c3e8ac57..2e54bf4fc7a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -25,7 +25,7 @@ #include "bnxt_hwrm.h" #include "bnxt_ulp.h" -static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, +static int bnxt_register_dev(struct bnxt_en_dev *edev, unsigned int ulp_id, struct bnxt_ulp_ops *ulp_ops, void *handle) { struct net_device *dev = edev->net; @@ -62,7 +62,7 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, return 0; } -static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) +static int bnxt_unregister_dev(struct bnxt_en_dev *edev, unsigned int ulp_id) { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); @@ -115,7 +115,7 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) } } -static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, +static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id, struct bnxt_msix_entry *ent, int num_msix) { struct net_device *dev = edev->net; @@ -179,7 +179,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, return avail_msix; } -static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) +static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id) { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); @@ -233,7 +233,7 @@ int bnxt_get_ulp_stat_ctxs(struct bnxt *bp) return 0; } -static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, +static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id, struct bnxt_fw_msg *fw_msg) { struct net_device *dev = edev->net; @@ -447,7 +447,7 @@ void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) rcu_read_unlock(); } -static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id, +static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp_id, unsigned long *events_bmap, u16 max_id) { struct net_device *dev = edev->net; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 54d59f681b86..42b50abc3e91 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -77,15 +77,15 @@ struct bnxt_en_dev { }; struct bnxt_en_ops { - int (*bnxt_register_device)(struct bnxt_en_dev *, int, + int (*bnxt_register_device)(struct bnxt_en_dev *, unsigned int, struct bnxt_ulp_ops *, void *); - int (*bnxt_unregister_device)(struct bnxt_en_dev *, int); - int (*bnxt_request_msix)(struct bnxt_en_dev *, int, + int (*bnxt_unregister_device)(struct bnxt_en_dev *, unsigned int); + int (*bnxt_request_msix)(struct bnxt_en_dev *, unsigned int, struct bnxt_msix_entry *, int); - int (*bnxt_free_msix)(struct bnxt_en_dev *, int); - int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int, + int (*bnxt_free_msix)(struct bnxt_en_dev *, unsigned int); + int 
(*bnxt_send_fw_msg)(struct bnxt_en_dev *, unsigned int, struct bnxt_fw_msg *); - int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int, + int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, unsigned int, unsigned long *, u16); }; -- cgit v1.2.3 From c2e10f53455c898050738d6a5f8c237f27aec225 Mon Sep 17 00:00:00 2001 From: Alaa Mohamed Date: Fri, 20 May 2022 02:36:14 +0200 Subject: net: vxlan: Fix kernel coding style The continuation line does not align with the opening bracket, and this patch fixes it. Signed-off-by: Alaa Mohamed Link: https://lore.kernel.org/r/20220520003614.6073-1-eng.alaamohamedsoliman.am@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/vxlan/vxlan_core.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 293082c32a78..265d4a0245e7 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1135,12 +1135,11 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, struct net *net = dev_net(vxlan->dev); int err; - if (tb[NDA_NH_ID] && (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || - tb[NDA_PORT])) { - NL_SET_ERR_MSG(extack, - "DST, VNI, ifindex and port are mutually exclusive with NH_ID"); - return -EINVAL; - } + if (tb[NDA_NH_ID] && + (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || tb[NDA_PORT])) { + NL_SET_ERR_MSG(extack, "DST, VNI, ifindex and port are mutually exclusive with NH_ID"); + return -EINVAL; + } if (tb[NDA_DST]) { err = vxlan_nla_get_addr(ip, tb[NDA_DST]); @@ -1297,7 +1296,7 @@ out: static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); union vxlan_addr ip; -- cgit v1.2.3 From f7b5a89c66defe87d11240cfdaa8b6cbee5784eb Mon Sep 17 00:00:00 2001 From: Alaa Mohamed Date: Fri, 20 May 2022 02:20:40 +0200 Subject: net: mscc: fix the alignment in ocelot_port_fdb_del() Align the extack argument of the ocelot_port_fdb_del() function. Signed-off-by: Alaa Mohamed Link: https://lore.kernel.org/r/20220520002040.4442-1-eng.alaamohamedsoliman.am@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mscc/ocelot_net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index be168a372498..5e6136e80282 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -775,7 +775,7 @@ static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; -- cgit v1.2.3 From eb4c0788964730d12e8dd520bd8f5217ca48321c Mon Sep 17 00:00:00 2001 From: Yongzhi Liu Date: Thu, 19 May 2022 05:09:48 -0700 Subject: hv_netvsc: Fix potential dereference of NULL pointer The return value of netvsc_devinfo_get() needs to be checked to avoid use of a NULL pointer in case of an allocation failure.
Fixes: 0efeea5fb153 ("hv_netvsc: Add the support of hibernation") Signed-off-by: Yongzhi Liu Reviewed-by: Haiyang Zhang Link: https://lore.kernel.org/r/1652962188-129281-1-git-send-email-lyz_cs@pku.edu.cn Signed-off-by: Jakub Kicinski --- drivers/net/hyperv/netvsc_drv.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fde1c492ca02..b1dece6b9698 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2671,7 +2671,10 @@ static int netvsc_suspend(struct hv_device *dev) /* Save the current config info */ ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev); - + if (!ndev_ctx->saved_netvsc_dev_info) { + ret = -ENOMEM; + goto out; + } ret = netvsc_detach(net, nvdev); out: rtnl_unlock(); -- cgit v1.2.3 From eac67d83bf2553f98cfddd42cfbcfd6f9ccfc287 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 19 May 2022 23:00:13 -0700 Subject: wwan: iosm: use a flexible array rather than allocate short objects GCC array-bounds warns that ipc_coredump_get_list() under-allocates the size of struct iosm_cd_table *cd_table. This is avoidable - we just need a flexible array. Nothing calls sizeof() on struct iosm_cd_list or anything that contains it. Reviewed-by: M Chetan Kumar Link: https://lore.kernel.org/r/20220520060013.2309497-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/wwan/iosm/iosm_ipc_coredump.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h index 0809ba664276..3da5ec75e0f0 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_coredump.h +++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h @@ -14,9 +14,6 @@ /* Max buffer allocated to receive coredump data */ #define MAX_DATA_SIZE 0x00010000 -/* Max number of file entries */ -#define MAX_NOF_ENTRY 256 - /* Max length */ #define MAX_SIZE_LEN 32 @@ -38,7 +35,7 @@ struct iosm_cd_list_entry { */ struct iosm_cd_list { __le32 num_entries; - struct iosm_cd_list_entry entry[MAX_NOF_ENTRY]; + struct iosm_cd_list_entry entry[]; } __packed; /** -- cgit v1.2.3 From 1f36a72ae347bc922bddf9382b608afa47a25a28 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:10:20 +0200 Subject: net: sparx5: switchdev: fix typo in comment Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/sparx5/sparx5_port.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index 189a6a0a2e08..32709d21ab2f 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -742,7 +742,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5, if (err) return -EINVAL; } else { - sgmii = true; /* Phy is connnected to the MAC */ + sgmii = true; /* Phy is connected to the MAC */ } /* Choose SGMII or 1000BaseX/2500BaseX PCS mode */ -- cgit v1.2.3 From e34be16bee65dc4eee4e4f9398e6acf26d69379e Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:10:29 +0200 Subject: net: mvpp2: fix typo in comment Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2b7eade373be..b84128b549b4 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1869,7 +1869,7 @@ static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg) * design, incremented at different moments in the chain of packet processing, * it is very likely that incoming packets could have been dropped after being * counted by hardware but before reaching software statistics (most probably - * multicast packets), and in the oppposite way, during transmission, FCS bytes + * multicast packets), and in the opposite way, during transmission, FCS bytes * are added in between as well as TSO skb will be split and header bytes added. * Hence, statistics gathered from userspace with ifconfig (software) and * ethtool (hardware) cannot be compared. -- cgit v1.2.3 From b0ea505ba0d74dd9b5a2e40e872253b13e5c5671 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:10:31 +0200 Subject: net/mlx5: fix typo in comment Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index fbd4dd76a19f..c9b4e50a593e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1769,7 +1769,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) } /* Panic tear down fw command will stop the PCI bus communication - * with the HCA, so the health polll is no longer needed. + * with the HCA, so the health poll is no longer needed. */ mlx5_drain_health_wq(dev); mlx5_stop_health_poll(dev, false); -- cgit v1.2.3 From 3f660c1820f7aae10f841ac05aa6f804566ca2cb Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:10:57 +0200 Subject: cirrus: cs89x0: fix typo in comment Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/net/ethernet/cirrus/cs89x0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 4a97aa8e1387..06a0c00af99c 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -985,7 +985,7 @@ release_irq: if (result == DETECTED_NONE) { pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name); if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ - result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */ + result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */ } break; case A_CNF_MEDIA_10B_2: -- cgit v1.2.3 From 878e2eb29ac130c1a2dec6a3c3bb8706f5dd1476 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:11:16 +0200 Subject: net: marvell: prestera: fix typo in comment Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index e452cdeaf703..dc3e3ddc60bf 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -102,7 +102,7 @@ struct prestera_sdma { struct net_device napi_dev; u32 map_addr; u64 dma_mask; - /* protect SDMA with concurrrent access from multiple CPUs */ + /* protect SDMA with concurrent access from multiple CPUs */ spinlock_t tx_lock; }; -- cgit v1.2.3 From b993e72cdd44345b12ed160dfb823bc9bd027108 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:11:23 +0200 Subject: nfp: flower: fix typo in comment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Reviewed-by: Niklas Söderlund Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 63907aeb3884..ede90e086b28 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -576,7 +576,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, group->dirty = true; group->slave_cnt = slave_count; - /* Group may have been on queue for removal but is now offfloable. */ + /* Group may have been on queue for removal but is now offloable. */ group->to_remove = false; mutex_unlock(&lag->lock); -- cgit v1.2.3 From 60f243ad1426b796efed750937733b0e410f9257 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:11:28 +0200 Subject: qed: fix typos in comments Spelling mistakes (triple letters) in comments. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_vf.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h index 9d5a0c9e1ca0..f6cd1b3efdfd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h @@ -1282,7 +1282,7 @@ void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed * results. * - * Return: Rrror if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 306b5f4bc632..2bd51a41ce8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -225,7 +225,7 @@ struct pfvf_start_queue_resp_tlv { }; /* Extended queue information - additional index for reference inside qzone. 
- * If commmunicated between VF/PF, each TLV relating to queues should be + * If communicated between VF/PF, each TLV relating to queues should be * extended by one such [or have a future base TLV that already contains info]. */ struct vfpf_qid_tlv { -- cgit v1.2.3 From 153213f0554d5053d5f9385ae6b4f116c4dc1fb8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:15 -0500 Subject: net: ipa: make endpoint HOLB drop configurable Add a new Boolean flag for RX endpoints defining whether HOLB drop is initially enabled or disabled for the endpoint. All existing AP endpoints should have HOLB drop disabled. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 8 ++++++-- drivers/net/ipa/ipa_endpoint.h | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 0f489723689c..3ad97fbf6884 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1554,8 +1554,12 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) ipa_endpoint_init_hdr_metadata_mask(endpoint); ipa_endpoint_init_mode(endpoint); ipa_endpoint_init_aggr(endpoint); - if (!endpoint->toward_ipa) - ipa_endpoint_init_hol_block_disable(endpoint); + if (!endpoint->toward_ipa) { + if (endpoint->config.rx.holb_drop) + ipa_endpoint_init_hol_block_enable(endpoint, 0); + else + ipa_endpoint_init_hol_block_disable(endpoint); + } ipa_endpoint_init_deaggr(endpoint); ipa_endpoint_init_rsrc_grp(endpoint); ipa_endpoint_init_seq(endpoint); diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 39a12c249f66..3ab62fb892ec 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -60,6 +60,7 @@ struct ipa_endpoint_tx { * @buffer_size: requested receive buffer size (bytes) * @pad_align: power-of-2 boundary to which packet payload is aligned * @aggr_close_eof: whether aggregation closes on end-of-frame + * @holb_drop: whether to drop packets to avoid head-of-line blocking * * With each packet it transfers, the IPA hardware can perform certain * transformations of its packet data. One of these is adding pad bytes @@ -74,6 +75,7 @@ struct ipa_endpoint_rx { u32 buffer_size; u32 pad_align; bool aggr_close_eof; + bool holb_drop; }; /** -- cgit v1.2.3 From 3cebb7c2ed2d76f6f8df67b030c34ffe27867766 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:16 -0500 Subject: net: ipa: support hard aggregation limits Add a new flag for AP receive endpoints that indicates whether a "hard limit" is used as a criterion for closing aggregation. Add comments explaining the difference between "hard" and "soft" aggregation limits. Pass a flag to ipa_aggr_size_kb() so it computes the proper aggregation size value whether using hard or soft limits. Move that function earlier in "ipa_endpoint.c" so it can be used without a forward-reference. Update ipa_endpoint_data_valid_one() so it validates endpoints whose data indicate a hard aggregation limit is used, and so it reports set aggregation flags for endpoints without aggregation enabled. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_endpoint.c | 89 ++++++++++++++++++++++++++---------------- drivers/net/ipa/ipa_endpoint.h | 15 ++++++- 2 files changed, 69 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 3ad97fbf6884..6079670bd860 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -81,6 +81,24 @@ static u32 aggr_byte_limit_max(enum ipa_version version) return field_max(aggr_byte_limit_fmask(false)); } +/* Compute the aggregation size value to use for a given buffer size */ +static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit) +{ + /* A hard aggregation limit will not be crossed; aggregation closes + * if saving incoming data would cross the hard byte limit boundary. + * + * With a soft limit, aggregation closes *after* the size boundary + * has been crossed. In that case the limit must leave enough space + * after that limit to receive a full MTU of data plus overhead. + */ + if (!aggr_hard_limit) + rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; + + /* The byte limit is encoded as a number of kilobytes */ + + return rx_buffer_size / SZ_1K; +} + static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, const struct ipa_gsi_endpoint_data *all_data, const struct ipa_gsi_endpoint_data *data) @@ -93,7 +111,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, return true; if (!data->toward_ipa) { + const struct ipa_endpoint_rx *rx_config; u32 buffer_size; + u32 aggr_size; u32 limit; if (data->endpoint.filter_support) { @@ -107,8 +127,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, if (data->ee_id != GSI_EE_AP) return true; - buffer_size = data->endpoint.config.rx.buffer_size; + rx_config = &data->endpoint.config.rx; + /* The buffer size must hold an MTU plus overhead */ + buffer_size = rx_config->buffer_size; limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD; if (buffer_size < limit) { dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n", @@ -116,27 +138,39 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, return false; } - /* For an endpoint supporting receive aggregation, the - * aggregation byte limit defines the point at which an - * aggregation window will close. It is programmed into the - * IPA hardware as a number of KB. We don't use "hard byte - * limit" aggregation, so we need to supply enough space in - * a receive buffer to hold a complete MTU plus normal skb - * overhead *after* that aggregation byte limit has been - * crossed. - * - * This check just ensures the receive buffer size doesn't - * exceed what's representable in the aggregation limit field. 
- */ - if (data->endpoint.config.aggregation) { - limit += SZ_1K * aggr_byte_limit_max(ipa->version); - if (buffer_size - NET_SKB_PAD > limit) { - dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n", - data->endpoint_id, - buffer_size - NET_SKB_PAD, limit); + if (!data->endpoint.config.aggregation) { + bool result = true; - return false; + /* No aggregation; check for bogus aggregation data */ + if (rx_config->aggr_hard_limit) { + dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n", + data->endpoint_id); + result = false; + } + + if (rx_config->aggr_close_eof) { + dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n", + data->endpoint_id); + result = false; } + + return result; /* Nothing more to check */ + } + + /* For an endpoint supporting receive aggregation, the byte + * limit defines the point at which aggregation closes. This + * check ensures the receive buffer size doesn't result in a + * limit that exceeds what's representable in the aggregation + * byte limit field. + */ + aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, + rx_config->aggr_hard_limit); + limit = aggr_byte_limit_max(ipa->version); + if (aggr_size > limit) { + dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n", + data->endpoint_id, aggr_size, limit); + + return false; } return true; /* Nothing more to check for RX */ @@ -670,18 +704,6 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } -/* Compute the aggregation size value to use for a given buffer size */ -static u32 ipa_aggr_size_kb(u32 rx_buffer_size) -{ - /* We don't use "hard byte limit" aggregation, so we define the - * aggregation limit such that our buffer has enough space *after* - * that limit to receive a full MTU of data, plus overhead. - */ - rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; - - return rx_buffer_size / SZ_1K; -} - /* Encoded values for AGGR endpoint register fields */ static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit) { @@ -753,7 +775,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); buffer_size = rx_config->buffer_size; - limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD); + limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, + rx_config->aggr_hard_limit); val |= aggr_byte_limit_encoded(version, limit); limit = IPA_AGGR_TIME_LIMIT; diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 3ab62fb892ec..1e72a9695d3d 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -59,21 +59,32 @@ struct ipa_endpoint_tx { * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints * @buffer_size: requested receive buffer size (bytes) * @pad_align: power-of-2 boundary to which packet payload is aligned + * @aggr_hard_limit: whether aggregation closes before or after boundary * @aggr_close_eof: whether aggregation closes on end-of-frame * @holb_drop: whether to drop packets to avoid head-of-line blocking * + * The actual size of the receive buffer is rounded up if necessary + * to be a power-of-2 number of pages. + * * With each packet it transfers, the IPA hardware can perform certain * transformations of its packet data. One of these is adding pad bytes * to the end of the packet data so the result ends on a power-of-2 boundary. * * It is also able to aggregate multiple packets into a single receive buffer. 
* Aggregation is "open" while a buffer is being filled, and "closes" when - * certain criteria are met. One of those criteria is the sender indicating - * a "frame" consisting of several transfers has ended. + * certain criteria are met. + * + * Insufficient space available in the receive buffer can close aggregation. + * The aggregation byte limit defines the point (in units of 1024 bytes) in + * the buffer where aggregation closes. With a "soft" aggregation limit, + * aggregation closes when a packet written to the buffer *crosses* that + * aggregation limit. With a "hard" aggregation limit, aggregation will + * close *before* writing a packet that would cross that boundary. */ struct ipa_endpoint_rx { u32 buffer_size; u32 pad_align; + bool aggr_hard_limit; bool aggr_close_eof; bool holb_drop; }; -- cgit v1.2.3 From beb90cba607ff060c325e6717d2d5e7ff58abf11 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:17 -0500 Subject: net: ipa: specify RX aggregation time limit in config data Don't assume that a 500 microsecond time limit should be used for all receive endpoints that support aggregation. Instead, specify the time limit to use in the configuration data. Set a 500 microsecond limit for all existing RX endpoints, as before. Checking for overflow for the time limit field is a bit complicated. Rather than duplicate a lot of code in ipa_endpoint_data_valid_one(), call WARN() if any value is found to be too large when encoding it. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_data-v3.1.c | 2 ++ drivers/net/ipa/ipa_data-v3.5.1.c | 2 ++ drivers/net/ipa/ipa_data-v4.11.c | 2 ++ drivers/net/ipa/ipa_data-v4.2.c | 2 ++ drivers/net/ipa/ipa_data-v4.5.c | 2 ++ drivers/net/ipa/ipa_data-v4.9.c | 2 ++ drivers/net/ipa/ipa_endpoint.c | 21 +++++++++++++++++---- drivers/net/ipa/ipa_endpoint.h | 6 ++++++ 8 files changed, 35 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c index 8ff351aefd23..00f4e506e6e5 100644 --- a/drivers/net/ipa/ipa_data-v3.1.c +++ b/drivers/net/ipa/ipa_data-v3.1.c @@ -103,6 +103,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .rx = { .buffer_size = 8192, .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, }, }, }, @@ -150,6 +151,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .aggregation = true, .rx = { .buffer_size = 8192, + .aggr_time_limit = 500, .aggr_close_eof = true, }, }, diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c index d1c466abddb2..b7e32e87733e 100644 --- a/drivers/net/ipa/ipa_data-v3.5.1.c +++ b/drivers/net/ipa/ipa_data-v3.5.1.c @@ -94,6 +94,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .rx = { .buffer_size = 8192, .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, }, }, }, @@ -142,6 +143,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .aggregation = true, .rx = { .buffer_size = 8192, + .aggr_time_limit = 500, .aggr_close_eof = true, }, }, diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c index b1991cc6f0ca..1be823e5c5c2 100644 --- a/drivers/net/ipa/ipa_data-v4.11.c +++ b/drivers/net/ipa/ipa_data-v4.11.c @@ -88,6 +88,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .rx = { .buffer_size = 8192, .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, }, }, }, @@ -135,6 +136,7 @@ static const struct ipa_gsi_endpoint_data 
ipa_gsi_endpoint_data[] = { .aggregation = true, .rx = { .buffer_size = 32768, + .aggr_time_limit = 500, .aggr_close_eof = true, }, }, diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c index 1190a43e8743..683f1f91042f 100644 --- a/drivers/net/ipa/ipa_data-v4.2.c +++ b/drivers/net/ipa/ipa_data-v4.2.c @@ -84,6 +84,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .rx = { .buffer_size = 8192, .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, }, }, }, @@ -132,6 +133,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .aggregation = true, .rx = { .buffer_size = 8192, + .aggr_time_limit = 500, .aggr_close_eof = true, }, }, diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c index 944f72b0f285..79398f286a9c 100644 --- a/drivers/net/ipa/ipa_data-v4.5.c +++ b/drivers/net/ipa/ipa_data-v4.5.c @@ -97,6 +97,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .rx = { .buffer_size = 8192, .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, }, }, }, @@ -144,6 +145,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .aggregation = true, .rx = { .buffer_size = 8192, + .aggr_time_limit = 500, .aggr_close_eof = true, }, }, diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c index 16786bff7ef8..4b96efd05cf2 100644 --- a/drivers/net/ipa/ipa_data-v4.9.c +++ b/drivers/net/ipa/ipa_data-v4.9.c @@ -89,6 +89,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .rx = { .buffer_size = 8192, .pad_align = ilog2(sizeof(u32)), + .aggr_time_limit = 500, }, }, }, @@ -136,6 +137,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .aggregation = true, .rx = { .buffer_size = 8192, + .aggr_time_limit = 500, .aggr_close_eof = true, }, }, diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 6079670bd860..586529511cf6 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -35,7 +35,6 @@ #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 -#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */ /** enum ipa_status_opcode - status element opcode hardware values */ enum ipa_status_opcode { @@ -142,6 +141,13 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, bool result = true; /* No aggregation; check for bogus aggregation data */ + if (rx_config->aggr_time_limit) { + dev_err(dev, + "time limit with no aggregation for RX endpoint %u\n", + data->endpoint_id); + result = false; + } + if (rx_config->aggr_hard_limit) { dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n", data->endpoint_id); @@ -722,9 +728,13 @@ static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit) if (version < IPA_VERSION_4_5) { /* We set aggregation granularity in ipa_hardware_config() */ - limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); + fmask = aggr_time_limit_fmask(true); + val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); + WARN(val > field_max(fmask), + "aggr_time_limit too large (%u > %u usec)\n", + val, field_max(fmask) * IPA_AGGR_GRANULARITY); - return u32_encode_bits(limit, aggr_time_limit_fmask(true)); + return u32_encode_bits(val, fmask); } /* IPA v4.5 expresses the time limit using Qtime. 
The AP has @@ -739,6 +749,9 @@ static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit) /* Have to use pulse generator 1 (millisecond granularity) */ gran_sel = AGGR_GRAN_SEL_FMASK; val = DIV_ROUND_CLOSEST(limit, 1000); + WARN(val > field_max(fmask), + "aggr_time_limit too large (%u > %u usec)\n", + limit, field_max(fmask) * 1000); } else { /* We can use pulse generator 0 (100 usec granularity) */ gran_sel = 0; @@ -779,7 +792,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) rx_config->aggr_hard_limit); val |= aggr_byte_limit_encoded(version, limit); - limit = IPA_AGGR_TIME_LIMIT; + limit = rx_config->aggr_time_limit; val |= aggr_time_limit_encoded(version, limit); /* AGGR_PKT_LIMIT is 0 (unlimited) */ diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 1e72a9695d3d..01790c60bee8 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -59,6 +59,7 @@ struct ipa_endpoint_tx { * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints * @buffer_size: requested receive buffer size (bytes) * @pad_align: power-of-2 boundary to which packet payload is aligned + * @aggr_time_limit: time before aggregation closes (microseconds) * @aggr_hard_limit: whether aggregation closes before or after boundary * @aggr_close_eof: whether aggregation closes on end-of-frame * @holb_drop: whether to drop packets to avoid head-of-line blocking @@ -74,6 +75,10 @@ struct ipa_endpoint_tx { * Aggregation is "open" while a buffer is being filled, and "closes" when * certain criteria are met. * + * A time limit can be specified to close aggregation. Aggregation will be + * closed if this period passes after data is first written into a receive + * buffer. If not specified, no time limit is imposed. + * * Insufficient space available in the receive buffer can close aggregation. * The aggregation byte limit defines the point (in units of 1024 bytes) in * the buffer where aggregation closes. With a "soft" aggregation limit, @@ -84,6 +89,7 @@ struct ipa_endpoint_tx { struct ipa_endpoint_rx { u32 buffer_size; u32 pad_align; + u32 aggr_time_limit; bool aggr_hard_limit; bool aggr_close_eof; bool holb_drop; -- cgit v1.2.3 From d15180b4eadbdee782d97afcbf3c56e2f246c40a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:18 -0500 Subject: net: ipa: kill gsi_trans_commit_wait_timeout() Since the beginning gsi_trans_commit_wait_timeout() has existed to provide a way to allow waiting a limited time for a transaction to complete. But that function has never been used. In fact, there is no use for this function, because a transaction committed to hardware should *always* complete. The only reason it might not complete is if there were a hardware failure, or perhaps a system configuration error. Furthermore, if a timeout ever did occur, the IPA hardware would be in an indeterminate state, from which there is no recovery. It would require some sort of complete IPA reset, and would require the participation of the modem, and at this time there is no such sequence defined. So get rid of the definition of gsi_trans_commit_wait_timeout(), and update a few comments accordingly. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/gsi_trans.c | 22 ---------------------- drivers/net/ipa/gsi_trans.h | 9 --------- drivers/net/ipa/ipa_cmd.c | 15 +++++++-------- drivers/net/ipa/ipa_endpoint.c | 1 - 4 files changed, 7 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 87e1d43c118c..bf31ef3d56ad 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -637,28 +637,6 @@ out_trans_free: gsi_trans_free(trans); } -/* Commit a GSI transaction and wait for it to complete, with timeout */ -int gsi_trans_commit_wait_timeout(struct gsi_trans *trans, - unsigned long timeout) -{ - unsigned long timeout_jiffies = msecs_to_jiffies(timeout); - unsigned long remaining = 1; /* In case of empty transaction */ - - if (!trans->used) - goto out_trans_free; - - refcount_inc(&trans->refcount); - - __gsi_trans_commit(trans, true); - - remaining = wait_for_completion_timeout(&trans->completion, - timeout_jiffies); -out_trans_free: - gsi_trans_free(trans); - - return remaining ? 0 : -ETIMEDOUT; -} - /* Process the completion of a transaction; called while polling */ void gsi_trans_complete(struct gsi_trans *trans) { diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index af379b49299e..387ea50dd039 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -205,15 +205,6 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db); */ void gsi_trans_commit_wait(struct gsi_trans *trans); -/** - * gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for - * it to complete, with timeout - * @trans: Transaction to commit - * @timeout: Timeout period (in milliseconds) - */ -int gsi_trans_commit_wait_timeout(struct gsi_trans *trans, - unsigned long timeout); - /** * gsi_trans_read_byte() - Issue a single byte read TRE on a channel * @gsi: GSI pointer diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index d57472ea077f..77b84cea6e68 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -26,14 +26,13 @@ * other than data transfer to another endpoint. * * Immediate commands are represented by GSI transactions just like other - * transfer requests, represented by a single GSI TRE. Each immediate - * command has a well-defined format, having a payload of a known length. - * This allows the transfer element's length field to be used to hold an - * immediate command's opcode. The payload for a command resides in DRAM - * and is described by a single scatterlist entry in its transaction. - * Commands do not require a transaction completion callback. To commit - * an immediate command transaction, either gsi_trans_commit_wait() or - * gsi_trans_commit_wait_timeout() is used. + * transfer requests, and use a single GSI TRE. Each immediate command + * has a well-defined format, having a payload of a known length. This + * allows the transfer element's length field to be used to hold an + * immediate command's opcode. The payload for a command resides in AP + * memory and is described by a single scatterlist entry in its transaction. + * Commands do not require a transaction completion callback, and are + * (currently) always issued using gsi_trans_commit_wait(). 
*/ /* Some commands can wait until indicated pipeline stages are clear */ diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 586529511cf6..5ed5b8fd3ea3 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -478,7 +478,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) ipa_cmd_pipeline_clear_add(trans); - /* XXX This should have a 1 second timeout */ gsi_trans_commit_wait(trans); ipa_cmd_pipeline_clear_wait(ipa); -- cgit v1.2.3 From 2091c79ac4deb4f4dd6c5b223d50f4536e3265a2 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:19 -0500 Subject: net: ipa: count the number of modem TX endpoints In ipa_endpoint_modem_exception_reset_all(), a high estimate was made of the number of endpoints that need their status register updated. We only used what was needed, so the high estimate didn't matter much. However the next few patches are going to limit the number of commands in a single transaction, and the overestimate would exceed that. So count the number of modem TX endpoints at initialization time, and use it in ipa_endpoint_modem_exception_reset_all(). Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa.h | 2 ++ drivers/net/ipa/ipa_endpoint.c | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index 9fc880eb7e3a..4fc3c72359f5 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -62,6 +62,7 @@ struct ipa_interrupt; * @initialized: Bit mask indicating endpoints initialized * @set_up: Bit mask indicating endpoints set up * @enabled: Bit mask indicating endpoints enabled + * @modem_tx_count: Number of defined modem TX endoints * @endpoint: Array of endpoint information * @channel_map: Mapping of GSI channel to IPA endpoint * @name_map: Mapping of IPA endpoint name to IPA endpoint @@ -114,6 +115,7 @@ struct ipa { u32 set_up; u32 enabled; + u32 modem_tx_count; struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX]; struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX]; struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT]; diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 5ed5b8fd3ea3..385aa63ab4bb 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -442,12 +442,10 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) struct gsi_trans *trans; u32 count; - /* We need one command per modem TX endpoint. We can get an upper - * bound on that by assuming all initialized endpoints are modem->IPA. - * That won't happen, and we could be more precise, but this is fine - * for now. End the transaction with commands to clear the pipeline. + /* We need one command per modem TX endpoint, plus the commands + * that clear the pipeline. 
*/ - count = hweight32(initialized) + ipa_cmd_pipeline_clear_count(); + count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); trans = ipa_cmd_trans_alloc(ipa, count); if (!trans) { dev_err(&ipa->pdev->dev, @@ -1924,6 +1922,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count, if (data->endpoint.filter_support) filter_map |= BIT(data->endpoint_id); + if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) + ipa->modem_tx_count++; } if (!ipa_filter_map_valid(ipa, filter_map)) -- cgit v1.2.3 From 7ffba3bdf76a7bf93ec2dc56369ea563dea92b7c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:20 -0500 Subject: net: ipa: get rid of ipa_cmd_info->direction The direction field of the ipa_cmd_info structure is set, but never used. It seems it might have been used for the DMA_SHARED_MEM immediate command, but the DIRECTION flag is set based on the value of the passed-in direction flag there. Anyway, remove this unused field from the ipa_cmd_info structure. This is done as a separate patch to make it very obvious that it's not required. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/gsi_trans.c | 5 +---- drivers/net/ipa/ipa_cmd.h | 2 -- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index bf31ef3d56ad..cf8ee4237354 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -413,7 +413,6 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, dma_addr_t addr, enum dma_data_direction direction, enum ipa_cmd_opcode opcode) { - struct ipa_cmd_info *info; u32 which = trans->used++; struct scatterlist *sg; @@ -438,9 +437,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, sg_dma_address(sg) = addr; sg_dma_len(sg) = size; - info = &trans->info[which]; - info->opcode = opcode; - info->direction = direction; + trans->info[which].opcode = opcode; } /* Add a page transfer to a transaction. It will fill the only TRE. */ diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 05ed7e42e184..d4dbe2ce96dc 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -50,11 +50,9 @@ enum ipa_cmd_opcode { * struct ipa_cmd_info - information needed for an IPA immediate command * * @opcode: The command opcode. - * @direction: Direction of data transfer for DMA commands */ struct ipa_cmd_info { enum ipa_cmd_opcode opcode; - enum dma_data_direction direction; }; /** -- cgit v1.2.3 From 4de284b72e59e1006a6b6358c060265a9f1be100 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:21 -0500 Subject: net: ipa: remove command direction argument We no longer use the direction argument for gsi_trans_cmd_add(), so get rid of it in its definition, and in its seven callers. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/gsi_trans.c | 3 +-- drivers/net/ipa/gsi_trans.h | 4 +--- drivers/net/ipa/ipa_cmd.c | 22 +++++++--------------- 3 files changed, 9 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index cf8ee4237354..472792992f86 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -410,8 +410,7 @@ void gsi_trans_free(struct gsi_trans *trans) /* Add an immediate command to a transaction */ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, - dma_addr_t addr, enum dma_data_direction direction, - enum ipa_cmd_opcode opcode) + dma_addr_t addr, enum ipa_cmd_opcode opcode) { u32 which = trans->used++; struct scatterlist *sg; diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index 387ea50dd039..9a39909915ef 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -165,12 +165,10 @@ void gsi_trans_free(struct gsi_trans *trans); * @buf: Buffer pointer for command payload * @size: Number of bytes in buffer * @addr: DMA address for payload - * @direction: Direction of DMA transfer (or DMA_NONE if none required) * @opcode: IPA immediate command opcode */ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, - dma_addr_t addr, enum dma_data_direction direction, - enum ipa_cmd_opcode opcode); + dma_addr_t addr, enum ipa_cmd_opcode opcode); /** * gsi_trans_page_add() - Add a page transfer to a transaction diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 77b84cea6e68..5fd74d700704 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -402,7 +402,6 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans, dma_addr_t hash_addr) { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); - enum dma_data_direction direction = DMA_TO_DEVICE; struct ipa_cmd_hw_ip_fltrt_init *payload; union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; @@ -433,7 +432,7 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans, payload->nhash_rules_addr = cpu_to_le64(addr); gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, - direction, opcode); + opcode); } /* Initialize header space in IPA-local memory */ @@ -442,7 +441,6 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size, { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL; - enum dma_data_direction direction = DMA_TO_DEVICE; struct ipa_cmd_hw_hdr_init_local *payload; union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; @@ -464,7 +462,7 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size, payload->flags = cpu_to_le32(flags); gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, - direction, opcode); + opcode); } void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value, @@ -521,7 +519,7 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value, payload->clear_options = cpu_to_le32(options); gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, - DMA_NONE, opcode); + opcode); } /* Skip IP packet processing on the next data transfer on a TX channel */ @@ -529,7 +527,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id) { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT; - enum dma_data_direction direction = DMA_TO_DEVICE; struct 
ipa_cmd_ip_packet_init *payload; union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; @@ -541,7 +538,7 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id) IPA_PACKET_INIT_DEST_ENDPOINT_FMASK); gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, - direction, opcode); + opcode); } /* Use a DMA command to read or write a block of IPA-resident memory */ @@ -552,7 +549,6 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size, enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM; struct ipa_cmd_hw_dma_mem_mem *payload; union ipa_cmd_payload *cmd_payload; - enum dma_data_direction direction; dma_addr_t payload_addr; u16 flags; @@ -583,17 +579,14 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size, payload->flags = cpu_to_le16(flags); payload->system_addr = cpu_to_le64(addr); - direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, - direction, opcode); + opcode); } static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans) { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS; - enum dma_data_direction direction = DMA_TO_DEVICE; struct ipa_cmd_ip_packet_tag_status *payload; union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; @@ -604,14 +597,13 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans) payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK); gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, - direction, opcode); + opcode); } /* Issue a small command TX data transfer */ static void ipa_cmd_transfer_add(struct gsi_trans *trans) { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); - enum dma_data_direction direction = DMA_TO_DEVICE; enum ipa_cmd_opcode opcode = IPA_CMD_NONE; union ipa_cmd_payload *payload; dma_addr_t payload_addr; @@ -620,7 +612,7 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans) payload = ipa_cmd_payload_alloc(ipa, &payload_addr); gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, - direction, opcode); + opcode); } /* Add immediate commands to a transaction to clear the hardware pipeline */ -- cgit v1.2.3 From 8797972afff3d462711758acec68be11172e2d01 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:22 -0500 Subject: net: ipa: remove command info pool The ipa_cmd_info structure now contains only one field, and it's an enumerated type whose values all fit in 8 bits. Currently we'll never use more than 8 TREs in a command transaction, and we can represent that number of command opcodes in the same space as a 64 bit pointer to an ipa_cmd_info structure. Define IPA_COMMAND_TRANS_TRE_MAX as the maximum number of TREs that can be in a command transaction. Replace the info pointer in a transaction with a fixed-size array named cmd_opcode[] of that many bytes. Store the opcode in this array when adding a command TRE to a transaction, as was done previously for the info array. This makes the ipa_cmd_info unused, so get rid of it. When committing an immediate command transaction, use the channel's Boolean command flag to determine whether to fill in the opcode, which will be taken (as before) from the array in the transaction. This makes the command info pool unnecessary, so get rid of it. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/gsi.h | 1 - drivers/net/ipa/gsi_trans.c | 10 +++++----- drivers/net/ipa/gsi_trans.h | 7 +++++-- drivers/net/ipa/ipa_cmd.c | 41 ++++++++--------------------------------- drivers/net/ipa/ipa_cmd.h | 9 --------- 5 files changed, 18 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 9cc657658811..5d66116b46b0 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -84,7 +84,6 @@ struct gsi_trans_info { struct gsi_trans_pool pool; /* transaction pool */ struct gsi_trans_pool sg_pool; /* scatterlist pool */ struct gsi_trans_pool cmd_pool; /* command payload DMA pool */ - struct gsi_trans_pool info_pool;/* command information pool */ struct gsi_trans **map; /* TRE -> transaction map */ spinlock_t spinlock; /* protects updates to the lists */ diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 472792992f86..55f8fe7d2668 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -436,7 +436,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, sg_dma_address(sg) = addr; sg_dma_len(sg) = size; - trans->info[which].opcode = opcode; + trans->cmd_opcode[which] = opcode; } /* Add a page transfer to a transaction. It will fill the only TRE. */ @@ -552,10 +552,10 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db) struct gsi_ring *ring = &channel->tre_ring; enum ipa_cmd_opcode opcode = IPA_CMD_NONE; bool bei = channel->toward_ipa; - struct ipa_cmd_info *info; struct gsi_tre *dest_tre; struct scatterlist *sg; u32 byte_count = 0; + u8 *cmd_opcode; u32 avail; u32 i; @@ -566,7 +566,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db) * If there is no info array we're doing a simple data * transfer request, whose opcode is IPA_CMD_NONE. */ - info = trans->info ? &trans->info[0] : NULL; + cmd_opcode = channel->command ? 
&trans->cmd_opcode[0] : NULL; avail = ring->count - ring->index % ring->count; dest_tre = gsi_ring_virt(ring, ring->index); for_each_sg(trans->sgl, sg, trans->used, i) { @@ -577,8 +577,8 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db) byte_count += len; if (!avail--) dest_tre = gsi_ring_virt(ring, 0); - if (info) - opcode = info++->opcode; + if (cmd_opcode) + opcode = *cmd_opcode++; gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode); dest_tre++; diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index 9a39909915ef..99ce2cba0dc3 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -22,6 +22,9 @@ struct gsi; struct gsi_trans; struct gsi_trans_pool; +/* Maximum number of TREs in an IPA immediate command transaction */ +#define IPA_COMMAND_TRANS_TRE_MAX 8 + /** * struct gsi_trans - a GSI transaction * @@ -34,8 +37,8 @@ struct gsi_trans_pool; * @used: Number of TREs *used* (could be less than tre_count) * @len: Total # of transfer bytes represented in sgl[] (set by core) * @data: Preserved but not touched by the core transaction code + * @cmd_opcode: Array of command opcodes (command channel only) * @sgl: An array of scatter/gather entries managed by core code - * @info: Array of command information structures (command channel) * @direction: DMA transfer direction (DMA_NONE for commands) * @refcount: Reference count used for destruction * @completion: Completed when the transaction completes @@ -58,8 +61,8 @@ struct gsi_trans { u32 len; /* total # bytes across sgl[] */ void *data; + u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX]; struct scatterlist *sgl; - struct ipa_cmd_info *info; /* array of entries, or null */ enum dma_data_direction direction; refcount_t refcount; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 5fd74d700704..e58cd4478fd3 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -349,7 +349,6 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max) { struct gsi_trans_info *trans_info = &channel->trans_info; struct device *dev = channel->gsi->dev; - int ret; /* This is as good a place as any to validate build constants */ ipa_cmd_validate_build(); @@ -358,20 +357,9 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max) * a single transaction can require up to tlv_count of them, * so we treat them as if that many can be allocated at once. 
*/ - ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool, - sizeof(union ipa_cmd_payload), - tre_max, channel->tlv_count); - if (ret) - return ret; - - /* Each TRE needs a command info structure */ - ret = gsi_trans_pool_init(&trans_info->info_pool, - sizeof(struct ipa_cmd_info), - tre_max, channel->tlv_count); - if (ret) - gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool); - - return ret; + return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool, + sizeof(union ipa_cmd_payload), + tre_max, channel->tlv_count); } void ipa_cmd_pool_exit(struct gsi_channel *channel) @@ -379,7 +367,6 @@ void ipa_cmd_pool_exit(struct gsi_channel *channel) struct gsi_trans_info *trans_info = &channel->trans_info; struct device *dev = channel->gsi->dev; - gsi_trans_pool_exit(&trans_info->info_pool); gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool); } @@ -652,28 +639,16 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa) wait_for_completion(&ipa->completion); } -static struct ipa_cmd_info * -ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) -{ - struct gsi_channel *channel; - - channel = &endpoint->ipa->gsi.channel[endpoint->channel_id]; - - return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count); -} - /* Allocate a transaction for the command TX endpoint */ struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count) { struct ipa_endpoint *endpoint; - struct gsi_trans *trans; - endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; + if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX)) + return NULL; - trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id, - tre_count, DMA_NONE); - if (trans) - trans->info = ipa_cmd_info_alloc(endpoint, tre_count); + endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; - return trans; + return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id, + tre_count, DMA_NONE); } diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index d4dbe2ce96dc..9215ddad1010 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -46,15 +46,6 @@ enum ipa_cmd_opcode { IPA_CMD_IP_PACKET_TAG_STATUS = 0x14, }; -/** - * struct ipa_cmd_info - information needed for an IPA immediate command - * - * @opcode: The command opcode. - */ -struct ipa_cmd_info { - enum ipa_cmd_opcode opcode; -}; - /** * ipa_cmd_table_valid() - Validate a memory region holding a table * @ipa: - IPA pointer -- cgit v1.2.3 From a224bd4b88caa3ac62a4b541a07fae7ea26cab7a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 21 May 2022 19:32:23 -0500 Subject: net: ipa: use data space for command opcodes The 64-bit data field in a transaction is not used for commands. And the opcode array is *only* used for commands. They're (currently) the same size; save a little space in the transaction structure by enclosing the two fields in a union. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/gsi_trans.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index 99ce2cba0dc3..020c3b32de1d 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -60,8 +60,10 @@ struct gsi_trans { u8 used; /* # entries used in sgl[] */ u32 len; /* total # bytes across sgl[] */ - void *data; - u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX]; + union { + void *data; + u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX]; + }; struct scatterlist *sgl; enum dma_data_direction direction; -- cgit v1.2.3 From 621427fbdada788f18f77238e1c36f463c2cb9d1 Mon Sep 17 00:00:00 2001 From: Tommaso Merciai Date: Sat, 21 May 2022 01:58:46 +0200 Subject: net: phy: DP83822: enable rgmii mode if phy_interface_is_rgmii RGMII mode can be enable from dp83822 straps, and also writing bit 9 of register 0x17 - RMII and Status Register (RCSR). When phy_interface_is_rgmii rgmii mode must be enabled, same for contrary, this prevents malconfigurations of hw straps References: - https://www.ti.com/lit/gpn/dp83822i p66 Signed-off-by: Tommaso Merciai Co-developed-by: Michael Trimarchi Suggested-by: Alberto Bianchi Tested-by: Tommaso Merciai Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/dp83822.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index ce17b2af3218..e6ad3a494d32 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -94,7 +94,8 @@ #define DP83822_WOL_INDICATION_SEL BIT(8) #define DP83822_WOL_CLR_INDICATION BIT(11) -/* RSCR bits */ +/* RCSR bits */ +#define DP83822_RGMII_MODE_EN BIT(9) #define DP83822_RX_CLK_SHIFT BIT(12) #define DP83822_TX_CLK_SHIFT BIT(11) @@ -408,6 +409,12 @@ static int dp83822_config_init(struct phy_device *phydev) if (err) return err; } + + phy_set_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); + } else { + phy_clear_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); } if (dp83822->fx_enabled) { -- cgit v1.2.3 From 43252ed15f46658cec64edecfe610e40f6a12d85 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sat, 21 May 2022 10:34:25 +0200 Subject: net: fec: Do proper error checking for optional clks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An error code returned by devm_clk_get() might have other meanings than "This clock doesn't exist". So use devm_clk_get_optional() and handle all remaining errors as fatal. Signed-off-by: Uwe Kleine-König Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec_main.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9f33ec838b52..9b31a9ce7b27 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3866,17 +3866,21 @@ fec_probe(struct platform_device *pdev) fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); /* enet_out is optional, depends on board */ - fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); - if (IS_ERR(fep->clk_enet_out)) - fep->clk_enet_out = NULL; + fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); + if (IS_ERR(fep->clk_enet_out)) { + ret = PTR_ERR(fep->clk_enet_out); + goto failed_clk; + } fep->ptp_clk_on = false; mutex_init(&fep->ptp_clk_mutex); /* clk_ref is optional, depends on board */ - fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); - if (IS_ERR(fep->clk_ref)) - fep->clk_ref = NULL; + fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); + if (IS_ERR(fep->clk_ref)) { + ret = PTR_ERR(fep->clk_ref); + goto failed_clk; + } fep->clk_ref_rate = clk_get_rate(fep->clk_ref); /* clk_2x_txclk is optional, depends on board */ -- cgit v1.2.3 From 15d221d0c345b76947911a3ac91897ffe2f1cc4e Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 21 May 2022 08:33:01 +0200 Subject: hinic: Avoid some over memory allocation 'prod_idx' (atomic_t) is larger than 'shadow_idx' (u16), so some memory is over-allocated. Fixes: b15a9f37be2b ("net-next/hinic: Add wq") Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index f7dc7d825f63..4daf6bf291ec 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -386,7 +386,7 @@ static int alloc_wqes_shadow(struct hinic_wq *wq) return -ENOMEM; wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages, - sizeof(wq->prod_idx), GFP_KERNEL); + sizeof(*wq->shadow_idx), GFP_KERNEL); if (!wq->shadow_idx) goto err_shadow_idx; -- cgit v1.2.3 From 5ff851b7be752a6d607c289b489c40f96f9dc489 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Fri, 20 May 2022 08:26:50 +0200 Subject: net: fec: Do proper error checking for enet_out clk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An error code returned by devm_clk_get() might have other meanings than "This clock doesn't exist". So use devm_clk_get_optional() and handle all remaining errors as fatal. Signed-off-by: Uwe Kleine-König Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3cb298f1d69c..9d7ef993ce45 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3866,9 +3866,11 @@ fec_probe(struct platform_device *pdev) fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); /* enet_out is optional, depends on board */ - fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); - if (IS_ERR(fep->clk_enet_out)) - fep->clk_enet_out = NULL; + fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); + if (IS_ERR(fep->clk_enet_out)) { + ret = PTR_ERR(fep->clk_enet_out); + goto failed_clk; + } fep->ptp_clk_on = false; mutex_init(&fep->ptp_clk_mutex); -- cgit v1.2.3 From 4149af28318a1335f844a778d67d1fd27d05a505 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 22 May 2022 12:27:01 +0300 Subject: net: mscc: ocelot: offload tc action "ok" using an empty action vector The "ok" tc action is useful when placed in front of a more generic filter to exclude some more specific rules from matching it. The ocelot switches can offload this tc action by creating an empty action vector (no _ENA fields set to 1). This makes sense for all of VCAP IS1, IS2 and ES0 (but not for PSFP). Add support for this action. Note that this makes the gact_drop_and_ok_test() selftest pass, where "action ok" is used in front of an "action drop" rule, both offloaded to VCAP IS2. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot_flower.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 51cf241ff7d0..7c0897e779dc 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -279,6 +279,22 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port, filter->action.pol_ix = OCELOT_POLICER_DISCARD; filter->type = OCELOT_VCAP_FILTER_OFFLOAD; break; + case FLOW_ACTION_ACCEPT: + if (filter->block_id != VCAP_ES0 && + filter->block_id != VCAP_IS1 && + filter->block_id != VCAP_IS2) { + NL_SET_ERR_MSG_MOD(extack, + "Accept action can only be offloaded to VCAP chains"); + return -EOPNOTSUPP; + } + if (filter->block_id != VCAP_ES0 && + filter->goto_target != -1) { + NL_SET_ERR_MSG_MOD(extack, + "Last action must be GOTO"); + return -EOPNOTSUPP; + } + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + break; case FLOW_ACTION_TRAP: if (filter->block_id != VCAP_IS2 || filter->lookup != 0) { -- cgit v1.2.3 From 06d129946a71f3159b3b40ee95549183edf2c79d Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Sun, 22 May 2022 15:52:49 +0300 Subject: dpaa2-eth: retrieve the virtual address before dma_unmap The TSO header was DMA unmapped before the virtual address was retrieved and then used to free the buffer. This meant that we were actually removing the DMA map and then trying to search for it to help in retrieving the virtual address. This lead to a invalid virtual address being used in the kfree call. Fix this by calling dpaa2_iova_to_virt() prior to the dma_unmap call. [ 487.231819] Unable to handle kernel paging request at virtual address fffffd9807000008 (...) 
[ 487.354061] Hardware name: SolidRun LX2160A Honeycomb (DT) [ 487.359535] pstate: a0400005 (NzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 487.366485] pc : kfree+0xac/0x304 [ 487.369799] lr : kfree+0x204/0x304 [ 487.373191] sp : ffff80000c4eb120 [ 487.376493] x29: ffff80000c4eb120 x28: ffff662240c46400 x27: 0000000000000001 [ 487.383621] x26: 0000000000000001 x25: ffff662246da0cc0 x24: ffff66224af78000 [ 487.390748] x23: ffffad184f4ce008 x22: ffffad1850185000 x21: ffffad1838d13cec [ 487.397874] x20: ffff6601c0000000 x19: fffffd9807000000 x18: 0000000000000000 [ 487.405000] x17: ffffb910cdc49000 x16: ffffad184d7d9080 x15: 0000000000004000 [ 487.412126] x14: 0000000000000008 x13: 000000000000ffff x12: 0000000000000000 [ 487.419252] x11: 0000000000000004 x10: 0000000000000001 x9 : ffffad184d7d927c [ 487.426379] x8 : 0000000000000000 x7 : 0000000ffffffd1d x6 : ffff662240a94900 [ 487.433505] x5 : 0000000000000003 x4 : 0000000000000009 x3 : ffffad184f4ce008 [ 487.440632] x2 : ffff662243eec000 x1 : 0000000100000100 x0 : fffffc0000000000 [ 487.447758] Call trace: [ 487.450194] kfree+0xac/0x304 [ 487.453151] dpaa2_eth_free_tx_fd.isra.0+0x33c/0x3e0 [fsl_dpaa2_eth] [ 487.459507] dpaa2_eth_tx_conf+0x100/0x2e0 [fsl_dpaa2_eth] [ 487.464989] dpaa2_eth_poll+0xdc/0x380 [fsl_dpaa2_eth] Fixes: 3dc709e0cd47 ("dpaa2-eth: add support for software TSO") Link: https://bugzilla.kernel.org/show_bug.cgi?id=215886 Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 4b047255d928..766391310d1b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1097,6 +1097,7 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, u32 fd_len = dpaa2_fd_get_len(fd); struct dpaa2_sg_entry *sgt; int should_free_skb = 1; + void *tso_hdr; int i; fd_addr = dpaa2_fd_get_addr(fd); @@ -1136,9 +1137,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, priv->tx_data_offset); /* Unmap and free the header */ + tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)); dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE, DMA_TO_DEVICE); - kfree(dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt))); + kfree(tso_hdr); /* Unmap the other SG entries for the data */ for (i = 1; i < swa->tso.num_sg; i++) -- cgit v1.2.3 From d5f4e19a85670b4e5697654f4a4e086e064f8a47 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Sun, 22 May 2022 15:52:50 +0300 Subject: dpaa2-eth: use the correct software annotation field The incorrect software annotation field was being used, swa->sg.sgt_size instead of swa->tso.sgt_size, which meant that the SGT buffer was unmapped with a wrong size. 
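For readers less familiar with the DMA API, the rule at play is that dma_unmap_single() must be handed the same length that was used when the buffer was mapped. A minimal hypothetical sketch of the correct pairing (names are illustrative only, not taken from the dpaa2-eth driver):

#include <linux/dma-mapping.h>

static void sgt_map_unmap_example(struct device *dev, void *sgt_buf,
				  size_t sgt_size)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, sgt_buf, sgt_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))
		return;

	/* ... hardware consumes the SGT buffer ... */

	/* Must pass the same sgt_size used at map time; unmapping with a
	 * different length is what produces the DMA-API debug splat
	 * quoted below.
	 */
	dma_unmap_single(dev, addr, sgt_size, DMA_BIDIRECTIONAL);
}
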
This is also confirmed by the DMA API debug prints which showed the following: [ 38.962434] DMA-API: fsl_dpaa2_eth dpni.2: device driver frees DMA memory with different size [device address=0x0000fffffafba740] [map size=224 bytes] [unmap size=0 bytes] [ 38.980496] WARNING: CPU: 11 PID: 1131 at kernel/dma/debug.c:973 check_unmap+0x58c/0x9b0 [ 38.988586] Modules linked in: [ 38.991631] CPU: 11 PID: 1131 Comm: iperf3 Not tainted 5.18.0-rc7-00117-g59130eeb2b8f #1972 [ 38.999970] Hardware name: NXP Layerscape LX2160ARDB (DT) Fixes: 3dc709e0cd47 ("dpaa2-eth: add support for software TSO") Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 766391310d1b..f1f140277184 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1148,7 +1148,7 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE); /* Unmap the SGT buffer */ - dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, + dma_unmap_single(dev, fd_addr, swa->tso.sgt_size, DMA_BIDIRECTIONAL); if (!swa->tso.is_last_fd) -- cgit v1.2.3 From 0a09c5b8cb8f75344da7d90c771b84f7cdeaea04 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Sun, 22 May 2022 15:52:51 +0300 Subject: dpaa2-eth: unmap the SGT buffer before accessing its contents DMA unmap the Scatter/Gather table before going through the array to unmap and free each of the header and data chunks. This is so we do not touch the data between the dma_map and dma_unmap calls. Fixes: 3dc709e0cd47 ("dpaa2-eth: add support for software TSO") Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index f1f140277184..cd9ec80522e7 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1136,6 +1136,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, sgt = (struct dpaa2_sg_entry *)(buffer_start + priv->tx_data_offset); + /* Unmap the SGT buffer */ + dma_unmap_single(dev, fd_addr, swa->tso.sgt_size, + DMA_BIDIRECTIONAL); + /* Unmap and free the header */ tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt)); dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE, @@ -1147,10 +1151,6 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv, dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]), dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE); - /* Unmap the SGT buffer */ - dma_unmap_single(dev, fd_addr, swa->tso.sgt_size, - DMA_BIDIRECTIONAL); - if (!swa->tso.is_last_fd) should_free_skb = 0; } else { -- cgit v1.2.3 From 06da3e8f390aba135ae8808e9424e8a68e53d261 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:56:03 -0700 Subject: eth: mtk_eth_soc: silence the GCC 12 array-bounds warning GCC 12 gets upset because in mtk_foe_entry_commit_subflow() this driver allocates a partial structure. The writes are within bounds. Silence these warnings for now, our build bot runs GCC 12 so we won't allow any new instances. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 45ba0970504a..fe66ba8793cf 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -11,3 +11,8 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o endif obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o + +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_mtk_ppe.o += -Wno-array-bounds +endif -- cgit v1.2.3 From 385bc51b41ea74a91545600cb3d731754372f75e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:56:04 -0700 Subject: eth: ice: silence the GCC 12 array-bounds warning GCC 12 gets upset because driver allocates partial struct ice_aqc_sw_rules_elem buffers. The writes are within bounds. Silence these warnings for now, our build bot runs GCC 12 so we won't allow any new instances. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ice/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 9183d480b70b..46f439641441 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -47,3 +47,8 @@ ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o + +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_ice_switch.o += -Wno-array-bounds +endif -- cgit v1.2.3 From 9dec850fd7c210a04b4707df8e6c95bfafdd6a4b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:56:05 -0700 Subject: eth: tg3: silence the GCC 12 array-bounds warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 12 currently generates a rather inconsistent warning: drivers/net/ethernet/broadcom/tg3.c:17795:51: warning: array subscript 5 is above array bounds of ‘struct tg3_napi[5]’ [-Warray-bounds] 17795 | struct tg3_napi *tnapi = &tp->napi[i]; | ~~~~~~~~^~~ i is guaranteed < tp->irq_max which in turn is either 1 or 5. There are more loops like this one in the driver, but strangely GCC 12 dislikes only this single one. Silence this silliness for now. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 0ddfb5b5d53c..2e6c5f258a1f 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -17,3 +17,8 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o obj-$(CONFIG_BNXT) += bnxt/ + +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_tg3.o += -Wno-array-bounds +endif -- cgit v1.2.3 From 62dfb4cc444649b3f841ddb4f472a12ed6422714 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:26 +0200 Subject: net: ethernet: mtk_eth_soc: rely on GFP_KERNEL for dma_alloc_coherent whenever possible Rely on GFP_KERNEL for dma descriptors mappings in mtk_tx_alloc(), mtk_rx_alloc() and mtk_init_fq_dma() since they are run in non-irq context. Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 16f131445d8b..ccd864c968b2 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -816,7 +816,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, cnt * sizeof(struct mtk_tx_dma), ð->phy_scratch_ring, - GFP_ATOMIC); + GFP_KERNEL); if (unlikely(!eth->scratch_ring)) return -ENOMEM; @@ -1591,7 +1591,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) goto no_tx_mem; ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, - &ring->phys, GFP_ATOMIC); + &ring->phys, GFP_KERNEL); if (!ring->dma) goto no_tx_mem; @@ -1609,8 +1609,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, - &ring->phys_pdma, - GFP_ATOMIC); + &ring->phys_pdma, GFP_KERNEL); if (!ring->dma_pdma) goto no_tx_mem; @@ -1722,7 +1721,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) ring->dma = dma_alloc_coherent(eth->dma_dev, rx_dma_size * sizeof(*ring->dma), - &ring->phys, GFP_ATOMIC); + &ring->phys, GFP_KERNEL); if (!ring->dma) return -ENOMEM; -- cgit v1.2.3 From 731f3fd6bc879fcf11e99a2576747a33d3958cef Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:27 +0200 Subject: net: ethernet: mtk_eth_soc: move tx dma desc configuration in mtk_tx_set_dma_desc Move tx dma descriptor configuration in mtk_tx_set_dma_desc routine. This is a preliminary patch to introduce mt7986 ethernet support since it relies on a different tx dma descriptor layout. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 105 +++++++++++++++------------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 11 +++ 2 files changed, 67 insertions(+), 49 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index ccd864c968b2..6e713f68273f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -942,18 +942,51 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, } } +static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc, + struct mtk_tx_dma_desc_info *info) +{ + struct mtk_mac *mac = netdev_priv(dev); + u32 data; + + WRITE_ONCE(desc->txd1, info->addr); + + data = TX_DMA_SWC | TX_DMA_PLEN0(info->size); + if (info->last) + data |= TX_DMA_LS0; + WRITE_ONCE(desc->txd3, data); + + data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */ + if (info->first) { + if (info->gso) + data |= TX_DMA_TSO; + /* tx checksum offload */ + if (info->csum) + data |= TX_DMA_CHKSUM; + /* vlan header offload */ + if (info->vlan) + data |= TX_DMA_INS_VLAN | info->vlan_tci; + } + WRITE_ONCE(desc->txd4, data); +} + static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, int tx_num, struct mtk_tx_ring *ring, bool gso) { + struct mtk_tx_dma_desc_info txd_info = { + .size = skb_headlen(skb), + .gso = gso, + .csum = skb->ip_summed == CHECKSUM_PARTIAL, + .vlan = skb_vlan_tag_present(skb), + .vlan_tci = skb_vlan_tag_get(skb), + .first = true, + .last = !skb_is_nonlinear(skb), + }; struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; struct mtk_tx_dma *itxd, *txd; struct mtk_tx_dma *itxd_pdma, *txd_pdma; struct mtk_tx_buf *itx_buf, *tx_buf; - dma_addr_t mapped_addr; - unsigned int nr_frags; int i, n_desc = 1; - u32 txd4 = 0, fport; int k = 0; itxd = ring->next_free; @@ -961,49 +994,32 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (itxd == ring->last_free) return -ENOMEM; - /* set the forward port */ - fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; - txd4 |= fport; - itx_buf = mtk_desc_to_tx_buf(ring, itxd); memset(itx_buf, 0, sizeof(*itx_buf)); - if (gso) - txd4 |= TX_DMA_TSO; - - /* TX Checksum offload */ - if (skb->ip_summed == CHECKSUM_PARTIAL) - txd4 |= TX_DMA_CHKSUM; - - /* VLAN header offload */ - if (skb_vlan_tag_present(skb)) - txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); - - mapped_addr = dma_map_single(eth->dma_dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) + txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) return -ENOMEM; - WRITE_ONCE(itxd->txd1, mapped_addr); + mtk_tx_set_dma_desc(dev, itxd, &txd_info); + itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; itx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; - setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb), + setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, k++); /* TX SG offload */ txd = itxd; txd_pdma = qdma_to_pdma(ring, txd); - nr_frags = skb_shinfo(skb)->nr_frags; - for (i = 0; i < nr_frags; i++) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; unsigned int offset = 0; int frag_size = skb_frag_size(frag); while (frag_size) { - bool last_frag = false; - unsigned int frag_map_size; bool new_desc = true; if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || @@ -1018,23 +1034,17 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, new_desc = false; } - - frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset, - frag_map_size, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr))) + memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); + txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN); + txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && + !(frag_size - txd_info.size); + txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, + offset, txd_info.size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) goto err_dma; - if (i == nr_frags - 1 && - (frag_size - frag_map_size) == 0) - last_frag = true; - - WRITE_ONCE(txd->txd1, mapped_addr); - WRITE_ONCE(txd->txd3, (TX_DMA_SWC | - TX_DMA_PLEN0(frag_map_size) | - last_frag * TX_DMA_LS0)); - WRITE_ONCE(txd->txd4, fport); + mtk_tx_set_dma_desc(dev, txd, &txd_info); tx_buf = mtk_desc_to_tx_buf(ring, txd); if (new_desc) @@ -1044,20 +1054,17 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; - setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr, - frag_map_size, k++); + setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, + txd_info.size, k++); - frag_size -= frag_map_size; - offset += frag_map_size; + frag_size -= txd_info.size; + offset += txd_info.size; } } /* store skb to cleanup */ itx_buf->skb = skb; - WRITE_ONCE(itxd->txd4, txd4); - WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | - (!nr_frags * TX_DMA_LS0))); if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { if (k & 0x1) txd_pdma->txd2 |= TX_DMA_LS0; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index c1d46eb281ea..7e0f2964ac23 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -846,6 +846,17 @@ enum mkt_eth_capabilities { MTK_MUX_U3_GMAC2_TO_QPHY | \ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA) +struct mtk_tx_dma_desc_info { + dma_addr_t addr; + u32 size; + u16 vlan_tci; + u8 gso:1; + u8 csum:1; + u8 vlan:1; + u8 first:1; + u8 last:1; +}; + /* struct mtk_eth_data - This is the structure holding all differences * among various plaforms * @ana_rgc3: The offset for register ANA_RGC3 related to -- cgit v1.2.3 From eb067347aa87eaf8a42d945d3e877b1e8333f5ea Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:28 +0200 Subject: net: ethernet: mtk_eth_soc: add txd_size to mtk_soc_data In order to remove mtk_tx_dma size dependency, introduce txd_size in mtk_soc_data data structure. Rely on txd_size in mtk_init_fq_dma() and mtk_dma_free() routines. This is a preliminary patch to add mt7986 ethernet support. 
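As an illustration of what a per-SoC descriptor size buys, here is a minimal hypothetical sketch (not the driver's actual helper) of byte-offset ring indexing that works regardless of the descriptor struct layout:

#include <linux/types.h>

/* Index a descriptor ring by byte offset rather than by C array indexing,
 * so common code can walk rings whose entry size differs per SoC
 * (e.g. the soc->txrx.txd_size field introduced here).
 */
static inline void *ring_entry(void *ring_base, int idx, u32 desc_size)
{
	return ring_base + idx * desc_size;
}

The follow-up patches in this series convert the remaining ring walkers (mtk_tx_alloc, mtk_desc_to_tx_buf, txd_to_idx, mtk_poll_tx/mtk_poll_rx, mtk_rx_alloc/mtk_rx_clean) to this style.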
Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 47 +++++++++++++++++++++-------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 +++ 2 files changed, 38 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6e713f68273f..e00c83982aa9 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -808,20 +808,20 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd, /* the qdma core needs scratch memory to be setup */ static int mtk_init_fq_dma(struct mtk_eth *eth) { + const struct mtk_soc_data *soc = eth->soc; dma_addr_t phy_ring_tail; int cnt = MTK_DMA_SIZE; dma_addr_t dma_addr; int i; eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, - cnt * sizeof(struct mtk_tx_dma), + cnt * soc->txrx.txd_size, ð->phy_scratch_ring, GFP_KERNEL); if (unlikely(!eth->scratch_ring)) return -ENOMEM; - eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, - GFP_KERNEL); + eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); if (unlikely(!eth->scratch_head)) return -ENOMEM; @@ -831,16 +831,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; - phy_ring_tail = eth->phy_scratch_ring + - (sizeof(struct mtk_tx_dma) * (cnt - 1)); + phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); for (i = 0; i < cnt; i++) { - eth->scratch_ring[i].txd1 = - (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); + struct mtk_tx_dma *txd; + + txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size; + txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; if (i < cnt - 1) - eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring + - ((i + 1) * sizeof(struct mtk_tx_dma))); - eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); + txd->txd2 = eth->phy_scratch_ring + + (i + 1) * soc->txrx.txd_size; + + txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); + txd->txd4 = 0; } mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); @@ -2131,6 +2134,7 @@ static int mtk_dma_init(struct mtk_eth *eth) static void mtk_dma_free(struct mtk_eth *eth) { + const struct mtk_soc_data *soc = eth->soc; int i; for (i = 0; i < MTK_MAC_COUNT; i++) @@ -2138,9 +2142,8 @@ static void mtk_dma_free(struct mtk_eth *eth) netdev_reset_queue(eth->netdev[i]); if (eth->scratch_ring) { dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), - eth->scratch_ring, - eth->phy_scratch_ring); + MTK_DMA_SIZE * soc->txrx.txd_size, + eth->scratch_ring, eth->phy_scratch_ring); eth->scratch_ring = NULL; eth->phy_scratch_ring = 0; } @@ -3373,6 +3376,9 @@ static const struct mtk_soc_data mt2701_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + }, }; static const struct mtk_soc_data mt7621_data = { @@ -3381,6 +3387,9 @@ static const struct mtk_soc_data mt7621_data = { .required_clks = MT7621_CLKS_BITMAP, .required_pctl = false, .offload_version = 2, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + }, }; static const struct mtk_soc_data mt7622_data = { @@ -3390,6 +3399,9 @@ static const struct mtk_soc_data mt7622_data = { .required_clks = MT7622_CLKS_BITMAP, .required_pctl = false, .offload_version = 2, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + }, }; static const struct mtk_soc_data mt7623_data = { @@ -3398,6 +3410,9 @@ 
static const struct mtk_soc_data mt7623_data = { .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, .offload_version = 2, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + }, }; static const struct mtk_soc_data mt7629_data = { @@ -3406,6 +3421,9 @@ static const struct mtk_soc_data mt7629_data = { .hw_features = MTK_HW_FEATURES, .required_clks = MT7629_CLKS_BITMAP, .required_pctl = false, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + }, }; static const struct mtk_soc_data rt5350_data = { @@ -3413,6 +3431,9 @@ static const struct mtk_soc_data rt5350_data = { .hw_features = MTK_HW_FEATURES_MT7628, .required_clks = MT7628_CLKS_BITMAP, .required_pctl = false, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + }, }; const struct of_device_id of_mtk_match[] = { diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 7e0f2964ac23..7a5ad14b8be6 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -867,6 +867,7 @@ struct mtk_tx_dma_desc_info { * the target SoC * @required_pctl A bool value to show whether the SoC requires * the extra setup for those pins used by GMAC. + * @txd_size Tx DMA descriptor size. */ struct mtk_soc_data { u32 ana_rgc3; @@ -875,6 +876,9 @@ struct mtk_soc_data { bool required_pctl; u8 offload_version; netdev_features_t hw_features; + struct { + u32 txd_size; + } txrx; }; /* currently no SoC has more than 2 macs */ -- cgit v1.2.3 From 0e05744beda4ae2d65100ed217e4bd50130b4078 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:29 +0200 Subject: net: ethernet: mtk_eth_soc: rely on txd_size in mtk_tx_alloc/mtk_tx_clean This is a preliminary patch to add mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e00c83982aa9..331814f4801f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1592,8 +1592,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) static int mtk_tx_alloc(struct mtk_eth *eth) { + const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_ring *ring = ð->tx_ring; - int i, sz = sizeof(*ring->dma); + int i, sz = soc->txrx.txd_size; + struct mtk_tx_dma *txd; ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), GFP_KERNEL); @@ -1609,8 +1611,10 @@ static int mtk_tx_alloc(struct mtk_eth *eth) int next = (i + 1) % MTK_DMA_SIZE; u32 next_ptr = ring->phys + next * sz; - ring->dma[i].txd2 = next_ptr; - ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + txd = (void *)ring->dma + i * sz; + txd->txd2 = next_ptr; + txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + txd->txd4 = 0; } /* On MT7688 (PDMA only) this driver uses the ring->dma structs @@ -1632,7 +1636,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) ring->dma_size = MTK_DMA_SIZE; atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); ring->next_free = &ring->dma[0]; - ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; + ring->last_free = (void *)txd; ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); ring->thresh = MAX_SKB_FRAGS; @@ -1665,6 +1669,7 @@ no_tx_mem: static void mtk_tx_clean(struct mtk_eth *eth) { + const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_ring *ring = ð->tx_ring; int i; @@ -1677,17 +1682,15 @@ static void mtk_tx_clean(struct mtk_eth *eth) if (ring->dma) { dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * sizeof(*ring->dma), - ring->dma, - ring->phys); + MTK_DMA_SIZE * soc->txrx.txd_size, + ring->dma, ring->phys); ring->dma = NULL; } if (ring->dma_pdma) { dma_free_coherent(eth->dma_dev, - MTK_DMA_SIZE * sizeof(*ring->dma_pdma), - ring->dma_pdma, - ring->phys_pdma); + MTK_DMA_SIZE * soc->txrx.txd_size, + ring->dma_pdma, ring->phys_pdma); ring->dma_pdma = NULL; } } -- cgit v1.2.3 From c4fd06c2bb82c8167e69ca655ea2ed217ec3ca89 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:30 +0200 Subject: net: ethernet: mtk_eth_soc: rely on txd_size in mtk_desc_to_tx_buf This is a preliminary patch to add mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 331814f4801f..7c314324f929 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -861,10 +861,11 @@ static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) return ret + (desc - ring->phys); } -static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, - struct mtk_tx_dma *txd) +static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, + struct mtk_tx_dma *txd, + u32 txd_size) { - int idx = txd - ring->dma; + int idx = ((void *)txd - (void *)ring->dma) / txd_size; return &ring->buf[idx]; } @@ -986,6 +987,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, }; struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; + const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_dma *itxd, *txd; struct mtk_tx_dma *itxd_pdma, *txd_pdma; struct mtk_tx_buf *itx_buf, *tx_buf; @@ -997,7 +999,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (itxd == ring->last_free) return -ENOMEM; - itx_buf = mtk_desc_to_tx_buf(ring, itxd); + itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); memset(itx_buf, 0, sizeof(*itx_buf)); txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, @@ -1025,7 +1027,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, while (frag_size) { bool new_desc = true; - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (i & 0x1)) { txd = mtk_qdma_phys_to_virt(ring, txd->txd2); txd_pdma = qdma_to_pdma(ring, txd); @@ -1049,7 +1051,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, mtk_tx_set_dma_desc(dev, txd, &txd_info); - tx_buf = mtk_desc_to_tx_buf(ring, txd); + tx_buf = mtk_desc_to_tx_buf(ring, txd, + soc->txrx.txd_size); if (new_desc) memset(tx_buf, 0, sizeof(*tx_buf)); tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; @@ -1068,7 +1071,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, /* store skb to cleanup */ itx_buf->skb = skb; - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { if (k & 0x1) txd_pdma->txd2 |= TX_DMA_LS0; else @@ -1086,7 +1089,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, */ wmb(); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !netdev_xmit_more()) mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); @@ -1100,13 +1103,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, err_dma: do { - tx_buf = mtk_desc_to_tx_buf(ring, itxd); + tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); /* unmap dma */ mtk_tx_unmap(eth, tx_buf, false); itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) itxd_pdma->txd2 = TX_DMA_DESP2_DEF; itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); @@ -1417,7 +1420,8 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) break; - tx_buf = mtk_desc_to_tx_buf(ring, desc); + tx_buf = mtk_desc_to_tx_buf(ring, desc, + eth->soc->txrx.txd_size); if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) mac = 1; -- 
cgit v1.2.3 From e70a5634c1f58a352aea3dbf2a50e98a0cadd68c Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:31 +0200 Subject: net: ethernet: mtk_eth_soc: rely on txd_size in txd_to_idx This is a preliminary patch to add mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 7c314324f929..1bf5edd9eb44 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -876,9 +876,10 @@ static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring, return ring->dma_pdma - ring->dma + dma; } -static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma) +static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma, + u32 txd_size) { - return ((void *)dma - (void *)ring->dma) / sizeof(*dma); + return ((void *)dma - (void *)ring->dma) / txd_size; } static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, @@ -1094,8 +1095,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, !netdev_xmit_more()) mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); } else { - int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd), - ring->dma_size); + int next_idx; + + next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size), + ring->dma_size); mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); } -- cgit v1.2.3 From 670ff7dabbb0f317305fa8ccdc87ac88d0c87809 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:32 +0200 Subject: net: ethernet: mtk_eth_soc: add rxd_size to mtk_soc_data Similar to tx counterpart, introduce rxd_size in mtk_soc_data data structure. This is a preliminary patch to add mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 13 +++++++++---- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 ++ 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 1bf5edd9eb44..7c4e63cc7c2a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1740,7 +1740,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) } ring->dma = dma_alloc_coherent(eth->dma_dev, - rx_dma_size * sizeof(*ring->dma), + rx_dma_size * eth->soc->txrx.rxd_size, &ring->phys, GFP_KERNEL); if (!ring->dma) return -ENOMEM; @@ -1798,9 +1798,8 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) if (ring->dma) { dma_free_coherent(eth->dma_dev, - ring->dma_size * sizeof(*ring->dma), - ring->dma, - ring->phys); + ring->dma_size * eth->soc->txrx.rxd_size, + ring->dma, ring->phys); ring->dma = NULL; } } @@ -3388,6 +3387,7 @@ static const struct mtk_soc_data mt2701_data = { .required_pctl = true, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), }, }; @@ -3399,6 +3399,7 @@ static const struct mtk_soc_data mt7621_data = { .offload_version = 2, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), }, }; @@ -3411,6 +3412,7 @@ static const struct mtk_soc_data mt7622_data = { .offload_version = 2, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), }, }; @@ -3422,6 +3424,7 @@ static const struct mtk_soc_data mt7623_data = { .offload_version = 2, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), }, }; @@ -3433,6 +3436,7 @@ static const struct mtk_soc_data mt7629_data = { .required_pctl = false, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), }, }; @@ -3443,6 +3447,7 @@ static const struct mtk_soc_data rt5350_data = { .required_pctl = false, .txrx = { .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), }, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 7a5ad14b8be6..dcbf4b5c70e0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -868,6 +868,7 @@ struct mtk_tx_dma_desc_info { * @required_pctl A bool value to show whether the SoC requires * the extra setup for those pins used by GMAC. * @txd_size Tx DMA descriptor size. + * @rxd_size Rx DMA descriptor size. */ struct mtk_soc_data { u32 ana_rgc3; @@ -878,6 +879,7 @@ struct mtk_soc_data { netdev_features_t hw_features; struct { u32 txd_size; + u32 rxd_size; } txrx; }; -- cgit v1.2.3 From 649a752775c26bb9f9d163aa1102ab9cc5941496 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:33 +0200 Subject: net: ethernet: mtk_eth_soc: rely on txd_size field in mtk_poll_tx/mtk_poll_rx This is a preliminary to ad mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 7c4e63cc7c2a..c7820dbc75f1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1235,9 +1235,12 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) return ð->rx_ring[0]; for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { + struct mtk_rx_dma *rxd; + ring = ð->rx_ring[i]; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - if (ring->dma[idx].rxd2 & RX_DMA_DONE) { + rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; + if (rxd->rxd2 & RX_DMA_DONE) { ring->calc_idx_update = true; return ring; } @@ -1288,7 +1291,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto rx_done; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - rxd = &ring->dma[idx]; + rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; data = ring->data[idx]; if (!mtk_rx_get_desc(&trxd, rxd)) @@ -1477,7 +1480,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, mtk_tx_unmap(eth, tx_buf, true); - desc = &ring->dma[cpu]; + desc = (void *)ring->dma + cpu * eth->soc->txrx.txd_size; ring->last_free = desc; atomic_inc(&ring->free_count); -- cgit v1.2.3 From 72e27d3718ba24e75bef7a17d1acd8fbfe37c623 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:34 +0200 Subject: net: ethernet: mtk_eth_soc: rely on rxd_size field in mtk_rx_alloc/mtk_rx_clean Remove mtk_rx_dma structure layout dependency in mtk_rx_alloc/mtk_rx_clean. Initialize to 0 rxd3 and rxd4 in mtk_rx_alloc. This is a preliminary patch to add mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c7820dbc75f1..4706e3708bbc 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1749,18 +1749,25 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { + struct mtk_rx_dma *rxd; + dma_addr_t dma_addr = dma_map_single(eth->dma_dev, ring->data[i] + NET_SKB_PAD + eth->ip_align, ring->buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; - ring->dma[i].rxd1 = (unsigned int)dma_addr; + + rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; + rxd->rxd1 = (unsigned int)dma_addr; if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) - ring->dma[i].rxd2 = RX_DMA_LSO; + rxd->rxd2 = RX_DMA_LSO; else - ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + + rxd->rxd3 = 0; + rxd->rxd4 = 0; } ring->dma_size = rx_dma_size; ring->calc_idx_update = false; @@ -1785,14 +1792,17 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) if (ring->data && ring->dma) { for (i = 0; i < ring->dma_size; i++) { + struct mtk_rx_dma *rxd; + if (!ring->data[i]) continue; - if (!ring->dma[i].rxd1) + + rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; + if (!rxd->rxd1) continue; - dma_unmap_single(eth->dma_dev, - ring->dma[i].rxd1, - ring->buf_size, - DMA_FROM_DEVICE); + + dma_unmap_single(eth->dma_dev, rxd->rxd1, + ring->buf_size, DMA_FROM_DEVICE); skb_free_frag(ring->data[i]); } kfree(ring->data); -- cgit v1.2.3 From 8cb42714cdc1fc1d91f0a67aa710b794280b7af0 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:35 +0200 Subject: net: ethernet: mtk_eth_soc: introduce device register map Introduce reg_map structure to add the capability to support different register definitions. Move register definitions in mtk_regmap structure. This is a preliminary patch to introduce mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 223 ++++++++++++++++++---------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 146 ++++++------------ 2 files changed, 188 insertions(+), 181 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4706e3708bbc..503829a9c270 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -34,6 +34,59 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); #define MTK_ETHTOOL_STAT(x) { #x, \ offsetof(struct mtk_hw_stats, x) / sizeof(u64) } +static const struct mtk_reg_map mtk_reg_map = { + .tx_irq_mask = 0x1a1c, + .tx_irq_status = 0x1a18, + .pdma = { + .rx_ptr = 0x0900, + .rx_cnt_cfg = 0x0904, + .pcrx_ptr = 0x0908, + .glo_cfg = 0x0a04, + .rst_idx = 0x0a08, + .delay_irq = 0x0a0c, + .irq_status = 0x0a20, + .irq_mask = 0x0a28, + .int_grp = 0x0a50, + }, + .qdma = { + .qtx_cfg = 0x1800, + .rx_ptr = 0x1900, + .rx_cnt_cfg = 0x1904, + .qcrx_ptr = 0x1908, + .glo_cfg = 0x1a04, + .rst_idx = 0x1a08, + .delay_irq = 0x1a0c, + .fc_th = 0x1a10, + .int_grp = 0x1a20, + .hred = 0x1a44, + .ctx_ptr = 0x1b00, + .dtx_ptr = 0x1b04, + .crx_ptr = 0x1b10, + .drx_ptr = 0x1b14, + .fq_head = 0x1b20, + .fq_tail = 0x1b24, + .fq_count = 0x1b28, + .fq_blen = 0x1b2c, + }, + .gdm1_cnt = 0x2400, +}; + +static const struct mtk_reg_map mt7628_reg_map = { + .tx_irq_mask = 0x0a28, + .tx_irq_status = 0x0a20, + .pdma = { + .rx_ptr = 0x0900, + .rx_cnt_cfg = 0x0904, + .pcrx_ptr = 0x0908, + .glo_cfg = 0x0a04, + .rst_idx = 0x0a08, + .delay_irq = 0x0a0c, + .irq_status = 0x0a20, + .irq_mask = 0x0a28, + .int_grp = 0x0a50, + }, +}; + /* strings used by ethtool */ static const struct mtk_ethtool_stats { char str[ETH_GSTRING_LEN]; @@ -600,8 +653,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->tx_irq_lock, flags); - val = mtk_r32(eth, eth->tx_int_mask_reg); - mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg); + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); + mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); spin_unlock_irqrestore(ð->tx_irq_lock, flags); } @@ -611,8 +664,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->tx_irq_lock, flags); - val = mtk_r32(eth, eth->tx_int_mask_reg); - mtk_w32(eth, val | mask, eth->tx_int_mask_reg); + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); + mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); spin_unlock_irqrestore(ð->tx_irq_lock, flags); } @@ -622,8 +675,8 @@ static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->rx_irq_lock, flags); - val = mtk_r32(eth, MTK_PDMA_INT_MASK); - mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK); + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); + mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); spin_unlock_irqrestore(ð->rx_irq_lock, flags); } @@ -633,8 +686,8 @@ static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) u32 val; spin_lock_irqsave(ð->rx_irq_lock, flags); - val = mtk_r32(eth, MTK_PDMA_INT_MASK); - mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK); + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); + mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); spin_unlock_irqrestore(ð->rx_irq_lock, flags); } @@ -685,39 +738,39 @@ void mtk_stats_update_mac(struct mtk_mac *mac) hw_stats->rx_checksum_errors += mtk_r32(mac->hw, MT7628_SDM_CS_ERR); } else { 
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map; unsigned int offs = hw_stats->reg_offset; u64 stats; - hw_stats->rx_bytes += mtk_r32(mac->hw, - MTK_GDM1_RX_GBCNT_L + offs); - stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs); + hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs); + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs); if (stats) hw_stats->rx_bytes += (stats << 32); hw_stats->rx_packets += - mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs); hw_stats->rx_overflow += - mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs); hw_stats->rx_fcs_errors += - mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs); hw_stats->rx_short_errors += - mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs); hw_stats->rx_long_errors += - mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs); hw_stats->rx_checksum_errors += - mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs); hw_stats->rx_flow_control_packets += - mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs); hw_stats->tx_skip += - mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); hw_stats->tx_collisions += - mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); hw_stats->tx_bytes += - mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs); - stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); if (stats) hw_stats->tx_bytes += (stats << 32); hw_stats->tx_packets += - mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs); + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs); } u64_stats_update_end(&hw_stats->syncp); @@ -846,10 +899,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) txd->txd4 = 0; } - mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); - mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); - mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); - mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); + mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); + mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); + mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); return 0; } @@ -1093,7 +1146,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !netdev_xmit_more()) - mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); } else { int next_idx; @@ -1407,6 +1460,7 @@ rx_done: static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, unsigned int *done, unsigned int *bytes) { + const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct mtk_tx_ring *ring = ð->tx_ring; struct mtk_tx_dma *desc; struct sk_buff *skb; @@ -1414,7 +1468,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, u32 cpu, dma; cpu = ring->last_free_ptr; - dma = mtk_r32(eth, MTK_QTX_DRX_PTR); + dma = mtk_r32(eth, reg_map->qdma.drx_ptr); desc = mtk_qdma_phys_to_virt(ring, cpu); @@ -1449,7 +1503,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, } ring->last_free_ptr = 
cpu; - mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); + mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); return budget; } @@ -1542,24 +1596,25 @@ static void mtk_handle_status_irq(struct mtk_eth *eth) static int mtk_napi_tx(struct napi_struct *napi, int budget) { struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; int tx_done = 0; if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) mtk_handle_status_irq(eth); - mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg); + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); tx_done = mtk_poll_tx(eth, budget); if (unlikely(netif_msg_intr(eth))) { dev_info(eth->dev, "done tx %d, intr 0x%08x/0x%x\n", tx_done, - mtk_r32(eth, eth->tx_int_status_reg), - mtk_r32(eth, eth->tx_int_mask_reg)); + mtk_r32(eth, reg_map->tx_irq_status), + mtk_r32(eth, reg_map->tx_irq_mask)); } if (tx_done == budget) return budget; - if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) return budget; if (napi_complete_done(napi, tx_done)) @@ -1571,6 +1626,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) static int mtk_napi_rx(struct napi_struct *napi, int budget) { struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; int rx_done_total = 0; mtk_handle_status_irq(eth); @@ -1578,21 +1634,21 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) do { int rx_done; - mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status); rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); rx_done_total += rx_done; if (unlikely(netif_msg_intr(eth))) { dev_info(eth->dev, "done rx %d, intr 0x%08x/0x%x\n", rx_done, - mtk_r32(eth, MTK_PDMA_INT_STATUS), - mtk_r32(eth, MTK_PDMA_INT_MASK)); + mtk_r32(eth, reg_map->pdma.irq_status), + mtk_r32(eth, reg_map->pdma.irq_mask)); } if (rx_done_total == budget) return budget; - } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT); + } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT); if (napi_complete_done(napi, rx_done_total)) mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); @@ -1655,20 +1711,20 @@ static int mtk_tx_alloc(struct mtk_eth *eth) */ wmb(); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); - mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); + mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); mtk_w32(eth, ring->phys + ((MTK_DMA_SIZE - 1) * sz), - MTK_QTX_CRX_PTR); - mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR); + soc->reg_map->qdma.crx_ptr); + mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, - MTK_QTX_CFG(0)); + soc->reg_map->qdma.qtx_cfg); } else { mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); - mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX); + mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); } return 0; @@ -1707,6 +1763,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { + const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size; int i; @@ -1772,16 +1829,18 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int 
rx_flag) ring->dma_size = rx_dma_size; ring->calc_idx_update = false; ring->calc_idx = rx_dma_size - 1; - ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no); + ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET; /* make sure that all changes to the dma ring are flushed before we * continue */ wmb(); - mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); - mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); + mtk_w32(eth, ring->phys, + reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset); + mtk_w32(eth, rx_dma_size, + reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset); mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset); return 0; } @@ -2087,9 +2146,9 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth) u32 val; if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) - reg = MTK_QDMA_GLO_CFG; + reg = eth->soc->reg_map->qdma.glo_cfg; else - reg = MTK_PDMA_GLO_CFG; + reg = eth->soc->reg_map->pdma.glo_cfg; ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)), @@ -2147,8 +2206,8 @@ static int mtk_dma_init(struct mtk_eth *eth) * automatically */ mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | - FC_THRES_MIN, MTK_QDMA_FC_THRES); - mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); + mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); } return 0; @@ -2222,13 +2281,14 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) static irqreturn_t mtk_handle_irq(int irq, void *_eth) { struct mtk_eth *eth = _eth; + const struct mtk_reg_map *reg_map = eth->soc->reg_map; - if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) { - if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT) + if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) { + if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT) mtk_handle_irq_rx(irq, _eth); } - if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) { - if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) + if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) mtk_handle_irq_tx(irq, _eth); } @@ -2252,6 +2312,7 @@ static void mtk_poll_controller(struct net_device *dev) static int mtk_start_dma(struct mtk_eth *eth) { u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? 
MTK_RX_2B_OFFSET : 0; + const struct mtk_reg_map *reg_map = eth->soc->reg_map; int err; err = mtk_dma_init(eth); @@ -2266,16 +2327,15 @@ static int mtk_start_dma(struct mtk_eth *eth) MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | MTK_RX_BT_32DWORDS, - MTK_QDMA_GLO_CFG); - + reg_map->qdma.glo_cfg); mtk_w32(eth, MTK_RX_DMA_EN | rx_2b_offset | MTK_RX_BT_32DWORDS | MTK_MULTI_EN, - MTK_PDMA_GLO_CFG); + reg_map->pdma.glo_cfg); } else { mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS, - MTK_PDMA_GLO_CFG); + reg_map->pdma.glo_cfg); } return 0; @@ -2398,8 +2458,8 @@ static int mtk_stop(struct net_device *dev) cancel_work_sync(ð->tx_dim.work); if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) - mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); - mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); + mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); + mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); mtk_dma_free(eth); @@ -2453,6 +2513,7 @@ static void mtk_dim_rx(struct work_struct *work) { struct dim *dim = container_of(work, struct dim, work); struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct dim_cq_moder cur_profile; u32 val, cur; @@ -2460,7 +2521,7 @@ static void mtk_dim_rx(struct work_struct *work) dim->profile_ix); spin_lock_bh(ð->dim_lock); - val = mtk_r32(eth, MTK_PDMA_DELAY_INT); + val = mtk_r32(eth, reg_map->pdma.delay_irq); val &= MTK_PDMA_DELAY_TX_MASK; val |= MTK_PDMA_DELAY_RX_EN; @@ -2470,9 +2531,9 @@ static void mtk_dim_rx(struct work_struct *work) cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT; - mtk_w32(eth, val, MTK_PDMA_DELAY_INT); + mtk_w32(eth, val, reg_map->pdma.delay_irq); if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) - mtk_w32(eth, val, MTK_QDMA_DELAY_INT); + mtk_w32(eth, val, reg_map->qdma.delay_irq); spin_unlock_bh(ð->dim_lock); @@ -2483,6 +2544,7 @@ static void mtk_dim_tx(struct work_struct *work) { struct dim *dim = container_of(work, struct dim, work); struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; struct dim_cq_moder cur_profile; u32 val, cur; @@ -2490,7 +2552,7 @@ static void mtk_dim_tx(struct work_struct *work) dim->profile_ix); spin_lock_bh(ð->dim_lock); - val = mtk_r32(eth, MTK_PDMA_DELAY_INT); + val = mtk_r32(eth, reg_map->pdma.delay_irq); val &= MTK_PDMA_DELAY_RX_MASK; val |= MTK_PDMA_DELAY_TX_EN; @@ -2500,9 +2562,9 @@ static void mtk_dim_tx(struct work_struct *work) cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT; - mtk_w32(eth, val, MTK_PDMA_DELAY_INT); + mtk_w32(eth, val, reg_map->pdma.delay_irq); if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) - mtk_w32(eth, val, MTK_QDMA_DELAY_INT); + mtk_w32(eth, val, reg_map->qdma.delay_irq); spin_unlock_bh(ð->dim_lock); @@ -2513,6 +2575,7 @@ static int mtk_hw_init(struct mtk_eth *eth) { u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | ETHSYS_DMA_AG_MAP_PPE; + const struct mtk_reg_map *reg_map = eth->soc->reg_map; int i, val, ret; if (test_and_set_bit(MTK_HW_INIT, ð->state)) @@ -2587,10 +2650,10 @@ static int mtk_hw_init(struct mtk_eth *eth) mtk_rx_irq_disable(eth, ~0); /* FE int grouping */ - mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); - mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); + 
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4); + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); + mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); return 0; @@ -3153,14 +3216,6 @@ static int mtk_probe(struct platform_device *pdev) if (IS_ERR(eth->base)) return PTR_ERR(eth->base); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - eth->tx_int_mask_reg = MTK_QDMA_INT_MASK; - eth->tx_int_status_reg = MTK_QDMA_INT_STATUS; - } else { - eth->tx_int_mask_reg = MTK_PDMA_INT_MASK; - eth->tx_int_status_reg = MTK_PDMA_INT_STATUS; - } - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; eth->ip_align = NET_IP_ALIGN; @@ -3394,6 +3449,7 @@ static int mtk_remove(struct platform_device *pdev) } static const struct mtk_soc_data mt2701_data = { + .reg_map = &mtk_reg_map, .caps = MT7623_CAPS | MTK_HWLRO, .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, @@ -3405,6 +3461,7 @@ static const struct mtk_soc_data mt2701_data = { }; static const struct mtk_soc_data mt7621_data = { + .reg_map = &mtk_reg_map, .caps = MT7621_CAPS, .hw_features = MTK_HW_FEATURES, .required_clks = MT7621_CLKS_BITMAP, @@ -3417,6 +3474,7 @@ static const struct mtk_soc_data mt7621_data = { }; static const struct mtk_soc_data mt7622_data = { + .reg_map = &mtk_reg_map, .ana_rgc3 = 0x2028, .caps = MT7622_CAPS | MTK_HWLRO, .hw_features = MTK_HW_FEATURES, @@ -3430,6 +3488,7 @@ static const struct mtk_soc_data mt7622_data = { }; static const struct mtk_soc_data mt7623_data = { + .reg_map = &mtk_reg_map, .caps = MT7623_CAPS | MTK_HWLRO, .hw_features = MTK_HW_FEATURES, .required_clks = MT7623_CLKS_BITMAP, @@ -3442,6 +3501,7 @@ static const struct mtk_soc_data mt7623_data = { }; static const struct mtk_soc_data mt7629_data = { + .reg_map = &mtk_reg_map, .ana_rgc3 = 0x128, .caps = MT7629_CAPS | MTK_HWLRO, .hw_features = MTK_HW_FEATURES, @@ -3454,6 +3514,7 @@ static const struct mtk_soc_data mt7629_data = { }; static const struct mtk_soc_data rt5350_data = { + .reg_map = &mt7628_reg_map, .caps = MT7628_CAPS, .hw_features = MTK_HW_FEATURES_MT7628, .required_clks = MT7628_CLKS_BITMAP, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index dcbf4b5c70e0..7e29f042357f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -48,6 +48,8 @@ #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) #define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) +#define MTK_QRX_OFFSET 0x10 + #define MTK_MAX_RX_RING_NUM 4 #define MTK_HW_LRO_DMA_SIZE 8 @@ -100,18 +102,6 @@ /* Unicast Filter MAC Address Register - High */ #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) -/* PDMA RX Base Pointer Register */ -#define MTK_PRX_BASE_PTR0 0x900 -#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10)) - -/* PDMA RX Maximum Count Register */ -#define MTK_PRX_MAX_CNT0 0x904 -#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10)) - -/* PDMA RX CPU Pointer Register */ -#define MTK_PRX_CRX_IDX0 0x908 -#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10)) - /* PDMA HW LRO Control Registers */ #define MTK_PDMA_LRO_CTRL_DW0 0x980 #define MTK_LRO_EN BIT(0) @@ -126,18 +116,19 @@ #define MTK_ADMA_MODE BIT(15) #define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16) -/* PDMA Global Configuration Register */ -#define MTK_PDMA_GLO_CFG 0xa04 +#define 
MTK_RX_DMA_LRO_EN BIT(8) #define MTK_MULTI_EN BIT(10) #define MTK_PDMA_SIZE_8DWORDS (1 << 4) +/* PDMA Global Configuration Register */ +#define MTK_PDMA_LRO_SDL 0x3000 +#define MTK_RX_CFG_SDL_OFFSET 16 + /* PDMA Reset Index Register */ -#define MTK_PDMA_RST_IDX 0xa08 #define MTK_PST_DRX_IDX0 BIT(16) #define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x)) /* PDMA Delay Interrupt Register */ -#define MTK_PDMA_DELAY_INT 0xa0c #define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0) #define MTK_PDMA_DELAY_RX_EN BIT(15) #define MTK_PDMA_DELAY_RX_PINT_SHIFT 8 @@ -151,19 +142,9 @@ #define MTK_PDMA_DELAY_PINT_MASK 0x7f #define MTK_PDMA_DELAY_PTIME_MASK 0xff -/* PDMA Interrupt Status Register */ -#define MTK_PDMA_INT_STATUS 0xa20 - -/* PDMA Interrupt Mask Register */ -#define MTK_PDMA_INT_MASK 0xa28 - /* PDMA HW LRO Alter Flow Delta Register */ #define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c -/* PDMA Interrupt grouping registers */ -#define MTK_PDMA_INT_GRP1 0xa50 -#define MTK_PDMA_INT_GRP2 0xa54 - /* PDMA HW LRO IP Setting Registers */ #define MTK_LRO_RX_RING0_DIP_DW0 0xb04 #define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40)) @@ -185,26 +166,9 @@ #define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3) /* QDMA TX Queue Configuration Registers */ -#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) #define QDMA_RES_THRES 4 -/* QDMA TX Queue Scheduler Registers */ -#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10)) - -/* QDMA RX Base Pointer Register */ -#define MTK_QRX_BASE_PTR0 0x1900 - -/* QDMA RX Maximum Count Register */ -#define MTK_QRX_MAX_CNT0 0x1904 - -/* QDMA RX CPU Pointer Register */ -#define MTK_QRX_CRX_IDX0 0x1908 - -/* QDMA RX DMA Pointer Register */ -#define MTK_QRX_DRX_IDX0 0x190C - /* QDMA Global Configuration Register */ -#define MTK_QDMA_GLO_CFG 0x1A04 #define MTK_RX_2B_OFFSET BIT(31) #define MTK_RX_BT_32DWORDS (3 << 11) #define MTK_NDP_CO_PRO BIT(10) @@ -216,20 +180,12 @@ #define MTK_TX_DMA_EN BIT(0) #define MTK_DMA_BUSY_TIMEOUT_US 1000000 -/* QDMA Reset Index Register */ -#define MTK_QDMA_RST_IDX 0x1A08 - -/* QDMA Delay Interrupt Register */ -#define MTK_QDMA_DELAY_INT 0x1A0C - /* QDMA Flow Control Register */ -#define MTK_QDMA_FC_THRES 0x1A10 #define FC_THRES_DROP_MODE BIT(20) #define FC_THRES_DROP_EN (7 << 16) #define FC_THRES_MIN 0x4444 /* QDMA Interrupt Status Register */ -#define MTK_QDMA_INT_STATUS 0x1A18 #define MTK_RX_DONE_DLY BIT(30) #define MTK_TX_DONE_DLY BIT(28) #define MTK_RX_DONE_INT3 BIT(19) @@ -244,55 +200,8 @@ #define MTK_TX_DONE_INT MTK_TX_DONE_DLY /* QDMA Interrupt grouping registers */ -#define MTK_QDMA_INT_GRP1 0x1a20 -#define MTK_QDMA_INT_GRP2 0x1a24 #define MTK_RLS_DONE_INT BIT(0) -/* QDMA Interrupt Status Register */ -#define MTK_QDMA_INT_MASK 0x1A1C - -/* QDMA Interrupt Mask Register */ -#define MTK_QDMA_HRED2 0x1A44 - -/* QDMA TX Forward CPU Pointer Register */ -#define MTK_QTX_CTX_PTR 0x1B00 - -/* QDMA TX Forward DMA Pointer Register */ -#define MTK_QTX_DTX_PTR 0x1B04 - -/* QDMA TX Release CPU Pointer Register */ -#define MTK_QTX_CRX_PTR 0x1B10 - -/* QDMA TX Release DMA Pointer Register */ -#define MTK_QTX_DRX_PTR 0x1B14 - -/* QDMA FQ Head Pointer Register */ -#define MTK_QDMA_FQ_HEAD 0x1B20 - -/* QDMA FQ Head Pointer Register */ -#define MTK_QDMA_FQ_TAIL 0x1B24 - -/* QDMA FQ Free Page Counter Register */ -#define MTK_QDMA_FQ_CNT 0x1B28 - -/* QDMA FQ Free Page Buffer Length Register */ -#define MTK_QDMA_FQ_BLEN 0x1B2C - -/* GMA1 counter / statics register */ -#define MTK_GDM1_RX_GBCNT_L 0x2400 -#define MTK_GDM1_RX_GBCNT_H 0x2404 -#define 
MTK_GDM1_RX_GPCNT 0x2408 -#define MTK_GDM1_RX_OERCNT 0x2410 -#define MTK_GDM1_RX_FERCNT 0x2414 -#define MTK_GDM1_RX_SERCNT 0x2418 -#define MTK_GDM1_RX_LENCNT 0x241c -#define MTK_GDM1_RX_CERCNT 0x2420 -#define MTK_GDM1_RX_FCCNT 0x2424 -#define MTK_GDM1_TX_SKIPCNT 0x2428 -#define MTK_GDM1_TX_COLCNT 0x242c -#define MTK_GDM1_TX_GBCNT_L 0x2430 -#define MTK_GDM1_TX_GBCNT_H 0x2434 -#define MTK_GDM1_TX_GPCNT 0x2438 #define MTK_STAT_OFFSET 0x40 #define MTK_WDMA0_BASE 0x2800 @@ -857,8 +766,46 @@ struct mtk_tx_dma_desc_info { u8 last:1; }; +struct mtk_reg_map { + u32 tx_irq_mask; + u32 tx_irq_status; + struct { + u32 rx_ptr; /* rx base pointer */ + u32 rx_cnt_cfg; /* rx max count configuration */ + u32 pcrx_ptr; /* rx cpu pointer */ + u32 glo_cfg; /* global configuration */ + u32 rst_idx; /* reset index */ + u32 delay_irq; /* delay interrupt */ + u32 irq_status; /* interrupt status */ + u32 irq_mask; /* interrupt mask */ + u32 int_grp; + } pdma; + struct { + u32 qtx_cfg; /* tx queue configuration */ + u32 rx_ptr; /* rx base pointer */ + u32 rx_cnt_cfg; /* rx max count configuration */ + u32 qcrx_ptr; /* rx cpu pointer */ + u32 glo_cfg; /* global configuration */ + u32 rst_idx; /* reset index */ + u32 delay_irq; /* delay interrupt */ + u32 fc_th; /* flow control */ + u32 int_grp; + u32 hred; /* interrupt mask */ + u32 ctx_ptr; /* tx acquire cpu pointer */ + u32 dtx_ptr; /* tx acquire dma pointer */ + u32 crx_ptr; /* tx release cpu pointer */ + u32 drx_ptr; /* tx release dma pointer */ + u32 fq_head; /* fq head pointer */ + u32 fq_tail; /* fq tail pointer */ + u32 fq_count; /* fq free page count */ + u32 fq_blen; /* fq free page buffer length */ + } qdma; + u32 gdm1_cnt; +}; + /* struct mtk_eth_data - This is the structure holding all differences * among various plaforms + * @reg_map Soc register map. * @ana_rgc3: The offset for register ANA_RGC3 related to * sgmiisys syscon * @caps Flags shown the extra capability for the SoC @@ -871,6 +818,7 @@ struct mtk_tx_dma_desc_info { * @rxd_size Rx DMA descriptor size. */ struct mtk_soc_data { + const struct mtk_reg_map *reg_map; u32 ana_rgc3; u32 caps; u32 required_clks; @@ -999,8 +947,6 @@ struct mtk_eth { u32 tx_bytes; struct dim tx_dim; - u32 tx_int_mask_reg; - u32 tx_int_status_reg; u32 rx_dma_l4_valid; int ip_align; -- cgit v1.2.3 From 160d3a9b192985b1cea7f394ab745de762d89165 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:36 +0200 Subject: net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support Introduce MTK_NETSYS_V2 support. MTK_NETSYS_V2 defines 32B TX/RX DMA descriptors. This is a preliminary patch to add mt7986 ethernet support. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 321 ++++++++++++++++++++++------ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 126 ++++++++++- 2 files changed, 372 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 503829a9c270..9169f6360ab5 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -844,8 +844,8 @@ static inline int mtk_max_buf_size(int frag_size) return buf_size; } -static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd, - struct mtk_rx_dma *dma_rxd) +static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, + struct mtk_rx_dma_v2 *dma_rxd) { rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); if (!(rxd->rxd2 & RX_DMA_DONE)) @@ -854,6 +854,10 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd, rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); + rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); + } return true; } @@ -887,7 +891,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); for (i = 0; i < cnt; i++) { - struct mtk_tx_dma *txd; + struct mtk_tx_dma_v2 *txd; txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size; txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; @@ -897,6 +901,12 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); txd->txd4 = 0; + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { + txd->txd5 = 0; + txd->txd6 = 0; + txd->txd7 = 0; + txd->txd8 = 0; + } } mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); @@ -1000,10 +1010,12 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, } } -static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc, - struct mtk_tx_dma_desc_info *info) +static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd, + struct mtk_tx_dma_desc_info *info) { struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_dma *desc = txd; u32 data; WRITE_ONCE(desc->txd1, info->addr); @@ -1027,6 +1039,59 @@ static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc, WRITE_ONCE(desc->txd4, data); } +static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd, + struct mtk_tx_dma_desc_info *info) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_tx_dma_v2 *desc = txd; + struct mtk_eth *eth = mac->hw; + u32 data; + + WRITE_ONCE(desc->txd1, info->addr); + + data = TX_DMA_PLEN0(info->size); + if (info->last) + data |= TX_DMA_LS0; + WRITE_ONCE(desc->txd3, data); + + if (!info->qid && mac->id) + info->qid = MTK_QDMA_GMAC2_QID; + + data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */ + data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); + WRITE_ONCE(desc->txd4, data); + + data = 0; + if (info->first) { + if (info->gso) + data |= TX_DMA_TSO_V2; + /* tx checksum offload */ + if (info->csum) + data |= TX_DMA_CHKSUM_V2; + } + WRITE_ONCE(desc->txd5, data); + + data = 0; + if (info->first && info->vlan) + data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci; + WRITE_ONCE(desc->txd6, data); + + WRITE_ONCE(desc->txd7, 0); + WRITE_ONCE(desc->txd8, 0); +} + +static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd, + struct mtk_tx_dma_desc_info *info) +{ + struct mtk_mac *mac = 
netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + mtk_tx_set_dma_desc_v2(dev, txd, info); + else + mtk_tx_set_dma_desc_v1(dev, txd, info); +} + static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, int tx_num, struct mtk_tx_ring *ring, bool gso) { @@ -1035,6 +1100,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, .gso = gso, .csum = skb->ip_summed == CHECKSUM_PARTIAL, .vlan = skb_vlan_tag_present(skb), + .qid = skb->mark & MTK_QDMA_TX_MASK, .vlan_tci = skb_vlan_tag_get(skb), .first = true, .last = !skb_is_nonlinear(skb), @@ -1094,7 +1160,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, } memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); - txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN); + txd_info.size = min_t(unsigned int, frag_size, + soc->txrx.dma_max_len); + txd_info.qid = skb->mark & MTK_QDMA_TX_MASK; txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && !(frag_size - txd_info.size); txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, @@ -1175,17 +1243,16 @@ err_dma: return -ENOMEM; } -static inline int mtk_cal_txd_req(struct sk_buff *skb) +static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) { - int i, nfrags; + int i, nfrags = 1; skb_frag_t *frag; - nfrags = 1; if (skb_is_gso(skb)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; nfrags += DIV_ROUND_UP(skb_frag_size(frag), - MTK_TX_DMA_BUF_LEN); + eth->soc->txrx.dma_max_len); } } else { nfrags += skb_shinfo(skb)->nr_frags; @@ -1237,7 +1304,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(test_bit(MTK_RESETTING, ð->state))) goto drop; - tx_num = mtk_cal_txd_req(skb); + tx_num = mtk_cal_txd_req(eth, skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { netif_stop_queue(dev); netif_err(eth, tx_queued, dev, @@ -1329,7 +1396,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, int idx; struct sk_buff *skb; u8 *data, *new_data; - struct mtk_rx_dma *rxd, trxd; + struct mtk_rx_dma_v2 *rxd, trxd; int done = 0, bytes = 0; while (done < budget) { @@ -1337,7 +1404,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, unsigned int pktlen; dma_addr_t dma_addr; u32 hash, reason; - int mac; + int mac = 0; ring = mtk_get_rx_ring(eth); if (unlikely(!ring)) @@ -1347,16 +1414,15 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; data = ring->data[idx]; - if (!mtk_rx_get_desc(&trxd, rxd)) + if (!mtk_rx_get_desc(eth, &trxd, rxd)) break; /* find out which mac the packet come from. 
values start at 1 */ - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) || - (trxd.rxd4 & RX_DMA_SPECIAL_TAG)) - mac = 0; - else - mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & - RX_DMA_FPORT_MASK) - 1; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1; + else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && + !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) + mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1; if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || !eth->netdev[mac])) @@ -1399,7 +1465,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; skb_put(skb, pktlen); - if (trxd.rxd4 & eth->rx_dma_l4_valid) + if (trxd.rxd4 & eth->soc->txrx.rx_dma_l4_valid) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); @@ -1417,10 +1483,25 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, mtk_ppe_check_skb(eth->ppe, skb, trxd.rxd4 & MTK_RXD4_FOE_ENTRY); - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && - (trxd.rxd2 & RX_DMA_VTAG)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - RX_DMA_VID(trxd.rxd3)); + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (trxd.rxd3 & RX_DMA_VTAG_V2) + __vlan_hwaccel_put_tag(skb, + htons(RX_DMA_VPID(trxd.rxd4)), + RX_DMA_VID(trxd.rxd4)); + } else if (trxd.rxd2 & RX_DMA_VTAG) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + RX_DMA_VID(trxd.rxd3)); + } + + /* If the device is attached to a dsa switch, the special + * tag inserted in VLAN field by hw switch can * be offloaded + * by RX HW VLAN offload. Clear vlan info. + */ + if (netdev_uses_dsa(netdev)) + __vlan_hwaccel_clear_tag(skb); + } + skb_record_rx_queue(skb, 0); napi_gro_receive(napi, skb); @@ -1432,7 +1513,7 @@ release_desc: if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) rxd->rxd2 = RX_DMA_LSO; else - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); ring->calc_idx = idx; @@ -1634,7 +1715,8 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) do { int rx_done; - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.irq_status); + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, + reg_map->pdma.irq_status); rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); rx_done_total += rx_done; @@ -1648,10 +1730,11 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) if (rx_done_total == budget) return budget; - } while (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT); + } while (mtk_r32(eth, reg_map->pdma.irq_status) & + eth->soc->txrx.rx_irq_done_mask); if (napi_complete_done(napi, rx_done_total)) - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); return rx_done_total; } @@ -1661,7 +1744,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_ring *ring = ð->tx_ring; int i, sz = soc->txrx.txd_size; - struct mtk_tx_dma *txd; + struct mtk_tx_dma_v2 *txd; ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), GFP_KERNEL); @@ -1681,13 +1764,19 @@ static int mtk_tx_alloc(struct mtk_eth *eth) txd->txd2 = next_ptr; txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; txd->txd4 = 0; + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { + txd->txd5 = 0; + txd->txd6 = 0; + txd->txd7 = 0; + txd->txd8 = 0; + } } /* On MT7688 (PDMA only) this driver uses the ring->dma structs * only as the framework. The real HW descriptors are the PDMA * descriptors in ring->dma_pdma. 
*/ - if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, &ring->phys_pdma, GFP_KERNEL); if (!ring->dma_pdma) @@ -1767,13 +1856,11 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size; int i; - u32 offset = 0; if (rx_flag == MTK_RX_FLAGS_QDMA) { if (ring_no) return -EINVAL; ring = ð->rx_ring_qdma; - offset = 0x1000; } else { ring = ð->rx_ring[ring_no]; } @@ -1806,7 +1893,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - struct mtk_rx_dma *rxd; + struct mtk_rx_dma_v2 *rxd; dma_addr_t dma_addr = dma_map_single(eth->dma_dev, ring->data[i] + NET_SKB_PAD + eth->ip_align, @@ -1821,26 +1908,47 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) rxd->rxd2 = RX_DMA_LSO; else - rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); rxd->rxd3 = 0; rxd->rxd4 = 0; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + rxd->rxd5 = 0; + rxd->rxd6 = 0; + rxd->rxd7 = 0; + rxd->rxd8 = 0; + } } ring->dma_size = rx_dma_size; ring->calc_idx_update = false; ring->calc_idx = rx_dma_size - 1; - ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + ring_no * MTK_QRX_OFFSET; + if (rx_flag == MTK_RX_FLAGS_QDMA) + ring->crx_idx_reg = reg_map->qdma.qcrx_ptr + + ring_no * MTK_QRX_OFFSET; + else + ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + + ring_no * MTK_QRX_OFFSET; /* make sure that all changes to the dma ring are flushed before we * continue */ wmb(); - mtk_w32(eth, ring->phys, - reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET + offset); - mtk_w32(eth, rx_dma_size, - reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET + offset); - mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), reg_map->pdma.rst_idx + offset); + if (rx_flag == MTK_RX_FLAGS_QDMA) { + mtk_w32(eth, ring->phys, + reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, rx_dma_size, + reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), + reg_map->qdma.rst_idx); + } else { + mtk_w32(eth, ring->phys, + reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, rx_dma_size, + reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), + reg_map->pdma.rst_idx); + } + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); return 0; } @@ -2259,7 +2367,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { __napi_schedule(ð->rx_napi); - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); } return IRQ_HANDLED; @@ -2283,8 +2391,10 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth) struct mtk_eth *eth = _eth; const struct mtk_reg_map *reg_map = eth->soc->reg_map; - if (mtk_r32(eth, reg_map->pdma.irq_mask) & MTK_RX_DONE_INT) { - if (mtk_r32(eth, reg_map->pdma.irq_status) & MTK_RX_DONE_INT) + if (mtk_r32(eth, reg_map->pdma.irq_mask) & + eth->soc->txrx.rx_irq_done_mask) { + if (mtk_r32(eth, reg_map->pdma.irq_status) & + eth->soc->txrx.rx_irq_done_mask) mtk_handle_irq_rx(irq, _eth); } if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { @@ -2302,16 +2412,16 @@ static void mtk_poll_controller(struct net_device *dev) struct mtk_eth 
*eth = mac->hw; mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); mtk_handle_irq_rx(eth->irq[2], dev); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); } #endif static int mtk_start_dma(struct mtk_eth *eth) { - u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0; + u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0; const struct mtk_reg_map *reg_map = eth->soc->reg_map; int err; @@ -2322,12 +2432,19 @@ static int mtk_start_dma(struct mtk_eth *eth) } if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { - mtk_w32(eth, - MTK_TX_WB_DDONE | MTK_TX_DMA_EN | - MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | - MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | - MTK_RX_BT_32DWORDS, - reg_map->qdma.glo_cfg); + val = mtk_r32(eth, reg_map->qdma.glo_cfg); + val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN | + MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | + MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + val |= MTK_MUTLI_CNT | MTK_RESV_BUF | + MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | + MTK_CHK_DDONE_EN; + else + val |= MTK_RX_BT_32DWORDS; + mtk_w32(eth, val, reg_map->qdma.glo_cfg); + mtk_w32(eth, MTK_RX_DMA_EN | rx_2b_offset | MTK_RX_BT_32DWORDS | MTK_MULTI_EN, @@ -2398,7 +2515,7 @@ static int mtk_open(struct net_device *dev) napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); refcount_set(ð->dma_refcnt, 1); } else @@ -2450,7 +2567,7 @@ static int mtk_stop(struct net_device *dev) mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); @@ -2610,9 +2727,25 @@ static int mtk_hw_init(struct mtk_eth *eth) return 0; } - /* Non-MT7628 handling... 
*/ - ethsys_reset(eth, RSTCTRL_FE); - ethsys_reset(eth, RSTCTRL_PPE); + val = RSTCTRL_FE | RSTCTRL_PPE; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); + + val |= RSTCTRL_ETH; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1; + } + + ethsys_reset(eth, val); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, + 0x3ffffff); + + /* Set FE to PDMAv2 if necessary */ + val = mtk_r32(eth, MTK_FE_GLO_MISC); + mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); + } if (eth->pctl) { /* Set GE2 driving and slew rate */ @@ -2651,11 +2784,47 @@ static int mtk_hw_init(struct mtk_eth *eth) /* FE int grouping */ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->pdma.int_grp + 4); + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); - mtk_w32(eth, MTK_RX_DONE_INT, reg_map->qdma.int_grp + 4); + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + /* PSE should not drop port8 and port9 packets */ + mtk_w32(eth, 0x00000300, PSE_DROP_CFG); + + /* PSE Free Queue Flow Control */ + mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); + + /* PSE config input queue threshold */ + mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); + mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); + mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); + + /* PSE config output queue threshold */ + mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); + mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); + mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); + mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); + mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); + mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); + mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); + mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); + + /* GDM and CDM Threshold */ + mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); + mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); + mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); + } + return 0; err_disable_pm: @@ -3216,12 +3385,8 @@ static int mtk_probe(struct platform_device *pdev) if (IS_ERR(eth->base)) return PTR_ERR(eth->base); - if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { - eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) eth->ip_align = NET_IP_ALIGN; - } else { - eth->rx_dma_l4_valid = RX_DMA_L4_VALID; - } spin_lock_init(ð->page_lock); spin_lock_init(ð->tx_irq_lock); @@ -3457,6 +3622,10 @@ static const struct mtk_soc_data mt2701_data = { .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, }, }; @@ -3470,6 +3639,10 @@ static const struct mtk_soc_data mt7621_data = { .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, }, }; @@ -3484,6 +3657,10 @@ 
static const struct mtk_soc_data mt7622_data = { .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, }, }; @@ -3497,6 +3674,10 @@ static const struct mtk_soc_data mt7623_data = { .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, }, }; @@ -3510,6 +3691,10 @@ static const struct mtk_soc_data mt7629_data = { .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, }, }; @@ -3522,6 +3707,10 @@ static const struct mtk_soc_data rt5350_data = { .txrx = { .txd_size = sizeof(struct mtk_tx_dma), .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, }, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 7e29f042357f..1db5424edee5 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -24,6 +24,7 @@ #define MTK_MAX_RX_LENGTH 1536 #define MTK_MAX_RX_LENGTH_2K 2048 #define MTK_TX_DMA_BUF_LEN 0x3fff +#define MTK_TX_DMA_BUF_LEN_V2 0xffff #define MTK_DMA_SIZE 512 #define MTK_MAC_COUNT 2 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) @@ -83,6 +84,10 @@ #define MTK_CDMQ_IG_CTRL 0x1400 #define MTK_CDMQ_STAG_EN BIT(0) +/* CDMP Ingress Control Register */ +#define MTK_CDMP_IG_CTRL 0x400 +#define MTK_CDMP_STAG_EN BIT(0) + /* CDMP Exgress Control Register */ #define MTK_CDMP_EG_CTRL 0x404 @@ -102,13 +107,38 @@ /* Unicast Filter MAC Address Register - High */ #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) +/* FE global misc reg*/ +#define MTK_FE_GLO_MISC 0x124 + +/* PSE Free Queue Flow Control */ +#define PSE_FQFC_CFG1 0x100 +#define PSE_FQFC_CFG2 0x104 +#define PSE_DROP_CFG 0x108 + +/* PSE Input Queue Reservation Register*/ +#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2)) + +/* PSE Output Queue Threshold Register*/ +#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2)) + +/* GDM and CDM Threshold */ +#define MTK_GDM2_THRES 0x1530 +#define MTK_CDMW0_THRES 0x164c +#define MTK_CDMW1_THRES 0x1650 +#define MTK_CDME0_THRES 0x1654 +#define MTK_CDME1_THRES 0x1658 +#define MTK_CDMM_THRES 0x165c + /* PDMA HW LRO Control Registers */ #define MTK_PDMA_LRO_CTRL_DW0 0x980 #define MTK_LRO_EN BIT(0) #define MTK_L3_CKS_UPD_EN BIT(7) +#define MTK_L3_CKS_UPD_EN_V2 BIT(19) #define MTK_LRO_ALT_PKT_CNT_MODE BIT(21) #define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26) +#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24) #define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29) +#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28) #define MTK_PDMA_LRO_CTRL_DW1 0x984 #define MTK_PDMA_LRO_CTRL_DW2 0x988 @@ -180,6 +210,13 @@ #define MTK_TX_DMA_EN BIT(0) #define MTK_DMA_BUSY_TIMEOUT_US 1000000 +/* QDMA V2 Global Configuration Register */ +#define MTK_CHK_DDONE_EN BIT(28) +#define MTK_DMAD_WR_WDONE BIT(26) +#define MTK_WCOMP_EN BIT(24) +#define MTK_RESV_BUF (0x40 << 16) +#define MTK_MUTLI_CNT (0x4 << 12) + /* QDMA Flow Control Register */ #define FC_THRES_DROP_MODE BIT(20) #define FC_THRES_DROP_EN (7 << 16) 
@@ -199,11 +236,32 @@ #define MTK_RX_DONE_INT MTK_RX_DONE_DLY #define MTK_TX_DONE_INT MTK_TX_DONE_DLY +#define MTK_RX_DONE_INT_V2 BIT(14) + /* QDMA Interrupt grouping registers */ #define MTK_RLS_DONE_INT BIT(0) #define MTK_STAT_OFFSET 0x40 +/* QDMA TX NUM */ +#define MTK_QDMA_TX_NUM 16 +#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1) +#define QID_BITS_V2(x) (((x) & 0x3f) << 16) +#define MTK_QDMA_GMAC2_QID 8 + +#define MTK_TX_DMA_BUF_SHIFT 8 + +/* QDMA V2 descriptor txd6 */ +#define TX_DMA_INS_VLAN_V2 BIT(16) +/* QDMA V2 descriptor txd5 */ +#define TX_DMA_CHKSUM_V2 (0x7 << 28) +#define TX_DMA_TSO_V2 BIT(31) + +/* QDMA V2 descriptor txd4 */ +#define TX_DMA_FPORT_SHIFT_V2 8 +#define TX_DMA_FPORT_MASK_V2 0xf +#define TX_DMA_SWC_V2 BIT(30) + #define MTK_WDMA0_BASE 0x2800 #define MTK_WDMA1_BASE 0x2c00 @@ -217,10 +275,9 @@ /* QDMA descriptor txd3 */ #define TX_DMA_OWNER_CPU BIT(31) #define TX_DMA_LS0 BIT(30) -#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16) -#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN) +#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) +#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) #define TX_DMA_SWC BIT(14) -#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16) /* PDMA on MT7628 */ #define TX_DMA_DONE BIT(31) @@ -230,12 +287,14 @@ /* QDMA descriptor rxd2 */ #define RX_DMA_DONE BIT(31) #define RX_DMA_LSO BIT(30) -#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) -#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) +#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) +#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len) #define RX_DMA_VTAG BIT(15) /* QDMA descriptor rxd3 */ -#define RX_DMA_VID(_x) ((_x) & 0xfff) +#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK) +#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK)) +#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff) /* QDMA descriptor rxd4 */ #define MTK_RXD4_FOE_ENTRY GENMASK(13, 0) @@ -246,10 +305,15 @@ /* QDMA descriptor rxd4 */ #define RX_DMA_L4_VALID BIT(24) #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ -#define RX_DMA_FPORT_SHIFT 19 -#define RX_DMA_FPORT_MASK 0x7 #define RX_DMA_SPECIAL_TAG BIT(22) +#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf) +#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7) + +/* PDMA V2 descriptor rxd3 */ +#define RX_DMA_VTAG_V2 BIT(0) +#define RX_DMA_L4_VALID_V2 BIT(2) + /* PHY Indirect Access Control registers */ #define MTK_PHY_IAC 0x10004 #define PHY_IAC_ACCESS BIT(31) @@ -372,6 +436,16 @@ #define ETHSYS_TRGMII_MT7621_APLL BIT(6) #define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5) +/* ethernet reset control register */ +#define ETHSYS_RSTCTRL 0x34 +#define RSTCTRL_FE BIT(6) +#define RSTCTRL_PPE BIT(31) +#define RSTCTRL_PPE1 BIT(30) +#define RSTCTRL_ETH BIT(23) + +/* ethernet reset check idle register */ +#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 + /* ethernet reset control register */ #define ETHSYS_RSTCTRL 0x34 #define RSTCTRL_FE BIT(6) @@ -457,6 +531,17 @@ struct mtk_rx_dma { unsigned int rxd4; } __packed __aligned(4); +struct mtk_rx_dma_v2 { + unsigned int rxd1; + unsigned int rxd2; + unsigned int rxd3; + unsigned int rxd4; + unsigned int rxd5; + unsigned int rxd6; + unsigned int rxd7; + unsigned int rxd8; +} __packed __aligned(4); + struct mtk_tx_dma { unsigned int txd1; unsigned int txd2; @@ -464,6 +549,17 @@ struct mtk_tx_dma { unsigned int txd4; } __packed __aligned(4); +struct mtk_tx_dma_v2 { + unsigned int txd1; + unsigned int 
txd2; + unsigned int txd3; + unsigned int txd4; + unsigned int txd5; + unsigned int txd6; + unsigned int txd7; + unsigned int txd8; +} __packed __aligned(4); + struct mtk_eth; struct mtk_mac; @@ -650,7 +746,9 @@ enum mkt_eth_capabilities { MTK_SHARED_INT_BIT, MTK_TRGMII_MT7621_CLK_BIT, MTK_QDMA_BIT, + MTK_NETSYS_V2_BIT, MTK_SOC_MT7628_BIT, + MTK_RSTCTRL_PPE1_BIT, /* MUX BITS*/ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, @@ -682,7 +780,9 @@ enum mkt_eth_capabilities { #define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT) #define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT) #define MTK_QDMA BIT(MTK_QDMA_BIT) +#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT) #define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT) +#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT) #define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \ BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) @@ -759,6 +859,7 @@ struct mtk_tx_dma_desc_info { dma_addr_t addr; u32 size; u16 vlan_tci; + u16 qid; u8 gso:1; u8 csum:1; u8 vlan:1; @@ -816,6 +917,10 @@ struct mtk_reg_map { * the extra setup for those pins used by GMAC. * @txd_size Tx DMA descriptor size. * @rxd_size Rx DMA descriptor size. + * @rx_irq_done_mask Rx irq done register mask. + * @rx_dma_l4_valid Rx DMA valid register mask. + * @dma_max_len Max DMA tx/rx buffer length. + * @dma_len_offset Tx/Rx DMA length field offset. */ struct mtk_soc_data { const struct mtk_reg_map *reg_map; @@ -828,6 +933,10 @@ struct mtk_soc_data { struct { u32 txd_size; u32 rxd_size; + u32 rx_irq_done_mask; + u32 rx_dma_l4_valid; + u32 dma_max_len; + u32 dma_len_offset; } txrx; }; @@ -947,7 +1056,6 @@ struct mtk_eth { u32 tx_bytes; struct dim tx_dim; - u32 rx_dma_l4_valid; int ip_align; struct mtk_ppe *ppe; -- cgit v1.2.3 From 7173eca8eeb7d58f885f7506f808f17851f934ce Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:37 +0200 Subject: net: ethernet: mtk_eth_soc: convert ring dma pointer to void Simplify the code converting {tx,rx} ring dma pointer to void Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 32 +++++++++++++---------------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 ++-- 2 files changed, 16 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 9169f6360ab5..64c201e763c3 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -917,18 +917,15 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) return 0; } -static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) +static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) { - void *ret = ring->dma; - - return ret + (desc - ring->phys); + return ring->dma + (desc - ring->phys); } static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, - struct mtk_tx_dma *txd, - u32 txd_size) + void *txd, u32 txd_size) { - int idx = ((void *)txd - (void *)ring->dma) / txd_size; + int idx = (txd - ring->dma) / txd_size; return &ring->buf[idx]; } @@ -936,13 +933,12 @@ static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma) { - return ring->dma_pdma - ring->dma + dma; + return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma; } -static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma, - u32 txd_size) +static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size) { - return ((void *)dma - (void *)ring->dma) / txd_size; + return (dma - ring->dma) / txd_size; } static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, @@ -1359,7 +1355,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) ring = ð->rx_ring[i]; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; + rxd = ring->dma + idx * eth->soc->txrx.rxd_size; if (rxd->rxd2 & RX_DMA_DONE) { ring->calc_idx_update = true; return ring; @@ -1411,7 +1407,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto rx_done; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - rxd = (void *)ring->dma + idx * eth->soc->txrx.rxd_size; + rxd = ring->dma + idx * eth->soc->txrx.rxd_size; data = ring->data[idx]; if (!mtk_rx_get_desc(eth, &trxd, rxd)) @@ -1615,7 +1611,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, mtk_tx_unmap(eth, tx_buf, true); - desc = (void *)ring->dma + cpu * eth->soc->txrx.txd_size; + desc = ring->dma + cpu * eth->soc->txrx.txd_size; ring->last_free = desc; atomic_inc(&ring->free_count); @@ -1760,7 +1756,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) int next = (i + 1) % MTK_DMA_SIZE; u32 next_ptr = ring->phys + next * sz; - txd = (void *)ring->dma + i * sz; + txd = ring->dma + i * sz; txd->txd2 = next_ptr; txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; txd->txd4 = 0; @@ -1790,7 +1786,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) ring->dma_size = MTK_DMA_SIZE; atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); - ring->next_free = &ring->dma[0]; + ring->next_free = ring->dma; ring->last_free = (void *)txd; ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); ring->thresh = MAX_SKB_FRAGS; @@ -1902,7 +1898,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; - rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; + rxd = ring->dma + i * eth->soc->txrx.rxd_size; 
rxd->rxd1 = (unsigned int)dma_addr; if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) @@ -1964,7 +1960,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) if (!ring->data[i]) continue; - rxd = (void *)ring->dma + i * eth->soc->txrx.rxd_size; + rxd = ring->dma + i * eth->soc->txrx.rxd_size; if (!rxd->rxd1) continue; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 1db5424edee5..f53024682698 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -692,7 +692,7 @@ struct mtk_tx_buf { * are present */ struct mtk_tx_ring { - struct mtk_tx_dma *dma; + void *dma; struct mtk_tx_buf *buf; dma_addr_t phys; struct mtk_tx_dma *next_free; @@ -722,7 +722,7 @@ enum mtk_rx_flags { * @calc_idx: The current head of ring */ struct mtk_rx_ring { - struct mtk_rx_dma *dma; + void *dma; u8 **data; dma_addr_t phys; u16 frag_size; -- cgit v1.2.3 From 4d6426904f13bdb5ab20a2dddfb3427dc518f1f8 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:38 +0200 Subject: net: ethernet: mtk_eth_soc: convert scratch_ring pointer to void Simplify the code converting scratch_ring pointer to void Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 64c201e763c3..c034fd90dbdc 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -893,7 +893,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) for (i = 0; i < cnt; i++) { struct mtk_tx_dma_v2 *txd; - txd = (void *)eth->scratch_ring + i * soc->txrx.txd_size; + txd = eth->scratch_ring + i * soc->txrx.txd_size; txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; if (i < cnt - 1) txd->txd2 = eth->phy_scratch_ring + diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index f53024682698..67482124de2a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1033,7 +1033,7 @@ struct mtk_eth { struct mtk_rx_ring rx_ring_qdma; struct napi_struct tx_napi; struct napi_struct rx_napi; - struct mtk_tx_dma *scratch_ring; + void *scratch_ring; dma_addr_t phy_scratch_ring; void *scratch_head; struct clk *clks[MTK_CLK_MAX]; -- cgit v1.2.3 From 197c9e9b17b115e04c30d9e185ced582b1fee2a8 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 20 May 2022 20:11:39 +0200 Subject: net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset Add support for mt7986-eth driver available on mt7986 soc. Tested-by: Sam Shih Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 55 ++++++++++++++++++++++++++++- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 18 ++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c034fd90dbdc..a9d4fd8945bb 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -87,6 +87,43 @@ static const struct mtk_reg_map mt7628_reg_map = { }, }; +static const struct mtk_reg_map mt7986_reg_map = { + .tx_irq_mask = 0x461c, + .tx_irq_status = 0x4618, + .pdma = { + .rx_ptr = 0x6100, + .rx_cnt_cfg = 0x6104, + .pcrx_ptr = 0x6108, + .glo_cfg = 0x6204, + .rst_idx = 0x6208, + .delay_irq = 0x620c, + .irq_status = 0x6220, + .irq_mask = 0x6228, + .int_grp = 0x6250, + }, + .qdma = { + .qtx_cfg = 0x4400, + .rx_ptr = 0x4500, + .rx_cnt_cfg = 0x4504, + .qcrx_ptr = 0x4508, + .glo_cfg = 0x4604, + .rst_idx = 0x4608, + .delay_irq = 0x460c, + .fc_th = 0x4610, + .int_grp = 0x4620, + .hred = 0x4644, + .ctx_ptr = 0x4700, + .dtx_ptr = 0x4704, + .crx_ptr = 0x4710, + .drx_ptr = 0x4714, + .fq_head = 0x4720, + .fq_tail = 0x4724, + .fq_count = 0x4728, + .fq_blen = 0x472c, + }, + .gdm1_cnt = 0x1c00, +}; + /* strings used by ethtool */ static const struct mtk_ethtool_stats { char str[ETH_GSTRING_LEN]; @@ -110,7 +147,7 @@ static const char * const mtk_clks_source_name[] = { "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll", "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb", - "sgmii_ck", "eth2pll", + "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1" }; void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) @@ -3694,6 +3731,21 @@ static const struct mtk_soc_data mt7629_data = { }, }; +static const struct mtk_soc_data mt7986_data = { + .reg_map = &mt7986_reg_map, + .ana_rgc3 = 0x128, + .caps = MT7986_CAPS, + .required_clks = MT7986_CLKS_BITMAP, + .required_pctl = false, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma_v2), + .rxd_size = sizeof(struct mtk_rx_dma_v2), + .rx_irq_done_mask = MTK_RX_DONE_INT_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, + }, +}; + static const struct mtk_soc_data rt5350_data = { .reg_map = &mt7628_reg_map, .caps = MT7628_CAPS, @@ -3716,6 +3768,7 @@ const struct of_device_id of_mtk_match[] = { { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data}, + { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data}, { .compatible = "ralink,rt5350-eth", .data = &rt5350_data}, {}, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 67482124de2a..0a632896451a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -627,6 +627,10 @@ enum mtk_clks_map { MTK_CLK_SGMII2_CDR_FB, MTK_CLK_SGMII_CK, MTK_CLK_ETH2PLL, + MTK_CLK_WOCPU0, + MTK_CLK_WOCPU1, + MTK_CLK_NETSYS0, + MTK_CLK_NETSYS1, MTK_CLK_MAX }; @@ -657,6 +661,16 @@ enum mtk_clks_map { BIT(MTK_CLK_SGMII2_CDR_FB) | \ BIT(MTK_CLK_SGMII_CK) | \ BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP)) +#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + 
BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII2_TX_250M) | \ + BIT(MTK_CLK_SGMII2_RX_250M) | \ + BIT(MTK_CLK_SGMII2_CDR_REF) | \ + BIT(MTK_CLK_SGMII2_CDR_FB)) enum mtk_dev_state { MTK_HW_INIT, @@ -855,6 +869,10 @@ enum mkt_eth_capabilities { MTK_MUX_U3_GMAC2_TO_QPHY | \ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA) +#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \ + MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ + MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1) + struct mtk_tx_dma_desc_info { dma_addr_t addr; u32 size; -- cgit v1.2.3 From 2a11fb1d1b8501c430472c66ea5390cb719bbd8f Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 22 May 2022 09:36:27 -0400 Subject: net: fddi: skfp: smt: Remove extra parameters to vararg macro cppcheck reports [drivers/net/fddi/skfp/smt.c:750]: (warning) printf format string requires 0 parameters but 2 are given. DB_SBAN is a vararg macro, like DB_ESSN. Remove the extra args and the nl. Signed-off-by: Tom Rix Signed-off-by: David S. Miller --- drivers/net/fddi/skfp/smt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index 72c31f0013ad..dd15af4e98c2 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -747,7 +747,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) #endif #ifdef SBA - DB_SBAN(2,"SBA: RAF frame received\n",0,0) ; + DB_SBAN(2, "SBA: RAF frame received") ; sba_raf_received_pack(smc,sm,fs) ; #endif break ; -- cgit v1.2.3 From 0a3ad7d323686fbaae8688326cc5ea0d185c6fca Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 19 May 2022 22:15:23 -0700 Subject: net: dsa: restrict SMSC_LAN9303_I2C kconfig Since kconfig 'select' does not follow dependency chains, if symbol KSA selects KSB, then KSA should also depend on the same symbols that KSB depends on, in order to prevent Kconfig warnings and possible build errors. Change NET_DSA_SMSC_LAN9303_I2C and NET_DSA_SMSC_LAN9303_MDIO so that they are limited to VLAN_8021Q if the latter is enabled. This prevents the Kconfig warning: WARNING: unmet direct dependencies detected for NET_DSA_SMSC_LAN9303 Depends on [m]: NETDEVICES [=y] && NET_DSA [=y] && (VLAN_8021Q [=m] || VLAN_8021Q [=m]=n) Selected by [y]: - NET_DSA_SMSC_LAN9303_I2C [=y] && NETDEVICES [=y] && NET_DSA [=y] && I2C [=y] Fixes: 430065e26719 ("net: dsa: lan9303: add VLAN IDs to master device") Signed-off-by: Randy Dunlap Cc: Andrew Lunn Cc: Vivien Didelot Cc: Florian Fainelli Cc: Vladimir Oltean Cc: Juergen Borleis Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: Mans Rullgard Reviewed-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 37a3dabdce31..6d1fcb08bba1 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -72,7 +72,6 @@ source "drivers/net/dsa/realtek/Kconfig" config NET_DSA_SMSC_LAN9303 tristate - depends on VLAN_8021Q || VLAN_8021Q=n select NET_DSA_TAG_LAN9303 select REGMAP help @@ -82,6 +81,7 @@ config NET_DSA_SMSC_LAN9303 config NET_DSA_SMSC_LAN9303_I2C tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode" depends on I2C + depends on VLAN_8021Q || VLAN_8021Q=n select NET_DSA_SMSC_LAN9303 select REGMAP_I2C help @@ -91,6 +91,7 @@ config NET_DSA_SMSC_LAN9303_I2C config NET_DSA_SMSC_LAN9303_MDIO tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode" select NET_DSA_SMSC_LAN9303 + depends on VLAN_8021Q || VLAN_8021Q=n help Enable access functions if the SMSC/Microchip LAN9303 is configured for MDIO managed mode. -- cgit v1.2.3 From 0c7ab953d1f2358cce5bcddf96877b16966bbabc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:43:13 -0700 Subject: wifi: plfxlc: remove redundant NULL-check for GCC 12 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC is upset that we check the return value of plfxlc_usb_dev() even tho it can't be NULL: drivers/net/wireless/purelifi/plfxlc/usb.c: In function ‘resume’: drivers/net/wireless/purelifi/plfxlc/usb.c:840:20: warning: the comparison will always evaluate as ‘true’ for the address of ‘dev’ will never be NULL [-Waddress] 840 | if (!pl || !plfxlc_usb_dev(pl)) | ^ plfxlc_usb_dev() returns an address of one of the members of pl, so it's safe to drop these checks. Acked-by: Kalle Valo Signed-off-by: Jakub Kicinski --- drivers/net/wireless/purelifi/plfxlc/usb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c index d0e98b2f1365..8519cf0adfff 100644 --- a/drivers/net/wireless/purelifi/plfxlc/usb.c +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -824,7 +824,7 @@ static int suspend(struct usb_interface *interface, struct plfxlc_usb *pl = get_plfxlc_usb(interface); struct plfxlc_mac *mac = plfxlc_usb_to_mac(pl); - if (!pl || !plfxlc_usb_dev(pl)) + if (!pl) return -ENODEV; if (pl->initialized == 0) return 0; @@ -837,7 +837,7 @@ static int resume(struct usb_interface *interface) { struct plfxlc_usb *pl = get_plfxlc_usb(interface); - if (!pl || !plfxlc_usb_dev(pl)) + if (!pl) return -ENODEV; if (pl->was_running) plfxlc_usb_resume(pl); -- cgit v1.2.3 From e95032988053c17baf6c7e27024f5103a19a5f4a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:43:14 -0700 Subject: wifi: ath9k: silence array-bounds warning on GCC 12 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 12 says: drivers/net/wireless/ath/ath9k/mac.c: In function ‘ath9k_hw_resettxqueue’: drivers/net/wireless/ath/ath9k/mac.c:373:22: warning: array subscript 32 is above array bounds of ‘struct ath9k_tx_queue_info[10]’ [-Warray-bounds] 373 | qi = &ah->txq[q]; | ~~~~~~~^~~ I don't know where it got the 32 from, relegate the warning to W=1+. 
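As a rough standalone illustration (not taken from the ath9k sources; names here are made up), the kind of code that trips GCC 12's -Warray-bounds is a fixed-size array indexed by a value whose range the compiler over-approximates:

struct txq_info_sketch {
	int tqi_type;
};

struct hw_sketch {
	struct txq_info_sketch txq[10];
};

struct txq_info_sketch *get_txq_sketch(struct hw_sketch *ah, unsigned int q)
{
	/* If GCC's value-range tracking concludes that q can exceed 9,
	 * taking &ah->txq[q] is reported as an out-of-bounds array
	 * subscript, even when no runtime caller ever passes such a
	 * value.
	 */
	return &ah->txq[q];
}

The Makefile change below only mutes the diagnostic for mac.o on normal builds; since KBUILD_EXTRA_WARN is set for W=1 and above, those builds still see it.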
Acked-by: Kalle Valo Signed-off-by: Jakub Kicinski --- drivers/net/wireless/ath/ath9k/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index eff94bcd1f0a..9bdfcee2f448 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -45,6 +45,11 @@ ath9k_hw-y:= \ ar9003_eeprom.o \ ar9003_paprd.o +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_mac.o += -Wno-array-bounds +endif + ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \ -- cgit v1.2.3 From ee3db469dd317e82f57b13aa3bc61be5cb60c2b4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:43:15 -0700 Subject: wifi: rtlwifi: remove always-true condition pointed out by GCC 12 The .value is a two-dim array, not a pointer. struct iqk_matrix_regs { bool iqk_done; long value[1][IQK_MATRIX_REG_NUM]; }; Acked-by: Kalle Valo Signed-off-by: Jakub Kicinski --- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 51fe51bb0504..15e6a6aded31 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -2386,10 +2386,7 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel) rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "Just Read IQK Matrix reg for channel:%d....\n", channel); - if ((rtlphy->iqk_matrix[indexforchannel]. - value[0] != NULL) - /*&&(regea4 != 0) */) - _rtl92d_phy_patha_fill_iqk_matrix(hw, true, + _rtl92d_phy_patha_fill_iqk_matrix(hw, true, rtlphy->iqk_matrix[ indexforchannel].value, 0, (rtlphy->iqk_matrix[ -- cgit v1.2.3 From bd1d129daa3ede265a880e2c6a7f91eab0f4dc62 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:43:16 -0700 Subject: wifi: ath6k: silence false positive -Wno-dangling-pointer warning on GCC 12 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For some reason GCC 12 decided to complain about the common pattern of queuing an object onto a list on the stack in ath6k: inlined from ‘ath6kl_htc_mbox_tx’ at drivers/net/wireless/ath/ath6kl/htc_mbox.c:1142:3: include/linux/list.h:74:19: warning: storing the address of local variable ‘queue’ in ‘*&packet_15(D)->list.prev’ [-Wdangling-pointer=] 74 | new->prev = prev; | ~~~~~~~~~~^~~~~~ Move the warning to W=1, hopefully it goes away with a compiler update. 
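For reference, a minimal self-contained sketch (not the ath6kl code; simplified list helpers) of the pattern being flagged: a longer-lived object is linked onto a list head that lives on the caller's stack, so its ->prev briefly points at a stack variable, which is what -Wdangling-pointer objects to even though the list is only walked before the function returns:

struct list_head_sketch {
	struct list_head_sketch *next, *prev;
};

struct packet_sketch {
	struct list_head_sketch list;
	/* payload ... */
};

static void list_add_tail_sketch(struct list_head_sketch *new,
				 struct list_head_sketch *head)
{
	/* Storing the address of the on-stack head into the packet's
	 * list node is the assignment GCC 12 warns about.
	 */
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

void tx_one_sketch(struct packet_sketch *packet)
{
	struct list_head_sketch queue = { &queue, &queue };	/* on-stack list head */

	list_add_tail_sketch(&packet->list, &queue);
	/* ... drain 'queue' into the hardware before returning ... */
}

Scoping the workaround to htc_mbox.o under ifndef KBUILD_EXTRA_WARN, as in the hunk below, keeps the warning visible on W=1 builds.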
Acked-by: Kalle Valo Signed-off-by: Jakub Kicinski --- drivers/net/wireless/ath/ath6kl/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile index dc2b3b46781e..01cc0d50fee6 100644 --- a/drivers/net/wireless/ath/ath6kl/Makefile +++ b/drivers/net/wireless/ath/ath6kl/Makefile @@ -36,6 +36,11 @@ ath6kl_core-y += wmi.o ath6kl_core-y += core.o ath6kl_core-y += recovery.o +# FIXME: temporarily silence -Wdangling-pointer on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_htc_mbox.o += -Wno-dangling-pointer +endif + ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o -- cgit v1.2.3 From af3cdfd30c631dd6bb56933d67a0f0df2980b7d1 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:43:17 -0700 Subject: wifi: iwlwifi: use unsigned to silence a GCC 12 warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 12 says: drivers/net/wireless/intel/iwlwifi/mvm/sta.c:1076:37: warning: array subscript -1 is below array bounds of ‘struct iwl_mvm_tid_data[9]’ [-Warray-bounds] 1076 | if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) | ~~~~~~~~~~~~~~~~^~~~~ Whatever, tid is a bit from for_each_set_bit(), it's clearly unsigned. Acked-by: Kalle Valo Signed-off-by: Jakub Kicinski --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 406f0a50a5bf..bbb1522e7280 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1058,7 +1058,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, unsigned long *unshare_queues, unsigned long *changetid_queues) { - int tid; + unsigned int tid; lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->mutex); -- cgit v1.2.3 From 84f23fb192ef62cef438fdae932ce9d96d131b0c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:43:18 -0700 Subject: wifi: brcmfmac: work around a GCC 12 -Warray-bounds warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 12 really doesn't like partial struct allocations: drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c:2202:32: warning: array subscript ‘struct brcmf_ext_join_params_le[0]’ is partly outside array bounds of ‘void[70]’ [-Warray-bounds] 2202 | ext_join_params->scan_le.passive_time = | ^~ brcmfmac is trying to save 2 bytes at the end by either allocating or not allocating a channel member. Let's keep @join_params_size the "right" size but kmalloc() the full structure. 
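In case it helps, a simplified userspace sketch of the pattern (hypothetical member names, not the real brcmf_ext_join_params_le layout): the allocation size is trimmed when the optional trailing member is unused, so the struct object extends past the end of the buffer and GCC 12 flags member accesses as partly outside the allocation:

#include <stdlib.h>

struct scan_le_sketch {
	unsigned int passive_time;
};

struct ext_join_params_sketch {
	struct scan_le_sketch scan_le;
	unsigned short chanspec;	/* only sent when a channel is set */
};

int sketch(int have_channel)
{
	size_t join_params_size = sizeof(struct ext_join_params_sketch);
	struct ext_join_params_sketch *ext_join_params;

	if (!have_channel)
		join_params_size -= sizeof(unsigned short);

	/* Allocating only join_params_size is what GCC 12 dislikes; the
	 * fix allocates the full sizeof(*ext_join_params) while still
	 * using join_params_size as the length handed to the firmware.
	 */
	ext_join_params = calloc(1, join_params_size);
	if (!ext_join_params)
		return -1;

	ext_join_params->scan_le.passive_time = 100;

	free(ext_join_params);
	return 0;
}

The one-line change below trades those two saved bytes for a warning-free build.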
Acked-by: Kalle Valo Signed-off-by: Jakub Kicinski --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 360b103fe898..605206abe424 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2167,7 +2167,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, offsetof(struct brcmf_assoc_params_le, chanspec_list); if (cfg->channel) join_params_size += sizeof(u16); - ext_join_params = kzalloc(join_params_size, GFP_KERNEL); + ext_join_params = kzalloc(sizeof(*ext_join_params), GFP_KERNEL); if (ext_join_params == NULL) { err = -ENOMEM; goto done; -- cgit v1.2.3 From 13182526173679c3be794c8149226fc258fdfdbe Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:43:20 -0700 Subject: wifi: carl9170: silence a GCC 12 -Warray-bounds warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit carl9170 has a big union (struct carl9170_cmd) with all the command types in it. But it allocates buffers only large enough for a given command. This upsets GCC 12: drivers/net/wireless/ath/carl9170/cmd.c:125:30: warning: array subscript ‘struct carl9170_cmd[0]’ is partly outside array bounds of ‘unsigned char[8]’ [-Warray-bounds] 125 | tmp->hdr.cmd = cmd; | ~~~~~~~~~~~~~^~~~~ Punt the warning to W=1 for now. Hopefully GCC will learn to recognize which fields are in-bounds. Acked-by: Christian Lamparter Acked-by: Kalle Valo Signed-off-by: Jakub Kicinski --- drivers/net/wireless/ath/carl9170/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/carl9170/Makefile b/drivers/net/wireless/ath/carl9170/Makefile index 1a81868ce26d..7463baa62fa8 100644 --- a/drivers/net/wireless/ath/carl9170/Makefile +++ b/drivers/net/wireless/ath/carl9170/Makefile @@ -3,3 +3,8 @@ carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o carl9170-$(CONFIG_CARL9170_DEBUGFS) += debug.o obj-$(CONFIG_CARL9170) += carl9170.o + +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_cmd.o += -Wno-array-bounds +endif -- cgit v1.2.3 From 129b7532a0edc3f381a6c5f3bb1a155e743d141b Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 22 May 2022 00:37:38 +0300 Subject: net: dsa: fix missing adjustment of host broadcast flooding PGID_BC is configured statically by ocelot_init() to flood towards the CPU port module, and dynamically by ocelot_port_set_bcast_flood() towards all user ports. When the tagging protocol changes, the intention is to turn off flooding towards the old pipe towards the host, and to turn it on towards the new pipe. Due to a recent change which removed the adjustment of PGID_BC from felix_set_host_flood(), 3 things happen. - when we change from NPI to tag_8021q mode: in this mode, the CPU port module is accessed via registers, and used to read PTP packets with timestamps. We fail to disable broadcast flooding towards the CPU port module, and to enable broadcast flooding towards the physical port that serves as a DSA tag_8021q CPU port. - from tag_8021q to NPI mode: in this mode, the CPU port module is redirected to a physical port. 
We fail to disable broadcast flooding towards the physical tag_8021q CPU port, and to enable it towards the CPU port module at ocelot->num_phys_ports. - when the ports are put in promiscuous mode, we also fail to update PGID_BC towards the host pipe of the current protocol. First issue means that felix_check_xtr_pkt() has to do extra work, because it will not see only PTP packets, but also broadcasts. It needs to dequeue these packets just to drop them. Third issue is inconsequential, since PGID_BC is allocated from the nonreserved multicast PGID space, and these PGIDs are conveniently initialized to 0x7f (i.e. flood towards all ports except the CPU port module). Broadcasts reach the NPI port via ocelot_init(), and reach the tag_8021q CPU port via the hardware defaults. Second issue is also inconsequential, because we fail both at disabling and at enabling broadcast flooding on a port, so the defaults mentioned above are preserved, and they are fine except for the performance impact. Fixes: 7a29d220f4c0 ("net: dsa: felix: reimplement tagging protocol change with function pointers") Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index d38258a39d07..78c32b7de185 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -534,6 +534,9 @@ static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); + + val = bc ? mask : 0; + ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC); } static void -- cgit v1.2.3 From 61be79ba2d90162b0b213384f35eb8db7eb183b8 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 22 May 2022 00:37:39 +0300 Subject: net: dsa: felix: move the updating of PGID_CPU to the ocelot lib PGID_CPU must be updated every time a port is configured or unconfigured as a tag_8021q CPU port. The ocelot switch lib already has a hook for that operation, so move the updating of PGID_CPU to those hooks. These bits are pretty specific to DSA, so normally I would keep them out of the common switch lib, but when tag_8021q is in use, this has implications upon the forwarding mask determined by ocelot_apply_bridge_fwd_mask() and called extensively by the switch lib. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 7 ------- drivers/net/ethernet/mscc/ocelot.c | 31 +++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 78c32b7de185..1299f6a8ac5b 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -253,9 +253,6 @@ static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) ocelot_port_set_dsa_8021q_cpu(ocelot, port); - /* Overwrite PGID_CPU with the non-tagging port */ - ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU); - ocelot_apply_bridge_fwd_mask(ocelot, true); mutex_unlock(&ocelot->fwd_domain_lock); @@ -267,10 +264,6 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) ocelot_port_unset_dsa_8021q_cpu(ocelot, port); - /* Restore PGID_CPU */ - ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID, - PGID_CPU); - ocelot_apply_bridge_fwd_mask(ocelot, true); mutex_unlock(&ocelot->fwd_domain_lock); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e0d1d5b59981..ac9faf1923c5 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2165,6 +2165,33 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) } EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); +/* Update PGID_CPU which is the destination port mask used for whitelisting + * unicast addresses filtered towards the host. In the normal and NPI modes, + * this points to the analyzer entry for the CPU port module, while in DSA + * tag_8021q mode, it is a bit mask of all active CPU ports. + * PGID_SRC will take care of forwarding a packet from one user port to + * no more than a single CPU port. + */ +static void ocelot_update_pgid_cpu(struct ocelot *ocelot) +{ + int pgid_cpu = 0; + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu) + continue; + + pgid_cpu |= BIT(port); + } + + if (!pgid_cpu) + pgid_cpu = BIT(ocelot->num_phys_ports); + + ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU); +} + void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port) { u16 vid; @@ -2173,6 +2200,8 @@ void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port) for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) ocelot_vlan_member_add(ocelot, port, vid, true); + + ocelot_update_pgid_cpu(ocelot); } EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu); @@ -2184,6 +2213,8 @@ void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port) for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) ocelot_vlan_member_del(ocelot, port, vid); + + ocelot_update_pgid_cpu(ocelot); } EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu); -- cgit v1.2.3 From a72e23dd679c632bcf6ab47a3a1a58c1e777713a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 22 May 2022 00:37:40 +0300 Subject: net: dsa: felix: update bridge fwd mask from ocelot lib when changing tag_8021q CPU Add more logic to ocelot_port_{,un}set_dsa_8021q_cpu() from the ocelot switch lib by encapsulating the ocelot_apply_bridge_fwd_mask() call that felix used to have. This is necessary because the CPU port change procedure will also need to do this, and it's good to reduce code duplication by having an entry point in the ocelot switch lib that does all that is needed. 
Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 4 ---- drivers/net/ethernet/mscc/ocelot.c | 7 +++++-- include/soc/mscc/ocelot.h | 1 - 3 files changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 1299f6a8ac5b..b60d6e7295e1 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -253,8 +253,6 @@ static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) ocelot_port_set_dsa_8021q_cpu(ocelot, port); - ocelot_apply_bridge_fwd_mask(ocelot, true); - mutex_unlock(&ocelot->fwd_domain_lock); } @@ -264,8 +262,6 @@ static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) ocelot_port_unset_dsa_8021q_cpu(ocelot, port); - ocelot_apply_bridge_fwd_mask(ocelot, true); - mutex_unlock(&ocelot->fwd_domain_lock); } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index ac9faf1923c5..4011a7968be5 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2094,7 +2094,7 @@ u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) } EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask); -void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) +static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) { unsigned long cpu_fwd_mask; int port; @@ -2163,7 +2163,6 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) if (!joining && ocelot->ops->cut_through_fwd) ocelot->ops->cut_through_fwd(ocelot); } -EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask); /* Update PGID_CPU which is the destination port mask used for whitelisting * unicast addresses filtered towards the host. In the normal and NPI modes, @@ -2202,6 +2201,8 @@ void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port) ocelot_vlan_member_add(ocelot, port, vid, true); ocelot_update_pgid_cpu(ocelot); + + ocelot_apply_bridge_fwd_mask(ocelot, true); } EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu); @@ -2215,6 +2216,8 @@ void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port) ocelot_vlan_member_del(ocelot, port, vid); ocelot_update_pgid_cpu(ocelot); + + ocelot_apply_bridge_fwd_mask(ocelot, true); } EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 3b8c5a54fb00..2c90a24ca064 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -880,7 +880,6 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled, void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state); u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot); u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port); -void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining); int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags val); void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, -- cgit v1.2.3 From 8c166acb60f8284470db673b6dccd43e6fedfae2 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 22 May 2022 00:37:41 +0300 Subject: net: dsa: felix: directly call ocelot_port_{set,unset}_dsa_8021q_cpu Absorb the final details of calling ocelot_port_{,un}set_dsa_8021q_cpu(), i.e. the need to lock &ocelot->fwd_domain_lock, into the callee, to simplify the caller and permit easier code reuse later. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 36 +++++++++--------------------------- drivers/net/ethernet/mscc/ocelot.c | 8 ++++++++ 2 files changed, 17 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index b60d6e7295e1..033f7d5cc03d 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -240,31 +240,6 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) return 0; } -/* Alternatively to using the NPI functionality, that same hardware MAC - * connected internally to the enetc or fman DSA master can be configured to - * use the software-defined tag_8021q frame format. As far as the hardware is - * concerned, it thinks it is a "dumb switch" - the queues of the CPU port - * module are now disconnected from it, but can still be accessed through - * register-based MMIO. - */ -static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port) -{ - mutex_lock(&ocelot->fwd_domain_lock); - - ocelot_port_set_dsa_8021q_cpu(ocelot, port); - - mutex_unlock(&ocelot->fwd_domain_lock); -} - -static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port) -{ - mutex_lock(&ocelot->fwd_domain_lock); - - ocelot_port_unset_dsa_8021q_cpu(ocelot, port); - - mutex_unlock(&ocelot->fwd_domain_lock); -} - static int felix_trap_get_cpu_port(struct dsa_switch *ds, const struct ocelot_vcap_filter *trap) { @@ -423,6 +398,13 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) return BIT(ocelot->num_phys_ports); } +/* Alternatively to using the NPI functionality, that same hardware MAC + * connected internally to the enetc or fman DSA master can be configured to + * use the software-defined tag_8021q frame format. As far as the hardware is + * concerned, it thinks it is a "dumb switch" - the queues of the CPU port + * module are now disconnected from it, but can still be accessed through + * register-based MMIO. 
+ */ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { .setup = felix_tag_npi_setup, .teardown = felix_tag_npi_teardown, @@ -440,7 +422,7 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds) return err; dsa_switch_for_each_cpu_port(cpu_dp, ds) { - felix_8021q_cpu_port_init(ocelot, cpu_dp->index); + ocelot_port_set_dsa_8021q_cpu(ocelot, cpu_dp->index); /* TODO we could support multiple CPU ports in tag_8021q mode */ break; @@ -490,7 +472,7 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds) } dsa_switch_for_each_cpu_port(cpu_dp, ds) { - felix_8021q_cpu_port_deinit(ocelot, cpu_dp->index); + ocelot_port_unset_dsa_8021q_cpu(ocelot, cpu_dp->index); /* TODO we could support multiple CPU ports in tag_8021q mode */ break; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 4011a7968be5..d208d57f4894 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2195,6 +2195,8 @@ void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port) { u16 vid; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ports[port]->is_dsa_8021q_cpu = true; for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) @@ -2203,6 +2205,8 @@ void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port) ocelot_update_pgid_cpu(ocelot); ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu); @@ -2210,6 +2214,8 @@ void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port) { u16 vid; + mutex_lock(&ocelot->fwd_domain_lock); + ocelot->ports[port]->is_dsa_8021q_cpu = false; for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) @@ -2218,6 +2224,8 @@ void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port) ocelot_update_pgid_cpu(ocelot); ocelot_apply_bridge_fwd_mask(ocelot, true); + + mutex_unlock(&ocelot->fwd_domain_lock); } EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu); -- cgit v1.2.3 From c295f9831f1db12330d6a28a1cb2bd2562535e37 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 22 May 2022 00:37:42 +0300 Subject: net: mscc: ocelot: switch from {,un}set to {,un}assign for tag_8021q CPU ports There is a desire for the felix driver to gain support for multiple tag_8021q CPU ports, but the current model prevents it. This is because ocelot_apply_bridge_fwd_mask() only takes into consideration whether a port is a tag_8021q CPU port, but not whose CPU port it is. We need a model where we can have a direct affinity between an ocelot port and a tag_8021q CPU port. This serves as the basis for multiple CPU ports. Declare a "dsa_8021q_cpu" backpointer in struct ocelot_port which encodes that affinity. Repurpose the "ocelot_set_dsa_8021q_cpu" API to "ocelot_assign_dsa_8021q_cpu" to express the change of paradigm. Note that this change makes the first practical use of the new ocelot_port->index field in ocelot_port_unassign_dsa_8021q_cpu(), where we need to remove the old tag_8021q CPU port from the reserved VLAN range. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 27 +++----- drivers/net/dsa/ocelot/felix_vsc9959.c | 3 +- drivers/net/ethernet/mscc/ocelot.c | 120 ++++++++++++++++++++------------- include/soc/mscc/ocelot.h | 10 ++- 4 files changed, 92 insertions(+), 68 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 033f7d5cc03d..01d8a731851e 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -414,21 +414,18 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { static int felix_tag_8021q_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; - struct dsa_port *dp, *cpu_dp; + struct dsa_port *dp; int err; err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); if (err) return err; - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - ocelot_port_set_dsa_8021q_cpu(ocelot, cpu_dp->index); - - /* TODO we could support multiple CPU ports in tag_8021q mode */ - break; - } + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index, + dp->cpu_dp->index); - dsa_switch_for_each_available_port(dp, ds) { + dsa_switch_for_each_available_port(dp, ds) /* This overwrites ocelot_init(): * Do not forward BPDU frames to the CPU port module, * for 2 reasons: @@ -442,7 +439,6 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds) ocelot_write_gix(ocelot, ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); - } /* The ownership of the CPU port module's queues might have just been * transferred to the tag_8021q tagger from the NPI-based tagger. @@ -459,9 +455,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds) static void felix_tag_8021q_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; - struct dsa_port *dp, *cpu_dp; + struct dsa_port *dp; - dsa_switch_for_each_available_port(dp, ds) { + dsa_switch_for_each_available_port(dp, ds) /* Restore the logic from ocelot_init: * do not forward BPDU frames to the front ports. 
*/ @@ -469,14 +465,9 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds) ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); - } - dsa_switch_for_each_cpu_port(cpu_dp, ds) { - ocelot_port_unset_dsa_8021q_cpu(ocelot, cpu_dp->index); - - /* TODO we could support multiple CPU ports in tag_8021q mode */ - break; - } + dsa_switch_for_each_user_port(dp, ds) + ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index); dsa_tag_8021q_unregister(ds); } diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 98caca4317d7..570d0204b7be 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -2162,7 +2162,8 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot) if (ocelot->npi >= 0) mask |= BIT(ocelot->npi); else - mask |= ocelot_get_dsa_8021q_cpu_mask(ocelot); + mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); } /* Calculate the minimum link speed, among the ports that are diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index d208d57f4894..8da7e25a47c9 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -2046,57 +2046,68 @@ static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond) return __ffs(bond_mask); } -u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port) +static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot, + struct ocelot_port *cpu) { - struct ocelot_port *ocelot_port = ocelot->ports[src_port]; - const struct net_device *bridge; u32 mask = 0; int port; - if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING) - return 0; - - bridge = ocelot_port->bridge; - if (!bridge) - return 0; - for (port = 0; port < ocelot->num_phys_ports; port++) { - ocelot_port = ocelot->ports[port]; + struct ocelot_port *ocelot_port = ocelot->ports[port]; if (!ocelot_port) continue; - if (ocelot_port->stp_state == BR_STATE_FORWARDING && - ocelot_port->bridge == bridge) + if (ocelot_port->dsa_8021q_cpu == cpu) mask |= BIT(port); } return mask; } -EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask); -u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot) +u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu; + + if (!cpu_port) + return 0; + + return BIT(cpu_port->index); +} +EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask); + +u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[src_port]; + const struct net_device *bridge; u32 mask = 0; int port; + if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING) + return 0; + + bridge = ocelot_port->bridge; + if (!bridge) + return 0; + for (port = 0; port < ocelot->num_phys_ports; port++) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; + ocelot_port = ocelot->ports[port]; if (!ocelot_port) continue; - if (ocelot_port->is_dsa_8021q_cpu) + if (ocelot_port->stp_state == BR_STATE_FORWARDING && + ocelot_port->bridge == bridge) mask |= BIT(port); } return mask; } -EXPORT_SYMBOL_GPL(ocelot_get_dsa_8021q_cpu_mask); +EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask); static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) { - unsigned long cpu_fwd_mask; int port; lockdep_assert_held(&ocelot->fwd_domain_lock); @@ -2108,15 +2119,6 @@ static void 
ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) if (joining && ocelot->ops->cut_through_fwd) ocelot->ops->cut_through_fwd(ocelot); - /* If a DSA tag_8021q CPU exists, it needs to be included in the - * regular forwarding path of the front ports regardless of whether - * those are bridged or standalone. - * If DSA tag_8021q is not used, this returns 0, which is fine because - * the hardware-based CPU port module can be a destination for packets - * even if it isn't part of PGID_SRC. - */ - cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot); - /* Apply FWD mask. The loop is needed to add/remove the current port as * a source for the other ports. */ @@ -2129,17 +2131,19 @@ static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) mask = 0; } else if (ocelot_port->is_dsa_8021q_cpu) { /* The DSA tag_8021q CPU ports need to be able to - * forward packets to all other ports except for - * themselves + * forward packets to all ports assigned to them. */ - mask = GENMASK(ocelot->num_phys_ports - 1, 0); - mask &= ~cpu_fwd_mask; + mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot, + ocelot_port); } else if (ocelot_port->bridge) { struct net_device *bond = ocelot_port->bond; mask = ocelot_get_bridge_fwd_mask(ocelot, port); - mask |= cpu_fwd_mask; mask &= ~BIT(port); + + mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); + if (bond) mask &= ~ocelot_get_bond_mask(ocelot, bond); } else { @@ -2147,7 +2151,8 @@ static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining) * ports (if those exist), or to the hardware CPU port * module otherwise. */ - mask = cpu_fwd_mask; + mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot, + port); } ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port); @@ -2191,43 +2196,66 @@ static void ocelot_update_pgid_cpu(struct ocelot *ocelot) ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU); } -void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port) +void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, + int cpu) { + struct ocelot_port *cpu_port = ocelot->ports[cpu]; u16 vid; mutex_lock(&ocelot->fwd_domain_lock); - ocelot->ports[port]->is_dsa_8021q_cpu = true; + ocelot->ports[port]->dsa_8021q_cpu = cpu_port; + + if (!cpu_port->is_dsa_8021q_cpu) { + cpu_port->is_dsa_8021q_cpu = true; - for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) - ocelot_vlan_member_add(ocelot, port, vid, true); + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_add(ocelot, cpu, vid, true); - ocelot_update_pgid_cpu(ocelot); + ocelot_update_pgid_cpu(ocelot); + } ocelot_apply_bridge_fwd_mask(ocelot, true); mutex_unlock(&ocelot->fwd_domain_lock); } -EXPORT_SYMBOL_GPL(ocelot_port_set_dsa_8021q_cpu); +EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu); -void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port) +void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port) { + struct ocelot_port *cpu_port = ocelot->ports[port]->dsa_8021q_cpu; + bool keep = false; u16 vid; + int p; mutex_lock(&ocelot->fwd_domain_lock); - ocelot->ports[port]->is_dsa_8021q_cpu = false; + ocelot->ports[port]->dsa_8021q_cpu = NULL; + + for (p = 0; p < ocelot->num_phys_ports; p++) { + if (!ocelot->ports[p]) + continue; + + if (ocelot->ports[p]->dsa_8021q_cpu == cpu_port) { + keep = true; + break; + } + } + + if (!keep) { + cpu_port->is_dsa_8021q_cpu = false; - for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) - 
ocelot_vlan_member_del(ocelot, port, vid); + for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_del(ocelot, cpu_port->index, vid); - ocelot_update_pgid_cpu(ocelot); + ocelot_update_pgid_cpu(ocelot); + } ocelot_apply_bridge_fwd_mask(ocelot, true); mutex_unlock(&ocelot->fwd_domain_lock); } -EXPORT_SYMBOL_GPL(ocelot_port_unset_dsa_8021q_cpu); +EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu); void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 2c90a24ca064..5f88385a7748 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -654,6 +654,8 @@ struct ocelot_mirror { int to; }; +struct ocelot_port; + struct ocelot_port { struct ocelot *ocelot; @@ -662,6 +664,8 @@ struct ocelot_port { struct net_device *bond; struct net_device *bridge; + struct ocelot_port *dsa_8021q_cpu; + /* VLAN that untagged frames are classified to, on ingress */ const struct ocelot_bridge_vlan *pvid_vlan; @@ -865,8 +869,9 @@ void ocelot_deinit(struct ocelot *ocelot); void ocelot_init_port(struct ocelot *ocelot, int port); void ocelot_deinit_port(struct ocelot *ocelot, int port); -void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port); -void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port); +void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port, int cpu); +void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port); +u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port); /* DSA callbacks */ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data); @@ -878,7 +883,6 @@ void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs); int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled, struct netlink_ext_ack *extack); void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state); -u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot); u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port); int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags val); -- cgit v1.2.3 From a4e044dc4c5b1e03f4195224c47108dc44122917 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 22 May 2022 00:37:43 +0300 Subject: net: dsa: felix: tag_8021q preparation for multiple CPU ports Update the VCAP filters to support multiple tag_8021q CPU ports. TX works using a filter for VLAN ID on the ingress of the CPU port, with a redirect and a VLAN pop action. This can be updated trivially by amending the ingress port mask of this rule to match on all tag_8021q CPU ports. RX works using a filter for ingress port on the egress of the CPU port, with a VLAN push action. Here we need to replicate these filters for each tag_8021q CPU port, and let them all have the same action. This means that the OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN() cookie needs to encode a unique value for every {user port, CPU port} pair it's given. Do this by encoding the CPU port in the upper 16 bits of the cookie, and the user port in the lower 16 bits. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 104 +++++++++++++++++++++++++---------------- include/soc/mscc/ocelot_vcap.h | 2 +- 2 files changed, 65 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 01d8a731851e..3e07dc39007a 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -45,24 +45,26 @@ static struct net_device *felix_classify_db(struct dsa_db db) /* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that * the tagger can perform RX source port identification. */ -static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid) { struct ocelot_vcap_filter *outer_tagging_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int key_length, upstream, err; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int key_length, err; key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length; - upstream = dsa_upstream_port(ds, port); outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!outer_tagging_rule) return -ENOMEM; + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); + outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY; outer_tagging_rule->prio = 1; - outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port); + outer_tagging_rule->id.cookie = cookie; outer_tagging_rule->id.tc_offload = false; outer_tagging_rule->block_id = VCAP_ES0; outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -83,16 +85,19 @@ static int felix_tag_8021q_vlan_add_rx(struct felix *felix, int port, u16 vid) return err; } -static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port, + int upstream, u16 vid) { struct ocelot_vcap_filter *outer_tagging_rule; struct ocelot_vcap_block *block_vcap_es0; - struct ocelot *ocelot = &felix->ocelot; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; block_vcap_es0 = &ocelot->block[VCAP_ES0]; + cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, - port, false); + cookie, false); if (!outer_tagging_rule) return -ENOENT; @@ -102,12 +107,14 @@ static int felix_tag_8021q_vlan_del_rx(struct felix *felix, int port, u16 vid) /* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 * rules for steering those tagged packets towards the correct destination port */ -static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port, + u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; - struct ocelot *ocelot = &felix->ocelot; - struct dsa_switch *ds = felix->ds; - int upstream, err; + unsigned long cpu_ports = dsa_cpu_ports(ds); + struct ocelot *ocelot = ds->priv; + unsigned long cookie; + int err; untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!untagging_rule) @@ -119,14 +126,14 @@ static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid) return -ENOMEM; } - upstream = dsa_upstream_port(ds, port); + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule->key_type = OCELOT_VCAP_KEY_ANY; - untagging_rule->ingress_port_mask = BIT(upstream); + 
untagging_rule->ingress_port_mask = cpu_ports; untagging_rule->vlan.vid.value = vid; untagging_rule->vlan.vid.mask = VLAN_VID_MASK; untagging_rule->prio = 1; - untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); + untagging_rule->id.cookie = cookie; untagging_rule->id.tc_offload = false; untagging_rule->block_id = VCAP_IS1; untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -143,11 +150,13 @@ static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid) return err; } + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); + redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; - redirect_rule->ingress_port_mask = BIT(upstream); + redirect_rule->ingress_port_mask = cpu_ports; redirect_rule->pag = port; redirect_rule->prio = 1; - redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); + redirect_rule->id.cookie = cookie; redirect_rule->id.tc_offload = false; redirect_rule->block_id = VCAP_IS2; redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; @@ -165,19 +174,21 @@ static int felix_tag_8021q_vlan_add_tx(struct felix *felix, int port, u16 vid) return 0; } -static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid) +static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot_vcap_block *block_vcap_is1; struct ocelot_vcap_block *block_vcap_is2; - struct ocelot *ocelot = &felix->ocelot; + struct ocelot *ocelot = ds->priv; + unsigned long cookie; int err; block_vcap_is1 = &ocelot->block[VCAP_IS1]; block_vcap_is2 = &ocelot->block[VCAP_IS2]; + cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, - port, false); + cookie, false); if (!untagging_rule) return -ENOENT; @@ -185,8 +196,9 @@ static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid) if (err) return err; + cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, - port, false); + cookie, false); if (!redirect_rule) return -ENOENT; @@ -196,7 +208,7 @@ static int felix_tag_8021q_vlan_del_tx(struct felix *felix, int port, u16 vid) static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, u16 flags) { - struct ocelot *ocelot = ds->priv; + struct dsa_port *cpu_dp; int err; /* tag_8021q.c assumes we are implementing this via port VLAN @@ -206,38 +218,50 @@ static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, if (!dsa_is_user_port(ds, port)) return 0; - err = felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); - if (err) - return err; - - err = felix_tag_8021q_vlan_add_tx(ocelot_to_felix(ocelot), port, vid); - if (err) { - felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); - return err; + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid); + if (err) + return err; } + err = felix_tag_8021q_vlan_add_tx(ds, port, vid); + if (err) + goto add_tx_failed; + return 0; + +add_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); + + return err; } static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { - struct ocelot *ocelot = ds->priv; + struct dsa_port *cpu_dp; int err; if (!dsa_is_user_port(ds, port)) return 0; - err = felix_tag_8021q_vlan_del_rx(ocelot_to_felix(ocelot), port, vid); - if (err) - return err; - - 
err = felix_tag_8021q_vlan_del_tx(ocelot_to_felix(ocelot), port, vid); - if (err) { - felix_tag_8021q_vlan_add_rx(ocelot_to_felix(ocelot), port, vid); - return err; + dsa_switch_for_each_cpu_port(cpu_dp, ds) { + err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); + if (err) + return err; } + err = felix_tag_8021q_vlan_del_tx(ds, port, vid); + if (err) + goto del_tx_failed; + return 0; + +del_tx_failed: + dsa_switch_for_each_cpu_port(cpu_dp, ds) + felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid); + + return err; } static int felix_trap_get_cpu_port(struct dsa_switch *ds, diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h index de26c992f821..c601a4598b0d 100644 --- a/include/soc/mscc/ocelot_vcap.h +++ b/include/soc/mscc/ocelot_vcap.h @@ -11,7 +11,7 @@ /* Cookie definitions for private VCAP filters installed by the driver. * Must be unique per VCAP block. */ -#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port) (port) +#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port)) #define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port) #define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port) #define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port)) -- cgit v1.2.3 From 0097e86c8ec5a68f20bc1ae19f5c21fb0e751f83 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 19 May 2022 17:08:00 +0300 Subject: net: ethernet: mtk_eth_soc: fix error code in mtk_flow_offload_replace() Preserve the error code from mtk_foe_entry_commit(). Do not return success. Fixes: c4f033d9e03e ("net: ethernet: mtk_eth_soc: rework hardware flow table management") Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index c9353071f96a..90e7dfd011c9 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -435,7 +435,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) memcpy(&entry->data, &foe, sizeof(entry->data)); entry->wed_index = wed_index; - if (mtk_foe_entry_commit(eth->ppe, entry) < 0) + err = mtk_foe_entry_commit(eth->ppe, entry); + if (err < 0) goto free; err = rhashtable_insert_fast(ð->flow_table, &entry->node, -- cgit v1.2.3 From 32c53420d2a0c2c3a691edb895d90d9d028ca3e0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 18 May 2022 20:13:45 -0700 Subject: eth: de4x5: remove support for Generic DECchip & DIGITAL EtherWORKS PCI/EISA Looks like almost all changes to this driver had been tree-wide refactoring since git era begun. There is one commit from Al 15 years ago which could potentially be fixing a real bug. The driver is using virt_to_bus() and is a real magnet for pointless cleanups. It seems unlikely to have real users. Let's try to shed this maintenance burden. Signed-off-by: Jakub Kicinski Acked-by: Michael Ellerman (powerpc) Acked-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- .../device_drivers/ethernet/dec/de4x5.rst | 189 - .../networking/device_drivers/ethernet/index.rst | 1 - arch/mips/configs/mtx1_defconfig | 1 - arch/powerpc/configs/chrp32_defconfig | 1 - arch/powerpc/configs/ppc6xx_defconfig | 1 - drivers/net/ethernet/dec/tulip/Kconfig | 15 - drivers/net/ethernet/dec/tulip/Makefile | 1 - drivers/net/ethernet/dec/tulip/de4x5.c | 5591 -------------------- drivers/net/ethernet/dec/tulip/de4x5.h | 1017 ---- 9 files changed, 6817 deletions(-) delete mode 100644 Documentation/networking/device_drivers/ethernet/dec/de4x5.rst delete mode 100644 drivers/net/ethernet/dec/tulip/de4x5.c delete mode 100644 drivers/net/ethernet/dec/tulip/de4x5.h (limited to 'drivers') diff --git a/Documentation/networking/device_drivers/ethernet/dec/de4x5.rst b/Documentation/networking/device_drivers/ethernet/dec/de4x5.rst deleted file mode 100644 index e03e9c631879..000000000000 --- a/Documentation/networking/device_drivers/ethernet/dec/de4x5.rst +++ /dev/null @@ -1,189 +0,0 @@ -.. SPDX-License-Identifier: GPL-2.0 - -=================================== -DEC EtherWORKS Ethernet De4x5 cards -=================================== - - Originally, this driver was written for the Digital Equipment - Corporation series of EtherWORKS Ethernet cards: - - - DE425 TP/COAX EISA - - DE434 TP PCI - - DE435 TP/COAX/AUI PCI - - DE450 TP/COAX/AUI PCI - - DE500 10/100 PCI Fasternet - - but it will now attempt to support all cards which conform to the - Digital Semiconductor SROM Specification. The driver currently - recognises the following chips: - - - DC21040 (no SROM) - - DC21041[A] - - DC21140[A] - - DC21142 - - DC21143 - - So far the driver is known to work with the following cards: - - - KINGSTON - - Linksys - - ZNYX342 - - SMC8432 - - SMC9332 (w/new SROM) - - ZNYX31[45] - - ZNYX346 10/100 4 port (can act as a 10/100 bridge!) - - The driver has been tested on a relatively busy network using the DE425, - DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred - 16M of data to a DECstation 5000/200 as follows:: - - TCP UDP - TX RX TX RX - DE425 1030k 997k 1170k 1128k - DE434 1063k 995k 1170k 1125k - DE435 1063k 995k 1170k 1125k - DE500 1063k 998k 1170k 1125k in 10Mb/s mode - - All values are typical (in kBytes/sec) from a sample of 4 for each - measurement. Their error is +/-20k on a quiet (private) network and also - depend on what load the CPU has. - ----------------------------------------------------------------------------- - - The ability to load this driver as a loadable module has been included - and used extensively during the driver development (to save those long - reboot sequences). Loadable module support under PCI and EISA has been - achieved by letting the driver autoprobe as if it were compiled into the - kernel. Do make sure you're not sharing interrupts with anything that - cannot accommodate interrupt sharing! - - To utilise this ability, you have to do 8 things: - - 0) have a copy of the loadable modules code installed on your system. - 1) copy de4x5.c from the /linux/drivers/net directory to your favourite - temporary directory. - 2) for fixed autoprobes (not recommended), edit the source code near - line 5594 to reflect the I/O address you're using, or assign these when - loading by:: - - insmod de4x5 io=0xghh where g = bus number - hh = device number - - .. note:: - - autoprobing for modules is now supported by default. You may just - use:: - - insmod de4x5 - - to load all available boards. For a specific board, still use - the 'io=?' above. 
- 3) compile de4x5.c, but include -DMODULE in the command line to ensure - that the correct bits are compiled (see end of source code). - 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a - kernel with the de4x5 configuration turned off and reboot. - 5) insmod de4x5 [io=0xghh] - 6) run the net startup bits for your new eth?? interface(s) manually - (usually /etc/rc.inet[12] at boot time). - 7) enjoy! - - To unload a module, turn off the associated interface(s) - 'ifconfig eth?? down' then 'rmmod de4x5'. - - Automedia detection is included so that in principle you can disconnect - from, e.g. TP, reconnect to BNC and things will still work (after a - pause while the driver figures out where its media went). My tests - using ping showed that it appears to work.... - - By default, the driver will now autodetect any DECchip based card. - Should you have a need to restrict the driver to DIGITAL only cards, you - can compile with a DEC_ONLY define, or if loading as a module, use the - 'dec_only=1' parameter. - - I've changed the timing routines to use the kernel timer and scheduling - functions so that the hangs and other assorted problems that occurred - while autosensing the media should be gone. A bonus for the DC21040 - auto media sense algorithm is that it can now use one that is more in - line with the rest (the DC21040 chip doesn't have a hardware timer). - The downside is the 1 'jiffies' (10ms) resolution. - - IEEE 802.3u MII interface code has been added in anticipation that some - products may use it in the future. - - The SMC9332 card has a non-compliant SROM which needs fixing - I have - patched this driver to detect it because the SROM format used complies - to a previous DEC-STD format. - - I have removed the buffer copies needed for receive on Intels. I cannot - remove them for Alphas since the Tulip hardware only does longword - aligned DMA transfers and the Alphas get alignment traps with non - longword aligned data copies (which makes them really slow). No comment. - - I have added SROM decoding routines to make this driver work with any - card that supports the Digital Semiconductor SROM spec. This will help - all cards running the dc2114x series chips in particular. Cards using - the dc2104x chips should run correctly with the basic driver. I'm in - debt to for the testing and feedback that helped get - this feature working. So far we have tested KINGSTON, SMC8432, SMC9332 - (with the latest SROM complying with the SROM spec V3: their first was - broken), ZNYX342 and LinkSys. ZNYX314 (dual 21041 MAC) and ZNYX 315 - (quad 21041 MAC) cards also appear to work despite their incorrectly - wired IRQs. - - I have added a temporary fix for interrupt problems when some SCSI cards - share the same interrupt as the DECchip based cards. The problem occurs - because the SCSI card wants to grab the interrupt as a fast interrupt - (runs the service routine with interrupts turned off) vs. this card - which really needs to run the service routine with interrupts turned on. - This driver will now add the interrupt service routine as a fast - interrupt if it is bounced from the slow interrupt. THIS IS NOT A - RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time - until people sort out their compatibility issues and the kernel - interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST - INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not - run on the same interrupt. PCMCIA/CardBus is another can of worms... 
- - Finally, I think I have really fixed the module loading problem with - more than one DECchip based card. As a side effect, I don't mess with - the device structure any more which means that if more than 1 card in - 2.0.x is installed (4 in 2.1.x), the user will have to edit - linux/drivers/net/Space.c to make room for them. Hence, module loading - is the preferred way to use this driver, since it doesn't have this - limitation. - - Where SROM media detection is used and full duplex is specified in the - SROM, the feature is ignored unless lp->params.fdx is set at compile - time OR during a module load (insmod de4x5 args='eth??:fdx' [see - below]). This is because there is no way to automatically detect full - duplex links except through autonegotiation. When I include the - autonegotiation feature in the SROM autoconf code, this detection will - occur automatically for that case. - - Command line arguments are now allowed, similar to passing arguments - through LILO. This will allow a per adapter board set up of full duplex - and media. The only lexical constraints are: the board name (dev->name) - appears in the list before its parameters. The list of parameters ends - either at the end of the parameter list or with another board name. The - following parameters are allowed: - - ========= =============================================== - fdx for full duplex - autosense to set the media/speed; with the following - sub-parameters: - TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO - ========= =============================================== - - Case sensitivity is important for the sub-parameters. They *must* be - upper case. Examples:: - - insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. - - For a compiled in driver, in linux/drivers/net/CONFIG, place e.g.:: - - DE4X5_OPTS = -DDE4X5_PARM='"eth0:fdx autosense=AUI eth2:autosense=TP"' - - Yes, I know full duplex isn't permissible on BNC or AUI; they're just - examples. By default, full duplex is turned off and AUTO is the default - autosense setting. In reality, I expect only the full duplex option to - be used. Note the use of single quotes in the two examples above and the - lack of commas to separate items. 
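[Editor's note on the per-board argument syntax documented in the file removed above: the sketch below is a standalone, hypothetical illustration, not the driver's de4x5_parse_params() and not part of the removed sources. It only shows one way the documented list can be walked, where a "name:" token opens a board section and the "fdx" / "autosense=<media>" tokens that follow apply to that board until the next name appears::

    /*
     * Hypothetical standalone sketch of the documented per-board argument
     * syntax (e.g. "eth1:fdx autosense=BNC eth0:autosense=100Mb").
     * Not the kernel driver's parser; userspace C for illustration only.
     */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char args[] = "eth1:fdx autosense=BNC eth0:autosense=100Mb";
            char board[16] = "";
            char *tok = strtok(args, " \t");

            while (tok) {
                    char *colon = strchr(tok, ':');

                    if (colon) {    /* "<name>:" opens a new board section */
                            size_t len = (size_t)(colon - tok);

                            if (len >= sizeof(board))
                                    len = sizeof(board) - 1;
                            memcpy(board, tok, len);
                            board[len] = '\0';
                            tok = colon + 1;   /* a parameter may follow ':' */
                            if (*tok == '\0') {
                                    tok = strtok(NULL, " \t");
                                    continue;
                            }
                    }
                    if (!strcmp(tok, "fdx"))
                            printf("%s: full duplex requested\n", board);
                    else if (!strncmp(tok, "autosense=", 10))
                            printf("%s: autosense media %s\n", board, tok + 10);
                    tok = strtok(NULL, " \t");
            }
            return 0;
    }

Run on the documented example string, this attributes full duplex and BNC autosensing to eth1 and 100Mb autosensing to eth0, matching the semantics described in the removed documentation.]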
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index 21a97703421d..4e06684d079b 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -19,7 +19,6 @@ Contents: cirrus/cs89x0 dlink/dl2k davicom/dm9000 - dec/de4x5 dec/dmfe freescale/dpaa freescale/dpaa2/index diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 0cb4d9aa14d1..4194e79b435c 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -277,7 +277,6 @@ CONFIG_CHELSIO_T1=m CONFIG_NET_TULIP=y CONFIG_DE2104X=m CONFIG_TULIP=m -CONFIG_DE4X5=m CONFIG_WINBOND_840=m CONFIG_DM9102=m CONFIG_ULI526X=m diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig index a4a805b87469..fb314f75ad4b 100644 --- a/arch/powerpc/configs/chrp32_defconfig +++ b/arch/powerpc/configs/chrp32_defconfig @@ -53,7 +53,6 @@ CONFIG_ATA_GENERIC=y CONFIG_NETDEVICES=y CONFIG_PCNET32=y CONFIG_NET_TULIP=y -CONFIG_DE4X5=y CONFIG_MV643XX_ETH=y CONFIG_8139CP=y CONFIG_8139TOO=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index bb549cb1c3e3..b622ecd73286 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -444,7 +444,6 @@ CONFIG_NET_TULIP=y CONFIG_DE2104X=m CONFIG_TULIP=m CONFIG_TULIP_MMIO=y -CONFIG_DE4X5=m CONFIG_WINBOND_840=m CONFIG_DM9102=m CONFIG_ULI526X=m diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 79dc336ce709..078a12f07e96 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -104,21 +104,6 @@ config TULIP_DM910X def_bool y depends on TULIP && SPARC -config DE4X5 - tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" - depends on (PCI || EISA) - depends on VIRT_TO_BUS || ALPHA || PPC || SPARC - select CRC32 - help - This is support for the DIGITAL series of PCI/EISA Ethernet cards. - These include the DE425, DE434, DE435, DE450 and DE500 models. If - you have a network card of this type, say Y. More specific - information is contained in - . - - To compile this driver as a module, choose M here. The module will - be called de4x5. - config WINBOND_840 tristate "Winbond W89c840 Ethernet support" depends on PCI diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile index 8aab37564d5d..d4f1d21d29a0 100644 --- a/drivers/net/ethernet/dec/tulip/Makefile +++ b/drivers/net/ethernet/dec/tulip/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_DM9102) += dmfe.o obj-$(CONFIG_WINBOND_840) += winbond-840.o obj-$(CONFIG_DE2104X) += de2104x.o obj-$(CONFIG_TULIP) += tulip.o -obj-$(CONFIG_DE4X5) += de4x5.o obj-$(CONFIG_ULI526X) += uli526x.o # Declare multi-part drivers. diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c deleted file mode 100644 index 71730ef4cd57..000000000000 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ /dev/null @@ -1,5591 +0,0 @@ -/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500 - ethernet driver for Linux. - - Copyright 1994, 1995 Digital Equipment Corporation. - - Testing resources for this driver have been made available - in part by NASA Ames Research Center (mjacob@nas.nasa.gov). - - The author may be reached at davies@maniac.ultranet.com. 
- - This program is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the - Free Software Foundation; either version 2 of the License, or (at your - option) any later version. - - THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 675 Mass Ave, Cambridge, MA 02139, USA. - - Originally, this driver was written for the Digital Equipment - Corporation series of EtherWORKS ethernet cards: - - DE425 TP/COAX EISA - DE434 TP PCI - DE435 TP/COAX/AUI PCI - DE450 TP/COAX/AUI PCI - DE500 10/100 PCI Fasternet - - but it will now attempt to support all cards which conform to the - Digital Semiconductor SROM Specification. The driver currently - recognises the following chips: - - DC21040 (no SROM) - DC21041[A] - DC21140[A] - DC21142 - DC21143 - - So far the driver is known to work with the following cards: - - KINGSTON - Linksys - ZNYX342 - SMC8432 - SMC9332 (w/new SROM) - ZNYX31[45] - ZNYX346 10/100 4 port (can act as a 10/100 bridge!) - - The driver has been tested on a relatively busy network using the DE425, - DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred - 16M of data to a DECstation 5000/200 as follows: - - TCP UDP - TX RX TX RX - DE425 1030k 997k 1170k 1128k - DE434 1063k 995k 1170k 1125k - DE435 1063k 995k 1170k 1125k - DE500 1063k 998k 1170k 1125k in 10Mb/s mode - - All values are typical (in kBytes/sec) from a sample of 4 for each - measurement. Their error is +/-20k on a quiet (private) network and also - depend on what load the CPU has. - - ========================================================================= - This driver has been written substantially from scratch, although its - inheritance of style and stack interface from 'ewrk3.c' and in turn from - Donald Becker's 'lance.c' should be obvious. With the module autoload of - every usable DECchip board, I pinched Donald's 'next_module' field to - link my modules together. - - Up to 15 EISA cards can be supported under this driver, limited primarily - by the available IRQ lines. I have checked different configurations of - multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a - problem yet (provided you have at least depca.c v0.38) ... - - PCI support has been added to allow the driver to work with the DE434, - DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due - to the differences in the EISA and PCI CSR address offsets from the base - address. - - The ability to load this driver as a loadable module has been included - and used extensively during the driver development (to save those long - reboot sequences). 
Loadable module support under PCI and EISA has been - achieved by letting the driver autoprobe as if it were compiled into the - kernel. Do make sure you're not sharing interrupts with anything that - cannot accommodate interrupt sharing! - - To utilise this ability, you have to do 8 things: - - 0) have a copy of the loadable modules code installed on your system. - 1) copy de4x5.c from the /linux/drivers/net directory to your favourite - temporary directory. - 2) for fixed autoprobes (not recommended), edit the source code near - line 5594 to reflect the I/O address you're using, or assign these when - loading by: - - insmod de4x5 io=0xghh where g = bus number - hh = device number - - NB: autoprobing for modules is now supported by default. You may just - use: - - insmod de4x5 - - to load all available boards. For a specific board, still use - the 'io=?' above. - 3) compile de4x5.c, but include -DMODULE in the command line to ensure - that the correct bits are compiled (see end of source code). - 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a - kernel with the de4x5 configuration turned off and reboot. - 5) insmod de4x5 [io=0xghh] - 6) run the net startup bits for your new eth?? interface(s) manually - (usually /etc/rc.inet[12] at boot time). - 7) enjoy! - - To unload a module, turn off the associated interface(s) - 'ifconfig eth?? down' then 'rmmod de4x5'. - - Automedia detection is included so that in principal you can disconnect - from, e.g. TP, reconnect to BNC and things will still work (after a - pause whilst the driver figures out where its media went). My tests - using ping showed that it appears to work.... - - By default, the driver will now autodetect any DECchip based card. - Should you have a need to restrict the driver to DIGITAL only cards, you - can compile with a DEC_ONLY define, or if loading as a module, use the - 'dec_only=1' parameter. - - I've changed the timing routines to use the kernel timer and scheduling - functions so that the hangs and other assorted problems that occurred - while autosensing the media should be gone. A bonus for the DC21040 - auto media sense algorithm is that it can now use one that is more in - line with the rest (the DC21040 chip doesn't have a hardware timer). - The downside is the 1 'jiffies' (10ms) resolution. - - IEEE 802.3u MII interface code has been added in anticipation that some - products may use it in the future. - - The SMC9332 card has a non-compliant SROM which needs fixing - I have - patched this driver to detect it because the SROM format used complies - to a previous DEC-STD format. - - I have removed the buffer copies needed for receive on Intels. I cannot - remove them for Alphas since the Tulip hardware only does longword - aligned DMA transfers and the Alphas get alignment traps with non - longword aligned data copies (which makes them really slow). No comment. - - I have added SROM decoding routines to make this driver work with any - card that supports the Digital Semiconductor SROM spec. This will help - all cards running the dc2114x series chips in particular. Cards using - the dc2104x chips should run correctly with the basic driver. I'm in - debt to for the testing and feedback that helped get - this feature working. So far we have tested KINGSTON, SMC8432, SMC9332 - (with the latest SROM complying with the SROM spec V3: their first was - broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315 - (quad 21041 MAC) cards also appear to work despite their incorrectly - wired IRQs. 
- - I have added a temporary fix for interrupt problems when some SCSI cards - share the same interrupt as the DECchip based cards. The problem occurs - because the SCSI card wants to grab the interrupt as a fast interrupt - (runs the service routine with interrupts turned off) vs. this card - which really needs to run the service routine with interrupts turned on. - This driver will now add the interrupt service routine as a fast - interrupt if it is bounced from the slow interrupt. THIS IS NOT A - RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time - until people sort out their compatibility issues and the kernel - interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST - INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not - run on the same interrupt. PCMCIA/CardBus is another can of worms... - - Finally, I think I have really fixed the module loading problem with - more than one DECchip based card. As a side effect, I don't mess with - the device structure any more which means that if more than 1 card in - 2.0.x is installed (4 in 2.1.x), the user will have to edit - linux/drivers/net/Space.c to make room for them. Hence, module loading - is the preferred way to use this driver, since it doesn't have this - limitation. - - Where SROM media detection is used and full duplex is specified in the - SROM, the feature is ignored unless lp->params.fdx is set at compile - time OR during a module load (insmod de4x5 args='eth??:fdx' [see - below]). This is because there is no way to automatically detect full - duplex links except through autonegotiation. When I include the - autonegotiation feature in the SROM autoconf code, this detection will - occur automatically for that case. - - Command line arguments are now allowed, similar to passing arguments - through LILO. This will allow a per adapter board set up of full duplex - and media. The only lexical constraints are: the board name (dev->name) - appears in the list before its parameters. The list of parameters ends - either at the end of the parameter list or with another board name. The - following parameters are allowed: - - fdx for full duplex - autosense to set the media/speed; with the following - sub-parameters: - TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO - - Case sensitivity is important for the sub-parameters. They *must* be - upper case. Examples: - - insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. - - For a compiled in driver, at or above line 548, place e.g. - #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP" - - Yes, I know full duplex isn't permissible on BNC or AUI; they're just - examples. By default, full duplex is turned off and AUTO is the default - autosense setting. In reality, I expect only the full duplex option to - be used. Note the use of single quotes in the two examples above and the - lack of commas to separate items. ALSO, you must get the requested media - correct in relation to what the adapter SROM says it has. There's no way - to determine this in advance other than by trial and error and common - sense, e.g. call a BNC connectored port 'BNC', not '10Mb'. - - Changed the bus probing. EISA used to be done first, followed by PCI. 
- Most people probably don't even know what a de425 is today and the EISA - probe has messed up some SCSI cards in the past, so now PCI is always - probed first followed by EISA if a) the architecture allows EISA and - either b) there have been no PCI cards detected or c) an EISA probe is - forced by the user. To force a probe include "force_eisa" in your - insmod "args" line; for built-in kernels either change the driver to do - this automatically or include #define DE4X5_FORCE_EISA on or before - line 1040 in the driver. - - TO DO: - ------ - - Revision History - ---------------- - - Version Date Description - - 0.1 17-Nov-94 Initial writing. ALPHA code release. - 0.2 13-Jan-95 Added PCI support for DE435's. - 0.21 19-Jan-95 Added auto media detection. - 0.22 10-Feb-95 Fix interrupt handler call . - Fix recognition bug reported by . - Add request/release_region code. - Add loadable modules support for PCI. - Clean up loadable modules support. - 0.23 28-Feb-95 Added DC21041 and DC21140 support. - Fix missed frame counter value and initialisation. - Fixed EISA probe. - 0.24 11-Apr-95 Change delay routine to use . - Change TX_BUFFS_AVAIL macro. - Change media autodetection to allow manual setting. - Completed DE500 (DC21140) support. - 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm. - 0.242 10-May-95 Minor changes. - 0.30 12-Jun-95 Timer fix for DC21140. - Portability changes. - Add ALPHA changes from . - Add DE500 semi automatic autosense. - Add Link Fail interrupt TP failure detection. - Add timer based link change detection. - Plugged a memory leak in de4x5_queue_pkt(). - 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1. - 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a - suggestion by . - 0.33 8-Aug-95 Add shared interrupt support (not released yet). - 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs. - Fix de4x5_interrupt(). - Fix dc21140_autoconf() mess. - No shared interrupt support. - 0.332 11-Sep-95 Added MII management interface routines. - 0.40 5-Mar-96 Fix setup frame timeout . - Add kernel timer code (h/w is too flaky). - Add MII based PHY autosense. - Add new multicasting code. - Add new autosense algorithms for media/mode - selection using kernel scheduling/timing. - Re-formatted. - Made changes suggested by : - Change driver to detect all DECchip based cards - with DEC_ONLY restriction a special case. - Changed driver to autoprobe as a module. No irq - checking is done now - assume BIOS is good! - Added SMC9332 detection - 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card - only - Fix for multiple PCI cards reported by - Duh, put the IRQF_SHARED flag into request_interrupt(). - Fix SMC ethernet address in enet_det[]. - Print chip name instead of "UNKNOWN" during boot. - 0.42 26-Apr-96 Fix MII write TA bit error. - Fix bug in dc21040 and dc21041 autosense code. - Remove buffer copies on receive for Intels. - Change sk_buff handling during media disconnects to - eliminate DUP packets. - Add dynamic TX thresholding. - Change all chips to use perfect multicast filtering. - Fix alloc_device() bug - 0.43 21-Jun-96 Fix unconnected media TX retry bug. - Add Accton to the list of broken cards. - Fix TX under-run bug for non DC21140 chips. - Fix boot command probe bug in alloc_device() as - reported by and - . - Add cache locks to prevent a race condition as - reported by and - . - Upgraded alloc_device() code. - 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion - with - 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips. 
- Fix EISA probe bugs reported by - and . - 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media - with a loopback packet. - 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported - by - 0.45 8-Dec-96 Include endian functions for PPC use, from work - by and . - 0.451 28-Dec-96 Added fix to allow autoprobe for modules after - suggestion from . - 0.5 30-Jan-97 Added SROM decoding functions. - Updated debug flags. - Fix sleep/wakeup calls for PCI cards, bug reported - by . - Added multi-MAC, one SROM feature from discussion - with . - Added full module autoprobe capability. - Added attempt to use an SMC9332 with broken SROM. - Added fix for ZYNX multi-mac cards that didn't - get their IRQs wired correctly. - 0.51 13-Feb-97 Added endian fixes for the SROM accesses from - - Fix init_connection() to remove extra device reset. - Fix MAC/PHY reset ordering in dc21140m_autoconf(). - Fix initialisation problem with lp->timeout in - typeX_infoblock() from . - Fix MII PHY reset problem from work done by - . - 0.52 26-Apr-97 Some changes may not credit the right people - - a disk crash meant I lost some mail. - Change RX interrupt routine to drop rather than - defer packets to avoid hang reported by - . - Fix srom_exec() to return for COMPACT and type 1 - infoblocks. - Added DC21142 and DC21143 functions. - Added byte counters from - Added IRQF_DISABLED temporary fix from - . - 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during - module load: bug reported by - - Fix multi-MAC, one SROM, to work with 2114x chips: - bug reported by . - Make above search independent of BIOS device scan - direction. - Completed DC2114[23] autosense functions. - 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by - and - . - Added argument list to set up each board from either - a module's command line or a compiled in #define. - Added generic MII PHY functionality to deal with - newer PHY chips. - Fix the mess in 2.1.67. - 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by - . - Fix bug in pci_probe() for 64 bit systems reported - by . - 0.533 9-Jan-98 Fix more 64 bit bugs reported by . - 0.534 24-Jan-98 Fix last (?) endian bug from - 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040. - 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure. - **Incompatible with 2.0.x from here.** - 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP - from - Add TP, AUI and BNC cases to 21140m_autoconf() for - case where a 21140 under SROM control uses, e.g. AUI - from problem report by - Add MII parallel detection to 2114x_autoconf() for - case where no autonegotiation partner exists from - problem report by . - Add ability to force connection type directly even - when using SROM control from problem report by - . - Updated the PCI interface to conform with the latest - version. I hope nothing is broken... - Add TX done interrupt modification from suggestion - by . - Fix is_anc_capable() bug reported by - . - Fix type[13]_infoblock() bug: during MII search, PHY - lp->rst not run because lp->ibn not initialised - - from report & fix by . - Fix probe bug with EISA & PCI cards present from - report by . - 0.541 24-Aug-98 Fix compiler problems associated with i386-string - ops from multiple bug reports and temporary fix - from . - Fix pci_probe() to correctly emulate the old - pcibios_find_class() function. - Add an_exception() for old ZYNX346 and fix compile - warning on PPC & SPARC, from . 
- Fix lastPCI to correctly work with compiled in - kernels and modules from bug report by - et al. - 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages - when media is unconnected. - Change dev->interrupt to lp->interrupt to ensure - alignment for Alpha's and avoid their unaligned - access traps. This flag is merely for log messages: - should do something more definitive though... - 0.543 30-Dec-98 Add SMP spin locking. - 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using - a 21143 by . - Change PCI/EISA bus probing order. - 0.545 28-Nov-99 Further Moto SROM bug fix from - - Remove double checking for DEBUG_RX in de4x5_dbg_rx() - from report by - 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function - was causing a page fault when initializing the - variable 'pb', on a non de4x5 PCI device, in this - case a PCI bridge (DEC chip 21152). The value of - 'pb' is now only initialized if a de4x5 chip is - present. - - 0.547 08-Nov-01 Use library crc32 functions by - 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and - generic DMA APIs. Fixed DE425 support on Alpha. - - ========================================================================= -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#ifdef CONFIG_PPC_PMAC -#include -#endif /* CONFIG_PPC_PMAC */ - -#include "de4x5.h" - -static const char version[] = - KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; - -#define c_char const char - -/* -** MII Information -*/ -struct phy_table { - int reset; /* Hard reset required? */ - int id; /* IEEE OUI */ - int ta; /* One cycle TA time - 802.3u is confusing here */ - struct { /* Non autonegotiation (parallel) speed det. */ - int reg; - int mask; - int value; - } spd; -}; - -struct mii_phy { - int reset; /* Hard reset required? */ - int id; /* IEEE OUI */ - int ta; /* One cycle TA time */ - struct { /* Non autonegotiation (parallel) speed det. */ - int reg; - int mask; - int value; - } spd; - int addr; /* MII address for the PHY */ - u_char *gep; /* Start of GEP sequence block in SROM */ - u_char *rst; /* Start of reset sequence in SROM */ - u_int mc; /* Media Capabilities */ - u_int ana; /* NWay Advertisement */ - u_int fdx; /* Full DupleX capabilities for each media */ - u_int ttm; /* Transmit Threshold Mode for each media */ - u_int mci; /* 21142 MII Connector Interrupt info */ -}; - -#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */ - -struct sia_phy { - u_char mc; /* Media Code */ - u_char ext; /* csr13-15 valid when set */ - int csr13; /* SIA Connectivity Register */ - int csr14; /* SIA TX/RX Register */ - int csr15; /* SIA General Register */ - int gepc; /* SIA GEP Control Information */ - int gep; /* SIA GEP Data */ -}; - -/* -** Define the know universe of PHY devices that can be -** recognised by this driver. 
-*/ -static struct phy_table phy_info[] = { - {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */ - {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */ - {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */ - {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */ - {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */ -}; - -/* -** These GENERIC values assumes that the PHY devices follow 802.3u and -** allow parallel detection to set the link partner ability register. -** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported. -*/ -#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */ -#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */ -#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */ - -/* -** Define special SROM detection cases -*/ -static c_char enet_det[][ETH_ALEN] = { - {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00}, - {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00} -}; - -#define SMC 1 -#define ACCTON 2 - -/* -** SROM Repair definitions. If a broken SROM is detected a card may -** use this information to help figure out what to do. This is a -** "stab in the dark" and so far for SMC9332's only. -*/ -static c_char srom_repair_info[][100] = { - {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */ - 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02, - 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50, - 0x00,0x18,} -}; - - -#ifdef DE4X5_DEBUG -static int de4x5_debug = DE4X5_DEBUG; -#else -/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/ -static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION); -#endif - -/* -** Allow per adapter set up. For modules this is simply a command line -** parameter, e.g.: -** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. -** -** For a compiled in driver, place e.g. -** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP" -** here -*/ -#ifdef DE4X5_PARM -static char *args = DE4X5_PARM; -#else -static char *args; -#endif - -struct parameters { - bool fdx; - int autosense; -}; - -#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */ - -#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */ - -/* -** Ethernet PROM defines -*/ -#define PROBE_LENGTH 32 -#define ETH_PROM_SIG 0xAA5500FFUL - -/* -** Ethernet Info -*/ -#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */ -#define IEEE802_3_SZ 1518 /* Packet + CRC */ -#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */ -#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */ -#define MIN_DAT_SZ 1 /* Minimum ethernet data length */ -#define PKT_HDR_LEN 14 /* Addresses and data length info */ -#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1) -#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */ - - -/* -** EISA bus defines -*/ -#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ -#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */ - -#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11} - -#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"} -#define DE4X5_NAME_LENGTH 8 - -static c_char *de4x5_signatures[] = DE4X5_SIGNATURE; - -/* -** Ethernet PROM defines for DC21040 -*/ -#define PROBE_LENGTH 32 -#define ETH_PROM_SIG 0xAA5500FFUL - -/* -** PCI Bus defines -*/ -#define PCI_MAX_BUS_NUM 8 -#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */ -#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */ - -/* -** Memory Alignment. Each descriptor is 4 longwords long. 
To force a -** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and -** DESC_ALIGN. ALIGN aligns the start address of the private memory area -** and hence the RX descriptor ring's first entry. -*/ -#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ -#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */ -#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */ -#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */ -#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */ -#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */ - -#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */ -#define DE4X5_CACHE_ALIGN CAL_16LONG -#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */ -/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */ -#define DESC_ALIGN - -#ifndef DEC_ONLY /* See README.de4x5 for using this */ -static int dec_only; -#else -static int dec_only = 1; -#endif - -/* -** DE4X5 IRQ ENABLE/DISABLE -*/ -#define ENABLE_IRQs { \ - imr |= lp->irq_en;\ - outl(imr, DE4X5_IMR); /* Enable the IRQs */\ -} - -#define DISABLE_IRQs {\ - imr = inl(DE4X5_IMR);\ - imr &= ~lp->irq_en;\ - outl(imr, DE4X5_IMR); /* Disable the IRQs */\ -} - -#define UNMASK_IRQs {\ - imr |= lp->irq_mask;\ - outl(imr, DE4X5_IMR); /* Unmask the IRQs */\ -} - -#define MASK_IRQs {\ - imr = inl(DE4X5_IMR);\ - imr &= ~lp->irq_mask;\ - outl(imr, DE4X5_IMR); /* Mask the IRQs */\ -} - -/* -** DE4X5 START/STOP -*/ -#define START_DE4X5 {\ - omr = inl(DE4X5_OMR);\ - omr |= OMR_ST | OMR_SR;\ - outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\ -} - -#define STOP_DE4X5 {\ - omr = inl(DE4X5_OMR);\ - omr &= ~(OMR_ST|OMR_SR);\ - outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \ -} - -/* -** DE4X5 SIA RESET -*/ -#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */ - -/* -** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS) -*/ -#define DE4X5_AUTOSENSE_MS 250 - -/* -** SROM Structure -*/ -struct de4x5_srom { - char sub_vendor_id[2]; - char sub_system_id[2]; - char reserved[12]; - char id_block_crc; - char reserved2; - char version; - char num_controllers; - char ieee_addr[6]; - char info[100]; - short chksum; -}; -#define SUB_VENDOR_ID 0x500a - -/* -** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous -** and have sizes of both a power of 2 and a multiple of 4. -** A size of 256 bytes for each buffer could be chosen because over 90% of -** all packets in our network are <256 bytes long and 64 longword alignment -** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX -** descriptors are needed for machines with an ALPHA CPU. 
-*/ -#define NUM_RX_DESC 8 /* Number of RX descriptors */ -#define NUM_TX_DESC 32 /* Number of TX descriptors */ -#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */ - /* Multiple of 4 for DC21040 */ - /* Allows 512 byte alignment */ -struct de4x5_desc { - volatile __le32 status; - __le32 des1; - __le32 buf; - __le32 next; - DESC_ALIGN -}; - -/* -** The DE4X5 private structure -*/ -#define DE4X5_PKT_STAT_SZ 16 -#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you - increase DE4X5_PKT_STAT_SZ */ - -struct pkt_stats { - u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */ - u_int unicast; - u_int multicast; - u_int broadcast; - u_int excessive_collisions; - u_int tx_underruns; - u_int excessive_underruns; - u_int rx_runt_frames; - u_int rx_collision; - u_int rx_dribble; - u_int rx_overflow; -}; - -struct de4x5_private { - char adapter_name[80]; /* Adapter name */ - u_long interrupt; /* Aligned ISR flag */ - struct de4x5_desc *rx_ring; /* RX descriptor ring */ - struct de4x5_desc *tx_ring; /* TX descriptor ring */ - struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */ - struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */ - int rx_new, rx_old; /* RX descriptor ring pointers */ - int tx_new, tx_old; /* TX descriptor ring pointers */ - char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */ - char frame[64]; /* Min sized packet for loopback*/ - spinlock_t lock; /* Adapter specific spinlock */ - struct net_device_stats stats; /* Public stats */ - struct pkt_stats pktStats; /* Private stats counters */ - char rxRingSize; - char txRingSize; - int bus; /* EISA or PCI */ - int bus_num; /* PCI Bus number */ - int device; /* Device number on PCI bus */ - int state; /* Adapter OPENED or CLOSED */ - int chipset; /* DC21040, DC21041 or DC21140 */ - s32 irq_mask; /* Interrupt Mask (Enable) bits */ - s32 irq_en; /* Summary interrupt bits */ - int media; /* Media (eg TP), mode (eg 100B)*/ - int c_media; /* Remember the last media conn */ - bool fdx; /* media full duplex flag */ - int linkOK; /* Link is OK */ - int autosense; /* Allow/disallow autosensing */ - bool tx_enable; /* Enable descriptor polling */ - int setup_f; /* Setup frame filtering type */ - int local_state; /* State within a 'media' state */ - struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */ - struct sia_phy sia; /* SIA PHY Information */ - int active; /* Index to active PHY device */ - int mii_cnt; /* Number of attached PHY's */ - int timeout; /* Scheduling counter */ - struct timer_list timer; /* Timer info for kernel */ - int tmp; /* Temporary global per card */ - struct { - u_long lock; /* Lock the cache accesses */ - s32 csr0; /* Saved Bus Mode Register */ - s32 csr6; /* Saved Operating Mode Reg. */ - s32 csr7; /* Saved IRQ Mask Register */ - s32 gep; /* Saved General Purpose Reg. */ - s32 gepc; /* Control info for GEP */ - s32 csr13; /* Saved SIA Connectivity Reg. */ - s32 csr14; /* Saved SIA TX/RX Register */ - s32 csr15; /* Saved SIA General Register */ - int save_cnt; /* Flag if state already saved */ - struct sk_buff_head queue; /* Save the (re-ordered) skb's */ - } cache; - struct de4x5_srom srom; /* A copy of the SROM */ - int cfrv; /* Card CFRV copy */ - int rx_ovf; /* Check for 'RX overflow' tag */ - bool useSROM; /* For non-DEC card use SROM */ - bool useMII; /* Infoblock using the MII */ - int asBitValid; /* Autosense bits in GEP? 
*/ - int asPolarity; /* 0 => asserted high */ - int asBit; /* Autosense bit number in GEP */ - int defMedium; /* SROM default medium */ - int tcount; /* Last infoblock number */ - int infoblock_init; /* Initialised this infoblock? */ - int infoleaf_offset; /* SROM infoleaf for controller */ - s32 infoblock_csr6; /* csr6 value in SROM infoblock */ - int infoblock_media; /* infoblock media */ - int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */ - u_char *rst; /* Pointer to Type 5 reset info */ - u_char ibn; /* Infoblock number */ - struct parameters params; /* Command line/ #defined params */ - struct device *gendev; /* Generic device */ - dma_addr_t dma_rings; /* DMA handle for rings */ - int dma_size; /* Size of the DMA area */ - char *rx_bufs; /* rx bufs on alpha, sparc, ... */ -}; - -/* -** To get around certain poxy cards that don't provide an SROM -** for the second and more DECchip, I have to key off the first -** chip's address. I'll assume there's not a bad SROM iff: -** -** o the chipset is the same -** o the bus number is the same and > 0 -** o the sum of all the returned hw address bytes is 0 or 0x5fa -** -** Also have to save the irq for those cards whose hardware designers -** can't follow the PCI to PCI Bridge Architecture spec. -*/ -static struct { - int chipset; - int bus; - int irq; - u_char addr[ETH_ALEN]; -} last = {0,}; - -/* -** The transmit ring full condition is described by the tx_old and tx_new -** pointers by: -** tx_old = tx_new Empty ring -** tx_old = tx_new+1 Full ring -** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition) -*/ -#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ - lp->tx_old+lp->txRingSize-lp->tx_new-1:\ - lp->tx_old -lp->tx_new-1) - -#define TX_PKT_PENDING (lp->tx_old != lp->tx_new) - -/* -** Public Functions -*/ -static int de4x5_open(struct net_device *dev); -static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t de4x5_interrupt(int irq, void *dev_id); -static int de4x5_close(struct net_device *dev); -static struct net_device_stats *de4x5_get_stats(struct net_device *dev); -static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len); -static void set_multicast_list(struct net_device *dev); -static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, - void __user *data, int cmd); - -/* -** Private functions -*/ -static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev); -static int de4x5_init(struct net_device *dev); -static int de4x5_sw_reset(struct net_device *dev); -static int de4x5_rx(struct net_device *dev); -static int de4x5_tx(struct net_device *dev); -static void de4x5_ast(struct timer_list *t); -static int de4x5_txur(struct net_device *dev); -static int de4x5_rx_ovfc(struct net_device *dev); - -static int autoconf_media(struct net_device *dev); -static void create_packet(struct net_device *dev, char *frame, int len); -static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb); -static int dc21040_autoconf(struct net_device *dev); -static int dc21041_autoconf(struct net_device *dev); -static int dc21140m_autoconf(struct net_device *dev); -static int dc2114x_autoconf(struct net_device *dev); -static int srom_autoconf(struct net_device *dev); -static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *)); -static int dc21040_state(struct net_device *dev, int csr13, int csr14, 
int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int)); -static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec); -static int test_for_100Mb(struct net_device *dev, int msec); -static int wait_for_link(struct net_device *dev); -static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec); -static int is_spd_100(struct net_device *dev); -static int is_100_up(struct net_device *dev); -static int is_10_up(struct net_device *dev); -static int is_anc_capable(struct net_device *dev); -static int ping_media(struct net_device *dev, int msec); -static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len); -static void de4x5_free_rx_buffs(struct net_device *dev); -static void de4x5_free_tx_buffs(struct net_device *dev); -static void de4x5_save_skbs(struct net_device *dev); -static void de4x5_rst_desc_ring(struct net_device *dev); -static void de4x5_cache_state(struct net_device *dev, int flag); -static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb); -static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb); -static struct sk_buff *de4x5_get_cache(struct net_device *dev); -static void de4x5_setup_intr(struct net_device *dev); -static void de4x5_init_connection(struct net_device *dev); -static int de4x5_reset_phy(struct net_device *dev); -static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr); -static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec); -static int test_tp(struct net_device *dev, s32 msec); -static int EISA_signature(char *name, struct device *device); -static void PCI_signature(char *name, struct de4x5_private *lp); -static void DevicePresent(struct net_device *dev, u_long iobase); -static void enet_addr_rst(u_long aprom_addr); -static int de4x5_bad_srom(struct de4x5_private *lp); -static short srom_rd(u_long address, u_char offset); -static void srom_latch(u_int command, u_long address); -static void srom_command(u_int command, u_long address); -static void srom_address(u_int command, u_long address, u_char offset); -static short srom_data(u_int command, u_long address); -/*static void srom_busy(u_int command, u_long address);*/ -static void sendto_srom(u_int command, u_long addr); -static int getfrom_srom(u_long addr); -static int srom_map_media(struct net_device *dev); -static int srom_infoleaf_info(struct net_device *dev); -static void srom_init(struct net_device *dev); -static void srom_exec(struct net_device *dev, u_char *p); -static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr); -static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr); -static int mii_rdata(u_long ioaddr); -static void mii_wdata(int data, int len, u_long ioaddr); -static void mii_ta(u_long rw, u_long ioaddr); -static int mii_swap(int data, int len); -static void mii_address(u_char addr, u_long ioaddr); -static void sendto_mii(u32 command, int data, u_long ioaddr); -static int getfrom_mii(u32 command, u_long ioaddr); -static int mii_get_oui(u_char phyaddr, u_long ioaddr); -static int mii_get_phy(struct net_device *dev); -static void SetMulticastFilter(struct net_device *dev); -static int get_hw_addr(struct net_device *dev); -static void srom_repair(struct net_device *dev, int card); -static int test_bad_enet(struct net_device *dev, int status); -static int an_exception(struct de4x5_private *lp); -static char *build_setup_frame(struct net_device *dev, int 
mode); -static void disable_ast(struct net_device *dev); -static long de4x5_switch_mac_port(struct net_device *dev); -static int gep_rd(struct net_device *dev); -static void gep_wr(s32 data, struct net_device *dev); -static void yawn(struct net_device *dev, int state); -static void de4x5_parse_params(struct net_device *dev); -static void de4x5_dbg_open(struct net_device *dev); -static void de4x5_dbg_mii(struct net_device *dev, int k); -static void de4x5_dbg_media(struct net_device *dev); -static void de4x5_dbg_srom(struct de4x5_srom *p); -static void de4x5_dbg_rx(struct sk_buff *skb, int len); -static int dc21041_infoleaf(struct net_device *dev); -static int dc21140_infoleaf(struct net_device *dev); -static int dc21142_infoleaf(struct net_device *dev); -static int dc21143_infoleaf(struct net_device *dev); -static int type0_infoblock(struct net_device *dev, u_char count, u_char *p); -static int type1_infoblock(struct net_device *dev, u_char count, u_char *p); -static int type2_infoblock(struct net_device *dev, u_char count, u_char *p); -static int type3_infoblock(struct net_device *dev, u_char count, u_char *p); -static int type4_infoblock(struct net_device *dev, u_char count, u_char *p); -static int type5_infoblock(struct net_device *dev, u_char count, u_char *p); -static int compact_infoblock(struct net_device *dev, u_char count, u_char *p); - -/* -** Note now that module autoprobing is allowed under EISA and PCI. The -** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes -** to "do the right thing". -*/ - -static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */ - -module_param_hw(io, int, ioport, 0); -module_param(de4x5_debug, int, 0); -module_param(dec_only, int, 0); -module_param(args, charp, 0); - -MODULE_PARM_DESC(io, "de4x5 I/O base address"); -MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask"); -MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)"); -MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details"); -MODULE_LICENSE("GPL"); - -/* -** List the SROM infoleaf functions and chipsets -*/ -struct InfoLeaf { - int chipset; - int (*fn)(struct net_device *); -}; -static struct InfoLeaf infoleaf_array[] = { - {DC21041, dc21041_infoleaf}, - {DC21140, dc21140_infoleaf}, - {DC21142, dc21142_infoleaf}, - {DC21143, dc21143_infoleaf} -}; -#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array) - -/* -** List the SROM info block functions -*/ -static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = { - type0_infoblock, - type1_infoblock, - type2_infoblock, - type3_infoblock, - type4_infoblock, - type5_infoblock, - compact_infoblock -}; - -#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1) - -/* -** Miscellaneous defines... -*/ -#define RESET_DE4X5 {\ - int i;\ - i=inl(DE4X5_BMR);\ - mdelay(1);\ - outl(i | BMR_SWR, DE4X5_BMR);\ - mdelay(1);\ - outl(i, DE4X5_BMR);\ - mdelay(1);\ - for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\ - mdelay(1);\ -} - -#define PHY_HARD_RESET {\ - outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. 
*/\ - mdelay(1); /* Assert for 1ms */\ - outl(0x00, DE4X5_GEP);\ - mdelay(2); /* Wait for 2ms */\ -} - -static const struct net_device_ops de4x5_netdev_ops = { - .ndo_open = de4x5_open, - .ndo_stop = de4x5_close, - .ndo_start_xmit = de4x5_queue_pkt, - .ndo_get_stats = de4x5_get_stats, - .ndo_set_rx_mode = set_multicast_list, - .ndo_siocdevprivate = de4x5_siocdevprivate, - .ndo_set_mac_address= eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - - -static int -de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) -{ - char name[DE4X5_NAME_LENGTH + 1]; - struct de4x5_private *lp = netdev_priv(dev); - struct pci_dev *pdev = NULL; - int i, status=0; - - dev_set_drvdata(gendev, dev); - - /* Ensure we're not sleeping */ - if (lp->bus == EISA) { - outb(WAKEUP, PCI_CFPM); - } else { - pdev = to_pci_dev (gendev); - pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP); - } - mdelay(10); - - RESET_DE4X5; - - if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) { - return -ENXIO; /* Hardware could not reset */ - } - - /* - ** Now find out what kind of DC21040/DC21041/DC21140 board we have. - */ - lp->useSROM = false; - if (lp->bus == PCI) { - PCI_signature(name, lp); - } else { - EISA_signature(name, gendev); - } - - if (*name == '\0') { /* Not found a board signature */ - return -ENXIO; - } - - dev->base_addr = iobase; - printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase); - - status = get_hw_addr(dev); - printk(", h/w address %pM\n", dev->dev_addr); - - if (status != 0) { - printk(" which has an Ethernet PROM CRC error.\n"); - return -ENXIO; - } else { - skb_queue_head_init(&lp->cache.queue); - lp->cache.gepc = GEP_INIT; - lp->asBit = GEP_SLNK; - lp->asPolarity = GEP_SLNK; - lp->asBitValid = ~0; - lp->timeout = -1; - lp->gendev = gendev; - spin_lock_init(&lp->lock); - timer_setup(&lp->timer, de4x5_ast, 0); - de4x5_parse_params(dev); - - /* - ** Choose correct autosensing in case someone messed up - */ - lp->autosense = lp->params.autosense; - if (lp->chipset != DC21140) { - if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) { - lp->params.autosense = TP; - } - if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) { - lp->params.autosense = BNC; - } - } - lp->fdx = lp->params.fdx; - sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev)); - - lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); -#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY) - lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; -#endif - lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, - &lp->dma_rings, GFP_ATOMIC); - if (lp->rx_ring == NULL) { - return -ENOMEM; - } - - lp->tx_ring = lp->rx_ring + NUM_RX_DESC; - - /* - ** Set up the RX descriptor ring (Intels) - ** Allocate contiguous receive buffers, long word aligned (Alphas) - */ -#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) - for (i=0; irx_ring[i].status = 0; - lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); - lp->rx_ring[i].buf = 0; - lp->rx_ring[i].next = 0; - lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */ - } - -#else - { - dma_addr_t dma_rx_bufs; - - dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC) - * sizeof(struct de4x5_desc); - dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN; - lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC - + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN); - for (i=0; irx_ring[i].status = 0; - lp->rx_ring[i].des1 = 
cpu_to_le32(RX_BUFF_SZ); - lp->rx_ring[i].buf = - cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ); - lp->rx_ring[i].next = 0; - lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */ - } - - } -#endif - - barrier(); - - lp->rxRingSize = NUM_RX_DESC; - lp->txRingSize = NUM_TX_DESC; - - /* Write the end of list marker to the descriptor lists */ - lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER); - lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER); - - /* Tell the adapter where the TX/RX rings are located. */ - outl(lp->dma_rings, DE4X5_RRBA); - outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), - DE4X5_TRBA); - - /* Initialise the IRQ mask and Enable/Disable */ - lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM; - lp->irq_en = IMR_NIM | IMR_AIM; - - /* Create a loopback packet frame for later media probing */ - create_packet(dev, lp->frame, sizeof(lp->frame)); - - /* Check if the RX overflow bug needs testing for */ - i = lp->cfrv & 0x000000fe; - if ((lp->chipset == DC21140) && (i == 0x20)) { - lp->rx_ovf = 1; - } - - /* Initialise the SROM pointers if possible */ - if (lp->useSROM) { - lp->state = INITIALISED; - if (srom_infoleaf_info(dev)) { - dma_free_coherent (gendev, lp->dma_size, - lp->rx_ring, lp->dma_rings); - return -ENXIO; - } - srom_init(dev); - } - - lp->state = CLOSED; - - /* - ** Check for an MII interface - */ - if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) { - mii_get_phy(dev); - } - - printk(" and requires IRQ%d (provided by %s).\n", dev->irq, - ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); - } - - if (de4x5_debug & DEBUG_VERSION) { - printk(version); - } - - /* The DE4X5-specific entries in the device structure. */ - SET_NETDEV_DEV(dev, gendev); - dev->netdev_ops = &de4x5_netdev_ops; - dev->mem_start = 0; - - /* Fill in the generic fields of the device structure. */ - if ((status = register_netdev (dev))) { - dma_free_coherent (gendev, lp->dma_size, - lp->rx_ring, lp->dma_rings); - return status; - } - - /* Let the adapter sleep to save power */ - yawn(dev, SLEEP); - - return status; -} - - -static int -de4x5_open(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int i, status = 0; - s32 omr; - - /* Allocate the RX buffers */ - for (i=0; irxRingSize; i++) { - if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) { - de4x5_free_rx_buffs(dev); - return -EAGAIN; - } - } - - /* - ** Wake up the adapter - */ - yawn(dev, WAKEUP); - - /* - ** Re-initialize the DE4X5... 
- */ - status = de4x5_init(dev); - spin_lock_init(&lp->lock); - lp->state = OPEN; - de4x5_dbg_open(dev); - - if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, - lp->adapter_name, dev)) { - printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq); - if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, - lp->adapter_name, dev)) { - printk("\n Cannot get IRQ- reconfigure your hardware.\n"); - disable_ast(dev); - de4x5_free_rx_buffs(dev); - de4x5_free_tx_buffs(dev); - yawn(dev, SLEEP); - lp->state = CLOSED; - return -EAGAIN; - } else { - printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n"); - printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n"); - } - } - - lp->interrupt = UNMASK_INTERRUPTS; - netif_trans_update(dev); /* prevent tx timeout */ - - START_DE4X5; - - de4x5_setup_intr(dev); - - if (de4x5_debug & DEBUG_OPEN) { - printk("\tsts: 0x%08x\n", inl(DE4X5_STS)); - printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR)); - printk("\timr: 0x%08x\n", inl(DE4X5_IMR)); - printk("\tomr: 0x%08x\n", inl(DE4X5_OMR)); - printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR)); - printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR)); - printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR)); - printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR)); - } - - return status; -} - -/* -** Initialize the DE4X5 operating conditions. NB: a chip problem with the -** DC21140 requires using perfect filtering mode for that chip. Since I can't -** see why I'd want > 14 multicast addresses, I have changed all chips to use -** the perfect filtering mode. Keep the DMA burst length at 8: there seems -** to be data corruption problems if it is larger (UDP errors seen from a -** ttcp source). -*/ -static int -de4x5_init(struct net_device *dev) -{ - /* Lock out other processes whilst setting up the hardware */ - netif_stop_queue(dev); - - de4x5_sw_reset(dev); - - /* Autoconfigure the connected port */ - autoconf_media(dev); - - return 0; -} - -static int -de4x5_sw_reset(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int i, j, status = 0; - s32 bmr, omr; - - /* Select the MII or SRL port now and RESET the MAC */ - if (!lp->useSROM) { - if (lp->phy[lp->active].id != 0) { - lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD; - } else { - lp->infoblock_csr6 = OMR_SDP | OMR_TTM; - } - de4x5_switch_mac_port(dev); - } - - /* - ** Set the programmable burst length to 8 longwords for all the DC21140 - ** Fasternet chips and 4 longwords for all others: DMA errors result - ** without these values. Cache align 16 long. - */ - bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN; - bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? 
BMR_RML : 0); - outl(bmr, DE4X5_BMR); - - omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */ - if (lp->chipset == DC21140) { - omr |= (OMR_SDP | OMR_SB); - } - lp->setup_f = PERFECT; - outl(lp->dma_rings, DE4X5_RRBA); - outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), - DE4X5_TRBA); - - lp->rx_new = lp->rx_old = 0; - lp->tx_new = lp->tx_old = 0; - - for (i = 0; i < lp->rxRingSize; i++) { - lp->rx_ring[i].status = cpu_to_le32(R_OWN); - } - - for (i = 0; i < lp->txRingSize; i++) { - lp->tx_ring[i].status = cpu_to_le32(0); - } - - barrier(); - - /* Build the setup frame depending on filtering mode */ - SetMulticastFilter(dev); - - load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1); - outl(omr|OMR_ST, DE4X5_OMR); - - /* Poll for setup frame completion (adapter interrupts are disabled now) */ - - for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */ - mdelay(1); - if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1; - } - outl(omr, DE4X5_OMR); /* Stop everything! */ - - if (j == 0) { - printk("%s: Setup frame timed out, status %08x\n", dev->name, - inl(DE4X5_STS)); - status = -EIO; - } - - lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; - lp->tx_old = lp->tx_new; - - return status; -} - -/* -** Writes a socket buffer address to the next available transmit descriptor. -*/ -static netdev_tx_t -de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - u_long flags = 0; - - netif_stop_queue(dev); - if (!lp->tx_enable) /* Cannot send for now */ - goto tx_err; - - /* - ** Clean out the TX ring asynchronously to interrupts - sometimes the - ** interrupts are lost by delayed descriptor status updates relative to - ** the irq assertion, especially with a busy PCI bus. - */ - spin_lock_irqsave(&lp->lock, flags); - de4x5_tx(dev); - spin_unlock_irqrestore(&lp->lock, flags); - - /* Test if cache is already locked - requeue skb if so */ - if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) - goto tx_err; - - /* Transmit descriptor ring full or stale skb */ - if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) { - if (lp->interrupt) { - de4x5_putb_cache(dev, skb); /* Requeue the buffer */ - } else { - de4x5_put_cache(dev, skb); - } - if (de4x5_debug & DEBUG_TX) { - printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? 
"YES" : "NO"); - } - } else if (skb->len > 0) { - /* If we already have stuff queued locally, use that first */ - if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) { - de4x5_put_cache(dev, skb); - skb = de4x5_get_cache(dev); - } - - while (skb && !netif_queue_stopped(dev) && - (u_long) lp->tx_skb[lp->tx_new] <= 1) { - spin_lock_irqsave(&lp->lock, flags); - netif_stop_queue(dev); - load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); - lp->stats.tx_bytes += skb->len; - outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ - - lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; - - if (TX_BUFFS_AVAIL) { - netif_start_queue(dev); /* Another pkt may be queued */ - } - skb = de4x5_get_cache(dev); - spin_unlock_irqrestore(&lp->lock, flags); - } - if (skb) de4x5_putb_cache(dev, skb); - } - - lp->cache.lock = 0; - - return NETDEV_TX_OK; -tx_err: - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -/* -** The DE4X5 interrupt handler. -** -** I/O Read/Writes through intermediate PCI bridges are never 'posted', -** so that the asserted interrupt always has some real data to work with - -** if these I/O accesses are ever changed to memory accesses, ensure the -** STS write is read immediately to complete the transaction if the adapter -** is not on bus 0. Lost interrupts can still occur when the PCI bus load -** is high and descriptor status bits cannot be set before the associated -** interrupt is asserted and this routine entered. -*/ -static irqreturn_t -de4x5_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct de4x5_private *lp; - s32 imr, omr, sts, limit; - u_long iobase; - unsigned int handled = 0; - - lp = netdev_priv(dev); - spin_lock(&lp->lock); - iobase = dev->base_addr; - - DISABLE_IRQs; /* Ensure non re-entrancy */ - - if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt)) - printk("%s: Re-entering the interrupt handler.\n", dev->name); - - synchronize_irq(dev->irq); - - for (limit=0; limit<8; limit++) { - sts = inl(DE4X5_STS); /* Read IRQ status */ - outl(sts, DE4X5_STS); /* Reset the board interrupts */ - - if (!(sts & lp->irq_mask)) break;/* All done */ - handled = 1; - - if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */ - de4x5_rx(dev); - - if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */ - de4x5_tx(dev); - - if (sts & STS_LNF) { /* TP Link has failed */ - lp->irq_mask &= ~IMR_LFM; - } - - if (sts & STS_UNF) { /* Transmit underrun */ - de4x5_txur(dev); - } - - if (sts & STS_SE) { /* Bus Error */ - STOP_DE4X5; - printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n", - dev->name, sts); - spin_unlock(&lp->lock); - return IRQ_HANDLED; - } - } - - /* Load the TX ring with any locally stored packets */ - if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { - while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) { - de4x5_queue_pkt(de4x5_get_cache(dev), dev); - } - lp->cache.lock = 0; - } - - lp->interrupt = UNMASK_INTERRUPTS; - ENABLE_IRQs; - spin_unlock(&lp->lock); - - return IRQ_RETVAL(handled); -} - -static int -de4x5_rx(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int entry; - s32 status; - - for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0; - entry=lp->rx_new) { - status = (s32)le32_to_cpu(lp->rx_ring[entry].status); - - if (lp->rx_ovf) { - if (inl(DE4X5_MFC) & MFC_FOCM) { - de4x5_rx_ovfc(dev); - break; - } - } - - if (status & RD_FS) { /* Remember the start of frame */ - 
lp->rx_old = entry; - } - - if (status & RD_LS) { /* Valid frame status */ - if (lp->tx_enable) lp->linkOK++; - if (status & RD_ES) { /* There was an error. */ - lp->stats.rx_errors++; /* Update the error stats. */ - if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++; - if (status & RD_CE) lp->stats.rx_crc_errors++; - if (status & RD_OF) lp->stats.rx_fifo_errors++; - if (status & RD_TL) lp->stats.rx_length_errors++; - if (status & RD_RF) lp->pktStats.rx_runt_frames++; - if (status & RD_CS) lp->pktStats.rx_collision++; - if (status & RD_DB) lp->pktStats.rx_dribble++; - if (status & RD_OF) lp->pktStats.rx_overflow++; - } else { /* A valid frame received */ - struct sk_buff *skb; - short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status) - >> 16) - 4; - - if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) { - printk("%s: Insufficient memory; nuking packet.\n", - dev->name); - lp->stats.rx_dropped++; - } else { - de4x5_dbg_rx(skb, pkt_len); - - /* Push up the protocol stack */ - skb->protocol=eth_type_trans(skb,dev); - de4x5_local_stats(dev, skb->data, pkt_len); - netif_rx(skb); - - /* Update stats */ - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; - } - } - - /* Change buffer ownership for this frame, back to the adapter */ - for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) { - lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN); - barrier(); - } - lp->rx_ring[entry].status = cpu_to_le32(R_OWN); - barrier(); - } - - /* - ** Update entry information - */ - lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize; - } - - return 0; -} - -static inline void -de4x5_free_tx_buff(struct de4x5_private *lp, int entry) -{ - dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf), - le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1, - DMA_TO_DEVICE); - if ((u_long) lp->tx_skb[entry] > 1) - dev_kfree_skb_irq(lp->tx_skb[entry]); - lp->tx_skb[entry] = NULL; -} - -/* -** Buffer sent - check for TX buffer errors. -*/ -static int -de4x5_tx(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int entry; - s32 status; - - for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { - status = (s32)le32_to_cpu(lp->tx_ring[entry].status); - if (status < 0) { /* Buffer not sent yet */ - break; - } else if (status != 0x7fffffff) { /* Not setup frame */ - if (status & TD_ES) { /* An error happened */ - lp->stats.tx_errors++; - if (status & TD_NC) lp->stats.tx_carrier_errors++; - if (status & TD_LC) lp->stats.tx_window_errors++; - if (status & TD_UF) lp->stats.tx_fifo_errors++; - if (status & TD_EC) lp->pktStats.excessive_collisions++; - if (status & TD_DE) lp->stats.tx_aborted_errors++; - - if (TX_PKT_PENDING) { - outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */ - } - } else { /* Packet sent */ - lp->stats.tx_packets++; - if (lp->tx_enable) lp->linkOK++; - } - /* Update the collision counter */ - lp->stats.collisions += ((status & TD_EC) ? 16 : - ((status & TD_CC) >> 3)); - - /* Free the buffer. */ - if (lp->tx_skb[entry] != NULL) - de4x5_free_tx_buff(lp, entry); - } - - /* Update all the pointers */ - lp->tx_old = (lp->tx_old + 1) % lp->txRingSize; - } - - /* Any resources available? 
*/ - if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) { - if (lp->interrupt) - netif_wake_queue(dev); - else - netif_start_queue(dev); - } - - return 0; -} - -static void -de4x5_ast(struct timer_list *t) -{ - struct de4x5_private *lp = from_timer(lp, t, timer); - struct net_device *dev = dev_get_drvdata(lp->gendev); - int next_tick = DE4X5_AUTOSENSE_MS; - int dt; - - if (lp->useSROM) - next_tick = srom_autoconf(dev); - else if (lp->chipset == DC21140) - next_tick = dc21140m_autoconf(dev); - else if (lp->chipset == DC21041) - next_tick = dc21041_autoconf(dev); - else if (lp->chipset == DC21040) - next_tick = dc21040_autoconf(dev); - lp->linkOK = 0; - - dt = (next_tick * HZ) / 1000; - - if (!dt) - dt = 1; - - mod_timer(&lp->timer, jiffies + dt); -} - -static int -de4x5_txur(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int omr; - - omr = inl(DE4X5_OMR); - if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) { - omr &= ~(OMR_ST|OMR_SR); - outl(omr, DE4X5_OMR); - while (inl(DE4X5_STS) & STS_TS); - if ((omr & OMR_TR) < OMR_TR) { - omr += 0x4000; - } else { - omr |= OMR_SF; - } - outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); - } - - return 0; -} - -static int -de4x5_rx_ovfc(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int omr; - - omr = inl(DE4X5_OMR); - outl(omr & ~OMR_SR, DE4X5_OMR); - while (inl(DE4X5_STS) & STS_RS); - - for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) { - lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN); - lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize; - } - - outl(omr, DE4X5_OMR); - - return 0; -} - -static int -de4x5_close(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 imr, omr; - - disable_ast(dev); - - netif_stop_queue(dev); - - if (de4x5_debug & DEBUG_CLOSE) { - printk("%s: Shutting down ethercard, status was %8.8x.\n", - dev->name, inl(DE4X5_STS)); - } - - /* - ** We stop the DE4X5 here... mask interrupts and stop TX & RX - */ - DISABLE_IRQs; - STOP_DE4X5; - - /* Free the associated irq */ - free_irq(dev->irq, dev); - lp->state = CLOSED; - - /* Free any socket buffers */ - de4x5_free_rx_buffs(dev); - de4x5_free_tx_buffs(dev); - - /* Put the adapter to sleep to save power */ - yawn(dev, SLEEP); - - return 0; -} - -static struct net_device_stats * -de4x5_get_stats(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR)); - - return &lp->stats; -} - -static void -de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len) -{ - struct de4x5_private *lp = netdev_priv(dev); - int i; - - for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) { - if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) { - lp->pktStats.bins[i]++; - i = DE4X5_PKT_STAT_SZ; - } - } - if (is_multicast_ether_addr(buf)) { - if (is_broadcast_ether_addr(buf)) { - lp->pktStats.broadcast++; - } else { - lp->pktStats.multicast++; - } - } else if (ether_addr_equal(buf, dev->dev_addr)) { - lp->pktStats.unicast++; - } - - lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ - if (lp->pktStats.bins[0] == 0) { /* Reset counters */ - memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats)); - } -} - -/* -** Removes the TD_IC flag from previous descriptor to improve TX performance.
-** If the flag is changed on a descriptor that is being read by the hardware, -** I assume PCI transaction ordering will mean you are either successful or -** just miss asserting the change to the hardware. Anyway you're messing with -** a descriptor you don't own, but this shouldn't kill the chip provided -** the descriptor register is read only to the hardware. -*/ -static void -load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb) -{ - struct de4x5_private *lp = netdev_priv(dev); - int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1); - dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE); - - lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma); - lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER); - lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags); - lp->tx_skb[lp->tx_new] = skb; - lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC); - barrier(); - - lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN); - barrier(); -} - -/* -** Set or clear the multicast filter for this adaptor. -*/ -static void -set_multicast_list(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - /* First, double check that the adapter is open */ - if (lp->state == OPEN) { - if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ - u32 omr; - omr = inl(DE4X5_OMR); - omr |= OMR_PR; - outl(omr, DE4X5_OMR); - } else { - SetMulticastFilter(dev); - load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | - SETUP_FRAME_LEN, (struct sk_buff *)1); - - lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; - outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ - netif_trans_update(dev); /* prevent tx timeout */ - } - } -} - -/* -** Calculate the hash code and update the logical address filter -** from a list of ethernet multicast addresses. -** Little endian crc one liner from Matt Thomas, DEC. 
-*/ -static void -SetMulticastFilter(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - struct netdev_hw_addr *ha; - u_long iobase = dev->base_addr; - int i, bit, byte; - u16 hashcode; - u32 omr, crc; - char *pa; - unsigned char *addrs; - - omr = inl(DE4X5_OMR); - omr &= ~(OMR_PR | OMR_PM); - pa = build_setup_frame(dev, ALL); /* Build the basic frame */ - - if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) { - omr |= OMR_PM; /* Pass all multicasts */ - } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ - netdev_for_each_mc_addr(ha, dev) { - crc = ether_crc_le(ETH_ALEN, ha->addr); - hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */ - - byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ - bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ - - byte <<= 1; /* calc offset into setup frame */ - if (byte & 0x02) { - byte -= 1; - } - lp->setup_frame[byte] |= bit; - } - } else { /* Perfect filtering */ - netdev_for_each_mc_addr(ha, dev) { - addrs = ha->addr; - for (i=0; i<ETH_ALEN; i++) { - *(pa + (i&1)) = *addrs++; - if (i & 0x01) pa += 4; - } - } - } - outl(omr, DE4X5_OMR); -} - -#ifdef CONFIG_EISA - -static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; - -static int de4x5_eisa_probe(struct device *gendev) -{ - struct eisa_device *edev; - u_long iobase; - u_char irq, regval; - u_short vendor; - u32 cfid; - int status, device; - struct net_device *dev; - struct de4x5_private *lp; - - edev = to_eisa_device (gendev); - iobase = edev->base_addr; - - if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5")) - return -EBUSY; - - if (!request_region (iobase + DE4X5_EISA_IO_PORTS, - DE4X5_EISA_TOTAL_SIZE, "de4x5")) { - status = -EBUSY; - goto release_reg_1; - } - - if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { - status = -ENOMEM; - goto release_reg_2; - } - lp = netdev_priv(dev); - - cfid = (u32) inl(PCI_CFID); - lp->cfrv = (u_short) inl(PCI_CFRV); - device = (cfid >> 8) & 0x00ffff00; - vendor = (u_short) cfid; - - /* Read the EISA Configuration Registers */ - regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT); -#ifdef CONFIG_ALPHA - /* Looks like the Jensen firmware (rev 2.2) doesn't really - * care about the EISA configuration, and thus doesn't - * configure the PLX bridge properly. Oh well... Simply mimic - * the EISA config file to sort it out. */ - - /* EISA REG1: Assert DecChip 21040 HW Reset */ - outb (ER1_IAM | 1, EISA_REG1); - mdelay (1); - - /* EISA REG1: Deassert DecChip 21040 HW Reset */ - outb (ER1_IAM, EISA_REG1); - mdelay (1); - - /* EISA REG3: R/W Burst Transfer Enable */ - outb (ER3_BWE | ER3_BRE, EISA_REG3); - - /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */ - outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0); -#endif - irq = de4x5_irq[(regval >> 1) & 0x03]; - - if (is_DC2114x) { - device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); - } - lp->chipset = device; - lp->bus = EISA; - - /* Write the PCI Configuration Registers */ - outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS); - outl(0x00006000, PCI_CFLT); - outl(iobase, PCI_CBIO); - - DevicePresent(dev, EISA_APROM); - - dev->irq = irq; - - if (!(status = de4x5_hw_init (dev, iobase, gendev))) { - return 0; - } - - free_netdev (dev); - release_reg_2: - release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); - release_reg_1: - release_region (iobase, DE4X5_EISA_TOTAL_SIZE); - - return status; -} - -static int de4x5_eisa_remove(struct device *device) -{ - struct net_device *dev; - u_long iobase; - - dev = dev_get_drvdata(device); - iobase = dev->base_addr; - - unregister_netdev (dev); - free_netdev (dev); - release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); - release_region (iobase, DE4X5_EISA_TOTAL_SIZE); - - return 0; -} - -static const struct eisa_device_id de4x5_eisa_ids[] = { - { "DEC4250", 0 }, /* 0 is the board name index...
*/ - { "" } -}; -MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); - -static struct eisa_driver de4x5_eisa_driver = { - .id_table = de4x5_eisa_ids, - .driver = { - .name = "de4x5", - .probe = de4x5_eisa_probe, - .remove = de4x5_eisa_remove, - } -}; -#endif - -#ifdef CONFIG_PCI - -/* -** This function searches the current bus (which is >0) for a DECchip with an -** SROM, so that in multiport cards that have one SROM shared between multiple -** DECchips, we can find the base SROM irrespective of the BIOS scan direction. -** For single port cards this is a time waster... -*/ -static void -srom_search(struct net_device *dev, struct pci_dev *pdev) -{ - u_char pb; - u_short vendor, status; - u_int irq = 0, device; - u_long iobase = 0; /* Clear upper 32 bits in Alphas */ - int i, j; - struct de4x5_private *lp = netdev_priv(dev); - struct pci_dev *this_dev; - - list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) { - vendor = this_dev->vendor; - device = this_dev->device << 8; - if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue; - - /* Get the chip configuration revision register */ - pb = this_dev->bus->number; - - /* Set the device number information */ - lp->device = PCI_SLOT(this_dev->devfn); - lp->bus_num = pb; - - /* Set the chipset information */ - if (is_DC2114x) { - device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK - ? DC21142 : DC21143); - } - lp->chipset = device; - - /* Get the board I/O address (64 bits on sparc64) */ - iobase = pci_resource_start(this_dev, 0); - - /* Fetch the IRQ to be used */ - irq = this_dev->irq; - if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue; - - /* Check if I/O accesses are enabled */ - pci_read_config_word(this_dev, PCI_COMMAND, &status); - if (!(status & PCI_COMMAND_IO)) continue; - - /* Search for a valid SROM attached to this DECchip */ - DevicePresent(dev, DE4X5_APROM); - for (j=0, i=0; i<ETH_ALEN; i++) { - j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i); - } - if (j != 0 && j != 6 * 0xff) { - last.chipset = device; - last.bus = pb; - last.irq = irq; - for (i=0; i<ETH_ALEN; i++) { - last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i); - } - return; - } - } -} - -/* -** PCI bus I/O device probe -** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not -** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be -** enabled by the user first in the set up utility. Hence we just check for -** enabled features and silently ignore the card if they're not. -** -** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering -** bit. Here, check for I/O accesses and then set BM. If you put the card in -** a non BM slot, you're on your own (and complain to the PC vendor that your -** PC doesn't conform to the PCI standard)! -** -** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x -** kernels use the V0.535[n] drivers. -*/ - -static int de4x5_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - u_char pb, pbus = 0, dev_num, dnum = 0, timer; - u_short vendor, status; - u_int irq = 0, device; - u_long iobase = 0; /* Clear upper 32 bits in Alphas */ - int error; - struct net_device *dev; - struct de4x5_private *lp; - - dev_num = PCI_SLOT(pdev->devfn); - pb = pdev->bus->number; - - if (io) { /* probe a single PCI device */ - pbus = (u_short)(io >> 8); - dnum = (u_short)(io & 0xff); - if ((pbus != pb) || (dnum != dev_num)) - return -ENODEV; - } - - vendor = pdev->vendor; - device = pdev->device << 8; - if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) - return -ENODEV; - - /* Ok, the device seems to be for us.
*/ - if ((error = pci_enable_device (pdev))) - return error; - - if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { - error = -ENOMEM; - goto disable_dev; - } - - lp = netdev_priv(dev); - lp->bus = PCI; - lp->bus_num = 0; - - /* Search for an SROM on this bus */ - if (lp->bus_num != pb) { - lp->bus_num = pb; - srom_search(dev, pdev); - } - - /* Get the chip configuration revision register */ - lp->cfrv = pdev->revision; - - /* Set the device number information */ - lp->device = dev_num; - lp->bus_num = pb; - - /* Set the chipset information */ - if (is_DC2114x) { - device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); - } - lp->chipset = device; - - /* Get the board I/O address (64 bits on sparc64) */ - iobase = pci_resource_start(pdev, 0); - - /* Fetch the IRQ to be used */ - irq = pdev->irq; - if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) { - error = -ENODEV; - goto free_dev; - } - - /* Check if I/O accesses and Bus Mastering are enabled */ - pci_read_config_word(pdev, PCI_COMMAND, &status); -#ifdef __powerpc__ - if (!(status & PCI_COMMAND_IO)) { - status |= PCI_COMMAND_IO; - pci_write_config_word(pdev, PCI_COMMAND, status); - pci_read_config_word(pdev, PCI_COMMAND, &status); - } -#endif /* __powerpc__ */ - if (!(status & PCI_COMMAND_IO)) { - error = -ENODEV; - goto free_dev; - } - - if (!(status & PCI_COMMAND_MASTER)) { - status |= PCI_COMMAND_MASTER; - pci_write_config_word(pdev, PCI_COMMAND, status); - pci_read_config_word(pdev, PCI_COMMAND, &status); - } - if (!(status & PCI_COMMAND_MASTER)) { - error = -ENODEV; - goto free_dev; - } - - /* Check the latency timer for values >= 0x60 */ - pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer); - if (timer < 0x60) { - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60); - } - - DevicePresent(dev, DE4X5_APROM); - - if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) { - error = -EBUSY; - goto free_dev; - } - - dev->irq = irq; - - if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) { - goto release; - } - - return 0; - - release: - release_region (iobase, DE4X5_PCI_TOTAL_SIZE); - free_dev: - free_netdev (dev); - disable_dev: - pci_disable_device (pdev); - return error; -} - -static void de4x5_pci_remove(struct pci_dev *pdev) -{ - struct net_device *dev; - u_long iobase; - - dev = pci_get_drvdata(pdev); - iobase = dev->base_addr; - - unregister_netdev (dev); - free_netdev (dev); - release_region (iobase, DE4X5_PCI_TOTAL_SIZE); - pci_disable_device (pdev); -} - -static const struct pci_device_id de4x5_pci_tbl[] = { - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, - { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, - { }, -}; - -static struct pci_driver de4x5_pci_driver = { - .name = "de4x5", - .id_table = de4x5_pci_tbl, - .probe = de4x5_pci_probe, - .remove = de4x5_pci_remove, -}; - -#endif - -/* -** Auto configure the media here rather than setting the port at compile -** time. This routine is called by de4x5_init() and when a loss of media is -** detected (excessive collisions, loss of carrier, no carrier or link fail -** [TP] or no recent receive activity) to check whether the user has been -** sneaky and changed the port on us. 
-*/ -static int -autoconf_media(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - disable_ast(dev); - - lp->c_media = AUTO; /* Bogus last media */ - inl(DE4X5_MFC); /* Zero the lost frames counter */ - lp->media = INIT; - lp->tcount = 0; - - de4x5_ast(&lp->timer); - - return lp->media; -} - -/* -** Autoconfigure the media when using the DC21040. AUI cannot be distinguished -** from BNC as the port has a jumper to set thick or thin wire. When set for -** BNC, the BNC port will indicate activity if it's not terminated correctly. -** The only way to test for that is to place a loopback packet onto the -** network and watch for errors. Since we're messing with the interrupt mask -** register, disable the board interrupts and do not allow any more packets to -** be queued to the hardware. Re-enable everything only when the media is -** found. -** I may have to "age out" locally queued packets so that the higher layer -** timeouts don't effectively duplicate packets on the network. -*/ -static int -dc21040_autoconf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int next_tick = DE4X5_AUTOSENSE_MS; - s32 imr; - - switch (lp->media) { - case INIT: - DISABLE_IRQs; - lp->tx_enable = false; - lp->timeout = -1; - de4x5_save_skbs(dev); - if ((lp->autosense == AUTO) || (lp->autosense == TP)) { - lp->media = TP; - } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) { - lp->media = BNC_AUI; - } else if (lp->autosense == EXT_SIA) { - lp->media = EXT_SIA; - } else { - lp->media = NC; - } - lp->local_state = 0; - next_tick = dc21040_autoconf(dev); - break; - - case TP: - next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI, - TP_SUSPECT, test_tp); - break; - - case TP_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf); - break; - - case BNC: - case AUI: - case BNC_AUI: - next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA, - BNC_AUI_SUSPECT, ping_media); - break; - - case BNC_AUI_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf); - break; - - case EXT_SIA: - next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000, - NC, EXT_SIA_SUSPECT, ping_media); - break; - - case EXT_SIA_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf); - break; - - case NC: - /* default to TP for all */ - reset_init_sia(dev, 0x8f01, 0xffff, 0x0000); - if (lp->media != lp->c_media) { - de4x5_dbg_media(dev); - lp->c_media = lp->media; - } - lp->media = INIT; - lp->tx_enable = false; - break; - } - - return next_tick; -} - -static int -dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, - int next_state, int suspect_state, - int (*fn)(struct net_device *, int)) -{ - struct de4x5_private *lp = netdev_priv(dev); - int next_tick = DE4X5_AUTOSENSE_MS; - int linkBad; - - switch (lp->local_state) { - case 0: - reset_init_sia(dev, csr13, csr14, csr15); - lp->local_state++; - next_tick = 500; - break; - - case 1: - if (!lp->tx_enable) { - linkBad = fn(dev, timeout); - if (linkBad < 0) { - next_tick = linkBad & ~TIMER_CB; - } else { - if (linkBad && (lp->autosense == AUTO)) { - lp->local_state = 0; - lp->media = next_state; - } else { - de4x5_init_connection(dev); - } - } - } else if (!lp->linkOK && (lp->autosense == AUTO)) { - lp->media = suspect_state; - next_tick = 3000; - } - break; - } - - return 
next_tick; -} - -static int -de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, - int (*fn)(struct net_device *, int), - int (*asfn)(struct net_device *)) -{ - struct de4x5_private *lp = netdev_priv(dev); - int next_tick = DE4X5_AUTOSENSE_MS; - int linkBad; - - switch (lp->local_state) { - case 1: - if (lp->linkOK) { - lp->media = prev_state; - } else { - lp->local_state++; - next_tick = asfn(dev); - } - break; - - case 2: - linkBad = fn(dev, timeout); - if (linkBad < 0) { - next_tick = linkBad & ~TIMER_CB; - } else if (!linkBad) { - lp->local_state--; - lp->media = prev_state; - } else { - lp->media = INIT; - lp->tcount++; - } - } - - return next_tick; -} - -/* -** Autoconfigure the media when using the DC21041. AUI needs to be tested -** before BNC, because the BNC port will indicate activity if it's not -** terminated correctly. The only way to test for that is to place a loopback -** packet onto the network and watch for errors. Since we're messing with -** the interrupt mask register, disable the board interrupts and do not allow -** any more packets to be queued to the hardware. Re-enable everything only -** when the media is found. -*/ -static int -dc21041_autoconf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 sts, irqs, irq_mask, imr, omr; - int next_tick = DE4X5_AUTOSENSE_MS; - - switch (lp->media) { - case INIT: - DISABLE_IRQs; - lp->tx_enable = false; - lp->timeout = -1; - de4x5_save_skbs(dev); /* Save non transmitted skb's */ - if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) { - lp->media = TP; /* On chip auto negotiation is broken */ - } else if (lp->autosense == TP) { - lp->media = TP; - } else if (lp->autosense == BNC) { - lp->media = BNC; - } else if (lp->autosense == AUI) { - lp->media = AUI; - } else { - lp->media = NC; - } - lp->local_state = 0; - next_tick = dc21041_autoconf(dev); - break; - - case TP_NW: - if (lp->timeout < 0) { - omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */ - outl(omr | OMR_FDX, DE4X5_OMR); - } - irqs = STS_LNF | STS_LNP; - irq_mask = IMR_LFM | IMR_LPM; - sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400); - if (sts < 0) { - next_tick = sts & ~TIMER_CB; - } else { - if (sts & STS_LNP) { - lp->media = ANS; - } else { - lp->media = AUI; - } - next_tick = dc21041_autoconf(dev); - } - break; - - case ANS: - if (!lp->tx_enable) { - irqs = STS_LNP; - irq_mask = IMR_LPM; - sts = test_ans(dev, irqs, irq_mask, 3000); - if (sts < 0) { - next_tick = sts & ~TIMER_CB; - } else { - if (!(sts & STS_LNP) && (lp->autosense == AUTO)) { - lp->media = TP; - next_tick = dc21041_autoconf(dev); - } else { - lp->local_state = 1; - de4x5_init_connection(dev); - } - } - } else if (!lp->linkOK && (lp->autosense == AUTO)) { - lp->media = ANS_SUSPECT; - next_tick = 3000; - } - break; - - case ANS_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf); - break; - - case TP: - if (!lp->tx_enable) { - if (lp->timeout < 0) { - omr = inl(DE4X5_OMR); /* Set up half duplex for TP */ - outl(omr & ~OMR_FDX, DE4X5_OMR); - } - irqs = STS_LNF | STS_LNP; - irq_mask = IMR_LFM | IMR_LPM; - sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400); - if (sts < 0) { - next_tick = sts & ~TIMER_CB; - } else { - if (!(sts & STS_LNP) && (lp->autosense == AUTO)) { - if (inl(DE4X5_SISR) & SISR_NRA) { - lp->media = AUI; /* Non selected port activity */ - } else { - lp->media = BNC; - } - next_tick = dc21041_autoconf(dev); - } 
else { - lp->local_state = 1; - de4x5_init_connection(dev); - } - } - } else if (!lp->linkOK && (lp->autosense == AUTO)) { - lp->media = TP_SUSPECT; - next_tick = 3000; - } - break; - - case TP_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf); - break; - - case AUI: - if (!lp->tx_enable) { - if (lp->timeout < 0) { - omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */ - outl(omr & ~OMR_FDX, DE4X5_OMR); - } - irqs = 0; - irq_mask = 0; - sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000); - if (sts < 0) { - next_tick = sts & ~TIMER_CB; - } else { - if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) { - lp->media = BNC; - next_tick = dc21041_autoconf(dev); - } else { - lp->local_state = 1; - de4x5_init_connection(dev); - } - } - } else if (!lp->linkOK && (lp->autosense == AUTO)) { - lp->media = AUI_SUSPECT; - next_tick = 3000; - } - break; - - case AUI_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf); - break; - - case BNC: - switch (lp->local_state) { - case 0: - if (lp->timeout < 0) { - omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */ - outl(omr & ~OMR_FDX, DE4X5_OMR); - } - irqs = 0; - irq_mask = 0; - sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000); - if (sts < 0) { - next_tick = sts & ~TIMER_CB; - } else { - lp->local_state++; /* Ensure media connected */ - next_tick = dc21041_autoconf(dev); - } - break; - - case 1: - if (!lp->tx_enable) { - if ((sts = ping_media(dev, 3000)) < 0) { - next_tick = sts & ~TIMER_CB; - } else { - if (sts) { - lp->local_state = 0; - lp->media = NC; - } else { - de4x5_init_connection(dev); - } - } - } else if (!lp->linkOK && (lp->autosense == AUTO)) { - lp->media = BNC_SUSPECT; - next_tick = 3000; - } - break; - } - break; - - case BNC_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf); - break; - - case NC: - omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */ - outl(omr | OMR_FDX, DE4X5_OMR); - reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */ - if (lp->media != lp->c_media) { - de4x5_dbg_media(dev); - lp->c_media = lp->media; - } - lp->media = INIT; - lp->tx_enable = false; - break; - } - - return next_tick; -} - -/* -** Some autonegotiation chips are broken in that they do not return the -** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement -** register, except at the first power up negotiation. 
-*/ -static int -dc21140m_autoconf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - int ana, anlpa, cap, cr, slnk, sr; - int next_tick = DE4X5_AUTOSENSE_MS; - u_long imr, omr, iobase = dev->base_addr; - - switch(lp->media) { - case INIT: - if (lp->timeout < 0) { - DISABLE_IRQs; - lp->tx_enable = false; - lp->linkOK = 0; - de4x5_save_skbs(dev); /* Save non transmitted skb's */ - } - if ((next_tick = de4x5_reset_phy(dev)) < 0) { - next_tick &= ~TIMER_CB; - } else { - if (lp->useSROM) { - if (srom_map_media(dev) < 0) { - lp->tcount++; - return next_tick; - } - srom_exec(dev, lp->phy[lp->active].gep); - if (lp->infoblock_media == ANS) { - ana = lp->phy[lp->active].ana | MII_ANA_CSMA; - mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); - } - } else { - lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */ - SET_10Mb; - if (lp->autosense == _100Mb) { - lp->media = _100Mb; - } else if (lp->autosense == _10Mb) { - lp->media = _10Mb; - } else if ((lp->autosense == AUTO) && - ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { - ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); - ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); - mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); - lp->media = ANS; - } else if (lp->autosense == AUTO) { - lp->media = SPD_DET; - } else if (is_spd_100(dev) && is_100_up(dev)) { - lp->media = _100Mb; - } else { - lp->media = NC; - } - } - lp->local_state = 0; - next_tick = dc21140m_autoconf(dev); - } - break; - - case ANS: - switch (lp->local_state) { - case 0: - if (lp->timeout < 0) { - mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); - } - cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500); - if (cr < 0) { - next_tick = cr & ~TIMER_CB; - } else { - if (cr) { - lp->local_state = 0; - lp->media = SPD_DET; - } else { - lp->local_state++; - } - next_tick = dc21140m_autoconf(dev); - } - break; - - case 1: - if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) { - next_tick = sr & ~TIMER_CB; - } else { - lp->media = SPD_DET; - lp->local_state = 0; - if (sr) { /* Success! */ - lp->tmp = MII_SR_ASSC; - anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); - ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); - if (!(anlpa & MII_ANLPA_RF) && - (cap = anlpa & MII_ANLPA_TAF & ana)) { - if (cap & MII_ANA_100M) { - lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0; - lp->media = _100Mb; - } else if (cap & MII_ANA_10M) { - lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0; - - lp->media = _10Mb; - } - } - } /* Auto Negotiation failed to finish */ - next_tick = dc21140m_autoconf(dev); - } /* Auto Negotiation failed to start */ - break; - } - break; - - case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ - if (lp->timeout < 0) { - lp->tmp = (lp->phy[lp->active].id ? 
MII_SR_LKS : - (~gep_rd(dev) & GEP_LNP)); - SET_100Mb_PDET; - } - if ((slnk = test_for_100Mb(dev, 6500)) < 0) { - next_tick = slnk & ~TIMER_CB; - } else { - if (is_spd_100(dev) && is_100_up(dev)) { - lp->media = _100Mb; - } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) { - lp->media = _10Mb; - } else { - lp->media = NC; - } - next_tick = dc21140m_autoconf(dev); - } - break; - - case _100Mb: /* Set 100Mb/s */ - next_tick = 3000; - if (!lp->tx_enable) { - SET_100Mb; - de4x5_init_connection(dev); - } else { - if (!lp->linkOK && (lp->autosense == AUTO)) { - if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) { - lp->media = INIT; - lp->tcount++; - next_tick = DE4X5_AUTOSENSE_MS; - } - } - } - break; - - case BNC: - case AUI: - case _10Mb: /* Set 10Mb/s */ - next_tick = 3000; - if (!lp->tx_enable) { - SET_10Mb; - de4x5_init_connection(dev); - } else { - if (!lp->linkOK && (lp->autosense == AUTO)) { - if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) { - lp->media = INIT; - lp->tcount++; - next_tick = DE4X5_AUTOSENSE_MS; - } - } - } - break; - - case NC: - if (lp->media != lp->c_media) { - de4x5_dbg_media(dev); - lp->c_media = lp->media; - } - lp->media = INIT; - lp->tx_enable = false; - break; - } - - return next_tick; -} - -/* -** This routine may be merged into dc21140m_autoconf() sometime as I'm -** changing how I figure out the media - but trying to keep it backwards -** compatible with the de500-xa and de500-aa. -** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock -** functions and set during de4x5_mac_port() and/or de4x5_reset_phy(). -** This routine just has to figure out whether 10Mb/s or 100Mb/s is -** active. -** When autonegotiation is working, the ANS part searches the SROM for -** the highest common speed (TP) link that both can run and if that can -** be full duplex. That infoblock is executed and then the link speed set. -** -** Only _10Mb and _100Mb are tested here. -*/ -static int -dc2114x_autoconf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts; - int next_tick = DE4X5_AUTOSENSE_MS; - - switch (lp->media) { - case INIT: - if (lp->timeout < 0) { - DISABLE_IRQs; - lp->tx_enable = false; - lp->linkOK = 0; - lp->timeout = -1; - de4x5_save_skbs(dev); /* Save non transmitted skb's */ - if (lp->params.autosense & ~AUTO) { - srom_map_media(dev); /* Fixed media requested */ - if (lp->media != lp->params.autosense) { - lp->tcount++; - lp->media = INIT; - return next_tick; - } - lp->media = INIT; - } - } - if ((next_tick = de4x5_reset_phy(dev)) < 0) { - next_tick &= ~TIMER_CB; - } else { - if (lp->autosense == _100Mb) { - lp->media = _100Mb; - } else if (lp->autosense == _10Mb) { - lp->media = _10Mb; - } else if (lp->autosense == TP) { - lp->media = TP; - } else if (lp->autosense == BNC) { - lp->media = BNC; - } else if (lp->autosense == AUI) { - lp->media = AUI; - } else { - lp->media = SPD_DET; - if ((lp->infoblock_media == ANS) && - ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { - ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); - ana &= (lp->fdx ? 
~0 : ~MII_ANA_FDAM); - mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); - lp->media = ANS; - } - } - lp->local_state = 0; - next_tick = dc2114x_autoconf(dev); - } - break; - - case ANS: - switch (lp->local_state) { - case 0: - if (lp->timeout < 0) { - mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); - } - cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500); - if (cr < 0) { - next_tick = cr & ~TIMER_CB; - } else { - if (cr) { - lp->local_state = 0; - lp->media = SPD_DET; - } else { - lp->local_state++; - } - next_tick = dc2114x_autoconf(dev); - } - break; - - case 1: - sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000); - if (sr < 0) { - next_tick = sr & ~TIMER_CB; - } else { - lp->media = SPD_DET; - lp->local_state = 0; - if (sr) { /* Success! */ - lp->tmp = MII_SR_ASSC; - anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); - ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); - if (!(anlpa & MII_ANLPA_RF) && - (cap = anlpa & MII_ANLPA_TAF & ana)) { - if (cap & MII_ANA_100M) { - lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0; - lp->media = _100Mb; - } else if (cap & MII_ANA_10M) { - lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0; - lp->media = _10Mb; - } - } - } /* Auto Negotiation failed to finish */ - next_tick = dc2114x_autoconf(dev); - } /* Auto Negotiation failed to start */ - break; - } - break; - - case AUI: - if (!lp->tx_enable) { - if (lp->timeout < 0) { - omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */ - outl(omr & ~OMR_FDX, DE4X5_OMR); - } - irqs = 0; - irq_mask = 0; - sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000); - if (sts < 0) { - next_tick = sts & ~TIMER_CB; - } else { - if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) { - lp->media = BNC; - next_tick = dc2114x_autoconf(dev); - } else { - lp->local_state = 1; - de4x5_init_connection(dev); - } - } - } else if (!lp->linkOK && (lp->autosense == AUTO)) { - lp->media = AUI_SUSPECT; - next_tick = 3000; - } - break; - - case AUI_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf); - break; - - case BNC: - switch (lp->local_state) { - case 0: - if (lp->timeout < 0) { - omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */ - outl(omr & ~OMR_FDX, DE4X5_OMR); - } - irqs = 0; - irq_mask = 0; - sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000); - if (sts < 0) { - next_tick = sts & ~TIMER_CB; - } else { - lp->local_state++; /* Ensure media connected */ - next_tick = dc2114x_autoconf(dev); - } - break; - - case 1: - if (!lp->tx_enable) { - if ((sts = ping_media(dev, 3000)) < 0) { - next_tick = sts & ~TIMER_CB; - } else { - if (sts) { - lp->local_state = 0; - lp->tcount++; - lp->media = INIT; - } else { - de4x5_init_connection(dev); - } - } - } else if (!lp->linkOK && (lp->autosense == AUTO)) { - lp->media = BNC_SUSPECT; - next_tick = 3000; - } - break; - } - break; - - case BNC_SUSPECT: - next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf); - break; - - case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ - if (srom_map_media(dev) < 0) { - lp->tcount++; - lp->media = INIT; - return next_tick; - } - if (lp->media == _100Mb) { - if ((slnk = test_for_100Mb(dev, 6500)) < 0) { - lp->media = SPD_DET; - return slnk & ~TIMER_CB; - } - } else { - if (wait_for_link(dev) < 0) { - lp->media = SPD_DET; - return PDET_LINK_WAIT; - } - } - if (lp->media == ANS) { /* Do MII parallel detection */ - if (is_spd_100(dev)) { - lp->media = _100Mb; - } else { - lp->media = _10Mb; - } - 
next_tick = dc2114x_autoconf(dev); - } else if (((lp->media == _100Mb) && is_100_up(dev)) || - (((lp->media == _10Mb) || (lp->media == TP) || - (lp->media == BNC) || (lp->media == AUI)) && - is_10_up(dev))) { - next_tick = dc2114x_autoconf(dev); - } else { - lp->tcount++; - lp->media = INIT; - } - break; - - case _10Mb: - next_tick = 3000; - if (!lp->tx_enable) { - SET_10Mb; - de4x5_init_connection(dev); - } else { - if (!lp->linkOK && (lp->autosense == AUTO)) { - if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) { - lp->media = INIT; - lp->tcount++; - next_tick = DE4X5_AUTOSENSE_MS; - } - } - } - break; - - case _100Mb: - next_tick = 3000; - if (!lp->tx_enable) { - SET_100Mb; - de4x5_init_connection(dev); - } else { - if (!lp->linkOK && (lp->autosense == AUTO)) { - if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) { - lp->media = INIT; - lp->tcount++; - next_tick = DE4X5_AUTOSENSE_MS; - } - } - } - break; - - default: - lp->tcount++; -printk("Huh?: media:%02x\n", lp->media); - lp->media = INIT; - break; - } - - return next_tick; -} - -static int -srom_autoconf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - - return lp->infoleaf_fn(dev); -} - -/* -** This mapping keeps the original media codes and FDX flag unchanged. -** While it isn't strictly necessary, it helps me for the moment... -** The early return avoids a media state / SROM media space clash. -*/ -static int -srom_map_media(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - - lp->fdx = false; - if (lp->infoblock_media == lp->media) - return 0; - - switch(lp->infoblock_media) { - case SROM_10BASETF: - if (!lp->params.fdx) return -1; - lp->fdx = true; - fallthrough; - - case SROM_10BASET: - if (lp->params.fdx && !lp->fdx) return -1; - if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) { - lp->media = _10Mb; - } else { - lp->media = TP; - } - break; - - case SROM_10BASE2: - lp->media = BNC; - break; - - case SROM_10BASE5: - lp->media = AUI; - break; - - case SROM_100BASETF: - if (!lp->params.fdx) return -1; - lp->fdx = true; - fallthrough; - - case SROM_100BASET: - if (lp->params.fdx && !lp->fdx) return -1; - lp->media = _100Mb; - break; - - case SROM_100BASET4: - lp->media = _100Mb; - break; - - case SROM_100BASEFF: - if (!lp->params.fdx) return -1; - lp->fdx = true; - fallthrough; - - case SROM_100BASEF: - if (lp->params.fdx && !lp->fdx) return -1; - lp->media = _100Mb; - break; - - case ANS: - lp->media = ANS; - lp->fdx = lp->params.fdx; - break; - - default: - printk("%s: Bad media code [%d] detected in SROM!\n", dev->name, - lp->infoblock_media); - return -1; - } - - return 0; -} - -static void -de4x5_init_connection(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - u_long flags = 0; - - if (lp->media != lp->c_media) { - de4x5_dbg_media(dev); - lp->c_media = lp->media; /* Stop scrolling media messages */ - } - - spin_lock_irqsave(&lp->lock, flags); - de4x5_rst_desc_ring(dev); - de4x5_setup_intr(dev); - lp->tx_enable = true; - spin_unlock_irqrestore(&lp->lock, flags); - outl(POLL_DEMAND, DE4X5_TPD); - - netif_wake_queue(dev); -} - -/* -** General PHY reset function. Some MII devices don't reset correctly -** since their MII address pins can float at voltages that are dependent -** on the signal pin use. Do a double reset to ensure a reset. 
-*/ -static int -de4x5_reset_phy(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int next_tick = 0; - - if ((lp->useSROM) || (lp->phy[lp->active].id)) { - if (lp->timeout < 0) { - if (lp->useSROM) { - if (lp->phy[lp->active].rst) { - srom_exec(dev, lp->phy[lp->active].rst); - srom_exec(dev, lp->phy[lp->active].rst); - } else if (lp->rst) { /* Type 5 infoblock reset */ - srom_exec(dev, lp->rst); - srom_exec(dev, lp->rst); - } - } else { - PHY_HARD_RESET; - } - if (lp->useMII) { - mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); - } - } - if (lp->useMII) { - next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500); - } - } else if (lp->chipset == DC21140) { - PHY_HARD_RESET; - } - - return next_tick; -} - -static int -test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 sts, csr12; - - if (lp->timeout < 0) { - lp->timeout = msec/100; - if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */ - reset_init_sia(dev, csr13, csr14, csr15); - } - - /* set up the interrupt mask */ - outl(irq_mask, DE4X5_IMR); - - /* clear all pending interrupts */ - sts = inl(DE4X5_STS); - outl(sts, DE4X5_STS); - - /* clear csr12 NRA and SRA bits */ - if ((lp->chipset == DC21041) || lp->useSROM) { - csr12 = inl(DE4X5_SISR); - outl(csr12, DE4X5_SISR); - } - } - - sts = inl(DE4X5_STS) & ~TIMER_CB; - - if (!(sts & irqs) && --lp->timeout) { - sts = 100 | TIMER_CB; - } else { - lp->timeout = -1; - } - - return sts; -} - -static int -test_tp(struct net_device *dev, s32 msec) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int sisr; - - if (lp->timeout < 0) { - lp->timeout = msec/100; - } - - sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR); - - if (sisr && --lp->timeout) { - sisr = 100 | TIMER_CB; - } else { - lp->timeout = -1; - } - - return sisr; -} - -/* -** Samples the 100Mb Link State Signal. The sample interval is important -** because too fast a rate can give erroneous results and confuse the -** speed sense algorithm. -*/ -#define SAMPLE_INTERVAL 500 /* ms */ -#define SAMPLE_DELAY 2000 /* ms */ -static int -test_for_100Mb(struct net_device *dev, int msec) -{ - struct de4x5_private *lp = netdev_priv(dev); - int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? 
-1 :GEP_SLNK); - - if (lp->timeout < 0) { - if ((msec/SAMPLE_INTERVAL) <= 0) return 0; - if (msec > SAMPLE_DELAY) { - lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL; - gep = SAMPLE_DELAY | TIMER_CB; - return gep; - } else { - lp->timeout = msec/SAMPLE_INTERVAL; - } - } - - if (lp->phy[lp->active].id || lp->useSROM) { - gep = is_100_up(dev) | is_spd_100(dev); - } else { - gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP)); - } - if (!(gep & ret) && --lp->timeout) { - gep = SAMPLE_INTERVAL | TIMER_CB; - } else { - lp->timeout = -1; - } - - return gep; -} - -static int -wait_for_link(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - - if (lp->timeout < 0) { - lp->timeout = 1; - } - - if (lp->timeout--) { - return TIMER_CB; - } else { - lp->timeout = -1; - } - - return 0; -} - -/* -** -** -*/ -static int -test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec) -{ - struct de4x5_private *lp = netdev_priv(dev); - int test; - u_long iobase = dev->base_addr; - - if (lp->timeout < 0) { - lp->timeout = msec/100; - } - - reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask; - test = (reg ^ (pol ? ~0 : 0)) & mask; - - if (test && --lp->timeout) { - reg = 100 | TIMER_CB; - } else { - lp->timeout = -1; - } - - return reg; -} - -static int -is_spd_100(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int spd; - - if (lp->useMII) { - spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII); - spd = ~(spd ^ lp->phy[lp->active].spd.value); - spd &= lp->phy[lp->active].spd.mask; - } else if (!lp->useSROM) { /* de500-xa */ - spd = ((~gep_rd(dev)) & GEP_SLNK); - } else { - if ((lp->ibn == 2) || !lp->asBitValid) - return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0; - - spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) | - (lp->linkOK & ~lp->asBitValid); - } - - return spd; -} - -static int -is_100_up(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - if (lp->useMII) { - /* Double read for sticky bits & temporary drops */ - mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); - return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS; - } else if (!lp->useSROM) { /* de500-xa */ - return (~gep_rd(dev)) & GEP_SLNK; - } else { - if ((lp->ibn == 2) || !lp->asBitValid) - return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0; - - return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | - (lp->linkOK & ~lp->asBitValid); - } -} - -static int -is_10_up(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - if (lp->useMII) { - /* Double read for sticky bits & temporary drops */ - mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); - return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS; - } else if (!lp->useSROM) { /* de500-xa */ - return (~gep_rd(dev)) & GEP_LNP; - } else { - if ((lp->ibn == 2) || !lp->asBitValid) - return ((lp->chipset & ~0x00ff) == DC2114x) ? 
- (~inl(DE4X5_SISR)&SISR_LS10): - 0; - - return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | - (lp->linkOK & ~lp->asBitValid); - } -} - -static int -is_anc_capable(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { - return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); - } else if ((lp->chipset & ~0x00ff) == DC2114x) { - return (inl(DE4X5_SISR) & SISR_LPN) >> 12; - } else { - return 0; - } -} - -/* -** Send a packet onto the media and watch for send errors that indicate the -** media is bad or unconnected. -*/ -static int -ping_media(struct net_device *dev, int msec) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int sisr; - - if (lp->timeout < 0) { - lp->timeout = msec/100; - - lp->tmp = lp->tx_new; /* Remember the ring position */ - load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1); - lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; - outl(POLL_DEMAND, DE4X5_TPD); - } - - sisr = inl(DE4X5_SISR); - - if ((!(sisr & SISR_NCR)) && - ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) && - (--lp->timeout)) { - sisr = 100 | TIMER_CB; - } else { - if ((!(sisr & SISR_NCR)) && - !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) && - lp->timeout) { - sisr = 0; - } else { - sisr = 1; - } - lp->timeout = -1; - } - - return sisr; -} - -/* -** This function does 2 things: on Intels it kmalloc's another buffer to -** replace the one about to be passed up. On Alpha's it kmallocs a buffer -** into which the packet is copied. -*/ -static struct sk_buff * -de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) -{ - struct de4x5_private *lp = netdev_priv(dev); - struct sk_buff *p; - -#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) - struct sk_buff *ret; - u_long i=0, tmp; - - p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2); - if (!p) return NULL; - - tmp = virt_to_bus(p->data); - i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; - skb_reserve(p, i); - lp->rx_ring[index].buf = cpu_to_le32(tmp + i); - - ret = lp->rx_skb[index]; - lp->rx_skb[index] = p; - - if ((u_long) ret > 1) { - skb_put(ret, len); - } - - return ret; - -#else - if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */ - - p = netdev_alloc_skb(dev, len + 2); - if (!p) return NULL; - - skb_reserve(p, 2); /* Align */ - if (index < lp->rx_old) { /* Wrapped buffer */ - short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; - skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen); - skb_put_data(p, lp->rx_bufs, len - tlen); - } else { /* Linear buffer */ - skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len); - } - - return p; -#endif -} - -static void -de4x5_free_rx_buffs(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - int i; - - for (i=0; i<lp->rxRingSize; i++) { - if ((u_long) lp->rx_skb[i] > 1) { - dev_kfree_skb(lp->rx_skb[i]); - } - lp->rx_ring[i].status = 0; - lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */ - } -} - -static void -de4x5_free_tx_buffs(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - int i; - - for (i=0; i<lp->txRingSize; i++) { - if (lp->tx_skb[i]) - de4x5_free_tx_buff(lp, i); - lp->tx_ring[i].status = 0; - } - - /* Unload the locally queued packets */ - __skb_queue_purge(&lp->cache.queue); -} - -/* -** When a user pulls a connection, the DECchip
can end up in a -** 'running - waiting for end of transmission' state. This means that we -** have to perform a chip soft reset to ensure that we can synchronize -** the hardware and software and make any media probes using a loopback -** packet meaningful. -*/ -static void -de4x5_save_skbs(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 omr; - - if (!lp->cache.save_cnt) { - STOP_DE4X5; - de4x5_tx(dev); /* Flush any sent skb's */ - de4x5_free_tx_buffs(dev); - de4x5_cache_state(dev, DE4X5_SAVE_STATE); - de4x5_sw_reset(dev); - de4x5_cache_state(dev, DE4X5_RESTORE_STATE); - lp->cache.save_cnt++; - START_DE4X5; - } -} - -static void -de4x5_rst_desc_ring(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - int i; - s32 omr; - - if (lp->cache.save_cnt) { - STOP_DE4X5; - outl(lp->dma_rings, DE4X5_RRBA); - outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), - DE4X5_TRBA); - - lp->rx_new = lp->rx_old = 0; - lp->tx_new = lp->tx_old = 0; - - for (i = 0; i < lp->rxRingSize; i++) { - lp->rx_ring[i].status = cpu_to_le32(R_OWN); - } - - for (i = 0; i < lp->txRingSize; i++) { - lp->tx_ring[i].status = cpu_to_le32(0); - } - - barrier(); - lp->cache.save_cnt--; - START_DE4X5; - } -} - -static void -de4x5_cache_state(struct net_device *dev, int flag) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - switch(flag) { - case DE4X5_SAVE_STATE: - lp->cache.csr0 = inl(DE4X5_BMR); - lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR)); - lp->cache.csr7 = inl(DE4X5_IMR); - break; - - case DE4X5_RESTORE_STATE: - outl(lp->cache.csr0, DE4X5_BMR); - outl(lp->cache.csr6, DE4X5_OMR); - outl(lp->cache.csr7, DE4X5_IMR); - if (lp->chipset == DC21140) { - gep_wr(lp->cache.gepc, dev); - gep_wr(lp->cache.gep, dev); - } else { - reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, - lp->cache.csr15); - } - break; - } -} - -static void -de4x5_put_cache(struct net_device *dev, struct sk_buff *skb) -{ - struct de4x5_private *lp = netdev_priv(dev); - - __skb_queue_tail(&lp->cache.queue, skb); -} - -static void -de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb) -{ - struct de4x5_private *lp = netdev_priv(dev); - - __skb_queue_head(&lp->cache.queue, skb); -} - -static struct sk_buff * -de4x5_get_cache(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - - return __skb_dequeue(&lp->cache.queue); -} - -/* -** Check the Auto Negotiation State. Return OK when a link pass interrupt -** is received and the auto-negotiation status is NWAY OK. 
-*/ -static int -test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 sts, ans; - - if (lp->timeout < 0) { - lp->timeout = msec/100; - outl(irq_mask, DE4X5_IMR); - - /* clear all pending interrupts */ - sts = inl(DE4X5_STS); - outl(sts, DE4X5_STS); - } - - ans = inl(DE4X5_SISR) & SISR_ANS; - sts = inl(DE4X5_STS) & ~TIMER_CB; - - if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) { - sts = 100 | TIMER_CB; - } else { - lp->timeout = -1; - } - - return sts; -} - -static void -de4x5_setup_intr(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 imr, sts; - - if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */ - imr = 0; - UNMASK_IRQs; - sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */ - outl(sts, DE4X5_STS); - ENABLE_IRQs; - } -} - -/* -** -*/ -static void -reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - RESET_SIA; - if (lp->useSROM) { - if (lp->ibn == 3) { - srom_exec(dev, lp->phy[lp->active].rst); - srom_exec(dev, lp->phy[lp->active].gep); - outl(1, DE4X5_SICR); - return; - } else { - csr15 = lp->cache.csr15; - csr14 = lp->cache.csr14; - csr13 = lp->cache.csr13; - outl(csr15 | lp->cache.gepc, DE4X5_SIGR); - outl(csr15 | lp->cache.gep, DE4X5_SIGR); - } - } else { - outl(csr15, DE4X5_SIGR); - } - outl(csr14, DE4X5_STRR); - outl(csr13, DE4X5_SICR); - - mdelay(10); -} - -/* -** Create a loopback ethernet packet -*/ -static void -create_packet(struct net_device *dev, char *frame, int len) -{ - int i; - char *buf = frame; - - for (i=0; i<ETH_ALEN; i++) { - *buf++ = dev->dev_addr[i]; - } - for (i=0; i<ETH_ALEN; i++) { - *buf++ = dev->dev_addr[i]; - } - - *buf++ = 0; /* Packet length (2 bytes) */ - *buf++ = 1; -} - -/* -** Look for a particular board name in the EISA configuration space -*/ -static int -EISA_signature(char *name, struct device *device) -{ - int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); - struct eisa_device *edev; - - *name = '\0'; - edev = to_eisa_device (device); - i = edev->id.driver_data; - - if (i >= 0 && i < siglen) { - strcpy (name, de4x5_signatures[i]); - status = 1; - } - - return status; /* return the device name string */ -} - -/* -** Look for a particular board name in the PCI configuration space -*/ -static void -PCI_signature(char *name, struct de4x5_private *lp) -{ - int i, siglen = ARRAY_SIZE(de4x5_signatures); - - if (lp->chipset == DC21040) { - strcpy(name, "DE434/5"); - return; - } else { /* Search for a DEC name in the SROM */ - int tmp = *((char *)&lp->srom + 19) * 3; - strncpy(name, (char *)&lp->srom + 26 + tmp, 8); - } - name[8] = '\0'; - for (i=0; i<siglen; i++) { - if (strstr(name, de4x5_signatures[i]) != NULL) break; - } - if (i == siglen) { - if (dec_only) { - *name = '\0'; - } else { /* Use chip name to avoid confusion */ - strcpy(name, (((lp->chipset == DC21040) ? "DC21040" : - ((lp->chipset == DC21041) ? "DC21041" : - ((lp->chipset == DC21140) ? "DC21140" : - ((lp->chipset == DC21142) ? "DC21142" : - ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN" - ))))))); - } - if (lp->chipset != DC21041) { - lp->useSROM = true; /* card is not recognisably DEC */ - } - } else if ((lp->chipset & ~0x00ff) == DC2114x) { - lp->useSROM = true; - } -} - -/* -** Set up the Ethernet PROM counter to the start of the Ethernet address on -** the DC21040, else read the SROM for the other chips. -** The SROM may not be present in a multi-MAC card, so first read the -** MAC address and check for a bad address.
If there is a bad one then exit -** immediately with the prior srom contents intact (the h/w address will -** be fixed up later). -*/ -static void -DevicePresent(struct net_device *dev, u_long aprom_addr) -{ - int i, j=0; - struct de4x5_private *lp = netdev_priv(dev); - - if (lp->chipset == DC21040) { - if (lp->bus == EISA) { - enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */ - } else { - outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */ - } - } else { /* Read new srom */ - u_short tmp; - __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD); - for (i=0; i<(ETH_ALEN>>1); i++) { - tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i); - j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */ - *p = cpu_to_le16(tmp); - } - if (j == 0 || j == 3 * 0xffff) { - /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */ - return; - } - - p = (__le16 *)&lp->srom; - for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) { - tmp = srom_rd(aprom_addr, i); - *p++ = cpu_to_le16(tmp); - } - de4x5_dbg_srom(&lp->srom); - } -} - -/* -** Since the write on the Enet PROM register doesn't seem to reset the PROM -** pointer correctly (at least on my DE425 EISA card), this routine should do -** it...from depca.c. -*/ -static void -enet_addr_rst(u_long aprom_addr) -{ - union { - struct { - u32 a; - u32 b; - } llsig; - char Sig[sizeof(u32) << 1]; - } dev; - short sigLength=0; - s8 data; - int i, j; - - dev.llsig.a = ETH_PROM_SIG; - dev.llsig.b = ETH_PROM_SIG; - sigLength = sizeof(u32) << 1; - - for (i=0,j=0;jbase_addr; - int broken, i, k, tmp, status = 0; - u_short j,chksum; - struct de4x5_private *lp = netdev_priv(dev); - u8 addr[ETH_ALEN]; - - broken = de4x5_bad_srom(lp); - - for (i=0,k=0,j=0;j<3;j++) { - k <<= 1; - if (k > 0xffff) k-=0xffff; - - if (lp->bus == PCI) { - if (lp->chipset == DC21040) { - while ((tmp = inl(DE4X5_APROM)) < 0); - k += (u_char) tmp; - addr[i++] = (u_char) tmp; - while ((tmp = inl(DE4X5_APROM)) < 0); - k += (u_short) (tmp << 8); - addr[i++] = (u_char) tmp; - } else if (!broken) { - addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; - addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; - } else if ((broken == SMC) || (broken == ACCTON)) { - addr[i] = *((u_char *)&lp->srom + i); i++; - addr[i] = *((u_char *)&lp->srom + i); i++; - } - } else { - k += (u_char) (tmp = inb(EISA_APROM)); - addr[i++] = (u_char) tmp; - k += (u_short) ((tmp = inb(EISA_APROM)) << 8); - addr[i++] = (u_char) tmp; - } - - if (k > 0xffff) k-=0xffff; - } - if (k == 0xffff) k=0; - - eth_hw_addr_set(dev, addr); - - if (lp->bus == PCI) { - if (lp->chipset == DC21040) { - while ((tmp = inl(DE4X5_APROM)) < 0); - chksum = (u_char) tmp; - while ((tmp = inl(DE4X5_APROM)) < 0); - chksum |= (u_short) (tmp << 8); - if ((k != chksum) && (dec_only)) status = -1; - } - } else { - chksum = (u_char) inb(EISA_APROM); - chksum |= (u_short) (inb(EISA_APROM) << 8); - if ((k != chksum) && (dec_only)) status = -1; - } - - /* If possible, try to fix a broken card - SMC only so far */ - srom_repair(dev, broken); - -#ifdef CONFIG_PPC_PMAC - /* - ** If the address starts with 00 a0, we have to bit-reverse - ** each byte of the address. 
- */ - if ( machine_is(powermac) && - (dev->dev_addr[0] == 0) && - (dev->dev_addr[1] == 0xa0) ) - { - for (i = 0; i < ETH_ALEN; ++i) - { - int x = dev->dev_addr[i]; - x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4); - x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2); - addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); - } - eth_hw_addr_set(dev, addr); - } -#endif /* CONFIG_PPC_PMAC */ - - /* Test for a bad enet address */ - status = test_bad_enet(dev, status); - - return status; -} - -/* -** Test for enet addresses in the first 32 bytes. -*/ -static int -de4x5_bad_srom(struct de4x5_private *lp) -{ - int i, status = 0; - - for (i = 0; i < ARRAY_SIZE(enet_det); i++) { - if (!memcmp(&lp->srom, &enet_det[i], 3) && - !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) { - if (i == 0) { - status = SMC; - } else if (i == 1) { - status = ACCTON; - } - break; - } - } - - return status; -} - -static void -srom_repair(struct net_device *dev, int card) -{ - struct de4x5_private *lp = netdev_priv(dev); - - switch(card) { - case SMC: - memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom)); - memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN); - memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100); - lp->useSROM = true; - break; - } -} - -/* -** Assume that the irq's do not follow the PCI spec - this is seems -** to be true so far (2 for 2). -*/ -static int -test_bad_enet(struct net_device *dev, int status) -{ - struct de4x5_private *lp = netdev_priv(dev); - int i, tmp; - - for (tmp=0,i=0; idev_addr[i]; - if ((tmp == 0) || (tmp == 0x5fa)) { - if ((lp->chipset == last.chipset) && - (lp->bus_num == last.bus) && (lp->bus_num > 0)) { - eth_addr_inc(last.addr); - eth_hw_addr_set(dev, last.addr); - - if (!an_exception(lp)) { - dev->irq = last.irq; - } - - status = 0; - } - } else if (!status) { - last.chipset = lp->chipset; - last.bus = lp->bus_num; - last.irq = dev->irq; - for (i=0; idev_addr[i]; - } - - return status; -} - -/* -** List of board exceptions with correctly wired IRQs -*/ -static int -an_exception(struct de4x5_private *lp) -{ - if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) && - (*(u_short *)lp->srom.sub_system_id == 0x95e0)) { - return -1; - } - - return 0; -} - -/* -** SROM Read -*/ -static short -srom_rd(u_long addr, u_char offset) -{ - sendto_srom(SROM_RD | SROM_SR, addr); - - srom_latch(SROM_RD | SROM_SR | DT_CS, addr); - srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr); - srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset); - - return srom_data(SROM_RD | SROM_SR | DT_CS, addr); -} - -static void -srom_latch(u_int command, u_long addr) -{ - sendto_srom(command, addr); - sendto_srom(command | DT_CLK, addr); - sendto_srom(command, addr); -} - -static void -srom_command(u_int command, u_long addr) -{ - srom_latch(command, addr); - srom_latch(command, addr); - srom_latch((command & 0x0000ff00) | DT_CS, addr); -} - -static void -srom_address(u_int command, u_long addr, u_char offset) -{ - int i, a; - - a = offset << 2; - for (i=0; i<6; i++, a <<= 1) { - srom_latch(command | ((a & 0x80) ? 
DT_IN : 0), addr);
-    }
-    udelay(1);
-
-    i = (getfrom_srom(addr) >> 3) & 0x01;
-}
-
-static short
-srom_data(u_int command, u_long addr)
-{
-    int i;
-    short word = 0;
-    s32 tmp;
-
-    for (i=0; i<16; i++) {
-        sendto_srom(command | DT_CLK, addr);
-        tmp = getfrom_srom(addr);
-        sendto_srom(command, addr);
-
-        word = (word << 1) | ((tmp >> 3) & 0x01);
-    }
-
-    sendto_srom(command & 0x0000ff00, addr);
-
-    return word;
-}
-
-/*
-static void
-srom_busy(u_int command, u_long addr)
-{
-    sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-
-    while (!((getfrom_srom(addr) >> 3) & 0x01)) {
-        mdelay(1);
-    }
-
-    sendto_srom(command & 0x0000ff00, addr);
-}
-*/
-
-static void
-sendto_srom(u_int command, u_long addr)
-{
-    outl(command, addr);
-    udelay(1);
-}
-
-static int
-getfrom_srom(u_long addr)
-{
-    s32 tmp;
-
-    tmp = inl(addr);
-    udelay(1);
-
-    return tmp;
-}
-
-static int
-srom_infoleaf_info(struct net_device *dev)
-{
-    struct de4x5_private *lp = netdev_priv(dev);
-    int i, count;
-    u_char *p;
-
-    /* Find the infoleaf decoder function that matches this chipset */
-    for (i=0; i<INFOLEAF_SIZE; i++) {
-        if (lp->chipset == infoleaf_array[i].chipset) break;
-    }
-    if (i == INFOLEAF_SIZE) {
-        lp->useSROM = false;
-        printk("%s: Cannot find correct chipset for SROM decoding!\n",
-               dev->name);
-        return -ENXIO;
-    }
-
-    lp->infoleaf_fn = infoleaf_array[i].fn;
-
-    /* Find the information offset that this function should use */
-    count = *((u_char *)&lp->srom + 19);
-    p = (u_char *)&lp->srom + 26;
-
-    if (count > 1) {
-        for (i=count; i; --i, p+=3) {
-            if (lp->device == *p) break;
-        }
-        if (i == 0) {
-            lp->useSROM = false;
-            printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
-                   dev->name, lp->device);
-            return -ENXIO;
-        }
-    }
-
-    lp->infoleaf_offset = get_unaligned_le16(p + 1);
-
-    return 0;
-}
-
-/*
-** This routine loads any type 1 or 3 MII info into the mii device
-** struct and executes any type 5 code to reset PHY devices for this
-** controller.
-** The info for the MII devices will be valid since the index used
-** will follow the discovery process from MII address 1-31 then 0.
-*/
-static void
-srom_init(struct net_device *dev)
-{
-    struct de4x5_private *lp = netdev_priv(dev);
-    u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
-    u_char count;
-
-    p+=2;
-    if (lp->chipset == DC21140) {
-        lp->cache.gepc = (*p++ | GEP_CTRL);
-        gep_wr(lp->cache.gepc, dev);
-    }
-
-    /* Block count */
-    count = *p++;
-
-    /* Jump the infoblocks to find types */
-    for (;count; --count) {
-        if (*p < 128) {
-            p += COMPACT_LEN;
-        } else if (*(p+1) == 5) {
-            type5_infoblock(dev, 1, p);
-            p += ((*p & BLOCK_LEN) + 1);
-        } else if (*(p+1) == 4) {
-            p += ((*p & BLOCK_LEN) + 1);
-        } else if (*(p+1) == 3) {
-            type3_infoblock(dev, 1, p);
-            p += ((*p & BLOCK_LEN) + 1);
-        } else if (*(p+1) == 2) {
-            p += ((*p & BLOCK_LEN) + 1);
-        } else if (*(p+1) == 1) {
-            type1_infoblock(dev, 1, p);
-            p += ((*p & BLOCK_LEN) + 1);
-        } else {
-            p += ((*p & BLOCK_LEN) + 1);
-        }
-    }
-}
-
-/*
-** A generic routine that writes GEP control, data and reset information
-** to the GEP register (21140) or csr15 GEP portion (2114[23]).
-*/
-static void
-srom_exec(struct net_device *dev, u_char *p)
-{
-    struct de4x5_private *lp = netdev_priv(dev);
-    u_long iobase = dev->base_addr;
-    u_char count = (p ? *p++ : 0);
-    u_short *w = (u_short *)p;
-
-    if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
-
-    if (lp->chipset != DC21140) RESET_SIA;
-
-    while (count--) {
-        gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
- *p++ : get_unaligned_le16(w++)), dev); - mdelay(2); /* 2ms per action */ - } - - if (lp->chipset != DC21140) { - outl(lp->cache.csr14, DE4X5_STRR); - outl(lp->cache.csr13, DE4X5_SICR); - } -} - -/* -** Basically this function is a NOP since it will never be called, -** unless I implement the DC21041 SROM functions. There's no need -** since the existing code will be satisfactory for all boards. -*/ -static int -dc21041_infoleaf(struct net_device *dev) -{ - return DE4X5_AUTOSENSE_MS; -} - -static int -dc21140_infoleaf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char count = 0; - u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; - int next_tick = DE4X5_AUTOSENSE_MS; - - /* Read the connection type */ - p+=2; - - /* GEP control */ - lp->cache.gepc = (*p++ | GEP_CTRL); - - /* Block count */ - count = *p++; - - /* Recursively figure out the info blocks */ - if (*p < 128) { - next_tick = dc_infoblock[COMPACT](dev, count, p); - } else { - next_tick = dc_infoblock[*(p+1)](dev, count, p); - } - - if (lp->tcount == count) { - lp->media = NC; - if (lp->media != lp->c_media) { - de4x5_dbg_media(dev); - lp->c_media = lp->media; - } - lp->media = INIT; - lp->tcount = 0; - lp->tx_enable = false; - } - - return next_tick & ~TIMER_CB; -} - -static int -dc21142_infoleaf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char count = 0; - u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; - int next_tick = DE4X5_AUTOSENSE_MS; - - /* Read the connection type */ - p+=2; - - /* Block count */ - count = *p++; - - /* Recursively figure out the info blocks */ - if (*p < 128) { - next_tick = dc_infoblock[COMPACT](dev, count, p); - } else { - next_tick = dc_infoblock[*(p+1)](dev, count, p); - } - - if (lp->tcount == count) { - lp->media = NC; - if (lp->media != lp->c_media) { - de4x5_dbg_media(dev); - lp->c_media = lp->media; - } - lp->media = INIT; - lp->tcount = 0; - lp->tx_enable = false; - } - - return next_tick & ~TIMER_CB; -} - -static int -dc21143_infoleaf(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char count = 0; - u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; - int next_tick = DE4X5_AUTOSENSE_MS; - - /* Read the connection type */ - p+=2; - - /* Block count */ - count = *p++; - - /* Recursively figure out the info blocks */ - if (*p < 128) { - next_tick = dc_infoblock[COMPACT](dev, count, p); - } else { - next_tick = dc_infoblock[*(p+1)](dev, count, p); - } - if (lp->tcount == count) { - lp->media = NC; - if (lp->media != lp->c_media) { - de4x5_dbg_media(dev); - lp->c_media = lp->media; - } - lp->media = INIT; - lp->tcount = 0; - lp->tx_enable = false; - } - - return next_tick & ~TIMER_CB; -} - -/* -** The compact infoblock is only designed for DC21140[A] chips, so -** we'll reuse the dc21140m_autoconf function. Non MII media only. -*/ -static int -compact_infoblock(struct net_device *dev, u_char count, u_char *p) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char flags, csr6; - - /* Recursively figure out the info blocks */ - if (--count > lp->tcount) { - if (*(p+COMPACT_LEN) < 128) { - return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN); - } else { - return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN); - } - } - - if ((lp->media == INIT) && (lp->timeout < 0)) { - lp->ibn = COMPACT; - lp->active = 0; - gep_wr(lp->cache.gepc, dev); - lp->infoblock_media = (*p++) & COMPACT_MC; - lp->cache.gep = *p++; - csr6 = *p++; - flags = *p++; - - lp->asBitValid = (flags & 0x80) ? 
0 : -1; - lp->defMedium = (flags & 0x40) ? -1 : 0; - lp->asBit = 1 << ((csr6 >> 1) & 0x07); - lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; - lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); - lp->useMII = false; - - de4x5_switch_mac_port(dev); - } - - return dc21140m_autoconf(dev); -} - -/* -** This block describes non MII media for the DC21140[A] only. -*/ -static int -type0_infoblock(struct net_device *dev, u_char count, u_char *p) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char flags, csr6, len = (*p & BLOCK_LEN)+1; - - /* Recursively figure out the info blocks */ - if (--count > lp->tcount) { - if (*(p+len) < 128) { - return dc_infoblock[COMPACT](dev, count, p+len); - } else { - return dc_infoblock[*(p+len+1)](dev, count, p+len); - } - } - - if ((lp->media == INIT) && (lp->timeout < 0)) { - lp->ibn = 0; - lp->active = 0; - gep_wr(lp->cache.gepc, dev); - p+=2; - lp->infoblock_media = (*p++) & BLOCK0_MC; - lp->cache.gep = *p++; - csr6 = *p++; - flags = *p++; - - lp->asBitValid = (flags & 0x80) ? 0 : -1; - lp->defMedium = (flags & 0x40) ? -1 : 0; - lp->asBit = 1 << ((csr6 >> 1) & 0x07); - lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; - lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); - lp->useMII = false; - - de4x5_switch_mac_port(dev); - } - - return dc21140m_autoconf(dev); -} - -/* These functions are under construction! */ - -static int -type1_infoblock(struct net_device *dev, u_char count, u_char *p) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char len = (*p & BLOCK_LEN)+1; - - /* Recursively figure out the info blocks */ - if (--count > lp->tcount) { - if (*(p+len) < 128) { - return dc_infoblock[COMPACT](dev, count, p+len); - } else { - return dc_infoblock[*(p+len+1)](dev, count, p+len); - } - } - - p += 2; - if (lp->state == INITIALISED) { - lp->ibn = 1; - lp->active = *p++; - lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); - lp->phy[lp->active].rst = (*p ? 
p : NULL); p += (*p + 1); - lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; - lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; - lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; - lp->phy[lp->active].ttm = get_unaligned_le16(p); - return 0; - } else if ((lp->media == INIT) && (lp->timeout < 0)) { - lp->ibn = 1; - lp->active = *p; - lp->infoblock_csr6 = OMR_MII_100; - lp->useMII = true; - lp->infoblock_media = ANS; - - de4x5_switch_mac_port(dev); - } - - return dc21140m_autoconf(dev); -} - -static int -type2_infoblock(struct net_device *dev, u_char count, u_char *p) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char len = (*p & BLOCK_LEN)+1; - - /* Recursively figure out the info blocks */ - if (--count > lp->tcount) { - if (*(p+len) < 128) { - return dc_infoblock[COMPACT](dev, count, p+len); - } else { - return dc_infoblock[*(p+len+1)](dev, count, p+len); - } - } - - if ((lp->media == INIT) && (lp->timeout < 0)) { - lp->ibn = 2; - lp->active = 0; - p += 2; - lp->infoblock_media = (*p) & MEDIA_CODE; - - if ((*p++) & EXT_FIELD) { - lp->cache.csr13 = get_unaligned_le16(p); p += 2; - lp->cache.csr14 = get_unaligned_le16(p); p += 2; - lp->cache.csr15 = get_unaligned_le16(p); p += 2; - } else { - lp->cache.csr13 = CSR13; - lp->cache.csr14 = CSR14; - lp->cache.csr15 = CSR15; - } - lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; - lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); - lp->infoblock_csr6 = OMR_SIA; - lp->useMII = false; - - de4x5_switch_mac_port(dev); - } - - return dc2114x_autoconf(dev); -} - -static int -type3_infoblock(struct net_device *dev, u_char count, u_char *p) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char len = (*p & BLOCK_LEN)+1; - - /* Recursively figure out the info blocks */ - if (--count > lp->tcount) { - if (*(p+len) < 128) { - return dc_infoblock[COMPACT](dev, count, p+len); - } else { - return dc_infoblock[*(p+len+1)](dev, count, p+len); - } - } - - p += 2; - if (lp->state == INITIALISED) { - lp->ibn = 3; - lp->active = *p++; - if (MOTO_SROM_BUG) lp->active = 0; - /* if (MOTO_SROM_BUG) statement indicates lp->active could - * be 8 (i.e. the size of array lp->phy) */ - if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy))) - return -EINVAL; - lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); - lp->phy[lp->active].rst = (*p ? 
p : NULL); p += (2 * (*p) + 1); - lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; - lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; - lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; - lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2; - lp->phy[lp->active].mci = *p; - return 0; - } else if ((lp->media == INIT) && (lp->timeout < 0)) { - lp->ibn = 3; - lp->active = *p; - if (MOTO_SROM_BUG) lp->active = 0; - lp->infoblock_csr6 = OMR_MII_100; - lp->useMII = true; - lp->infoblock_media = ANS; - - de4x5_switch_mac_port(dev); - } - - return dc2114x_autoconf(dev); -} - -static int -type4_infoblock(struct net_device *dev, u_char count, u_char *p) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char flags, csr6, len = (*p & BLOCK_LEN)+1; - - /* Recursively figure out the info blocks */ - if (--count > lp->tcount) { - if (*(p+len) < 128) { - return dc_infoblock[COMPACT](dev, count, p+len); - } else { - return dc_infoblock[*(p+len+1)](dev, count, p+len); - } - } - - if ((lp->media == INIT) && (lp->timeout < 0)) { - lp->ibn = 4; - lp->active = 0; - p+=2; - lp->infoblock_media = (*p++) & MEDIA_CODE; - lp->cache.csr13 = CSR13; /* Hard coded defaults */ - lp->cache.csr14 = CSR14; - lp->cache.csr15 = CSR15; - lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; - lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2; - csr6 = *p++; - flags = *p++; - - lp->asBitValid = (flags & 0x80) ? 0 : -1; - lp->defMedium = (flags & 0x40) ? -1 : 0; - lp->asBit = 1 << ((csr6 >> 1) & 0x07); - lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; - lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); - lp->useMII = false; - - de4x5_switch_mac_port(dev); - } - - return dc2114x_autoconf(dev); -} - -/* -** This block type provides information for resetting external devices -** (chips) through the General Purpose Register. -*/ -static int -type5_infoblock(struct net_device *dev, u_char count, u_char *p) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_char len = (*p & BLOCK_LEN)+1; - - /* Recursively figure out the info blocks */ - if (--count > lp->tcount) { - if (*(p+len) < 128) { - return dc_infoblock[COMPACT](dev, count, p+len); - } else { - return dc_infoblock[*(p+len+1)](dev, count, p+len); - } - } - - /* Must be initializing to run this code */ - if ((lp->state == INITIALISED) || (lp->media == INIT)) { - p+=2; - lp->rst = p; - srom_exec(dev, lp->rst); - } - - return DE4X5_AUTOSENSE_MS; -} - -/* -** MII Read/Write -*/ - -static int -mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr) -{ - mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */ - mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */ - mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */ - mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ - mii_address(phyreg, ioaddr); /* PHY Register to read */ - mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */ - - return mii_rdata(ioaddr); /* Read data */ -} - -static void -mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr) -{ - mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... 
*/
-    mii_wdata(MII_PREAMBLE, 32, ioaddr);   /* ...continued */
-    mii_wdata(MII_STWR, 4, ioaddr);        /* SFD and Write operation */
-    mii_address(phyaddr, ioaddr);          /* PHY address to be accessed */
-    mii_address(phyreg, ioaddr);           /* PHY Register to write */
-    mii_ta(MII_STWR, ioaddr);              /* Turn around time - 2 MDC */
-    data = mii_swap(data, 16);             /* Swap data bit ordering */
-    mii_wdata(data, 16, ioaddr);           /* Write data */
-}
-
-static int
-mii_rdata(u_long ioaddr)
-{
-    int i;
-    s32 tmp = 0;
-
-    for (i=0; i<16; i++) {
-        tmp <<= 1;
-        tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
-    }
-
-    return tmp;
-}
-
-static void
-mii_wdata(int data, int len, u_long ioaddr)
-{
-    int i;
-
-    for (i=0; i<len; i++) {
-        sendto_mii(MII_MWR | MII_WR, data, ioaddr);
-        data >>= 1;
-    }
-}
-
-static void
-mii_address(u_char addr, u_long ioaddr)
-{
-    int i;
-
-    addr = mii_swap(addr, 5);
-    for (i=0; i<5; i++) {
-        sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
-        addr >>= 1;
-    }
-}
-
-static void
-mii_ta(u_long rw, u_long ioaddr)
-{
-    if (rw == MII_STWR) {
-        sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
-        sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
-    } else {
-        getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
-    }
-}
-
-static int
-mii_swap(int data, int len)
-{
-    int i, tmp = 0;
-
-    for (i=0; i<len; i++) {
-        tmp <<= 1;
-        tmp |= (data & 1);
-        data >>= 1;
-    }
-
-    return tmp;
-}
-
-static void
-sendto_mii(u32 command, int data, u_long ioaddr)
-{
-    u32 j;
-
-    j = (data & 1) << 17;
-    outl(command | j, ioaddr);
-    udelay(1);
-    outl(command | MII_MDC | j, ioaddr);
-    udelay(1);
-}
-
-static int
-getfrom_mii(u32 command, u_long ioaddr)
-{
-    outl(command, ioaddr);
-    udelay(1);
-    outl(command | MII_MDC, ioaddr);
-    udelay(1);
-
-    return (inl(ioaddr) >> 19) & 1;
-}
-
-/*
-** Here's 3 ways to calculate the OUI from the ID registers.
-*/
-static int
-mii_get_oui(u_char phyaddr, u_long ioaddr)
-{
-/*
-    union {
-        u_short reg;
-        u_char breg[2];
-    } a;
-    int i, r2, r3, ret=0;*/
-    int r2;
-
-    /* Read r2 and r3 */
-    r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
-    mii_rd(MII_ID1, phyaddr, ioaddr);
-        /* SEEQ and Cypress way * /
-        / * Shuffle r2 and r3 * /
-        a.reg=0;
-        r3 = ((r3>>10)|(r2<<6))&0x0ff;
-        r2 = ((r2>>2)&0x3fff);
-
-        / * Bit reverse r3 * /
-        for (i=0;i<8;i++) {
-            ret<<=1;
-            ret |= (r3&1);
-            r3>>=1;
-        }
-
-        / * Bit reverse r2 * /
-        for (i=0;i<16;i++) {
-            a.reg<<=1;
-            a.reg |= (r2&1);
-            r2>>=1;
-        }
-
-        / * Swap r2 bytes * /
-        i=a.breg[0];
-        a.breg[0]=a.breg[1];
-        a.breg[1]=i;
-
-        return (a.reg<<8)|ret; */              /* SEEQ and Cypress way */
-/* return (r2<<6)|(u_int)(r3>>10); */          /* NATIONAL and BROADCOM way */
-    return r2;                                 /* (I did it) My way */
-}
-
-/*
-** The SROM spec forces us to search addresses [1-31 0]. Bummer.
-*/
-static int
-mii_get_phy(struct net_device *dev)
-{
-    struct de4x5_private *lp = netdev_priv(dev);
-    u_long iobase = dev->base_addr;
-    int i, j, k, n, limit=ARRAY_SIZE(phy_info);
-    int id;
-
-    lp->active = 0;
-    lp->useMII = true;
-
-    /* Search the MII address space for possible PHY devices */
-    for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
-        lp->phy[lp->active].addr = i;
-        if (i==0) n++;                         /* Count cycles */
-        while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
-        id = mii_get_oui(i, DE4X5_MII);
-        if ((id == 0) || (id == 65535)) continue;  /* Valid ID?
*/ - for (j=0; jphy[k].id; k++); - if (k < DE4X5_MAX_PHY) { - memcpy((char *)&lp->phy[k], - (char *)&phy_info[j], sizeof(struct phy_table)); - lp->phy[k].addr = i; - lp->mii_cnt++; - lp->active++; - } else { - goto purgatory; /* Stop the search */ - } - break; - } - if ((j == limit) && (i < DE4X5_MAX_MII)) { - for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); - if (k < DE4X5_MAX_PHY) { - lp->phy[k].addr = i; - lp->phy[k].id = id; - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ - lp->mii_cnt++; - lp->active++; - printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); - j = de4x5_debug; - de4x5_debug |= DEBUG_MII; - de4x5_dbg_mii(dev, k); - de4x5_debug = j; - printk("\n"); - } else { - goto purgatory; - } - } - } - purgatory: - lp->active = 0; - if (lp->phy[0].id) { /* Reset the PHY devices */ - for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/ - mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); - while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); - - de4x5_dbg_mii(dev, k); - } - } - if (!lp->mii_cnt) lp->useMII = false; - - return lp->mii_cnt; -} - -static char * -build_setup_frame(struct net_device *dev, int mode) -{ - struct de4x5_private *lp = netdev_priv(dev); - int i; - char *pa = lp->setup_frame; - - /* Initialise the setup frame */ - if (mode == ALL) { - memset(lp->setup_frame, 0, SETUP_FRAME_LEN); - } - - if (lp->setup_f == HASH_PERF) { - for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; idev_addr[i]; /* Host address */ - if (i & 0x01) pa += 2; - } - *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80; - } else { - for (i=0; idev_addr[i]; - if (i & 0x01) pa += 4; - } - for (i=0; itimer); -} - -static long -de4x5_switch_mac_port(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - s32 omr; - - STOP_DE4X5; - - /* Assert the OMR_PS bit in CSR6 */ - omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | - OMR_FDX)); - omr |= lp->infoblock_csr6; - if (omr & OMR_PS) omr |= OMR_HBD; - outl(omr, DE4X5_OMR); - - /* Soft Reset */ - RESET_DE4X5; - - /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */ - if (lp->chipset == DC21140) { - gep_wr(lp->cache.gepc, dev); - gep_wr(lp->cache.gep, dev); - } else if ((lp->chipset & ~0x0ff) == DC2114x) { - reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15); - } - - /* Restore CSR6 */ - outl(omr, DE4X5_OMR); - - /* Reset CSR8 */ - inl(DE4X5_MFC); - - return omr; -} - -static void -gep_wr(s32 data, struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - if (lp->chipset == DC21140) { - outl(data, DE4X5_GEP); - } else if ((lp->chipset & ~0x00ff) == DC2114x) { - outl((data<<16) | lp->cache.csr15, DE4X5_SIGR); - } -} - -static int -gep_rd(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - if (lp->chipset == DC21140) { - return inl(DE4X5_GEP); - } else if ((lp->chipset & ~0x00ff) == DC2114x) { - return inl(DE4X5_SIGR) & 0x000fffff; - } - - return 0; -} - -static void -yawn(struct net_device *dev, int state) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return; - - if(lp->bus 
== EISA) { - switch(state) { - case WAKEUP: - outb(WAKEUP, PCI_CFPM); - mdelay(10); - break; - - case SNOOZE: - outb(SNOOZE, PCI_CFPM); - break; - - case SLEEP: - outl(0, DE4X5_SICR); - outb(SLEEP, PCI_CFPM); - break; - } - } else { - struct pci_dev *pdev = to_pci_dev (lp->gendev); - switch(state) { - case WAKEUP: - pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP); - mdelay(10); - break; - - case SNOOZE: - pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE); - break; - - case SLEEP: - outl(0, DE4X5_SICR); - pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP); - break; - } - } -} - -static void -de4x5_parse_params(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - char *p, *q, t; - - lp->params.fdx = false; - lp->params.autosense = AUTO; - - if (args == NULL) return; - - if ((p = strstr(args, dev->name))) { - if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p); - t = *q; - *q = '\0'; - - if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true; - - if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) { - if (strstr(p, "TP_NW")) { - lp->params.autosense = TP_NW; - } else if (strstr(p, "TP")) { - lp->params.autosense = TP; - } else if (strstr(p, "BNC_AUI")) { - lp->params.autosense = BNC; - } else if (strstr(p, "BNC")) { - lp->params.autosense = BNC; - } else if (strstr(p, "AUI")) { - lp->params.autosense = AUI; - } else if (strstr(p, "10Mb")) { - lp->params.autosense = _10Mb; - } else if (strstr(p, "100Mb")) { - lp->params.autosense = _100Mb; - } else if (strstr(p, "AUTO")) { - lp->params.autosense = AUTO; - } - } - *q = t; - } -} - -static void -de4x5_dbg_open(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - int i; - - if (de4x5_debug & DEBUG_OPEN) { - printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq); - printk("\tphysical address: %pM\n", dev->dev_addr); - printk("Descriptor head addresses:\n"); - printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring); - printk("Descriptor addresses:\nRX: "); - for (i=0;irxRingSize-1;i++){ - if (i < 3) { - printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status); - } - } - printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status); - printk("TX: "); - for (i=0;itxRingSize-1;i++){ - if (i < 3) { - printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status); - } - } - printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status); - printk("Descriptor buffers:\nRX: "); - for (i=0;irxRingSize-1;i++){ - if (i < 3) { - printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf)); - } - } - printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf)); - printk("TX: "); - for (i=0;itxRingSize-1;i++){ - if (i < 3) { - printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf)); - } - } - printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); - printk("Ring size:\nRX: %d\nTX: %d\n", - (short)lp->rxRingSize, - (short)lp->txRingSize); - } -} - -static void -de4x5_dbg_mii(struct net_device *dev, int k) -{ - struct de4x5_private *lp = netdev_priv(dev); - u_long iobase = dev->base_addr; - - if (de4x5_debug & DEBUG_MII) { - printk("\nMII device address: %d\n", lp->phy[k].addr); - printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII)); - printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII)); - printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII)); - printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII)); - if (lp->phy[k].id != BROADCOM_T4) { - printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII)); - printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII)); - } - 
printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII)); - if (lp->phy[k].id != BROADCOM_T4) { - printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII)); - printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII)); - } else { - printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII)); - } - } -} - -static void -de4x5_dbg_media(struct net_device *dev) -{ - struct de4x5_private *lp = netdev_priv(dev); - - if (lp->media != lp->c_media) { - if (de4x5_debug & DEBUG_MEDIA) { - printk("%s: media is %s%s\n", dev->name, - (lp->media == NC ? "unconnected, link down or incompatible connection" : - (lp->media == TP ? "TP" : - (lp->media == ANS ? "TP/Nway" : - (lp->media == BNC ? "BNC" : - (lp->media == AUI ? "AUI" : - (lp->media == BNC_AUI ? "BNC/AUI" : - (lp->media == EXT_SIA ? "EXT SIA" : - (lp->media == _100Mb ? "100Mb/s" : - (lp->media == _10Mb ? "10Mb/s" : - "???" - ))))))))), (lp->fdx?" full duplex.":".")); - } - lp->c_media = lp->media; - } -} - -static void -de4x5_dbg_srom(struct de4x5_srom *p) -{ - int i; - - if (de4x5_debug & DEBUG_SROM) { - printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id)); - printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id)); - printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc)); - printk("SROM version: %02x\n", (u_char)(p->version)); - printk("# controllers: %02x\n", (u_char)(p->num_controllers)); - - printk("Hardware Address: %pM\n", p->ieee_addr); - printk("CRC checksum: %04x\n", (u_short)(p->chksum)); - for (i=0; i<64; i++) { - printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i)); - } - } -} - -static void -de4x5_dbg_rx(struct sk_buff *skb, int len) -{ - int i, j; - - if (de4x5_debug & DEBUG_RX) { - printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n", - skb->data, &skb->data[6], - (u_char)skb->data[12], - (u_char)skb->data[13], - len); - for (j=0; len>0;j+=16, len-=16) { - printk(" %03x: ",j); - for (i=0; i<16 && idata[i+j]); - } - printk("\n"); - } - } -} - -/* -** Perform IOCTL call functions here. Some are privileged operations and the -** effective uid is checked in those cases. In the normal course of events -** this function is only used for my testing. -*/ -static int -de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) -{ - struct de4x5_private *lp = netdev_priv(dev); - struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru; - u_long iobase = dev->base_addr; - int i, j, status = 0; - s32 omr; - union { - u8 addr[144]; - u16 sval[72]; - u32 lval[36]; - } tmp; - u_long flags = 0; - - if (cmd != SIOCDEVPRIVATE || in_compat_syscall()) - return -EOPNOTSUPP; - - switch(ioc->cmd) { - case DE4X5_GET_HWADDR: /* Get the hardware address */ - ioc->len = ETH_ALEN; - for (i=0; idev_addr[i]; - } - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; - break; - - case DE4X5_SET_HWADDR: /* Set the hardware address */ - if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT; - if (netif_queue_stopped(dev)) - return -EBUSY; - netif_stop_queue(dev); - eth_hw_addr_set(dev, tmp.addr); - build_setup_frame(dev, PHYS_ADDR_ONLY); - /* Set up the descriptor and give ownership to the card */ - load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | - SETUP_FRAME_LEN, (struct sk_buff *)1); - lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; - outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ - netif_wake_queue(dev); /* Unlock the TX ring */ - break; - - case DE4X5_SAY_BOO: /* Say "Boo!" 
to the kernel log file */ - if (!capable(CAP_NET_ADMIN)) return -EPERM; - printk("%s: Boo!\n", dev->name); - break; - - case DE4X5_MCA_EN: /* Enable pass all multicast addressing */ - if (!capable(CAP_NET_ADMIN)) return -EPERM; - omr = inl(DE4X5_OMR); - omr |= OMR_PM; - outl(omr, DE4X5_OMR); - break; - - case DE4X5_GET_STATS: /* Get the driver statistics */ - { - struct pkt_stats statbuf; - ioc->len = sizeof(statbuf); - spin_lock_irqsave(&lp->lock, flags); - memcpy(&statbuf, &lp->pktStats, ioc->len); - spin_unlock_irqrestore(&lp->lock, flags); - if (copy_to_user(ioc->data, &statbuf, ioc->len)) - return -EFAULT; - break; - } - case DE4X5_CLR_STATS: /* Zero out the driver statistics */ - if (!capable(CAP_NET_ADMIN)) return -EPERM; - spin_lock_irqsave(&lp->lock, flags); - memset(&lp->pktStats, 0, sizeof(lp->pktStats)); - spin_unlock_irqrestore(&lp->lock, flags); - break; - - case DE4X5_GET_OMR: /* Get the OMR Register contents */ - tmp.addr[0] = inl(DE4X5_OMR); - if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT; - break; - - case DE4X5_SET_OMR: /* Set the OMR Register contents */ - if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT; - outl(tmp.addr[0], DE4X5_OMR); - break; - - case DE4X5_GET_REG: /* Get the DE4X5 Registers */ - j = 0; - tmp.lval[0] = inl(DE4X5_STS); j+=4; - tmp.lval[1] = inl(DE4X5_BMR); j+=4; - tmp.lval[2] = inl(DE4X5_IMR); j+=4; - tmp.lval[3] = inl(DE4X5_OMR); j+=4; - tmp.lval[4] = inl(DE4X5_SISR); j+=4; - tmp.lval[5] = inl(DE4X5_SICR); j+=4; - tmp.lval[6] = inl(DE4X5_STRR); j+=4; - tmp.lval[7] = inl(DE4X5_SIGR); j+=4; - ioc->len = j; - if (copy_to_user(ioc->data, tmp.lval, ioc->len)) - return -EFAULT; - break; - -#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ -/* - case DE4X5_DUMP: - j = 0; - tmp.addr[j++] = dev->irq; - for (i=0; idev_addr[i]; - } - tmp.addr[j++] = lp->rxRingSize; - tmp.lval[j>>2] = (long)lp->rx_ring; j+=4; - tmp.lval[j>>2] = (long)lp->tx_ring; j+=4; - - for (i=0;irxRingSize-1;i++){ - if (i < 3) { - tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; - } - } - tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; - for (i=0;itxRingSize-1;i++){ - if (i < 3) { - tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; - } - } - tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; - - for (i=0;irxRingSize-1;i++){ - if (i < 3) { - tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; - } - } - tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; - for (i=0;itxRingSize-1;i++){ - if (i < 3) { - tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; - } - } - tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; - - for (i=0;irxRingSize;i++){ - tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4; - } - for (i=0;itxRingSize;i++){ - tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4; - } - - tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4; - tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4; - tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4; - tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4; - tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4; - tmp.lval[j>>2] = inl(DE4X5_STS); j+=4; - tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4; - tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4; - tmp.lval[j>>2] = lp->chipset; j+=4; - if (lp->chipset == DC21140) { - tmp.lval[j>>2] = gep_rd(dev); j+=4; - } else { - tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4; - tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4; - tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4; - tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4; - } - tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4; - if 
(lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { - tmp.lval[j>>2] = lp->active; j+=4; - tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - if (lp->phy[lp->active].id != BROADCOM_T4) { - tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - } - tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - if (lp->phy[lp->active].id != BROADCOM_T4) { - tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - } else { - tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4; - } - } - - tmp.addr[j++] = lp->txRingSize; - tmp.addr[j++] = netif_queue_stopped(dev); - - ioc->len = j; - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; - break; - -*/ - default: - return -EOPNOTSUPP; - } - - return status; -} - -static int __init de4x5_module_init (void) -{ - int err = 0; - -#ifdef CONFIG_PCI - err = pci_register_driver(&de4x5_pci_driver); -#endif -#ifdef CONFIG_EISA - err |= eisa_driver_register (&de4x5_eisa_driver); -#endif - - return err; -} - -static void __exit de4x5_module_exit (void) -{ -#ifdef CONFIG_PCI - pci_unregister_driver (&de4x5_pci_driver); -#endif -#ifdef CONFIG_EISA - eisa_driver_unregister (&de4x5_eisa_driver); -#endif -} - -module_init (de4x5_module_init); -module_exit (de4x5_module_exit); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h deleted file mode 100644 index 1bfdc9b117f6..000000000000 --- a/drivers/net/ethernet/dec/tulip/de4x5.h +++ /dev/null @@ -1,1017 +0,0 @@ -/* - Copyright 1994 Digital Equipment Corporation. - - This software may be used and distributed according to the terms of the - GNU General Public License, incorporated herein by reference. - - The author may be reached as davies@wanton.lkg.dec.com or Digital - Equipment Corporation, 550 King Street, Littleton MA 01460. 
- - ========================================================================= -*/ - -/* -** DC21040 CSR<1..15> Register Address Map -*/ -#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */ -#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */ -#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */ -#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */ -#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */ -#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */ -#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */ -#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */ -#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */ -#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */ -#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */ -#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */ -#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */ -#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */ -#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */ -#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/ -#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */ -#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */ -#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */ -#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */ -#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */ - -/* -** EISA Register Address Map -*/ -#define EISA_ID iobase+0x0c80 /* EISA ID Registers */ -#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */ -#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */ -#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */ -#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */ -#define EISA_CR iobase+0x0c84 /* EISA Control Register */ -#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */ -#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */ -#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */ -#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */ -#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */ - -/* -** PCI/EISA Configuration Registers Address Map -*/ -#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */ -#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */ -#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */ -#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */ -#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */ -#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */ -#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. 
*/ -#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */ -#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */ -#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */ -#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */ - -/* -** EISA Configuration Register 0 bit definitions -*/ -#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */ -#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */ -#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */ -#define ER0_ISTS 0x10 /* Interrupt Status (X) */ -#define ER0_LI 0x08 /* Latch Interrupts */ -#define ER0_INTL 0x06 /* INTerrupt Level */ -#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */ - -/* -** EISA Configuration Register 1 bit definitions -*/ -#define ER1_IAM 0xe0 /* ISA Address Mode */ -#define ER1_IAE 0x10 /* ISA Addressing Enable */ -#define ER1_UPIN 0x0f /* User Pins */ - -/* -** EISA Configuration Register 2 bit definitions -*/ -#define ER2_BRS 0xc0 /* Boot ROM Size */ -#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */ - -/* -** EISA Configuration Register 3 bit definitions -*/ -#define ER3_BWE 0x40 /* Burst Write Enable */ -#define ER3_BRE 0x04 /* Burst Read Enable */ -#define ER3_LSR 0x02 /* Local Software Reset */ - -/* -** PCI Configuration ID Register (PCI_CFID). The Device IDs are left -** shifted 8 bits to allow detection of DC21142 and DC21143 variants with -** the configuration revision register step number. -*/ -#define CFID_DID 0xff00 /* Device ID */ -#define CFID_VID 0x00ff /* Vendor ID */ -#define DC21040_DID 0x0200 /* Unique Device ID # */ -#define DC21040_VID 0x1011 /* DC21040 Manufacturer */ -#define DC21041_DID 0x1400 /* Unique Device ID # */ -#define DC21041_VID 0x1011 /* DC21041 Manufacturer */ -#define DC21140_DID 0x0900 /* Unique Device ID # */ -#define DC21140_VID 0x1011 /* DC21140 Manufacturer */ -#define DC2114x_DID 0x1900 /* Unique Device ID # */ -#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */ - -/* -** Chipset defines -*/ -#define DC21040 DC21040_DID -#define DC21041 DC21041_DID -#define DC21140 DC21140_DID -#define DC2114x DC2114x_DID -#define DC21142 (DC2114x_DID | 0x0010) -#define DC21143 (DC2114x_DID | 0x0030) -#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */ - -#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID)) -#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID)) -#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID)) -#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID)) -#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142)) -#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143)) - -/* -** PCI Configuration Command/Status Register (PCI_CFCS) -*/ -#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */ -#define CFCS_SSE 0x40000000 /* Signal System Error (S) */ -#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */ -#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */ -#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */ -#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */ -#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */ -#define CFCS_SEE 0x00000100 /* System Error Enable (C) */ -#define CFCS_PER 0x00000040 /* Parity Error Response (C) */ -#define CFCS_MO 0x00000004 /* Master Operation (C) */ -#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */ -#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */ - -/* -** PCI Configuration Revision Register 
(PCI_CFRV) -*/ -#define CFRV_BC 0xff000000 /* Base Class */ -#define CFRV_SC 0x00ff0000 /* Subclass */ -#define CFRV_RN 0x000000f0 /* Revision Number */ -#define CFRV_SN 0x0000000f /* Step Number */ -#define BASE_CLASS 0x02000000 /* Indicates Network Controller */ -#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */ -#define STEP_NUMBER 0x00000020 /* Increments for future chips */ -#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */ -#define CFRV_MASK 0xffff0000 /* Register mask */ - -/* -** PCI Configuration Latency Timer Register (PCI_CFLT) -*/ -#define CFLT_BC 0x0000ff00 /* Latency Timer bits */ - -/* -** PCI Configuration Base I/O Address Register (PCI_CBIO) -*/ -#define CBIO_MASK -128 /* Base I/O Address Mask */ -#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */ - -/* -** PCI Configuration Card Information Structure Register (PCI_CCIS) -*/ -#define CCIS_ROMI 0xf0000000 /* ROM Image */ -#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */ -#define CCIS_ASI 0x00000007 /* Address Space Indicator */ - -/* -** PCI Configuration Subsystem ID Register (PCI_SSID) -*/ -#define SSID_SSID 0xffff0000 /* Subsystem ID */ -#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */ - -/* -** PCI Configuration Expansion ROM Base Address Register (PCI_CBER) -*/ -#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */ -#define CBER_ROME 0x00000001 /* ROM Enable */ - -/* -** PCI Configuration Interrupt Register (PCI_CFIT) -*/ -#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */ -#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */ -#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */ -#define CFIT_IRQL 0x000000ff /* Interrupt Line */ - -/* -** PCI Configuration Power Management Area Register (PCI_CFPM) -*/ -#define SLEEP 0x80 /* Power Saving Sleep Mode */ -#define SNOOZE 0x40 /* Power Saving Snooze Mode */ -#define WAKEUP 0x00 /* Power Saving Wakeup */ - -#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */ -#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */ - -/* -** DC21040 Bus Mode Register (DE4X5_BMR) -*/ -#define BMR_RML 0x00200000 /* [Memory] Read Multiple */ -#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */ -#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */ -#define BMR_DAS 0x00010000 /* Diagnostic Address Space */ -#define BMR_CAL 0x0000c000 /* Cache Alignment */ -#define BMR_PBL 0x00003f00 /* Programmable Burst Length */ -#define BMR_BLE 0x00000080 /* Big/Little Endian */ -#define BMR_DSL 0x0000007c /* Descriptor Skip Length */ -#define BMR_BAR 0x00000002 /* Bus ARbitration */ -#define BMR_SWR 0x00000001 /* Software Reset */ - - /* Timings here are for 10BASE-T/AUI only*/ -#define TAP_NOPOLL 0x00000000 /* No automatic polling */ -#define TAP_200US 0x00020000 /* TX automatic polling every 200us */ -#define TAP_800US 0x00040000 /* TX automatic polling every 800us */ -#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */ -#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */ -#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */ -#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */ -#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */ - -#define CAL_NOUSE 0x00000000 /* Not used */ -#define CAL_8LONG 0x00004000 /* 8-longword alignment */ -#define CAL_16LONG 0x00008000 /* 16-longword alignment */ -#define CAL_32LONG 0x0000c000 /* 32-longword alignment */ - -#define PBL_0 
0x00000000 /* DMA burst length = amount in RX FIFO */ -#define PBL_1 0x00000100 /* 1 longword DMA burst length */ -#define PBL_2 0x00000200 /* 2 longwords DMA burst length */ -#define PBL_4 0x00000400 /* 4 longwords DMA burst length */ -#define PBL_8 0x00000800 /* 8 longwords DMA burst length */ -#define PBL_16 0x00001000 /* 16 longwords DMA burst length */ -#define PBL_32 0x00002000 /* 32 longwords DMA burst length */ - -#define DSL_0 0x00000000 /* 0 longword / descriptor */ -#define DSL_1 0x00000004 /* 1 longword / descriptor */ -#define DSL_2 0x00000008 /* 2 longwords / descriptor */ -#define DSL_4 0x00000010 /* 4 longwords / descriptor */ -#define DSL_8 0x00000020 /* 8 longwords / descriptor */ -#define DSL_16 0x00000040 /* 16 longwords / descriptor */ -#define DSL_32 0x00000080 /* 32 longwords / descriptor */ - -/* -** DC21040 Transmit Poll Demand Register (DE4X5_TPD) -*/ -#define TPD 0x00000001 /* Transmit Poll Demand */ - -/* -** DC21040 Receive Poll Demand Register (DE4X5_RPD) -*/ -#define RPD 0x00000001 /* Receive Poll Demand */ - -/* -** DC21040 Receive Ring Base Address Register (DE4X5_RRBA) -*/ -#define RRBA 0xfffffffc /* RX Descriptor List Start Address */ - -/* -** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA) -*/ -#define TRBA 0xfffffffc /* TX Descriptor List Start Address */ - -/* -** Status Register (DE4X5_STS) -*/ -#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */ -#define STS_BE 0x03800000 /* Bus Error Bits */ -#define STS_TS 0x00700000 /* Transmit Process State */ -#define STS_RS 0x000e0000 /* Receive Process State */ -#define STS_NIS 0x00010000 /* Normal Interrupt Summary */ -#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */ -#define STS_ER 0x00004000 /* Early Receive */ -#define STS_FBE 0x00002000 /* Fatal Bus Error */ -#define STS_SE 0x00002000 /* System Error */ -#define STS_LNF 0x00001000 /* Link Fail */ -#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */ -#define STS_TM 0x00000800 /* Timer Expired (DC21041) */ -#define STS_ETI 0x00000400 /* Early Transmit Interrupt */ -#define STS_AT 0x00000400 /* AUI/TP Pin */ -#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */ -#define STS_RPS 0x00000100 /* Receive Process Stopped */ -#define STS_RU 0x00000080 /* Receive Buffer Unavailable */ -#define STS_RI 0x00000040 /* Receive Interrupt */ -#define STS_UNF 0x00000020 /* Transmit Underflow */ -#define STS_LNP 0x00000010 /* Link Pass */ -#define STS_ANC 0x00000010 /* Autonegotiation Complete */ -#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */ -#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */ -#define STS_TPS 0x00000002 /* Transmit Process Stopped */ -#define STS_TI 0x00000001 /* Transmit Interrupt */ - -#define EB_PAR 0x00000000 /* Parity Error */ -#define EB_MA 0x00800000 /* Master Abort */ -#define EB_TA 0x01000000 /* Target Abort */ -#define EB_RES0 0x01800000 /* Reserved */ -#define EB_RES1 0x02000000 /* Reserved */ - -#define TS_STOP 0x00000000 /* Stopped */ -#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */ -#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */ -#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */ -#define TS_RES 0x00400000 /* Reserved */ -#define TS_SPKT 0x00500000 /* Setup Packet */ -#define TS_SUSP 0x00600000 /* Suspended */ -#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */ - -#define RS_STOP 0x00000000 /* Stopped */ -#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */ -#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */ 
-#define RS_WFRP 0x00060000 /* Wait for Receive Packet */ -#define RS_SUSP 0x00080000 /* Suspended */ -#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */ -#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */ -#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */ - -#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */ - -/* -** Operation Mode Register (DE4X5_OMR) -*/ -#define OMR_SC 0x80000000 /* Special Capture Effect Enable */ -#define OMR_RA 0x40000000 /* Receive All */ -#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */ -#define OMR_SCR 0x01000000 /* Scrambler Mode */ -#define OMR_PCS 0x00800000 /* PCS Function */ -#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */ -#define OMR_SF 0x00200000 /* Store and Forward */ -#define OMR_HBD 0x00080000 /* HeartBeat Disable */ -#define OMR_PS 0x00040000 /* Port Select */ -#define OMR_CA 0x00020000 /* Capture Effect Enable */ -#define OMR_BP 0x00010000 /* Back Pressure */ -#define OMR_TR 0x0000c000 /* Threshold Control Bits */ -#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */ -#define OMR_FC 0x00001000 /* Force Collision Mode */ -#define OMR_OM 0x00000c00 /* Operating Mode */ -#define OMR_FDX 0x00000200 /* Full Duplex Mode */ -#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */ -#define OMR_PM 0x00000080 /* Pass All Multicast */ -#define OMR_PR 0x00000040 /* Promiscuous Mode */ -#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */ -#define OMR_IF 0x00000010 /* Inverse Filtering */ -#define OMR_PB 0x00000008 /* Pass Bad Frames */ -#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */ -#define OMR_SR 0x00000002 /* Start/Stop Receive */ -#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */ - -#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */ -#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */ -#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */ -#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */ - -#define OMR_DEF (OMR_SDP) -#define OMR_SIA (OMR_SDP | OMR_TTM) -#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS) -#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS) -#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS) - -/* -** DC21040 Interrupt Mask Register (DE4X5_IMR) -*/ -#define IMR_GPM 0x04000000 /* General Purpose Port Mask */ -#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */ -#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */ -#define IMR_ERM 0x00004000 /* Early Receive Mask */ -#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */ -#define IMR_SEM 0x00002000 /* System Error Mask */ -#define IMR_LFM 0x00001000 /* Link Fail Mask */ -#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */ -#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */ -#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */ -#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */ -#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */ -#define IMR_RSM 0x00000100 /* Receive Stopped Mask */ -#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */ -#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */ -#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */ -#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */ -#define IMR_LPM 0x00000010 /* Link Pass */ -#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */ -#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */ -#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */ -#define 
IMR_TIM 0x00000001 /* Transmit Interrupt Mask */ - -/* -** Missed Frames and FIFO Overflow Counters (DE4X5_MFC) -*/ -#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */ -#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */ -#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */ -#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */ -#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */ - -/* -** DC21040 Ethernet Address PROM (DE4X5_APROM) -*/ -#define APROM_DN 0x80000000 /* Data Not Valid */ -#define APROM_DT 0x000000ff /* Address Byte */ - -/* -** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM) -*/ -#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */ -#define BROM_RD 0x00004000 /* Read from Boot ROM */ -#define BROM_WR 0x00002000 /* Write to Boot ROM */ -#define BROM_BR 0x00001000 /* Select Boot ROM when set */ -#define BROM_SR 0x00000800 /* Select Serial ROM when set */ -#define BROM_REG 0x00000400 /* External Register Select */ -#define BROM_DT 0x000000ff /* Data Byte */ - -/* -** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII) -*/ -#define MII_MDI 0x00080000 /* MII Management Data In */ -#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */ -#define MII_MRD 0x00040000 /* MII Management Define Read Mode */ -#define MII_MWR 0x00000000 /* MII Management Define Write Mode */ -#define MII_MDT 0x00020000 /* MII Management Data Out */ -#define MII_MDC 0x00010000 /* MII Management Clock */ -#define MII_RD 0x00004000 /* Read from MII */ -#define MII_WR 0x00002000 /* Write to MII */ -#define MII_SEL 0x00000800 /* Select MII when RESET */ - -#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */ -#define SROM_RD 0x00004000 /* Read from Boot ROM */ -#define SROM_WR 0x00002000 /* Write to Boot ROM */ -#define SROM_BR 0x00001000 /* Select Boot ROM when set */ -#define SROM_SR 0x00000800 /* Select Serial ROM when set */ -#define SROM_REG 0x00000400 /* External Register Select */ -#define SROM_DT 0x000000ff /* Data Byte */ - -#define DT_OUT 0x00000008 /* Serial Data Out */ -#define DT_IN 0x00000004 /* Serial Data In */ -#define DT_CLK 0x00000002 /* Serial ROM Clock */ -#define DT_CS 0x00000001 /* Serial ROM Chip Select */ - -#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */ -#define MII_TEST 0xaaaaaaaa /* MII Test Signal */ -#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */ -#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */ - -#define MII_CR 0x00 /* MII Management Control Register */ -#define MII_SR 0x01 /* MII Management Status Register */ -#define MII_ID0 0x02 /* PHY Identifier Register 0 */ -#define MII_ID1 0x03 /* PHY Identifier Register 1 */ -#define MII_ANA 0x04 /* Auto Negotiation Advertisement */ -#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */ -#define MII_ANE 0x06 /* Auto Negotiation Expansion */ -#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */ - -#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */ - -/* -** MII Management Control Register -*/ -#define MII_CR_RST 0x8000 /* RESET the PHY chip */ -#define MII_CR_LPBK 0x4000 /* Loopback enable */ -#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */ -#define MII_CR_10 0x0000 /* Set 10Mb/s */ -#define MII_CR_100 0x2000 /* Set 100Mb/s */ -#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */ -#define MII_CR_PD 0x0800 /* Power Down */ -#define MII_CR_ISOL 0x0400 /* Isolate Mode */ -#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */ -#define MII_CR_FDM 
0x0100 /* Full Duplex Mode */ -#define MII_CR_CTE 0x0080 /* Collision Test Enable */ - -/* -** MII Management Status Register -*/ -#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */ -#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */ -#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */ -#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */ -#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */ -#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/ -#define MII_SR_RFD 0x0010 /* Remote Fault Detected */ -#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */ -#define MII_SR_LKS 0x0004 /* Link Status */ -#define MII_SR_JABD 0x0002 /* Jabber Detect */ -#define MII_SR_XC 0x0001 /* Extended Capabilities */ - -/* -** MII Management Auto Negotiation Advertisement Register -*/ -#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */ -#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */ -#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */ -#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */ -#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */ -#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */ -#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */ -#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */ - -/* -** MII Management Auto Negotiation Remote End Register -*/ -#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */ -#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */ -#define MII_ANLPA_RF 0x2000 /* Remote Fault */ -#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */ -#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */ -#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */ -#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */ -#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */ -#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */ -#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */ -#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */ - -/* -** SROM Media Definitions (ABG SROM Section) -*/ -#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */ -#define MEDIA_MII 0x0040 /* MII Present on the adapter */ -#define MEDIA_FIBRE 0x0008 /* Fibre Media present */ -#define MEDIA_AUI 0x0004 /* AUI Media present */ -#define MEDIA_TP 0x0002 /* TP Media present */ -#define MEDIA_BNC 0x0001 /* BNC Media present */ - -/* -** SROM Definitions (Digital Semiconductor Format) -*/ -#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */ -#define SROM_SSID 0x0002 /* Sub-system ID offset */ -#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */ -#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */ -#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/ -#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */ -#define SROM_SFV 0x0012 /* SROM Format Version offset */ -#define SROM_CCNT 0x0013 /* Controller Count offset */ -#define SROM_HWADD 0x0014 /* Hardware Address offset */ -#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/ -#define SROM_CRC 0x007e /* SROM CRC offset */ - -/* -** SROM Media Connection Definitions -*/ -#define SROM_10BT 0x0000 /* 10BASE-T half duplex */ -#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */ -#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */ -#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */ -#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */ -#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */ -#define SROM_100BTH 0x0003 /* 100BASE-T 
half duplex */ -#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */ -#define SROM_100BT4 0x0006 /* 100BASE-T4 */ -#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */ -#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */ -#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */ -#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */ -#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */ -#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */ -#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */ -#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */ -#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */ -#define SROM_PAO 0x8800 /* Powerup Autosense Only */ -#define SROM_NSMI 0xffff /* No Selected Media Information */ - -/* -** SROM Media Definitions -*/ -#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */ -#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */ -#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */ -#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */ -#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */ -#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */ -#define SROM_100BASET4 0x0006 /* 100BASE-T4 */ -#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */ -#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */ - -#define BLOCK_LEN 0x7f /* Extended blocks length mask */ -#define EXT_FIELD 0x40 /* Extended blocks extension field bit */ -#define MEDIA_CODE 0x3f /* Extended blocks media code mask */ - -/* -** SROM Compact Format Block Masks -*/ -#define COMPACT_FI 0x80 /* Format Indicator */ -#define COMPACT_LEN 0x04 /* Length */ -#define COMPACT_MC 0x3f /* Media Code */ - -/* -** SROM Extended Format Block Type 0 Masks -*/ -#define BLOCK0_FI 0x80 /* Format Indicator */ -#define BLOCK0_MCS 0x80 /* Media Code byte Sign */ -#define BLOCK0_MC 0x3f /* Media Code */ - -/* -** DC21040 Full Duplex Register (DE4X5_FDR) -*/ -#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */ - -/* -** DC21041 General Purpose Timer Register (DE4X5_GPT) -*/ -#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */ -#define GPT_VAL 0x0000ffff /* Timer Value */ - -/* -** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits) -*/ -/* Valid ONLY for DE500 hardware */ -#define GEP_LNP 0x00000080 /* Link Pass (input) */ -#define GEP_SLNK 0x00000040 /* SYM LINK (input) */ -#define GEP_SDET 0x00000020 /* Signal Detect (input) */ -#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */ -#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */ -#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */ -#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */ -#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */ -#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */ -#define GEP_CTRL 0x00000100 /* GEP control bit */ - -/* -** SIA Register Defaults -*/ -#define CSR13 0x00000001 -#define CSR14 0x0003ff7f /* Autonegotiation disabled */ -#define CSR15 0x00000008 - -/* -** SIA Status Register (DE4X5_SISR) -*/ -#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */ -#define SISR_LPN 0x00008000 /* Link Partner Negotiable */ -#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */ -#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */ -#define SISR_TRF 0x00000800 /* Transmit Remote Fault */ -#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */ -#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/ -#define SISR_TRA 0x00000200 
/* 10BASE-T Receive Port Activity */ -#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */ -#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */ -#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */ -#define SISR_DAO 0x00000080 /* PLL All One */ -#define SISR_DAZ 0x00000040 /* PLL All Zero */ -#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */ -#define SISR_DSD 0x00000010 /* PLL Self-Test Done */ -#define SISR_APS 0x00000008 /* Auto Polarity State */ -#define SISR_LKF 0x00000004 /* Link Fail Status */ -#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */ -#define SISR_NCR 0x00000002 /* Network Connection Error */ -#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */ -#define SISR_PAUI 0x00000001 /* AUI_TP Indication */ -#define SISR_MRA 0x00000001 /* MII Receive Port Activity */ - -#define ANS_NDIS 0x00000000 /* Nway disable */ -#define ANS_TDIS 0x00001000 /* Transmit Disable */ -#define ANS_ADET 0x00002000 /* Ability Detect */ -#define ANS_ACK 0x00003000 /* Acknowledge */ -#define ANS_CACK 0x00004000 /* Complete Acknowledge */ -#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */ -#define ANS_LCHK 0x00006000 /* Link Check */ - -#define SISR_RST 0x00000301 /* CSR12 reset */ -#define SISR_ANR 0x00001301 /* Autonegotiation restart */ - -/* -** SIA Connectivity Register (DE4X5_SICR) -*/ -#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */ -#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */ -#define SICR_OE24 0x00004000 /* Output Enable 2 4 */ -#define SICR_OE13 0x00002000 /* Output Enable 1 3 */ -#define SICR_IE 0x00001000 /* Input Enable */ -#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */ -#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */ -#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/ -#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/ -#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */ -#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */ -#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/ -#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */ -#define SICR_ASE 0x00000080 /* APLL Start Enable*/ -#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */ -#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */ -#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */ -#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */ -#define SICR_CAC 0x00000004 /* CSR Auto Configuration */ -#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */ -#define SICR_SRL 0x00000001 /* SIA Reset */ -#define SIA_RESET 0x00000000 /* SIA Reset Value */ - -/* -** SIA Transmit and Receive Register (DE4X5_STRR) -*/ -#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */ -#define STRR_SPP 0x00004000 /* Set Polarity Plus */ -#define STRR_APE 0x00002000 /* Auto Polarity Enable */ -#define STRR_LTE 0x00001000 /* Link Test Enable */ -#define STRR_SQE 0x00000800 /* Signal Quality Enable */ -#define STRR_CLD 0x00000400 /* Collision Detect Enable */ -#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */ -#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */ -#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */ -#define STRR_HDE 0x00000040 /* Half Duplex Enable */ -#define STRR_CPEN 0x00000030 /* Compensation Enable */ -#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */ -#define STRR_DREN 0x00000004 /* Driver Enable */ -#define STRR_LBK 
0x00000002 /* Loopback Enable */ -#define STRR_ECEN 0x00000001 /* Encoder Enable */ -#define STRR_RESET 0xffffffff /* Reset value for STRR */ - -/* -** SIA General Register (DE4X5_SIGR) -*/ -#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */ -#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */ -#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */ -#define SIGR_CWE 0x08000000 /* Control Write Enable */ -#define SIGR_RME 0x04000000 /* Receive Match Enable */ -#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */ -#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */ -#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */ -#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */ -#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */ -#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */ -#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */ -#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */ -#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */ -#define SIGR_FRL 0x00002000 /* Force Receiver Low */ -#define SIGR_DPST 0x00001000 /* PLL Self Test Start */ -#define SIGR_LSD 0x00000800 /* LED Stretch Disable */ -#define SIGR_FLF 0x00000400 /* Force Link Fail */ -#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */ -#define SIGR_TSCK 0x00000100 /* Test Clock */ -#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */ -#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */ -#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */ -#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */ -#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */ -#define SIGR_JCK 0x00000004 /* Jabber Clock */ -#define SIGR_HUJ 0x00000002 /* Host Unjab */ -#define SIGR_JBD 0x00000001 /* Jabber Disable */ -#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */ - -/* -** Receive Descriptor Bit Summary -*/ -#define R_OWN 0x80000000 /* Own Bit */ -#define RD_FF 0x40000000 /* Filtering Fail */ -#define RD_FL 0x3fff0000 /* Frame Length */ -#define RD_ES 0x00008000 /* Error Summary */ -#define RD_LE 0x00004000 /* Length Error */ -#define RD_DT 0x00003000 /* Data Type */ -#define RD_RF 0x00000800 /* Runt Frame */ -#define RD_MF 0x00000400 /* Multicast Frame */ -#define RD_FS 0x00000200 /* First Descriptor */ -#define RD_LS 0x00000100 /* Last Descriptor */ -#define RD_TL 0x00000080 /* Frame Too Long */ -#define RD_CS 0x00000040 /* Collision Seen */ -#define RD_FT 0x00000020 /* Frame Type */ -#define RD_RJ 0x00000010 /* Receive Watchdog */ -#define RD_RE 0x00000008 /* Report on MII Error */ -#define RD_DB 0x00000004 /* Dribbling Bit */ -#define RD_CE 0x00000002 /* CRC Error */ -#define RD_OF 0x00000001 /* Overflow */ - -#define RD_RER 0x02000000 /* Receive End Of Ring */ -#define RD_RCH 0x01000000 /* Second Address Chained */ -#define RD_RBS2 0x003ff800 /* Buffer 2 Size */ -#define RD_RBS1 0x000007ff /* Buffer 1 Size */ - -/* -** Transmit Descriptor Bit Summary -*/ -#define T_OWN 0x80000000 /* Own Bit */ -#define TD_ES 0x00008000 /* Error Summary */ -#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */ -#define TD_LO 0x00000800 /* Loss Of Carrier */ -#define TD_NC 0x00000400 /* No Carrier */ -#define TD_LC 0x00000200 /* Late Collision */ -#define TD_EC 0x00000100 /* Excessive Collisions */ -#define TD_HF 0x00000080 /* Heartbeat Fail */ -#define TD_CC 0x00000078 /* Collision Counter */ -#define TD_LF 0x00000004 /* Link Fail */ -#define TD_UF 0x00000002 /* Underflow Error */ -#define TD_DE 0x00000001 /* Deferred */ - -#define TD_IC 0x80000000 /* Interrupt On 
Completion */ -#define TD_LS 0x40000000 /* Last Segment */ -#define TD_FS 0x20000000 /* First Segment */ -#define TD_FT1 0x10000000 /* Filtering Type */ -#define TD_SET 0x08000000 /* Setup Packet */ -#define TD_AC 0x04000000 /* Add CRC Disable */ -#define TD_TER 0x02000000 /* Transmit End Of Ring */ -#define TD_TCH 0x01000000 /* Second Address Chained */ -#define TD_DPD 0x00800000 /* Disabled Padding */ -#define TD_FT0 0x00400000 /* Filtering Type */ -#define TD_TBS2 0x003ff800 /* Buffer 2 Size */ -#define TD_TBS1 0x000007ff /* Buffer 1 Size */ - -#define PERFECT_F 0x00000000 -#define HASH_F TD_FT0 -#define INVERSE_F TD_FT1 -#define HASH_O_F (TD_FT1 | TD_F0) - -/* -** Media / mode state machine definitions -** User selectable: -*/ -#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */ -#define TP_NW 0x0002 /* 10Base-T with Nway */ -#define BNC 0x0004 /* Thinwire */ -#define AUI 0x0008 /* Thickwire */ -#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */ -#define _10Mb 0x0040 /* 10Mb/s Ethernet */ -#define _100Mb 0x0080 /* 100Mb/s Ethernet */ -#define AUTO 0x4000 /* Auto sense the media or speed */ - -/* -** Internal states -*/ -#define NC 0x0000 /* No Connection */ -#define ANS 0x0020 /* Intermediate AutoNegotiation State */ -#define SPD_DET 0x0100 /* Parallel speed detection */ -#define INIT 0x0200 /* Initial state */ -#define EXT_SIA 0x0400 /* External SIA for motherboard chip */ -#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */ -#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */ -#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */ -#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */ -#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */ -#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */ -#define MII 0x1000 /* MII on the 21143 */ - -#define TIMER_CB 0x80000000 /* Timer callback detection */ - -/* -** DE4X5 DEBUG Options -*/ -#define DEBUG_NONE 0x0000 /* No DEBUG messages */ -#define DEBUG_VERSION 0x0001 /* Print version message */ -#define DEBUG_MEDIA 0x0002 /* Print media messages */ -#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */ -#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */ -#define DEBUG_SROM 0x0010 /* Print SROM messages */ -#define DEBUG_MII 0x0020 /* Print MII messages */ -#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */ -#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */ -#define DEBUG_PCICFG 0x0100 -#define DEBUG_ALL 0x01ff - -/* -** Miscellaneous -*/ -#define PCI 0 -#define EISA 1 - -#define DE4X5_HASH_TABLE_LEN 512 /* Bits */ -#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */ - -#define SETUP_FRAME_LEN 192 /* Bytes */ -#define IMPERF_PA_OFFSET 156 /* Bytes */ - -#define POLL_DEMAND 1 - -#define LOST_MEDIA_THRESHOLD 3 - -#define MASK_INTERRUPTS 1 -#define UNMASK_INTERRUPTS 0 - -#define DE4X5_STRLEN 8 - -#define DE4X5_INIT 0 /* Initialisation time */ -#define DE4X5_RUN 1 /* Run time */ - -#define DE4X5_SAVE_STATE 0 -#define DE4X5_RESTORE_STATE 1 - -/* -** Address Filtering Modes -*/ -#define PERFECT 0 /* 16 perfect physical addresses */ -#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */ -#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */ -#define ALL_HASH 3 /* Hashes all physical & multicast addrs */ - -#define ALL 0 /* Clear out all the setup frame */ -#define PHYS_ADDR_ONLY 1 /* Update the physical address only */ - -/* -** Adapter state -*/ -#define INITIALISED 0 /* After h/w initialised and mem alloc'd */ 
-#define CLOSED 1 /* Ready for opening */ -#define OPEN 2 /* Running */ - -/* -** Various wait times -*/ -#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */ -#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */ - -/* -** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since -** the vendors seem split 50-50 on how to calculate the OUI register values -** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()]. -*/ -#define NATIONAL_TX 0x2000 -#define BROADCOM_T4 0x03e0 -#define SEEQ_T4 0x0016 -#define CYPRESS_T4 0x0014 - -/* -** Speed Selection stuff -*/ -#define SET_10Mb {\ - if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ - omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\ - if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\ - mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ - }\ - omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\ - outl(omr, DE4X5_OMR);\ - if (!lp->useSROM) lp->cache.gep = 0;\ - } else if (lp->useSROM && !lp->useMII) {\ - omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ - omr |= (lp->fdx ? OMR_FDX : 0);\ - outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\ - } else {\ - omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ - omr |= (lp->fdx ? OMR_FDX : 0);\ - outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\ - lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\ - gep_wr(lp->cache.gep, dev);\ - }\ -} - -#define SET_100Mb {\ - if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ - int fdx=0;\ - if (lp->phy[lp->active].id == NATIONAL_TX) {\ - mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\ - 0x18, lp->phy[lp->active].addr, DE4X5_MII);\ - }\ - omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\ - sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\ - if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\ - if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\ - mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ - }\ - if (fdx) omr |= OMR_FDX;\ - outl(omr, DE4X5_OMR);\ - if (!lp->useSROM) lp->cache.gep = 0;\ - } else if (lp->useSROM && !lp->useMII) {\ - omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ - omr |= (lp->fdx ? OMR_FDX : 0);\ - outl(omr | lp->infoblock_csr6, DE4X5_OMR);\ - } else {\ - omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ - omr |= (lp->fdx ? OMR_FDX : 0);\ - outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\ - lp->cache.gep = (lp->fdx ? 
0 : GEP_FDXD) | GEP_MODE;\ - gep_wr(lp->cache.gep, dev);\ - }\ -} - -/* FIX ME so I don't jam 10Mb networks */ -#define SET_100Mb_PDET {\ - if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ - mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ - omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ - outl(omr, DE4X5_OMR);\ - } else if (lp->useSROM && !lp->useMII) {\ - omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ - outl(omr, DE4X5_OMR);\ - } else {\ - omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ - outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\ - lp->cache.gep = (GEP_FDXD | GEP_MODE);\ - gep_wr(lp->cache.gep, dev);\ - }\ -} - -/* -** Include the IOCTL stuff -*/ -#include - -struct de4x5_ioctl { - unsigned short cmd; /* Command to run */ - unsigned short len; /* Length of the data buffer */ - unsigned char __user *data; /* Pointer to the data buffer */ -}; - -/* -** Recognised commands for the driver -*/ -#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ -#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ -/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */ -#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */ -#define DE4X5_GET_MCA 0x06 /* Get a multicast address */ -#define DE4X5_SET_MCA 0x07 /* Set a multicast address */ -#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */ -#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */ -#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */ -#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */ -#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */ -#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ -#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ - -#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008) -- cgit v1.2.3 From a682d18433005f9609be95c46924ddca827122de Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 May 2022 13:10:34 +0200 Subject: can: peak_usb: fix typo in comment Spelling mistake (triple letters) in comment. Detected with the help of Coccinelle. 
Link: https://lore.kernel.org/all/20220521111145.81697-24-Julia.Lawall@inria.fr Signed-off-by: Julia Lawall Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 17dc178f555b..091c631ebe23 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -533,7 +533,7 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir) { struct pcan_usb *pdev = mc->pdev; - /* acccording to the content of the packet */ + /* according to the content of the packet */ switch (ir) { case PCAN_USB_ERR_CNT_DEC: case PCAN_USB_ERR_CNT_INC: -- cgit v1.2.3 From 3e88445a3a5afc65929c578fe2ffc03420588993 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 20 May 2022 12:46:59 -0700 Subject: can: kvaser_usb: silence a GCC 12 -Warray-bounds warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This driver does a lot of casting of smaller buffers to struct kvaser_cmd_ext, GCC 12 does not like that: | drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c:489:65: warning: array subscript ‘struct kvaser_cmd_ext[0]’ is partly outside array bounds of ‘unsigned char[32]’ [-Warray-bounds] | drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c:489:23: note: in expansion of macro ‘le16_to_cpu’ | 489 | ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len); | | ^~~~~~~~~~~ Temporarily silence this warning (move it to W=1 builds). Link: https://lore.kernel.org/all/20220520194659.2356903-1-kuba@kernel.org Signed-off-by: Jakub Kicinski Tested-by: Marc Kleine-Budde Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/kvaser_usb/Makefile | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/can/usb/kvaser_usb/Makefile b/drivers/net/can/usb/kvaser_usb/Makefile index cf260044f0b9..b20d951a0790 100644 --- a/drivers/net/can/usb/kvaser_usb/Makefile +++ b/drivers/net/can/usb/kvaser_usb/Makefile @@ -1,3 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o kvaser_usb-y = kvaser_usb_core.o kvaser_usb_leaf.o kvaser_usb_hydra.o + +# FIXME: temporarily silence -Warray-bounds on non W=1+ builds +ifndef KBUILD_EXTRA_WARN +CFLAGS_kvaser_usb_hydra.o += -Wno-array-bounds +endif -- cgit v1.2.3 From 8f445a3ec3fd36feb90c5b2ef8d21e2492819cdc Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Mon, 23 May 2022 14:33:44 +0200 Subject: can: ctucanfd: platform: add missing dependency to HAS_IOMEM The kernel test robot noticed that the ctucanfd platform driver fails during modpost on platforms that don't support IOMEM. | ERROR: modpost: "devm_ioremap_resource" [drivers/net/can/ctucanfd/ctucanfd_platform.ko] undefined! This patch adds the missing HAS_IOMEM dependency. 
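For background, devm_ioremap_resource() is only built when the architecture provides MMIO support, which is exactly what the Kconfig dependency expresses. A minimal sketch of the probe pattern that pulls the symbol in (the driver and function names here are hypothetical, not the ctucanfd code):

    #include <linux/platform_device.h>
    #include <linux/io.h>
    #include <linux/err.h>

    /* Hypothetical platform-driver probe; shows the devm_ioremap_resource()
     * call that modpost cannot resolve when HAS_IOMEM is not set.
     */
    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* register map is now usable via ioread32()/iowrite32() on base */
            return 0;
    }
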
Link: https://lore.kernel.org/all/20220523123720.1656611-1-mkl@pengutronix.de Fixes: e8f0c23a2415 ("can: ctucanfd: CTU CAN FD open-source IP core - platform/SoC support.") Reported-by: kernel test robot Acked-by: Pavel Pisa Cc: Ondrej Ille Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ctucanfd/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig index 3c383612eb17..6e2073351a8f 100644 --- a/drivers/net/can/ctucanfd/Kconfig +++ b/drivers/net/can/ctucanfd/Kconfig @@ -23,7 +23,7 @@ config CAN_CTUCANFD_PCI config CAN_CTUCANFD_PLATFORM tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver" - depends on OF || COMPILE_TEST + depends on HAS_IOMEM && (OF || COMPILE_TEST) select CAN_CTUCANFD help The core has been tested together with OpenCores SJA1000 -- cgit v1.2.3 From 48a75b979940f8aa838143e976455aa0e629e638 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 24 May 2022 07:56:55 -0700 Subject: ath6kl: Use cc-disable-warning to disable -Wdangling-pointer Clang does not support this option so the build fails: error: unknown warning option '-Wno-dangling-pointer' [-Werror,-Wunknown-warning-option] Use cc-disable-warning so that the option is only added when it is supported. Fixes: bd1d129daa3e ("wifi: ath6k: silence false positive -Wno-dangling-pointer warning on GCC 12") Reported-by: "kernelci.org bot" Signed-off-by: Nathan Chancellor Reviewed-by: Tom Rix Link: https://lore.kernel.org/r/20220524145655.869822-1-nathan@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/wireless/ath/ath6kl/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile index 01cc0d50fee6..a75bfa9fd1cf 100644 --- a/drivers/net/wireless/ath/ath6kl/Makefile +++ b/drivers/net/wireless/ath/ath6kl/Makefile @@ -38,7 +38,7 @@ ath6kl_core-y += recovery.o # FIXME: temporarily silence -Wdangling-pointer on non W=1+ builds ifndef KBUILD_EXTRA_WARN -CFLAGS_htc_mbox.o += -Wno-dangling-pointer +CFLAGS_htc_mbox.o += $(call cc-disable-warning, dangling-pointer) endif ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o -- cgit v1.2.3 From 8119c9ee7854033c8aba11059bec4ddd146fd63e Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:44 -0700 Subject: ptp: ocp: 32-bit fixups for pci start address Use 'resource_size_t' instead of 'unsigned long' when computing the pci start address, for the benefit of 32-bit platforms. 
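The underlying issue: on 32-bit kernels built with 64-bit physical addressing (PAE/LPAE), resource_size_t is 64 bits wide while unsigned long is still 32, so a BAR assigned above 4 GiB would have its upper bits silently truncated. A sketch of the pattern, using a hypothetical helper name (the real functions are in the diff below):

    #include <linux/pci.h>
    #include <linux/io.h>

    /* Hypothetical helper; mirrors the "BAR0 + offset" computation done by
     * the driver. Declaring 'start' as unsigned long would truncate
     * addresses above 4 GiB on 32-bit platforms.
     */
    static void __iomem *example_map(struct pci_dev *pdev, u32 offset, size_t size)
    {
            resource_size_t start;

            start = pci_resource_start(pdev, 0) + offset;
            return ioremap(start, size);
    }
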
Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 860672d6a03c..957c0522a02f 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -1403,7 +1403,7 @@ static const struct devlink_ops ptp_ocp_devlink_ops = { }; static void __iomem * -__ptp_ocp_get_mem(struct ptp_ocp *bp, unsigned long start, int size) +__ptp_ocp_get_mem(struct ptp_ocp *bp, resource_size_t start, int size) { struct resource res = DEFINE_RES_MEM_NAMED(start, size, "ptp_ocp"); @@ -1413,7 +1413,7 @@ __ptp_ocp_get_mem(struct ptp_ocp *bp, unsigned long start, int size) static void __iomem * ptp_ocp_get_mem(struct ptp_ocp *bp, struct ocp_resource *r) { - unsigned long start; + resource_size_t start; start = pci_resource_start(bp->pdev, 0) + r->offset; return __ptp_ocp_get_mem(bp, start, r->size); @@ -1427,7 +1427,7 @@ ptp_ocp_set_irq_resource(struct resource *res, int irq) } static void -ptp_ocp_set_mem_resource(struct resource *res, unsigned long start, int size) +ptp_ocp_set_mem_resource(struct resource *res, resource_size_t start, int size) { struct resource r = DEFINE_RES_MEM(start, size); *res = r; @@ -1440,7 +1440,7 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r) struct pci_dev *pdev = bp->pdev; struct platform_device *p; struct resource res[2]; - unsigned long start; + resource_size_t start; int id; start = pci_resource_start(pdev, 0) + r->offset; @@ -1467,7 +1467,7 @@ ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id) { struct ptp_ocp_i2c_info *info; struct resource res[2]; - unsigned long start; + resource_size_t start; info = r->extra; start = pci_resource_start(pdev, 0) + r->offset; -- cgit v1.2.3 From 3a35e53a11bcde88ea89cbf839771e6af4681daf Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:45 -0700 Subject: ptp: ocp: Remove #ifdefs around PCI IDs These #ifdefs are not required, so remove them. Suggested-by: Jakub Kicinski Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 957c0522a02f..d2cdb2a05c36 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -20,13 +20,8 @@ #include #include -#ifndef PCI_VENDOR_ID_FACEBOOK -#define PCI_VENDOR_ID_FACEBOOK 0x1d9b -#endif - -#ifndef PCI_DEVICE_ID_FACEBOOK_TIMECARD -#define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400 -#endif +#define PCI_VENDOR_ID_FACEBOOK 0x1d9b +#define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400 static struct class timecard_class = { .owner = THIS_MODULE, -- cgit v1.2.3 From 81fa652e168558fa35bd81d6c2427319b7b8589f Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 19 May 2022 14:21:46 -0700 Subject: ptp: ocp: add Celestica timecard PCI ids Celestica is producing card with their own vendor id and device id. Add these ids to driver to support this card. 
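For reference, the PCI_DEVICE_DATA() entry added below expands to roughly the following match entry (hand-expanded here for illustration only): the subsystem IDs are wildcarded and driver_data points at the same ocp_fb_resource table as the Facebook entry, so the Celestica card is handled identically.

    /* Rough hand expansion of PCI_DEVICE_DATA(CELESTICA, TIMECARD,
     * &ocp_fb_resource); shown for illustration, not part of the patch.
     */
    {
            .vendor      = PCI_VENDOR_ID_CELESTICA,
            .device      = PCI_DEVICE_ID_CELESTICA_TIMECARD,
            .subvendor   = PCI_ANY_ID,
            .subdevice   = PCI_ANY_ID,
            .driver_data = (kernel_ulong_t)&ocp_fb_resource,
    },
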
Signed-off-by: Vadim Fedorenko Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index d2cdb2a05c36..b5f2b7769028 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -23,6 +23,9 @@ #define PCI_VENDOR_ID_FACEBOOK 0x1d9b #define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400 +#define PCI_VENDOR_ID_CELESTICA 0x18d4 +#define PCI_DEVICE_ID_CELESTICA_TIMECARD 0x1008 + static struct class timecard_class = { .owner = THIS_MODULE, .name = "timecard", @@ -629,7 +632,8 @@ static struct ocp_resource ocp_fb_resource[] = { static const struct pci_device_id ptp_ocp_pcidev_id[] = { { PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) }, - { 0 } + { PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) }, + { } }; MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id); -- cgit v1.2.3 From 5a728ac578c07f29d8b36bbd907cdea132c1607d Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:47 -0700 Subject: ptp: ocp: revise firmware display Preparse the firmware image information into loader/tag/version, and set the fw capabilities based on the tag/version. Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 64 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index b5f2b7769028..f44269e3d3c8 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -308,7 +308,9 @@ struct ptp_ocp { int gnss2_port; int mac_port; /* miniature atomic clock */ int nmea_port; - u32 fw_version; + bool fw_loader; + u8 fw_tag; + u16 fw_version; u8 board_id[OCP_BOARD_ID_LEN]; u8 serial[OCP_SERIAL_LEN]; bool has_eeprom_data; @@ -1359,6 +1361,7 @@ ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { struct ptp_ocp *bp = devlink_priv(devlink); + const char *fw_image; char buf[32]; int err; @@ -1366,13 +1369,9 @@ ptp_ocp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, if (err) return err; - if (bp->fw_version & 0xffff) { - sprintf(buf, "%d", bp->fw_version); - err = devlink_info_version_running_put(req, "fw", buf); - } else { - sprintf(buf, "%d", bp->fw_version >> 16); - err = devlink_info_version_running_put(req, "loader", buf); - } + fw_image = bp->fw_loader ? "loader" : "fw"; + sprintf(buf, "%d.%d", bp->fw_tag, bp->fw_version); + err = devlink_info_version_running_put(req, fw_image, buf); if (err) return err; @@ -1931,22 +1930,49 @@ ptp_ocp_fb_set_pins(struct ptp_ocp *bp) return 0; } +static void +ptp_ocp_fb_set_version(struct ptp_ocp *bp) +{ + u64 cap = OCP_CAP_BASIC; + u32 version; + + version = ioread32(&bp->image->version); + + /* if lower 16 bits are empty, this is the fw loader. */ + if ((version & 0xffff) == 0) { + version = version >> 16; + bp->fw_loader = true; + } + + bp->fw_tag = version >> 15; + bp->fw_version = version & 0x7fff; + + if (bp->fw_tag) { + /* FPGA firmware */ + if (version >= 5) + cap |= OCP_CAP_SIGNAL | OCP_CAP_FREQ; + } else { + /* SOM firmware */ + if (version >= 19) + cap |= OCP_CAP_SIGNAL; + if (version >= 20) + cap |= OCP_CAP_FREQ; + } + + bp->fw_cap = cap; +} + /* FB specific board initializers; last "resource" registered. 
*/ static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) { - int ver, err; + int err; bp->flash_start = 1024 * 4096; bp->eeprom_map = fb_eeprom_map; bp->fw_version = ioread32(&bp->image->version); - bp->fw_cap = OCP_CAP_BASIC; - ver = bp->fw_version & 0xffff; - if (ver >= 19) - bp->fw_cap |= OCP_CAP_SIGNAL; - if (ver >= 20) - bp->fw_cap |= OCP_CAP_FREQ; + ptp_ocp_fb_set_version(bp); ptp_ocp_tod_init(bp); ptp_ocp_nmea_out_init(bp); @@ -3497,14 +3523,6 @@ ptp_ocp_info(struct ptp_ocp *bp) ptp_ocp_phc_info(bp); - dev_info(dev, "version %x\n", bp->fw_version); - if (bp->fw_version & 0xffff) - dev_info(dev, "regular image, version %d\n", - bp->fw_version & 0xffff); - else - dev_info(dev, "golden image, version %d\n", - bp->fw_version >> 16); - ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200); ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200); ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600); -- cgit v1.2.3 From aa56a7ffc0fb9ad5f754af79ee91ebe9ec96f28d Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:48 -0700 Subject: ptp: ocp: parameterize input/output sma selectors Group the sma input/output tables together and select the correct group from the bp information. This allows adding new groups with different sma mappings. Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 42 +++++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index f44269e3d3c8..26f7830388b0 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -321,6 +321,7 @@ struct ptp_ocp { u64 fw_cap; struct ptp_ocp_signal signal[4]; struct ptp_ocp_sma_connector sma[4]; + u8 sma_tbl; }; #define OCP_REQ_TIMESTAMP BIT(0) @@ -699,6 +700,10 @@ static struct ocp_selector ptp_ocp_sma_out[] = { { } }; +static struct ocp_selector *ocp_sma_tbl[][2] = { + { ptp_ocp_sma_in, ptp_ocp_sma_out }, +}; + static const char * ptp_ocp_select_name_from_val(struct ocp_selector *tbl, int val) { @@ -2088,35 +2093,35 @@ __handle_signal_inputs(struct ptp_ocp *bp, u32 val) */ static ssize_t -ptp_ocp_show_output(u32 val, char *buf, int def_val) +ptp_ocp_show_output(struct ocp_selector *tbl, u32 val, char *buf, int def_val) { const char *name; ssize_t count; count = sysfs_emit(buf, "OUT: "); - name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val); + name = ptp_ocp_select_name_from_val(tbl, val); if (!name) - name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, def_val); + name = ptp_ocp_select_name_from_val(tbl, def_val); count += sysfs_emit_at(buf, count, "%s\n", name); return count; } static ssize_t -ptp_ocp_show_inputs(u32 val, char *buf, int def_val) +ptp_ocp_show_inputs(struct ocp_selector *tbl, u32 val, char *buf, int def_val) { const char *name; ssize_t count; int i; count = sysfs_emit(buf, "IN: "); - for (i = 0; i < ARRAY_SIZE(ptp_ocp_sma_in); i++) { - if (val & ptp_ocp_sma_in[i].value) { - name = ptp_ocp_sma_in[i].name; + for (i = 0; tbl[i].name; i++) { + if (val & tbl[i].value) { + name = tbl[i].name; count += sysfs_emit_at(buf, count, "%s ", name); } } if (!val && def_val >= 0) { - name = ptp_ocp_select_name_from_val(ptp_ocp_sma_in, def_val); + name = ptp_ocp_select_name_from_val(tbl, def_val); count += sysfs_emit_at(buf, count, "%s ", name); } if (count) @@ -2126,9 +2131,9 @@ ptp_ocp_show_inputs(u32 val, char *buf, int def_val) } static int -sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode) 
+sma_parse_inputs(struct ocp_selector *tbl[], const char *buf, + enum ptp_ocp_sma_mode *mode) { - struct ocp_selector *tbl[] = { ptp_ocp_sma_in, ptp_ocp_sma_out }; int idx, count, dir; char **argv; int ret; @@ -2187,17 +2192,20 @@ ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf, int default_in_val, int default_out_val) { struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1]; + struct ocp_selector **tbl; u32 val; + tbl = ocp_sma_tbl[bp->sma_tbl]; + val = ptp_ocp_sma_get(bp, sma_nr, sma->mode) & SMA_SELECT_MASK; if (sma->mode == SMA_MODE_IN) { if (sma->disabled) val = SMA_DISABLE; - return ptp_ocp_show_inputs(val, buf, default_in_val); + return ptp_ocp_show_inputs(tbl[0], val, buf, default_in_val); } - return ptp_ocp_show_output(val, buf, default_out_val); + return ptp_ocp_show_output(tbl[1], val, buf, default_out_val); } static ssize_t @@ -2288,7 +2296,7 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr) int val; mode = sma->mode; - val = sma_parse_inputs(buf, &mode); + val = sma_parse_inputs(ocp_sma_tbl[bp->sma_tbl], buf, &mode); if (val < 0) return val; @@ -2377,7 +2385,9 @@ static ssize_t available_sma_inputs_show(struct device *dev, struct device_attribute *attr, char *buf) { - return ptp_ocp_select_table_show(ptp_ocp_sma_in, buf); + struct ptp_ocp *bp = dev_get_drvdata(dev); + + return ptp_ocp_select_table_show(ocp_sma_tbl[bp->sma_tbl][0], buf); } static DEVICE_ATTR_RO(available_sma_inputs); @@ -2385,7 +2395,9 @@ static ssize_t available_sma_outputs_show(struct device *dev, struct device_attribute *attr, char *buf) { - return ptp_ocp_select_table_show(ptp_ocp_sma_out, buf); + struct ptp_ocp *bp = dev_get_drvdata(dev); + + return ptp_ocp_select_table_show(ocp_sma_tbl[bp->sma_tbl][1], buf); } static DEVICE_ATTR_RO(available_sma_outputs); -- cgit v1.2.3 From 3f3fe41c0bdfe541aa7aa9a8625efe3bd3ef60cc Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:49 -0700 Subject: ptp: ocp: constify selectors The ocp selectors are all constant, so label them as such. 
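The practical effect: tables declared static const are placed in read-only memory, and every helper that walks them must take a pointer-to-const as well, which is what the signature changes below do. A generic sketch of the pattern, with made-up names rather than the driver's:

    #include <linux/string.h>
    #include <linux/errno.h>

    struct example_selector {
            const char *name;
            int value;
    };

    static const struct example_selector example_tbl[] = {
            { .name = "NONE", .value = 0 },
            { .name = "TOD",  .value = 1 },
            { }
    };

    /* Taking 'const struct example_selector *' lets callers pass the
     * read-only table without casts or discarded-qualifier warnings.
     */
    static int example_lookup(const struct example_selector *tbl, const char *name)
    {
            int i;

            for (i = 0; tbl[i].name; i++)
                    if (sysfs_streq(tbl[i].name, name))
                            return tbl[i].value;
            return -EINVAL;
    }
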
Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 26f7830388b0..e3269dc79b42 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -648,7 +648,7 @@ struct ocp_selector { int value; }; -static struct ocp_selector ptp_ocp_clock[] = { +static const struct ocp_selector ptp_ocp_clock[] = { { .name = "NONE", .value = 0 }, { .name = "TOD", .value = 1 }, { .name = "IRIG", .value = 2 }, @@ -665,7 +665,7 @@ static struct ocp_selector ptp_ocp_clock[] = { #define SMA_SELECT_MASK ((1U << 15) - 1) #define SMA_DISABLE 0x10000 -static struct ocp_selector ptp_ocp_sma_in[] = { +static const struct ocp_selector ptp_ocp_sma_in[] = { { .name = "10Mhz", .value = 0x0000 }, { .name = "PPS1", .value = 0x0001 }, { .name = "PPS2", .value = 0x0002 }, @@ -683,7 +683,7 @@ static struct ocp_selector ptp_ocp_sma_in[] = { { } }; -static struct ocp_selector ptp_ocp_sma_out[] = { +static const struct ocp_selector ptp_ocp_sma_out[] = { { .name = "10Mhz", .value = 0x0000 }, { .name = "PHC", .value = 0x0001 }, { .name = "MAC", .value = 0x0002 }, @@ -700,12 +700,12 @@ static struct ocp_selector ptp_ocp_sma_out[] = { { } }; -static struct ocp_selector *ocp_sma_tbl[][2] = { +static const struct ocp_selector *ocp_sma_tbl[][2] = { { ptp_ocp_sma_in, ptp_ocp_sma_out }, }; static const char * -ptp_ocp_select_name_from_val(struct ocp_selector *tbl, int val) +ptp_ocp_select_name_from_val(const struct ocp_selector *tbl, int val) { int i; @@ -716,7 +716,7 @@ ptp_ocp_select_name_from_val(struct ocp_selector *tbl, int val) } static int -ptp_ocp_select_val_from_name(struct ocp_selector *tbl, const char *name) +ptp_ocp_select_val_from_name(const struct ocp_selector *tbl, const char *name) { const char *select; int i; @@ -730,7 +730,7 @@ ptp_ocp_select_val_from_name(struct ocp_selector *tbl, const char *name) } static ssize_t -ptp_ocp_select_table_show(struct ocp_selector *tbl, char *buf) +ptp_ocp_select_table_show(const struct ocp_selector *tbl, char *buf) { ssize_t count; int i; @@ -2093,7 +2093,8 @@ __handle_signal_inputs(struct ptp_ocp *bp, u32 val) */ static ssize_t -ptp_ocp_show_output(struct ocp_selector *tbl, u32 val, char *buf, int def_val) +ptp_ocp_show_output(const struct ocp_selector *tbl, u32 val, char *buf, + int def_val) { const char *name; ssize_t count; @@ -2107,7 +2108,8 @@ ptp_ocp_show_output(struct ocp_selector *tbl, u32 val, char *buf, int def_val) } static ssize_t -ptp_ocp_show_inputs(struct ocp_selector *tbl, u32 val, char *buf, int def_val) +ptp_ocp_show_inputs(const struct ocp_selector *tbl, u32 val, char *buf, + int def_val) { const char *name; ssize_t count; @@ -2131,7 +2133,7 @@ ptp_ocp_show_inputs(struct ocp_selector *tbl, u32 val, char *buf, int def_val) } static int -sma_parse_inputs(struct ocp_selector *tbl[], const char *buf, +sma_parse_inputs(const struct ocp_selector * const tbl[], const char *buf, enum ptp_ocp_sma_mode *mode) { int idx, count, dir; @@ -2192,7 +2194,7 @@ ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf, int default_in_val, int default_out_val) { struct ptp_ocp_sma_connector *sma = &bp->sma[sma_nr - 1]; - struct ocp_selector **tbl; + const struct ocp_selector * const *tbl; u32 val; tbl = ocp_sma_tbl[bp->sma_tbl]; -- cgit v1.2.3 From caab82cdbfe449c1597ab455c09d0fa514ef6947 Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:50 -0700 Subject: ptp: 
ocp: vectorize the sma accessor functions Move the SMA get and set functions into an operations vector for different boards. Create wrappers for the accessor functions. Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 314 +++++++++++++++++++++++++++----------------------- 1 file changed, 169 insertions(+), 145 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index e3269dc79b42..82fefd588add 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -321,7 +321,7 @@ struct ptp_ocp { u64 fw_cap; struct ptp_ocp_signal signal[4]; struct ptp_ocp_sma_connector sma[4]; - u8 sma_tbl; + const struct ocp_sma_op *sma_op; }; #define OCP_REQ_TIMESTAMP BIT(0) @@ -700,10 +700,31 @@ static const struct ocp_selector ptp_ocp_sma_out[] = { { } }; -static const struct ocp_selector *ocp_sma_tbl[][2] = { - { ptp_ocp_sma_in, ptp_ocp_sma_out }, +struct ocp_sma_op { + const struct ocp_selector *tbl[2]; + u32 (*get)(struct ptp_ocp *bp, int sma_nr); + int (*set_inputs)(struct ptp_ocp *bp, int sma_nr, u32 val); + int (*set_output)(struct ptp_ocp *bp, int sma_nr, u32 val); }; +static u32 +ptp_ocp_sma_get(struct ptp_ocp *bp, int sma_nr) +{ + return bp->sma_op->get(bp, sma_nr); +} + +static int +ptp_ocp_sma_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + return bp->sma_op->set_inputs(bp, sma_nr, val); +} + +static int +ptp_ocp_sma_set_output(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + return bp->sma_op->set_output(bp, sma_nr, val); +} + static const char * ptp_ocp_select_name_from_val(const struct ocp_selector *tbl, int val) { @@ -1875,6 +1896,140 @@ ptp_ocp_attr_group_add(struct ptp_ocp *bp, return err; } +static void +ptp_ocp_enable_fpga(u32 __iomem *reg, u32 bit, bool enable) +{ + u32 ctrl; + bool on; + + ctrl = ioread32(reg); + on = ctrl & bit; + if (on ^ enable) { + ctrl &= ~bit; + ctrl |= enable ? bit : 0; + iowrite32(ctrl, reg); + } +} + +static void +ptp_ocp_irig_out(struct ptp_ocp *bp, bool enable) +{ + return ptp_ocp_enable_fpga(&bp->irig_out->ctrl, + IRIG_M_CTRL_ENABLE, enable); +} + +static void +ptp_ocp_irig_in(struct ptp_ocp *bp, bool enable) +{ + return ptp_ocp_enable_fpga(&bp->irig_in->ctrl, + IRIG_S_CTRL_ENABLE, enable); +} + +static void +ptp_ocp_dcf_out(struct ptp_ocp *bp, bool enable) +{ + return ptp_ocp_enable_fpga(&bp->dcf_out->ctrl, + DCF_M_CTRL_ENABLE, enable); +} + +static void +ptp_ocp_dcf_in(struct ptp_ocp *bp, bool enable) +{ + return ptp_ocp_enable_fpga(&bp->dcf_in->ctrl, + DCF_S_CTRL_ENABLE, enable); +} + +static void +__handle_signal_outputs(struct ptp_ocp *bp, u32 val) +{ + ptp_ocp_irig_out(bp, val & 0x00100010); + ptp_ocp_dcf_out(bp, val & 0x00200020); +} + +static void +__handle_signal_inputs(struct ptp_ocp *bp, u32 val) +{ + ptp_ocp_irig_in(bp, val & 0x00100010); + ptp_ocp_dcf_in(bp, val & 0x00200020); +} + +static u32 +ptp_ocp_sma_fb_get(struct ptp_ocp *bp, int sma_nr) +{ + u32 __iomem *gpio; + u32 shift; + + if (bp->sma[sma_nr - 1].fixed_fcn) + return (sma_nr - 1) & 1; + + if (bp->sma[sma_nr - 1].mode == SMA_MODE_IN) + gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; + else + gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; + shift = sma_nr & 1 ? 0 : 16; + + return (ioread32(gpio) >> shift) & 0xffff; +} + +static int +ptp_ocp_sma_fb_set_output(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + u32 reg, mask, shift; + unsigned long flags; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; + shift = sma_nr & 1 ? 
0 : 16; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); + + __handle_signal_outputs(bp, reg); + + iowrite32(reg, gpio); + + spin_unlock_irqrestore(&bp->lock, flags); + + return 0; +} + +static int +ptp_ocp_sma_fb_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + u32 reg, mask, shift; + unsigned long flags; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; + shift = sma_nr & 1 ? 0 : 16; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); + + __handle_signal_inputs(bp, reg); + + iowrite32(reg, gpio); + + spin_unlock_irqrestore(&bp->lock, flags); + + return 0; +} + +static const struct ocp_sma_op ocp_fb_sma_op = { + .tbl = { ptp_ocp_sma_in, ptp_ocp_sma_out }, + .get = ptp_ocp_sma_fb_get, + .set_inputs = ptp_ocp_sma_fb_set_inputs, + .set_output = ptp_ocp_sma_fb_set_output, +}; + static void ptp_ocp_sma_init(struct ptp_ocp *bp) { @@ -1976,6 +2131,7 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r) bp->flash_start = 1024 * 4096; bp->eeprom_map = fb_eeprom_map; bp->fw_version = ioread32(&bp->image->version); + bp->sma_op = &ocp_fb_sma_op; ptp_ocp_fb_set_version(bp); @@ -2027,71 +2183,6 @@ ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data) return err; } -static void -ptp_ocp_enable_fpga(u32 __iomem *reg, u32 bit, bool enable) -{ - u32 ctrl; - bool on; - - ctrl = ioread32(reg); - on = ctrl & bit; - if (on ^ enable) { - ctrl &= ~bit; - ctrl |= enable ? bit : 0; - iowrite32(ctrl, reg); - } -} - -static void -ptp_ocp_irig_out(struct ptp_ocp *bp, bool enable) -{ - return ptp_ocp_enable_fpga(&bp->irig_out->ctrl, - IRIG_M_CTRL_ENABLE, enable); -} - -static void -ptp_ocp_irig_in(struct ptp_ocp *bp, bool enable) -{ - return ptp_ocp_enable_fpga(&bp->irig_in->ctrl, - IRIG_S_CTRL_ENABLE, enable); -} - -static void -ptp_ocp_dcf_out(struct ptp_ocp *bp, bool enable) -{ - return ptp_ocp_enable_fpga(&bp->dcf_out->ctrl, - DCF_M_CTRL_ENABLE, enable); -} - -static void -ptp_ocp_dcf_in(struct ptp_ocp *bp, bool enable) -{ - return ptp_ocp_enable_fpga(&bp->dcf_in->ctrl, - DCF_S_CTRL_ENABLE, enable); -} - -static void -__handle_signal_outputs(struct ptp_ocp *bp, u32 val) -{ - ptp_ocp_irig_out(bp, val & 0x00100010); - ptp_ocp_dcf_out(bp, val & 0x00200020); -} - -static void -__handle_signal_inputs(struct ptp_ocp *bp, u32 val) -{ - ptp_ocp_irig_in(bp, val & 0x00100010); - ptp_ocp_dcf_in(bp, val & 0x00200020); -} - -/* - * ANT0 == gps (in) - * ANT1 == sma1 (in) - * ANT2 == sma2 (in) - * ANT3 == sma3 (out) - * ANT4 == sma4 (out) - */ - static ssize_t ptp_ocp_show_output(const struct ocp_selector *tbl, u32 val, char *buf, int def_val) @@ -2171,24 +2262,6 @@ out: return ret; } -static u32 -ptp_ocp_sma_get(struct ptp_ocp *bp, int sma_nr, enum ptp_ocp_sma_mode mode) -{ - u32 __iomem *gpio; - u32 shift; - - if (bp->sma[sma_nr - 1].fixed_fcn) - return (sma_nr - 1) & 1; - - if (mode == SMA_MODE_IN) - gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; - else - gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; - shift = sma_nr & 1 ? 
0 : 16; - - return (ioread32(gpio) >> shift) & 0xffff; -} - static ssize_t ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf, int default_in_val, int default_out_val) @@ -2197,9 +2270,8 @@ ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, char *buf, const struct ocp_selector * const *tbl; u32 val; - tbl = ocp_sma_tbl[bp->sma_tbl]; - - val = ptp_ocp_sma_get(bp, sma_nr, sma->mode) & SMA_SELECT_MASK; + tbl = bp->sma_op->tbl; + val = ptp_ocp_sma_get(bp, sma_nr) & SMA_SELECT_MASK; if (sma->mode == SMA_MODE_IN) { if (sma->disabled) @@ -2242,54 +2314,6 @@ sma4_show(struct device *dev, struct device_attribute *attr, char *buf) return ptp_ocp_sma_show(bp, 4, buf, -1, 1); } -static void -ptp_ocp_sma_store_output(struct ptp_ocp *bp, int sma_nr, u32 val) -{ - u32 reg, mask, shift; - unsigned long flags; - u32 __iomem *gpio; - - gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; - shift = sma_nr & 1 ? 0 : 16; - - mask = 0xffff << (16 - shift); - - spin_lock_irqsave(&bp->lock, flags); - - reg = ioread32(gpio); - reg = (reg & mask) | (val << shift); - - __handle_signal_outputs(bp, reg); - - iowrite32(reg, gpio); - - spin_unlock_irqrestore(&bp->lock, flags); -} - -static void -ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) -{ - u32 reg, mask, shift; - unsigned long flags; - u32 __iomem *gpio; - - gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; - shift = sma_nr & 1 ? 0 : 16; - - mask = 0xffff << (16 - shift); - - spin_lock_irqsave(&bp->lock, flags); - - reg = ioread32(gpio); - reg = (reg & mask) | (val << shift); - - __handle_signal_inputs(bp, reg); - - iowrite32(reg, gpio); - - spin_unlock_irqrestore(&bp->lock, flags); -} - static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr) { @@ -2298,7 +2322,7 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr) int val; mode = sma->mode; - val = sma_parse_inputs(ocp_sma_tbl[bp->sma_tbl], buf, &mode); + val = sma_parse_inputs(bp->sma_op->tbl, buf, &mode); if (val < 0) return val; @@ -2315,9 +2339,9 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr) if (mode != sma->mode) { if (mode == SMA_MODE_IN) - ptp_ocp_sma_store_output(bp, sma_nr, 0); + ptp_ocp_sma_set_output(bp, sma_nr, 0); else - ptp_ocp_sma_store_inputs(bp, sma_nr, 0); + ptp_ocp_sma_set_inputs(bp, sma_nr, 0); sma->mode = mode; } @@ -2328,11 +2352,11 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr) val = 0; if (mode == SMA_MODE_IN) - ptp_ocp_sma_store_inputs(bp, sma_nr, val); + val = ptp_ocp_sma_set_inputs(bp, sma_nr, val); else - ptp_ocp_sma_store_output(bp, sma_nr, val); + val = ptp_ocp_sma_set_output(bp, sma_nr, val); - return 0; + return val; } static ssize_t @@ -2389,7 +2413,7 @@ available_sma_inputs_show(struct device *dev, { struct ptp_ocp *bp = dev_get_drvdata(dev); - return ptp_ocp_select_table_show(ocp_sma_tbl[bp->sma_tbl][0], buf); + return ptp_ocp_select_table_show(bp->sma_op->tbl[0], buf); } static DEVICE_ATTR_RO(available_sma_inputs); @@ -2399,7 +2423,7 @@ available_sma_outputs_show(struct device *dev, { struct ptp_ocp *bp = dev_get_drvdata(dev); - return ptp_ocp_select_table_show(ocp_sma_tbl[bp->sma_tbl][1], buf); + return ptp_ocp_select_table_show(bp->sma_op->tbl[1], buf); } static DEVICE_ATTR_RO(available_sma_outputs); -- cgit v1.2.3 From ee4cd7250c8f5fd47b77b2235057f4b52c1576a6 Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:51 -0700 Subject: ptp: ocp: add .init function for sma_op vector Create an .init function for the op 
vector, and a corresponding wrapper function, for different sma mapping setups. Add a default_fcn to the sma information, and use it when displaying information for pins which have fixed functions. Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 82fefd588add..3856e9c24333 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -243,6 +243,7 @@ struct ptp_ocp_sma_connector { bool fixed_fcn; bool fixed_dir; bool disabled; + u8 default_fcn; }; struct ocp_attr_group { @@ -702,11 +703,18 @@ static const struct ocp_selector ptp_ocp_sma_out[] = { struct ocp_sma_op { const struct ocp_selector *tbl[2]; + void (*init)(struct ptp_ocp *bp); u32 (*get)(struct ptp_ocp *bp, int sma_nr); int (*set_inputs)(struct ptp_ocp *bp, int sma_nr, u32 val); int (*set_output)(struct ptp_ocp *bp, int sma_nr, u32 val); }; +static void +ptp_ocp_sma_init(struct ptp_ocp *bp) +{ + return bp->sma_op->init(bp); +} + static u32 ptp_ocp_sma_get(struct ptp_ocp *bp, int sma_nr) { @@ -2023,15 +2031,8 @@ ptp_ocp_sma_fb_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) return 0; } -static const struct ocp_sma_op ocp_fb_sma_op = { - .tbl = { ptp_ocp_sma_in, ptp_ocp_sma_out }, - .get = ptp_ocp_sma_fb_get, - .set_inputs = ptp_ocp_sma_fb_set_inputs, - .set_output = ptp_ocp_sma_fb_set_output, -}; - static void -ptp_ocp_sma_init(struct ptp_ocp *bp) +ptp_ocp_sma_fb_init(struct ptp_ocp *bp) { u32 reg; int i; @@ -2041,6 +2042,8 @@ ptp_ocp_sma_init(struct ptp_ocp *bp) bp->sma[1].mode = SMA_MODE_IN; bp->sma[2].mode = SMA_MODE_OUT; bp->sma[3].mode = SMA_MODE_OUT; + for (i = 0; i < 4; i++) + bp->sma[i].default_fcn = i & 1; /* If no SMA1 map, the pin functions and directions are fixed. */ if (!bp->sma_map1) { @@ -2069,6 +2072,14 @@ ptp_ocp_sma_init(struct ptp_ocp *bp) } } +static const struct ocp_sma_op ocp_fb_sma_op = { + .tbl = { ptp_ocp_sma_in, ptp_ocp_sma_out }, + .init = ptp_ocp_sma_fb_init, + .get = ptp_ocp_sma_fb_get, + .set_inputs = ptp_ocp_sma_fb_set_inputs, + .set_output = ptp_ocp_sma_fb_set_output, +}; + static int ptp_ocp_fb_set_pins(struct ptp_ocp *bp) { @@ -2330,7 +2341,7 @@ ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr) return -EOPNOTSUPP; if (sma->fixed_fcn) { - if (val != ((sma_nr - 1) & 1)) + if (val != sma->default_fcn) return -EOPNOTSUPP; return 0; } -- cgit v1.2.3 From b88fdbba931e9c79772120cb60a6ea8adaf0b451 Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 19 May 2022 14:21:52 -0700 Subject: ptp: ocp: fix PPS source selector debugfs reporting The NTL timecard design has a PPS1 selector which selects the PPS source automatically, according to Section 1.9 of the documentation. If there is a SMA PPS input detected: - send signal to MAC and PPS slave selector. If there is a MAC PPS input detected: - send GNSS1 to the MAC - send MAC to the PPS slave If there is a GNSS1 input detected: - send GNSS1 to the MAC - send GNSS1 to the PPS slave. Change the debugfs summary so it reflects the correct mapping, for assistance in debugging. No functional change.
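Expressed as a standalone decode (a sketch only; pps1_decode and pps1_route are illustrative names, not driver symbols), the selector value read from pps_select->gpio1 maps to the two PPS consumers as follows, mirroring the src/mac_src handling added to ptp_ocp_summary_show() in the hunk below:

#include <stdint.h>

/* Sketch only; not part of the driver. */
struct pps1_route {
	const char *pps_slave_src;
	const char *mac_src;
};

/* Selector bits as reported by the updated summary: bit 0 means an SMA PPS
 * input was detected, bit 1 a MAC PPS input, bit 2 a GNSS1 PPS input.
 */
static struct pps1_route pps1_decode(uint32_t val, const char *sma_input)
{
	struct pps1_route r = { "----", "----" };

	if (val & 0x01) {		/* SMA PPS feeds both the MAC and the PPS slave */
		r.pps_slave_src = sma_input;
		r.mac_src = sma_input;
	} else if (val & 0x02) {	/* MAC PPS drives the slave; the MAC itself runs from GNSS1 */
		r.pps_slave_src = "MAC";
		r.mac_src = "GNSS1";
	} else if (val & 0x04) {	/* GNSS1 feeds both */
		r.pps_slave_src = "GNSS1";
		r.mac_src = "GNSS1";
	}
	return r;
}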
Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 3856e9c24333..9fac96fd0fd4 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -3099,10 +3099,10 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) struct device *dev = s->private; struct ptp_system_timestamp sts; struct ts_reg __iomem *ts_reg; + char *buf, *src, *mac_src; struct timespec64 ts; struct ptp_ocp *bp; u16 sma_val[4][2]; - char *src, *buf; u32 ctrl, val; bool on, map; int i; @@ -3265,17 +3265,26 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) if (bp->pps_select) { val = ioread32(&bp->pps_select->gpio1); src = &buf[80]; - if (val & 0x01) + mac_src = "GNSS1"; + if (val & 0x01) { gpio_input_map(src, bp, sma_val, 0, NULL); - else if (val & 0x02) + mac_src = src; + } else if (val & 0x02) { src = "MAC"; - else if (val & 0x04) + } else if (val & 0x04) { src = "GNSS1"; - else + } else { src = "----"; + mac_src = src; + } } else { src = "?"; + mac_src = src; } + seq_printf(s, "MAC PPS1 src: %s\n", mac_src); + + gpio_input_map(buf, bp, sma_val, 1, "GNSS2"); + seq_printf(s, "MAC PPS2 src: %s\n", buf); /* assumes automatic switchover/selection */ val = ioread32(&bp->reg->select); @@ -3300,12 +3309,6 @@ ptp_ocp_summary_show(struct seq_file *s, void *data) seq_printf(s, "%7s: %s, state: %s\n", "PHC src", buf, val & OCP_STATUS_IN_SYNC ? "sync" : "unsynced"); - /* reuses PPS1 src from earlier */ - seq_printf(s, "MAC PPS1 src: %s\n", src); - - gpio_input_map(buf, bp, sma_val, 1, "GNSS2"); - seq_printf(s, "MAC PPS2 src: %s\n", buf); - if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) { struct timespec64 sys_ts; s64 pre_ns, post_ns, ns; -- cgit v1.2.3 From 3c3673bde50c31d1f5ab0b39e9eeb555285786bb Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Thu, 19 May 2022 14:21:53 -0700 Subject: ptp: ocp: Add firmware header checks Right now it's possible to flash any kind of binary via devlink and break the card easily. This diff adds an optional header check when installing the firmware. 
Signed-off-by: Vadim Fedorenko Signed-off-by: Jonathan Lemon Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_ocp.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 73 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 9fac96fd0fd4..4519ef42b458 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -19,6 +19,7 @@ #include #include #include +#include #define PCI_VENDOR_ID_FACEBOOK 0x1d9b #define PCI_DEVICE_ID_FACEBOOK_TIMECARD 0x0400 @@ -213,6 +214,17 @@ struct ptp_ocp_flash_info { void *data; }; +struct ptp_ocp_firmware_header { + char magic[4]; + __be16 pci_vendor_id; + __be16 pci_device_id; + __be32 image_size; + __be16 hw_revision; + __be16 crc; +}; + +#define OCP_FIRMWARE_MAGIC_HEADER "OCPC" + struct ptp_ocp_i2c_info { const char *name; unsigned long fixed_rate; @@ -1323,25 +1335,81 @@ ptp_ocp_find_flash(struct ptp_ocp *bp) return dev; } +static int +ptp_ocp_devlink_fw_image(struct devlink *devlink, const struct firmware *fw, + const u8 **data, size_t *size) +{ + struct ptp_ocp *bp = devlink_priv(devlink); + const struct ptp_ocp_firmware_header *hdr; + size_t offset, length; + u16 crc; + + hdr = (const struct ptp_ocp_firmware_header *)fw->data; + if (memcmp(hdr->magic, OCP_FIRMWARE_MAGIC_HEADER, 4)) { + devlink_flash_update_status_notify(devlink, + "No firmware header found, flashing raw image", + NULL, 0, 0); + offset = 0; + length = fw->size; + goto out; + } + + if (be16_to_cpu(hdr->pci_vendor_id) != bp->pdev->vendor || + be16_to_cpu(hdr->pci_device_id) != bp->pdev->device) { + devlink_flash_update_status_notify(devlink, + "Firmware image compatibility check failed", + NULL, 0, 0); + return -EINVAL; + } + + offset = sizeof(*hdr); + length = be32_to_cpu(hdr->image_size); + if (length != (fw->size - offset)) { + devlink_flash_update_status_notify(devlink, + "Firmware image size check failed", + NULL, 0, 0); + return -EINVAL; + } + + crc = crc16(0xffff, &fw->data[offset], length); + if (be16_to_cpu(hdr->crc) != crc) { + devlink_flash_update_status_notify(devlink, + "Firmware image CRC check failed", + NULL, 0, 0); + return -EINVAL; + } + +out: + *data = &fw->data[offset]; + *size = length; + + return 0; +} + static int ptp_ocp_devlink_flash(struct devlink *devlink, struct device *dev, const struct firmware *fw) { struct mtd_info *mtd = dev_get_drvdata(dev); struct ptp_ocp *bp = devlink_priv(devlink); - size_t off, len, resid, wrote; + size_t off, len, size, resid, wrote; struct erase_info erase; size_t base, blksz; - int err = 0; + const u8 *data; + int err; + + err = ptp_ocp_devlink_fw_image(devlink, fw, &data, &size); + if (err) + goto out; off = 0; base = bp->flash_start; blksz = 4096; - resid = fw->size; + resid = size; while (resid) { devlink_flash_update_status_notify(devlink, "Flashing", - NULL, off, fw->size); + NULL, off, size); len = min_t(size_t, resid, blksz); erase.addr = base + off; @@ -1351,7 +1419,7 @@ ptp_ocp_devlink_flash(struct devlink *devlink, struct device *dev, if (err) goto out; - err = mtd_write(mtd, base + off, len, &wrote, &fw->data[off]); + err = mtd_write(mtd, base + off, len, &wrote, data + off); if (err) goto out; -- cgit v1.2.3
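For reference, the 16-byte header that ptp_ocp_devlink_fw_image() accepts can be prepended when packaging an image on the host. A minimal host-side sketch, assuming only the layout shown above (crc16_ocp and ocp_fill_header are illustrative names; hw_revision is carried in the header but not validated by this patch). The CRC is the kernel's crc16(), the bit-reflected 0x8005 polynomial seeded with 0xffff, computed over the payload that follows the header:

/* Host-side sketch; not part of the driver. */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons/htonl for the big-endian header fields */

/* Same bit-reflected CRC-16 (poly 0x8005, LSB-first, i.e. 0xA001) as lib/crc16. */
static uint16_t crc16_ocp(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

/* Fill the 16-byte "OCPC" header for a payload of image_size bytes. */
static void ocp_fill_header(uint8_t hdr[16], uint16_t vendor, uint16_t device,
			    uint32_t image_size, uint16_t hw_rev, uint16_t crc)
{
	uint16_t be16;
	uint32_t be32;

	memcpy(hdr, "OCPC", 4);
	be16 = htons(vendor);		memcpy(hdr + 4, &be16, 2);
	be16 = htons(device);		memcpy(hdr + 6, &be16, 2);
	be32 = htonl(image_size);	memcpy(hdr + 8, &be32, 4);
	be16 = htons(hw_rev);		memcpy(hdr + 12, &be16, 2);
	be16 = htons(crc);		memcpy(hdr + 14, &be16, 2);
}

For the Facebook Time Card the vendor/device pair would be 0x1d9b/0x0400, and the CRC would be crc16_ocp(0xffff, image, image_size).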